author    Gerrit Code Review <noreply-gerritcodereview@google.com> 2016-08-16 00:10:35 +0000
committer Gerrit Code Review <noreply-gerritcodereview@google.com> 2016-08-16 00:10:35 +0000
commit    c307e1628c1569fcd589d7e1a9b3adbe367b98e6 (patch)
tree      eb744c13e08419158d4e22520c4ea9b0a4345af6
parent    d47bcd157cdf0e937dbca01939d0c287e7c49acd (diff)
parent    88c8b7c7f98f9df718fa9cdf45f4a46726753add (diff)
Merge "Merge remote-tracking branch 'origin/dev.ssa' into merge"
-rw-r--r--  src/cmd/asm/internal/asm/operand_test.go  1
-rw-r--r--  src/cmd/compile/internal/amd64/ssa.go  165
-rw-r--r--  src/cmd/compile/internal/arm/prog.go  2
-rw-r--r--  src/cmd/compile/internal/arm/ssa.go  970
-rw-r--r--  src/cmd/compile/internal/arm64/galign.go  6
-rw-r--r--  src/cmd/compile/internal/arm64/prog.go  19
-rw-r--r--  src/cmd/compile/internal/arm64/ssa.go  865
-rw-r--r--  src/cmd/compile/internal/gc/builtin.go  17
-rw-r--r--  src/cmd/compile/internal/gc/builtin/runtime.go  2
-rw-r--r--  src/cmd/compile/internal/gc/ssa.go  226
-rw-r--r--  src/cmd/compile/internal/gc/testdata/arith_ssa.go  440
-rw-r--r--  src/cmd/compile/internal/gc/testdata/string_ssa.go  64
-rw-r--r--  src/cmd/compile/internal/gc/type.go  1
-rw-r--r--  src/cmd/compile/internal/gc/walk.go  48
-rw-r--r--  src/cmd/compile/internal/ppc64/galign.go  5
-rw-r--r--  src/cmd/compile/internal/ppc64/prog.go  16
-rw-r--r--  src/cmd/compile/internal/ppc64/ssa.go  1021
-rw-r--r--  src/cmd/compile/internal/ssa/config.go  79
-rw-r--r--  src/cmd/compile/internal/ssa/cse.go  23
-rw-r--r--  src/cmd/compile/internal/ssa/deadstore.go  2
-rw-r--r--  src/cmd/compile/internal/ssa/decompose.go  47
-rw-r--r--  src/cmd/compile/internal/ssa/export_test.go  6
-rw-r--r--  src/cmd/compile/internal/ssa/flagalloc.go  26
-rw-r--r--  src/cmd/compile/internal/ssa/gen/386.rules  1265
-rw-r--r--  src/cmd/compile/internal/ssa/gen/386Ops.go  508
-rw-r--r--  src/cmd/compile/internal/ssa/gen/AMD64.rules  227
-rw-r--r--  src/cmd/compile/internal/ssa/gen/AMD64Ops.go  305
-rw-r--r--  src/cmd/compile/internal/ssa/gen/ARM.rules  1192
-rw-r--r--  src/cmd/compile/internal/ssa/gen/ARM64.rules  1057
-rw-r--r--  src/cmd/compile/internal/ssa/gen/ARM64Ops.go  455
-rw-r--r--  src/cmd/compile/internal/ssa/gen/ARMOps.go  506
-rw-r--r--  src/cmd/compile/internal/ssa/gen/PPC64.rules  573
-rw-r--r--  src/cmd/compile/internal/ssa/gen/PPC64Ops.go  395
-rw-r--r--  src/cmd/compile/internal/ssa/gen/dec64.rules  407
-rw-r--r--  src/cmd/compile/internal/ssa/gen/dec64Ops.go  20
-rw-r--r--  src/cmd/compile/internal/ssa/gen/generic.rules  32
-rw-r--r--  src/cmd/compile/internal/ssa/gen/genericOps.go  181
-rw-r--r--  src/cmd/compile/internal/ssa/gen/main.go  59
-rw-r--r--  src/cmd/compile/internal/ssa/gen/rulegen.go  409
-rw-r--r--  src/cmd/compile/internal/ssa/html.go  2
-rw-r--r--  src/cmd/compile/internal/ssa/location.go  13
-rw-r--r--  src/cmd/compile/internal/ssa/lower.go  9
-rw-r--r--  src/cmd/compile/internal/ssa/op.go  38
-rw-r--r--  src/cmd/compile/internal/ssa/opGen.go  11201
-rw-r--r--  src/cmd/compile/internal/ssa/opt.go  3
-rw-r--r--  src/cmd/compile/internal/ssa/regalloc.go  233
-rw-r--r--  src/cmd/compile/internal/ssa/rewrite.go  18
-rw-r--r--  src/cmd/compile/internal/ssa/rewrite386.go  14822
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteAMD64.go  10351
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteARM.go  18060
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteARM64.go  15213
-rw-r--r--  src/cmd/compile/internal/ssa/rewritePPC64.go  7277
-rw-r--r--  src/cmd/compile/internal/ssa/rewritedec64.go  2534
-rw-r--r--  src/cmd/compile/internal/ssa/rewritegeneric.go  456
-rw-r--r--  src/cmd/compile/internal/ssa/schedule.go  39
-rw-r--r--  src/cmd/compile/internal/ssa/tighten.go  8
-rw-r--r--  src/cmd/compile/internal/ssa/type.go  57
-rw-r--r--  src/cmd/compile/internal/ssa/type_test.go  1
-rw-r--r--  src/cmd/compile/internal/x86/387.go  386
-rw-r--r--  src/cmd/compile/internal/x86/galign.go  5
-rw-r--r--  src/cmd/compile/internal/x86/ssa.go  1020
-rw-r--r--  src/cmd/internal/obj/arm/a.out.go  2
-rw-r--r--  src/cmd/internal/obj/arm/anames.go  2
-rw-r--r--  src/cmd/internal/obj/arm/asm5.go  8
-rw-r--r--  src/cmd/internal/obj/arm/obj5.go  4
-rw-r--r--  src/cmd/internal/obj/arm64/a.out.go  8
-rw-r--r--  src/cmd/internal/obj/arm64/anames7.go  1
-rw-r--r--  src/cmd/internal/obj/arm64/asm7.go  358
-rw-r--r--  src/cmd/internal/obj/arm64/obj7.go  30
-rw-r--r--  src/cmd/internal/obj/link.go  6
-rw-r--r--  src/cmd/internal/obj/ppc64/a.out.go  15
-rw-r--r--  src/cmd/internal/obj/ppc64/anames.go  2
-rw-r--r--  src/cmd/internal/obj/ppc64/asm9.go  18
-rw-r--r--  src/cmd/internal/obj/util.go  25
-rw-r--r--  src/cmd/internal/obj/x86/asm6.go  23
-rw-r--r--  src/cmd/internal/obj/x86/obj6.go  31
-rw-r--r--  src/cmd/link/internal/x86/asm.go  44
-rw-r--r--  src/runtime/asm_386.s  22
-rw-r--r--  src/runtime/mgc.go  9
-rw-r--r--  src/runtime/runtime1.go  1
-rw-r--r--  src/runtime/softfloat_arm.go  35
-rw-r--r--  src/runtime/vlrt.go  14
-rw-r--r--  test/live.go  2
-rw-r--r--  test/live_ssa.go  2
-rw-r--r--  test/nilptr3.go  2
-rw-r--r--  test/nilptr3_ssa.go  4
-rw-r--r--  test/sliceopt.go  2
87 files changed, 87569 insertions, 6489 deletions
diff --git a/src/cmd/asm/internal/asm/operand_test.go b/src/cmd/asm/internal/asm/operand_test.go
index eafc8a361e..a8d8f5f34f 100644
--- a/src/cmd/asm/internal/asm/operand_test.go
+++ b/src/cmd/asm/internal/asm/operand_test.go
@@ -17,6 +17,7 @@ import (
func setArch(goarch string) (*arch.Arch, *obj.Link) {
os.Setenv("GOOS", "linux") // obj can handle this OS for all architectures.
+ os.Setenv("GOARCH", goarch)
architecture := arch.Set(goarch)
if architecture == nil {
panic("asm: unrecognized architecture " + goarch)
diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index 0350c295ec..688025753c 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -239,89 +239,87 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
opregreg(v.Op.Asm(), r, gc.SSARegNum(v.Args[1]))
- case ssa.OpAMD64DIVQ, ssa.OpAMD64DIVL, ssa.OpAMD64DIVW,
- ssa.OpAMD64DIVQU, ssa.OpAMD64DIVLU, ssa.OpAMD64DIVWU,
- ssa.OpAMD64MODQ, ssa.OpAMD64MODL, ssa.OpAMD64MODW,
- ssa.OpAMD64MODQU, ssa.OpAMD64MODLU, ssa.OpAMD64MODWU:
+ case ssa.OpAMD64DIVQU, ssa.OpAMD64DIVLU, ssa.OpAMD64DIVWU:
+ // Arg[0] (the dividend) is in AX.
+ // Arg[1] (the divisor) can be in any other register.
+ // Result[0] (the quotient) is in AX.
+ // Result[1] (the remainder) is in DX.
+ r := gc.SSARegNum(v.Args[1])
- // Arg[0] is already in AX as it's the only register we allow
- // and AX is the only output
- x := gc.SSARegNum(v.Args[1])
-
- // CPU faults upon signed overflow, which occurs when most
- // negative int is divided by -1.
- var j *obj.Prog
- if v.Op == ssa.OpAMD64DIVQ || v.Op == ssa.OpAMD64DIVL ||
- v.Op == ssa.OpAMD64DIVW || v.Op == ssa.OpAMD64MODQ ||
- v.Op == ssa.OpAMD64MODL || v.Op == ssa.OpAMD64MODW {
-
- var c *obj.Prog
- switch v.Op {
- case ssa.OpAMD64DIVQ, ssa.OpAMD64MODQ:
- c = gc.Prog(x86.ACMPQ)
- j = gc.Prog(x86.AJEQ)
- // go ahead and sign extend to save doing it later
- gc.Prog(x86.ACQO)
-
- case ssa.OpAMD64DIVL, ssa.OpAMD64MODL:
- c = gc.Prog(x86.ACMPL)
- j = gc.Prog(x86.AJEQ)
- gc.Prog(x86.ACDQ)
+ // Zero extend dividend.
+ c := gc.Prog(x86.AXORL)
+ c.From.Type = obj.TYPE_REG
+ c.From.Reg = x86.REG_DX
+ c.To.Type = obj.TYPE_REG
+ c.To.Reg = x86.REG_DX
- case ssa.OpAMD64DIVW, ssa.OpAMD64MODW:
- c = gc.Prog(x86.ACMPW)
- j = gc.Prog(x86.AJEQ)
- gc.Prog(x86.ACWD)
- }
- c.From.Type = obj.TYPE_REG
- c.From.Reg = x
- c.To.Type = obj.TYPE_CONST
- c.To.Offset = -1
+ // Issue divide.
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
- j.To.Type = obj.TYPE_BRANCH
+ case ssa.OpAMD64DIVQ, ssa.OpAMD64DIVL, ssa.OpAMD64DIVW:
+ // Arg[0] (the dividend) is in AX.
+ // Arg[1] (the divisor) can be in any other register.
+ // Result[0] (the quotient) is in AX.
+ // Result[1] (the remainder) is in DX.
+ r := gc.SSARegNum(v.Args[1])
+ // CPU faults upon signed overflow, which occurs when the most
+ // negative int is divided by -1. Handle divide by -1 as a special case.
+ var c *obj.Prog
+ switch v.Op {
+ case ssa.OpAMD64DIVQ:
+ c = gc.Prog(x86.ACMPQ)
+ case ssa.OpAMD64DIVL:
+ c = gc.Prog(x86.ACMPL)
+ case ssa.OpAMD64DIVW:
+ c = gc.Prog(x86.ACMPW)
}
+ c.From.Type = obj.TYPE_REG
+ c.From.Reg = r
+ c.To.Type = obj.TYPE_CONST
+ c.To.Offset = -1
+ j1 := gc.Prog(x86.AJEQ)
+ j1.To.Type = obj.TYPE_BRANCH
- // for unsigned ints, we sign extend by setting DX = 0
- // signed ints were sign extended above
- if v.Op == ssa.OpAMD64DIVQU || v.Op == ssa.OpAMD64MODQU ||
- v.Op == ssa.OpAMD64DIVLU || v.Op == ssa.OpAMD64MODLU ||
- v.Op == ssa.OpAMD64DIVWU || v.Op == ssa.OpAMD64MODWU {
- c := gc.Prog(x86.AXORQ)
- c.From.Type = obj.TYPE_REG
- c.From.Reg = x86.REG_DX
- c.To.Type = obj.TYPE_REG
- c.To.Reg = x86.REG_DX
+ // Sign extend dividend.
+ switch v.Op {
+ case ssa.OpAMD64DIVQ:
+ gc.Prog(x86.ACQO)
+ case ssa.OpAMD64DIVL:
+ gc.Prog(x86.ACDQ)
+ case ssa.OpAMD64DIVW:
+ gc.Prog(x86.ACWD)
}
+ // Issue divide.
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
- p.From.Reg = x
+ p.From.Reg = r
- // signed division, rest of the check for -1 case
- if j != nil {
- j2 := gc.Prog(obj.AJMP)
- j2.To.Type = obj.TYPE_BRANCH
+ // Skip over -1 fixup code.
+ j2 := gc.Prog(obj.AJMP)
+ j2.To.Type = obj.TYPE_BRANCH
- var n *obj.Prog
- if v.Op == ssa.OpAMD64DIVQ || v.Op == ssa.OpAMD64DIVL ||
- v.Op == ssa.OpAMD64DIVW {
- // n * -1 = -n
- n = gc.Prog(x86.ANEGQ)
- n.To.Type = obj.TYPE_REG
- n.To.Reg = x86.REG_AX
- } else {
- // n % -1 == 0
- n = gc.Prog(x86.AXORQ)
- n.From.Type = obj.TYPE_REG
- n.From.Reg = x86.REG_DX
- n.To.Type = obj.TYPE_REG
- n.To.Reg = x86.REG_DX
- }
+ // Issue -1 fixup code.
+ // n / -1 = -n
+ n1 := gc.Prog(x86.ANEGQ)
+ n1.To.Type = obj.TYPE_REG
+ n1.To.Reg = x86.REG_AX
- j.To.Val = n
- j2.To.Val = s.Pc()
- }
+ // n % -1 == 0
+ n2 := gc.Prog(x86.AXORL)
+ n2.From.Type = obj.TYPE_REG
+ n2.From.Reg = x86.REG_DX
+ n2.To.Type = obj.TYPE_REG
+ n2.To.Reg = x86.REG_DX
+
+ // TODO(khr): issue only the -1 fixup code we need.
+ // For instance, if only the quotient is used, no point in zeroing the remainder.
+
+ j1.To.Val = n1
+ j2.To.Val = s.Pc()
case ssa.OpAMD64HMULQ, ssa.OpAMD64HMULL, ssa.OpAMD64HMULW, ssa.OpAMD64HMULB,
ssa.OpAMD64HMULQU, ssa.OpAMD64HMULLU, ssa.OpAMD64HMULWU, ssa.OpAMD64HMULBU:
@@ -500,8 +498,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
- case ssa.OpAMD64LEAQ:
- p := gc.Prog(x86.ALEAQ)
+ case ssa.OpAMD64LEAQ, ssa.OpAMD64LEAL:
+ p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = gc.SSARegNum(v.Args[0])
gc.AddAux(&p.From, v)
@@ -705,7 +703,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
p.To.Offset = v.AuxInt
- case ssa.OpCopy, ssa.OpAMD64MOVQconvert: // TODO: use MOVQreg for reg->reg copies instead of OpCopy?
+ case ssa.OpCopy, ssa.OpAMD64MOVQconvert, ssa.OpAMD64MOVLconvert: // TODO: use MOVQreg for reg->reg copies instead of OpCopy?
if v.Type.IsMemory() {
return
}
@@ -754,27 +752,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Name = obj.NAME_AUTO
}
case ssa.OpPhi:
- // just check to make sure regalloc and stackalloc did it right
- if v.Type.IsMemory() {
- return
- }
- f := v.Block.Func
- loc := f.RegAlloc[v.ID]
- for _, a := range v.Args {
- if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
- v.Fatalf("phi arg at different location than phi: %v @ %v, but arg %v @ %v\n%s\n", v, loc, a, aloc, v.Block.Func)
- }
- }
+ gc.CheckLoweredPhi(v)
case ssa.OpInitMem:
// memory arg needs no code
case ssa.OpArg:
// input args need no code
case ssa.OpAMD64LoweredGetClosurePtr:
- // Output is hardwired to DX only,
- // and DX contains the closure pointer on
- // closure entry, and this "instruction"
- // is scheduled to the very beginning
- // of the entry block.
+ // Closure pointer is DX.
+ gc.CheckLoweredGetClosurePtr(v)
case ssa.OpAMD64LoweredGetG:
r := gc.SSARegNum(v)
// See the comments in cmd/internal/obj/x86/obj6.go
@@ -871,6 +856,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = gc.SSARegNum(v)
case ssa.OpSP, ssa.OpSB:
// nothing to do
+ case ssa.OpSelect0, ssa.OpSelect1:
+ // nothing to do
case ssa.OpAMD64SETEQ, ssa.OpAMD64SETNE,
ssa.OpAMD64SETL, ssa.OpAMD64SETLE,
ssa.OpAMD64SETG, ssa.OpAMD64SETGE,
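
[Editor's note] The rewritten division cases above separate unsigned from signed divides: unsigned division only needs DX zeroed, while signed division must special-case a divisor of -1, since the hardware divide faults on the overflowing quotient. A runnable Go sketch (mine, not part of this commit) of the language semantics the NEG/XOR fixup preserves:

package main

import (
	"fmt"
	"math"
)

func main() {
	x := int64(math.MinInt64)
	y := int64(-1)
	// IDIV would fault on this quotient; the compiler emits NEGQ AX instead:
	fmt.Println(x / y) // -9223372036854775808: negating MinInt64 wraps back to itself
	// ...and XORL DX, DX for the remainder:
	fmt.Println(x % y) // 0: n % -1 is always 0
}
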
diff --git a/src/cmd/compile/internal/arm/prog.go b/src/cmd/compile/internal/arm/prog.go
index f69548a30f..1cbaa2699d 100644
--- a/src/cmd/compile/internal/arm/prog.go
+++ b/src/cmd/compile/internal/arm/prog.go
@@ -79,6 +79,8 @@ var progtable = [arm.ALAST & obj.AMask]obj.ProgInfo{
arm.AMULF & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | RightRdwr},
arm.ASUBD & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | RightRdwr},
arm.ASUBF & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | RightRdwr},
+ arm.ANEGD & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | RightRdwr},
+ arm.ANEGF & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | RightRdwr},
arm.ASQRTD & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | RightRdwr},
// Conversions.
diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go
index 8f466e338a..f16dc0f95f 100644
--- a/src/cmd/compile/internal/arm/ssa.go
+++ b/src/cmd/compile/internal/arm/ssa.go
@@ -5,6 +5,9 @@
package arm
import (
+ "fmt"
+ "math"
+
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
"cmd/internal/obj"
@@ -16,7 +19,145 @@ var ssaRegToReg = []int16{
arm.REG_R1,
arm.REG_R2,
arm.REG_R3,
+ arm.REG_R4,
+ arm.REG_R5,
+ arm.REG_R6,
+ arm.REG_R7,
+ arm.REG_R8,
+ arm.REG_R9,
+ arm.REGG, // aka R10
+ arm.REG_R11,
+ arm.REG_R12,
arm.REGSP, // aka R13
+ arm.REG_R14,
+ arm.REG_R15,
+
+ arm.REG_F0,
+ arm.REG_F1,
+ arm.REG_F2,
+ arm.REG_F3,
+ arm.REG_F4,
+ arm.REG_F5,
+ arm.REG_F6,
+ arm.REG_F7,
+ arm.REG_F8,
+ arm.REG_F9,
+ arm.REG_F10,
+ arm.REG_F11,
+ arm.REG_F12,
+ arm.REG_F13,
+ arm.REG_F14,
+ arm.REG_F15,
+
+ arm.REG_CPSR, // flag
+ 0, // SB isn't a real register. We fill an Addr.Reg field with 0 in this case.
+}
+
+// Smallest possible faulting page at address zero,
+// see ../../../../runtime/internal/sys/arch_arm.go
+const minZeroPage = 4096
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t ssa.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return arm.AMOVF
+ case 8:
+ return arm.AMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return arm.AMOVB
+ } else {
+ return arm.AMOVBU
+ }
+ case 2:
+ if t.IsSigned() {
+ return arm.AMOVH
+ } else {
+ return arm.AMOVHU
+ }
+ case 4:
+ return arm.AMOVW
+ }
+ }
+ panic("bad load type")
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t ssa.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return arm.AMOVF
+ case 8:
+ return arm.AMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ return arm.AMOVB
+ case 2:
+ return arm.AMOVH
+ case 4:
+ return arm.AMOVW
+ }
+ }
+ panic("bad store type")
+}
+
+// The shift type is used as the Offset in obj.TYPE_SHIFT operands to encode shifted-register operands; its bit layout is decoded in the editor's sketch following this file's diff.
+type shift int64
+
+// copied from ../../../internal/obj/util.go:/TYPE_SHIFT
+func (v shift) String() string {
+ op := "<<>>->@>"[((v>>5)&3)<<1:]
+ if v&(1<<4) != 0 {
+ // register shift
+ return fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15)
+ } else {
+ // constant shift
+ return fmt.Sprintf("R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31)
+ }
+}
+
+// makeshift encodes a register shifted by a constant
+func makeshift(reg int16, typ int64, s int64) shift {
+ return shift(int64(reg&0xf) | typ | (s&31)<<7)
+}
+
+// genshift generates a Prog for r = r0 op (r1 shifted by s)
+func genshift(as obj.As, r0, r1, r int16, typ int64, s int64) *obj.Prog {
+ p := gc.Prog(as)
+ p.From.Type = obj.TYPE_SHIFT
+ p.From.Offset = int64(makeshift(r1, typ, s))
+ p.Reg = r0
+ if r != 0 {
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ return p
+}
+
+// makeregshift encodes a register shifted by a register
+func makeregshift(r1 int16, typ int64, r2 int16) shift {
+ return shift(int64(r1&0xf) | typ | int64(r2&0xf)<<8 | 1<<4)
+}
+
+// genregshift generates a Prog for r = r0 op (r1 shifted by r2)
+func genregshift(as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
+ p := gc.Prog(as)
+ p.From.Type = obj.TYPE_SHIFT
+ p.From.Offset = int64(makeregshift(r1, typ, r2))
+ p.Reg = r0
+ if r != 0 {
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ return p
}
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
@@ -26,12 +167,44 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// memory arg needs no code
case ssa.OpArg:
// input args need no code
- case ssa.OpSP, ssa.OpSB:
+ case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
+ // nothing to do
+ case ssa.OpCopy, ssa.OpARMMOVWconvert, ssa.OpARMMOVWreg:
+ if v.Type.IsMemory() {
+ return
+ }
+ x := gc.SSARegNum(v.Args[0])
+ y := gc.SSARegNum(v)
+ if x == y {
+ return
+ }
+ as := arm.AMOVW
+ if v.Type.IsFloat() {
+ switch v.Type.Size() {
+ case 4:
+ as = arm.AMOVF
+ case 8:
+ as = arm.AMOVD
+ default:
+ panic("bad float size")
+ }
+ }
+ p := gc.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ case ssa.OpARMMOVWnop:
+ if gc.SSARegNum(v) != gc.SSARegNum(v.Args[0]) {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
// nothing to do
- case ssa.OpCopy:
case ssa.OpLoadReg:
- // TODO: by type
- p := gc.Prog(arm.AMOVW)
+ if v.Type.IsFlags() {
+ v.Unimplementedf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ p := gc.Prog(loadByType(v.Type))
n, off := gc.AutoVar(v.Args[0])
p.From.Type = obj.TYPE_MEM
p.From.Node = n
@@ -45,10 +218,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
-
+ case ssa.OpPhi:
+ gc.CheckLoweredPhi(v)
case ssa.OpStoreReg:
- // TODO: by type
- p := gc.Prog(arm.AMOVW)
+ if v.Type.IsFlags() {
+ v.Unimplementedf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ p := gc.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = gc.SSARegNum(v.Args[0])
n, off := gc.AutoVar(v)
@@ -62,55 +239,483 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} else {
p.To.Name = obj.NAME_AUTO
}
- case ssa.OpARMADD:
+ case ssa.OpARMDIV,
+ ssa.OpARMDIVU,
+ ssa.OpARMMOD,
+ ssa.OpARMMODU:
+ // Note: for software division the assembler rewrites these
+ // instructions into a sequence of instructions:
+ // - it puts the numerator in R11 and the denominator in g.m.divmod,
+ //   then calls (say) _udiv
+ // - _udiv saves R0-R3 on the stack, calls udiv, and restores R0-R3
+ //   before returning
+ // - udiv does the actual work
+ // TODO: set appropriate regmasks and call udiv directly?
+ // need to be careful with the negative case
+ // Or, since soft div is already expensive, maybe we don't care?
+ fallthrough
+ case ssa.OpARMADD,
+ ssa.OpARMADC,
+ ssa.OpARMSUB,
+ ssa.OpARMSBC,
+ ssa.OpARMRSB,
+ ssa.OpARMAND,
+ ssa.OpARMOR,
+ ssa.OpARMXOR,
+ ssa.OpARMBIC,
+ ssa.OpARMMUL,
+ ssa.OpARMADDF,
+ ssa.OpARMADDD,
+ ssa.OpARMSUBF,
+ ssa.OpARMSUBD,
+ ssa.OpARMMULF,
+ ssa.OpARMMULD,
+ ssa.OpARMDIVF,
+ ssa.OpARMDIVD:
r := gc.SSARegNum(v)
r1 := gc.SSARegNum(v.Args[0])
r2 := gc.SSARegNum(v.Args[1])
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
- p.From.Reg = r1
- p.Reg = r2
+ p.From.Reg = r2
+ p.Reg = r1
p.To.Type = obj.TYPE_REG
p.To.Reg = r
- case ssa.OpARMADDconst:
+ case ssa.OpARMADDS,
+ ssa.OpARMSUBS:
+ r := gc.SSARegNum1(v)
+ r1 := gc.SSARegNum(v.Args[0])
+ r2 := gc.SSARegNum(v.Args[1])
+ p := gc.Prog(v.Op.Asm())
+ p.Scond = arm.C_SBIT
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpARMSLL,
+ ssa.OpARMSRL,
+ ssa.OpARMSRA:
+ r := gc.SSARegNum(v)
+ r1 := gc.SSARegNum(v.Args[0])
+ r2 := gc.SSARegNum(v.Args[1])
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpARMSRAcond:
+ // ARM shift instructions use only the low-order byte of the shift amount,
+ // so generate conditional instructions to deal with large shifts.
+ // The flags are already set:
+ // SRA.HS $31, Rarg0, Rdst // shift 31 bits to get the sign bit
+ // SRA.LO Rarg1, Rarg0, Rdst
+ r := gc.SSARegNum(v)
+ r1 := gc.SSARegNum(v.Args[0])
+ r2 := gc.SSARegNum(v.Args[1])
+ p := gc.Prog(arm.ASRA)
+ p.Scond = arm.C_SCOND_HS
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 31
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ p = gc.Prog(arm.ASRA)
+ p.Scond = arm.C_SCOND_LO
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
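	// Editor's sketch (not part of this commit): the HS/LO pair above
	// preserves Go's shift semantics. ARM uses only the low byte of a
	// register shift amount, but a Go signed right shift by 32 or more
	// must still produce all sign bits, i.e. behave like a shift by 31:
	//
	//	func sra(x int32, s uint32) int32 {
	//		if s >= 32 {
	//			return x >> 31 // all sign bits
	//		}
	//		return x >> s
	//	}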
+ case ssa.OpARMADDconst,
+ ssa.OpARMADCconst,
+ ssa.OpARMSUBconst,
+ ssa.OpARMSBCconst,
+ ssa.OpARMRSBconst,
+ ssa.OpARMRSCconst,
+ ssa.OpARMANDconst,
+ ssa.OpARMORconst,
+ ssa.OpARMXORconst,
+ ssa.OpARMBICconst,
+ ssa.OpARMSLLconst,
+ ssa.OpARMSRLconst,
+ ssa.OpARMSRAconst:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
- if v.Aux != nil {
- panic("can't handle symbolic constant yet")
- }
p.Reg = gc.SSARegNum(v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpARMADDSconst,
+ ssa.OpARMSUBSconst,
+ ssa.OpARMRSBSconst:
+ p := gc.Prog(v.Op.Asm())
+ p.Scond = arm.C_SBIT
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum1(v)
+ case ssa.OpARMSRRconst:
+ genshift(arm.AMOVW, 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v), arm.SHIFT_RR, v.AuxInt)
+ case ssa.OpARMADDshiftLL,
+ ssa.OpARMADCshiftLL,
+ ssa.OpARMSUBshiftLL,
+ ssa.OpARMSBCshiftLL,
+ ssa.OpARMRSBshiftLL,
+ ssa.OpARMRSCshiftLL,
+ ssa.OpARMANDshiftLL,
+ ssa.OpARMORshiftLL,
+ ssa.OpARMXORshiftLL,
+ ssa.OpARMBICshiftLL:
+ genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LL, v.AuxInt)
+ case ssa.OpARMADDSshiftLL,
+ ssa.OpARMSUBSshiftLL,
+ ssa.OpARMRSBSshiftLL:
+ p := genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum1(v), arm.SHIFT_LL, v.AuxInt)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMADDshiftRL,
+ ssa.OpARMADCshiftRL,
+ ssa.OpARMSUBshiftRL,
+ ssa.OpARMSBCshiftRL,
+ ssa.OpARMRSBshiftRL,
+ ssa.OpARMRSCshiftRL,
+ ssa.OpARMANDshiftRL,
+ ssa.OpARMORshiftRL,
+ ssa.OpARMXORshiftRL,
+ ssa.OpARMBICshiftRL:
+ genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LR, v.AuxInt)
+ case ssa.OpARMADDSshiftRL,
+ ssa.OpARMSUBSshiftRL,
+ ssa.OpARMRSBSshiftRL:
+ p := genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum1(v), arm.SHIFT_LR, v.AuxInt)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMADDshiftRA,
+ ssa.OpARMADCshiftRA,
+ ssa.OpARMSUBshiftRA,
+ ssa.OpARMSBCshiftRA,
+ ssa.OpARMRSBshiftRA,
+ ssa.OpARMRSCshiftRA,
+ ssa.OpARMANDshiftRA,
+ ssa.OpARMORshiftRA,
+ ssa.OpARMXORshiftRA,
+ ssa.OpARMBICshiftRA:
+ genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_AR, v.AuxInt)
+ case ssa.OpARMADDSshiftRA,
+ ssa.OpARMSUBSshiftRA,
+ ssa.OpARMRSBSshiftRA:
+ p := genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum1(v), arm.SHIFT_AR, v.AuxInt)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMMVNshiftLL:
+ genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v), arm.SHIFT_LL, v.AuxInt)
+ case ssa.OpARMMVNshiftRL:
+ genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v), arm.SHIFT_LR, v.AuxInt)
+ case ssa.OpARMMVNshiftRA:
+ genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v), arm.SHIFT_AR, v.AuxInt)
+ case ssa.OpARMMVNshiftLLreg:
+ genregshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LL)
+ case ssa.OpARMMVNshiftRLreg:
+ genregshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LR)
+ case ssa.OpARMMVNshiftRAreg:
+ genregshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_AR)
+ case ssa.OpARMADDshiftLLreg,
+ ssa.OpARMADCshiftLLreg,
+ ssa.OpARMSUBshiftLLreg,
+ ssa.OpARMSBCshiftLLreg,
+ ssa.OpARMRSBshiftLLreg,
+ ssa.OpARMRSCshiftLLreg,
+ ssa.OpARMANDshiftLLreg,
+ ssa.OpARMORshiftLLreg,
+ ssa.OpARMXORshiftLLreg,
+ ssa.OpARMBICshiftLLreg:
+ genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum(v), arm.SHIFT_LL)
+ case ssa.OpARMADDSshiftLLreg,
+ ssa.OpARMSUBSshiftLLreg,
+ ssa.OpARMRSBSshiftLLreg:
+ p := genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum1(v), arm.SHIFT_LL)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMADDshiftRLreg,
+ ssa.OpARMADCshiftRLreg,
+ ssa.OpARMSUBshiftRLreg,
+ ssa.OpARMSBCshiftRLreg,
+ ssa.OpARMRSBshiftRLreg,
+ ssa.OpARMRSCshiftRLreg,
+ ssa.OpARMANDshiftRLreg,
+ ssa.OpARMORshiftRLreg,
+ ssa.OpARMXORshiftRLreg,
+ ssa.OpARMBICshiftRLreg:
+ genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum(v), arm.SHIFT_LR)
+ case ssa.OpARMADDSshiftRLreg,
+ ssa.OpARMSUBSshiftRLreg,
+ ssa.OpARMRSBSshiftRLreg:
+ p := genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum1(v), arm.SHIFT_LR)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMADDshiftRAreg,
+ ssa.OpARMADCshiftRAreg,
+ ssa.OpARMSUBshiftRAreg,
+ ssa.OpARMSBCshiftRAreg,
+ ssa.OpARMRSBshiftRAreg,
+ ssa.OpARMRSCshiftRAreg,
+ ssa.OpARMANDshiftRAreg,
+ ssa.OpARMORshiftRAreg,
+ ssa.OpARMXORshiftRAreg,
+ ssa.OpARMBICshiftRAreg:
+ genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum(v), arm.SHIFT_AR)
+ case ssa.OpARMADDSshiftRAreg,
+ ssa.OpARMSUBSshiftRAreg,
+ ssa.OpARMRSBSshiftRAreg:
+ p := genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum1(v), arm.SHIFT_AR)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMHMUL,
+ ssa.OpARMHMULU:
+ // 32-bit high multiplication
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ p.Reg = gc.SSARegNum(v.Args[1])
+ p.To.Type = obj.TYPE_REGREG
+ p.To.Reg = gc.SSARegNum(v)
+ p.To.Offset = arm.REGTMP // throw away the low 32 bits into the tmp register
+ case ssa.OpARMMULLU:
+ // 32-bit multiplication with a 64-bit result: high 32 bits in out0, low 32 bits in out1
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ p.Reg = gc.SSARegNum(v.Args[1])
+ p.To.Type = obj.TYPE_REGREG
+ p.To.Reg = gc.SSARegNum0(v) // high 32-bit
+ p.To.Offset = int64(gc.SSARegNum1(v)) // low 32-bit
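	// Editor's sketch (not part of this commit): MULLU is the widening
	// multiply (HMUL/HMULU above keep only the high half). In Go terms:
	//
	//	func mullu(a, b uint32) (hi, lo uint32) {
	//		t := uint64(a) * uint64(b)
	//		return uint32(t >> 32), uint32(t)
	//	}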
+ case ssa.OpARMMULA:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ p.Reg = gc.SSARegNum(v.Args[1])
+ p.To.Type = obj.TYPE_REGREG2
+ p.To.Reg = gc.SSARegNum(v) // result
+ p.To.Offset = int64(gc.SSARegNum(v.Args[2])) // addend
case ssa.OpARMMOVWconst:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
- case ssa.OpARMCMP:
+ case ssa.OpARMMOVFconst,
+ ssa.OpARMMOVDconst:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpARMCMP,
+ ssa.OpARMCMN,
+ ssa.OpARMTST,
+ ssa.OpARMTEQ,
+ ssa.OpARMCMPF,
+ ssa.OpARMCMPD:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
// Special layout in ARM assembly
// Compared with x86, the operands of ARM's CMP are reversed.
p.From.Reg = gc.SSARegNum(v.Args[1])
p.Reg = gc.SSARegNum(v.Args[0])
- case ssa.OpARMMOVWload:
+ case ssa.OpARMCMPconst,
+ ssa.OpARMCMNconst,
+ ssa.OpARMTSTconst,
+ ssa.OpARMTEQconst:
+ // Special layout in ARM assembly
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = gc.SSARegNum(v.Args[0])
+ case ssa.OpARMCMPF0,
+ ssa.OpARMCMPD0:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ case ssa.OpARMCMPshiftLL:
+ genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), 0, arm.SHIFT_LL, v.AuxInt)
+ case ssa.OpARMCMPshiftRL:
+ genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), 0, arm.SHIFT_LR, v.AuxInt)
+ case ssa.OpARMCMPshiftRA:
+ genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), 0, arm.SHIFT_AR, v.AuxInt)
+ case ssa.OpARMCMPshiftLLreg:
+ genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), 0, arm.SHIFT_LL)
+ case ssa.OpARMCMPshiftRLreg:
+ genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), 0, arm.SHIFT_LR)
+ case ssa.OpARMCMPshiftRAreg:
+ genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), 0, arm.SHIFT_AR)
+ case ssa.OpARMMOVWaddr:
+ p := gc.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_ADDR
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+
+ var wantreg string
+ // MOVW $sym+off(base), R
+ // the assembler expands it as follows:
+ // - base is SP: add constant offset to SP (R13)
+ //   when the constant is large, the tmp register (R11) may be used
+ // - base is SB: load external address from constant pool (use relocation)
+ switch v.Aux.(type) {
+ default:
+ v.Fatalf("aux is of unknown type %T", v.Aux)
+ case *ssa.ExternSymbol:
+ wantreg = "SB"
+ gc.AddAux(&p.From, v)
+ case *ssa.ArgSymbol, *ssa.AutoSymbol:
+ wantreg = "SP"
+ gc.AddAux(&p.From, v)
+ case nil:
+ // No sym, just MOVW $off(SP), R
+ wantreg = "SP"
+ p.From.Reg = arm.REGSP
+ p.From.Offset = v.AuxInt
+ }
+ if reg := gc.SSAReg(v.Args[0]); reg.Name() != wantreg {
+ v.Fatalf("bad reg %s for symbol type %T, want %s", reg.Name(), v.Aux, wantreg)
+ }
+
+ case ssa.OpARMMOVBload,
+ ssa.OpARMMOVBUload,
+ ssa.OpARMMOVHload,
+ ssa.OpARMMOVHUload,
+ ssa.OpARMMOVWload,
+ ssa.OpARMMOVFload,
+ ssa.OpARMMOVDload:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = gc.SSARegNum(v.Args[0])
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
- case ssa.OpARMMOVWstore:
+ case ssa.OpARMMOVBstore,
+ ssa.OpARMMOVHstore,
+ ssa.OpARMMOVWstore,
+ ssa.OpARMMOVFstore,
+ ssa.OpARMMOVDstore:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = gc.SSARegNum(v.Args[1])
p.To.Type = obj.TYPE_MEM
p.To.Reg = gc.SSARegNum(v.Args[0])
gc.AddAux(&p.To, v)
+ case ssa.OpARMMOVWloadidx:
+ // this is just a shift by 0 bits
+ fallthrough
+ case ssa.OpARMMOVWloadshiftLL:
+ p := genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LL, v.AuxInt)
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ case ssa.OpARMMOVWloadshiftRL:
+ p := genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LR, v.AuxInt)
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ case ssa.OpARMMOVWloadshiftRA:
+ p := genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_AR, v.AuxInt)
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ case ssa.OpARMMOVWstoreidx:
+ // this is just a shift by 0 bits
+ fallthrough
+ case ssa.OpARMMOVWstoreshiftLL:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[2])
+ p.To.Type = obj.TYPE_SHIFT
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Offset = int64(makeshift(gc.SSARegNum(v.Args[1]), arm.SHIFT_LL, v.AuxInt))
+ case ssa.OpARMMOVWstoreshiftRL:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[2])
+ p.To.Type = obj.TYPE_SHIFT
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Offset = int64(makeshift(gc.SSARegNum(v.Args[1]), arm.SHIFT_LR, v.AuxInt))
+ case ssa.OpARMMOVWstoreshiftRA:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[2])
+ p.To.Type = obj.TYPE_SHIFT
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Offset = int64(makeshift(gc.SSARegNum(v.Args[1]), arm.SHIFT_AR, v.AuxInt))
+ case ssa.OpARMMOVBreg,
+ ssa.OpARMMOVBUreg,
+ ssa.OpARMMOVHreg,
+ ssa.OpARMMOVHUreg:
+ a := v.Args[0]
+ for a.Op == ssa.OpCopy || a.Op == ssa.OpARMMOVWreg || a.Op == ssa.OpARMMOVWnop {
+ a = a.Args[0]
+ }
+ if a.Op == ssa.OpLoadReg {
+ t := a.Type
+ switch {
+ case v.Op == ssa.OpARMMOVBreg && t.Size() == 1 && t.IsSigned(),
+ v.Op == ssa.OpARMMOVBUreg && t.Size() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpARMMOVHreg && t.Size() == 2 && t.IsSigned(),
+ v.Op == ssa.OpARMMOVHUreg && t.Size() == 2 && !t.IsSigned():
+ // arg is a properly-typed load, already zero/sign-extended; don't extend again
+ if gc.SSARegNum(v) == gc.SSARegNum(v.Args[0]) {
+ return
+ }
+ p := gc.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ return
+ default:
+ }
+ }
+ fallthrough
+ case ssa.OpARMMVN,
+ ssa.OpARMSQRTD,
+ ssa.OpARMNEGF,
+ ssa.OpARMNEGD,
+ ssa.OpARMMOVWF,
+ ssa.OpARMMOVWD,
+ ssa.OpARMMOVFW,
+ ssa.OpARMMOVDW,
+ ssa.OpARMMOVFD,
+ ssa.OpARMMOVDF:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpARMMOVWUF,
+ ssa.OpARMMOVWUD,
+ ssa.OpARMMOVFWU,
+ ssa.OpARMMOVDWU:
+ p := gc.Prog(v.Op.Asm())
+ p.Scond = arm.C_UBIT
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpARMCMOVWHSconst:
+ p := gc.Prog(arm.AMOVW)
+ p.Scond = arm.C_SCOND_HS
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpARMCMOVWLSconst:
+ p := gc.Prog(arm.AMOVW)
+ p.Scond = arm.C_SCOND_LS
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
case ssa.OpARMCALLstatic:
- // TODO: deferreturn
+ if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
+ // Deferred calls will appear to be returning to
+ // the CALL deferreturn(SB) that we are about to emit.
+ // However, the stack trace code will show the line
+ // of the instruction byte before the return PC.
+ // To avoid that being an unrelated instruction,
+ // insert an actual hardware NOP that will have the right line number.
+ // This is different from obj.ANOP, which is a virtual no-op
+ // that doesn't make it into the instruction stream.
+ ginsnop()
+ }
p := gc.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
@@ -118,37 +723,346 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if gc.Maxarg < v.AuxInt {
gc.Maxarg = v.AuxInt
}
+ case ssa.OpARMCALLclosure:
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = 0
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.OpARMCALLdefer:
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.OpARMCALLgo:
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(gc.Newproc.Sym)
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.OpARMCALLinter:
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = 0
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.OpARMDUFFZERO:
+ p := gc.Prog(obj.ADUFFZERO)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
+ p.To.Offset = v.AuxInt
+ case ssa.OpARMDUFFCOPY:
+ p := gc.Prog(obj.ADUFFCOPY)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
+ p.To.Offset = v.AuxInt
+ case ssa.OpARMLoweredNilCheck:
+ // Optimization - if the subsequent block has a load or store
+ // at the same address, we don't need to issue this instruction.
+ mem := v.Args[1]
+ for _, w := range v.Block.Succs[0].Block().Values {
+ if w.Op == ssa.OpPhi {
+ if w.Type.IsMemory() {
+ mem = w
+ }
+ continue
+ }
+ if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() {
+ // w doesn't use a store - can't be a memory op.
+ continue
+ }
+ if w.Args[len(w.Args)-1] != mem {
+ v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w)
+ }
+ switch w.Op {
+ case ssa.OpARMMOVBload, ssa.OpARMMOVBUload, ssa.OpARMMOVHload, ssa.OpARMMOVHUload,
+ ssa.OpARMMOVWload, ssa.OpARMMOVFload, ssa.OpARMMOVDload,
+ ssa.OpARMMOVBstore, ssa.OpARMMOVHstore, ssa.OpARMMOVWstore,
+ ssa.OpARMMOVFstore, ssa.OpARMMOVDstore:
+ // arg0 is ptr, auxint is offset
+ if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
+ if gc.Debug_checknil != 0 && int(v.Line) > 1 {
+ gc.Warnl(v.Line, "removed nil check")
+ }
+ return
+ }
+ case ssa.OpARMDUFFZERO, ssa.OpARMLoweredZero:
+ // arg0 is ptr
+ if w.Args[0] == v.Args[0] {
+ if gc.Debug_checknil != 0 && int(v.Line) > 1 {
+ gc.Warnl(v.Line, "removed nil check")
+ }
+ return
+ }
+ case ssa.OpARMDUFFCOPY, ssa.OpARMLoweredMove:
+ // arg0 is dst ptr, arg1 is src ptr
+ if w.Args[0] == v.Args[0] || w.Args[1] == v.Args[0] {
+ if gc.Debug_checknil != 0 && int(v.Line) > 1 {
+ gc.Warnl(v.Line, "removed nil check")
+ }
+ return
+ }
+ default:
+ }
+ if w.Type.IsMemory() {
+ if w.Op == ssa.OpVarDef || w.Op == ssa.OpVarKill || w.Op == ssa.OpVarLive {
+ // these ops are OK
+ mem = w
+ continue
+ }
+ // We can't delay the nil check past the next store.
+ break
+ }
+ }
+ // Issue a load which will fault if arg is nil.
+ p := gc.Prog(arm.AMOVB)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm.REGTMP
+ if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
+ gc.Warnl(v.Line, "generated nil check")
+ }
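	// Editor's sketch (not part of this commit): the elision above relies
	// on the page at address zero being unmapped (see minZeroPage), so for
	// code like
	//
	//	func deref(p *int32) int32 { return *p }
	//
	// the MOVW load itself faults when p is nil; the explicit MOVB probe
	// emitted here is only needed when no such access follows soon enough.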
+ case ssa.OpARMLoweredZero:
+ // MOVW.P Rarg2, 4(R1)
+ // CMP Rarg1, R1
+ // BLE -2(PC)
+ // arg1 is the address of the last element to zero
+ // arg2 is known to be zero
+ // auxint is alignment
+ var sz int64
+ var mov obj.As
+ switch {
+ case v.AuxInt%4 == 0:
+ sz = 4
+ mov = arm.AMOVW
+ case v.AuxInt%2 == 0:
+ sz = 2
+ mov = arm.AMOVH
+ default:
+ sz = 1
+ mov = arm.AMOVB
+ }
+ p := gc.Prog(mov)
+ p.Scond = arm.C_PBIT
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[2])
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arm.REG_R1
+ p.To.Offset = sz
+ p2 := gc.Prog(arm.ACMP)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = gc.SSARegNum(v.Args[1])
+ p2.Reg = arm.REG_R1
+ p3 := gc.Prog(arm.ABLE)
+ p3.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p3, p)
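	// Editor's sketch (not part of this commit): the three instructions
	// above form a store/post-increment/compare loop, roughly:
	//
	//	i := 0
	//	for {
	//		mem[i] = 0 // MOVW.P: store, then post-increment
	//		i++
	//		if i > last {
	//			break // CMP + BLE: loop while the pointer <= last element
	//		}
	//	}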
+ case ssa.OpARMLoweredMove:
+ // MOVW.P 4(R1), Rtmp
+ // MOVW.P Rtmp, 4(R2)
+ // CMP Rarg2, R1
+ // BLE -3(PC)
+ // arg2 is the address of the last element of src
+ // auxint is alignment
+ var sz int64
+ var mov obj.As
+ switch {
+ case v.AuxInt%4 == 0:
+ sz = 4
+ mov = arm.AMOVW
+ case v.AuxInt%2 == 0:
+ sz = 2
+ mov = arm.AMOVH
+ default:
+ sz = 1
+ mov = arm.AMOVB
+ }
+ p := gc.Prog(mov)
+ p.Scond = arm.C_PBIT
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = arm.REG_R1
+ p.From.Offset = sz
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm.REGTMP
+ p2 := gc.Prog(mov)
+ p2.Scond = arm.C_PBIT
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = arm.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = arm.REG_R2
+ p2.To.Offset = sz
+ p3 := gc.Prog(arm.ACMP)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = gc.SSARegNum(v.Args[2])
+ p3.Reg = arm.REG_R1
+ p4 := gc.Prog(arm.ABLE)
+ p4.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p4, p)
case ssa.OpVarDef:
gc.Gvardef(v.Aux.(*gc.Node))
case ssa.OpVarKill:
gc.Gvarkill(v.Aux.(*gc.Node))
case ssa.OpVarLive:
gc.Gvarlive(v.Aux.(*gc.Node))
- case ssa.OpARMLessThan:
- v.Fatalf("pseudo-op made it to output: %s", v.LongString())
+ case ssa.OpKeepAlive:
+ if !v.Args[0].Type.IsPtrShaped() {
+ v.Fatalf("keeping non-pointer alive %v", v.Args[0])
+ }
+ n, off := gc.AutoVar(v.Args[0])
+ if n == nil {
+ v.Fatalf("KeepLive with non-spilled value %s %s", v, v.Args[0])
+ }
+ if off != 0 {
+ v.Fatalf("KeepLive with non-zero offset spill location %s:%d", n, off)
+ }
+ gc.Gvarlive(n)
+ case ssa.OpARMEqual,
+ ssa.OpARMNotEqual,
+ ssa.OpARMLessThan,
+ ssa.OpARMLessEqual,
+ ssa.OpARMGreaterThan,
+ ssa.OpARMGreaterEqual,
+ ssa.OpARMLessThanU,
+ ssa.OpARMLessEqualU,
+ ssa.OpARMGreaterThanU,
+ ssa.OpARMGreaterEqualU:
+ // generate boolean values
+ // use conditional move
+ p := gc.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ p = gc.Prog(arm.AMOVW)
+ p.Scond = condBits[v.Op]
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpSelect0, ssa.OpSelect1:
+ // nothing to do
+ case ssa.OpARMLoweredGetClosurePtr:
+ // Closure pointer is R7 (arm.REGCTXT).
+ gc.CheckLoweredGetClosurePtr(v)
+ case ssa.OpARMFlagEQ,
+ ssa.OpARMFlagLT_ULT,
+ ssa.OpARMFlagLT_UGT,
+ ssa.OpARMFlagGT_ULT,
+ ssa.OpARMFlagGT_UGT:
+ v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
+ case ssa.OpARMInvertFlags:
+ v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
default:
v.Unimplementedf("genValue not implemented: %s", v.LongString())
}
}
+var condBits = map[ssa.Op]uint8{
+ ssa.OpARMEqual: arm.C_SCOND_EQ,
+ ssa.OpARMNotEqual: arm.C_SCOND_NE,
+ ssa.OpARMLessThan: arm.C_SCOND_LT,
+ ssa.OpARMLessThanU: arm.C_SCOND_LO,
+ ssa.OpARMLessEqual: arm.C_SCOND_LE,
+ ssa.OpARMLessEqualU: arm.C_SCOND_LS,
+ ssa.OpARMGreaterThan: arm.C_SCOND_GT,
+ ssa.OpARMGreaterThanU: arm.C_SCOND_HI,
+ ssa.OpARMGreaterEqual: arm.C_SCOND_GE,
+ ssa.OpARMGreaterEqualU: arm.C_SCOND_HS,
+}
+
+var blockJump = map[ssa.BlockKind]struct {
+ asm, invasm obj.As
+}{
+ ssa.BlockARMEQ: {arm.ABEQ, arm.ABNE},
+ ssa.BlockARMNE: {arm.ABNE, arm.ABEQ},
+ ssa.BlockARMLT: {arm.ABLT, arm.ABGE},
+ ssa.BlockARMGE: {arm.ABGE, arm.ABLT},
+ ssa.BlockARMLE: {arm.ABLE, arm.ABGT},
+ ssa.BlockARMGT: {arm.ABGT, arm.ABLE},
+ ssa.BlockARMULT: {arm.ABLO, arm.ABHS},
+ ssa.BlockARMUGE: {arm.ABHS, arm.ABLO},
+ ssa.BlockARMUGT: {arm.ABHI, arm.ABLS},
+ ssa.BlockARMULE: {arm.ABLS, arm.ABHI},
+}
+
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
s.SetLineno(b.Line)
switch b.Kind {
- case ssa.BlockCall:
+ case ssa.BlockPlain, ssa.BlockCall, ssa.BlockCheck:
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
- case ssa.BlockRet:
- gc.Prog(obj.ARET)
- case ssa.BlockARMLT:
- p := gc.Prog(arm.ABLT)
- p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
- p = gc.Prog(obj.AJMP)
+
+ case ssa.BlockDefer:
+ // defer returns in R0:
+ // 0 if we should continue executing
+ // 1 if we should jump to the deferreturn call
+ p := gc.Prog(arm.ACMP)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0
+ p.Reg = arm.REG_R0
+ p = gc.Prog(arm.ABNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := gc.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+
+ case ssa.BlockExit:
+ gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
+
+ case ssa.BlockRet:
+ gc.Prog(obj.ARET)
+
+ case ssa.BlockRetJmp:
+ p := gc.Prog(obj.ARET)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))
+
+ case ssa.BlockARMEQ, ssa.BlockARMNE,
+ ssa.BlockARMLT, ssa.BlockARMGE,
+ ssa.BlockARMLE, ssa.BlockARMGT,
+ ssa.BlockARMULT, ssa.BlockARMUGT,
+ ssa.BlockARMULE, ssa.BlockARMUGE:
+ jmp := blockJump[b.Kind]
+ var p *obj.Prog
+ switch next {
+ case b.Succs[0].Block():
+ p = gc.Prog(jmp.invasm)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ case b.Succs[1].Block():
+ p = gc.Prog(jmp.asm)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ default:
+ p = gc.Prog(jmp.asm)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ q := gc.Prog(obj.AJMP)
+ q.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
+ }
+
+ default:
+ b.Unimplementedf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
}
}
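
[Editor's note] The shift/makeshift/makeregshift helpers added to arm/ssa.go above pack a shifted-register operand into a single int64 Offset: the base register in bits 0-3, the shift kind in bits 5-6, and either a constant amount in bits 7-11 or, with bit 4 set, a shift register in bits 8-11. A self-contained, runnable sketch (mine, not part of this commit; the SHIFT_* values are inferred from the String decoder, which reads the kind from bits 5-6):

package main

import "fmt"

type shift int64

const (
	shiftLL = 0 << 5 // <<, logical left
	shiftLR = 1 << 5 // >>, logical right
	shiftAR = 2 << 5 // ->, arithmetic right
	shiftRR = 3 << 5 // @>, rotate right
)

func (v shift) String() string {
	op := "<<>>->@>"[((v>>5)&3)<<1:]
	if v&(1<<4) != 0 {
		// register shift: the amount register lives in bits 8-11
		return fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15)
	}
	// constant shift: the amount lives in bits 7-11
	return fmt.Sprintf("R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31)
}

func makeshift(reg int16, typ int64, s int64) shift {
	return shift(int64(reg&0xf) | typ | (s&31)<<7)
}

func makeregshift(r1 int16, typ int64, r2 int16) shift {
	return shift(int64(r1&0xf) | typ | int64(r2&0xf)<<8 | 1<<4)
}

func main() {
	fmt.Println(makeshift(1, shiftLL, 3))    // R1<<3
	fmt.Println(makeregshift(2, shiftAR, 5)) // R2->R5
}
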
diff --git a/src/cmd/compile/internal/arm64/galign.go b/src/cmd/compile/internal/arm64/galign.go
index 7acc4e08eb..12f9b0515f 100644
--- a/src/cmd/compile/internal/arm64/galign.go
+++ b/src/cmd/compile/internal/arm64/galign.go
@@ -6,6 +6,7 @@ package arm64
import (
"cmd/compile/internal/gc"
+ "cmd/compile/internal/ssa"
"cmd/internal/obj/arm64"
)
@@ -61,6 +62,11 @@ func Main() {
gc.Thearch.Doregbits = doregbits
gc.Thearch.Regnames = regnames
+ gc.Thearch.SSARegToReg = ssaRegToReg
+ gc.Thearch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
+ gc.Thearch.SSAGenValue = ssaGenValue
+ gc.Thearch.SSAGenBlock = ssaGenBlock
+
gc.Main()
gc.Exit(0)
}
diff --git a/src/cmd/compile/internal/arm64/prog.go b/src/cmd/compile/internal/arm64/prog.go
index d504d0f0ee..af43163ece 100644
--- a/src/cmd/compile/internal/arm64/prog.go
+++ b/src/cmd/compile/internal/arm64/prog.go
@@ -44,24 +44,37 @@ var progtable = [arm64.ALAST & obj.AMask]obj.ProgInfo{
// Integer
arm64.AADD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
arm64.ASUB & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
- arm64.ANEG & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ arm64.ANEG & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, // why RegRead? revisit once the old backend gone
arm64.AAND & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
arm64.AORR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
arm64.AEOR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ arm64.ABIC & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ arm64.AMVN & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite},
arm64.AMUL & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ arm64.AMULW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
arm64.ASMULL & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
arm64.AUMULL & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
- arm64.ASMULH & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
- arm64.AUMULH & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ arm64.ASMULH & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ arm64.AUMULH & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
arm64.ASDIV & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
arm64.AUDIV & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ arm64.ASDIVW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ arm64.AUDIVW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ arm64.AREM & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ arm64.AUREM & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ arm64.AREMW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ arm64.AUREMW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
arm64.ALSL & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
arm64.ALSR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
arm64.AASR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
arm64.ACMP & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead},
+ arm64.ACMPW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead},
arm64.AADC & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite | gc.UseCarry},
arm64.AROR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ arm64.ARORW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
arm64.AADDS & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite | gc.SetCarry},
+ arm64.ACSET & obj.AMask: {Flags: gc.SizeQ | gc.RightWrite},
+ arm64.ACSEL & obj.AMask: {Flags: gc.SizeQ | gc.RegRead | gc.RightWrite},
// Floating point.
arm64.AFADDD & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
new file mode 100644
index 0000000000..1f96909716
--- /dev/null
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -0,0 +1,865 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm64
+
+import (
+ "math"
+
+ "cmd/compile/internal/gc"
+ "cmd/compile/internal/ssa"
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm64"
+)
+
+var ssaRegToReg = []int16{
+ arm64.REG_R0,
+ arm64.REG_R1,
+ arm64.REG_R2,
+ arm64.REG_R3,
+ arm64.REG_R4,
+ arm64.REG_R5,
+ arm64.REG_R6,
+ arm64.REG_R7,
+ arm64.REG_R8,
+ arm64.REG_R9,
+ arm64.REG_R10,
+ arm64.REG_R11,
+ arm64.REG_R12,
+ arm64.REG_R13,
+ arm64.REG_R14,
+ arm64.REG_R15,
+ arm64.REG_R16,
+ arm64.REG_R17,
+ arm64.REG_R18, // platform register, not used
+ arm64.REG_R19,
+ arm64.REG_R20,
+ arm64.REG_R21,
+ arm64.REG_R22,
+ arm64.REG_R23,
+ arm64.REG_R24,
+ arm64.REG_R25,
+ arm64.REG_R26,
+ // R27 = REGTMP not used in regalloc
+ arm64.REGG, // R28
+ arm64.REG_R29, // frame pointer, not used
+ // R30 = REGLINK not used in regalloc
+ arm64.REGSP, // R31
+
+ arm64.REG_F0,
+ arm64.REG_F1,
+ arm64.REG_F2,
+ arm64.REG_F3,
+ arm64.REG_F4,
+ arm64.REG_F5,
+ arm64.REG_F6,
+ arm64.REG_F7,
+ arm64.REG_F8,
+ arm64.REG_F9,
+ arm64.REG_F10,
+ arm64.REG_F11,
+ arm64.REG_F12,
+ arm64.REG_F13,
+ arm64.REG_F14,
+ arm64.REG_F15,
+ arm64.REG_F16,
+ arm64.REG_F17,
+ arm64.REG_F18,
+ arm64.REG_F19,
+ arm64.REG_F20,
+ arm64.REG_F21,
+ arm64.REG_F22,
+ arm64.REG_F23,
+ arm64.REG_F24,
+ arm64.REG_F25,
+ arm64.REG_F26,
+ arm64.REG_F27,
+ arm64.REG_F28,
+ arm64.REG_F29,
+ arm64.REG_F30,
+ arm64.REG_F31,
+
+ arm64.REG_NZCV, // flag
+ 0, // SB isn't a real register. We fill an Addr.Reg field with 0 in this case.
+}
+
+// Smallest possible faulting page at address zero,
+// see ../../../../runtime/mheap.go:/minPhysPageSize
+const minZeroPage = 4096
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t ssa.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return arm64.AFMOVS
+ case 8:
+ return arm64.AFMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return arm64.AMOVB
+ } else {
+ return arm64.AMOVBU
+ }
+ case 2:
+ if t.IsSigned() {
+ return arm64.AMOVH
+ } else {
+ return arm64.AMOVHU
+ }
+ case 4:
+ if t.IsSigned() {
+ return arm64.AMOVW
+ } else {
+ return arm64.AMOVWU
+ }
+ case 8:
+ return arm64.AMOVD
+ }
+ }
+ panic("bad load type")
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t ssa.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return arm64.AFMOVS
+ case 8:
+ return arm64.AFMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ return arm64.AMOVB
+ case 2:
+ return arm64.AMOVH
+ case 4:
+ return arm64.AMOVW
+ case 8:
+ return arm64.AMOVD
+ }
+ }
+ panic("bad store type")
+}
+
+// makeshift encodes a register shifted by a constant, used as an Offset in Prog
+func makeshift(reg int16, typ int64, s int64) int64 {
+ return int64(reg&31)<<16 | typ | (s&63)<<10
+}
+
+// genshift generates a Prog for r = r0 op (r1 shifted by s)
+func genshift(as obj.As, r0, r1, r int16, typ int64, s int64) *obj.Prog {
+ p := gc.Prog(as)
+ p.From.Type = obj.TYPE_SHIFT
+ p.From.Offset = makeshift(r1, typ, s)
+ p.Reg = r0
+ if r != 0 {
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ return p
+}
+
+func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+ s.SetLineno(v.Line)
+ switch v.Op {
+ case ssa.OpInitMem:
+ // memory arg needs no code
+ case ssa.OpArg:
+ // input args need no code
+ case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
+ // nothing to do
+ case ssa.OpCopy, ssa.OpARM64MOVDconvert, ssa.OpARM64MOVDreg:
+ if v.Type.IsMemory() {
+ return
+ }
+ x := gc.SSARegNum(v.Args[0])
+ y := gc.SSARegNum(v)
+ if x == y {
+ return
+ }
+ as := arm64.AMOVD
+ if v.Type.IsFloat() {
+ switch v.Type.Size() {
+ case 4:
+ as = arm64.AFMOVS
+ case 8:
+ as = arm64.AFMOVD
+ default:
+ panic("bad float size")
+ }
+ }
+ p := gc.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ case ssa.OpARM64MOVDnop:
+ if gc.SSARegNum(v) != gc.SSARegNum(v.Args[0]) {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ // nothing to do
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Unimplementedf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ p := gc.Prog(loadByType(v.Type))
+ n, off := gc.AutoVar(v.Args[0])
+ p.From.Type = obj.TYPE_MEM
+ p.From.Node = n
+ p.From.Sym = gc.Linksym(n.Sym)
+ p.From.Offset = off
+ if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
+ p.From.Name = obj.NAME_PARAM
+ p.From.Offset += n.Xoffset
+ } else {
+ p.From.Name = obj.NAME_AUTO
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpPhi:
+ gc.CheckLoweredPhi(v)
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Unimplementedf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ p := gc.Prog(storeByType(v.Type))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ n, off := gc.AutoVar(v)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Node = n
+ p.To.Sym = gc.Linksym(n.Sym)
+ p.To.Offset = off
+ if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
+ p.To.Name = obj.NAME_PARAM
+ p.To.Offset += n.Xoffset
+ } else {
+ p.To.Name = obj.NAME_AUTO
+ }
+ case ssa.OpARM64ADD,
+ ssa.OpARM64SUB,
+ ssa.OpARM64AND,
+ ssa.OpARM64OR,
+ ssa.OpARM64XOR,
+ ssa.OpARM64BIC,
+ ssa.OpARM64MUL,
+ ssa.OpARM64MULW,
+ ssa.OpARM64MULH,
+ ssa.OpARM64UMULH,
+ ssa.OpARM64MULL,
+ ssa.OpARM64UMULL,
+ ssa.OpARM64DIV,
+ ssa.OpARM64UDIV,
+ ssa.OpARM64DIVW,
+ ssa.OpARM64UDIVW,
+ ssa.OpARM64MOD,
+ ssa.OpARM64UMOD,
+ ssa.OpARM64MODW,
+ ssa.OpARM64UMODW,
+ ssa.OpARM64SLL,
+ ssa.OpARM64SRL,
+ ssa.OpARM64SRA,
+ ssa.OpARM64FADDS,
+ ssa.OpARM64FADDD,
+ ssa.OpARM64FSUBS,
+ ssa.OpARM64FSUBD,
+ ssa.OpARM64FMULS,
+ ssa.OpARM64FMULD,
+ ssa.OpARM64FDIVS,
+ ssa.OpARM64FDIVD:
+ r := gc.SSARegNum(v)
+ r1 := gc.SSARegNum(v.Args[0])
+ r2 := gc.SSARegNum(v.Args[1])
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpARM64ADDconst,
+ ssa.OpARM64SUBconst,
+ ssa.OpARM64ANDconst,
+ ssa.OpARM64ORconst,
+ ssa.OpARM64XORconst,
+ ssa.OpARM64BICconst,
+ ssa.OpARM64SLLconst,
+ ssa.OpARM64SRLconst,
+ ssa.OpARM64SRAconst,
+ ssa.OpARM64RORconst,
+ ssa.OpARM64RORWconst:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpARM64ADDshiftLL,
+ ssa.OpARM64SUBshiftLL,
+ ssa.OpARM64ANDshiftLL,
+ ssa.OpARM64ORshiftLL,
+ ssa.OpARM64XORshiftLL,
+ ssa.OpARM64BICshiftLL:
+ genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm64.SHIFT_LL, v.AuxInt)
+ case ssa.OpARM64ADDshiftRL,
+ ssa.OpARM64SUBshiftRL,
+ ssa.OpARM64ANDshiftRL,
+ ssa.OpARM64ORshiftRL,
+ ssa.OpARM64XORshiftRL,
+ ssa.OpARM64BICshiftRL:
+ genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm64.SHIFT_LR, v.AuxInt)
+ case ssa.OpARM64ADDshiftRA,
+ ssa.OpARM64SUBshiftRA,
+ ssa.OpARM64ANDshiftRA,
+ ssa.OpARM64ORshiftRA,
+ ssa.OpARM64XORshiftRA,
+ ssa.OpARM64BICshiftRA:
+ genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm64.SHIFT_AR, v.AuxInt)
+ case ssa.OpARM64MOVDconst:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpARM64FMOVSconst,
+ ssa.OpARM64FMOVDconst:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpARM64CMP,
+ ssa.OpARM64CMPW,
+ ssa.OpARM64CMN,
+ ssa.OpARM64CMNW,
+ ssa.OpARM64FCMPS,
+ ssa.OpARM64FCMPD:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[1])
+ p.Reg = gc.SSARegNum(v.Args[0])
+ case ssa.OpARM64CMPconst,
+ ssa.OpARM64CMPWconst,
+ ssa.OpARM64CMNconst,
+ ssa.OpARM64CMNWconst:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = gc.SSARegNum(v.Args[0])
+ case ssa.OpARM64CMPshiftLL:
+ genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), 0, arm64.SHIFT_LL, v.AuxInt)
+ case ssa.OpARM64CMPshiftRL:
+ genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), 0, arm64.SHIFT_LR, v.AuxInt)
+ case ssa.OpARM64CMPshiftRA:
+ genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), 0, arm64.SHIFT_AR, v.AuxInt)
+ case ssa.OpARM64MOVDaddr:
+ p := gc.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+
+ var wantreg string
+ // MOVD $sym+off(base), R
+ // the assembler expands it as the following:
+ // - base is SP: add constant offset to SP
+ //               when the constant is large, the tmp register (REGTMP) may be used
+ // - base is SB: load external address from constant pool (use relocation)
+ switch v.Aux.(type) {
+ default:
+ v.Fatalf("aux is of unknown type %T", v.Aux)
+ case *ssa.ExternSymbol:
+ wantreg = "SB"
+ gc.AddAux(&p.From, v)
+ case *ssa.ArgSymbol, *ssa.AutoSymbol:
+ wantreg = "SP"
+ gc.AddAux(&p.From, v)
+ case nil:
+ // No sym, just MOVD $off(SP), R
+ wantreg = "SP"
+ p.From.Reg = arm64.REGSP
+ p.From.Offset = v.AuxInt
+ }
+ if reg := gc.SSAReg(v.Args[0]); reg.Name() != wantreg {
+ v.Fatalf("bad reg %s for symbol type %T, want %s", reg.Name(), v.Aux, wantreg)
+ }
+ case ssa.OpARM64MOVBload,
+ ssa.OpARM64MOVBUload,
+ ssa.OpARM64MOVHload,
+ ssa.OpARM64MOVHUload,
+ ssa.OpARM64MOVWload,
+ ssa.OpARM64MOVWUload,
+ ssa.OpARM64MOVDload,
+ ssa.OpARM64FMOVSload,
+ ssa.OpARM64FMOVDload:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpARM64MOVBstore,
+ ssa.OpARM64MOVHstore,
+ ssa.OpARM64MOVWstore,
+ ssa.OpARM64MOVDstore,
+ ssa.OpARM64FMOVSstore,
+ ssa.OpARM64FMOVDstore:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[1])
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.To, v)
+ case ssa.OpARM64MOVBstorezero,
+ ssa.OpARM64MOVHstorezero,
+ ssa.OpARM64MOVWstorezero,
+ ssa.OpARM64MOVDstorezero:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm64.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.To, v)
+ case ssa.OpARM64MOVBreg,
+ ssa.OpARM64MOVBUreg,
+ ssa.OpARM64MOVHreg,
+ ssa.OpARM64MOVHUreg,
+ ssa.OpARM64MOVWreg,
+ ssa.OpARM64MOVWUreg:
+ a := v.Args[0]
+ for a.Op == ssa.OpCopy || a.Op == ssa.OpARM64MOVDreg {
+ a = a.Args[0]
+ }
+ if a.Op == ssa.OpLoadReg {
+ t := a.Type
+ switch {
+ case v.Op == ssa.OpARM64MOVBreg && t.Size() == 1 && t.IsSigned(),
+ v.Op == ssa.OpARM64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpARM64MOVHreg && t.Size() == 2 && t.IsSigned(),
+ v.Op == ssa.OpARM64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
+ v.Op == ssa.OpARM64MOVWreg && t.Size() == 4 && t.IsSigned(),
+ v.Op == ssa.OpARM64MOVWUreg && t.Size() == 4 && !t.IsSigned():
+ // arg is a properly typed load, already zero/sign-extended; don't extend again
+ if gc.SSARegNum(v) == gc.SSARegNum(v.Args[0]) {
+ return
+ }
+ p := gc.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ return
+ default:
+ }
+ }
+ fallthrough
+ case ssa.OpARM64MVN,
+ ssa.OpARM64NEG,
+ ssa.OpARM64FNEGS,
+ ssa.OpARM64FNEGD,
+ ssa.OpARM64FSQRTD,
+ ssa.OpARM64FCVTZSSW,
+ ssa.OpARM64FCVTZSDW,
+ ssa.OpARM64FCVTZUSW,
+ ssa.OpARM64FCVTZUDW,
+ ssa.OpARM64FCVTZSS,
+ ssa.OpARM64FCVTZSD,
+ ssa.OpARM64FCVTZUS,
+ ssa.OpARM64FCVTZUD,
+ ssa.OpARM64SCVTFWS,
+ ssa.OpARM64SCVTFWD,
+ ssa.OpARM64SCVTFS,
+ ssa.OpARM64SCVTFD,
+ ssa.OpARM64UCVTFWS,
+ ssa.OpARM64UCVTFWD,
+ ssa.OpARM64UCVTFS,
+ ssa.OpARM64UCVTFD,
+ ssa.OpARM64FCVTSD,
+ ssa.OpARM64FCVTDS:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpARM64CSELULT,
+ ssa.OpARM64CSELULT0:
+ r1 := int16(arm64.REGZERO)
+ if v.Op == ssa.OpARM64CSELULT {
+ r1 = gc.SSARegNum(v.Args[1])
+ }
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
+ p.From.Reg = arm64.COND_LO
+ p.Reg = gc.SSARegNum(v.Args[0])
+ p.From3 = &obj.Addr{Type: obj.TYPE_REG, Reg: r1}
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpARM64DUFFZERO:
+ // runtime.duffzero expects start address - 8 in R16
+ p := gc.Prog(arm64.ASUB)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 8
+ p.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REG_R16
+ p = gc.Prog(obj.ADUFFZERO)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
+ p.To.Offset = v.AuxInt
+ case ssa.OpARM64LoweredZero:
+ // MOVD.P ZR, 8(R16)
+ // CMP Rarg1, R16
+ // BLE -2(PC)
+ // arg1 is the address of the last element to zero
+ // auxint is alignment
+ var sz int64
+ var mov obj.As
+ switch {
+ case v.AuxInt%8 == 0:
+ sz = 8
+ mov = arm64.AMOVD
+ case v.AuxInt%4 == 0:
+ sz = 4
+ mov = arm64.AMOVW
+ case v.AuxInt%2 == 0:
+ sz = 2
+ mov = arm64.AMOVH
+ default:
+ sz = 1
+ mov = arm64.AMOVB
+ }
+ p := gc.Prog(mov)
+ p.Scond = arm64.C_XPOST
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm64.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arm64.REG_R16
+ p.To.Offset = sz
+ p2 := gc.Prog(arm64.ACMP)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = gc.SSARegNum(v.Args[1])
+ p2.Reg = arm64.REG_R16
+ p3 := gc.Prog(arm64.ABLE)
+ p3.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p3, p)
+ case ssa.OpARM64LoweredMove:
+ // MOVD.P 8(R16), Rtmp
+ // MOVD.P Rtmp, 8(R17)
+ // CMP Rarg2, R16
+ // BLE -3(PC)
+ // arg2 is the address of the last element of src
+ // auxint is alignment
+ var sz int64
+ var mov obj.As
+ switch {
+ case v.AuxInt%8 == 0:
+ sz = 8
+ mov = arm64.AMOVD
+ case v.AuxInt%4 == 0:
+ sz = 4
+ mov = arm64.AMOVW
+ case v.AuxInt%2 == 0:
+ sz = 2
+ mov = arm64.AMOVH
+ default:
+ sz = 1
+ mov = arm64.AMOVB
+ }
+ p := gc.Prog(mov)
+ p.Scond = arm64.C_XPOST
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = arm64.REG_R16
+ p.From.Offset = sz
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REGTMP
+ p2 := gc.Prog(mov)
+ p2.Scond = arm64.C_XPOST
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = arm64.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = arm64.REG_R17
+ p2.To.Offset = sz
+ p3 := gc.Prog(arm64.ACMP)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = gc.SSARegNum(v.Args[2])
+ p3.Reg = arm64.REG_R16
+ p4 := gc.Prog(arm64.ABLE)
+ p4.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p4, p)
+ case ssa.OpARM64CALLstatic:
+ if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
+ // Deferred calls will appear to be returning to
+ // the CALL deferreturn(SB) that we are about to emit.
+ // However, the stack trace code will show the line
+ // of the instruction byte before the return PC.
+ // To avoid that being an unrelated instruction,
+ // insert an actual hardware NOP that will have the right line number.
+ // This is different from obj.ANOP, which is a virtual no-op
+ // that doesn't make it into the instruction stream.
+ ginsnop()
+ }
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym))
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.OpARM64CALLclosure:
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = 0
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.OpARM64CALLdefer:
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.OpARM64CALLgo:
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(gc.Newproc.Sym)
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.OpARM64CALLinter:
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = 0
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.OpARM64LoweredNilCheck:
+ // Optimization - if the subsequent block has a load or store
+ // at the same address, we don't need to issue this instruction.
+ mem := v.Args[1]
+ for _, w := range v.Block.Succs[0].Block().Values {
+ if w.Op == ssa.OpPhi {
+ if w.Type.IsMemory() {
+ mem = w
+ }
+ continue
+ }
+ if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() {
+ // w doesn't use a store - can't be a memory op.
+ continue
+ }
+ if w.Args[len(w.Args)-1] != mem {
+ v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w)
+ }
+ switch w.Op {
+ case ssa.OpARM64MOVBload, ssa.OpARM64MOVBUload, ssa.OpARM64MOVHload, ssa.OpARM64MOVHUload,
+ ssa.OpARM64MOVWload, ssa.OpARM64MOVWUload, ssa.OpARM64MOVDload,
+ ssa.OpARM64FMOVSload, ssa.OpARM64FMOVDload,
+ ssa.OpARM64MOVBstore, ssa.OpARM64MOVHstore, ssa.OpARM64MOVWstore, ssa.OpARM64MOVDstore,
+ ssa.OpARM64FMOVSstore, ssa.OpARM64FMOVDstore:
+ // arg0 is ptr, auxint is offset
+ if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
+ if gc.Debug_checknil != 0 && int(v.Line) > 1 {
+ gc.Warnl(v.Line, "removed nil check")
+ }
+ return
+ }
+ case ssa.OpARM64DUFFZERO, ssa.OpARM64LoweredZero:
+ // arg0 is ptr
+ if w.Args[0] == v.Args[0] {
+ if gc.Debug_checknil != 0 && int(v.Line) > 1 {
+ gc.Warnl(v.Line, "removed nil check")
+ }
+ return
+ }
+ case ssa.OpARM64LoweredMove:
+ // arg0 is dst ptr, arg1 is src ptr
+ if w.Args[0] == v.Args[0] || w.Args[1] == v.Args[0] {
+ if gc.Debug_checknil != 0 && int(v.Line) > 1 {
+ gc.Warnl(v.Line, "removed nil check")
+ }
+ return
+ }
+ default:
+ }
+ if w.Type.IsMemory() {
+ if w.Op == ssa.OpVarDef || w.Op == ssa.OpVarKill || w.Op == ssa.OpVarLive {
+ // these ops are OK
+ mem = w
+ continue
+ }
+ // We can't delay the nil check past the next store.
+ break
+ }
+ }
+ // Issue a load which will fault if arg is nil.
+ p := gc.Prog(arm64.AMOVB)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REGTMP
+ if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
+ gc.Warnl(v.Line, "generated nil check")
+ }
+ case ssa.OpVarDef:
+ gc.Gvardef(v.Aux.(*gc.Node))
+ case ssa.OpVarKill:
+ gc.Gvarkill(v.Aux.(*gc.Node))
+ case ssa.OpVarLive:
+ gc.Gvarlive(v.Aux.(*gc.Node))
+ case ssa.OpKeepAlive:
+ if !v.Args[0].Type.IsPtrShaped() {
+ v.Fatalf("keeping non-pointer alive %v", v.Args[0])
+ }
+ n, off := gc.AutoVar(v.Args[0])
+ if n == nil {
+ v.Fatalf("KeepLive with non-spilled value %s %s", v, v.Args[0])
+ }
+ if off != 0 {
+ v.Fatalf("KeepLive with non-zero offset spill location %s:%d", n, off)
+ }
+ gc.Gvarlive(n)
+ case ssa.OpARM64Equal,
+ ssa.OpARM64NotEqual,
+ ssa.OpARM64LessThan,
+ ssa.OpARM64LessEqual,
+ ssa.OpARM64GreaterThan,
+ ssa.OpARM64GreaterEqual,
+ ssa.OpARM64LessThanU,
+ ssa.OpARM64LessEqualU,
+ ssa.OpARM64GreaterThanU,
+ ssa.OpARM64GreaterEqualU:
+ // generate boolean values using CSET
+ p := gc.Prog(arm64.ACSET)
+ p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
+ p.From.Reg = condBits[v.Op]
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpSelect0, ssa.OpSelect1:
+ // nothing to do
+ case ssa.OpARM64LoweredGetClosurePtr:
+ // Closure pointer is R26 (arm64.REGCTXT).
+ gc.CheckLoweredGetClosurePtr(v)
+ case ssa.OpARM64FlagEQ,
+ ssa.OpARM64FlagLT_ULT,
+ ssa.OpARM64FlagLT_UGT,
+ ssa.OpARM64FlagGT_ULT,
+ ssa.OpARM64FlagGT_UGT:
+ v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
+ case ssa.OpARM64InvertFlags:
+ v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
+ default:
+ v.Unimplementedf("genValue not implemented: %s", v.LongString())
+ }
+}
+
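+// condBits maps an SSA comparison op to the ARM64 condition code that
+// CSET consumes when materializing the comparison's boolean result.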
+var condBits = map[ssa.Op]int16{
+ ssa.OpARM64Equal: arm64.COND_EQ,
+ ssa.OpARM64NotEqual: arm64.COND_NE,
+ ssa.OpARM64LessThan: arm64.COND_LT,
+ ssa.OpARM64LessThanU: arm64.COND_LO,
+ ssa.OpARM64LessEqual: arm64.COND_LE,
+ ssa.OpARM64LessEqualU: arm64.COND_LS,
+ ssa.OpARM64GreaterThan: arm64.COND_GT,
+ ssa.OpARM64GreaterThanU: arm64.COND_HI,
+ ssa.OpARM64GreaterEqual: arm64.COND_GE,
+ ssa.OpARM64GreaterEqualU: arm64.COND_HS,
+}
+
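+// blockJump gives, for each conditional block kind, the branch instruction
+// to emit and its inverse (used when the likely successor is the fallthrough).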
+var blockJump = map[ssa.BlockKind]struct {
+ asm, invasm obj.As
+}{
+ ssa.BlockARM64EQ: {arm64.ABEQ, arm64.ABNE},
+ ssa.BlockARM64NE: {arm64.ABNE, arm64.ABEQ},
+ ssa.BlockARM64LT: {arm64.ABLT, arm64.ABGE},
+ ssa.BlockARM64GE: {arm64.ABGE, arm64.ABLT},
+ ssa.BlockARM64LE: {arm64.ABLE, arm64.ABGT},
+ ssa.BlockARM64GT: {arm64.ABGT, arm64.ABLE},
+ ssa.BlockARM64ULT: {arm64.ABLO, arm64.ABHS},
+ ssa.BlockARM64UGE: {arm64.ABHS, arm64.ABLO},
+ ssa.BlockARM64UGT: {arm64.ABHI, arm64.ABLS},
+ ssa.BlockARM64ULE: {arm64.ABLS, arm64.ABHI},
+}
+
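+// ssaGenBlock emits the control-flow instructions for block b, exploiting
+// fallthrough when a successor is the next block to be emitted.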
+func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+ s.SetLineno(b.Line)
+
+ switch b.Kind {
+ case ssa.BlockPlain, ssa.BlockCall, ssa.BlockCheck:
+ if b.Succs[0].Block() != next {
+ p := gc.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+
+ case ssa.BlockDefer:
+ // defer returns in R0:
+ // 0 if we should continue executing
+ // 1 if we should jump to the deferreturn call
+ p := gc.Prog(arm64.ACMP)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0
+ p.Reg = arm64.REG_R0
+ p = gc.Prog(arm64.ABNE)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := gc.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+
+ case ssa.BlockExit:
+ gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
+
+ case ssa.BlockRet:
+ gc.Prog(obj.ARET)
+
+ case ssa.BlockRetJmp:
+ p := gc.Prog(obj.ARET)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))
+
+ case ssa.BlockARM64EQ, ssa.BlockARM64NE,
+ ssa.BlockARM64LT, ssa.BlockARM64GE,
+ ssa.BlockARM64LE, ssa.BlockARM64GT,
+ ssa.BlockARM64ULT, ssa.BlockARM64UGT,
+ ssa.BlockARM64ULE, ssa.BlockARM64UGE:
+ jmp := blockJump[b.Kind]
+ var p *obj.Prog
+ switch next {
+ case b.Succs[0].Block():
+ p = gc.Prog(jmp.invasm)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ case b.Succs[1].Block():
+ p = gc.Prog(jmp.asm)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ default:
+ p = gc.Prog(jmp.asm)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ q := gc.Prog(obj.AJMP)
+ q.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
+ }
+
+ default:
+ b.Unimplementedf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
+ }
+}
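
For orientation (not part of the patch): the condBits table feeds the CSET case in ssaGenValue when a comparison's result is used as a value, while blockJump drives ssaGenBlock when the comparison controls a branch. A hedged Go sketch of both shapes follows; the assembly in the comments is approximate.

```go
package main

//go:noinline
func lessVal(a, b int64) bool {
	return a < b // value form: CMP, then CSET LT via condBits
}

//go:noinline
func lessBranch(a, b int64) int64 {
	if a < b { // branch form: CMP, then BLT/BGE chosen from blockJump
		return 1
	}
	return 0
}

func main() {
	println(lessVal(1, 2), lessBranch(2, 1)) // true 0
}
```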
diff --git a/src/cmd/compile/internal/gc/builtin.go b/src/cmd/compile/internal/gc/builtin.go
index c1a641874e..9520870514 100644
--- a/src/cmd/compile/internal/gc/builtin.go
+++ b/src/cmd/compile/internal/gc/builtin.go
@@ -95,14 +95,15 @@ const runtimeimport = "" +
"4div\x00\x03\n\x00\n\x00\x01\n\x00\t\x11uint64div\x00\x03\x14\x00\x14\x00\x01\x14\x00\t\x0fint64" +
"mod\x00\x03\n\x00\n\x00\x01\n\x00\t\x11uint64mod\x00\x03\x14\x00\x14\x00\x01\x14\x00\t\x1bfloat6" +
"4toint64\x00\x01\x1a\x00\x01\n\x00\t\x1dfloat64touint64\x00\x01\x1a\x00\x01\x14\x00\t" +
- "\x1bint64tofloat64\x00\x01\n\x00\x01\x1a\x00\t\x1duint64tofloat64\x00" +
- "\x01\x14\x00\x01\x1a\x00\t\x19complex128div\x00\x04\x1e\vnum·2\x00\x00\x1e\vden·" +
- "3\x00\x00\x02\x1e\vquo·1\x00\x00\t\x19racefuncenter\x00\x01\x16d\x00\t\x17race" +
- "funcexit\x00\x00\x00\t\x0fraceread\x00\x01\x16d\x00\t\x11racewrite\x00\x01\x16" +
- "d\x00\t\x19racereadrange\x00\x04\x16\raddr·1\x00d\x16\rsize·2\x00" +
- "d\x00\t\x1bracewriterange\x00\x04\x16\x94\x03\x00d\x16\x96\x03\x00d\x00\t\x0fmsanrea" +
- "d\x00\x04\x16\x94\x03\x00d\x16\x96\x03\x00d\x00\t\x11msanwrite\x00\x04\x16\x94\x03\x00d\x16\x96\x03\x00d\x00\v\xf4" +
- "\x01\x02\v\x00\x01\x00\n$$\n"
+ "\x1dfloat64touint32\x00\x01\x1a\x00\x01\x12\x00\t\x1bint64tofloat64\x00" +
+ "\x01\n\x00\x01\x1a\x00\t\x1duint64tofloat64\x00\x01\x14\x00\x01\x1a\x00\t\x1duint32to" +
+ "float64\x00\x01\x12\x00\x01\x1a\x00\t\x19complex128div\x00\x04\x1e\vnum·2\x00" +
+ "\x00\x1e\vden·3\x00\x00\x02\x1e\vquo·1\x00\x00\t\x19racefuncenter\x00\x01\x16" +
+ "d\x00\t\x17racefuncexit\x00\x00\x00\t\x0fraceread\x00\x01\x16d\x00\t\x11race" +
+ "write\x00\x01\x16d\x00\t\x19racereadrange\x00\x04\x16\raddr·1\x00d\x16\r" +
+ "size·2\x00d\x00\t\x1bracewriterange\x00\x04\x16\x98\x03\x00d\x16\x9a\x03\x00d\x00\t" +
+ "\x0fmsanread\x00\x04\x16\x98\x03\x00d\x16\x9a\x03\x00d\x00\t\x11msanwrite\x00\x04\x16\x98\x03\x00d" +
+ "\x16\x9a\x03\x00d\x00\v\xf8\x01\x02\v\x00\x01\x00\n$$\n"
const unsafeimport = "" +
"cn\x00\x03v1\x01\vunsafe\x00\x05\r\rPointer\x00\x16\x00\t\x0fOffsetof\x00\x01" +
diff --git a/src/cmd/compile/internal/gc/builtin/runtime.go b/src/cmd/compile/internal/gc/builtin/runtime.go
index e9316cb313..ef7e408959 100644
--- a/src/cmd/compile/internal/gc/builtin/runtime.go
+++ b/src/cmd/compile/internal/gc/builtin/runtime.go
@@ -150,8 +150,10 @@ func int64mod(int64, int64) int64
func uint64mod(uint64, uint64) uint64
func float64toint64(float64) int64
func float64touint64(float64) uint64
+func float64touint32(float64) uint32
func int64tofloat64(int64) float64
func uint64tofloat64(uint64) float64
+func uint32tofloat64(uint32) float64
func complex128div(num complex128, den complex128) (quo complex128)
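
A hedged note on why these two helpers are added: 32-bit targets lack direct instructions for these conversions, so the compiler rewrites them into runtime calls (see the walk.go hunk later in this patch). The snippet below is not part of the patch; it only shows user code that triggers the rewrite.

```go
package main

func main() {
	f := 3.9
	u := uint32(f)  // on 386, walk rewrites this into float64touint32(f)
	g := float64(u) // ...and this into uint32tofloat64(u)
	println(u, g)
}
```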
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index 62ea44f776..997e972e0a 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -26,6 +26,9 @@ func initssa() *ssa.Config {
ssaExp.mustImplement = true
if ssaConfig == nil {
ssaConfig = ssa.NewConfig(Thearch.LinkArch.Name, &ssaExp, Ctxt, Debug['N'] == 0)
+ if Thearch.LinkArch.Name == "386" {
+ ssaConfig.Set387(Thearch.Use387)
+ }
}
return ssaConfig
}
@@ -37,8 +40,8 @@ func shouldssa(fn *Node) bool {
if os.Getenv("SSATEST") == "" {
return false
}
+ case "amd64", "amd64p32", "arm", "386", "arm64":
// Generally available.
- case "amd64":
}
if !ssaEnabled {
return false
@@ -1146,6 +1149,7 @@ var opToSSA = map[opAndType]ssa.Op{
opAndType{OEQ, TFUNC}: ssa.OpEqPtr,
opAndType{OEQ, TMAP}: ssa.OpEqPtr,
opAndType{OEQ, TCHAN}: ssa.OpEqPtr,
+ opAndType{OEQ, TPTR32}: ssa.OpEqPtr,
opAndType{OEQ, TPTR64}: ssa.OpEqPtr,
opAndType{OEQ, TUINTPTR}: ssa.OpEqPtr,
opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
@@ -1166,6 +1170,7 @@ var opToSSA = map[opAndType]ssa.Op{
opAndType{ONE, TFUNC}: ssa.OpNeqPtr,
opAndType{ONE, TMAP}: ssa.OpNeqPtr,
opAndType{ONE, TCHAN}: ssa.OpNeqPtr,
+ opAndType{ONE, TPTR32}: ssa.OpNeqPtr,
opAndType{ONE, TPTR64}: ssa.OpNeqPtr,
opAndType{ONE, TUINTPTR}: ssa.OpNeqPtr,
opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
@@ -1330,6 +1335,15 @@ var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
}
+// fpConvOpToSSA32 is used only on 32-bit archs and includes only the entries
+// that differ from fpConvOpToSSA: on 32-bit archs, don't use int64<->float
+// conversions for uint32.
+var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
+ twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32},
+ twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32},
+ twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32},
+ twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32},
+}
+
var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
opAndTwoTypes{OLSH, TINT8, TUINT8}: ssa.OpLsh8x8,
opAndTwoTypes{OLSH, TUINT8, TUINT8}: ssa.OpLsh8x8,
@@ -1646,6 +1660,11 @@ func (s *state) expr(n *Node) *ssa.Value {
if ft.IsFloat() || tt.IsFloat() {
conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
+ if s.config.IntSize == 4 && Thearch.LinkArch.Name != "amd64p32" {
+ if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
+ conv = conv1
+ }
+ }
if !ok {
s.Fatalf("weird float conversion %s -> %s", ft, tt)
}
@@ -1954,7 +1973,7 @@ func (s *state) expr(n *Node) *ssa.Value {
case n.Left.Type.IsString():
a := s.expr(n.Left)
i := s.expr(n.Right)
- i = s.extendIndex(i)
+ i = s.extendIndex(i, Panicindex)
if !n.Bounded {
len := s.newValue1(ssa.OpStringLen, Types[TINT], a)
s.boundsCheck(i, len)
@@ -2043,13 +2062,13 @@ func (s *state) expr(n *Node) *ssa.Value {
var i, j, k *ssa.Value
low, high, max := n.SliceBounds()
if low != nil {
- i = s.extendIndex(s.expr(low))
+ i = s.extendIndex(s.expr(low), panicslice)
}
if high != nil {
- j = s.extendIndex(s.expr(high))
+ j = s.extendIndex(s.expr(high), panicslice)
}
if max != nil {
- k = s.extendIndex(s.expr(max))
+ k = s.extendIndex(s.expr(max), panicslice)
}
p, l, c := s.slice(n.Left.Type, v, i, j, k)
return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
@@ -2059,10 +2078,10 @@ func (s *state) expr(n *Node) *ssa.Value {
var i, j *ssa.Value
low, high, _ := n.SliceBounds()
if low != nil {
- i = s.extendIndex(s.expr(low))
+ i = s.extendIndex(s.expr(low), panicslice)
}
if high != nil {
- j = s.extendIndex(s.expr(high))
+ j = s.extendIndex(s.expr(high), panicslice)
}
p, l, _ := s.slice(n.Left.Type, v, i, j, nil)
return s.newValue2(ssa.OpStringMake, n.Type, p, l)
@@ -2246,7 +2265,7 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
if haspointers(et) {
s.insertWBmove(et, addr, arg.v, n.Lineno, arg.isVolatile)
} else {
- s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, et.Size(), addr, arg.v, s.mem())
+ s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, SizeAlignAuxInt(et), addr, arg.v, s.mem())
}
}
}
@@ -2379,14 +2398,14 @@ func (s *state) assign(left *Node, right *ssa.Value, wb, deref bool, line int32,
if deref {
// Treat as a mem->mem move.
if right == nil {
- s.vars[&memVar] = s.newValue2I(ssa.OpZero, ssa.TypeMem, t.Size(), addr, s.mem())
+ s.vars[&memVar] = s.newValue2I(ssa.OpZero, ssa.TypeMem, SizeAlignAuxInt(t), addr, s.mem())
return
}
if wb {
s.insertWBmove(t, addr, right, line, rightIsVolatile)
return
}
- s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), addr, right, s.mem())
+ s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, SizeAlignAuxInt(t), addr, right, s.mem())
return
}
// Treat as a store.
@@ -2585,7 +2604,7 @@ func (s *state) call(n *Node, k callKind) *ssa.Value {
s.nilCheck(itab)
}
itabidx := fn.Xoffset + 3*int64(Widthptr) + 8 // offset of fun field in runtime.itab
- itab = s.newValue1I(ssa.OpOffPtr, Types[TUINTPTR], itabidx, itab)
+ itab = s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TUINTPTR]), itabidx, itab)
if k == callNormal {
codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], itab, s.mem())
} else {
@@ -2608,16 +2627,18 @@ func (s *state) call(n *Node, k callKind) *ssa.Value {
if k != callNormal {
argStart += int64(2 * Widthptr)
}
- addr := s.entryNewValue1I(ssa.OpOffPtr, Types[TUINTPTR], argStart, s.sp)
+ addr := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(Types[TUINTPTR]), argStart, s.sp)
s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, rcvr, s.mem())
}
// Defer/go args
if k != callNormal {
// Write argsize and closure (args to Newproc/Deferproc).
+ argStart := Ctxt.FixedFrameSize()
argsize := s.constInt32(Types[TUINT32], int32(stksize))
- s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, 4, s.sp, argsize, s.mem())
- addr := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(Types[TUINTPTR]), int64(Widthptr), s.sp)
+ addr := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(Types[TUINT32]), argStart, s.sp)
+ s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, 4, addr, argsize, s.mem())
+ addr = s.entryNewValue1I(ssa.OpOffPtr, Ptrto(Types[TUINTPTR]), argStart+int64(Widthptr), s.sp)
s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, closure, s.mem())
stksize += 2 * int64(Widthptr)
}
@@ -2762,7 +2783,7 @@ func (s *state) addr(n *Node, bounded bool) (*ssa.Value, bool) {
if n.Left.Type.IsSlice() {
a := s.expr(n.Left)
i := s.expr(n.Right)
- i = s.extendIndex(i)
+ i = s.extendIndex(i, Panicindex)
len := s.newValue1(ssa.OpSliceLen, Types[TINT], a)
if !n.Bounded {
s.boundsCheck(i, len)
@@ -2772,7 +2793,7 @@ func (s *state) addr(n *Node, bounded bool) (*ssa.Value, bool) {
} else { // array
a, isVolatile := s.addr(n.Left, bounded)
i := s.expr(n.Right)
- i = s.extendIndex(i)
+ i = s.extendIndex(i, Panicindex)
len := s.constInt(Types[TINT], n.Left.Type.NumElem())
if !n.Bounded {
s.boundsCheck(i, len)
@@ -2913,12 +2934,11 @@ func (s *state) nilCheck(ptr *ssa.Value) {
// boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not.
// Starts a new block on return.
+// idx is already converted to full int width.
func (s *state) boundsCheck(idx, len *ssa.Value) {
if Debug['B'] != 0 {
return
}
- // TODO: convert index to full width?
- // TODO: if index is 64-bit and we're compiling to 32-bit, check that high 32 bits are zero.
// bounds check
cmp := s.newValue2(ssa.OpIsInBounds, Types[TBOOL], idx, len)
@@ -2927,19 +2947,18 @@ func (s *state) boundsCheck(idx, len *ssa.Value) {
// sliceBoundsCheck generates slice bounds checking code. Checks if 0 <= idx <= len, branches to exit if not.
// Starts a new block on return.
+// idx and len are already converted to full int width.
func (s *state) sliceBoundsCheck(idx, len *ssa.Value) {
if Debug['B'] != 0 {
return
}
- // TODO: convert index to full width?
- // TODO: if index is 64-bit and we're compiling to 32-bit, check that high 32 bits are zero.
// bounds check
cmp := s.newValue2(ssa.OpIsSliceInBounds, Types[TBOOL], idx, len)
s.check(cmp, panicslice)
}
-// If cmp (a bool) is true, panic using the given function.
+// If cmp (a bool) is false, panic using the given function.
func (s *state) check(cmp *ssa.Value, fn *Node) {
b := s.endBlock()
b.Kind = ssa.BlockIf
@@ -2969,19 +2988,23 @@ func (s *state) check(cmp *ssa.Value, fn *Node) {
// is started to load the return values.
func (s *state) rtcall(fn *Node, returns bool, results []*Type, args ...*ssa.Value) []*ssa.Value {
// Write args to the stack
- var off int64 // TODO: arch-dependent starting offset?
+ off := Ctxt.FixedFrameSize()
for _, arg := range args {
t := arg.Type
off = Rnd(off, t.Alignment())
ptr := s.sp
if off != 0 {
- ptr = s.newValue1I(ssa.OpOffPtr, Types[TUINTPTR], off, s.sp)
+ ptr = s.newValue1I(ssa.OpOffPtr, t.PtrTo(), off, s.sp)
}
size := t.Size()
s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, size, ptr, arg, s.mem())
off += size
}
off = Rnd(off, int64(Widthptr))
+ if Thearch.LinkArch.Name == "amd64p32" {
+ // amd64p32 wants 8-byte alignment of the start of the return values.
+ off = Rnd(off, 8)
+ }
// Issue call
call := s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, fn.Sym, s.mem())
@@ -2992,7 +3015,7 @@ func (s *state) rtcall(fn *Node, returns bool, results []*Type, args ...*ssa.Val
if !returns {
b.Kind = ssa.BlockExit
b.SetControl(call)
- call.AuxInt = off
+ call.AuxInt = off - Ctxt.FixedFrameSize()
if len(results) > 0 {
Fatalf("panic call can't have results")
}
@@ -3015,7 +3038,7 @@ func (s *state) rtcall(fn *Node, returns bool, results []*Type, args ...*ssa.Val
off = Rnd(off, t.Alignment())
ptr := s.sp
if off != 0 {
- ptr = s.newValue1I(ssa.OpOffPtr, Types[TUINTPTR], off, s.sp)
+ ptr = s.newValue1I(ssa.OpOffPtr, Ptrto(t), off, s.sp)
}
res[i] = s.newValue2(ssa.OpLoad, t, ptr, s.mem())
off += t.Size()
@@ -3049,10 +3072,9 @@ func (s *state) insertWBmove(t *Type, left, right *ssa.Value, line int32, rightI
aux := &ssa.ExternSymbol{Typ: Types[TBOOL], Sym: syslook("writeBarrier").Sym}
flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb)
- // TODO: select the .enabled field. It is currently first, so not needed for now.
- // Load word, test byte, avoiding partial register write from load byte.
+ // Load word, test word, avoiding partial register write from load byte.
flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem())
- flag = s.newValue1(ssa.OpTrunc64to8, Types[TBOOL], flag)
+ flag = s.newValue2(ssa.OpNeq32, Types[TBOOL], flag, s.constInt32(Types[TUINT32], 0))
b := s.endBlock()
b.Kind = ssa.BlockIf
b.Likely = ssa.BranchUnlikely
@@ -3073,7 +3095,7 @@ func (s *state) insertWBmove(t *Type, left, right *ssa.Value, line int32, rightI
tmp := temp(t)
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, tmp, s.mem())
tmpaddr, _ := s.addr(tmp, true)
- s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), tmpaddr, right, s.mem())
+ s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, SizeAlignAuxInt(t), tmpaddr, right, s.mem())
// Issue typedmemmove call.
taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(t)}, s.sb)
s.rtcall(typedmemmove, true, nil, taddr, left, tmpaddr)
@@ -3083,7 +3105,7 @@ func (s *state) insertWBmove(t *Type, left, right *ssa.Value, line int32, rightI
s.endBlock().AddEdgeTo(bEnd)
s.startBlock(bElse)
- s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), left, right, s.mem())
+ s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, SizeAlignAuxInt(t), left, right, s.mem())
s.endBlock().AddEdgeTo(bEnd)
s.startBlock(bEnd)
@@ -3117,10 +3139,9 @@ func (s *state) insertWBstore(t *Type, left, right *ssa.Value, line int32, skip
aux := &ssa.ExternSymbol{Typ: Types[TBOOL], Sym: syslook("writeBarrier").Sym}
flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb)
- // TODO: select the .enabled field. It is currently first, so not needed for now.
- // Load word, test byte, avoiding partial register write from load byte.
+ // Load word, test word, avoiding partial register write from load byte.
flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem())
- flag = s.newValue1(ssa.OpTrunc64to8, Types[TBOOL], flag)
+ flag = s.newValue2(ssa.OpNeq32, Types[TBOOL], flag, s.constInt32(Types[TUINT32], 0))
b := s.endBlock()
b.Kind = ssa.BlockIf
b.Likely = ssa.BranchUnlikely
@@ -3930,6 +3951,11 @@ type SSAGenState struct {
// bstart remembers where each block starts (indexed by block ID)
bstart []*obj.Prog
+
+ // 387 port: maps from SSE registers (REG_X?) to 387 registers (REG_F?)
+ SSEto387 map[int16]int16
+ // Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include x86-387, PPC, and Sparc V8.
+ ScratchFpMem *Node
}
// Pc returns the current Prog.
@@ -3966,6 +3992,13 @@ func genssa(f *ssa.Func, ptxt *obj.Prog, gcargs, gclocals *Sym) {
blockProgs[Pc] = f.Blocks[0]
}
+ if Thearch.Use387 {
+ s.SSEto387 = map[int16]int16{}
+ }
+ if f.Config.NeedsFpScratch {
+ s.ScratchFpMem = temp(Types[TUINT64])
+ }
+
// Emit basic blocks
for i, b := range f.Blocks {
s.bstart[b.ID] = Pc
@@ -4128,12 +4161,25 @@ func SSAGenFPJump(s *SSAGenState, b, next *ssa.Block, jumps *[2][2]FloatingEQNEJ
}
}
+func AuxOffset(v *ssa.Value) (offset int64) {
+ if v.Aux == nil {
+ return 0
+ }
+ switch sym := v.Aux.(type) {
+ case *ssa.AutoSymbol:
+ n := sym.Node.(*Node)
+ return n.Xoffset
+ }
+ return 0
+}
+
// AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
func AddAux(a *obj.Addr, v *ssa.Value) {
AddAux2(a, v, v.AuxInt)
}
func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
- if a.Type != obj.TYPE_MEM {
+ if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR {
v.Fatalf("bad AddAux addr %v", a)
}
// add integer offset
@@ -4171,17 +4217,27 @@ func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
}
}
+// SizeAlignAuxInt returns an AuxInt encoding the size and alignment of type t.
+func SizeAlignAuxInt(t *Type) int64 {
+ return ssa.MakeSizeAndAlign(t.Size(), t.Alignment()).Int64()
+}
+
// extendIndex extends v to a full int width.
-func (s *state) extendIndex(v *ssa.Value) *ssa.Value {
+// It panics using the given function if v does not fit in an int (possible only on 32-bit archs).
+func (s *state) extendIndex(v *ssa.Value, panicfn *Node) *ssa.Value {
size := v.Type.Size()
if size == s.config.IntSize {
return v
}
if size > s.config.IntSize {
- // TODO: truncate 64-bit indexes on 32-bit pointer archs. We'd need to test
- // the high word and branch to out-of-bounds failure if it is not 0.
- s.Unimplementedf("64->32 index truncation not implemented")
- return v
+ // truncate 64-bit indexes on 32-bit pointer archs. Test the
+ // high word and branch to out-of-bounds failure if it is not 0.
+ if Debug['B'] == 0 {
+ hi := s.newValue1(ssa.OpInt64Hi, Types[TUINT32], v)
+ cmp := s.newValue2(ssa.OpEq32, Types[TBOOL], hi, s.constInt32(Types[TUINT32], 0))
+ s.check(cmp, panicfn)
+ }
+ return s.newValue1(ssa.OpTrunc64to32, Types[TINT], v)
}
// Extend value to the required size
@@ -4220,17 +4276,74 @@ func (s *state) extendIndex(v *ssa.Value) *ssa.Value {
return s.newValue1(op, Types[TINT], v)
}
-// SSARegNum returns the register (in cmd/internal/obj numbering) to
-// which v has been allocated. Panics if v is not assigned to a
-// register.
-// TODO: Make this panic again once it stops happening routinely.
-func SSARegNum(v *ssa.Value) int16 {
+// SSAReg returns the register to which v has been allocated.
+func SSAReg(v *ssa.Value) *ssa.Register {
reg := v.Block.Func.RegAlloc[v.ID]
if reg == nil {
- v.Unimplementedf("nil regnum for value: %s\n%s\n", v.LongString(), v.Block.Func)
- return 0
+ v.Fatalf("nil register for value: %s\n%s\n", v.LongString(), v.Block.Func)
+ }
+ return reg.(*ssa.Register)
+}
+
+// SSAReg0 returns the register to which the first output of v has been allocated.
+func SSAReg0(v *ssa.Value) *ssa.Register {
+ reg := v.Block.Func.RegAlloc[v.ID].(ssa.LocPair)[0]
+ if reg == nil {
+ v.Fatalf("nil first register for value: %s\n%s\n", v.LongString(), v.Block.Func)
+ }
+ return reg.(*ssa.Register)
+}
+
+// SSAReg1 returns the register to which the second output of v has been allocated.
+func SSAReg1(v *ssa.Value) *ssa.Register {
+ reg := v.Block.Func.RegAlloc[v.ID].(ssa.LocPair)[1]
+ if reg == nil {
+ v.Fatalf("nil second register for value: %s\n%s\n", v.LongString(), v.Block.Func)
+ }
+ return reg.(*ssa.Register)
+}
+
+// SSARegNum returns the register number (in cmd/internal/obj numbering) to which v has been allocated.
+func SSARegNum(v *ssa.Value) int16 {
+ return Thearch.SSARegToReg[SSAReg(v).Num]
+}
+
+// SSARegNum0 returns the register number (in cmd/internal/obj numbering) to which the first output of v has been allocated.
+func SSARegNum0(v *ssa.Value) int16 {
+ return Thearch.SSARegToReg[SSAReg0(v).Num]
+}
+
+// SSARegNum1 returns the register number (in cmd/internal/obj numbering) to which the second output of v has been allocated.
+func SSARegNum1(v *ssa.Value) int16 {
+ return Thearch.SSARegToReg[SSAReg1(v).Num]
+}
+
+// CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
+// Called during ssaGenValue.
+func CheckLoweredPhi(v *ssa.Value) {
+ if v.Op != ssa.OpPhi {
+ v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString())
+ }
+ if v.Type.IsMemory() {
+ return
+ }
+ f := v.Block.Func
+ loc := f.RegAlloc[v.ID]
+ for _, a := range v.Args {
+ if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
+ v.Fatalf("phi arg at different location than phi: %v @ %v, but arg %v @ %v\n%s\n", v, loc, a, aloc, v.Block.Func)
+ }
+ }
+}
+
+// CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block.
+// The output of LoweredGetClosurePtr is generally hardwired to the correct register.
+// That register contains the closure pointer on closure entry.
+func CheckLoweredGetClosurePtr(v *ssa.Value) {
+ entry := v.Block.Func.Entry
+ if entry != v.Block || entry.Values[0] != v {
+ Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
}
- return Thearch.SSARegToReg[reg.(*ssa.Register).Num]
}
// AutoVar returns a *Node and int64 representing the auto variable and offset within it
@@ -4372,6 +4485,25 @@ func (e *ssaExport) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSl
return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s}
}
+func (e *ssaExport) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
+ n := name.N.(*Node)
+ var t *Type
+ if name.Type.IsSigned() {
+ t = Types[TINT32]
+ } else {
+ t = Types[TUINT32]
+ }
+ if n.Class == PAUTO && !n.Addrtaken {
+ // Split this int64 up into two separate variables.
+ h := e.namedAuto(n.Sym.Name+".hi", t)
+ l := e.namedAuto(n.Sym.Name+".lo", Types[TUINT32])
+ return ssa.LocalSlot{N: h, Type: t, Off: 0}, ssa.LocalSlot{N: l, Type: Types[TUINT32], Off: 0}
+ }
+ // Return the two parts of the larger variable.
+ // Assuming little endian (we don't support big-endian 32-bit architectures yet).
+ return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: Types[TUINT32], Off: name.Off}
+}
+
func (e *ssaExport) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
n := name.N.(*Node)
st := name.Type
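
A hedged sketch (not part of the patch) of what the new extendIndex path amounts to on a 32-bit target, with OpInt64Hi, the panic check, and OpTrunc64to32 written out as plain Go:

```go
package main

func index32(s string, i int64) byte {
	hi := uint32(i >> 32) // OpInt64Hi
	if hi != 0 {          // s.check(cmp, panicfn): high word must be zero
		panic("index out of range")
	}
	return s[int32(i)] // OpTrunc64to32, then the usual bounds check
}

func main() {
	println(index32("hello", 1)) // 101 ('e')
}
```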
diff --git a/src/cmd/compile/internal/gc/testdata/arith_ssa.go b/src/cmd/compile/internal/gc/testdata/arith_ssa.go
index 7c82bbd6ce..d850ce27b2 100644
--- a/src/cmd/compile/internal/gc/testdata/arith_ssa.go
+++ b/src/cmd/compile/internal/gc/testdata/arith_ssa.go
@@ -553,6 +553,445 @@ func testOrPhi() {
}
}
+//go:noinline
+func addshiftLL_ssa(a, b uint32) uint32 {
+ return a + b<<3
+}
+
+//go:noinline
+func subshiftLL_ssa(a, b uint32) uint32 {
+ return a - b<<3
+}
+
+//go:noinline
+func rsbshiftLL_ssa(a, b uint32) uint32 {
+ return a<<3 - b
+}
+
+//go:noinline
+func andshiftLL_ssa(a, b uint32) uint32 {
+ return a & (b << 3)
+}
+
+//go:noinline
+func orshiftLL_ssa(a, b uint32) uint32 {
+ return a | b<<3
+}
+
+//go:noinline
+func xorshiftLL_ssa(a, b uint32) uint32 {
+ return a ^ b<<3
+}
+
+//go:noinline
+func bicshiftLL_ssa(a, b uint32) uint32 {
+ return a &^ (b << 3)
+}
+
+//go:noinline
+func notshiftLL_ssa(a uint32) uint32 {
+ return ^(a << 3)
+}
+
+//go:noinline
+func addshiftRL_ssa(a, b uint32) uint32 {
+ return a + b>>3
+}
+
+//go:noinline
+func subshiftRL_ssa(a, b uint32) uint32 {
+ return a - b>>3
+}
+
+//go:noinline
+func rsbshiftRL_ssa(a, b uint32) uint32 {
+ return a>>3 - b
+}
+
+//go:noinline
+func andshiftRL_ssa(a, b uint32) uint32 {
+ return a & (b >> 3)
+}
+
+//go:noinline
+func orshiftRL_ssa(a, b uint32) uint32 {
+ return a | b>>3
+}
+
+//go:noinline
+func xorshiftRL_ssa(a, b uint32) uint32 {
+ return a ^ b>>3
+}
+
+//go:noinline
+func bicshiftRL_ssa(a, b uint32) uint32 {
+ return a &^ (b >> 3)
+}
+
+//go:noinline
+func notshiftRL_ssa(a uint32) uint32 {
+ return ^(a >> 3)
+}
+
+//go:noinline
+func addshiftRA_ssa(a, b int32) int32 {
+ return a + b>>3
+}
+
+//go:noinline
+func subshiftRA_ssa(a, b int32) int32 {
+ return a - b>>3
+}
+
+//go:noinline
+func rsbshiftRA_ssa(a, b int32) int32 {
+ return a>>3 - b
+}
+
+//go:noinline
+func andshiftRA_ssa(a, b int32) int32 {
+ return a & (b >> 3)
+}
+
+//go:noinline
+func orshiftRA_ssa(a, b int32) int32 {
+ return a | b>>3
+}
+
+//go:noinline
+func xorshiftRA_ssa(a, b int32) int32 {
+ return a ^ b>>3
+}
+
+//go:noinline
+func bicshiftRA_ssa(a, b int32) int32 {
+ return a &^ (b >> 3)
+}
+
+//go:noinline
+func notshiftRA_ssa(a int32) int32 {
+ return ^(a >> 3)
+}
+
+//go:noinline
+func addshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a + b<<s
+}
+
+//go:noinline
+func subshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a - b<<s
+}
+
+//go:noinline
+func rsbshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a<<s - b
+}
+
+//go:noinline
+func andshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a & (b << s)
+}
+
+//go:noinline
+func orshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a | b<<s
+}
+
+//go:noinline
+func xorshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a ^ b<<s
+}
+
+//go:noinline
+func bicshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a &^ (b << s)
+}
+
+//go:noinline
+func notshiftLLreg_ssa(a uint32, s uint8) uint32 {
+ return ^(a << s)
+}
+
+//go:noinline
+func addshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a + b>>s
+}
+
+//go:noinline
+func subshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a - b>>s
+}
+
+//go:noinline
+func rsbshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a>>s - b
+}
+
+//go:noinline
+func andshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a & (b >> s)
+}
+
+//go:noinline
+func orshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a | b>>s
+}
+
+//go:noinline
+func xorshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a ^ b>>s
+}
+
+//go:noinline
+func bicshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a &^ (b >> s)
+}
+
+//go:noinline
+func notshiftRLreg_ssa(a uint32, s uint8) uint32 {
+ return ^(a >> s)
+}
+
+//go:noinline
+func addshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a + b>>s
+}
+
+//go:noinline
+func subshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a - b>>s
+}
+
+//go:noinline
+func rsbshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a>>s - b
+}
+
+//go:noinline
+func andshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a & (b >> s)
+}
+
+//go:noinline
+func orshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a | b>>s
+}
+
+//go:noinline
+func xorshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a ^ b>>s
+}
+
+//go:noinline
+func bicshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a &^ (b >> s)
+}
+
+//go:noinline
+func notshiftRAreg_ssa(a int32, s uint8) int32 {
+ return ^(a >> s)
+}
+
+// testShiftedOps exercises the ARM/ARM64 shifted-operand operations.
+func testShiftedOps() {
+ a, b := uint32(10), uint32(42)
+ if want, got := a+b<<3, addshiftLL_ssa(a, b); got != want {
+ println("addshiftLL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a-b<<3, subshiftLL_ssa(a, b); got != want {
+ println("subshiftLL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a<<3-b, rsbshiftLL_ssa(a, b); got != want {
+ println("rsbshiftLL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a&(b<<3), andshiftLL_ssa(a, b); got != want {
+ println("andshiftLL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a|b<<3, orshiftLL_ssa(a, b); got != want {
+ println("orshiftLL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a^b<<3, xorshiftLL_ssa(a, b); got != want {
+ println("xorshiftLL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a&^(b<<3), bicshiftLL_ssa(a, b); got != want {
+ println("bicshiftLL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := ^(a << 3), notshiftLL_ssa(a); got != want {
+ println("notshiftLL_ssa(10) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a+b>>3, addshiftRL_ssa(a, b); got != want {
+ println("addshiftRL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a-b>>3, subshiftRL_ssa(a, b); got != want {
+ println("subshiftRL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a>>3-b, rsbshiftRL_ssa(a, b); got != want {
+ println("rsbshiftRL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a&(b>>3), andshiftRL_ssa(a, b); got != want {
+ println("andshiftRL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a|b>>3, orshiftRL_ssa(a, b); got != want {
+ println("orshiftRL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a^b>>3, xorshiftRL_ssa(a, b); got != want {
+ println("xorshiftRL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a&^(b>>3), bicshiftRL_ssa(a, b); got != want {
+ println("bicshiftRL_ssa(10, 42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := ^(a >> 3), notshiftRL_ssa(a); got != want {
+ println("notshiftRL_ssa(10) =", got, " want ", want)
+ failed = true
+ }
+ c, d := int32(10), int32(-42)
+ if want, got := c+d>>3, addshiftRA_ssa(c, d); got != want {
+ println("addshiftRA_ssa(10, -42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c-d>>3, subshiftRA_ssa(c, d); got != want {
+ println("subshiftRA_ssa(10, -42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c>>3-d, rsbshiftRA_ssa(c, d); got != want {
+ println("rsbshiftRA_ssa(10, -42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c&(d>>3), andshiftRA_ssa(c, d); got != want {
+ println("andshiftRA_ssa(10, -42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c|d>>3, orshiftRA_ssa(c, d); got != want {
+ println("orshiftRA_ssa(10, -42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c^d>>3, xorshiftRA_ssa(c, d); got != want {
+ println("xorshiftRA_ssa(10, -42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c&^(d>>3), bicshiftRA_ssa(c, d); got != want {
+ println("bicshiftRA_ssa(10, -42) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := ^(d >> 3), notshiftRA_ssa(d); got != want {
+ println("notshiftRA_ssa(-42) =", got, " want ", want)
+ failed = true
+ }
+ s := uint8(3)
+ if want, got := a+b<<s, addshiftLLreg_ssa(a, b, s); got != want {
+ println("addshiftLLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a-b<<s, subshiftLLreg_ssa(a, b, s); got != want {
+ println("subshiftLLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a<<s-b, rsbshiftLLreg_ssa(a, b, s); got != want {
+ println("rsbshiftLLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a&(b<<s), andshiftLLreg_ssa(a, b, s); got != want {
+ println("andshiftLLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a|b<<s, orshiftLLreg_ssa(a, b, s); got != want {
+ println("orshiftLLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a^b<<s, xorshiftLLreg_ssa(a, b, s); got != want {
+ println("xorshiftLLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a&^(b<<s), bicshiftLLreg_ssa(a, b, s); got != want {
+ println("bicshiftLLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := ^(a << s), notshiftLLreg_ssa(a, s); got != want {
+ println("notshiftLLreg_ssa(10) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a+b>>s, addshiftRLreg_ssa(a, b, s); got != want {
+ println("addshiftRLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a-b>>s, subshiftRLreg_ssa(a, b, s); got != want {
+ println("subshiftRLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a>>s-b, rsbshiftRLreg_ssa(a, b, s); got != want {
+ println("rsbshiftRLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a&(b>>s), andshiftRLreg_ssa(a, b, s); got != want {
+ println("andshiftRLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a|b>>s, orshiftRLreg_ssa(a, b, s); got != want {
+ println("orshiftRLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a^b>>s, xorshiftRLreg_ssa(a, b, s); got != want {
+ println("xorshiftRLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := a&^(b>>s), bicshiftRLreg_ssa(a, b, s); got != want {
+ println("bicshiftRLreg_ssa(10, 42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := ^(a >> s), notshiftRLreg_ssa(a, s); got != want {
+ println("notshiftRLreg_ssa(10) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c+d>>s, addshiftRAreg_ssa(c, d, s); got != want {
+ println("addshiftRAreg_ssa(10, -42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c-d>>s, subshiftRAreg_ssa(c, d, s); got != want {
+ println("subshiftRAreg_ssa(10, -42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c>>s-d, rsbshiftRAreg_ssa(c, d, s); got != want {
+ println("rsbshiftRAreg_ssa(10, -42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c&(d>>s), andshiftRAreg_ssa(c, d, s); got != want {
+ println("andshiftRAreg_ssa(10, -42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c|d>>s, orshiftRAreg_ssa(c, d, s); got != want {
+ println("orshiftRAreg_ssa(10, -42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c^d>>s, xorshiftRAreg_ssa(c, d, s); got != want {
+ println("xorshiftRAreg_ssa(10, -42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := c&^(d>>s), bicshiftRAreg_ssa(c, d, s); got != want {
+ println("bicshiftRAreg_ssa(10, -42, 3) =", got, " want ", want)
+ failed = true
+ }
+ if want, got := ^(d >> s), notshiftRAreg_ssa(d, s); got != want {
+ println("notshiftRAreg_ssa(-42, 3) =", got, " want ", want)
+ failed = true
+ }
+}
+
var failed = false
func main() {
@@ -573,6 +1012,7 @@ func main() {
testLoadCombine()
testLoadSymCombine()
testShiftRemoval()
+ testShiftedOps()
if failed {
panic("failed")
diff --git a/src/cmd/compile/internal/gc/testdata/string_ssa.go b/src/cmd/compile/internal/gc/testdata/string_ssa.go
index b47c2f1d07..897e874ee5 100644
--- a/src/cmd/compile/internal/gc/testdata/string_ssa.go
+++ b/src/cmd/compile/internal/gc/testdata/string_ssa.go
@@ -111,6 +111,67 @@ func testSmallIndexType() {
}
//go:noinline
+func testInt64Index_ssa(s string, i int64) byte {
+ return s[i]
+}
+
+//go:noinline
+func testInt64Slice_ssa(s string, i, j int64) string {
+ return s[i:j]
+}
+
+func testInt64Index() {
+ tests := []struct {
+ i int64
+ j int64
+ b byte
+ s string
+ }{
+ {0, 5, 'B', "Below"},
+ {5, 10, 'E', "Exact"},
+ {10, 15, 'A', "Above"},
+ }
+
+ str := "BelowExactAbove"
+ for i, t := range tests {
+ if got := testInt64Index_ssa(str, t.i); got != t.b {
+ println("#", i, "got ", got, ", wanted", t.b)
+ failed = true
+ }
+ if got := testInt64Slice_ssa(str, t.i, t.j); got != t.s {
+ println("#", i, "got ", got, ", wanted", t.s)
+ failed = true
+ }
+ }
+}
+
+func testInt64IndexPanic() {
+ defer func() {
+ if r := recover(); r != nil {
+ println("paniced as expected")
+ }
+ }()
+
+ str := "foobar"
+ println("got ", testInt64Index_ssa(str, 1<<32+1))
+ println("expected to panic, but didn't")
+ failed = true
+}
+
+func testInt64SlicePanic() {
+ defer func() {
+ if r := recover(); r != nil {
+ println("paniced as expected")
+ }
+ }()
+
+ str := "foobar"
+ println("got ", testInt64Slice_ssa(str, 1<<32, 1<<32+1))
+ println("expected to panic, but didn't")
+ failed = true
+}
+
+//go:noinline
func testStringElem_ssa(s string, i int) byte {
return s[i]
}
@@ -153,6 +214,9 @@ func main() {
testSmallIndexType()
testStringElem()
testStringElemConst()
+ testInt64Index()
+ testInt64IndexPanic()
+ testInt64SlicePanic()
if failed {
panic("failed")
diff --git a/src/cmd/compile/internal/gc/type.go b/src/cmd/compile/internal/gc/type.go
index ab13df6eba..460b395c2e 100644
--- a/src/cmd/compile/internal/gc/type.go
+++ b/src/cmd/compile/internal/gc/type.go
@@ -1207,6 +1207,7 @@ func (t *Type) ChanDir() ChanDir {
func (t *Type) IsMemory() bool { return false }
func (t *Type) IsFlags() bool { return false }
func (t *Type) IsVoid() bool { return false }
+func (t *Type) IsTuple() bool { return false }
// IsUntyped reports whether t is an untyped type.
func (t *Type) IsUntyped() bool {
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
index 66eb7e97ac..1e7d80d3e9 100644
--- a/src/cmd/compile/internal/gc/walk.go
+++ b/src/cmd/compile/internal/gc/walk.go
@@ -1094,12 +1094,45 @@ opswitch:
if n.Type.IsFloat() {
if n.Left.Type.Etype == TINT64 {
- n = mkcall("int64tofloat64", n.Type, init, conv(n.Left, Types[TINT64]))
+ n = conv(mkcall("int64tofloat64", Types[TFLOAT64], init, conv(n.Left, Types[TINT64])), n.Type)
break
}
if n.Left.Type.Etype == TUINT64 {
- n = mkcall("uint64tofloat64", n.Type, init, conv(n.Left, Types[TUINT64]))
+ n = conv(mkcall("uint64tofloat64", Types[TFLOAT64], init, conv(n.Left, Types[TUINT64])), n.Type)
+ break
+ }
+ }
+ }
+
+ if Thearch.LinkArch.Family == sys.I386 {
+ if n.Left.Type.IsFloat() {
+ if n.Type.Etype == TINT64 {
+ n = mkcall("float64toint64", n.Type, init, conv(n.Left, Types[TFLOAT64]))
+ break
+ }
+
+ if n.Type.Etype == TUINT64 {
+ n = mkcall("float64touint64", n.Type, init, conv(n.Left, Types[TFLOAT64]))
+ break
+ }
+ if n.Type.Etype == TUINT32 || n.Type.Etype == TUINTPTR {
+ n = mkcall("float64touint32", n.Type, init, conv(n.Left, Types[TFLOAT64]))
+ break
+ }
+ }
+ if n.Type.IsFloat() {
+ if n.Left.Type.Etype == TINT64 {
+ n = conv(mkcall("int64tofloat64", Types[TFLOAT64], init, conv(n.Left, Types[TINT64])), n.Type)
+ break
+ }
+
+ if n.Left.Type.Etype == TUINT64 {
+ n = conv(mkcall("uint64tofloat64", Types[TFLOAT64], init, conv(n.Left, Types[TUINT64])), n.Type)
+ break
+ }
+ if n.Left.Type.Etype == TUINT32 || n.Left.Type.Etype == TUINTPTR {
+ n = conv(mkcall("uint32tofloat64", Types[TFLOAT64], init, conv(n.Left, Types[TUINT32])), n.Type)
break
}
}
@@ -3303,6 +3336,7 @@ func samecheap(a *Node, b *Node) bool {
// The result of walkrotate MUST be assigned back to n, e.g.
// n.Left = walkrotate(n.Left)
func walkrotate(n *Node) *Node {
+ // TODO: enable LROT on ARM64 once the old backend is gone.
if Thearch.LinkArch.InFamily(sys.MIPS64, sys.ARM64, sys.PPC64) {
return n
}
@@ -3496,16 +3530,6 @@ func walkdiv(n *Node, init *Nodes) *Node {
goto ret
}
- // TODO(zhongwei) Test shows that TUINT8, TINT8, TUINT16 and TINT16's "quick division" method
- // on current arm64 backend is slower than hardware div instruction on ARM64 due to unnecessary
- // data movement between registers. It could be enabled when generated code is good enough.
- if Thearch.LinkArch.Family == sys.ARM64 {
- switch Simtype[nl.Type.Etype] {
- case TUINT8, TINT8, TUINT16, TINT16:
- return n
- }
- }
-
switch Simtype[nl.Type.Etype] {
default:
return n
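
Deleting the carve-out above re-enables walkdiv's magic-number strength reduction for small integer types on ARM64. A hedged sketch of the idea for unsigned 16-bit division by 3, using the standard multiplier (not necessarily the compiler's literal output):

```go
package main

//go:noinline
func div3(x uint16) uint16 {
	// (x*0xAAAB)>>17 == x/3 for all uint16 x, because 3*0xAAAB == 1<<17 + 1,
	// so the error term x/(3<<17) is too small to perturb the floor.
	return uint16((uint32(x) * 0xAAAB) >> 17)
}

func main() {
	println(div3(9), div3(10)) // 3 3
}
```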
diff --git a/src/cmd/compile/internal/ppc64/galign.go b/src/cmd/compile/internal/ppc64/galign.go
index a83dff9a8b..4c984d81c2 100644
--- a/src/cmd/compile/internal/ppc64/galign.go
+++ b/src/cmd/compile/internal/ppc64/galign.go
@@ -66,6 +66,11 @@ func Main() {
gc.Thearch.Doregbits = doregbits
gc.Thearch.Regnames = regnames
+ gc.Thearch.SSARegToReg = ssaRegToReg
+ gc.Thearch.SSAMarkMoves = ssaMarkMoves
+ gc.Thearch.SSAGenValue = ssaGenValue
+ gc.Thearch.SSAGenBlock = ssaGenBlock
+
initvariants()
initproginfo()
diff --git a/src/cmd/compile/internal/ppc64/prog.go b/src/cmd/compile/internal/ppc64/prog.go
index e2d81ae6c5..117eb24299 100644
--- a/src/cmd/compile/internal/ppc64/prog.go
+++ b/src/cmd/compile/internal/ppc64/prog.go
@@ -42,22 +42,34 @@ var progtable = [ppc64.ALAST & obj.AMask]obj.ProgInfo{
// Integer
ppc64.AADD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.AADDC & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.ASUB & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.AADDME & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.ANEG & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AAND & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.AANDN & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AOR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.AORN & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AXOR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.AEQV & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AMULLD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AMULLW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AMULHD & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AMULHDU & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.ADIVD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.ADIVDU & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.ADIVW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.ADIVWU & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.ASLD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.ASRD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.ASRAD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.ASLW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.ASRW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.ASRAW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.ACMP & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead},
ppc64.ACMPU & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead},
+ ppc64.ACMPW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightRead},
+ ppc64.ACMPWU & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightRead},
ppc64.ATD & obj.AMask: {Flags: gc.SizeQ | gc.RightRead},
// Floating point.
@@ -70,11 +82,13 @@ var progtable = [ppc64.ALAST & obj.AMask]obj.ProgInfo{
ppc64.AFDIV & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AFDIVS & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AFCTIDZ & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
+ ppc64.AFCTIWZ & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AFCFID & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AFCFIDU & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.AFCMPU & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightRead},
ppc64.AFRSP & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
ppc64.AFSQRT & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite},
+ ppc64.AFNEG & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite},
// Moves
ppc64.AMOVB & obj.AMask: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
@@ -91,6 +105,8 @@ var progtable = [ppc64.ALAST & obj.AMask]obj.ProgInfo{
ppc64.AMOVD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
ppc64.AMOVDU & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move | gc.PostInc},
ppc64.AFMOVS & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+ ppc64.AFMOVSX & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+ ppc64.AFMOVSZ & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
ppc64.AFMOVD & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move},
// Jumps
diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go
new file mode 100644
index 0000000000..b6d06f88e1
--- /dev/null
+++ b/src/cmd/compile/internal/ppc64/ssa.go
@@ -0,0 +1,1021 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/compile/internal/ssa"
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+ "math"
+)
+
+var ssaRegToReg = []int16{
+ // ppc64.REGZERO, // not an SSA reg
+ ppc64.REGSP,
+ ppc64.REG_R2,
+ ppc64.REG_R3,
+ ppc64.REG_R4,
+ ppc64.REG_R5,
+ ppc64.REG_R6,
+ ppc64.REG_R7,
+ ppc64.REG_R8,
+ ppc64.REG_R9,
+ ppc64.REG_R10,
+ ppc64.REGCTXT,
+ ppc64.REG_R12,
+ ppc64.REG_R13,
+ ppc64.REG_R14,
+ ppc64.REG_R15,
+ ppc64.REG_R16,
+ ppc64.REG_R17,
+ ppc64.REG_R18,
+ ppc64.REG_R19,
+ ppc64.REG_R20,
+ ppc64.REG_R21,
+ ppc64.REG_R22,
+ ppc64.REG_R23,
+ ppc64.REG_R24,
+ ppc64.REG_R25,
+ ppc64.REG_R26,
+ ppc64.REG_R27,
+ ppc64.REG_R28,
+ ppc64.REG_R29,
+ ppc64.REGG,
+ ppc64.REGTMP,
+
+ ppc64.REG_F0,
+ ppc64.REG_F1,
+ ppc64.REG_F2,
+ ppc64.REG_F3,
+ ppc64.REG_F4,
+ ppc64.REG_F5,
+ ppc64.REG_F6,
+ ppc64.REG_F7,
+ ppc64.REG_F8,
+ ppc64.REG_F9,
+ ppc64.REG_F10,
+ ppc64.REG_F11,
+ ppc64.REG_F12,
+ ppc64.REG_F13,
+ ppc64.REG_F14,
+ ppc64.REG_F15,
+ ppc64.REG_F16,
+ ppc64.REG_F17,
+ ppc64.REG_F18,
+ ppc64.REG_F19,
+ ppc64.REG_F20,
+ ppc64.REG_F21,
+ ppc64.REG_F22,
+ ppc64.REG_F23,
+ ppc64.REG_F24,
+ ppc64.REG_F25,
+ ppc64.REG_F26,
+ // ppc64.REG_F27, // reserved for "floating conversion constant"
+ // ppc64.REG_F28, // 0.0
+ // ppc64.REG_F29, // 0.5
+ // ppc64.REG_F30, // 1.0
+ // ppc64.REG_F31, // 2.0
+
+ // ppc64.REG_CR0,
+ // ppc64.REG_CR1,
+ // ppc64.REG_CR2,
+ // ppc64.REG_CR3,
+ // ppc64.REG_CR4,
+ // ppc64.REG_CR5,
+ // ppc64.REG_CR6,
+ // ppc64.REG_CR7,
+
+ // ppc64.REG_CR,
+ // ppc64.REG_XER,
+ // ppc64.REG_LR,
+ // ppc64.REG_CTR,
+}
+
+var condOps = map[ssa.Op]obj.As{
+ ssa.OpPPC64Equal: ppc64.ABEQ,
+ ssa.OpPPC64NotEqual: ppc64.ABNE,
+ ssa.OpPPC64LessThan: ppc64.ABLT,
+ ssa.OpPPC64GreaterEqual: ppc64.ABGE,
+ ssa.OpPPC64GreaterThan: ppc64.ABGT,
+ ssa.OpPPC64LessEqual: ppc64.ABLE,
+
+ ssa.OpPPC64FLessThan: ppc64.ABLT, // 1 branch for FCMP
+ ssa.OpPPC64FGreaterThan: ppc64.ABGT, // 1 branch for FCMP
+ ssa.OpPPC64FLessEqual: ppc64.ABLT, // 2 branches for FCMP <=, second is BEQ
+ ssa.OpPPC64FGreaterEqual: ppc64.ABGT, // 2 branches for FCMP >=, second is BEQ
+}
+
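The two-branch entries above exist because a single inverted condition is not sound for floating point: with NaN, !(a > b) does not imply a <= b. A small standalone Go illustration (not part of the patch):

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		a, b := math.NaN(), 1.0
		fmt.Println(a > b)    // false: every ordered comparison with NaN fails
		fmt.Println(!(a > b)) // true
		fmt.Println(a <= b)   // false, so a <= b is not !(a > b)
	}

Hence FCMP <= is emitted as BLT plus a second BEQ rather than as the negation of BGT.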
+// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
+func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
+ // flive := b.FlagsLiveAtEnd
+ // if b.Control != nil && b.Control.Type.IsFlags() {
+ // flive = true
+ // }
+ // for i := len(b.Values) - 1; i >= 0; i-- {
+ // v := b.Values[i]
+ // if flive && (v.Op == ssa.OpPPC64MOVWconst || v.Op == ssa.OpPPC64MOVDconst) {
+ // // The "mark" is any non-nil Aux value.
+ // v.Aux = v
+ // }
+ // if v.Type.IsFlags() {
+ // flive = false
+ // }
+ // for _, a := range v.Args {
+ // if a.Type.IsFlags() {
+ // flive = true
+ // }
+ // }
+ // }
+}
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t ssa.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return ppc64.AFMOVS
+ case 8:
+ return ppc64.AFMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return ppc64.AMOVB
+ } else {
+ return ppc64.AMOVBZ
+ }
+ case 2:
+ if t.IsSigned() {
+ return ppc64.AMOVH
+ } else {
+ return ppc64.AMOVHZ
+ }
+ case 4:
+ if t.IsSigned() {
+ return ppc64.AMOVW
+ } else {
+ return ppc64.AMOVWZ
+ }
+ case 8:
+ return ppc64.AMOVD
+ }
+ }
+ panic("bad load type")
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t ssa.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return ppc64.AFMOVS
+ case 8:
+ return ppc64.AFMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ return ppc64.AMOVB
+ case 2:
+ return ppc64.AMOVH
+ case 4:
+ return ppc64.AMOVW
+ case 8:
+ return ppc64.AMOVD
+ }
+ }
+ panic("bad store type")
+}
+
+// scratchFpMem initializes an Addr (a field of a Prog) to reference
+// the scratchpad memory slot used to move values between the
+// floating-point (F) and general-purpose (G) register sets for FP conversions.
+func scratchFpMem(s *gc.SSAGenState, a *obj.Addr) {
+ a.Type = obj.TYPE_MEM
+ a.Name = obj.NAME_AUTO
+ a.Node = s.ScratchFpMem
+ a.Sym = gc.Linksym(s.ScratchFpMem.Sym)
+ a.Reg = ppc64.REGSP
+}
+
+func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+ s.SetLineno(v.Line)
+ switch v.Op {
+ case ssa.OpInitMem:
+ // memory arg needs no code
+ case ssa.OpArg:
+ // input args need no code
+ case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
+ // nothing to do
+
+ case ssa.OpCopy, ssa.OpPPC64MOVDconvert:
+ t := v.Type
+ if t.IsMemory() {
+ return
+ }
+ x := gc.SSARegNum(v.Args[0])
+ y := gc.SSARegNum(v)
+ if x != y {
+ rt := obj.TYPE_REG
+ op := ppc64.AMOVD
+
+ if t.IsFloat() {
+ op = ppc64.AFMOVD
+ }
+ p := gc.Prog(op)
+ p.From.Type = rt
+ p.From.Reg = x
+ p.To.Type = rt
+ p.To.Reg = y
+ }
+
+ case ssa.OpPPC64Xf2i64:
+ {
+ x := gc.SSARegNum(v.Args[0])
+ y := gc.SSARegNum(v)
+ p := gc.Prog(ppc64.AFMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ scratchFpMem(s, &p.To)
+ p = gc.Prog(ppc64.AMOVD)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ scratchFpMem(s, &p.From)
+ }
+ case ssa.OpPPC64Xi2f64:
+ {
+ x := gc.SSARegNum(v.Args[0])
+ y := gc.SSARegNum(v)
+ p := gc.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ scratchFpMem(s, &p.To)
+ p = gc.Prog(ppc64.AFMOVD)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ scratchFpMem(s, &p.From)
+ }
+
+ case ssa.OpPPC64LoweredGetClosurePtr:
+ // Closure pointer is R11 (already)
+ gc.CheckLoweredGetClosurePtr(v)
+
+ case ssa.OpLoadReg:
+ loadOp := loadByType(v.Type)
+ n, off := gc.AutoVar(v.Args[0])
+ p := gc.Prog(loadOp)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Node = n
+ p.From.Sym = gc.Linksym(n.Sym)
+ p.From.Offset = off
+ if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
+ p.From.Name = obj.NAME_PARAM
+ p.From.Offset += n.Xoffset
+ } else {
+ p.From.Name = obj.NAME_AUTO
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+
+ case ssa.OpStoreReg:
+ storeOp := storeByType(v.Type)
+ n, off := gc.AutoVar(v)
+ p := gc.Prog(storeOp)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Type = obj.TYPE_MEM
+ p.To.Node = n
+ p.To.Sym = gc.Linksym(n.Sym)
+ p.To.Offset = off
+ if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
+ p.To.Name = obj.NAME_PARAM
+ p.To.Offset += n.Xoffset
+ } else {
+ p.To.Name = obj.NAME_AUTO
+ }
+
+ case ssa.OpPPC64DIVD:
+ // For now, expand as:
+ //
+ // cmp arg1, -1
+ // beq ahead
+ // v = arg0 / arg1
+ // b over
+ // ahead: v = -arg0
+ // over: nop
+ r := gc.SSARegNum(v)
+ r0 := gc.SSARegNum(v.Args[0])
+ r1 := gc.SSARegNum(v.Args[1])
+
+ p := gc.Prog(ppc64.ACMP)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = -1
+
+ pbahead := gc.Prog(ppc64.ABEQ)
+ pbahead.To.Type = obj.TYPE_BRANCH
+
+ p = gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ pbover := gc.Prog(obj.AJMP)
+ pbover.To.Type = obj.TYPE_BRANCH
+
+ p = gc.Prog(ppc64.ANEG)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r0
+ gc.Patch(pbahead, p)
+
+ p = gc.Prog(obj.ANOP)
+ gc.Patch(pbover, p)
+
+ case ssa.OpPPC64DIVW:
+ // word-width version of above
+ r := gc.SSARegNum(v)
+ r0 := gc.SSARegNum(v.Args[0])
+ r1 := gc.SSARegNum(v.Args[1])
+
+ p := gc.Prog(ppc64.ACMPW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = -1
+
+ pbahead := gc.Prog(ppc64.ABEQ)
+ pbahead.To.Type = obj.TYPE_BRANCH
+
+ p = gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ pbover := gc.Prog(obj.AJMP)
+ pbover.To.Type = obj.TYPE_BRANCH
+
+ p = gc.Prog(ppc64.ANEG)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r0
+ gc.Patch(pbahead, p)
+
+ p = gc.Prog(obj.ANOP)
+ gc.Patch(pbover, p)
+
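The branch around the hardware divide covers the one overflowing divisor: Go defines MinInt64 / -1 to wrap back to MinInt64 (remainder 0), which the NEG on the slow path produces for any dividend, whereas the machine divide's result for the overflowing input cannot be relied on. A quick check of the required semantics (illustrative Go, not part of the patch):

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		x, d := int64(math.MinInt64), int64(-1)
		fmt.Println(x/d == x) // true: the quotient wraps to MinInt64
		fmt.Println(x % d)    // 0
	}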
+ case ssa.OpPPC64ADD, ssa.OpPPC64FADD, ssa.OpPPC64FADDS, ssa.OpPPC64SUB, ssa.OpPPC64FSUB, ssa.OpPPC64FSUBS,
+ ssa.OpPPC64MULLD, ssa.OpPPC64MULLW, ssa.OpPPC64DIVDU, ssa.OpPPC64DIVWU,
+ ssa.OpPPC64SRAD, ssa.OpPPC64SRAW, ssa.OpPPC64SRD, ssa.OpPPC64SRW, ssa.OpPPC64SLD, ssa.OpPPC64SLW,
+ ssa.OpPPC64MULHD, ssa.OpPPC64MULHW, ssa.OpPPC64MULHDU, ssa.OpPPC64MULHWU,
+ ssa.OpPPC64FMUL, ssa.OpPPC64FMULS, ssa.OpPPC64FDIV, ssa.OpPPC64FDIVS,
+ ssa.OpPPC64AND, ssa.OpPPC64OR, ssa.OpPPC64ANDN, ssa.OpPPC64ORN, ssa.OpPPC64XOR, ssa.OpPPC64EQV:
+ r := gc.SSARegNum(v)
+ r1 := gc.SSARegNum(v.Args[0])
+ r2 := gc.SSARegNum(v.Args[1])
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.OpPPC64MaskIfNotCarry:
+ r := gc.SSARegNum(v)
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.OpPPC64ADDconstForCarry:
+ r1 := gc.SSARegNum(v.Args[0])
+ p := gc.Prog(v.Op.Asm())
+ p.Reg = r1
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP // Ignored; this is for the carry effect.
+
+ case ssa.OpPPC64NEG, ssa.OpPPC64FNEG, ssa.OpPPC64FSQRT, ssa.OpPPC64FSQRTS, ssa.OpPPC64FCTIDZ, ssa.OpPPC64FCTIWZ, ssa.OpPPC64FCFID, ssa.OpPPC64FRSP:
+ r := gc.SSARegNum(v)
+ p := gc.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+
+ case ssa.OpPPC64ADDconst, ssa.OpPPC64ANDconst, ssa.OpPPC64ORconst, ssa.OpPPC64XORconst,
+ ssa.OpPPC64SRADconst, ssa.OpPPC64SRAWconst, ssa.OpPPC64SRDconst, ssa.OpPPC64SRWconst, ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst:
+ p := gc.Prog(v.Op.Asm())
+ p.Reg = gc.SSARegNum(v.Args[0])
+
+ if v.Aux != nil {
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = gc.AuxOffset(v)
+ } else {
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ }
+
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+
+ case ssa.OpPPC64MOVDaddr:
+ p := gc.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+
+ var wantreg string
+ // Suspect comment, copied from ARM code
+ // MOVD $sym+off(base), R
+ // the assembler expands it as the following:
+ // - base is SP: add constant offset to SP
+ // when constant is large, tmp register (R11) may be used
+ // - base is SB: load external address from constant pool (use relocation)
+ switch v.Aux.(type) {
+ default:
+ v.Fatalf("aux is of unknown type %T", v.Aux)
+ case *ssa.ExternSymbol:
+ wantreg = "SB"
+ gc.AddAux(&p.From, v)
+ case *ssa.ArgSymbol, *ssa.AutoSymbol:
+ wantreg = "SP"
+ gc.AddAux(&p.From, v)
+ case nil:
+ // No sym, just MOVD $off(SP), R
+ wantreg = "SP"
+ p.From.Reg = ppc64.REGSP
+ p.From.Offset = v.AuxInt
+ }
+ if reg := gc.SSAReg(v.Args[0]); reg.Name() != wantreg {
+ v.Fatalf("bad reg %s for symbol type %T, want %s", reg.Name(), v.Aux, wantreg)
+ }
+
+ case ssa.OpPPC64MOVDconst, ssa.OpPPC64MOVWconst:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+
+ case ssa.OpPPC64FMOVDconst, ssa.OpPPC64FMOVSconst:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+
+ case ssa.OpPPC64FCMPU, ssa.OpPPC64CMP, ssa.OpPPC64CMPW, ssa.OpPPC64CMPU, ssa.OpPPC64CMPWU:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v.Args[1])
+
+ case ssa.OpPPC64CMPconst, ssa.OpPPC64CMPUconst, ssa.OpPPC64CMPWconst, ssa.OpPPC64CMPWUconst:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = v.AuxInt
+
+ case ssa.OpPPC64MOVBreg, ssa.OpPPC64MOVBZreg, ssa.OpPPC64MOVHreg, ssa.OpPPC64MOVHZreg, ssa.OpPPC64MOVWreg, ssa.OpPPC64MOVWZreg:
+ // Sign- or zero-extend in register to the required size
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Reg = gc.SSARegNum(v)
+ p.To.Type = obj.TYPE_REG
+
+ case ssa.OpPPC64MOVDload, ssa.OpPPC64MOVWload, ssa.OpPPC64MOVBload, ssa.OpPPC64MOVHload, ssa.OpPPC64MOVWZload, ssa.OpPPC64MOVBZload, ssa.OpPPC64MOVHZload:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+
+ case ssa.OpPPC64FMOVDload, ssa.OpPPC64FMOVSload:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+
+ case ssa.OpPPC64MOVDstorezero, ssa.OpPPC64MOVWstorezero, ssa.OpPPC64MOVHstorezero, ssa.OpPPC64MOVBstorezero:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.To, v)
+
+ case ssa.OpPPC64MOVDstore, ssa.OpPPC64MOVWstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVBstore:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[1])
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.To, v)
+ case ssa.OpPPC64FMOVDstore, ssa.OpPPC64FMOVSstore:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[1])
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.To, v)
+
+ case ssa.OpPPC64Equal,
+ ssa.OpPPC64NotEqual,
+ ssa.OpPPC64LessThan,
+ ssa.OpPPC64FLessThan,
+ ssa.OpPPC64LessEqual,
+ ssa.OpPPC64GreaterThan,
+ ssa.OpPPC64FGreaterThan,
+ ssa.OpPPC64GreaterEqual:
+ // On Power7 or later, can use isel instruction:
+ // for a < b, a > b, a = b:
+ // rt := 1
+ // isel rt,rt,r0,cond
+
+ // for a >= b, a <= b, a != b:
+ // rt := 1
+ // isel rt,0,rt,!cond
+
+ // However, big-endian PPC64 supports machines older than that,
+ // and isel (which looks a lot like fsel) isn't recognized
+ // yet by the Go assembler. So for now, use the old instruction
+ // sequence, which we'll need anyway.
+ // TODO: add support for isel on ppc64le and use it.
+
+ // generate boolean values
+ // use conditional move
+
+ p := gc.Prog(ppc64.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+
+ pb := gc.Prog(condOps[v.Op])
+ pb.To.Type = obj.TYPE_BRANCH
+
+ p = gc.Prog(ppc64.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+
+ p = gc.Prog(obj.ANOP)
+ gc.Patch(pb, p)
+
+ case ssa.OpPPC64FLessEqual, // These include a second branch for EQ -- NaN means a<=b cannot be rewritten as !(a>b)
+ ssa.OpPPC64FGreaterEqual:
+
+ p := gc.Prog(ppc64.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+
+ pb0 := gc.Prog(condOps[v.Op])
+ pb0.To.Type = obj.TYPE_BRANCH
+ pb1 := gc.Prog(ppc64.ABEQ)
+ pb1.To.Type = obj.TYPE_BRANCH
+
+ p = gc.Prog(ppc64.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+
+ p = gc.Prog(obj.ANOP)
+ gc.Patch(pb0, p)
+ gc.Patch(pb1, p)
+
+ case ssa.OpPPC64LoweredZero:
+ // Similar to how this is done on ARM,
+ // except that PPC MOVDU x,off(y) is *(y+off) = x; y=y+off
+ // not store-and-increment.
+ // Therefore R3 should start at dest-align (one element before dest)
+ // and arg1 should be dest+size-align (the last element).
+ // HOWEVER, the input dest address cannot be dest-align, because
+ // that does not necessarily address valid memory and it is not
+ // known how that might be optimized. Therefore, correct it
+ // in the expansion:
+ //
+ // ADD -8,R3,R3
+ // MOVDU R0, 8(R3)
+ // CMPU R3, Rarg1
+ // BLT -2(PC)
+ // arg1 is the address of the last element to zero
+ // auxint is alignment
+ var sz int64
+ var movu obj.As
+ switch {
+ case v.AuxInt%8 == 0:
+ sz = 8
+ movu = ppc64.AMOVDU
+ case v.AuxInt%4 == 0:
+ sz = 4
+ movu = ppc64.AMOVWZU // MOVWU instruction not implemented
+ case v.AuxInt%2 == 0:
+ sz = 2
+ movu = ppc64.AMOVHU
+ default:
+ sz = 1
+ movu = ppc64.AMOVBU
+ }
+
+ p := gc.Prog(ppc64.AADD)
+ p.Reg = gc.SSARegNum(v.Args[0])
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = -sz
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+
+ p = gc.Prog(movu)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_R0
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Offset = sz
+
+ p2 := gc.Prog(ppc64.ACMPU)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = gc.SSARegNum(v.Args[0])
+ p2.To.Reg = gc.SSARegNum(v.Args[1])
+ p2.To.Type = obj.TYPE_REG
+
+ p3 := gc.Prog(ppc64.ABLT)
+ p3.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p3, p)
+
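An index-based Go model of the loop just emitted, treating MOVDU as the store-with-update the comment describes (illustrative helper; assumes sz divides len(mem)):

	// zeroRange mimics: ADD $-sz,R3; loop: MOVDU R0, sz(R3); CMPU R3, Rarg1; BLT loop
	func zeroRange(mem []byte, sz int) {
		p := -sz              // ADD $-sz: step back one element first
		last := len(mem) - sz // Rarg1: offset of the last element
		for {
			p += sz // MOVDU advances the address, then stores through it
			for i := 0; i < sz; i++ {
				mem[p+i] = 0
			}
			if p >= last { // BLT loops while p < last
				break
			}
		}
	}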
+ case ssa.OpPPC64LoweredMove:
+ // Similar to how this is done on ARM,
+ // except that PPC MOVDU x,off(y) is *(y+off) = x; y=y+off,
+ // not store-and-increment.
+ // Inputs must be valid pointers to memory,
+ // so adjust arg0 and arg1 as part of the expansion.
+ // arg2 should be src+size-align,
+ //
+ // ADD -8,R3,R3
+ // ADD -8,R4,R4
+ // MOVDU 8(R4), Rtmp
+ // MOVDU Rtmp, 8(R3)
+ // CMPU R4, Rarg2
+ // BLT -3(PC)
+ // arg2 is the address of the last element of src
+ // auxint is alignment
+ var sz int64
+ var movu obj.As
+ switch {
+ case v.AuxInt%8 == 0:
+ sz = 8
+ movu = ppc64.AMOVDU
+ case v.AuxInt%4 == 0:
+ sz = 4
+ movu = ppc64.AMOVWZU // MOVWU instruction not implemented
+ case v.AuxInt%2 == 0:
+ sz = 2
+ movu = ppc64.AMOVHU
+ default:
+ sz = 1
+ movu = ppc64.AMOVBU
+ }
+
+ p := gc.Prog(ppc64.AADD)
+ p.Reg = gc.SSARegNum(v.Args[0])
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = -sz
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+
+ p = gc.Prog(ppc64.AADD)
+ p.Reg = gc.SSARegNum(v.Args[1])
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = -sz
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v.Args[1])
+
+ p = gc.Prog(movu)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = gc.SSARegNum(v.Args[1])
+ p.From.Offset = sz
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+
+ p2 := gc.Prog(movu)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = ppc64.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = gc.SSARegNum(v.Args[0])
+ p2.To.Offset = sz
+
+ p3 := gc.Prog(ppc64.ACMPU)
+ p3.From.Reg = gc.SSARegNum(v.Args[1])
+ p3.From.Type = obj.TYPE_REG
+ p3.To.Reg = gc.SSARegNum(v.Args[2])
+ p3.To.Type = obj.TYPE_REG
+
+ p4 := gc.Prog(ppc64.ABLT)
+ p4.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p4, p)
+
+ case ssa.OpPPC64CALLstatic:
+ if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
+ // Deferred calls will appear to be returning to
+ // the CALL deferreturn(SB) that we are about to emit.
+ // However, the stack trace code will show the line
+ // of the instruction byte before the return PC.
+ // To avoid that being an unrelated instruction,
+ // insert two actual hardware NOPs that will have the right line number.
+ // This is different from obj.ANOP, which is a virtual no-op
+ // that doesn't make it into the instruction stream.
+ // PPC64 is unusual because TWO nops are required
+ // (see gc/cgen.go, gc/plive.go)
+ ginsnop()
+ ginsnop()
+ }
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym))
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.OpPPC64CALLclosure:
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.OpPPC64CALLdefer:
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.OpPPC64CALLgo:
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(gc.Newproc.Sym)
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.OpPPC64CALLinter:
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+
+ case ssa.OpVarDef:
+ gc.Gvardef(v.Aux.(*gc.Node))
+ case ssa.OpVarKill:
+ gc.Gvarkill(v.Aux.(*gc.Node))
+ case ssa.OpVarLive:
+ gc.Gvarlive(v.Aux.(*gc.Node))
+ case ssa.OpKeepAlive:
+ if !v.Args[0].Type.IsPtrShaped() {
+ v.Fatalf("keeping non-pointer alive %v", v.Args[0])
+ }
+ n, off := gc.AutoVar(v.Args[0])
+ if n == nil {
+ v.Fatalf("KeepAlive with non-spilled value %s %s", v, v.Args[0])
+ }
+ if off != 0 {
+ v.Fatalf("KeepAlive with non-zero offset spill location %s:%d", n, off)
+ }
+ gc.Gvarlive(n)
+
+ case ssa.OpPhi:
+ // just check to make sure regalloc and stackalloc did it right
+ if v.Type.IsMemory() {
+ return
+ }
+ f := v.Block.Func
+ loc := f.RegAlloc[v.ID]
+ for _, a := range v.Args {
+ if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
+ v.Fatalf("phi arg at different location than phi: %v @ %v, but arg %v @ %v\n%s\n", v, loc, a, aloc, v.Block.Func)
+ }
+ }
+
+ case ssa.OpPPC64LoweredNilCheck:
+ // Optimization - if the subsequent block has a load or store
+ // at the same address, we don't need to issue this instruction.
+ // mem := v.Args[1]
+ // for _, w := range v.Block.Succs[0].Block().Values {
+ // if w.Op == ssa.OpPhi {
+ // if w.Type.IsMemory() {
+ // mem = w
+ // }
+ // continue
+ // }
+ // if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() {
+ // // w doesn't use a store - can't be a memory op.
+ // continue
+ // }
+ // if w.Args[len(w.Args)-1] != mem {
+ // v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w)
+ // }
+ // switch w.Op {
+ // case ssa.OpPPC64MOVBload, ssa.OpPPC64MOVBUload, ssa.OpPPC64MOVHload, ssa.OpPPC64MOVHUload,
+ // ssa.OpPPC64MOVWload, ssa.OpPPC64MOVFload, ssa.OpPPC64MOVDload,
+ // ssa.OpPPC64MOVBstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVWstore,
+ // ssa.OpPPC64MOVFstore, ssa.OpPPC64MOVDstore:
+ // // arg0 is ptr, auxint is offset
+ // if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
+ // if gc.Debug_checknil != 0 && int(v.Line) > 1 {
+ // gc.Warnl(v.Line, "removed nil check")
+ // }
+ // return
+ // }
+ // case ssa.OpPPC64DUFFZERO, ssa.OpPPC64LoweredZero, ssa.OpPPC64LoweredZeroU:
+ // // arg0 is ptr
+ // if w.Args[0] == v.Args[0] {
+ // if gc.Debug_checknil != 0 && int(v.Line) > 1 {
+ // gc.Warnl(v.Line, "removed nil check")
+ // }
+ // return
+ // }
+ // case ssa.OpPPC64DUFFCOPY, ssa.OpPPC64LoweredMove, ssa.OpPPC64LoweredMoveU:
+ // // arg0 is dst ptr, arg1 is src ptr
+ // if w.Args[0] == v.Args[0] || w.Args[1] == v.Args[0] {
+ // if gc.Debug_checknil != 0 && int(v.Line) > 1 {
+ // gc.Warnl(v.Line, "removed nil check")
+ // }
+ // return
+ // }
+ // default:
+ // }
+ // if w.Type.IsMemory() {
+ // if w.Op == ssa.OpVarDef || w.Op == ssa.OpVarKill || w.Op == ssa.OpVarLive {
+ // // these ops are OK
+ // mem = w
+ // continue
+ // }
+ // // We can't delay the nil check past the next store.
+ // break
+ // }
+ // }
+ // Issue a load which will fault if arg is nil.
+ p := gc.Prog(ppc64.AMOVB)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+ if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
+ gc.Warnl(v.Line, "generated nil check")
+ }
+
+ case ssa.OpPPC64InvertFlags:
+ v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
+ case ssa.OpPPC64FlagEQ, ssa.OpPPC64FlagLT, ssa.OpPPC64FlagGT:
+ v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
+
+ default:
+ v.Unimplementedf("genValue not implemented: %s", v.LongString())
+ }
+}
+
+var blockJump = [...]struct {
+ asm, invasm obj.As
+ asmeq, invasmeq bool
+}{
+ ssa.BlockPPC64EQ: {ppc64.ABEQ, ppc64.ABNE, false, false},
+ ssa.BlockPPC64NE: {ppc64.ABNE, ppc64.ABEQ, false, false},
+
+ ssa.BlockPPC64LT: {ppc64.ABLT, ppc64.ABGE, false, false},
+ ssa.BlockPPC64GE: {ppc64.ABGE, ppc64.ABLT, false, false},
+ ssa.BlockPPC64LE: {ppc64.ABLE, ppc64.ABGT, false, false},
+ ssa.BlockPPC64GT: {ppc64.ABGT, ppc64.ABLE, false, false},
+
+ // TODO: need to work FP comparisons into block jumps
+ ssa.BlockPPC64FLT: {ppc64.ABLT, ppc64.ABGT, false, true},
+ ssa.BlockPPC64FGE: {ppc64.ABGT, ppc64.ABLT, true, false},
+ ssa.BlockPPC64FLE: {ppc64.ABLT, ppc64.ABGT, true, false},
+ ssa.BlockPPC64FGT: {ppc64.ABGT, ppc64.ABLT, false, true},
+}
+
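The asmeq/invasmeq flags add a BEQ next to the primary FP branch, because on POWER <= and >= are compounds of two condition-register bits. A Go model of the FLE exit with asmeq set (illustrative, not part of the patch):

	func branchFLE(a, b float64, yes, no func()) {
		switch {
		case a < b: // BLT, the primary branch
			yes()
		case a == b: // the extra BEQ from asmeq
			yes()
		default:
			no() // NaN lands here: a <= b is false when unordered
		}
	}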
+func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+ s.SetLineno(b.Line)
+
+ switch b.Kind {
+
+ case ssa.BlockDefer:
+ // defer returns in R3:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := gc.Prog(ppc64.ACMP)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_R3
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_R0
+
+ p = gc.Prog(ppc64.ABNE)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := gc.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+
+ case ssa.BlockPlain, ssa.BlockCall, ssa.BlockCheck:
+ if b.Succs[0].Block() != next {
+ p := gc.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockExit:
+ gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
+ case ssa.BlockRet:
+ gc.Prog(obj.ARET)
+ case ssa.BlockRetJmp:
+ p := gc.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))
+
+ case ssa.BlockPPC64EQ, ssa.BlockPPC64NE,
+ ssa.BlockPPC64LT, ssa.BlockPPC64GE,
+ ssa.BlockPPC64LE, ssa.BlockPPC64GT,
+ ssa.BlockPPC64FLT, ssa.BlockPPC64FGE,
+ ssa.BlockPPC64FLE, ssa.BlockPPC64FGT:
+ jmp := blockJump[b.Kind]
+ likely := b.Likely
+ var p *obj.Prog
+ switch next {
+ case b.Succs[0].Block():
+ p = gc.Prog(jmp.invasm)
+ likely *= -1
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ if jmp.invasmeq {
+ // TODO: The second branch is probably predict-not-taken since it is for FP equality
+ q := gc.Prog(ppc64.ABEQ)
+ q.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
+ }
+ case b.Succs[1].Block():
+ p = gc.Prog(jmp.asm)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ if jmp.asmeq {
+ q := gc.Prog(ppc64.ABEQ)
+ q.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[0].Block()})
+ }
+ default:
+ p = gc.Prog(jmp.asm)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ if jmp.asmeq {
+ q := gc.Prog(ppc64.ABEQ)
+ q.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[0].Block()})
+ }
+ q := gc.Prog(obj.AJMP)
+ q.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
+ }
+
+ // liblink reorders the instruction stream as it sees fit.
+ // Pass along what we know so liblink can make use of it.
+ // TODO: Once we've fully switched to SSA,
+ // make liblink leave our output alone.
+ //switch likely {
+ //case ssa.BranchUnlikely:
+ // p.From.Type = obj.TYPE_CONST
+ // p.From.Offset = 0
+ //case ssa.BranchLikely:
+ // p.From.Type = obj.TYPE_CONST
+ // p.From.Offset = 1
+ //}
+
+ default:
+ b.Unimplementedf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go
index e8ab17806c..149ceae379 100644
--- a/src/cmd/compile/internal/ssa/config.go
+++ b/src/cmd/compile/internal/ssa/config.go
@@ -20,11 +20,18 @@ type Config struct {
lowerBlock func(*Block) bool // lowering function
lowerValue func(*Value, *Config) bool // lowering function
registers []Register // machine registers
+ gpRegMask regMask // general purpose integer register mask
+ fpRegMask regMask // floating point register mask
+ FPReg int8 // register number of frame pointer, -1 if not used
+ hasGReg bool // has hardware g register
fe Frontend // callbacks into compiler frontend
HTML *HTMLWriter // html writer, for debugging
ctxt *obj.Link // Generic arch information
optimize bool // Do optimization
noDuffDevice bool // Don't use Duff's device
+ nacl bool // GOOS=nacl
+ use387 bool // GO386=387
+ NeedsFpScratch bool // No direct move between GP and FP register sets
sparsePhiCutoff uint64 // Sparse phi location algorithm used above this #blocks*#variables score
curFunc *Func
@@ -106,6 +113,7 @@ type Frontend interface {
SplitSlice(LocalSlot) (LocalSlot, LocalSlot, LocalSlot)
SplitComplex(LocalSlot) (LocalSlot, LocalSlot)
SplitStruct(LocalSlot, int) LocalSlot
+ SplitInt64(LocalSlot) (LocalSlot, LocalSlot) // returns (hi, lo)
// Line returns a string describing the given line number.
Line(int32) string
@@ -128,29 +136,87 @@ func NewConfig(arch string, fe Frontend, ctxt *obj.Link, optimize bool) *Config
c.lowerBlock = rewriteBlockAMD64
c.lowerValue = rewriteValueAMD64
c.registers = registersAMD64[:]
- case "386":
+ c.gpRegMask = gpRegMaskAMD64
+ c.fpRegMask = fpRegMaskAMD64
+ c.FPReg = framepointerRegAMD64
+ c.hasGReg = false
+ case "amd64p32":
c.IntSize = 4
c.PtrSize = 4
c.lowerBlock = rewriteBlockAMD64
- c.lowerValue = rewriteValueAMD64 // TODO(khr): full 32-bit support
+ c.lowerValue = rewriteValueAMD64
+ c.registers = registersAMD64[:]
+ c.gpRegMask = gpRegMaskAMD64
+ c.fpRegMask = fpRegMaskAMD64
+ c.FPReg = framepointerRegAMD64
+ c.hasGReg = false
+ c.noDuffDevice = true
+ case "386":
+ c.IntSize = 4
+ c.PtrSize = 4
+ c.lowerBlock = rewriteBlock386
+ c.lowerValue = rewriteValue386
+ c.registers = registers386[:]
+ c.gpRegMask = gpRegMask386
+ c.fpRegMask = fpRegMask386
+ c.FPReg = framepointerReg386
+ c.hasGReg = false
case "arm":
c.IntSize = 4
c.PtrSize = 4
c.lowerBlock = rewriteBlockARM
c.lowerValue = rewriteValueARM
c.registers = registersARM[:]
+ c.gpRegMask = gpRegMaskARM
+ c.fpRegMask = fpRegMaskARM
+ c.FPReg = framepointerRegARM
+ c.hasGReg = true
+ case "arm64":
+ c.IntSize = 8
+ c.PtrSize = 8
+ c.lowerBlock = rewriteBlockARM64
+ c.lowerValue = rewriteValueARM64
+ c.registers = registersARM64[:]
+ c.gpRegMask = gpRegMaskARM64
+ c.fpRegMask = fpRegMaskARM64
+ c.FPReg = framepointerRegARM64
+ c.hasGReg = true
+ case "ppc64le":
+ c.IntSize = 8
+ c.PtrSize = 8
+ c.lowerBlock = rewriteBlockPPC64
+ c.lowerValue = rewriteValuePPC64
+ c.registers = registersPPC64[:]
+ c.gpRegMask = gpRegMaskPPC64
+ c.fpRegMask = fpRegMaskPPC64
+ c.FPReg = framepointerRegPPC64
+ c.noDuffDevice = true // TODO: Resolve PPC64 DuffDevice (has zero, but not copy)
+ c.NeedsFpScratch = true
+ c.hasGReg = true
default:
fe.Unimplementedf(0, "arch %s not implemented", arch)
}
c.ctxt = ctxt
c.optimize = optimize
+ c.nacl = obj.Getgoos() == "nacl"
- // Don't use Duff's device on Plan 9, because floating
+ // Don't use Duff's device on Plan 9 AMD64, because floating
// point operations are not allowed in a note handler.
- if obj.Getgoos() == "plan9" {
+ if obj.Getgoos() == "plan9" && arch == "amd64" {
c.noDuffDevice = true
}
+ if c.nacl {
+ c.noDuffDevice = true // Don't use Duff's device on NaCl
+
+ // ARM assembler rewrites DIV/MOD to runtime calls, which
+ // clobber R12 on nacl
+ opcodeTable[OpARMDIV].reg.clobbers |= 1 << 12 // R12
+ opcodeTable[OpARMDIVU].reg.clobbers |= 1 << 12 // R12
+ opcodeTable[OpARMMOD].reg.clobbers |= 1 << 12 // R12
+ opcodeTable[OpARMMODU].reg.clobbers |= 1 << 12 // R12
+ }
+
// Assign IDs to preallocated values/blocks.
for i := range c.values {
c.values[i].ID = ID(i)
@@ -180,6 +246,11 @@ func NewConfig(arch string, fe Frontend, ctxt *obj.Link, optimize bool) *Config
return c
}
+func (c *Config) Set387(b bool) {
+ c.NeedsFpScratch = b
+ c.use387 = b
+}
+
func (c *Config) Frontend() Frontend { return c.fe }
func (c *Config) SparsePhiCutoff() uint64 { return c.sparsePhiCutoff }
diff --git a/src/cmd/compile/internal/ssa/cse.go b/src/cmd/compile/internal/ssa/cse.go
index ad4e416159..a000c577d1 100644
--- a/src/cmd/compile/internal/ssa/cse.go
+++ b/src/cmd/compile/internal/ssa/cse.go
@@ -163,6 +163,29 @@ func cse(f *Func) {
}
}
+ // If we rewrote a tuple generator to a new one in a different block,
+ // copy its selectors into the new generator's block, so that the tuple
+ // generator and its selectors stay together.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if rewrite[v.ID] != nil {
+ continue
+ }
+ if v.Op != OpSelect0 && v.Op != OpSelect1 {
+ continue
+ }
+ if !v.Args[0].Type.IsTuple() {
+ f.Fatalf("arg of tuple selector %s is not a tuple: %s", v.String(), v.Args[0].LongString())
+ }
+ t := rewrite[v.Args[0].ID]
+ if t != nil && t.Block != b {
+ // v.Args[0] is a tuple generator that was CSE'd into a different block as t; v was left behind
+ c := v.copyInto(t.Block)
+ rewrite[v.ID] = c
+ }
+ }
+ }
+
rewrites := int64(0)
// Apply substitutions
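The copy is needed because Select0/Select1 must sit in the same block as the tuple generator they read. In Go terms, a tuple generator is one op with two results, both of which have to be consumed next to it (illustrative use of math/bits, not part of the patch):

	import "math/bits"

	// t = (Add32carry x y) is read via (Select0 t) and (Select1 t).
	func addWithCarry(x, y uint32) (sum, carryOut uint32) {
		return bits.Add32(x, y, 0)
	}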
diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go
index 5129c171bb..a1fc5fd0bb 100644
--- a/src/cmd/compile/internal/ssa/deadstore.go
+++ b/src/cmd/compile/internal/ssa/deadstore.go
@@ -89,7 +89,7 @@ func dse(f *Func) {
} else {
// zero addr mem
sz := v.Args[0].Type.ElemType().Size()
- if v.AuxInt != sz {
+ if SizeAndAlign(v.AuxInt).Size() != sz {
f.Fatalf("mismatched zero/store sizes: %d and %d [%s]",
v.AuxInt, sz, v.LongString())
}
diff --git a/src/cmd/compile/internal/ssa/decompose.go b/src/cmd/compile/internal/ssa/decompose.go
index 53116ba593..2f637e45b7 100644
--- a/src/cmd/compile/internal/ssa/decompose.go
+++ b/src/cmd/compile/internal/ssa/decompose.go
@@ -25,6 +25,22 @@ func decomposeBuiltIn(f *Func) {
for _, name := range f.Names {
t := name.Type
switch {
+ case t.IsInteger() && t.Size() == 8 && f.Config.IntSize == 4:
+ var elemType Type
+ if t.IsSigned() {
+ elemType = f.Config.fe.TypeInt32()
+ } else {
+ elemType = f.Config.fe.TypeUInt32()
+ }
+ hiName, loName := f.Config.fe.SplitInt64(name)
+ newNames = append(newNames, hiName, loName)
+ for _, v := range f.NamedValues[name] {
+ hi := v.Block.NewValue1(v.Line, OpInt64Hi, elemType, v)
+ lo := v.Block.NewValue1(v.Line, OpInt64Lo, f.Config.fe.TypeUInt32(), v)
+ f.NamedValues[hiName] = append(f.NamedValues[hiName], hi)
+ f.NamedValues[loName] = append(f.NamedValues[loName], lo)
+ }
+ delete(f.NamedValues, name)
case t.IsComplex():
var elemType Type
if t.Size() == 16 {
@@ -78,6 +94,8 @@ func decomposeBuiltIn(f *Func) {
f.NamedValues[dataName] = append(f.NamedValues[dataName], data)
}
delete(f.NamedValues, name)
+ case t.IsFloat():
+ // floats are never decomposed, even ones bigger than IntSize
case t.Size() > f.Config.IntSize:
f.Unimplementedf("undecomposed named type %s %s", name, t)
default:
@@ -88,8 +106,13 @@ func decomposeBuiltIn(f *Func) {
}
func decomposeBuiltInPhi(v *Value) {
- // TODO: decompose 64-bit ops on 32-bit archs?
switch {
+ case v.Type.IsInteger() && v.Type.Size() == 8 && v.Block.Func.Config.IntSize == 4:
+ if v.Block.Func.Config.arch == "amd64p32" {
+ // Even though ints are 32 bits, we have 64-bit ops.
+ break
+ }
+ decomposeInt64Phi(v)
case v.Type.IsComplex():
decomposeComplexPhi(v)
case v.Type.IsString():
@@ -98,6 +121,8 @@ func decomposeBuiltInPhi(v *Value) {
decomposeSlicePhi(v)
case v.Type.IsInterface():
decomposeInterfacePhi(v)
+ case v.Type.IsFloat():
+ // floats are never decomposed, even ones bigger than IntSize
case v.Type.Size() > v.Block.Func.Config.IntSize:
v.Unimplementedf("undecomposed type %s", v.Type)
}
@@ -138,6 +163,26 @@ func decomposeSlicePhi(v *Value) {
v.AddArg(cap)
}
+func decomposeInt64Phi(v *Value) {
+ fe := v.Block.Func.Config.fe
+ var partType Type
+ if v.Type.IsSigned() {
+ partType = fe.TypeInt32()
+ } else {
+ partType = fe.TypeUInt32()
+ }
+
+ hi := v.Block.NewValue0(v.Line, OpPhi, partType)
+ lo := v.Block.NewValue0(v.Line, OpPhi, fe.TypeUInt32())
+ for _, a := range v.Args {
+ hi.AddArg(a.Block.NewValue1(v.Line, OpInt64Hi, partType, a))
+ lo.AddArg(a.Block.NewValue1(v.Line, OpInt64Lo, fe.TypeUInt32(), a))
+ }
+ v.reset(OpInt64Make)
+ v.AddArg(hi)
+ v.AddArg(lo)
+}
+
func decomposeComplexPhi(v *Value) {
fe := v.Block.Func.Config.fe
var partType Type
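The split follows the convention of SplitInt64 in export_test.go below: the high half keeps the type's signedness, the low half is always uint32. A value-level sketch of the split and the OpInt64Make rebuild (illustrative helpers, not part of the patch):

	func splitInt64(x int64) (hi int32, lo uint32) {
		return int32(x >> 32), uint32(x) // hi carries the sign; lo is raw bits
	}

	func makeInt64(hi int32, lo uint32) int64 {
		return int64(hi)<<32 | int64(lo) // OpInt64Make: reassemble the halves
	}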
diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go
index 27892a8dc1..c26e55517c 100644
--- a/src/cmd/compile/internal/ssa/export_test.go
+++ b/src/cmd/compile/internal/ssa/export_test.go
@@ -49,6 +49,12 @@ func (d DummyFrontend) SplitComplex(s LocalSlot) (LocalSlot, LocalSlot) {
}
return LocalSlot{s.N, d.TypeFloat32(), s.Off}, LocalSlot{s.N, d.TypeFloat32(), s.Off + 4}
}
+func (d DummyFrontend) SplitInt64(s LocalSlot) (LocalSlot, LocalSlot) {
+ if s.Type.IsSigned() {
+ return LocalSlot{s.N, d.TypeInt32(), s.Off + 4}, LocalSlot{s.N, d.TypeUInt32(), s.Off}
+ }
+ return LocalSlot{s.N, d.TypeUInt32(), s.Off + 4}, LocalSlot{s.N, d.TypeUInt32(), s.Off}
+}
func (d DummyFrontend) SplitStruct(s LocalSlot, i int) LocalSlot {
return LocalSlot{s.N, s.Type.FieldType(i), s.Off + s.Type.FieldOff(i)}
}
diff --git a/src/cmd/compile/internal/ssa/flagalloc.go b/src/cmd/compile/internal/ssa/flagalloc.go
index f6c457dc9e..aefa81b5b3 100644
--- a/src/cmd/compile/internal/ssa/flagalloc.go
+++ b/src/cmd/compile/internal/ssa/flagalloc.go
@@ -4,8 +4,6 @@
package ssa
-const flagRegMask = regMask(1) << 33 // TODO: arch-specific
-
// flagalloc allocates the flag register among all the flag-generating
// instructions. Flag values are recomputed if they need to be
// spilled/restored.
@@ -33,7 +31,7 @@ func flagalloc(f *Func) {
if v == flag {
flag = nil
}
- if opcodeTable[v.Op].reg.clobbers&flagRegMask != 0 {
+ if opcodeTable[v.Op].clobberFlags {
flag = nil
}
for _, a := range v.Args {
@@ -97,7 +95,7 @@ func flagalloc(f *Func) {
continue
}
// Recalculate a
- c := a.copyInto(b)
+ c := copyFlags(a, b)
// Update v.
v.SetArg(i, c)
// Remember the most-recently computed flag value.
@@ -105,7 +103,7 @@ func flagalloc(f *Func) {
}
// Issue v.
b.Values = append(b.Values, v)
- if opcodeTable[v.Op].reg.clobbers&flagRegMask != 0 {
+ if opcodeTable[v.Op].clobberFlags {
flag = nil
}
if v.Type.IsFlags() {
@@ -121,7 +119,7 @@ func flagalloc(f *Func) {
if v := end[b.ID]; v != nil && v != flag {
// Need to reissue flag generator for use by
// subsequent blocks.
- _ = v.copyInto(b)
+ copyFlags(v, b)
// Note: this flag generator is not properly linked up
// with the flag users. This breaks the SSA representation.
// We could fix up the users with another pass, but for now
@@ -135,3 +133,19 @@ func flagalloc(f *Func) {
b.FlagsLiveAtEnd = end[b.ID] != nil
}
}
+
+// copyFlags copies v (a flag generator) into b and returns the copy.
+// If any of v's args are also flags, they are copied recursively.
+func copyFlags(v *Value, b *Block) *Value {
+ flagsArgs := make(map[int]*Value)
+ for i, a := range v.Args {
+ if a.Type.IsFlags() || a.Type.IsTuple() {
+ flagsArgs[i] = copyFlags(a, b)
+ }
+ }
+ c := v.copyInto(b)
+ for i, a := range flagsArgs {
+ c.SetArg(i, a)
+ }
+ return c
+}
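copyFlags recurses because a flag generator can itself consume a flags or tuple value, as in a carry/borrow chain, and the whole chain must be rematerialized in the destination block. A rough Go analogue of such a chain (illustrative use of math/bits, not part of the patch):

	import "math/bits"

	// The second subtract consumes the borrow produced by the first, so
	// copying it into another block must copy its borrow producer too.
	func sub64via32(alo, ahi, blo, bhi uint32) (lo, hi uint32) {
		lo, borrow := bits.Sub32(alo, blo, 0)
		hi, _ = bits.Sub32(ahi, bhi, borrow)
		return
	}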
diff --git a/src/cmd/compile/internal/ssa/gen/386.rules b/src/cmd/compile/internal/ssa/gen/386.rules
new file mode 100644
index 0000000000..6a0990da47
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/386.rules
@@ -0,0 +1,1265 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Lowering arithmetic
+(AddPtr x y) -> (ADDL x y)
+(Add32 x y) -> (ADDL x y)
+(Add16 x y) -> (ADDL x y)
+(Add8 x y) -> (ADDL x y)
+(Add32F x y) -> (ADDSS x y)
+(Add64F x y) -> (ADDSD x y)
+
+(Add32carry x y) -> (ADDLcarry x y)
+(Add32withcarry x y c) -> (ADCL x y c)
+
+(SubPtr x y) -> (SUBL x y)
+(Sub32 x y) -> (SUBL x y)
+(Sub16 x y) -> (SUBL x y)
+(Sub8 x y) -> (SUBL x y)
+(Sub32F x y) -> (SUBSS x y)
+(Sub64F x y) -> (SUBSD x y)
+
+(Sub32carry x y) -> (SUBLcarry x y)
+(Sub32withcarry x y c) -> (SBBL x y c)
+
+(Mul32 x y) -> (MULL x y)
+(Mul16 x y) -> (MULL x y)
+(Mul8 x y) -> (MULL x y)
+(Mul32F x y) -> (MULSS x y)
+(Mul64F x y) -> (MULSD x y)
+
+(Mul32uhilo x y) -> (MULLQU x y)
+
+(Div32F x y) -> (DIVSS x y)
+(Div64F x y) -> (DIVSD x y)
+
+(Div32 x y) -> (DIVL x y)
+(Div32u x y) -> (DIVLU x y)
+(Div16 x y) -> (DIVW x y)
+(Div16u x y) -> (DIVWU x y)
+(Div8 x y) -> (DIVW (SignExt8to16 x) (SignExt8to16 y))
+(Div8u x y) -> (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+
+(Hmul32 x y) -> (HMULL x y)
+(Hmul32u x y) -> (HMULLU x y)
+(Hmul16 x y) -> (HMULW x y)
+(Hmul16u x y) -> (HMULWU x y)
+(Hmul8 x y) -> (HMULB x y)
+(Hmul8u x y) -> (HMULBU x y)
+
+(Mod32 x y) -> (MODL x y)
+(Mod32u x y) -> (MODLU x y)
+(Mod16 x y) -> (MODW x y)
+(Mod16u x y) -> (MODWU x y)
+(Mod8 x y) -> (MODW (SignExt8to16 x) (SignExt8to16 y))
+(Mod8u x y) -> (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+
+(And32 x y) -> (ANDL x y)
+(And16 x y) -> (ANDL x y)
+(And8 x y) -> (ANDL x y)
+
+(Or32 x y) -> (ORL x y)
+(Or16 x y) -> (ORL x y)
+(Or8 x y) -> (ORL x y)
+
+(Xor32 x y) -> (XORL x y)
+(Xor16 x y) -> (XORL x y)
+(Xor8 x y) -> (XORL x y)
+
+(Neg32 x) -> (NEGL x)
+(Neg16 x) -> (NEGL x)
+(Neg8 x) -> (NEGL x)
+(Neg32F x) && !config.use387 -> (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
+(Neg64F x) && !config.use387 -> (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
+(Neg32F x) && config.use387 -> (FCHS x)
+(Neg64F x) && config.use387 -> (FCHS x)
+
+(Com32 x) -> (NOTL x)
+(Com16 x) -> (NOTL x)
+(Com8 x) -> (NOTL x)
+
+// Lowering boolean ops
+(AndB x y) -> (ANDL x y)
+(OrB x y) -> (ORL x y)
+(Not x) -> (XORLconst [1] x)
+
+// Lowering pointer arithmetic
+(OffPtr [off] ptr) -> (ADDLconst [off] ptr)
+
+(Bswap32 x) -> (BSWAPL x)
+
+(Sqrt x) -> (SQRTSD x)
+
+// Lowering extension
+(SignExt8to16 x) -> (MOVBLSX x)
+(SignExt8to32 x) -> (MOVBLSX x)
+(SignExt16to32 x) -> (MOVWLSX x)
+
+(ZeroExt8to16 x) -> (MOVBLZX x)
+(ZeroExt8to32 x) -> (MOVBLZX x)
+(ZeroExt16to32 x) -> (MOVWLZX x)
+
+(Signmask x) -> (SARLconst x [31])
+(Zeromask <t> x) -> (XORLconst [-1] (SBBLcarrymask <t> (CMPL x (MOVLconst [1]))))
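Signmask produces 0 or -1 from the sign bit; Zeromask produces 0 for x == 0 and -1 otherwise, built from the carry of CMPL x, $1. A Go model (the real lowering is branch-free via SBBLcarrymask; illustrative, not part of the patch):

	func signmask(x int32) int32 { return x >> 31 } // SARLconst x, $31

	func zeromask(x uint32) uint32 {
		var borrow uint32
		if x < 1 { // CMPL x, $1 sets carry exactly when x == 0
			borrow = 1
		}
		return ^(-borrow) // SBBLcarrymask yields -borrow; XORLconst $-1 flips it
	}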
+
+// Lowering truncation
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 x) -> x
+(Trunc32to8 x) -> x
+(Trunc32to16 x) -> x
+
+// Lowering float <-> int
+(Cvt32to32F x) -> (CVTSL2SS x)
+(Cvt32to64F x) -> (CVTSL2SD x)
+
+(Cvt32Fto32 x) -> (CVTTSS2SL x)
+(Cvt64Fto32 x) -> (CVTTSD2SL x)
+
+(Cvt32Fto64F x) -> (CVTSS2SD x)
+(Cvt64Fto32F x) -> (CVTSD2SS x)
+
+// Lowering shifts
+// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
+// result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
+(Lsh32x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+(Lsh32x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+(Lsh32x8 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+
+(Lsh16x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+(Lsh16x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+(Lsh16x8 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+
+(Lsh8x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+(Lsh8x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+(Lsh8x8 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+
+(Lrot32 <t> x [c]) -> (ROLLconst <t> [c&31] x)
+(Lrot16 <t> x [c]) -> (ROLWconst <t> [c&15] x)
+(Lrot8 <t> x [c]) -> (ROLBconst <t> [c&7] x)
+
+(Rsh32Ux32 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+(Rsh32Ux16 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+(Rsh32Ux8 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+
+(Rsh16Ux32 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
+(Rsh16Ux16 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
+(Rsh16Ux8 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
+
+(Rsh8Ux32 <t> x y) -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
+(Rsh8Ux16 <t> x y) -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
+(Rsh8Ux8 <t> x y) -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
+
+// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
+// We implement this by setting the shift amount to -1 (all ones) if it is >= width.
+
+(Rsh32x32 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
+(Rsh32x16 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
+(Rsh32x8 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
+
+(Rsh16x32 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
+(Rsh16x16 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
+(Rsh16x8 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
+
+(Rsh8x32 <t> x y) -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
+(Rsh8x16 <t> x y) -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
+(Rsh8x8 <t> x y) -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
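In Go terms, the unsigned rules AND the shifted value with an all-ones-or-zero mask, while the signed rules pin an out-of-range shift amount at 31 (the hardware masks shift counts to 5 bits, so an all-ones amount reads as 31). A model of the 32-bit cases (illustrative, not part of the patch):

	func lsh32(x, s uint32) uint32 {
		if s >= 32 {
			return 0 // SBBLcarrymask produces 0 once s >= width
		}
		return x << s
	}

	func rsh32x(x int32, s uint32) int32 {
		if s >= 32 {
			s = 31 // OR with NOT(mask) forces the amount to all ones
		}
		return x >> s
	}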
+
+// constant shifts
+// generic opt rewrites all constant shifts to shift by Const64
+(Lsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SHLLconst x [c])
+(Rsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SARLconst x [c])
+(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 -> (SHRLconst x [c])
+(Lsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SHLLconst x [c])
+(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SARWconst x [c])
+(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SHRWconst x [c])
+(Lsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SHLLconst x [c])
+(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SARBconst x [c])
+(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 -> (SHRBconst x [c])
+
+// large constant shifts
+(Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 -> (Const32 [0])
+(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 -> (Const32 [0])
+(Lsh16x64 _ (Const64 [c])) && uint64(c) >= 16 -> (Const16 [0])
+(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 -> (Const16 [0])
+(Lsh8x64 _ (Const64 [c])) && uint64(c) >= 8 -> (Const8 [0])
+(Rsh8Ux64 _ (Const64 [c])) && uint64(c) >= 8 -> (Const8 [0])
+
+// Large constant signed right shifts: only copies of the sign bit remain.
+(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 -> (SARLconst x [31])
+(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SARWconst x [15])
+(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 -> (SARBconst x [7])
+
+// Lowering comparisons
+(Less32 x y) -> (SETL (CMPL x y))
+(Less16 x y) -> (SETL (CMPW x y))
+(Less8 x y) -> (SETL (CMPB x y))
+(Less32U x y) -> (SETB (CMPL x y))
+(Less16U x y) -> (SETB (CMPW x y))
+(Less8U x y) -> (SETB (CMPB x y))
+// Use SETGF with reversed operands to dodge NaN case
+(Less64F x y) -> (SETGF (UCOMISD y x))
+(Less32F x y) -> (SETGF (UCOMISS y x))
+
+(Leq32 x y) -> (SETLE (CMPL x y))
+(Leq16 x y) -> (SETLE (CMPW x y))
+(Leq8 x y) -> (SETLE (CMPB x y))
+(Leq32U x y) -> (SETBE (CMPL x y))
+(Leq16U x y) -> (SETBE (CMPW x y))
+(Leq8U x y) -> (SETBE (CMPB x y))
+// Use SETGEF with reversed operands to dodge NaN case
+(Leq64F x y) -> (SETGEF (UCOMISD y x))
+(Leq32F x y) -> (SETGEF (UCOMISS y x))
+
+(Greater32 x y) -> (SETG (CMPL x y))
+(Greater16 x y) -> (SETG (CMPW x y))
+(Greater8 x y) -> (SETG (CMPB x y))
+(Greater32U x y) -> (SETA (CMPL x y))
+(Greater16U x y) -> (SETA (CMPW x y))
+(Greater8U x y) -> (SETA (CMPB x y))
+// Note: the Go assembler gets the UCOMISx operand order wrong; it is right here.
+// The bug is accommodated when the assembly is generated.
+(Greater64F x y) -> (SETGF (UCOMISD x y))
+(Greater32F x y) -> (SETGF (UCOMISS x y))
+
+(Geq32 x y) -> (SETGE (CMPL x y))
+(Geq16 x y) -> (SETGE (CMPW x y))
+(Geq8 x y) -> (SETGE (CMPB x y))
+(Geq32U x y) -> (SETAE (CMPL x y))
+(Geq16U x y) -> (SETAE (CMPW x y))
+(Geq8U x y) -> (SETAE (CMPB x y))
+// Note: the Go assembler gets the UCOMISx operand order wrong; it is right here.
+// The bug is accommodated when the assembly is generated.
+(Geq64F x y) -> (SETGEF (UCOMISD x y))
+(Geq32F x y) -> (SETGEF (UCOMISS x y))
+
+(Eq32 x y) -> (SETEQ (CMPL x y))
+(Eq16 x y) -> (SETEQ (CMPW x y))
+(Eq8 x y) -> (SETEQ (CMPB x y))
+(EqB x y) -> (SETEQ (CMPB x y))
+(EqPtr x y) -> (SETEQ (CMPL x y))
+(Eq64F x y) -> (SETEQF (UCOMISD x y))
+(Eq32F x y) -> (SETEQF (UCOMISS x y))
+
+(Neq32 x y) -> (SETNE (CMPL x y))
+(Neq16 x y) -> (SETNE (CMPW x y))
+(Neq8 x y) -> (SETNE (CMPB x y))
+(NeqB x y) -> (SETNE (CMPB x y))
+(NeqPtr x y) -> (SETNE (CMPL x y))
+(Neq64F x y) -> (SETNEF (UCOMISD x y))
+(Neq32F x y) -> (SETNEF (UCOMISS x y))
+
+// Lowering loads
+(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t)) -> (MOVLload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) -> (MOVWload ptr mem)
+(Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) -> (MOVBload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) -> (MOVSSload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) -> (MOVSDload ptr mem)
+
+// Lowering stores
+// These more specific FP versions of the Store pattern must come first.
+(Store [8] ptr val mem) && is64BitFloat(val.Type) -> (MOVSDstore ptr val mem)
+(Store [4] ptr val mem) && is32BitFloat(val.Type) -> (MOVSSstore ptr val mem)
+
+(Store [4] ptr val mem) -> (MOVLstore ptr val mem)
+(Store [2] ptr val mem) -> (MOVWstore ptr val mem)
+(Store [1] ptr val mem) -> (MOVBstore ptr val mem)
+
+// Lowering moves
+(Move [s] _ _ mem) && SizeAndAlign(s).Size() == 0 -> mem
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstore dst (MOVBload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 2 -> (MOVWstore dst (MOVWload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 -> (MOVLstore dst (MOVLload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 3 ->
+ (MOVBstore [2] dst (MOVBload [2] src mem)
+ (MOVWstore dst (MOVWload src mem) mem))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 5 ->
+ (MOVBstore [4] dst (MOVBload [4] src mem)
+ (MOVLstore dst (MOVLload src mem) mem))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 6 ->
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVLstore dst (MOVLload src mem) mem))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 7 ->
+ (MOVLstore [3] dst (MOVLload [3] src mem)
+ (MOVLstore dst (MOVLload src mem) mem))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 8 ->
+ (MOVLstore [4] dst (MOVLload [4] src mem)
+ (MOVLstore dst (MOVLload src mem) mem))
+
+// Adjust moves to be a multiple of 4 bytes.
+(Move [s] dst src mem)
+ && SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size()%4 != 0 ->
+ (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%4]
+ (ADDLconst <dst.Type> dst [SizeAndAlign(s).Size()%4])
+ (ADDLconst <src.Type> src [SizeAndAlign(s).Size()%4])
+ (MOVLstore dst (MOVLload src mem) mem))
+
+// Medium-sized copies use a Duff's device.
+(Move [s] dst src mem)
+ && SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() <= 4*128 && SizeAndAlign(s).Size()%4 == 0
+ && !config.noDuffDevice ->
+ (DUFFCOPY [10*(128-SizeAndAlign(s).Size()/4)] dst src mem)
+// 10 and 128 are magic constants. 10 is the number of bytes to encode:
+// MOVL (SI), CX
+// ADDL $4, SI
+// MOVL CX, (DI)
+// ADDL $4, DI
+// and 128 is the number of such blocks. See src/runtime/duff_386.s:duffcopy.
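Worked through the constants: an n-byte copy (n a multiple of 4, at most 512) enters duffcopy at offset 10*(128 - n/4); e.g. n = 40 copies 10 words, so the entry point is 10*(128-10) = 1180 bytes in, leaving exactly the last 10 blocks to run. As a restatement of the rule above (not a new API):

	func duffCopyOffset(n int64) int64 {
		return 10 * (128 - n/4) // n=40 -> 10*(128-10) = 1180
	}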
+
+// Large copying uses REP MOVSL.
+(Move [s] dst src mem) && (SizeAndAlign(s).Size() > 4*128 || config.noDuffDevice) && SizeAndAlign(s).Size()%4 == 0 ->
+ (REPMOVSL dst src (MOVLconst [SizeAndAlign(s).Size()/4]) mem)
+
+// Lowering Zero instructions
+(Zero [s] _ mem) && SizeAndAlign(s).Size() == 0 -> mem
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstoreconst [0] destptr mem)
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 2 -> (MOVWstoreconst [0] destptr mem)
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 4 -> (MOVLstoreconst [0] destptr mem)
+
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 3 ->
+ (MOVBstoreconst [makeValAndOff(0,2)] destptr
+ (MOVWstoreconst [0] destptr mem))
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 5 ->
+ (MOVBstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [0] destptr mem))
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 6 ->
+ (MOVWstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [0] destptr mem))
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 7 ->
+ (MOVLstoreconst [makeValAndOff(0,3)] destptr
+ (MOVLstoreconst [0] destptr mem))
+
+// Strip off any fractional word zeroing.
+(Zero [s] destptr mem) && SizeAndAlign(s).Size()%4 != 0 && SizeAndAlign(s).Size() > 4 ->
+ (Zero [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%4] (ADDLconst destptr [SizeAndAlign(s).Size()%4])
+ (MOVLstoreconst [0] destptr mem))
+
+// Zero small numbers of words directly.
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 8 ->
+ (MOVLstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [0] destptr mem))
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 12 ->
+ (MOVLstoreconst [makeValAndOff(0,8)] destptr
+ (MOVLstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [0] destptr mem)))
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 16 ->
+ (MOVLstoreconst [makeValAndOff(0,12)] destptr
+ (MOVLstoreconst [makeValAndOff(0,8)] destptr
+ (MOVLstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [0] destptr mem))))
+
+// Medium-sized zeroing uses a Duff's device.
+(Zero [s] destptr mem)
+ && SizeAndAlign(s).Size() > 16
+ && SizeAndAlign(s).Size() <= 4*128
+ && SizeAndAlign(s).Size()%4 == 0
+ && !config.noDuffDevice ->
+ (DUFFZERO [1*(128-SizeAndAlign(s).Size()/4)] destptr (MOVLconst [0]) mem)
+// 1 and 128 are magic constants. 1 is the number of bytes to encode STOSL.
+// 128 is the number of STOSL instructions in duffzero.
+// See src/runtime/duff_386.s:duffzero.
+
+// Large zeroing uses REP STOSL.
+(Zero [s] destptr mem)
+ && (SizeAndAlign(s).Size() > 4*128 || (config.noDuffDevice && SizeAndAlign(s).Size() > 16))
+ && SizeAndAlign(s).Size()%4 == 0 ->
+ (REPSTOSL destptr (MOVLconst [SizeAndAlign(s).Size()/4]) (MOVLconst [0]) mem)
+
+// Lowering constants
+(Const8 [val]) -> (MOVLconst [val])
+(Const16 [val]) -> (MOVLconst [val])
+(Const32 [val]) -> (MOVLconst [val])
+(Const32F [val]) -> (MOVSSconst [val])
+(Const64F [val]) -> (MOVSDconst [val])
+(ConstNil) -> (MOVLconst [0])
+(ConstBool [b]) -> (MOVLconst [b])
+
+// Lowering calls
+(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
+(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
+(DeferCall [argwid] mem) -> (CALLdefer [argwid] mem)
+(GoCall [argwid] mem) -> (CALLgo [argwid] mem)
+(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)
+
+// Miscellaneous
+(Convert <t> x mem) -> (MOVLconvert <t> x mem)
+(IsNonNil p) -> (SETNE (TESTL p p))
+(IsInBounds idx len) -> (SETB (CMPL idx len))
+(IsSliceInBounds idx len) -> (SETBE (CMPL idx len))
+(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
+(GetG mem) -> (LoweredGetG mem)
+(GetClosurePtr) -> (LoweredGetClosurePtr)
+(Addr {sym} base) -> (LEAL {sym} base)
+
+// block rewrites
+(If (SETL cmp) yes no) -> (LT cmp yes no)
+(If (SETLE cmp) yes no) -> (LE cmp yes no)
+(If (SETG cmp) yes no) -> (GT cmp yes no)
+(If (SETGE cmp) yes no) -> (GE cmp yes no)
+(If (SETEQ cmp) yes no) -> (EQ cmp yes no)
+(If (SETNE cmp) yes no) -> (NE cmp yes no)
+(If (SETB cmp) yes no) -> (ULT cmp yes no)
+(If (SETBE cmp) yes no) -> (ULE cmp yes no)
+(If (SETA cmp) yes no) -> (UGT cmp yes no)
+(If (SETAE cmp) yes no) -> (UGE cmp yes no)
+
+// Special case for floating point - LF/LEF not generated
+(If (SETGF cmp) yes no) -> (UGT cmp yes no)
+(If (SETGEF cmp) yes no) -> (UGE cmp yes no)
+(If (SETEQF cmp) yes no) -> (EQF cmp yes no)
+(If (SETNEF cmp) yes no) -> (NEF cmp yes no)
+
+(If cond yes no) -> (NE (TESTB cond cond) yes no)
+
+// ***************************
+// Above: lowering rules
+// Below: optimizations
+// ***************************
+// TODO: Should the optimizations be a separate pass?
+
+// Fold boolean tests into blocks
+(NE (TESTB (SETL cmp) (SETL cmp)) yes no) -> (LT cmp yes no)
+(NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) -> (LE cmp yes no)
+(NE (TESTB (SETG cmp) (SETG cmp)) yes no) -> (GT cmp yes no)
+(NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) -> (GE cmp yes no)
+(NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) -> (EQ cmp yes no)
+(NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) -> (NE cmp yes no)
+(NE (TESTB (SETB cmp) (SETB cmp)) yes no) -> (ULT cmp yes no)
+(NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) -> (ULE cmp yes no)
+(NE (TESTB (SETA cmp) (SETA cmp)) yes no) -> (UGT cmp yes no)
+(NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) -> (UGE cmp yes no)
+
+// Special case for floating point - LF/LEF not generated
+(NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) -> (UGT cmp yes no)
+(NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) -> (UGE cmp yes no)
+(NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) -> (EQF cmp yes no)
+(NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) -> (NEF cmp yes no)
+
+// fold constants into instructions
+(ADDL x (MOVLconst [c])) -> (ADDLconst [c] x)
+(ADDL (MOVLconst [c]) x) -> (ADDLconst [c] x)
+(ADDLcarry x (MOVLconst [c])) -> (ADDLconstcarry [c] x)
+(ADDLcarry (MOVLconst [c]) x) -> (ADDLconstcarry [c] x)
+(ADCL x (MOVLconst [c]) f) -> (ADCLconst [c] x f)
+(ADCL (MOVLconst [c]) x f) -> (ADCLconst [c] x f)
+
+(SUBL x (MOVLconst [c])) -> (SUBLconst x [c])
+(SUBL (MOVLconst [c]) x) -> (NEGL (SUBLconst <v.Type> x [c]))
+(SUBLcarry x (MOVLconst [c])) -> (SUBLconstcarry [c] x)
+(SBBL x (MOVLconst [c]) f) -> (SBBLconst [c] x f)
+
+(MULL x (MOVLconst [c])) -> (MULLconst [c] x)
+(MULL (MOVLconst [c]) x) -> (MULLconst [c] x)
+
+(ANDL x (MOVLconst [c])) -> (ANDLconst [c] x)
+(ANDL (MOVLconst [c]) x) -> (ANDLconst [c] x)
+
+(ANDLconst [c] (ANDLconst [d] x)) -> (ANDLconst [c & d] x)
+
+(XORLconst [c] (XORLconst [d] x)) -> (XORLconst [c ^ d] x)
+
+(MULLconst [c] (MULLconst [d] x)) -> (MULLconst [int64(int32(c * d))] x)
+
+(ORL x (MOVLconst [c])) -> (ORLconst [c] x)
+(ORL (MOVLconst [c]) x) -> (ORLconst [c] x)
+
+(XORL x (MOVLconst [c])) -> (XORLconst [c] x)
+(XORL (MOVLconst [c]) x) -> (XORLconst [c] x)
+
+(SHLL x (MOVLconst [c])) -> (SHLLconst [c&31] x)
+(SHRL x (MOVLconst [c])) -> (SHRLconst [c&31] x)
+(SHRW x (MOVLconst [c])) -> (SHRWconst [c&31] x)
+(SHRB x (MOVLconst [c])) -> (SHRBconst [c&31] x)
+(SARL x (MOVLconst [c])) -> (SARLconst [c&31] x)
+(SARW x (MOVLconst [c])) -> (SARWconst [c&31] x)
+(SARB x (MOVLconst [c])) -> (SARBconst [c&31] x)
+
+(SARL x (ANDLconst [31] y)) -> (SARL x y)
+
+(SHLL x (ANDLconst [31] y)) -> (SHLL x y)
+
+(SHRL x (ANDLconst [31] y)) -> (SHRL x y)
+
+(ROLLconst [c] (ROLLconst [d] x)) -> (ROLLconst [(c+d)&31] x)
+(ROLWconst [c] (ROLWconst [d] x)) -> (ROLWconst [(c+d)&15] x)
+(ROLBconst [c] (ROLBconst [d] x)) -> (ROLBconst [(c+d)& 7] x)
+
+(ROLLconst [0] x) -> x
+(ROLWconst [0] x) -> x
+(ROLBconst [0] x) -> x
+
+// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
+// because the x86 instructions are defined to use all 5 bits of the shift even
+// for the small shifts. I don't think we'll ever generate a weird shift (e.g.
+// (SHRW x (MOVLconst [24]))), but just in case.
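+// For instance, a hypothetical (SHRW x (MOVLconst [35])) would lower to
+// (SHRWconst [3] x), since 35&31 == 3, matching what the hardware does with
+// a shift count of 35.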
+
+(CMPL x (MOVLconst [c])) -> (CMPLconst x [c])
+(CMPL (MOVLconst [c]) x) -> (InvertFlags (CMPLconst x [c]))
+(CMPW x (MOVLconst [c])) -> (CMPWconst x [int64(int16(c))])
+(CMPW (MOVLconst [c]) x) -> (InvertFlags (CMPWconst x [int64(int16(c))]))
+(CMPB x (MOVLconst [c])) -> (CMPBconst x [int64(int8(c))])
+(CMPB (MOVLconst [c]) x) -> (InvertFlags (CMPBconst x [int64(int8(c))]))
+
+// strength reduction
+// Assumes the following instruction costs, from https://gmplib.org/~tege/x86-timing.pdf:
+//    1 - addl, shll, leal, negl
+//    3 - imull
+// This limits the rewrites to two instructions.
+// TODO: 27, 81
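+// As a worked example, (MULLconst [11] x) -> (LEAL2 x (LEAL4 <v.Type> x x)):
+// the inner LEAL4 computes x+4*x = 5*x and the outer LEAL2 computes
+// x+2*(5*x) = 11*x, two 1-cycle instructions in place of one 3-cycle imull.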
+(MULLconst [-1] x) -> (NEGL x)
+(MULLconst [0] _) -> (MOVLconst [0])
+(MULLconst [1] x) -> x
+(MULLconst [3] x) -> (LEAL2 x x)
+(MULLconst [5] x) -> (LEAL4 x x)
+(MULLconst [7] x) -> (LEAL8 (NEGL <v.Type> x) x)
+(MULLconst [9] x) -> (LEAL8 x x)
+(MULLconst [11] x) -> (LEAL2 x (LEAL4 <v.Type> x x))
+(MULLconst [13] x) -> (LEAL4 x (LEAL2 <v.Type> x x))
+(MULLconst [21] x) -> (LEAL4 x (LEAL4 <v.Type> x x))
+(MULLconst [25] x) -> (LEAL8 x (LEAL2 <v.Type> x x))
+(MULLconst [37] x) -> (LEAL4 x (LEAL8 <v.Type> x x))
+(MULLconst [41] x) -> (LEAL8 x (LEAL4 <v.Type> x x))
+(MULLconst [73] x) -> (LEAL8 x (LEAL8 <v.Type> x x))
+
+(MULLconst [c] x) && isPowerOfTwo(c) -> (SHLLconst [log2(c)] x)
+(MULLconst [c] x) && isPowerOfTwo(c+1) && c >= 15 -> (SUBL (SHLLconst <v.Type> [log2(c+1)] x) x)
+(MULLconst [c] x) && isPowerOfTwo(c-1) && c >= 17 -> (LEAL1 (SHLLconst <v.Type> [log2(c-1)] x) x)
+(MULLconst [c] x) && isPowerOfTwo(c-2) && c >= 34 -> (LEAL2 (SHLLconst <v.Type> [log2(c-2)] x) x)
+(MULLconst [c] x) && isPowerOfTwo(c-4) && c >= 68 -> (LEAL4 (SHLLconst <v.Type> [log2(c-4)] x) x)
+(MULLconst [c] x) && isPowerOfTwo(c-8) && c >= 136 -> (LEAL8 (SHLLconst <v.Type> [log2(c-8)] x) x)
+(MULLconst [c] x) && c%3 == 0 && isPowerOfTwo(c/3) -> (SHLLconst [log2(c/3)] (LEAL2 <v.Type> x x))
+(MULLconst [c] x) && c%5 == 0 && isPowerOfTwo(c/5) -> (SHLLconst [log2(c/5)] (LEAL4 <v.Type> x x))
+(MULLconst [c] x) && c%9 == 0 && isPowerOfTwo(c/9) -> (SHLLconst [log2(c/9)] (LEAL8 <v.Type> x x))
+
+// combine add/shift into LEAL
+(ADDL x (SHLLconst [3] y)) -> (LEAL8 x y)
+(ADDL x (SHLLconst [2] y)) -> (LEAL4 x y)
+(ADDL x (SHLLconst [1] y)) -> (LEAL2 x y)
+(ADDL x (ADDL y y)) -> (LEAL2 x y)
+(ADDL x (ADDL x y)) -> (LEAL2 y x)
+(ADDL x (ADDL y x)) -> (LEAL2 y x)
+
+// combine ADDL/ADDLconst into LEAL1
+(ADDLconst [c] (ADDL x y)) -> (LEAL1 [c] x y)
+(ADDL (ADDLconst [c] x) y) -> (LEAL1 [c] x y)
+(ADDL x (ADDLconst [c] y)) -> (LEAL1 [c] x y)
+
+// fold ADDL into LEAL
+(ADDLconst [c] (LEAL [d] {s} x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
+(LEAL [c] {s} (ADDLconst [d] x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
+(LEAL [c] {s} (ADDL x y)) && x.Op != OpSB && y.Op != OpSB -> (LEAL1 [c] {s} x y)
+(ADDL x (LEAL [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (LEAL1 [c] {s} x y)
+(ADDL (LEAL [c] {s} x) y) && x.Op != OpSB && y.Op != OpSB -> (LEAL1 [c] {s} x y)
+
+// fold ADDLconst into LEALx
+(ADDLconst [c] (LEAL1 [d] {s} x y)) && is32Bit(c+d) -> (LEAL1 [c+d] {s} x y)
+(ADDLconst [c] (LEAL2 [d] {s} x y)) && is32Bit(c+d) -> (LEAL2 [c+d] {s} x y)
+(ADDLconst [c] (LEAL4 [d] {s} x y)) && is32Bit(c+d) -> (LEAL4 [c+d] {s} x y)
+(ADDLconst [c] (LEAL8 [d] {s} x y)) && is32Bit(c+d) -> (LEAL8 [c+d] {s} x y)
+(LEAL1 [c] {s} (ADDLconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAL1 [c+d] {s} x y)
+(LEAL1 [c] {s} x (ADDLconst [d] y)) && is32Bit(c+d) && y.Op != OpSB -> (LEAL1 [c+d] {s} x y)
+(LEAL2 [c] {s} (ADDLconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAL2 [c+d] {s} x y)
+(LEAL2 [c] {s} x (ADDLconst [d] y)) && is32Bit(c+2*d) && y.Op != OpSB -> (LEAL2 [c+2*d] {s} x y)
+(LEAL4 [c] {s} (ADDLconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAL4 [c+d] {s} x y)
+(LEAL4 [c] {s} x (ADDLconst [d] y)) && is32Bit(c+4*d) && y.Op != OpSB -> (LEAL4 [c+4*d] {s} x y)
+(LEAL8 [c] {s} (ADDLconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAL8 [c+d] {s} x y)
+(LEAL8 [c] {s} x (ADDLconst [d] y)) && is32Bit(c+8*d) && y.Op != OpSB -> (LEAL8 [c+8*d] {s} x y)
+
+// fold shifts into LEALx
+(LEAL1 [c] {s} x (SHLLconst [1] y)) -> (LEAL2 [c] {s} x y)
+(LEAL1 [c] {s} (SHLLconst [1] x) y) -> (LEAL2 [c] {s} y x)
+(LEAL1 [c] {s} x (SHLLconst [2] y)) -> (LEAL4 [c] {s} x y)
+(LEAL1 [c] {s} (SHLLconst [2] x) y) -> (LEAL4 [c] {s} y x)
+(LEAL1 [c] {s} x (SHLLconst [3] y)) -> (LEAL8 [c] {s} x y)
+(LEAL1 [c] {s} (SHLLconst [3] x) y) -> (LEAL8 [c] {s} y x)
+
+(LEAL2 [c] {s} x (SHLLconst [1] y)) -> (LEAL4 [c] {s} x y)
+(LEAL2 [c] {s} x (SHLLconst [2] y)) -> (LEAL8 [c] {s} x y)
+(LEAL4 [c] {s} x (SHLLconst [1] y)) -> (LEAL8 [c] {s} x y)
+
+// reverse ordering of compare instruction
+(SETL (InvertFlags x)) -> (SETG x)
+(SETG (InvertFlags x)) -> (SETL x)
+(SETB (InvertFlags x)) -> (SETA x)
+(SETA (InvertFlags x)) -> (SETB x)
+(SETLE (InvertFlags x)) -> (SETGE x)
+(SETGE (InvertFlags x)) -> (SETLE x)
+(SETBE (InvertFlags x)) -> (SETAE x)
+(SETAE (InvertFlags x)) -> (SETBE x)
+(SETEQ (InvertFlags x)) -> (SETEQ x)
+(SETNE (InvertFlags x)) -> (SETNE x)
+
+// sign extended loads
+// Note: The combined instruction must end up in the same block
+// as the original load. If not, we end up making a value with
+// memory type live in two different blocks, which can lead to
+// multiple memory values alive simultaneously.
+// Make sure we don't combine these ops if the load has another use.
+// This prevents a single load from being split into multiple loads
+// which then might return different values. See test/atomicload.go.
+(MOVBLSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBLSXload <v.Type> [off] {sym} ptr mem)
+(MOVBLZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+(MOVWLSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWLSXload <v.Type> [off] {sym} ptr mem)
+(MOVWLZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+
+(MOVBLZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
+(MOVWLZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
+(MOVWLZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem)
+
+// replace load from same location as preceding store with copy
+(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+
+// Fold extensions and ANDs together.
+(MOVBLZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xff] x)
+(MOVWLZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xffff] x)
+(MOVBLSX (ANDLconst [c] x)) && c & 0x80 == 0 -> (ANDLconst [c & 0x7f] x)
+(MOVWLSX (ANDLconst [c] x)) && c & 0x8000 == 0 -> (ANDLconst [c & 0x7fff] x)
+
+// Don't extend before storing
+(MOVWstore [off] {sym} ptr (MOVWLSX x) mem) -> (MOVWstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBLSX x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWLZX x) mem) -> (MOVWstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBLZX x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+
+// fold constants into memory operations
+// Note that this is not always a good idea because if not all the uses of
+// the ADDLconst get eliminated, we still have to compute the ADDLconst and we now
+// have potentially two live values (ptr and (ADDLconst [off] ptr)) instead of one.
+// Nevertheless, let's do it!
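+// For example, (MOVLload [4] {sym} (ADDLconst [8] ptr) mem) folds to
+// (MOVLload [12] {sym} ptr mem), saving the add when the ADDLconst has no
+// other uses.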
+(MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVLload [off1+off2] {sym} ptr mem)
+(MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload [off1+off2] {sym} ptr mem)
+(MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload [off1+off2] {sym} ptr mem)
+(MOVSSload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSSload [off1+off2] {sym} ptr mem)
+(MOVSDload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSDload [off1+off2] {sym} ptr mem)
+
+(MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVLstore [off1+off2] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} ptr val mem)
+(MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} ptr val mem)
+(MOVSSstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSSstore [off1+off2] {sym} ptr val mem)
+(MOVSDstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSDstore [off1+off2] {sym} ptr val mem)
+
+// Fold constants into stores.
+(MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
+ (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
+ (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
+(MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
+ (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
+
+// Fold address offsets into constant stores.
+(MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
+ (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+(MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
+ (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+(MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
+ (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+
+// We need to fold LEAL into the MOVx ops so that the live variable analysis knows
+// what variables are being read/written by the ops.
+// Note: we turn off this merging for operations on globals when building
+// position-independent code (when Flag_shared is set).
+// PIC needs a spare register to load the PC into. Having the LEAL be
+// a separate instruction gives us that register. Having the LEAL be
+// a separate instruction also allows it to be CSEd (which is good because
+// it compiles to a thunk call).
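+// Concretely: under Flag_shared a load of a global stays as a separate
+// (LEAL {sym} SB) feeding the MOVLload rather than being merged into it,
+// so one LEAL (one thunk call) can serve several loads of the same symbol.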
+(MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
+ (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVSSload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
+ (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVSDload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
+ (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
+(MOVBLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
+ (MOVBLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVWLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
+ (MOVWLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
+(MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
+ (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVSSstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
+ (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVSDstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ && (base.Op != OpSB || !config.ctxt.Flag_shared) ->
+ (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+
+(MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
+ (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
+ (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) ->
+ (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+
+// generating indexed loads and stores
+(MOVBload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+(MOVWload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+(MOVWload [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+(MOVLload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+(MOVLload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+(MOVSSload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+(MOVSSload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+(MOVSDload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+(MOVSDload [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+
+(MOVBstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+(MOVWstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+(MOVWstore [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+(MOVLstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+(MOVLstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+(MOVSSstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+(MOVSSstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+(MOVSDstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+(MOVSDstore [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+
+(MOVBload [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOVBloadidx1 [off] {sym} ptr idx mem)
+(MOVWload [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOVWloadidx1 [off] {sym} ptr idx mem)
+(MOVLload [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOVLloadidx1 [off] {sym} ptr idx mem)
+(MOVSSload [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOVSSloadidx1 [off] {sym} ptr idx mem)
+(MOVSDload [off] {sym} (ADDL ptr idx) mem) && ptr.Op != OpSB -> (MOVSDloadidx1 [off] {sym} ptr idx mem)
+(MOVBstore [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOVBstoreidx1 [off] {sym} ptr idx val mem)
+(MOVWstore [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOVWstoreidx1 [off] {sym} ptr idx val mem)
+(MOVLstore [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOVLstoreidx1 [off] {sym} ptr idx val mem)
+(MOVSSstore [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
+(MOVSDstore [off] {sym} (ADDL ptr idx) val mem) && ptr.Op != OpSB -> (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
+
+(MOVBstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
+ (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
+(MOVWstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
+ (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
+(MOVWstoreconst [x] {sym1} (LEAL2 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
+ (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
+(MOVLstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
+ (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
+(MOVLstoreconst [x] {sym1} (LEAL4 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) ->
+ (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
+
+(MOVBstoreconst [x] {sym} (ADDL ptr idx) mem) -> (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
+(MOVWstoreconst [x] {sym} (ADDL ptr idx) mem) -> (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
+(MOVLstoreconst [x] {sym} (ADDL ptr idx) mem) -> (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
+
+// combine SHLL into indexed loads and stores
+(MOVWloadidx1 [c] {sym} ptr (SHLLconst [1] idx) mem) -> (MOVWloadidx2 [c] {sym} ptr idx mem)
+(MOVLloadidx1 [c] {sym} ptr (SHLLconst [2] idx) mem) -> (MOVLloadidx4 [c] {sym} ptr idx mem)
+(MOVWstoreidx1 [c] {sym} ptr (SHLLconst [1] idx) val mem) -> (MOVWstoreidx2 [c] {sym} ptr idx val mem)
+(MOVLstoreidx1 [c] {sym} ptr (SHLLconst [2] idx) val mem) -> (MOVLstoreidx4 [c] {sym} ptr idx val mem)
+(MOVWstoreconstidx1 [c] {sym} ptr (SHLLconst [1] idx) mem) -> (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
+(MOVLstoreconstidx1 [c] {sym} ptr (SHLLconst [2] idx) mem) -> (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
+
+// combine ADDL into indexed loads and stores
+(MOVBloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
+(MOVWloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem)
+(MOVWloadidx2 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVWloadidx2 [c+d] {sym} ptr idx mem)
+(MOVLloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem)
+(MOVLloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVLloadidx4 [c+d] {sym} ptr idx mem)
+(MOVSSloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
+(MOVSSloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
+(MOVSDloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
+(MOVSDloadidx8 [c] {sym} (ADDLconst [d] ptr) idx mem) -> (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
+
+(MOVBstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVWstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVWstoreidx2 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
+(MOVLstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVLstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
+(MOVSSstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVSSstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
+(MOVSDstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVSDstoreidx8 [c] {sym} (ADDLconst [d] ptr) idx val mem) -> (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
+
+(MOVBloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem)
+(MOVWloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem)
+(MOVWloadidx2 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
+(MOVLloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem)
+(MOVLloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
+(MOVSSloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
+(MOVSSloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
+(MOVSDloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
+(MOVSDloadidx8 [c] {sym} ptr (ADDLconst [d] idx) mem) -> (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
+
+(MOVBstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVWstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVWstoreidx2 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
+(MOVLstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVLstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
+(MOVSSstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVSSstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
+(MOVSDstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
+(MOVSDstoreidx8 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
+
+(MOVBstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
+ (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+(MOVWstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
+ (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+(MOVWstoreconstidx2 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
+ (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+(MOVLstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
+ (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+(MOVLstoreconstidx4 [x] {sym} (ADDLconst [c] ptr) idx mem) ->
+ (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+
+(MOVBstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
+ (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+(MOVWstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
+ (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+(MOVWstoreconstidx2 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
+ (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
+(MOVLstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
+ (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+(MOVLstoreconstidx4 [x] {sym} ptr (ADDLconst [c] idx) mem) ->
+ (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
+
+// fold LEALs together
+(LEAL [off1] {sym1} (LEAL [off2] {sym2} x)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (LEAL [off1+off2] {mergeSym(sym1,sym2)} x)
+
+// LEAL into LEAL1
+(LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
+ (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAL1 [off1] {sym1} x (LEAL [off2] {sym2} y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB ->
+ (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAL1 into LEAL
+(LEAL [off1] {sym1} (LEAL1 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAL into LEAL[248]
+(LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
+ (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
+ (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
+ (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAL[248] into LEAL
+(LEAL [off1] {sym1} (LEAL2 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAL [off1] {sym1} (LEAL4 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// Absorb InvertFlags into branches.
+(LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
+(ULT (InvertFlags cmp) yes no) -> (UGT cmp yes no)
+(UGT (InvertFlags cmp) yes no) -> (ULT cmp yes no)
+(ULE (InvertFlags cmp) yes no) -> (UGE cmp yes no)
+(UGE (InvertFlags cmp) yes no) -> (ULE cmp yes no)
+(EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) -> (NE cmp yes no)
+
+// Constant comparisons.
+(CMPLconst (MOVLconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
+(CMPLconst (MOVLconst [x]) [y]) && int32(x)<int32(y) && uint32(x)<uint32(y) -> (FlagLT_ULT)
+(CMPLconst (MOVLconst [x]) [y]) && int32(x)<int32(y) && uint32(x)>uint32(y) -> (FlagLT_UGT)
+(CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)<uint32(y) -> (FlagGT_ULT)
+(CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)>uint32(y) -> (FlagGT_UGT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)==int16(y) -> (FlagEQ)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)<int16(y) && uint16(x)<uint16(y) -> (FlagLT_ULT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)<int16(y) && uint16(x)>uint16(y) -> (FlagLT_UGT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)<uint16(y) -> (FlagGT_ULT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)>uint16(y) -> (FlagGT_UGT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)==int8(y) -> (FlagEQ)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)<int8(y) && uint8(x)<uint8(y) -> (FlagLT_ULT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)<int8(y) && uint8(x)>uint8(y) -> (FlagLT_UGT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)<uint8(y) -> (FlagGT_ULT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)>uint8(y) -> (FlagGT_UGT)
+
+// Other known comparisons.
+(CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) -> (FlagLT_ULT)
+(CMPLconst (ANDLconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT_ULT)
+(CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < int16(n) -> (FlagLT_ULT)
+(CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < int8(n) -> (FlagLT_ULT)
+// TODO: DIVxU also.
+
+// Absorb flag constants into SBB ops.
+(SBBLcarrymask (FlagEQ)) -> (MOVLconst [0])
+(SBBLcarrymask (FlagLT_ULT)) -> (MOVLconst [-1])
+(SBBLcarrymask (FlagLT_UGT)) -> (MOVLconst [0])
+(SBBLcarrymask (FlagGT_ULT)) -> (MOVLconst [-1])
+(SBBLcarrymask (FlagGT_UGT)) -> (MOVLconst [0])
+
+// Absorb flag constants into branches.
+(EQ (FlagEQ) yes no) -> (First nil yes no)
+(EQ (FlagLT_ULT) yes no) -> (First nil no yes)
+(EQ (FlagLT_UGT) yes no) -> (First nil no yes)
+(EQ (FlagGT_ULT) yes no) -> (First nil no yes)
+(EQ (FlagGT_UGT) yes no) -> (First nil no yes)
+
+(NE (FlagEQ) yes no) -> (First nil no yes)
+(NE (FlagLT_ULT) yes no) -> (First nil yes no)
+(NE (FlagLT_UGT) yes no) -> (First nil yes no)
+(NE (FlagGT_ULT) yes no) -> (First nil yes no)
+(NE (FlagGT_UGT) yes no) -> (First nil yes no)
+
+(LT (FlagEQ) yes no) -> (First nil no yes)
+(LT (FlagLT_ULT) yes no) -> (First nil yes no)
+(LT (FlagLT_UGT) yes no) -> (First nil yes no)
+(LT (FlagGT_ULT) yes no) -> (First nil no yes)
+(LT (FlagGT_UGT) yes no) -> (First nil no yes)
+
+(LE (FlagEQ) yes no) -> (First nil yes no)
+(LE (FlagLT_ULT) yes no) -> (First nil yes no)
+(LE (FlagLT_UGT) yes no) -> (First nil yes no)
+(LE (FlagGT_ULT) yes no) -> (First nil no yes)
+(LE (FlagGT_UGT) yes no) -> (First nil no yes)
+
+(GT (FlagEQ) yes no) -> (First nil no yes)
+(GT (FlagLT_ULT) yes no) -> (First nil no yes)
+(GT (FlagLT_UGT) yes no) -> (First nil no yes)
+(GT (FlagGT_ULT) yes no) -> (First nil yes no)
+(GT (FlagGT_UGT) yes no) -> (First nil yes no)
+
+(GE (FlagEQ) yes no) -> (First nil yes no)
+(GE (FlagLT_ULT) yes no) -> (First nil no yes)
+(GE (FlagLT_UGT) yes no) -> (First nil no yes)
+(GE (FlagGT_ULT) yes no) -> (First nil yes no)
+(GE (FlagGT_UGT) yes no) -> (First nil yes no)
+
+(ULT (FlagEQ) yes no) -> (First nil no yes)
+(ULT (FlagLT_ULT) yes no) -> (First nil yes no)
+(ULT (FlagLT_UGT) yes no) -> (First nil no yes)
+(ULT (FlagGT_ULT) yes no) -> (First nil yes no)
+(ULT (FlagGT_UGT) yes no) -> (First nil no yes)
+
+(ULE (FlagEQ) yes no) -> (First nil yes no)
+(ULE (FlagLT_ULT) yes no) -> (First nil yes no)
+(ULE (FlagLT_UGT) yes no) -> (First nil no yes)
+(ULE (FlagGT_ULT) yes no) -> (First nil yes no)
+(ULE (FlagGT_UGT) yes no) -> (First nil no yes)
+
+(UGT (FlagEQ) yes no) -> (First nil no yes)
+(UGT (FlagLT_ULT) yes no) -> (First nil no yes)
+(UGT (FlagLT_UGT) yes no) -> (First nil yes no)
+(UGT (FlagGT_ULT) yes no) -> (First nil no yes)
+(UGT (FlagGT_UGT) yes no) -> (First nil yes no)
+
+(UGE (FlagEQ) yes no) -> (First nil yes no)
+(UGE (FlagLT_ULT) yes no) -> (First nil no yes)
+(UGE (FlagLT_UGT) yes no) -> (First nil yes no)
+(UGE (FlagGT_ULT) yes no) -> (First nil no yes)
+(UGE (FlagGT_UGT) yes no) -> (First nil yes no)
+
+// Absorb flag constants into SETxx ops.
+(SETEQ (FlagEQ)) -> (MOVLconst [1])
+(SETEQ (FlagLT_ULT)) -> (MOVLconst [0])
+(SETEQ (FlagLT_UGT)) -> (MOVLconst [0])
+(SETEQ (FlagGT_ULT)) -> (MOVLconst [0])
+(SETEQ (FlagGT_UGT)) -> (MOVLconst [0])
+
+(SETNE (FlagEQ)) -> (MOVLconst [0])
+(SETNE (FlagLT_ULT)) -> (MOVLconst [1])
+(SETNE (FlagLT_UGT)) -> (MOVLconst [1])
+(SETNE (FlagGT_ULT)) -> (MOVLconst [1])
+(SETNE (FlagGT_UGT)) -> (MOVLconst [1])
+
+(SETL (FlagEQ)) -> (MOVLconst [0])
+(SETL (FlagLT_ULT)) -> (MOVLconst [1])
+(SETL (FlagLT_UGT)) -> (MOVLconst [1])
+(SETL (FlagGT_ULT)) -> (MOVLconst [0])
+(SETL (FlagGT_UGT)) -> (MOVLconst [0])
+
+(SETLE (FlagEQ)) -> (MOVLconst [1])
+(SETLE (FlagLT_ULT)) -> (MOVLconst [1])
+(SETLE (FlagLT_UGT)) -> (MOVLconst [1])
+(SETLE (FlagGT_ULT)) -> (MOVLconst [0])
+(SETLE (FlagGT_UGT)) -> (MOVLconst [0])
+
+(SETG (FlagEQ)) -> (MOVLconst [0])
+(SETG (FlagLT_ULT)) -> (MOVLconst [0])
+(SETG (FlagLT_UGT)) -> (MOVLconst [0])
+(SETG (FlagGT_ULT)) -> (MOVLconst [1])
+(SETG (FlagGT_UGT)) -> (MOVLconst [1])
+
+(SETGE (FlagEQ)) -> (MOVLconst [1])
+(SETGE (FlagLT_ULT)) -> (MOVLconst [0])
+(SETGE (FlagLT_UGT)) -> (MOVLconst [0])
+(SETGE (FlagGT_ULT)) -> (MOVLconst [1])
+(SETGE (FlagGT_UGT)) -> (MOVLconst [1])
+
+(SETB (FlagEQ)) -> (MOVLconst [0])
+(SETB (FlagLT_ULT)) -> (MOVLconst [1])
+(SETB (FlagLT_UGT)) -> (MOVLconst [0])
+(SETB (FlagGT_ULT)) -> (MOVLconst [1])
+(SETB (FlagGT_UGT)) -> (MOVLconst [0])
+
+(SETBE (FlagEQ)) -> (MOVLconst [1])
+(SETBE (FlagLT_ULT)) -> (MOVLconst [1])
+(SETBE (FlagLT_UGT)) -> (MOVLconst [0])
+(SETBE (FlagGT_ULT)) -> (MOVLconst [1])
+(SETBE (FlagGT_UGT)) -> (MOVLconst [0])
+
+(SETA (FlagEQ)) -> (MOVLconst [0])
+(SETA (FlagLT_ULT)) -> (MOVLconst [0])
+(SETA (FlagLT_UGT)) -> (MOVLconst [1])
+(SETA (FlagGT_ULT)) -> (MOVLconst [0])
+(SETA (FlagGT_UGT)) -> (MOVLconst [1])
+
+(SETAE (FlagEQ)) -> (MOVLconst [1])
+(SETAE (FlagLT_ULT)) -> (MOVLconst [0])
+(SETAE (FlagLT_UGT)) -> (MOVLconst [1])
+(SETAE (FlagGT_ULT)) -> (MOVLconst [0])
+(SETAE (FlagGT_UGT)) -> (MOVLconst [1])
+
+// Remove redundant *const ops
+(ADDLconst [c] x) && int32(c)==0 -> x
+(SUBLconst [c] x) && int32(c) == 0 -> x
+(ANDLconst [c] _) && int32(c)==0 -> (MOVLconst [0])
+(ANDLconst [c] x) && int32(c)==-1 -> x
+(ORLconst [c] x) && int32(c)==0 -> x
+(ORLconst [c] _) && int32(c)==-1 -> (MOVLconst [-1])
+(XORLconst [c] x) && int32(c)==0 -> x
+// TODO: since we got rid of the W/B versions, we might miss
+// things like (ANDLconst [0x100] x) which were formerly
+// (ANDBconst [0] x). Probably doesn't happen very often.
+// If we cared, we might do:
+// (ANDLconst <t> [c] x) && t.Size()==1 && int8(c)==0 -> (MOVLconst [0])
+
+// Convert constant subtracts to constant adds
+(SUBLconst [c] x) -> (ADDLconst [int64(int32(-c))] x)
+
+// generic constant folding
+// TODO: more of this
+(ADDLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c+d))])
+(ADDLconst [c] (ADDLconst [d] x)) -> (ADDLconst [int64(int32(c+d))] x)
+(SARLconst [c] (MOVLconst [d])) -> (MOVLconst [d>>uint64(c)])
+(SARWconst [c] (MOVLconst [d])) -> (MOVLconst [d>>uint64(c)])
+(SARBconst [c] (MOVLconst [d])) -> (MOVLconst [d>>uint64(c)])
+(NEGL (MOVLconst [c])) -> (MOVLconst [int64(int32(-c))])
+(MULLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c*d))])
+(ANDLconst [c] (MOVLconst [d])) -> (MOVLconst [c&d])
+(ORLconst [c] (MOVLconst [d])) -> (MOVLconst [c|d])
+(XORLconst [c] (MOVLconst [d])) -> (MOVLconst [c^d])
+(NOTL (MOVLconst [c])) -> (MOVLconst [^c])
+
+// generic simplifications
+// TODO: more of this
+(ADDL x (NEGL y)) -> (SUBL x y)
+(SUBL x x) -> (MOVLconst [0])
+(ANDL x x) -> x
+(ORL x x) -> x
+(XORL x x) -> (MOVLconst [0])
+
+// checking AND against 0.
+(CMPLconst (ANDL x y) [0]) -> (TESTL x y)
+(CMPWconst (ANDL x y) [0]) -> (TESTW x y)
+(CMPBconst (ANDL x y) [0]) -> (TESTB x y)
+(CMPLconst (ANDLconst [c] x) [0]) -> (TESTLconst [c] x)
+(CMPWconst (ANDLconst [c] x) [0]) -> (TESTWconst [int64(int16(c))] x)
+(CMPBconst (ANDLconst [c] x) [0]) -> (TESTBconst [int64(int8(c))] x)
+
+// TEST %reg,%reg is shorter than CMP
+(CMPLconst x [0]) -> (TESTL x x)
+(CMPWconst x [0]) -> (TESTW x x)
+(CMPBconst x [0]) -> (TESTB x x)
+
+// Combining byte loads into larger (unaligned) loads.
+// There are many ways these combinations could occur. This is
+// designed to match the way encoding/binary.LittleEndian does it.
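+// A sketch of the kind of Go source these rules target (load32 is a
+// hypothetical helper mirroring encoding/binary.LittleEndian.Uint32):
+//
+//	func load32(b []byte) uint32 {
+//		return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+//	}
+//
+// The rules below collapse such byte loads plus shifted ORs into a single
+// MOVWload or MOVLload.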
+(ORL x0:(MOVBload [i] {s} p mem)
+ s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem)))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0)
+ && clobber(x1)
+ && clobber(s0)
+ -> @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem)
+
+(ORL o0:(ORL o1:(ORL
+ x0:(MOVBload [i] {s} p mem)
+ s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem)))
+ s1:(SHLLconst [16] x2:(MOVBload [i+2] {s} p mem)))
+ s2:(SHLLconst [24] x3:(MOVBload [i+3] {s} p mem)))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && x3.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && s2.Uses == 1
+ && o0.Uses == 1
+ && o1.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3) != nil
+ && clobber(x0)
+ && clobber(x1)
+ && clobber(x2)
+ && clobber(x3)
+ && clobber(s0)
+ && clobber(s1)
+ && clobber(s2)
+ && clobber(o0)
+ && clobber(o1)
+ -> @mergePoint(b,x0,x1,x2,x3) (MOVLload [i] {s} p mem)
+
+(ORL x0:(MOVBloadidx1 [i] {s} p idx mem)
+ s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0)
+ && clobber(x1)
+ && clobber(s0)
+ -> @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i] {s} p idx mem)
+
+(ORL o0:(ORL o1:(ORL
+ x0:(MOVBloadidx1 [i] {s} p idx mem)
+ s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
+ s1:(SHLLconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem)))
+ s2:(SHLLconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem)))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && x3.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && s2.Uses == 1
+ && o0.Uses == 1
+ && o1.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3) != nil
+ && clobber(x0)
+ && clobber(x1)
+ && clobber(x2)
+ && clobber(x3)
+ && clobber(s0)
+ && clobber(s1)
+ && clobber(s2)
+ && clobber(o0)
+ && clobber(o1)
+ -> @mergePoint(b,x0,x1,x2,x3) (MOVLloadidx1 <v.Type> [i] {s} p idx mem)
+
+// Combine constant stores into larger (unaligned) stores.
+(MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
+ && x.Uses == 1
+ && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
+ && clobber(x)
+ -> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
+(MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
+ && x.Uses == 1
+ && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
+ && clobber(x)
+ -> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
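+// Worked example: storing byte 0x12 at offset 0 and byte 0x34 at offset 1
+// combines into (MOVWstoreconst [makeValAndOff(0x3412, 0)] ...), which writes
+// the same two bytes in little-endian order with a single instruction.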
+
+(MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
+ && x.Uses == 1
+ && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()
+ && clobber(x)
+ -> (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
+(MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
+ && x.Uses == 1
+ && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
+ && clobber(x)
+ -> (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
+
+(MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
+ && x.Uses == 1
+ && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
+ && clobber(x)
+ -> (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLLconst <i.Type> [1] i) mem)
+
+// Combine stores into larger (unaligned) stores.
+(MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
+ && x.Uses == 1
+ && clobber(x)
+ -> (MOVWstore [i-1] {s} p w mem)
+(MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ -> (MOVWstore [i-1] {s} p w0 mem)
+(MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
+ && x.Uses == 1
+ && clobber(x)
+ -> (MOVLstore [i-2] {s} p w mem)
+(MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ -> (MOVLstore [i-2] {s} p w0 mem)
+
+(MOVBstoreidx1 [i] {s} p idx (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
+ && x.Uses == 1
+ && clobber(x)
+ -> (MOVWstoreidx1 [i-1] {s} p idx w mem)
+(MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRLconst [j-8] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ -> (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
+(MOVWstoreidx1 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
+ && x.Uses == 1
+ && clobber(x)
+ -> (MOVLstoreidx1 [i-2] {s} p idx w mem)
+(MOVWstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ -> (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
+
+(MOVWstoreidx2 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
+ && x.Uses == 1
+ && clobber(x)
+ -> (MOVLstoreidx1 [i-2] {s} p (SHLLconst <idx.Type> [1] idx) w mem)
+(MOVWstoreidx2 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ -> (MOVLstoreidx1 [i-2] {s} p (SHLLconst <idx.Type> [1] idx) w0 mem)
+
+// For PIC, break floating-point constant loading into two instructions so we have
+// a register to use for holding the address of the constant pool entry.
+(MOVSSconst [c]) && config.ctxt.Flag_shared -> (MOVSSconst2 (MOVSSconst1 [c]))
+(MOVSDconst [c]) && config.ctxt.Flag_shared -> (MOVSDconst2 (MOVSDconst1 [c]))
diff --git a/src/cmd/compile/internal/ssa/gen/386Ops.go b/src/cmd/compile/internal/ssa/gen/386Ops.go
new file mode 100644
index 0000000000..1013adf4a6
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/386Ops.go
@@ -0,0 +1,508 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - Floating-point types live in the low natural slot of an sse2 register.
+// Unused portions are junk.
+// - We do not use AH,BH,CH,DH registers.
+// - When doing sub-register operations, we try to write the whole
+// destination register to avoid a partial-register write.
+// - Unused portions of AuxInt (or the Val portion of ValAndOff) are
+// filled by sign-extending the used portion. Users of AuxInt which interpret
+// AuxInt as unsigned (e.g. shifts) must be careful.
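+// For example, an 8-bit value 0x80 held in AuxInt reads back as -128; any
+// use that wants the unsigned byte value must mask with 0xff first.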
+
+// Suffixes encode the bit width of various instructions.
+// L (long word) = 32 bit
+// W (word) = 16 bit
+// B (byte) = 8 bit
+
+// copied from ../../x86/reg.go
+var regNames386 = []string{
+ "AX",
+ "CX",
+ "DX",
+ "BX",
+ "SP",
+ "BP",
+ "SI",
+ "DI",
+ "X0",
+ "X1",
+ "X2",
+ "X3",
+ "X4",
+ "X5",
+ "X6",
+ "X7",
+
+ // pseudo-registers
+ "SB",
+}
+
+// Notes on 387 support.
+// - The 387 has a weird stack-register setup for floating-point registers.
+// We use these registers when SSE registers are not available (when GO386=387).
+// - We use the same register names (X0-X7) but they refer to the 387
+// floating-point registers. That way, most of the SSA backend is unchanged.
+// - The instruction generation pass maintains an SSE->387 register mapping.
+// This mapping is updated whenever the FP stack is pushed or popped so that
+// we can always find a given SSE register even when the TOS pointer has changed.
+// - To facilitate the mapping from SSE to 387, we enforce that
+// every basic block starts and ends with an empty floating-point stack.
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNames386) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNames386 {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ // Common individual register masks
+ var (
+ ax = buildReg("AX")
+ cx = buildReg("CX")
+ dx = buildReg("DX")
+ gp = buildReg("AX CX DX BX BP SI DI")
+ fp = buildReg("X0 X1 X2 X3 X4 X5 X6 X7")
+ x7 = buildReg("X7")
+ gpsp = gp | buildReg("SP")
+ gpspsb = gpsp | buildReg("SB")
+ callerSave = gp | fp
+ )
+ // Common slices of register masks
+ var (
+ gponly = []regMask{gp}
+ fponly = []regMask{fp}
+ )
+
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: nil, outputs: gponly}
+ gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly}
+ gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly}
+ gp11sb = regInfo{inputs: []regMask{gpspsb}, outputs: gponly}
+ gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+ gp11carry = regInfo{inputs: []regMask{gp}, outputs: []regMask{0, gp}}
+ gp21carry = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{0, gp}}
+ gp1carry1 = regInfo{inputs: []regMask{gp}, outputs: gponly}
+ gp2carry1 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+ gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly}
+ gp21sb = regInfo{inputs: []regMask{gpspsb, gpsp}, outputs: gponly}
+ gp21shift = regInfo{inputs: []regMask{gp, cx}, outputs: []regMask{gp}}
+ gp11div = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{ax}, clobbers: dx}
+ gp21hmul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx}, clobbers: ax}
+ gp11mod = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{dx}, clobbers: ax}
+ gp21mul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx, ax}}
+
+ gp2flags = regInfo{inputs: []regMask{gpsp, gpsp}}
+ gp1flags = regInfo{inputs: []regMask{gpsp}}
+ flagsgp = regInfo{inputs: nil, outputs: gponly}
+
+ readflags = regInfo{inputs: nil, outputs: gponly}
+ flagsgpax = regInfo{inputs: nil, clobbers: ax, outputs: []regMask{gp &^ ax}}
+
+ gpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: gponly}
+ gploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: gponly}
+
+ gpstore = regInfo{inputs: []regMask{gpspsb, gpsp, 0}}
+ gpstoreconst = regInfo{inputs: []regMask{gpspsb, 0}}
+ gpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, gpsp, 0}}
+ gpstoreconstidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}}
+
+ fp01 = regInfo{inputs: nil, outputs: fponly}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: fponly}
+ fp21x7 = regInfo{inputs: []regMask{fp &^ x7, fp &^ x7},
+ clobbers: x7, outputs: []regMask{fp &^ x7}}
+ fpgp = regInfo{inputs: fponly, outputs: gponly}
+ gpfp = regInfo{inputs: gponly, outputs: fponly}
+ fp11 = regInfo{inputs: fponly, outputs: fponly}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+
+ fpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: fponly}
+ fploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: fponly}
+
+ fpstore = regInfo{inputs: []regMask{gpspsb, fp, 0}}
+ fpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, fp, 0}}
+ )
+
+ var _386ops = []opData{
+ // fp ops
+ {name: "ADDSS", argLength: 2, reg: fp21, asm: "ADDSS", commutative: true, resultInArg0: true}, // fp32 add
+ {name: "ADDSD", argLength: 2, reg: fp21, asm: "ADDSD", commutative: true, resultInArg0: true}, // fp64 add
+ {name: "SUBSS", argLength: 2, reg: fp21x7, asm: "SUBSS", resultInArg0: true}, // fp32 sub
+ {name: "SUBSD", argLength: 2, reg: fp21x7, asm: "SUBSD", resultInArg0: true}, // fp64 sub
+ {name: "MULSS", argLength: 2, reg: fp21, asm: "MULSS", commutative: true, resultInArg0: true}, // fp32 mul
+ {name: "MULSD", argLength: 2, reg: fp21, asm: "MULSD", commutative: true, resultInArg0: true}, // fp64 mul
+ {name: "DIVSS", argLength: 2, reg: fp21x7, asm: "DIVSS", resultInArg0: true}, // fp32 div
+ {name: "DIVSD", argLength: 2, reg: fp21x7, asm: "DIVSD", resultInArg0: true}, // fp64 div
+
+ {name: "MOVSSload", argLength: 2, reg: fpload, asm: "MOVSS", aux: "SymOff"}, // fp32 load
+ {name: "MOVSDload", argLength: 2, reg: fpload, asm: "MOVSD", aux: "SymOff"}, // fp64 load
+ {name: "MOVSSconst", reg: fp01, asm: "MOVSS", aux: "Float32", rematerializeable: true}, // fp32 constant
+ {name: "MOVSDconst", reg: fp01, asm: "MOVSD", aux: "Float64", rematerializeable: true}, // fp64 constant
+ {name: "MOVSSloadidx1", argLength: 3, reg: fploadidx, asm: "MOVSS", aux: "SymOff"}, // fp32 load indexed by i
+ {name: "MOVSSloadidx4", argLength: 3, reg: fploadidx, asm: "MOVSS", aux: "SymOff"}, // fp32 load indexed by 4*i
+ {name: "MOVSDloadidx1", argLength: 3, reg: fploadidx, asm: "MOVSD", aux: "SymOff"}, // fp64 load indexed by i
+ {name: "MOVSDloadidx8", argLength: 3, reg: fploadidx, asm: "MOVSD", aux: "SymOff"}, // fp64 load indexed by 8*i
+
+ {name: "MOVSSstore", argLength: 3, reg: fpstore, asm: "MOVSS", aux: "SymOff"}, // fp32 store
+ {name: "MOVSDstore", argLength: 3, reg: fpstore, asm: "MOVSD", aux: "SymOff"}, // fp64 store
+ {name: "MOVSSstoreidx1", argLength: 4, reg: fpstoreidx, asm: "MOVSS", aux: "SymOff"}, // fp32 indexed by i store
+ {name: "MOVSSstoreidx4", argLength: 4, reg: fpstoreidx, asm: "MOVSS", aux: "SymOff"}, // fp32 indexed by 4i store
+ {name: "MOVSDstoreidx1", argLength: 4, reg: fpstoreidx, asm: "MOVSD", aux: "SymOff"}, // fp64 indexed by i store
+ {name: "MOVSDstoreidx8", argLength: 4, reg: fpstoreidx, asm: "MOVSD", aux: "SymOff"}, // fp64 indexed by 8i store
+
+ // binary ops
+ {name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true, clobberFlags: true}, // arg0 + arg1
+ {name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32", typ: "UInt32", clobberFlags: true}, // arg0 + auxint
+
+ {name: "ADDLcarry", argLength: 2, reg: gp21carry, asm: "ADDL", commutative: true, resultInArg0: true}, // arg0 + arg1, generates <carry,result> pair
+ {name: "ADDLconstcarry", argLength: 1, reg: gp11carry, asm: "ADDL", aux: "Int32", resultInArg0: true}, // arg0 + auxint, generates <carry,result> pair
+ {name: "ADCL", argLength: 3, reg: gp2carry1, asm: "ADCL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0+arg1+carry(arg2), where arg2 is flags
+ {name: "ADCLconst", argLength: 2, reg: gp1carry1, asm: "ADCL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0+auxint+carry(arg1), where arg1 is flags
+
+ {name: "SUBL", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true, clobberFlags: true}, // arg0 - arg1
+ {name: "SUBLconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint
+
+ {name: "SUBLcarry", argLength: 2, reg: gp21carry, asm: "SUBL", resultInArg0: true}, // arg0-arg1, generates <borrow,result> pair
+ {name: "SUBLconstcarry", argLength: 1, reg: gp11carry, asm: "SUBL", aux: "Int32", resultInArg0: true}, // arg0-auxint, generates <borrow,result> pair
+ {name: "SBBL", argLength: 3, reg: gp2carry1, asm: "SBBL", resultInArg0: true, clobberFlags: true}, // arg0-arg1-borrow(arg2), where arg2 is flags
+ {name: "SBBLconst", argLength: 2, reg: gp1carry1, asm: "SBBL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0-auxint-borrow(arg1), where arg1 is flags
+
+ {name: "MULL", argLength: 2, reg: gp21, asm: "IMULL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1
+ {name: "MULLconst", argLength: 1, reg: gp11, asm: "IMULL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 * auxint
+
+ {name: "HMULL", argLength: 2, reg: gp21hmul, asm: "IMULL", clobberFlags: true}, // (arg0 * arg1) >> width
+ {name: "HMULLU", argLength: 2, reg: gp21hmul, asm: "MULL", clobberFlags: true}, // (arg0 * arg1) >> width
+ {name: "HMULW", argLength: 2, reg: gp21hmul, asm: "IMULW", clobberFlags: true}, // (arg0 * arg1) >> width
+ {name: "HMULB", argLength: 2, reg: gp21hmul, asm: "IMULB", clobberFlags: true}, // (arg0 * arg1) >> width
+ {name: "HMULWU", argLength: 2, reg: gp21hmul, asm: "MULW", clobberFlags: true}, // (arg0 * arg1) >> width
+ {name: "HMULBU", argLength: 2, reg: gp21hmul, asm: "MULB", clobberFlags: true}, // (arg0 * arg1) >> width
+
+ {name: "MULLQU", argLength: 2, reg: gp21mul, asm: "MULL", clobberFlags: true}, // arg0 * arg1, high 32 in result[0], low 32 in result[1]
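
MULLQU is the widening 32x32->64 multiply that the 64-bit multiply decomposition needs. math/bits.Mul32 computes the same <high, low> pair; a small illustrative program:

    package main

    import (
        "fmt"
        "math/bits"
    )

    func main() {
        // The same split MULLQU produces: high 32 bits in result[0],
        // low 32 bits in result[1].
        hi, lo := bits.Mul32(0xFFFFFFFF, 0xFFFFFFFF)
        fmt.Printf("hi=%#x lo=%#x\n", hi, lo) // hi=0xfffffffe lo=0x1
    }
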
+
+ {name: "DIVL", argLength: 2, reg: gp11div, asm: "IDIVL", clobberFlags: true}, // arg0 / arg1
+ {name: "DIVW", argLength: 2, reg: gp11div, asm: "IDIVW", clobberFlags: true}, // arg0 / arg1
+ {name: "DIVLU", argLength: 2, reg: gp11div, asm: "DIVL", clobberFlags: true}, // arg0 / arg1
+ {name: "DIVWU", argLength: 2, reg: gp11div, asm: "DIVW", clobberFlags: true}, // arg0 / arg1
+
+ {name: "MODL", argLength: 2, reg: gp11mod, asm: "IDIVL", clobberFlags: true}, // arg0 % arg1
+ {name: "MODW", argLength: 2, reg: gp11mod, asm: "IDIVW", clobberFlags: true}, // arg0 % arg1
+ {name: "MODLU", argLength: 2, reg: gp11mod, asm: "DIVL", clobberFlags: true}, // arg0 % arg1
+ {name: "MODWU", argLength: 2, reg: gp11mod, asm: "DIVW", clobberFlags: true}, // arg0 % arg1
+
+ {name: "ANDL", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1
+ {name: "ANDLconst", argLength: 1, reg: gp11, asm: "ANDL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
+
+ {name: "ORL", argLength: 2, reg: gp21, asm: "ORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1
+ {name: "ORLconst", argLength: 1, reg: gp11, asm: "ORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
+
+ {name: "XORL", argLength: 2, reg: gp21, asm: "XORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1
+ {name: "XORLconst", argLength: 1, reg: gp11, asm: "XORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
+
+ {name: "CMPL", argLength: 2, reg: gp2flags, asm: "CMPL", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPB", argLength: 2, reg: gp2flags, asm: "CMPB", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPLconst", argLength: 1, reg: gp1flags, asm: "CMPL", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
+ {name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", typ: "Flags", aux: "Int16"}, // arg0 compare to auxint
+ {name: "CMPBconst", argLength: 1, reg: gp1flags, asm: "CMPB", typ: "Flags", aux: "Int8"}, // arg0 compare to auxint
+
+ {name: "UCOMISS", argLength: 2, reg: fp2flags, asm: "UCOMISS", typ: "Flags"}, // arg0 compare to arg1, f32
+ {name: "UCOMISD", argLength: 2, reg: fp2flags, asm: "UCOMISD", typ: "Flags"}, // arg0 compare to arg1, f64
+
+ {name: "TESTL", argLength: 2, reg: gp2flags, asm: "TESTL", typ: "Flags"}, // (arg0 & arg1) compare to 0
+ {name: "TESTW", argLength: 2, reg: gp2flags, asm: "TESTW", typ: "Flags"}, // (arg0 & arg1) compare to 0
+ {name: "TESTB", argLength: 2, reg: gp2flags, asm: "TESTB", typ: "Flags"}, // (arg0 & arg1) compare to 0
+ {name: "TESTLconst", argLength: 1, reg: gp1flags, asm: "TESTL", typ: "Flags", aux: "Int32"}, // (arg0 & auxint) compare to 0
+ {name: "TESTWconst", argLength: 1, reg: gp1flags, asm: "TESTW", typ: "Flags", aux: "Int16"}, // (arg0 & auxint) compare to 0
+ {name: "TESTBconst", argLength: 1, reg: gp1flags, asm: "TESTB", typ: "Flags", aux: "Int8"}, // (arg0 & auxint) compare to 0
+
+ {name: "SHLL", argLength: 2, reg: gp21shift, asm: "SHLL", resultInArg0: true, clobberFlags: true}, // arg0 << arg1, shift amount is mod 32
+ {name: "SHLLconst", argLength: 1, reg: gp11, asm: "SHLL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 << auxint, shift amount 0-31
+	// Note: x86 is weird, the 16- and 8-bit shifts still use all 5 bits of shift amount!
+
+ {name: "SHRL", argLength: 2, reg: gp21shift, asm: "SHRL", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32
+ {name: "SHRW", argLength: 2, reg: gp21shift, asm: "SHRW", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32
+ {name: "SHRB", argLength: 2, reg: gp21shift, asm: "SHRB", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32
+ {name: "SHRLconst", argLength: 1, reg: gp11, asm: "SHRL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-31
+ {name: "SHRWconst", argLength: 1, reg: gp11, asm: "SHRW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-31
+ {name: "SHRBconst", argLength: 1, reg: gp11, asm: "SHRB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-31
+
+ {name: "SARL", argLength: 2, reg: gp21shift, asm: "SARL", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32
+ {name: "SARW", argLength: 2, reg: gp21shift, asm: "SARW", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32
+ {name: "SARB", argLength: 2, reg: gp21shift, asm: "SARB", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32
+ {name: "SARLconst", argLength: 1, reg: gp11, asm: "SARL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-31
+ {name: "SARWconst", argLength: 1, reg: gp11, asm: "SARW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-31
+ {name: "SARBconst", argLength: 1, reg: gp11, asm: "SARB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-31
+
+ {name: "ROLLconst", argLength: 1, reg: gp11, asm: "ROLL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-31
+ {name: "ROLWconst", argLength: 1, reg: gp11, asm: "ROLW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-15
+ {name: "ROLBconst", argLength: 1, reg: gp11, asm: "ROLB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-7
+
+ // unary ops
+ {name: "NEGL", argLength: 1, reg: gp11, asm: "NEGL", resultInArg0: true, clobberFlags: true}, // -arg0
+
+ {name: "NOTL", argLength: 1, reg: gp11, asm: "NOTL", resultInArg0: true, clobberFlags: true}, // ^arg0
+
+	{name: "BSFL", argLength: 1, reg: gp11, asm: "BSFL", clobberFlags: true}, // index of arg0's lowest set bit (== # of low-order zeroes); undef if zero
+	{name: "BSFW", argLength: 1, reg: gp11, asm: "BSFW", clobberFlags: true}, // index of arg0's lowest set bit (== # of low-order zeroes); undef if zero
+
+	{name: "BSRL", argLength: 1, reg: gp11, asm: "BSRL", clobberFlags: true}, // index of arg0's highest set bit; undef if zero
+	{name: "BSRW", argLength: 1, reg: gp11, asm: "BSRW", clobberFlags: true}, // index of arg0's highest set bit; undef if zero
+
+ {name: "BSWAPL", argLength: 1, reg: gp11, asm: "BSWAPL", resultInArg0: true, clobberFlags: true}, // arg0 swap bytes
+
+ {name: "SQRTSD", argLength: 1, reg: fp11, asm: "SQRTSD"}, // sqrt(arg0)
+
+ {name: "SBBLcarrymask", argLength: 1, reg: flagsgp, asm: "SBBL"}, // (int32)(-1) if carry is set, 0 if carry is clear.
+ // Note: SBBW and SBBB are subsumed by SBBL
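
SBBLcarrymask relies on the classic subtract-with-borrow-from-itself trick: the result is all ones when the carry flag is set and all zeros when it is clear. A sketch of the mask it materializes, in plain Go:

    package main

    import "fmt"

    // carryMask mirrors SBBLcarrymask: two's-complement negation turns the
    // carry bit into an all-ones (set) or all-zeros (clear) mask.
    func carryMask(carry uint32) uint32 {
        return -(carry & 1)
    }

    func main() {
        fmt.Printf("%#x %#x\n", carryMask(1), carryMask(0)) // 0xffffffff 0x0
    }
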
+
+ {name: "SETEQ", argLength: 1, reg: readflags, asm: "SETEQ"}, // extract == condition from arg0
+ {name: "SETNE", argLength: 1, reg: readflags, asm: "SETNE"}, // extract != condition from arg0
+ {name: "SETL", argLength: 1, reg: readflags, asm: "SETLT"}, // extract signed < condition from arg0
+ {name: "SETLE", argLength: 1, reg: readflags, asm: "SETLE"}, // extract signed <= condition from arg0
+ {name: "SETG", argLength: 1, reg: readflags, asm: "SETGT"}, // extract signed > condition from arg0
+ {name: "SETGE", argLength: 1, reg: readflags, asm: "SETGE"}, // extract signed >= condition from arg0
+ {name: "SETB", argLength: 1, reg: readflags, asm: "SETCS"}, // extract unsigned < condition from arg0
+ {name: "SETBE", argLength: 1, reg: readflags, asm: "SETLS"}, // extract unsigned <= condition from arg0
+ {name: "SETA", argLength: 1, reg: readflags, asm: "SETHI"}, // extract unsigned > condition from arg0
+ {name: "SETAE", argLength: 1, reg: readflags, asm: "SETCC"}, // extract unsigned >= condition from arg0
+ // Need different opcodes for floating point conditions because
+ // any comparison involving a NaN is always FALSE and thus
+ // the patterns for inverting conditions cannot be used.
+ {name: "SETEQF", argLength: 1, reg: flagsgpax, asm: "SETEQ", clobberFlags: true}, // extract == condition from arg0
+ {name: "SETNEF", argLength: 1, reg: flagsgpax, asm: "SETNE", clobberFlags: true}, // extract != condition from arg0
+	{name: "SETORD", argLength: 1, reg: flagsgp, asm: "SETPC"}, // extract "ordered" (no NaN present) condition from arg0
+	{name: "SETNAN", argLength: 1, reg: flagsgp, asm: "SETPS"}, // extract "unordered" (NaN present) condition from arg0
+
+ {name: "SETGF", argLength: 1, reg: flagsgp, asm: "SETHI"}, // extract floating > condition from arg0
+ {name: "SETGEF", argLength: 1, reg: flagsgp, asm: "SETCC"}, // extract floating >= condition from arg0
+
+ {name: "MOVBLSX", argLength: 1, reg: gp11, asm: "MOVBLSX"}, // sign extend arg0 from int8 to int32
+ {name: "MOVBLZX", argLength: 1, reg: gp11, asm: "MOVBLZX"}, // zero extend arg0 from int8 to int32
+ {name: "MOVWLSX", argLength: 1, reg: gp11, asm: "MOVWLSX"}, // sign extend arg0 from int16 to int32
+ {name: "MOVWLZX", argLength: 1, reg: gp11, asm: "MOVWLZX"}, // zero extend arg0 from int16 to int32
+
+ {name: "MOVLconst", reg: gp01, asm: "MOVL", typ: "UInt32", aux: "Int32", rematerializeable: true}, // 32 low bits of auxint
+
+ {name: "CVTTSD2SL", argLength: 1, reg: fpgp, asm: "CVTTSD2SL"}, // convert float64 to int32
+ {name: "CVTTSS2SL", argLength: 1, reg: fpgp, asm: "CVTTSS2SL"}, // convert float32 to int32
+ {name: "CVTSL2SS", argLength: 1, reg: gpfp, asm: "CVTSL2SS"}, // convert int32 to float32
+ {name: "CVTSL2SD", argLength: 1, reg: gpfp, asm: "CVTSL2SD"}, // convert int32 to float64
+ {name: "CVTSD2SS", argLength: 1, reg: fp11, asm: "CVTSD2SS"}, // convert float64 to float32
+ {name: "CVTSS2SD", argLength: 1, reg: fp11, asm: "CVTSS2SD"}, // convert float32 to float64
+
+ {name: "PXOR", argLength: 2, reg: fp21, asm: "PXOR", commutative: true, resultInArg0: true}, // exclusive or, applied to X regs for float negation.
+
+ {name: "LEAL", argLength: 1, reg: gp11sb, aux: "SymOff", rematerializeable: true}, // arg0 + auxint + offset encoded in aux
+ {name: "LEAL1", argLength: 2, reg: gp21sb, aux: "SymOff"}, // arg0 + arg1 + auxint + aux
+ {name: "LEAL2", argLength: 2, reg: gp21sb, aux: "SymOff"}, // arg0 + 2*arg1 + auxint + aux
+ {name: "LEAL4", argLength: 2, reg: gp21sb, aux: "SymOff"}, // arg0 + 4*arg1 + auxint + aux
+ {name: "LEAL8", argLength: 2, reg: gp21sb, aux: "SymOff"}, // arg0 + 8*arg1 + auxint + aux
+ // Note: LEAL{1,2,4,8} must not have OpSB as either argument.
+
+ // auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address
+ {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVBLZX", aux: "SymOff", typ: "UInt8"}, // load byte from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVBLSXload", argLength: 2, reg: gpload, asm: "MOVBLSX", aux: "SymOff"}, // ditto, sign extend to int32
+ {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVWLZX", aux: "SymOff", typ: "UInt16"}, // load 2 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVWLSXload", argLength: 2, reg: gpload, asm: "MOVWLSX", aux: "SymOff"}, // ditto, sign extend to int32
+ {name: "MOVLload", argLength: 2, reg: gpload, asm: "MOVL", aux: "SymOff", typ: "UInt32"}, // load 4 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem"}, // store byte in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem"}, // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVLstore", argLength: 3, reg: gpstore, asm: "MOVL", aux: "SymOff", typ: "Mem"}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem
+
+ // indexed loads/stores
+ {name: "MOVBloadidx1", argLength: 3, reg: gploadidx, asm: "MOVBLZX", aux: "SymOff"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem
+ {name: "MOVWloadidx1", argLength: 3, reg: gploadidx, asm: "MOVWLZX", aux: "SymOff"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem
+ {name: "MOVWloadidx2", argLength: 3, reg: gploadidx, asm: "MOVWLZX", aux: "SymOff"}, // load 2 bytes from arg0+2*arg1+auxint+aux. arg2=mem
+ {name: "MOVLloadidx1", argLength: 3, reg: gploadidx, asm: "MOVL", aux: "SymOff"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem
+ {name: "MOVLloadidx4", argLength: 3, reg: gploadidx, asm: "MOVL", aux: "SymOff"}, // load 4 bytes from arg0+4*arg1+auxint+aux. arg2=mem
+ // TODO: sign-extending indexed loads
+ {name: "MOVBstoreidx1", argLength: 4, reg: gpstoreidx, asm: "MOVB", aux: "SymOff"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVWstoreidx1", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVWstoreidx2", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff"}, // store 2 bytes in arg2 to arg0+2*arg1+auxint+aux. arg3=mem
+ {name: "MOVLstoreidx1", argLength: 4, reg: gpstoreidx, asm: "MOVL", aux: "SymOff"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVLstoreidx4", argLength: 4, reg: gpstoreidx, asm: "MOVL", aux: "SymOff"}, // store 4 bytes in arg2 to arg0+4*arg1+auxint+aux. arg3=mem
+	// TODO: add size-mismatched indexed loads and stores, like MOVBstoreidx4.
+
+ // For storeconst ops, the AuxInt field encodes both
+ // the value to store and an address offset of the store.
+ // Cast AuxInt to a ValAndOff to extract Val and Off fields.
+ {name: "MOVBstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVB", aux: "SymValAndOff", typ: "Mem"}, // store low byte of ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux. arg1=mem
+ {name: "MOVWstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVW", aux: "SymValAndOff", typ: "Mem"}, // store low 2 bytes of ...
+ {name: "MOVLstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVL", aux: "SymValAndOff", typ: "Mem"}, // store low 4 bytes of ...
+
+ {name: "MOVBstoreconstidx1", argLength: 3, reg: gpstoreconstidx, asm: "MOVB", aux: "SymValAndOff", typ: "Mem"}, // store low byte of ValAndOff(AuxInt).Val() to arg0+1*arg1+ValAndOff(AuxInt).Off()+aux. arg2=mem
+ {name: "MOVWstoreconstidx1", argLength: 3, reg: gpstoreconstidx, asm: "MOVW", aux: "SymValAndOff", typ: "Mem"}, // store low 2 bytes of ... arg1 ...
+ {name: "MOVWstoreconstidx2", argLength: 3, reg: gpstoreconstidx, asm: "MOVW", aux: "SymValAndOff", typ: "Mem"}, // store low 2 bytes of ... 2*arg1 ...
+ {name: "MOVLstoreconstidx1", argLength: 3, reg: gpstoreconstidx, asm: "MOVL", aux: "SymValAndOff", typ: "Mem"}, // store low 4 bytes of ... arg1 ...
+ {name: "MOVLstoreconstidx4", argLength: 3, reg: gpstoreconstidx, asm: "MOVL", aux: "SymValAndOff", typ: "Mem"}, // store low 4 bytes of ... 4*arg1 ...
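
A minimal sketch of a val+off packing in the spirit of ValAndOff described above. The layout (value in the high 32 bits, offset in the low 32) and the helper names are assumptions for illustration; the real type lives elsewhere in this package:

    package main

    import "fmt"

    // valAndOff packs a 32-bit value and a 32-bit offset into one int64
    // auxint: value in the high half, offset in the low half (a sketch;
    // consult the ssa package for the real layout).
    type valAndOff int64

    func makeValAndOff(val, off int32) valAndOff {
        return valAndOff(int64(val)<<32 | int64(uint32(off)))
    }

    func (x valAndOff) Val() int32 { return int32(int64(x) >> 32) }
    func (x valAndOff) Off() int32 { return int32(x) }

    func main() {
        v := makeValAndOff(0, 8)      // the makeValAndOff(0,8) used by the Zero rules
        fmt.Println(v.Val(), v.Off()) // 0 8
    }
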
+
+ // arg0 = pointer to start of memory to zero
+ // arg1 = value to store (will always be zero)
+ // arg2 = mem
+ // auxint = offset into duffzero code to start executing
+ // returns mem
+ {
+ name: "DUFFZERO",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("AX")},
+ clobbers: buildReg("DI CX"),
+ // Note: CX is only clobbered when dynamic linking.
+ },
+ },
+
+ // arg0 = address of memory to zero
+ // arg1 = # of 4-byte words to zero
+ // arg2 = value to store (will always be zero)
+ // arg3 = mem
+ // returns mem
+ {
+ name: "REPSTOSL",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("CX"), buildReg("AX")},
+ clobbers: buildReg("DI CX"),
+ },
+ },
+
+ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("DX"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLdefer", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "Int64", clobberFlags: true}, // call deferproc. arg0=mem, auxint=argsize, returns mem
+ {name: "CALLgo", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "Int64", clobberFlags: true}, // call newproc. arg0=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // arg0 = destination pointer
+ // arg1 = source pointer
+ // arg2 = mem
+ // auxint = offset from duffcopy symbol to call
+ // returns memory
+ {
+ name: "DUFFCOPY",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("SI")},
+ clobbers: buildReg("DI SI CX"), // uses CX as a temporary
+ },
+ clobberFlags: true,
+ },
+
+ // arg0 = destination pointer
+ // arg1 = source pointer
+	// arg2 = # of 4-byte words to copy
+ // arg3 = mem
+ // returns memory
+ {
+ name: "REPMOVSL",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("SI"), buildReg("CX")},
+ clobbers: buildReg("DI SI CX"),
+ },
+ },
+
+ // (InvertFlags (CMPL a b)) == (CMPL b a)
+ // So if we want (SETL (CMPL a b)) but we can't do that because a is a constant,
+ // then we do (SETL (InvertFlags (CMPL b a))) instead.
+ // Rewrites will convert this to (SETG (CMPL b a)).
+ // InvertFlags is a pseudo-op which can't appear in assembly output.
+ {name: "InvertFlags", argLength: 1}, // reverse direction of arg0
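
Swapping the operands of a comparison reverses the direction of every condition, signed and unsigned alike (a < b exactly when b > a), which is what lets the rewriter turn (SETL (InvertFlags cmp)) into (SETG cmp). The full SETcc mapping under an operand swap, as a small illustrative table:

    package main

    import "fmt"

    // swapped maps each SETcc to its equivalent once the CMP operands are
    // exchanged: the substitution that InvertFlags stands for.
    var swapped = map[string]string{
        "SETEQ": "SETEQ", "SETNE": "SETNE",
        "SETL": "SETG", "SETLE": "SETGE", "SETG": "SETL", "SETGE": "SETLE",
        "SETB": "SETA", "SETBE": "SETAE", "SETA": "SETB", "SETAE": "SETBE",
    }

    func main() {
        fmt.Println(swapped["SETL"]) // SETG
    }
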
+
+ // Pseudo-ops
+ {name: "LoweredGetG", argLength: 1, reg: gp01}, // arg0=mem
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of DX (the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("DX")}}},
+	// arg0=ptr, arg1=mem, returns void. Faults if ptr is nil.
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpsp}}, clobberFlags: true},
+
+ // MOVLconvert converts between pointers and integers.
+ // We have a special op for this so as to not confuse GC
+ // (particularly stack maps). It takes a memory arg so it
+ // gets correctly ordered with respect to GC safepoints.
+ // arg0=ptr/int arg1=mem, output=int/ptr
+ {name: "MOVLconvert", argLength: 2, reg: gp11, asm: "MOVL"},
+
+ // Constant flag values. For any comparison, there are 5 possible
+ // outcomes: the three from the signed total order (<,==,>) and the
+ // three from the unsigned total order. The == cases overlap.
+ // Note: there's a sixth "unordered" outcome for floating-point
+ // comparisons, but we don't use such a beast yet.
+ // These ops are for temporary use by rewrite rules. They
+ // cannot appear in the generated assembly.
+ {name: "FlagEQ"}, // equal
+ {name: "FlagLT_ULT"}, // signed < and unsigned <
+ {name: "FlagLT_UGT"}, // signed < and unsigned >
+	{name: "FlagGT_UGT"}, // signed > and unsigned >
+	{name: "FlagGT_ULT"}, // signed > and unsigned <
+
+ // Special op for -x on 387
+ {name: "FCHS", argLength: 1, reg: fp11},
+
+ // Special ops for PIC floating-point constants.
+	// MOVSSconst1/MOVSDconst1 load the address of the constant-pool entry into a register.
+	// MOVSSconst2/MOVSDconst2 load the constant from that address.
+	// The const1 ops return a pointer, but we type it as uint32 because it can never point to the Go heap.
+ {name: "MOVSSconst1", reg: gp01, typ: "UInt32", aux: "Float32"},
+ {name: "MOVSDconst1", reg: gp01, typ: "UInt32", aux: "Float64"},
+ {name: "MOVSSconst2", argLength: 1, reg: gpfp, asm: "MOVSS"},
+ {name: "MOVSDconst2", argLength: 1, reg: gpfp, asm: "MOVSD"},
+ }
+
+ var _386blocks = []blockData{
+ {name: "EQ"},
+ {name: "NE"},
+ {name: "LT"},
+ {name: "LE"},
+ {name: "GT"},
+ {name: "GE"},
+ {name: "ULT"},
+ {name: "ULE"},
+ {name: "UGT"},
+ {name: "UGE"},
+ {name: "EQF"},
+ {name: "NEF"},
+ {name: "ORD"}, // FP, ordered comparison (parity zero)
+ {name: "NAN"}, // FP, unordered comparison (parity one)
+ }
+
+ archs = append(archs, arch{
+ name: "386",
+ pkg: "cmd/internal/obj/x86",
+ genfile: "../../x86/ssa.go",
+ ops: _386ops,
+ blocks: _386blocks,
+ regnames: regNames386,
+ gpregmask: gp,
+ fpregmask: fp,
+ framepointerreg: int8(num["BP"]),
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index d27eff0f6a..aa81ca7aa8 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -4,7 +4,8 @@
// Lowering arithmetic
(Add64 x y) -> (ADDQ x y)
-(AddPtr x y) -> (ADDQ x y)
+(AddPtr x y) && config.PtrSize == 8 -> (ADDQ x y)
+(AddPtr x y) && config.PtrSize == 4 -> (ADDL x y)
(Add32 x y) -> (ADDL x y)
(Add16 x y) -> (ADDL x y)
(Add8 x y) -> (ADDL x y)
@@ -12,7 +13,8 @@
(Add64F x y) -> (ADDSD x y)
(Sub64 x y) -> (SUBQ x y)
-(SubPtr x y) -> (SUBQ x y)
+(SubPtr x y) && config.PtrSize == 8 -> (SUBQ x y)
+(SubPtr x y) && config.PtrSize == 4 -> (SUBL x y)
(Sub32 x y) -> (SUBL x y)
(Sub16 x y) -> (SUBL x y)
(Sub8 x y) -> (SUBL x y)
@@ -29,14 +31,14 @@
(Div32F x y) -> (DIVSS x y)
(Div64F x y) -> (DIVSD x y)
-(Div64 x y) -> (DIVQ x y)
-(Div64u x y) -> (DIVQU x y)
-(Div32 x y) -> (DIVL x y)
-(Div32u x y) -> (DIVLU x y)
-(Div16 x y) -> (DIVW x y)
-(Div16u x y) -> (DIVWU x y)
-(Div8 x y) -> (DIVW (SignExt8to16 x) (SignExt8to16 y))
-(Div8u x y) -> (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+(Div64 x y) -> (Select0 (DIVQ x y))
+(Div64u x y) -> (Select0 (DIVQU x y))
+(Div32 x y) -> (Select0 (DIVL x y))
+(Div32u x y) -> (Select0 (DIVLU x y))
+(Div16 x y) -> (Select0 (DIVW x y))
+(Div16u x y) -> (Select0 (DIVWU x y))
+(Div8 x y) -> (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
+(Div8u x y) -> (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
(Hmul64 x y) -> (HMULQ x y)
(Hmul64u x y) -> (HMULQU x y)
@@ -49,14 +51,14 @@
(Avg64u x y) -> (AVGQU x y)
-(Mod64 x y) -> (MODQ x y)
-(Mod64u x y) -> (MODQU x y)
-(Mod32 x y) -> (MODL x y)
-(Mod32u x y) -> (MODLU x y)
-(Mod16 x y) -> (MODW x y)
-(Mod16u x y) -> (MODWU x y)
-(Mod8 x y) -> (MODW (SignExt8to16 x) (SignExt8to16 y))
-(Mod8u x y) -> (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+(Mod64 x y) -> (Select1 (DIVQ x y))
+(Mod64u x y) -> (Select1 (DIVQU x y))
+(Mod32 x y) -> (Select1 (DIVL x y))
+(Mod32u x y) -> (Select1 (DIVLU x y))
+(Mod16 x y) -> (Select1 (DIVW x y))
+(Mod16u x y) -> (Select1 (DIVWU x y))
+(Mod8 x y) -> (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
+(Mod8u x y) -> (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
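
The DIV ops now yield a <quotient, remainder> tuple, matching the hardware IDIV, which leaves the quotient in AX and the remainder in DX; Select0 and Select1 pick out whichever half a rule needs, so Div and Mod of the same operands share one divide. In Go terms:

    package main

    import "fmt"

    // divmod mirrors the tuple a lowered DIVL produces: Select0 is the
    // quotient, Select1 the remainder, both from a single divide.
    func divmod(x, y int32) (q, r int32) {
        return x / y, x % y
    }

    func main() {
        fmt.Println(divmod(17, 5)) // 3 2
    }
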
(And64 x y) -> (ANDQ x y)
(And32 x y) -> (ANDL x y)
@@ -91,8 +93,9 @@
(Not x) -> (XORLconst [1] x)
// Lowering pointer arithmetic
-(OffPtr [off] ptr) && is32Bit(off) -> (ADDQconst [off] ptr)
-(OffPtr [off] ptr) -> (ADDQ (MOVQconst [off]) ptr)
+(OffPtr [off] ptr) && config.PtrSize == 8 && is32Bit(off) -> (ADDQconst [off] ptr)
+(OffPtr [off] ptr) && config.PtrSize == 8 -> (ADDQ (MOVQconst [off]) ptr)
+(OffPtr [off] ptr) && config.PtrSize == 4 -> (ADDLconst [off] ptr)
// Lowering other arithmetic
// TODO: CMPQconst 0 below is redundant because BSF sets Z but how to remove?
@@ -270,7 +273,8 @@
(Eq16 x y) -> (SETEQ (CMPW x y))
(Eq8 x y) -> (SETEQ (CMPB x y))
(EqB x y) -> (SETEQ (CMPB x y))
-(EqPtr x y) -> (SETEQ (CMPQ x y))
+(EqPtr x y) && config.PtrSize == 8 -> (SETEQ (CMPQ x y))
+(EqPtr x y) && config.PtrSize == 4 -> (SETEQ (CMPL x y))
(Eq64F x y) -> (SETEQF (UCOMISD x y))
(Eq32F x y) -> (SETEQF (UCOMISS x y))
@@ -279,13 +283,16 @@
(Neq16 x y) -> (SETNE (CMPW x y))
(Neq8 x y) -> (SETNE (CMPB x y))
(NeqB x y) -> (SETNE (CMPB x y))
-(NeqPtr x y) -> (SETNE (CMPQ x y))
+(NeqPtr x y) && config.PtrSize == 8 -> (SETNE (CMPQ x y))
+(NeqPtr x y) && config.PtrSize == 4 -> (SETNE (CMPL x y))
(Neq64F x y) -> (SETNEF (UCOMISD x y))
(Neq32F x y) -> (SETNEF (UCOMISS x y))
+(Int64Hi x) -> (SHRQconst [32] x) // needed for amd64p32
+
// Lowering loads
-(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVQload ptr mem)
-(Load <t> ptr mem) && is32BitInt(t) -> (MOVLload ptr mem)
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t) && config.PtrSize == 8) -> (MOVQload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t) && config.PtrSize == 4) -> (MOVLload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) -> (MOVWload ptr mem)
(Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) -> (MOVBload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) -> (MOVSSload ptr mem)
@@ -302,39 +309,47 @@
(Store [1] ptr val mem) -> (MOVBstore ptr val mem)
// Lowering moves
-(Move [0] _ _ mem) -> mem
-(Move [1] dst src mem) -> (MOVBstore dst (MOVBload src mem) mem)
-(Move [2] dst src mem) -> (MOVWstore dst (MOVWload src mem) mem)
-(Move [4] dst src mem) -> (MOVLstore dst (MOVLload src mem) mem)
-(Move [8] dst src mem) -> (MOVQstore dst (MOVQload src mem) mem)
-(Move [16] dst src mem) -> (MOVOstore dst (MOVOload src mem) mem)
-(Move [3] dst src mem) ->
+(Move [s] _ _ mem) && SizeAndAlign(s).Size() == 0 -> mem
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstore dst (MOVBload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 2 -> (MOVWstore dst (MOVWload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 -> (MOVLstore dst (MOVLload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 8 -> (MOVQstore dst (MOVQload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 16 -> (MOVOstore dst (MOVOload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 3 ->
(MOVBstore [2] dst (MOVBload [2] src mem)
(MOVWstore dst (MOVWload src mem) mem))
-(Move [5] dst src mem) ->
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 5 ->
(MOVBstore [4] dst (MOVBload [4] src mem)
(MOVLstore dst (MOVLload src mem) mem))
-(Move [6] dst src mem) ->
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 6 ->
(MOVWstore [4] dst (MOVWload [4] src mem)
(MOVLstore dst (MOVLload src mem) mem))
-(Move [7] dst src mem) ->
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 7 ->
(MOVLstore [3] dst (MOVLload [3] src mem)
(MOVLstore dst (MOVLload src mem) mem))
-(Move [size] dst src mem) && size > 8 && size < 16 ->
- (MOVQstore [size-8] dst (MOVQload [size-8] src mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16 ->
+ (MOVQstore [SizeAndAlign(s).Size()-8] dst (MOVQload [SizeAndAlign(s).Size()-8] src mem)
(MOVQstore dst (MOVQload src mem) mem))
// Adjust moves to be a multiple of 16 bytes.
-(Move [size] dst src mem) && size > 16 && size%16 != 0 && size%16 <= 8 ->
- (Move [size-size%16] (ADDQconst <dst.Type> dst [size%16]) (ADDQconst <src.Type> src [size%16])
+(Move [s] dst src mem)
+ && SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8 ->
+ (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16]
+ (OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16])
+ (OffPtr <src.Type> src [SizeAndAlign(s).Size()%16])
(MOVQstore dst (MOVQload src mem) mem))
-(Move [size] dst src mem) && size > 16 && size%16 != 0 && size%16 > 8 ->
- (Move [size-size%16] (ADDQconst <dst.Type> dst [size%16]) (ADDQconst <src.Type> src [size%16])
+(Move [s] dst src mem)
+ && SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8 ->
+ (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16]
+ (OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16])
+ (OffPtr <src.Type> src [SizeAndAlign(s).Size()%16])
(MOVOstore dst (MOVOload src mem) mem))
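
These two rules peel the non-multiple-of-16 remainder off the front: store the leading bytes once, advance both pointers by size%16 via OffPtr, and recurse on a size that is a multiple of 16. The same strategy in plain Go, as a sketch of the shape rather than of the generated code:

    package main

    import "fmt"

    // copyPeel16 copies src into dst by handling the len%16 head first,
    // then whole 16-byte blocks: the shape the Move rules above set up.
    func copyPeel16(dst, src []byte) {
        head := len(src) % 16
        copy(dst[:head], src[:head]) // the up-front remainder store
        for i := head; i < len(src); i += 16 {
            copy(dst[i:i+16], src[i:i+16]) // 16-byte blocks (MOVOstore)
        }
    }

    func main() {
        src := make([]byte, 37)
        for i := range src {
            src[i] = byte(i)
        }
        dst := make([]byte, len(src))
        copyPeel16(dst, src)
        fmt.Println(dst[36]) // 36
    }
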
// Medium copying uses a duff device.
-(Move [size] dst src mem) && size >= 32 && size <= 16*64 && size%16 == 0 && !config.noDuffDevice ->
- (DUFFCOPY [14*(64-size/16)] dst src mem)
+(Move [s] dst src mem)
+ && SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0
+ && !config.noDuffDevice ->
+ (DUFFCOPY [14*(64-SizeAndAlign(s).Size()/16)] dst src mem)
// 14 and 64 are magic constants. 14 is the number of bytes to encode:
// MOVUPS (SI), X0
// ADDQ $16, SI
@@ -343,57 +358,62 @@
// and 64 is the number of such blocks. See src/runtime/duff_amd64.s:duffcopy.
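
Putting those constants together, the auxint is simply an entry offset into the duffcopy body. A sketch of the arithmetic the DUFFCOPY rule performs:

    package main

    import "fmt"

    // duffCopyOffset computes 14*(64 - s/16): each 16-byte block of
    // duffcopy encodes to 14 bytes and there are 64 blocks, so entering
    // at this offset leaves exactly s/16 blocks to execute.
    func duffCopyOffset(s int64) int64 {
        return 14 * (64 - s/16)
    }

    func main() {
        fmt.Println(duffCopyOffset(16 * 64)) // 0: a maximal copy enters at the top
        fmt.Println(duffCopyOffset(32))      // 868: skip all but the last 2 blocks
    }
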
// Large copying uses REP MOVSQ.
-(Move [size] dst src mem) && (size > 16*64 || config.noDuffDevice) && size%8 == 0 ->
- (REPMOVSQ dst src (MOVQconst [size/8]) mem)
+(Move [s] dst src mem) && (SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0 ->
+ (REPMOVSQ dst src (MOVQconst [SizeAndAlign(s).Size()/8]) mem)
// Lowering Zero instructions
-(Zero [0] _ mem) -> mem
-(Zero [1] destptr mem) -> (MOVBstoreconst [0] destptr mem)
-(Zero [2] destptr mem) -> (MOVWstoreconst [0] destptr mem)
-(Zero [4] destptr mem) -> (MOVLstoreconst [0] destptr mem)
-(Zero [8] destptr mem) -> (MOVQstoreconst [0] destptr mem)
+(Zero [s] _ mem) && SizeAndAlign(s).Size() == 0 -> mem
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstoreconst [0] destptr mem)
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 2 -> (MOVWstoreconst [0] destptr mem)
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 4 -> (MOVLstoreconst [0] destptr mem)
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 8 -> (MOVQstoreconst [0] destptr mem)
-(Zero [3] destptr mem) ->
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 3 ->
(MOVBstoreconst [makeValAndOff(0,2)] destptr
(MOVWstoreconst [0] destptr mem))
-(Zero [5] destptr mem) ->
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 5 ->
(MOVBstoreconst [makeValAndOff(0,4)] destptr
(MOVLstoreconst [0] destptr mem))
-(Zero [6] destptr mem) ->
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 6 ->
(MOVWstoreconst [makeValAndOff(0,4)] destptr
(MOVLstoreconst [0] destptr mem))
-(Zero [7] destptr mem) ->
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 7 ->
(MOVLstoreconst [makeValAndOff(0,3)] destptr
(MOVLstoreconst [0] destptr mem))
// Strip off any fractional word zeroing.
-(Zero [size] destptr mem) && size%8 != 0 && size > 8 ->
- (Zero [size-size%8] (ADDQconst destptr [size%8])
+(Zero [s] destptr mem) && SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8 ->
+ (Zero [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8] (OffPtr <destptr.Type> destptr [SizeAndAlign(s).Size()%8])
(MOVQstoreconst [0] destptr mem))
// Zero small numbers of words directly.
-(Zero [16] destptr mem) ->
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 16 ->
(MOVQstoreconst [makeValAndOff(0,8)] destptr
(MOVQstoreconst [0] destptr mem))
-(Zero [24] destptr mem) ->
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 24 ->
(MOVQstoreconst [makeValAndOff(0,16)] destptr
(MOVQstoreconst [makeValAndOff(0,8)] destptr
(MOVQstoreconst [0] destptr mem)))
-(Zero [32] destptr mem) ->
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 32 ->
(MOVQstoreconst [makeValAndOff(0,24)] destptr
(MOVQstoreconst [makeValAndOff(0,16)] destptr
(MOVQstoreconst [makeValAndOff(0,8)] destptr
(MOVQstoreconst [0] destptr mem))))
// Medium zeroing uses a duff device.
-(Zero [size] destptr mem) && size <= 1024 && size%8 == 0 && size%16 != 0 && !config.noDuffDevice ->
- (Zero [size-8] (ADDQconst [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
-(Zero [size] destptr mem) && size <= 1024 && size%16 == 0 && !config.noDuffDevice ->
- (DUFFZERO [size] destptr (MOVOconst [0]) mem)
+(Zero [s] destptr mem)
+ && SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size()%16 != 0
+ && !config.noDuffDevice ->
+ (Zero [SizeAndAlign(s).Size()-8] (OffPtr <destptr.Type> [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
+(Zero [s] destptr mem)
+ && SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice ->
+ (DUFFZERO [SizeAndAlign(s).Size()] destptr (MOVOconst [0]) mem)
// Large zeroing uses REP STOSQ.
-(Zero [size] destptr mem) && (size > 1024 || (config.noDuffDevice && size > 32)) && size%8 == 0 ->
- (REPSTOSQ destptr (MOVQconst [size/8]) (MOVQconst [0]) mem)
+(Zero [s] destptr mem)
+ && (SizeAndAlign(s).Size() > 1024 || (config.noDuffDevice && SizeAndAlign(s).Size() > 32))
+ && SizeAndAlign(s).Size()%8 == 0 ->
+ (REPSTOSQ destptr (MOVQconst [SizeAndAlign(s).Size()/8]) (MOVQconst [0]) mem)
// Lowering constants
(Const8 [val]) -> (MOVLconst [val])
@@ -402,7 +422,8 @@
(Const64 [val]) -> (MOVQconst [val])
(Const32F [val]) -> (MOVSSconst [val])
(Const64F [val]) -> (MOVSDconst [val])
-(ConstNil) -> (MOVQconst [0])
+(ConstNil) && config.PtrSize == 8 -> (MOVQconst [0])
+(ConstNil) && config.PtrSize == 4 -> (MOVLconst [0])
(ConstBool [b]) -> (MOVLconst [b])
// Lowering calls
@@ -413,15 +434,17 @@
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)
// Miscellaneous
-(Convert <t> x mem) -> (MOVQconvert <t> x mem)
-(IsNonNil p) -> (SETNE (TESTQ p p))
+(Convert <t> x mem) && config.PtrSize == 8 -> (MOVQconvert <t> x mem)
+(Convert <t> x mem) && config.PtrSize == 4 -> (MOVLconvert <t> x mem)
+(IsNonNil p) && config.PtrSize == 8 -> (SETNE (TESTQ p p))
+(IsNonNil p) && config.PtrSize == 4 -> (SETNE (TESTL p p))
(IsInBounds idx len) -> (SETB (CMPQ idx len))
(IsSliceInBounds idx len) -> (SETBE (CMPQ idx len))
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
(GetG mem) -> (LoweredGetG mem)
(GetClosurePtr) -> (LoweredGetClosurePtr)
-(Addr {sym} base) -> (LEAQ {sym} base)
-(ITab (Load ptr mem)) -> (MOVQload ptr mem)
+(Addr {sym} base) && config.PtrSize == 8 -> (LEAQ {sym} base)
+(Addr {sym} base) && config.PtrSize == 4 -> (LEAL {sym} base)
// block rewrites
(If (SETL cmp) yes no) -> (LT cmp yes no)
@@ -495,6 +518,12 @@
(ANDLconst [c] (ANDLconst [d] x)) -> (ANDLconst [c & d] x)
(ANDQconst [c] (ANDQconst [d] x)) -> (ANDQconst [c & d] x)
+(XORLconst [c] (XORLconst [d] x)) -> (XORLconst [c ^ d] x)
+(XORQconst [c] (XORQconst [d] x)) -> (XORQconst [c ^ d] x)
+
+(MULLconst [c] (MULLconst [d] x)) -> (MULLconst [int64(int32(c * d))] x)
+(MULQconst [c] (MULQconst [d] x)) -> (MULQconst [c * d] x)
+
(ORQ x (MOVQconst [c])) && is32Bit(c) -> (ORQconst [c] x)
(ORQ (MOVQconst [c]) x) && is32Bit(c) -> (ORQconst [c] x)
(ORL x (MOVLconst [c])) -> (ORLconst [c] x)
@@ -544,6 +573,16 @@
(SHRL x (ANDLconst [31] y)) -> (SHRL x y)
(SHRQ x (ANDQconst [63] y)) -> (SHRQ x y)
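
These rules exist for the common masking idiom below: Go code often masks a shift count to the operand width for defined behavior, but SHRL and SHRQ already reduce the count mod 32 and mod 64, so the AND is redundant and can be dropped. For example:

    package main

    import "fmt"

    // shr masks the count for defined behavior in Go; on x86 the SHRL
    // instruction masks the count itself, so the rule above lets the
    // compiler drop the ANDLconst and emit a bare shift.
    func shr(x uint32, y uint) uint32 {
        return x >> (y & 31)
    }

    func main() {
        fmt.Println(shr(1<<20, 36)) // 65536, since 36&31 == 4
    }
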
+(ROLQconst [c] (ROLQconst [d] x)) -> (ROLQconst [(c+d)&63] x)
+(ROLLconst [c] (ROLLconst [d] x)) -> (ROLLconst [(c+d)&31] x)
+(ROLWconst [c] (ROLWconst [d] x)) -> (ROLWconst [(c+d)&15] x)
+(ROLBconst [c] (ROLBconst [d] x)) -> (ROLBconst [(c+d)& 7] x)
+
+(ROLQconst [0] x) -> x
+(ROLLconst [0] x) -> x
+(ROLWconst [0] x) -> x
+(ROLBconst [0] x) -> x
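
Rotations compose by adding their amounts modulo the width, which is exactly the (c+d)&31 the ROLLconst rule computes, and a rotation by 0 is the identity, which the second group of rules exploits. A quick check with math/bits:

    package main

    import (
        "fmt"
        "math/bits"
    )

    func main() {
        x := uint32(0x12345678)
        a := bits.RotateLeft32(bits.RotateLeft32(x, 20), 20)
        b := bits.RotateLeft32(x, (20+20)&31) // a single rotate by 8
        fmt.Println(a == b)                   // true
    }
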
+
// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
// because the x86 instructions are defined to use all 5 bits of the shift even
// for the small shifts. I don't think we'll ever generate a weird shift (e.g.
@@ -1564,3 +1603,53 @@
&& x.Uses == 1
&& clobber(x)
-> (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem)
+
+// amd64p32 rules
+// same as the rules above, but with 32-bit instead of 64-bit pointer arithmetic.
+// LEAQ,ADDQ -> LEAL,ADDL
+(ADDLconst [c] (LEAL [d] {s} x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
+(LEAL [c] {s} (ADDLconst [d] x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
+
+(MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
+ (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
+ (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
+(MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
+ (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
+ (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+
+(MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
+ (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
+ (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
+ (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
+ (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+
+(MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVQload [off1+off2] {sym} ptr mem)
+(MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVLload [off1+off2] {sym} ptr mem)
+(MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload [off1+off2] {sym} ptr mem)
+(MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload [off1+off2] {sym} ptr mem)
+(MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVQstore [off1+off2] {sym} ptr val mem)
+(MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVLstore [off1+off2] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} ptr val mem)
+(MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} ptr val mem)
+(MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
+ (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+(MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
+ (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+(MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
+ (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+(MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
+ (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
index 43cc0eb5b3..07301618f4 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
@@ -64,7 +64,6 @@ var regNamesAMD64 = []string{
// pseudo-registers
"SB",
- "FLAGS",
}
func init() {
@@ -98,43 +97,36 @@ func init() {
fp = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15")
gpsp = gp | buildReg("SP")
gpspsb = gpsp | buildReg("SB")
- flags = buildReg("FLAGS")
- callerSave = gp | fp | flags
+ callerSave = gp | fp
)
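
With FLAGS gone from the register-name table, flags liveness no longer needs a bit in these masks and moves into the per-op clobberFlags bool; callerSave accordingly shrinks to gp | fp. For context, a toy sketch of the bitmask mechanics behind buildReg (the register list and mask width here are assumptions for illustration):

    package main

    import (
        "fmt"
        "strings"
    )

    type regMask uint64

    var regNames = []string{"AX", "CX", "DX", "BX", "SP", "BP", "SI", "DI"}

    // buildReg turns a space-separated register list into a bitmask with
    // one bit per regNames entry, a toy version of the generator's helper.
    func buildReg(s string) regMask {
        var m regMask
        for _, r := range strings.Fields(s) {
            for i, n := range regNames {
                if n == r {
                    m |= 1 << uint(i)
                }
            }
        }
        return m
    }

    func main() {
        fmt.Printf("%08b\n", buildReg("DI CX")) // 10000010
    }
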
// Common slices of register masks
var (
- gponly = []regMask{gp}
- fponly = []regMask{fp}
- flagsonly = []regMask{flags}
+ gponly = []regMask{gp}
+ fponly = []regMask{fp}
)
// Common regInfo
var (
- gp01 = regInfo{inputs: []regMask{}, outputs: gponly}
- gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly, clobbers: flags}
- gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly, clobbers: flags}
- gp11nf = regInfo{inputs: []regMask{gpsp}, outputs: gponly} // nf: no flags clobbered
+ gp01 = regInfo{inputs: nil, outputs: gponly}
+ gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly}
+ gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly}
gp11sb = regInfo{inputs: []regMask{gpspsb}, outputs: gponly}
- gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly, clobbers: flags}
- gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly, clobbers: flags}
+ gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+ gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly}
gp21sb = regInfo{inputs: []regMask{gpspsb, gpsp}, outputs: gponly}
- gp21shift = regInfo{inputs: []regMask{gp, cx}, outputs: []regMask{gp}, clobbers: flags}
- gp11div = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{ax},
- clobbers: dx | flags}
- gp11hmul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx},
- clobbers: ax | flags}
- gp11mod = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{dx},
- clobbers: ax | flags}
-
- gp2flags = regInfo{inputs: []regMask{gpsp, gpsp}, outputs: flagsonly}
- gp1flags = regInfo{inputs: []regMask{gpsp}, outputs: flagsonly}
- flagsgp = regInfo{inputs: flagsonly, outputs: gponly}
+ gp21shift = regInfo{inputs: []regMask{gp, cx}, outputs: []regMask{gp}}
+ gp11div = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{ax, dx}}
+ gp21hmul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx}, clobbers: ax}
+
+ gp2flags = regInfo{inputs: []regMask{gpsp, gpsp}}
+ gp1flags = regInfo{inputs: []regMask{gpsp}}
+ flagsgp = regInfo{inputs: nil, outputs: gponly}
// for CMOVconst -- uses AX to hold constant temporary.
- gp1flagsgp = regInfo{inputs: []regMask{gp &^ ax, flags}, clobbers: ax | flags, outputs: []regMask{gp &^ ax}}
+ gp1flagsgp = regInfo{inputs: []regMask{gp &^ ax}, clobbers: ax, outputs: []regMask{gp &^ ax}}
- readflags = regInfo{inputs: flagsonly, outputs: gponly}
- flagsgpax = regInfo{inputs: flagsonly, clobbers: ax | flags, outputs: []regMask{gp &^ ax}}
+ readflags = regInfo{inputs: nil, outputs: gponly}
+ flagsgpax = regInfo{inputs: nil, clobbers: ax, outputs: []regMask{gp &^ ax}}
gpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: gponly}
gploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: gponly}
@@ -144,14 +136,14 @@ func init() {
gpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, gpsp, 0}}
gpstoreconstidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}}
- fp01 = regInfo{inputs: []regMask{}, outputs: fponly}
+ fp01 = regInfo{inputs: nil, outputs: fponly}
fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: fponly}
fp21x15 = regInfo{inputs: []regMask{fp &^ x15, fp &^ x15},
clobbers: x15, outputs: []regMask{fp &^ x15}}
fpgp = regInfo{inputs: fponly, outputs: gponly}
gpfp = regInfo{inputs: gponly, outputs: fponly}
fp11 = regInfo{inputs: fponly, outputs: fponly}
- fp2flags = regInfo{inputs: []regMask{fp, fp}, outputs: flagsonly}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
fpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: fponly}
fploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: fponly}
@@ -188,60 +180,53 @@ func init() {
{name: "MOVSDstoreidx8", argLength: 4, reg: fpstoreidx, asm: "MOVSD", aux: "SymOff"}, // fp64 indexed by 8i store
// binary ops
- {name: "ADDQ", argLength: 2, reg: gp21sp, asm: "ADDQ", commutative: true}, // arg0 + arg1
- {name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true}, // arg0 + arg1
- {name: "ADDQconst", argLength: 1, reg: gp11sp, asm: "ADDQ", aux: "Int64", typ: "UInt64"}, // arg0 + auxint
- {name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32"}, // arg0 + auxint
-
- {name: "SUBQ", argLength: 2, reg: gp21, asm: "SUBQ", resultInArg0: true}, // arg0 - arg1
- {name: "SUBL", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true}, // arg0 - arg1
- {name: "SUBQconst", argLength: 1, reg: gp11, asm: "SUBQ", aux: "Int64", resultInArg0: true}, // arg0 - auxint
- {name: "SUBLconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int32", resultInArg0: true}, // arg0 - auxint
-
- {name: "MULQ", argLength: 2, reg: gp21, asm: "IMULQ", commutative: true, resultInArg0: true}, // arg0 * arg1
- {name: "MULL", argLength: 2, reg: gp21, asm: "IMULL", commutative: true, resultInArg0: true}, // arg0 * arg1
- {name: "MULQconst", argLength: 1, reg: gp11, asm: "IMULQ", aux: "Int64", resultInArg0: true}, // arg0 * auxint
- {name: "MULLconst", argLength: 1, reg: gp11, asm: "IMULL", aux: "Int32", resultInArg0: true}, // arg0 * auxint
-
- {name: "HMULQ", argLength: 2, reg: gp11hmul, asm: "IMULQ"}, // (arg0 * arg1) >> width
- {name: "HMULL", argLength: 2, reg: gp11hmul, asm: "IMULL"}, // (arg0 * arg1) >> width
- {name: "HMULW", argLength: 2, reg: gp11hmul, asm: "IMULW"}, // (arg0 * arg1) >> width
- {name: "HMULB", argLength: 2, reg: gp11hmul, asm: "IMULB"}, // (arg0 * arg1) >> width
- {name: "HMULQU", argLength: 2, reg: gp11hmul, asm: "MULQ"}, // (arg0 * arg1) >> width
- {name: "HMULLU", argLength: 2, reg: gp11hmul, asm: "MULL"}, // (arg0 * arg1) >> width
- {name: "HMULWU", argLength: 2, reg: gp11hmul, asm: "MULW"}, // (arg0 * arg1) >> width
- {name: "HMULBU", argLength: 2, reg: gp11hmul, asm: "MULB"}, // (arg0 * arg1) >> width
-
- {name: "AVGQU", argLength: 2, reg: gp21, commutative: true, resultInArg0: true}, // (arg0 + arg1) / 2 as unsigned, all 64 result bits
-
- {name: "DIVQ", argLength: 2, reg: gp11div, asm: "IDIVQ"}, // arg0 / arg1
- {name: "DIVL", argLength: 2, reg: gp11div, asm: "IDIVL"}, // arg0 / arg1
- {name: "DIVW", argLength: 2, reg: gp11div, asm: "IDIVW"}, // arg0 / arg1
- {name: "DIVQU", argLength: 2, reg: gp11div, asm: "DIVQ"}, // arg0 / arg1
- {name: "DIVLU", argLength: 2, reg: gp11div, asm: "DIVL"}, // arg0 / arg1
- {name: "DIVWU", argLength: 2, reg: gp11div, asm: "DIVW"}, // arg0 / arg1
-
- {name: "MODQ", argLength: 2, reg: gp11mod, asm: "IDIVQ"}, // arg0 % arg1
- {name: "MODL", argLength: 2, reg: gp11mod, asm: "IDIVL"}, // arg0 % arg1
- {name: "MODW", argLength: 2, reg: gp11mod, asm: "IDIVW"}, // arg0 % arg1
- {name: "MODQU", argLength: 2, reg: gp11mod, asm: "DIVQ"}, // arg0 % arg1
- {name: "MODLU", argLength: 2, reg: gp11mod, asm: "DIVL"}, // arg0 % arg1
- {name: "MODWU", argLength: 2, reg: gp11mod, asm: "DIVW"}, // arg0 % arg1
-
- {name: "ANDQ", argLength: 2, reg: gp21, asm: "ANDQ", commutative: true, resultInArg0: true}, // arg0 & arg1
- {name: "ANDL", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true}, // arg0 & arg1
- {name: "ANDQconst", argLength: 1, reg: gp11, asm: "ANDQ", aux: "Int64", resultInArg0: true}, // arg0 & auxint
- {name: "ANDLconst", argLength: 1, reg: gp11, asm: "ANDL", aux: "Int32", resultInArg0: true}, // arg0 & auxint
-
- {name: "ORQ", argLength: 2, reg: gp21, asm: "ORQ", commutative: true, resultInArg0: true}, // arg0 | arg1
- {name: "ORL", argLength: 2, reg: gp21, asm: "ORL", commutative: true, resultInArg0: true}, // arg0 | arg1
- {name: "ORQconst", argLength: 1, reg: gp11, asm: "ORQ", aux: "Int64", resultInArg0: true}, // arg0 | auxint
- {name: "ORLconst", argLength: 1, reg: gp11, asm: "ORL", aux: "Int32", resultInArg0: true}, // arg0 | auxint
-
- {name: "XORQ", argLength: 2, reg: gp21, asm: "XORQ", commutative: true, resultInArg0: true}, // arg0 ^ arg1
- {name: "XORL", argLength: 2, reg: gp21, asm: "XORL", commutative: true, resultInArg0: true}, // arg0 ^ arg1
- {name: "XORQconst", argLength: 1, reg: gp11, asm: "XORQ", aux: "Int64", resultInArg0: true}, // arg0 ^ auxint
- {name: "XORLconst", argLength: 1, reg: gp11, asm: "XORL", aux: "Int32", resultInArg0: true}, // arg0 ^ auxint
+ {name: "ADDQ", argLength: 2, reg: gp21sp, asm: "ADDQ", commutative: true, clobberFlags: true}, // arg0 + arg1
+ {name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true, clobberFlags: true}, // arg0 + arg1
+ {name: "ADDQconst", argLength: 1, reg: gp11sp, asm: "ADDQ", aux: "Int64", typ: "UInt64", clobberFlags: true}, // arg0 + auxint
+ {name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32", clobberFlags: true}, // arg0 + auxint
+
+ {name: "SUBQ", argLength: 2, reg: gp21, asm: "SUBQ", resultInArg0: true, clobberFlags: true}, // arg0 - arg1
+ {name: "SUBL", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true, clobberFlags: true}, // arg0 - arg1
+ {name: "SUBQconst", argLength: 1, reg: gp11, asm: "SUBQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 - auxint
+ {name: "SUBLconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint
+
+ {name: "MULQ", argLength: 2, reg: gp21, asm: "IMULQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1
+ {name: "MULL", argLength: 2, reg: gp21, asm: "IMULL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1
+ {name: "MULQconst", argLength: 1, reg: gp11, asm: "IMULQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 * auxint
+ {name: "MULLconst", argLength: 1, reg: gp11, asm: "IMULL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 * auxint
+
+ {name: "HMULQ", argLength: 2, reg: gp21hmul, asm: "IMULQ", clobberFlags: true}, // (arg0 * arg1) >> width
+ {name: "HMULL", argLength: 2, reg: gp21hmul, asm: "IMULL", clobberFlags: true}, // (arg0 * arg1) >> width
+ {name: "HMULW", argLength: 2, reg: gp21hmul, asm: "IMULW", clobberFlags: true}, // (arg0 * arg1) >> width
+ {name: "HMULB", argLength: 2, reg: gp21hmul, asm: "IMULB", clobberFlags: true}, // (arg0 * arg1) >> width
+ {name: "HMULQU", argLength: 2, reg: gp21hmul, asm: "MULQ", clobberFlags: true}, // (arg0 * arg1) >> width
+ {name: "HMULLU", argLength: 2, reg: gp21hmul, asm: "MULL", clobberFlags: true}, // (arg0 * arg1) >> width
+ {name: "HMULWU", argLength: 2, reg: gp21hmul, asm: "MULW", clobberFlags: true}, // (arg0 * arg1) >> width
+ {name: "HMULBU", argLength: 2, reg: gp21hmul, asm: "MULB", clobberFlags: true}, // (arg0 * arg1) >> width
+
+ {name: "AVGQU", argLength: 2, reg: gp21, commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 + arg1) / 2 as unsigned, all 64 result bits
+
+ {name: "DIVQ", argLength: 2, reg: gp11div, typ: "(Int64,Int64)", asm: "IDIVQ", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
+ {name: "DIVL", argLength: 2, reg: gp11div, typ: "(Int32,Int32)", asm: "IDIVL", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
+ {name: "DIVW", argLength: 2, reg: gp11div, typ: "(Int16,Int16)", asm: "IDIVW", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
+ {name: "DIVQU", argLength: 2, reg: gp11div, typ: "(UInt64,UInt64)", asm: "DIVQ", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
+ {name: "DIVLU", argLength: 2, reg: gp11div, typ: "(UInt32,UInt32)", asm: "DIVL", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
+ {name: "DIVWU", argLength: 2, reg: gp11div, typ: "(UInt16,UInt16)", asm: "DIVW", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
+
+ {name: "ANDQ", argLength: 2, reg: gp21, asm: "ANDQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1
+ {name: "ANDL", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1
+ {name: "ANDQconst", argLength: 1, reg: gp11, asm: "ANDQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
+ {name: "ANDLconst", argLength: 1, reg: gp11, asm: "ANDL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
+
+ {name: "ORQ", argLength: 2, reg: gp21, asm: "ORQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1
+ {name: "ORL", argLength: 2, reg: gp21, asm: "ORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1
+ {name: "ORQconst", argLength: 1, reg: gp11, asm: "ORQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
+ {name: "ORLconst", argLength: 1, reg: gp11, asm: "ORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
+
+ {name: "XORQ", argLength: 2, reg: gp21, asm: "XORQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1
+ {name: "XORL", argLength: 2, reg: gp21, asm: "XORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1
+ {name: "XORQconst", argLength: 1, reg: gp11, asm: "XORQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
+ {name: "XORLconst", argLength: 1, reg: gp11, asm: "XORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
{name: "CMPQ", argLength: 2, reg: gp2flags, asm: "CMPQ", typ: "Flags"}, // arg0 compare to arg1
{name: "CMPL", argLength: 2, reg: gp2flags, asm: "CMPL", typ: "Flags"}, // arg0 compare to arg1
@@ -264,60 +249,60 @@ func init() {
{name: "TESTWconst", argLength: 1, reg: gp1flags, asm: "TESTW", typ: "Flags", aux: "Int16"}, // (arg0 & auxint) compare to 0
{name: "TESTBconst", argLength: 1, reg: gp1flags, asm: "TESTB", typ: "Flags", aux: "Int8"}, // (arg0 & auxint) compare to 0
- {name: "SHLQ", argLength: 2, reg: gp21shift, asm: "SHLQ", resultInArg0: true}, // arg0 << arg1, shift amount is mod 64
- {name: "SHLL", argLength: 2, reg: gp21shift, asm: "SHLL", resultInArg0: true}, // arg0 << arg1, shift amount is mod 32
- {name: "SHLQconst", argLength: 1, reg: gp11, asm: "SHLQ", aux: "Int64", resultInArg0: true}, // arg0 << auxint, shift amount 0-63
- {name: "SHLLconst", argLength: 1, reg: gp11, asm: "SHLL", aux: "Int32", resultInArg0: true}, // arg0 << auxint, shift amount 0-31
+ {name: "SHLQ", argLength: 2, reg: gp21shift, asm: "SHLQ", resultInArg0: true, clobberFlags: true}, // arg0 << arg1, shift amount is mod 64
+ {name: "SHLL", argLength: 2, reg: gp21shift, asm: "SHLL", resultInArg0: true, clobberFlags: true}, // arg0 << arg1, shift amount is mod 32
+ {name: "SHLQconst", argLength: 1, reg: gp11, asm: "SHLQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 << auxint, shift amount 0-63
+ {name: "SHLLconst", argLength: 1, reg: gp11, asm: "SHLL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 << auxint, shift amount 0-31
	// Note: x86 is weird, the 16- and 8-bit shifts still use all 5 bits of shift amount!
- {name: "SHRQ", argLength: 2, reg: gp21shift, asm: "SHRQ", resultInArg0: true}, // unsigned arg0 >> arg1, shift amount is mod 64
- {name: "SHRL", argLength: 2, reg: gp21shift, asm: "SHRL", resultInArg0: true}, // unsigned arg0 >> arg1, shift amount is mod 32
- {name: "SHRW", argLength: 2, reg: gp21shift, asm: "SHRW", resultInArg0: true}, // unsigned arg0 >> arg1, shift amount is mod 32
- {name: "SHRB", argLength: 2, reg: gp21shift, asm: "SHRB", resultInArg0: true}, // unsigned arg0 >> arg1, shift amount is mod 32
- {name: "SHRQconst", argLength: 1, reg: gp11, asm: "SHRQ", aux: "Int64", resultInArg0: true}, // unsigned arg0 >> auxint, shift amount 0-63
- {name: "SHRLconst", argLength: 1, reg: gp11, asm: "SHRL", aux: "Int32", resultInArg0: true}, // unsigned arg0 >> auxint, shift amount 0-31
- {name: "SHRWconst", argLength: 1, reg: gp11, asm: "SHRW", aux: "Int16", resultInArg0: true}, // unsigned arg0 >> auxint, shift amount 0-31
- {name: "SHRBconst", argLength: 1, reg: gp11, asm: "SHRB", aux: "Int8", resultInArg0: true}, // unsigned arg0 >> auxint, shift amount 0-31
-
- {name: "SARQ", argLength: 2, reg: gp21shift, asm: "SARQ", resultInArg0: true}, // signed arg0 >> arg1, shift amount is mod 64
- {name: "SARL", argLength: 2, reg: gp21shift, asm: "SARL", resultInArg0: true}, // signed arg0 >> arg1, shift amount is mod 32
- {name: "SARW", argLength: 2, reg: gp21shift, asm: "SARW", resultInArg0: true}, // signed arg0 >> arg1, shift amount is mod 32
- {name: "SARB", argLength: 2, reg: gp21shift, asm: "SARB", resultInArg0: true}, // signed arg0 >> arg1, shift amount is mod 32
- {name: "SARQconst", argLength: 1, reg: gp11, asm: "SARQ", aux: "Int64", resultInArg0: true}, // signed arg0 >> auxint, shift amount 0-63
- {name: "SARLconst", argLength: 1, reg: gp11, asm: "SARL", aux: "Int32", resultInArg0: true}, // signed arg0 >> auxint, shift amount 0-31
- {name: "SARWconst", argLength: 1, reg: gp11, asm: "SARW", aux: "Int16", resultInArg0: true}, // signed arg0 >> auxint, shift amount 0-31
- {name: "SARBconst", argLength: 1, reg: gp11, asm: "SARB", aux: "Int8", resultInArg0: true}, // signed arg0 >> auxint, shift amount 0-31
-
- {name: "ROLQconst", argLength: 1, reg: gp11, asm: "ROLQ", aux: "Int64", resultInArg0: true}, // arg0 rotate left auxint, rotate amount 0-63
- {name: "ROLLconst", argLength: 1, reg: gp11, asm: "ROLL", aux: "Int32", resultInArg0: true}, // arg0 rotate left auxint, rotate amount 0-31
- {name: "ROLWconst", argLength: 1, reg: gp11, asm: "ROLW", aux: "Int16", resultInArg0: true}, // arg0 rotate left auxint, rotate amount 0-15
- {name: "ROLBconst", argLength: 1, reg: gp11, asm: "ROLB", aux: "Int8", resultInArg0: true}, // arg0 rotate left auxint, rotate amount 0-7
+ {name: "SHRQ", argLength: 2, reg: gp21shift, asm: "SHRQ", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 64
+ {name: "SHRL", argLength: 2, reg: gp21shift, asm: "SHRL", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32
+ {name: "SHRW", argLength: 2, reg: gp21shift, asm: "SHRW", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32
+ {name: "SHRB", argLength: 2, reg: gp21shift, asm: "SHRB", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32
+ {name: "SHRQconst", argLength: 1, reg: gp11, asm: "SHRQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-63
+ {name: "SHRLconst", argLength: 1, reg: gp11, asm: "SHRL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-31
+ {name: "SHRWconst", argLength: 1, reg: gp11, asm: "SHRW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-31
+ {name: "SHRBconst", argLength: 1, reg: gp11, asm: "SHRB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-31
+
+ {name: "SARQ", argLength: 2, reg: gp21shift, asm: "SARQ", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 64
+ {name: "SARL", argLength: 2, reg: gp21shift, asm: "SARL", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32
+ {name: "SARW", argLength: 2, reg: gp21shift, asm: "SARW", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32
+ {name: "SARB", argLength: 2, reg: gp21shift, asm: "SARB", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32
+ {name: "SARQconst", argLength: 1, reg: gp11, asm: "SARQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-63
+ {name: "SARLconst", argLength: 1, reg: gp11, asm: "SARL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-31
+ {name: "SARWconst", argLength: 1, reg: gp11, asm: "SARW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-31
+ {name: "SARBconst", argLength: 1, reg: gp11, asm: "SARB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-31
+
+ {name: "ROLQconst", argLength: 1, reg: gp11, asm: "ROLQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-63
+ {name: "ROLLconst", argLength: 1, reg: gp11, asm: "ROLL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-31
+ {name: "ROLWconst", argLength: 1, reg: gp11, asm: "ROLW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-15
+ {name: "ROLBconst", argLength: 1, reg: gp11, asm: "ROLB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-7
// unary ops
- {name: "NEGQ", argLength: 1, reg: gp11, asm: "NEGQ", resultInArg0: true}, // -arg0
- {name: "NEGL", argLength: 1, reg: gp11, asm: "NEGL", resultInArg0: true}, // -arg0
+ {name: "NEGQ", argLength: 1, reg: gp11, asm: "NEGQ", resultInArg0: true, clobberFlags: true}, // -arg0
+ {name: "NEGL", argLength: 1, reg: gp11, asm: "NEGL", resultInArg0: true, clobberFlags: true}, // -arg0
- {name: "NOTQ", argLength: 1, reg: gp11, asm: "NOTQ", resultInArg0: true}, // ^arg0
- {name: "NOTL", argLength: 1, reg: gp11, asm: "NOTL", resultInArg0: true}, // ^arg0
+ {name: "NOTQ", argLength: 1, reg: gp11, asm: "NOTQ", resultInArg0: true, clobberFlags: true}, // ^arg0
+ {name: "NOTL", argLength: 1, reg: gp11, asm: "NOTL", resultInArg0: true, clobberFlags: true}, // ^arg0
- {name: "BSFQ", argLength: 1, reg: gp11, asm: "BSFQ"}, // arg0 # of low-order zeroes ; undef if zero
- {name: "BSFL", argLength: 1, reg: gp11, asm: "BSFL"}, // arg0 # of low-order zeroes ; undef if zero
- {name: "BSFW", argLength: 1, reg: gp11, asm: "BSFW"}, // arg0 # of low-order zeroes ; undef if zero
+ {name: "BSFQ", argLength: 1, reg: gp11, asm: "BSFQ", clobberFlags: true}, // arg0 # of low-order zeroes ; undef if zero
+ {name: "BSFL", argLength: 1, reg: gp11, asm: "BSFL", clobberFlags: true}, // arg0 # of low-order zeroes ; undef if zero
+ {name: "BSFW", argLength: 1, reg: gp11, asm: "BSFW", clobberFlags: true}, // arg0 # of low-order zeroes ; undef if zero
- {name: "BSRQ", argLength: 1, reg: gp11, asm: "BSRQ"}, // arg0 # of high-order zeroes ; undef if zero
- {name: "BSRL", argLength: 1, reg: gp11, asm: "BSRL"}, // arg0 # of high-order zeroes ; undef if zero
- {name: "BSRW", argLength: 1, reg: gp11, asm: "BSRW"}, // arg0 # of high-order zeroes ; undef if zero
+ {name: "BSRQ", argLength: 1, reg: gp11, asm: "BSRQ", clobberFlags: true}, // arg0 # of high-order zeroes ; undef if zero
+ {name: "BSRL", argLength: 1, reg: gp11, asm: "BSRL", clobberFlags: true}, // arg0 # of high-order zeroes ; undef if zero
+ {name: "BSRW", argLength: 1, reg: gp11, asm: "BSRW", clobberFlags: true}, // arg0 # of high-order zeroes ; undef if zero
// Note: ASM for these ops moves the whole register
- {name: "CMOVQEQconst", argLength: 2, reg: gp1flagsgp, asm: "CMOVQEQ", typ: "UInt64", aux: "Int64", resultInArg0: true}, // replace arg0 w/ constant if Z set
- {name: "CMOVLEQconst", argLength: 2, reg: gp1flagsgp, asm: "CMOVLEQ", typ: "UInt32", aux: "Int32", resultInArg0: true}, // replace arg0 w/ constant if Z set
- {name: "CMOVWEQconst", argLength: 2, reg: gp1flagsgp, asm: "CMOVLEQ", typ: "UInt16", aux: "Int16", resultInArg0: true}, // replace arg0 w/ constant if Z set
- {name: "CMOVQNEconst", argLength: 2, reg: gp1flagsgp, asm: "CMOVQNE", typ: "UInt64", aux: "Int64", resultInArg0: true}, // replace arg0 w/ constant if Z not set
- {name: "CMOVLNEconst", argLength: 2, reg: gp1flagsgp, asm: "CMOVLNE", typ: "UInt32", aux: "Int32", resultInArg0: true}, // replace arg0 w/ constant if Z not set
- {name: "CMOVWNEconst", argLength: 2, reg: gp1flagsgp, asm: "CMOVLNE", typ: "UInt16", aux: "Int16", resultInArg0: true}, // replace arg0 w/ constant if Z not set
+ {name: "CMOVQEQconst", argLength: 2, reg: gp1flagsgp, asm: "CMOVQEQ", typ: "UInt64", aux: "Int64", resultInArg0: true, clobberFlags: true}, // replace arg0 w/ constant if Z set
+ {name: "CMOVLEQconst", argLength: 2, reg: gp1flagsgp, asm: "CMOVLEQ", typ: "UInt32", aux: "Int32", resultInArg0: true, clobberFlags: true}, // replace arg0 w/ constant if Z set
+ {name: "CMOVWEQconst", argLength: 2, reg: gp1flagsgp, asm: "CMOVLEQ", typ: "UInt16", aux: "Int16", resultInArg0: true, clobberFlags: true}, // replace arg0 w/ constant if Z set
+ {name: "CMOVQNEconst", argLength: 2, reg: gp1flagsgp, asm: "CMOVQNE", typ: "UInt64", aux: "Int64", resultInArg0: true, clobberFlags: true}, // replace arg0 w/ constant if Z not set
+ {name: "CMOVLNEconst", argLength: 2, reg: gp1flagsgp, asm: "CMOVLNE", typ: "UInt32", aux: "Int32", resultInArg0: true, clobberFlags: true}, // replace arg0 w/ constant if Z not set
+ {name: "CMOVWNEconst", argLength: 2, reg: gp1flagsgp, asm: "CMOVLNE", typ: "UInt16", aux: "Int16", resultInArg0: true, clobberFlags: true}, // replace arg0 w/ constant if Z not set
- {name: "BSWAPQ", argLength: 1, reg: gp11, asm: "BSWAPQ", resultInArg0: true}, // arg0 swap bytes
- {name: "BSWAPL", argLength: 1, reg: gp11, asm: "BSWAPL", resultInArg0: true}, // arg0 swap bytes
+ {name: "BSWAPQ", argLength: 1, reg: gp11, asm: "BSWAPQ", resultInArg0: true, clobberFlags: true}, // arg0 swap bytes
+ {name: "BSWAPL", argLength: 1, reg: gp11, asm: "BSWAPL", resultInArg0: true, clobberFlags: true}, // arg0 swap bytes
{name: "SQRTSD", argLength: 1, reg: fp11, asm: "SQRTSD"}, // sqrt(arg0)
@@ -338,20 +323,20 @@ func init() {
// Need different opcodes for floating point conditions because
// any comparison involving a NaN is always FALSE and thus
// the patterns for inverting conditions cannot be used.
- {name: "SETEQF", argLength: 1, reg: flagsgpax, asm: "SETEQ"}, // extract == condition from arg0
- {name: "SETNEF", argLength: 1, reg: flagsgpax, asm: "SETNE"}, // extract != condition from arg0
- {name: "SETORD", argLength: 1, reg: flagsgp, asm: "SETPC"}, // extract "ordered" (No Nan present) condition from arg0
- {name: "SETNAN", argLength: 1, reg: flagsgp, asm: "SETPS"}, // extract "unordered" (Nan present) condition from arg0
+ {name: "SETEQF", argLength: 1, reg: flagsgpax, asm: "SETEQ", clobberFlags: true}, // extract == condition from arg0
+ {name: "SETNEF", argLength: 1, reg: flagsgpax, asm: "SETNE", clobberFlags: true}, // extract != condition from arg0
+ {name: "SETORD", argLength: 1, reg: flagsgp, asm: "SETPC"}, // extract "ordered" (No Nan present) condition from arg0
+ {name: "SETNAN", argLength: 1, reg: flagsgp, asm: "SETPS"}, // extract "unordered" (Nan present) condition from arg0
{name: "SETGF", argLength: 1, reg: flagsgp, asm: "SETHI"}, // extract floating > condition from arg0
{name: "SETGEF", argLength: 1, reg: flagsgp, asm: "SETCC"}, // extract floating >= condition from arg0
- {name: "MOVBQSX", argLength: 1, reg: gp11nf, asm: "MOVBQSX"}, // sign extend arg0 from int8 to int64
- {name: "MOVBQZX", argLength: 1, reg: gp11nf, asm: "MOVBQZX"}, // zero extend arg0 from int8 to int64
- {name: "MOVWQSX", argLength: 1, reg: gp11nf, asm: "MOVWQSX"}, // sign extend arg0 from int16 to int64
- {name: "MOVWQZX", argLength: 1, reg: gp11nf, asm: "MOVWQZX"}, // zero extend arg0 from int16 to int64
- {name: "MOVLQSX", argLength: 1, reg: gp11nf, asm: "MOVLQSX"}, // sign extend arg0 from int32 to int64
- {name: "MOVLQZX", argLength: 1, reg: gp11nf, asm: "MOVLQZX"}, // zero extend arg0 from int32 to int64
+ {name: "MOVBQSX", argLength: 1, reg: gp11, asm: "MOVBQSX"}, // sign extend arg0 from int8 to int64
+ {name: "MOVBQZX", argLength: 1, reg: gp11, asm: "MOVBQZX"}, // zero extend arg0 from int8 to int64
+ {name: "MOVWQSX", argLength: 1, reg: gp11, asm: "MOVWQSX"}, // sign extend arg0 from int16 to int64
+ {name: "MOVWQZX", argLength: 1, reg: gp11, asm: "MOVWQZX"}, // zero extend arg0 from int16 to int64
+ {name: "MOVLQSX", argLength: 1, reg: gp11, asm: "MOVLQSX"}, // sign extend arg0 from int32 to int64
+ {name: "MOVLQZX", argLength: 1, reg: gp11, asm: "MOVLQZX"}, // zero extend arg0 from int32 to int64
{name: "MOVLconst", reg: gp01, asm: "MOVL", typ: "UInt32", aux: "Int32", rematerializeable: true}, // 32 low bits of auxint
{name: "MOVQconst", reg: gp01, asm: "MOVQ", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint
@@ -369,13 +354,15 @@ func init() {
{name: "PXOR", argLength: 2, reg: fp21, asm: "PXOR", commutative: true, resultInArg0: true}, // exclusive or, applied to X regs for float negation.
- {name: "LEAQ", argLength: 1, reg: gp11sb, aux: "SymOff", rematerializeable: true}, // arg0 + auxint + offset encoded in aux
- {name: "LEAQ1", argLength: 2, reg: gp21sb, aux: "SymOff"}, // arg0 + arg1 + auxint + aux
- {name: "LEAQ2", argLength: 2, reg: gp21sb, aux: "SymOff"}, // arg0 + 2*arg1 + auxint + aux
- {name: "LEAQ4", argLength: 2, reg: gp21sb, aux: "SymOff"}, // arg0 + 4*arg1 + auxint + aux
- {name: "LEAQ8", argLength: 2, reg: gp21sb, aux: "SymOff"}, // arg0 + 8*arg1 + auxint + aux
+ {name: "LEAQ", argLength: 1, reg: gp11sb, asm: "LEAQ", aux: "SymOff", rematerializeable: true}, // arg0 + auxint + offset encoded in aux
+ {name: "LEAQ1", argLength: 2, reg: gp21sb, aux: "SymOff"}, // arg0 + arg1 + auxint + aux
+ {name: "LEAQ2", argLength: 2, reg: gp21sb, aux: "SymOff"}, // arg0 + 2*arg1 + auxint + aux
+ {name: "LEAQ4", argLength: 2, reg: gp21sb, aux: "SymOff"}, // arg0 + 4*arg1 + auxint + aux
+ {name: "LEAQ8", argLength: 2, reg: gp21sb, aux: "SymOff"}, // arg0 + 8*arg1 + auxint + aux
// Note: LEAQ{1,2,4,8} must not have OpSB as either argument.
+ {name: "LEAL", argLength: 1, reg: gp11sb, asm: "LEAL", aux: "SymOff", rematerializeable: true}, // arg0 + auxint + offset encoded in aux
+
// auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address
{name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVBLZX", aux: "SymOff", typ: "UInt8"}, // load byte from arg0+auxint+aux. arg1=mem. Zero extend.
{name: "MOVBQSXload", argLength: 2, reg: gpload, asm: "MOVBQSX", aux: "SymOff"}, // ditto, sign extend to int64
@@ -436,8 +423,9 @@ func init() {
argLength: 3,
reg: regInfo{
inputs: []regMask{buildReg("DI"), buildReg("X0")},
- clobbers: buildReg("DI FLAGS"),
+ clobbers: buildReg("DI"),
},
+ clobberFlags: true,
},
{name: "MOVOconst", reg: regInfo{nil, 0, []regMask{fp}}, typ: "Int128", aux: "Int128", rematerializeable: true},
@@ -455,11 +443,11 @@ func init() {
},
},
- {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff"}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem
- {name: "CALLclosure", argLength: 3, reg: regInfo{[]regMask{gpsp, buildReg("DX"), 0}, callerSave, nil}, aux: "Int64"}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
- {name: "CALLdefer", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "Int64"}, // call deferproc. arg0=mem, auxint=argsize, returns mem
- {name: "CALLgo", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "Int64"}, // call newproc. arg0=mem, auxint=argsize, returns mem
- {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64"}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("DX"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLdefer", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "Int64", clobberFlags: true}, // call deferproc. arg0=mem, auxint=argsize, returns mem
+ {name: "CALLgo", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "Int64", clobberFlags: true}, // call newproc. arg0=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
// arg0 = destination pointer
// arg1 = source pointer
@@ -472,8 +460,9 @@ func init() {
argLength: 3,
reg: regInfo{
inputs: []regMask{buildReg("DI"), buildReg("SI")},
- clobbers: buildReg("DI SI X0 FLAGS"), // uses X0 as a temporary
+ clobbers: buildReg("DI SI X0"), // uses X0 as a temporary
},
+ clobberFlags: true,
},
// arg0 = destination pointer
@@ -504,14 +493,15 @@ func init() {
// use of DX (the closure pointer)
{name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("DX")}}},
//arg0=ptr,arg1=mem, returns void. Faults if ptr is nil.
- {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpsp}, clobbers: flags}},
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpsp}}, clobberFlags: true},
// MOVQconvert converts between pointers and integers.
// We have a special op for this so as to not confuse GC
// (particularly stack maps). It takes a memory arg so it
// gets correctly ordered with respect to GC safepoints.
// arg0=ptr/int arg1=mem, output=int/ptr
- {name: "MOVQconvert", argLength: 2, reg: gp11nf, asm: "MOVQ"},
+ {name: "MOVQconvert", argLength: 2, reg: gp11, asm: "MOVQ"},
+ {name: "MOVLconvert", argLength: 2, reg: gp11, asm: "MOVL"}, // amd64p32 equivalent
// Constant flag values. For any comparison, there are 5 possible
// outcomes: the three from the signed total order (<,==,>) and the
@@ -545,11 +535,14 @@ func init() {
}
archs = append(archs, arch{
- name: "AMD64",
- pkg: "cmd/internal/obj/x86",
- genfile: "../../amd64/ssa.go",
- ops: AMD64ops,
- blocks: AMD64blocks,
- regnames: regNamesAMD64,
+ name: "AMD64",
+ pkg: "cmd/internal/obj/x86",
+ genfile: "../../amd64/ssa.go",
+ ops: AMD64ops,
+ blocks: AMD64blocks,
+ regnames: regNamesAMD64,
+ gpregmask: gp,
+ fpregmask: fp,
+ framepointerreg: int8(num["BP"]),
})
}
diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules
index 273500fc38..54d7395d0c 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM.rules
@@ -2,31 +2,1205 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+(AddPtr x y) -> (ADD x y)
(Add32 x y) -> (ADD x y)
+(Add16 x y) -> (ADD x y)
+(Add8 x y) -> (ADD x y)
+(Add32F x y) -> (ADDF x y)
+(Add64F x y) -> (ADDD x y)
+(Add32carry x y) -> (ADDS x y)
+(Add32withcarry x y c) -> (ADC x y c)
+
+(SubPtr x y) -> (SUB x y)
+(Sub32 x y) -> (SUB x y)
+(Sub16 x y) -> (SUB x y)
+(Sub8 x y) -> (SUB x y)
+(Sub32F x y) -> (SUBF x y)
+(Sub64F x y) -> (SUBD x y)
+
+(Sub32carry x y) -> (SUBS x y)
+(Sub32withcarry x y c) -> (SBC x y c)
+
+(Mul32 x y) -> (MUL x y)
+(Mul16 x y) -> (MUL x y)
+(Mul8 x y) -> (MUL x y)
+(Mul32F x y) -> (MULF x y)
+(Mul64F x y) -> (MULD x y)
+
+(Hmul32 x y) -> (HMUL x y)
+(Hmul32u x y) -> (HMULU x y)
+(Hmul16 x y) -> (SRAconst (MUL <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
+(Hmul16u x y) -> (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
+(Hmul8 x y) -> (SRAconst (MUL <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
+(Hmul8u x y) -> (SRLconst (MUL <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
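+// e.g. Hmul16(-2, 3): the sign-extended 32-bit product is -6, and
+// SRAconst [16] extracts its high 16 bits, -1.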
+
+(Mul32uhilo x y) -> (MULLU x y)
+
+(Div32 x y) -> (DIV x y)
+(Div32u x y) -> (DIVU x y)
+(Div16 x y) -> (DIV (SignExt16to32 x) (SignExt16to32 y))
+(Div16u x y) -> (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Div8 x y) -> (DIV (SignExt8to32 x) (SignExt8to32 y))
+(Div8u x y) -> (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y))
+(Div32F x y) -> (DIVF x y)
+(Div64F x y) -> (DIVD x y)
+
+(Mod32 x y) -> (MOD x y)
+(Mod32u x y) -> (MODU x y)
+(Mod16 x y) -> (MOD (SignExt16to32 x) (SignExt16to32 y))
+(Mod16u x y) -> (MODU (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Mod8 x y) -> (MOD (SignExt8to32 x) (SignExt8to32 y))
+(Mod8u x y) -> (MODU (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+(And32 x y) -> (AND x y)
+(And16 x y) -> (AND x y)
+(And8 x y) -> (AND x y)
+
+(Or32 x y) -> (OR x y)
+(Or16 x y) -> (OR x y)
+(Or8 x y) -> (OR x y)
+
+(Xor32 x y) -> (XOR x y)
+(Xor16 x y) -> (XOR x y)
+(Xor8 x y) -> (XOR x y)
+
+// unary ops
+(Neg32 x) -> (RSBconst [0] x)
+(Neg16 x) -> (RSBconst [0] x)
+(Neg8 x) -> (RSBconst [0] x)
+(Neg32F x) -> (NEGF x)
+(Neg64F x) -> (NEGD x)
+
+(Com32 x) -> (MVN x)
+(Com16 x) -> (MVN x)
+(Com8 x) -> (MVN x)
+
+(Sqrt x) -> (SQRTD x)
+
+// boolean ops -- booleans are represented with 0=false, 1=true
+(AndB x y) -> (AND x y)
+(OrB x y) -> (OR x y)
+(EqB x y) -> (XORconst [1] (XOR <config.fe.TypeBool()> x y))
+(NeqB x y) -> (XOR x y)
+(Not x) -> (XORconst [1] x)
+
+// shifts
+// the hardware instruction uses only the low byte of the shift amount;
+// we compare against 256 to ensure Go semantics for large shifts
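+// e.g. for y = 256 the shifter sees 256&255 = 0 and would leave x unchanged,
+// but Go requires a zero result; the CMPconst [256] guard selects 0 whenever
+// y >= 256. Amounts 32-255 already yield 0 in hardware, and an 8-bit amount
+// is at most 255, so the x8 variants need no guard.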
+(Lsh32x32 x y) -> (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+(Lsh32x16 x y) -> (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Lsh32x8 x y) -> (SLL x (ZeroExt8to32 y))
+
+(Lsh16x32 x y) -> (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+(Lsh16x16 x y) -> (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Lsh16x8 x y) -> (SLL x (ZeroExt8to32 y))
+
+(Lsh8x32 x y) -> (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+(Lsh8x16 x y) -> (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Lsh8x8 x y) -> (SLL x (ZeroExt8to32 y))
+
+(Rsh32Ux32 x y) -> (CMOVWHSconst (SRL <x.Type> x y) (CMPconst [256] y) [0])
+(Rsh32Ux16 x y) -> (CMOVWHSconst (SRL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Rsh32Ux8 x y) -> (SRL x (ZeroExt8to32 y))
+
+(Rsh16Ux32 x y) -> (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) y) (CMPconst [256] y) [0])
+(Rsh16Ux16 x y) -> (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Rsh16Ux8 x y) -> (SRL (ZeroExt16to32 x) (ZeroExt8to32 y))
+
+(Rsh8Ux32 x y) -> (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) y) (CMPconst [256] y) [0])
+(Rsh8Ux16 x y) -> (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Rsh8Ux8 x y) -> (SRL (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+(Rsh32x32 x y) -> (SRAcond x y (CMPconst [256] y))
+(Rsh32x16 x y) -> (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+(Rsh32x8 x y) -> (SRA x (ZeroExt8to32 y))
+
+(Rsh16x32 x y) -> (SRAcond (SignExt16to32 x) y (CMPconst [256] y))
+(Rsh16x16 x y) -> (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+(Rsh16x8 x y) -> (SRA (SignExt16to32 x) (ZeroExt8to32 y))
+
+(Rsh8x32 x y) -> (SRAcond (SignExt8to32 x) y (CMPconst [256] y))
+(Rsh8x16 x y) -> (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+(Rsh8x8 x y) -> (SRA (SignExt8to32 x) (ZeroExt8to32 y))
+
+// constant shifts
+// the generic opt pass rewrites all constant shifts to shifts by Const64
+(Lsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SLLconst x [c])
+(Rsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SRAconst x [c])
+(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 -> (SRLconst x [c])
+(Lsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SLLconst x [c])
+(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
+(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SRLconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
+(Lsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SLLconst x [c])
+(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
+(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 -> (SRLconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
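+// e.g. Rsh16x64: SLLconst [16] moves the 16-bit value into the high half of
+// the register, so SRAconst [c+16] both shifts and sign-extends in one step.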
+
+// large constant shifts
+(Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 -> (Const32 [0])
+(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 -> (Const32 [0])
+(Lsh16x64 _ (Const64 [c])) && uint64(c) >= 16 -> (Const16 [0])
+(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 -> (Const16 [0])
+(Lsh8x64 _ (Const64 [c])) && uint64(c) >= 8 -> (Const8 [0])
+(Rsh8Ux64 _ (Const64 [c])) && uint64(c) >= 8 -> (Const8 [0])
+
+// large constant signed right shift: we keep only the sign bit (shift by 31)
+(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 -> (SRAconst x [31])
+(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [31])
+(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 -> (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [31])
+
+(Lrot32 x [c]) -> (SRRconst x [32-c&31])
+(Lrot16 <t> x [c]) -> (OR (SLLconst <t> x [c&15]) (SRLconst <t> x [16-c&15]))
+(Lrot8 <t> x [c]) -> (OR (SLLconst <t> x [c&7]) (SRLconst <t> x [8-c&7]))
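+// e.g. a 32-bit rotate left by 8 is the same as a rotate right by 24, so
+// Lrot32 maps onto ARM's rotate-right (SRRconst).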
+
+// constants
+(Const8 [val]) -> (MOVWconst [val])
+(Const16 [val]) -> (MOVWconst [val])
(Const32 [val]) -> (MOVWconst [val])
+(Const32F [val]) -> (MOVFconst [val])
+(Const64F [val]) -> (MOVDconst [val])
+(ConstNil) -> (MOVWconst [0])
+(ConstBool [b]) -> (MOVWconst [b])
+
+// truncations
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 x) -> x
+(Trunc32to8 x) -> x
+(Trunc32to16 x) -> x
+
+// Zero-/Sign-extensions
+(ZeroExt8to16 x) -> (MOVBUreg x)
+(ZeroExt8to32 x) -> (MOVBUreg x)
+(ZeroExt16to32 x) -> (MOVHUreg x)
+
+(SignExt8to16 x) -> (MOVBreg x)
+(SignExt8to32 x) -> (MOVBreg x)
+(SignExt16to32 x) -> (MOVHreg x)
+(Signmask x) -> (SRAconst x [31])
+(Zeromask x) -> (SRAconst (RSBshiftRL <config.fe.TypeInt32()> x x [1]) [31]) // sign bit of uint32(x)>>1 - x
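+// e.g. for x != 0, (uint32(x)>>1) - x is negative, so SRAconst [31] spreads
+// its sign bit into -1; for x == 0 the result stays 0.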
+
+// float <-> int conversion
+(Cvt32to32F x) -> (MOVWF x)
+(Cvt32to64F x) -> (MOVWD x)
+(Cvt32Uto32F x) -> (MOVWUF x)
+(Cvt32Uto64F x) -> (MOVWUD x)
+(Cvt32Fto32 x) -> (MOVFW x)
+(Cvt64Fto32 x) -> (MOVDW x)
+(Cvt32Fto32U x) -> (MOVFWU x)
+(Cvt64Fto32U x) -> (MOVDWU x)
+(Cvt32Fto64F x) -> (MOVFD x)
+(Cvt64Fto32F x) -> (MOVDF x)
+
+// comparisons
+(Eq8 x y) -> (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Eq16 x y) -> (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Eq32 x y) -> (Equal (CMP x y))
+(EqPtr x y) -> (Equal (CMP x y))
+(Eq32F x y) -> (Equal (CMPF x y))
+(Eq64F x y) -> (Equal (CMPD x y))
+
+(Neq8 x y) -> (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Neq16 x y) -> (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Neq32 x y) -> (NotEqual (CMP x y))
+(NeqPtr x y) -> (NotEqual (CMP x y))
+(Neq32F x y) -> (NotEqual (CMPF x y))
+(Neq64F x y) -> (NotEqual (CMPD x y))
+
+(Less8 x y) -> (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
+(Less16 x y) -> (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
(Less32 x y) -> (LessThan (CMP x y))
+(Less32F x y) -> (GreaterThan (CMPF y x)) // reverse operands to work around NaN
+(Less64F x y) -> (GreaterThan (CMPD y x)) // reverse operands to work around NaN
+
+(Less8U x y) -> (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Less16U x y) -> (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Less32U x y) -> (LessThanU (CMP x y))
+
+(Leq8 x y) -> (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
+(Leq16 x y) -> (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+(Leq32 x y) -> (LessEqual (CMP x y))
+(Leq32F x y) -> (GreaterEqual (CMPF y x)) // reverse operands to work around NaN
+(Leq64F x y) -> (GreaterEqual (CMPD y x)) // reverse operands to work around NaN
+
+(Leq8U x y) -> (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Leq16U x y) -> (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Leq32U x y) -> (LessEqualU (CMP x y))
+
+(Greater8 x y) -> (GreaterThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
+(Greater16 x y) -> (GreaterThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
+(Greater32 x y) -> (GreaterThan (CMP x y))
+(Greater32F x y) -> (GreaterThan (CMPF x y))
+(Greater64F x y) -> (GreaterThan (CMPD x y))
+
+(Greater8U x y) -> (GreaterThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Greater16U x y) -> (GreaterThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Greater32U x y) -> (GreaterThanU (CMP x y))
+
+(Geq8 x y) -> (GreaterEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
+(Geq16 x y) -> (GreaterEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+(Geq32 x y) -> (GreaterEqual (CMP x y))
+(Geq32F x y) -> (GreaterEqual (CMPF x y))
+(Geq64F x y) -> (GreaterEqual (CMPD x y))
+
+(Geq8U x y) -> (GreaterEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Geq16U x y) -> (GreaterEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Geq32U x y) -> (GreaterEqualU (CMP x y))
+
+(OffPtr [off] ptr:(SP)) -> (MOVWaddr [off] ptr)
+(OffPtr [off] ptr) -> (ADDconst [off] ptr)
+
+(Addr {sym} base) -> (MOVWaddr {sym} base)
+
+// loads
+(Load <t> ptr mem) && t.IsBoolean() -> (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && isSigned(t)) -> (MOVBload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && !isSigned(t)) -> (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) -> (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) -> (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t)) -> (MOVWload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) -> (MOVFload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) -> (MOVDload ptr mem)
+
+// stores
+(Store [1] ptr val mem) -> (MOVBstore ptr val mem)
+(Store [2] ptr val mem) -> (MOVHstore ptr val mem)
+(Store [4] ptr val mem) && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
+(Store [4] ptr val mem) && is32BitFloat(val.Type) -> (MOVFstore ptr val mem)
+(Store [8] ptr val mem) && is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
+
+// zero instructions
+(Zero [s] _ mem) && SizeAndAlign(s).Size() == 0 -> mem
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstore ptr (MOVWconst [0]) mem)
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0 ->
+ (MOVHstore ptr (MOVWconst [0]) mem)
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 2 ->
+ (MOVBstore [1] ptr (MOVWconst [0])
+ (MOVBstore [0] ptr (MOVWconst [0]) mem))
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0 ->
+ (MOVWstore ptr (MOVWconst [0]) mem)
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0 ->
+ (MOVHstore [2] ptr (MOVWconst [0])
+ (MOVHstore [0] ptr (MOVWconst [0]) mem))
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 4 ->
+ (MOVBstore [3] ptr (MOVWconst [0])
+ (MOVBstore [2] ptr (MOVWconst [0])
+ (MOVBstore [1] ptr (MOVWconst [0])
+ (MOVBstore [0] ptr (MOVWconst [0]) mem))))
-(OffPtr [off] ptr) -> (ADD (MOVWconst <config.Frontend().TypeInt32()> [off]) ptr)
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 3 ->
+ (MOVBstore [2] ptr (MOVWconst [0])
+ (MOVBstore [1] ptr (MOVWconst [0])
+ (MOVBstore [0] ptr (MOVWconst [0]) mem)))
-(Addr {sym} base) -> (ADDconst {sym} base)
+// Medium zeroing uses a duff device
+// 4 and 128 are magic constants, see runtime/mkduff.go
+(Zero [s] ptr mem)
+ && SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512
+ && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice ->
+ (DUFFZERO [4 * (128 - int64(SizeAndAlign(s).Size()/4))] ptr (MOVWconst [0]) mem)
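+// e.g. zeroing 64 bytes needs 64/4 = 16 word stores, so the entry offset is
+// 4 * (128-16) = 448 and only the last 16 stores of duffzero run.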
-(Load <t> ptr mem) && is32BitInt(t) -> (MOVWload ptr mem)
-(Store [4] ptr val mem) -> (MOVWstore ptr val mem)
+// Large zeroing uses a loop
+(Zero [s] ptr mem)
+ && (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%4 != 0 ->
+ (LoweredZero [SizeAndAlign(s).Align()]
+ ptr
+ (ADDconst <ptr.Type> ptr [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)])
+ (MOVWconst [0])
+ mem)
+// moves
+(Move [s] _ _ mem) && SizeAndAlign(s).Size() == 0 -> mem
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstore dst (MOVBUload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0 ->
+ (MOVHstore dst (MOVHUload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 2 ->
+ (MOVBstore [1] dst (MOVBUload [1] src mem)
+ (MOVBstore dst (MOVBUload src mem) mem))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0 ->
+ (MOVWstore dst (MOVWload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0 ->
+ (MOVHstore [2] dst (MOVHUload [2] src mem)
+ (MOVHstore dst (MOVHUload src mem) mem))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 ->
+ (MOVBstore [3] dst (MOVBUload [3] src mem)
+ (MOVBstore [2] dst (MOVBUload [2] src mem)
+ (MOVBstore [1] dst (MOVBUload [1] src mem)
+ (MOVBstore dst (MOVBUload src mem) mem))))
+
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 3 ->
+ (MOVBstore [2] dst (MOVBUload [2] src mem)
+ (MOVBstore [1] dst (MOVBUload [1] src mem)
+ (MOVBstore dst (MOVBUload src mem) mem)))
+
+// Medium move uses a duff device
+// 8 and 128 are magic constants, see runtime/mkduff.go
+(Move [s] dst src mem)
+ && SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512
+ && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice ->
+ (DUFFCOPY [8 * (128 - int64(SizeAndAlign(s).Size()/4))] dst src mem)
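+// e.g. each copy element is a load/store pair (8 bytes of code per 4-byte
+// word moved, see runtime/mkduff.go), which is where the 8 comes from.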
+
+// Large move uses a loop
+(Move [s] dst src mem)
+ && (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%4 != 0 ->
+ (LoweredMove [SizeAndAlign(s).Align()]
+ dst
+ src
+ (ADDconst <src.Type> src [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)])
+ mem)
+
+// calls
(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
+(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
+(DeferCall [argwid] mem) -> (CALLdefer [argwid] mem)
+(GoCall [argwid] mem) -> (CALLgo [argwid] mem)
+(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)
-// Absorb LessThan into blocks.
+// checks
+(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
+(IsNonNil ptr) -> (NotEqual (CMPconst [0] ptr))
+(IsInBounds idx len) -> (LessThanU (CMP idx len))
+(IsSliceInBounds idx len) -> (LessEqualU (CMP idx len))
+
+// pseudo-ops
+(GetClosurePtr) -> (LoweredGetClosurePtr)
+(Convert x mem) -> (MOVWconvert x mem)
+
+// Absorb pseudo-ops into blocks.
+(If (Equal cc) yes no) -> (EQ cc yes no)
+(If (NotEqual cc) yes no) -> (NE cc yes no)
(If (LessThan cc) yes no) -> (LT cc yes no)
+(If (LessThanU cc) yes no) -> (ULT cc yes no)
+(If (LessEqual cc) yes no) -> (LE cc yes no)
+(If (LessEqualU cc) yes no) -> (ULE cc yes no)
+(If (GreaterThan cc) yes no) -> (GT cc yes no)
+(If (GreaterThanU cc) yes no) -> (UGT cc yes no)
+(If (GreaterEqual cc) yes no) -> (GE cc yes no)
+(If (GreaterEqualU cc) yes no) -> (UGE cc yes no)
+(If cond yes no) -> (NE (CMPconst [0] cond) yes no)
+// Absorb boolean tests into blocks
+(NE (CMPconst [0] (Equal cc)) yes no) -> (EQ cc yes no)
+(NE (CMPconst [0] (NotEqual cc)) yes no) -> (NE cc yes no)
+(NE (CMPconst [0] (LessThan cc)) yes no) -> (LT cc yes no)
+(NE (CMPconst [0] (LessThanU cc)) yes no) -> (ULT cc yes no)
+(NE (CMPconst [0] (LessEqual cc)) yes no) -> (LE cc yes no)
+(NE (CMPconst [0] (LessEqualU cc)) yes no) -> (ULE cc yes no)
+(NE (CMPconst [0] (GreaterThan cc)) yes no) -> (GT cc yes no)
+(NE (CMPconst [0] (GreaterThanU cc)) yes no) -> (UGT cc yes no)
+(NE (CMPconst [0] (GreaterEqual cc)) yes no) -> (GE cc yes no)
+(NE (CMPconst [0] (GreaterEqualU cc)) yes no) -> (UGE cc yes no)
// Optimizations
+// fold offset into address
+(ADDconst [off1] (MOVWaddr [off2] {sym} ptr)) -> (MOVWaddr [off1+off2] {sym} ptr)
+
+// fold address into load/store
+(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVBload [off1+off2] {sym} ptr mem)
+(MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVBUload [off1+off2] {sym} ptr mem)
+(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVHload [off1+off2] {sym} ptr mem)
+(MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVHUload [off1+off2] {sym} ptr mem)
+(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVWload [off1+off2] {sym} ptr mem)
+(MOVFload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVFload [off1+off2] {sym} ptr mem)
+(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVDload [off1+off2] {sym} ptr mem)
+
+(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) -> (MOVBstore [off1+off2] {sym} ptr val mem)
+(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) -> (MOVHstore [off1+off2] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) -> (MOVWstore [off1+off2] {sym} ptr val mem)
+(MOVFstore [off1] {sym} (ADDconst [off2] ptr) val mem) -> (MOVFstore [off1+off2] {sym} ptr val mem)
+(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) -> (MOVDstore [off1+off2] {sym} ptr val mem)
+
+(MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+(MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
+ (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
+ (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
+ (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+
+// replace load from same location as preceding store with copy
+(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type) -> x
+(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type) -> x
+(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type) -> x
+(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type) -> x
+(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+
+(MOVWloadidx ptr idx (MOVWstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) -> x
+(MOVWloadshiftLL ptr idx [c] (MOVWstoreshiftLL ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) -> x
+(MOVWloadshiftRL ptr idx [c] (MOVWstoreshiftRL ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) -> x
+(MOVWloadshiftRA ptr idx [c] (MOVWstoreshiftRA ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) -> x
+
+// fold constants into arithmetic ops
(ADD (MOVWconst [c]) x) -> (ADDconst [c] x)
(ADD x (MOVWconst [c])) -> (ADDconst [c] x)
-(MOVWload [off1] {sym1} (ADDconst [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
- (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVWstore [off1] {sym1} (ADDconst [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
- (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(SUB (MOVWconst [c]) x) -> (RSBconst [c] x)
+(SUB x (MOVWconst [c])) -> (SUBconst [c] x)
+(RSB (MOVWconst [c]) x) -> (SUBconst [c] x)
+(RSB x (MOVWconst [c])) -> (RSBconst [c] x)
+
+(ADDS (MOVWconst [c]) x) -> (ADDSconst [c] x)
+(ADDS x (MOVWconst [c])) -> (ADDSconst [c] x)
+(SUBS (MOVWconst [c]) x) -> (RSBSconst [c] x)
+(SUBS x (MOVWconst [c])) -> (SUBSconst [c] x)
+
+(ADC (MOVWconst [c]) x flags) -> (ADCconst [c] x flags)
+(ADC x (MOVWconst [c]) flags) -> (ADCconst [c] x flags)
+(SBC (MOVWconst [c]) x flags) -> (RSCconst [c] x flags)
+(SBC x (MOVWconst [c]) flags) -> (SBCconst [c] x flags)
+
+(AND (MOVWconst [c]) x) -> (ANDconst [c] x)
+(AND x (MOVWconst [c])) -> (ANDconst [c] x)
+(OR (MOVWconst [c]) x) -> (ORconst [c] x)
+(OR x (MOVWconst [c])) -> (ORconst [c] x)
+(XOR (MOVWconst [c]) x) -> (XORconst [c] x)
+(XOR x (MOVWconst [c])) -> (XORconst [c] x)
+(BIC x (MOVWconst [c])) -> (BICconst [c] x)
+
+(SLL x (MOVWconst [c])) -> (SLLconst x [c&31]) // Note: I don't think we ever generate bad constant shifts (i.e. c>=32)
+(SRL x (MOVWconst [c])) -> (SRLconst x [c&31])
+(SRA x (MOVWconst [c])) -> (SRAconst x [c&31])
+
+(CMP x (MOVWconst [c])) -> (CMPconst [c] x)
+(CMP (MOVWconst [c]) x) -> (InvertFlags (CMPconst [c] x))
+
+// don't extend after a proper load
+// A MOVWreg is not emitted if the src and dst registers are the same, but it still ensures the type.
+(MOVBreg x:(MOVBload _ _)) -> (MOVWreg x)
+(MOVBUreg x:(MOVBUload _ _)) -> (MOVWreg x)
+(MOVHreg x:(MOVBload _ _)) -> (MOVWreg x)
+(MOVHreg x:(MOVBUload _ _)) -> (MOVWreg x)
+(MOVHreg x:(MOVHload _ _)) -> (MOVWreg x)
+(MOVHUreg x:(MOVBUload _ _)) -> (MOVWreg x)
+(MOVHUreg x:(MOVHUload _ _)) -> (MOVWreg x)
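+// e.g. MOVBload already sign-extends the loaded byte to 32 bits, so a
+// following MOVBreg is redundant and reduces to a type-only MOVWreg.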
+
+// fold extensions and ANDs together
+(MOVBUreg (ANDconst [c] x)) -> (ANDconst [c&0xff] x)
+(MOVHUreg (ANDconst [c] x)) -> (ANDconst [c&0xffff] x)
+(MOVBreg (ANDconst [c] x)) && c & 0x80 == 0 -> (ANDconst [c&0x7f] x)
+(MOVHreg (ANDconst [c] x)) && c & 0x8000 == 0 -> (ANDconst [c&0x7fff] x)
+
+// fold double extensions
+(MOVBreg x:(MOVBreg _)) -> (MOVWreg x)
+(MOVBUreg x:(MOVBUreg _)) -> (MOVWreg x)
+(MOVHreg x:(MOVBreg _)) -> (MOVWreg x)
+(MOVHreg x:(MOVBUreg _)) -> (MOVWreg x)
+(MOVHreg x:(MOVHreg _)) -> (MOVWreg x)
+(MOVHUreg x:(MOVBUreg _)) -> (MOVWreg x)
+(MOVHUreg x:(MOVHUreg _)) -> (MOVWreg x)
+
+// don't extend before store
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
+
+// if a register move has only 1 use, just use the same register without emitting an instruction
+// MOVWnop doesn't emit an instruction; it only ensures the type.
+(MOVWreg x) && x.Uses == 1 -> (MOVWnop x)
+
+// mul by constant
+(MUL x (MOVWconst [c])) && int32(c) == -1 -> (RSBconst [0] x)
+(MUL _ (MOVWconst [0])) -> (MOVWconst [0])
+(MUL x (MOVWconst [1])) -> x
+(MUL x (MOVWconst [c])) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x)
+(MUL x (MOVWconst [c])) && isPowerOfTwo(c-1) && int32(c) >= 3 -> (ADDshiftLL x x [log2(c-1)])
+(MUL x (MOVWconst [c])) && isPowerOfTwo(c+1) && int32(c) >= 7 -> (RSBshiftLL x x [log2(c+1)])
+(MUL x (MOVWconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
+(MUL x (MOVWconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
+(MUL x (MOVWconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SLLconst [log2(c/7)] (RSBshiftLL <x.Type> x x [3]))
+(MUL x (MOVWconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
+
+(MUL (MOVWconst [c]) x) && int32(c) == -1 -> (RSBconst [0] x)
+(MUL (MOVWconst [0]) _) -> (MOVWconst [0])
+(MUL (MOVWconst [1]) x) -> x
+(MUL (MOVWconst [c]) x) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x)
+(MUL (MOVWconst [c]) x) && isPowerOfTwo(c-1) && int32(c) >= 3 -> (ADDshiftLL x x [log2(c-1)])
+(MUL (MOVWconst [c]) x) && isPowerOfTwo(c+1) && int32(c) >= 7 -> (RSBshiftLL x x [log2(c+1)])
+(MUL (MOVWconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
+(MUL (MOVWconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
+(MUL (MOVWconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SLLconst [log2(c/7)] (RSBshiftLL <x.Type> x x [3]))
+(MUL (MOVWconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
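+// e.g. x*5 lowers to x + x<<2 (ADDshiftLL x x [2]) and x*7 to x<<3 - x
+// (RSBshiftLL x x [3]), avoiding the multiplier entirely.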
+
+(MULA x (MOVWconst [c]) a) && int32(c) == -1 -> (SUB a x)
+(MULA _ (MOVWconst [0]) a) -> a
+(MULA x (MOVWconst [1]) a) -> (ADD x a)
+(MULA x (MOVWconst [c]) a) && isPowerOfTwo(c) -> (ADD (SLLconst <x.Type> [log2(c)] x) a)
+(MULA x (MOVWconst [c]) a) && isPowerOfTwo(c-1) && int32(c) >= 3 -> (ADD (ADDshiftLL <x.Type> x x [log2(c-1)]) a)
+(MULA x (MOVWconst [c]) a) && isPowerOfTwo(c+1) && int32(c) >= 7 -> (ADD (RSBshiftLL <x.Type> x x [log2(c+1)]) a)
+(MULA x (MOVWconst [c]) a) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/3)] (ADDshiftLL <x.Type> x x [1])) a)
+(MULA x (MOVWconst [c]) a) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])) a)
+(MULA x (MOVWconst [c]) a) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/7)] (RSBshiftLL <x.Type> x x [3])) a)
+(MULA x (MOVWconst [c]) a) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])) a)
+
+(MULA (MOVWconst [c]) x a) && int32(c) == -1 -> (SUB a x)
+(MULA (MOVWconst [0]) _ a) -> a
+(MULA (MOVWconst [1]) x a) -> (ADD x a)
+(MULA (MOVWconst [c]) x a) && isPowerOfTwo(c) -> (ADD (SLLconst <x.Type> [log2(c)] x) a)
+(MULA (MOVWconst [c]) x a) && isPowerOfTwo(c-1) && int32(c) >= 3 -> (ADD (ADDshiftLL <x.Type> x x [log2(c-1)]) a)
+(MULA (MOVWconst [c]) x a) && isPowerOfTwo(c+1) && int32(c) >= 7 -> (ADD (RSBshiftLL <x.Type> x x [log2(c+1)]) a)
+(MULA (MOVWconst [c]) x a) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/3)] (ADDshiftLL <x.Type> x x [1])) a)
+(MULA (MOVWconst [c]) x a) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])) a)
+(MULA (MOVWconst [c]) x a) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/7)] (RSBshiftLL <x.Type> x x [3])) a)
+(MULA (MOVWconst [c]) x a) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (ADD (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])) a)
+
+// div by constant
+(DIVU x (MOVWconst [1])) -> x
+(DIVU x (MOVWconst [c])) && isPowerOfTwo(c) -> (SRLconst [log2(c)] x)
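+// e.g. an unsigned divide by 8 is just a logical right shift by 3.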
+
+// constant comparisons
+(CMPconst (MOVWconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
+(CMPconst (MOVWconst [x]) [y]) && int32(x)<int32(y) && uint32(x)<uint32(y) -> (FlagLT_ULT)
+(CMPconst (MOVWconst [x]) [y]) && int32(x)<int32(y) && uint32(x)>uint32(y) -> (FlagLT_UGT)
+(CMPconst (MOVWconst [x]) [y]) && int32(x)>int32(y) && uint32(x)<uint32(y) -> (FlagGT_ULT)
+(CMPconst (MOVWconst [x]) [y]) && int32(x)>int32(y) && uint32(x)>uint32(y) -> (FlagGT_UGT)
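+// e.g. comparing x=1 with y=-1: 1 > -1 signed but 1 < 0xffffffff unsigned,
+// so the result is FlagGT_ULT.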
+
+// other known comparisons
+(CMPconst (MOVBUreg _) [c]) && 0xff < c -> (FlagLT_ULT)
+(CMPconst (MOVHUreg _) [c]) && 0xffff < c -> (FlagLT_ULT)
+(CMPconst (ANDconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT_ULT)
+(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n) -> (FlagLT_ULT)
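+// e.g. a zero-extended byte is at most 0xff, so comparing it against a
+// constant above 0xff is always "less than", signed and unsigned.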
+
+// absorb flag constants into branches
+(EQ (FlagEQ) yes no) -> (First nil yes no)
+(EQ (FlagLT_ULT) yes no) -> (First nil no yes)
+(EQ (FlagLT_UGT) yes no) -> (First nil no yes)
+(EQ (FlagGT_ULT) yes no) -> (First nil no yes)
+(EQ (FlagGT_UGT) yes no) -> (First nil no yes)
+
+(NE (FlagEQ) yes no) -> (First nil no yes)
+(NE (FlagLT_ULT) yes no) -> (First nil yes no)
+(NE (FlagLT_UGT) yes no) -> (First nil yes no)
+(NE (FlagGT_ULT) yes no) -> (First nil yes no)
+(NE (FlagGT_UGT) yes no) -> (First nil yes no)
+
+(LT (FlagEQ) yes no) -> (First nil no yes)
+(LT (FlagLT_ULT) yes no) -> (First nil yes no)
+(LT (FlagLT_UGT) yes no) -> (First nil yes no)
+(LT (FlagGT_ULT) yes no) -> (First nil no yes)
+(LT (FlagGT_UGT) yes no) -> (First nil no yes)
+
+(LE (FlagEQ) yes no) -> (First nil yes no)
+(LE (FlagLT_ULT) yes no) -> (First nil yes no)
+(LE (FlagLT_UGT) yes no) -> (First nil yes no)
+(LE (FlagGT_ULT) yes no) -> (First nil no yes)
+(LE (FlagGT_UGT) yes no) -> (First nil no yes)
+
+(GT (FlagEQ) yes no) -> (First nil no yes)
+(GT (FlagLT_ULT) yes no) -> (First nil no yes)
+(GT (FlagLT_UGT) yes no) -> (First nil no yes)
+(GT (FlagGT_ULT) yes no) -> (First nil yes no)
+(GT (FlagGT_UGT) yes no) -> (First nil yes no)
+
+(GE (FlagEQ) yes no) -> (First nil yes no)
+(GE (FlagLT_ULT) yes no) -> (First nil no yes)
+(GE (FlagLT_UGT) yes no) -> (First nil no yes)
+(GE (FlagGT_ULT) yes no) -> (First nil yes no)
+(GE (FlagGT_UGT) yes no) -> (First nil yes no)
+
+(ULT (FlagEQ) yes no) -> (First nil no yes)
+(ULT (FlagLT_ULT) yes no) -> (First nil yes no)
+(ULT (FlagLT_UGT) yes no) -> (First nil no yes)
+(ULT (FlagGT_ULT) yes no) -> (First nil yes no)
+(ULT (FlagGT_UGT) yes no) -> (First nil no yes)
+
+(ULE (FlagEQ) yes no) -> (First nil yes no)
+(ULE (FlagLT_ULT) yes no) -> (First nil yes no)
+(ULE (FlagLT_UGT) yes no) -> (First nil no yes)
+(ULE (FlagGT_ULT) yes no) -> (First nil yes no)
+(ULE (FlagGT_UGT) yes no) -> (First nil no yes)
+
+(UGT (FlagEQ) yes no) -> (First nil no yes)
+(UGT (FlagLT_ULT) yes no) -> (First nil no yes)
+(UGT (FlagLT_UGT) yes no) -> (First nil yes no)
+(UGT (FlagGT_ULT) yes no) -> (First nil no yes)
+(UGT (FlagGT_UGT) yes no) -> (First nil yes no)
+
+(UGE (FlagEQ) yes no) -> (First nil yes no)
+(UGE (FlagLT_ULT) yes no) -> (First nil no yes)
+(UGE (FlagLT_UGT) yes no) -> (First nil yes no)
+(UGE (FlagGT_ULT) yes no) -> (First nil no yes)
+(UGE (FlagGT_UGT) yes no) -> (First nil yes no)
+
+// absorb InvertFlags into branches
+(LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
+(ULT (InvertFlags cmp) yes no) -> (UGT cmp yes no)
+(UGT (InvertFlags cmp) yes no) -> (ULT cmp yes no)
+(ULE (InvertFlags cmp) yes no) -> (UGE cmp yes no)
+(UGE (InvertFlags cmp) yes no) -> (ULE cmp yes no)
+(EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) -> (NE cmp yes no)
+
+// absorb flag constants into boolean values
+(Equal (FlagEQ)) -> (MOVWconst [1])
+(Equal (FlagLT_ULT)) -> (MOVWconst [0])
+(Equal (FlagLT_UGT)) -> (MOVWconst [0])
+(Equal (FlagGT_ULT)) -> (MOVWconst [0])
+(Equal (FlagGT_UGT)) -> (MOVWconst [0])
+
+(NotEqual (FlagEQ)) -> (MOVWconst [0])
+(NotEqual (FlagLT_ULT)) -> (MOVWconst [1])
+(NotEqual (FlagLT_UGT)) -> (MOVWconst [1])
+(NotEqual (FlagGT_ULT)) -> (MOVWconst [1])
+(NotEqual (FlagGT_UGT)) -> (MOVWconst [1])
+
+(LessThan (FlagEQ)) -> (MOVWconst [0])
+(LessThan (FlagLT_ULT)) -> (MOVWconst [1])
+(LessThan (FlagLT_UGT)) -> (MOVWconst [1])
+(LessThan (FlagGT_ULT)) -> (MOVWconst [0])
+(LessThan (FlagGT_UGT)) -> (MOVWconst [0])
+
+(LessThanU (FlagEQ)) -> (MOVWconst [0])
+(LessThanU (FlagLT_ULT)) -> (MOVWconst [1])
+(LessThanU (FlagLT_UGT)) -> (MOVWconst [0])
+(LessThanU (FlagGT_ULT)) -> (MOVWconst [1])
+(LessThanU (FlagGT_UGT)) -> (MOVWconst [0])
+
+(LessEqual (FlagEQ)) -> (MOVWconst [1])
+(LessEqual (FlagLT_ULT)) -> (MOVWconst [1])
+(LessEqual (FlagLT_UGT)) -> (MOVWconst [1])
+(LessEqual (FlagGT_ULT)) -> (MOVWconst [0])
+(LessEqual (FlagGT_UGT)) -> (MOVWconst [0])
+
+(LessEqualU (FlagEQ)) -> (MOVWconst [1])
+(LessEqualU (FlagLT_ULT)) -> (MOVWconst [1])
+(LessEqualU (FlagLT_UGT)) -> (MOVWconst [0])
+(LessEqualU (FlagGT_ULT)) -> (MOVWconst [1])
+(LessEqualU (FlagGT_UGT)) -> (MOVWconst [0])
+
+(GreaterThan (FlagEQ)) -> (MOVWconst [0])
+(GreaterThan (FlagLT_ULT)) -> (MOVWconst [0])
+(GreaterThan (FlagLT_UGT)) -> (MOVWconst [0])
+(GreaterThan (FlagGT_ULT)) -> (MOVWconst [1])
+(GreaterThan (FlagGT_UGT)) -> (MOVWconst [1])
+
+(GreaterThanU (FlagEQ)) -> (MOVWconst [0])
+(GreaterThanU (FlagLT_ULT)) -> (MOVWconst [0])
+(GreaterThanU (FlagLT_UGT)) -> (MOVWconst [1])
+(GreaterThanU (FlagGT_ULT)) -> (MOVWconst [0])
+(GreaterThanU (FlagGT_UGT)) -> (MOVWconst [1])
+
+(GreaterEqual (FlagEQ)) -> (MOVWconst [1])
+(GreaterEqual (FlagLT_ULT)) -> (MOVWconst [0])
+(GreaterEqual (FlagLT_UGT)) -> (MOVWconst [0])
+(GreaterEqual (FlagGT_ULT)) -> (MOVWconst [1])
+(GreaterEqual (FlagGT_UGT)) -> (MOVWconst [1])
+
+(GreaterEqualU (FlagEQ)) -> (MOVWconst [1])
+(GreaterEqualU (FlagLT_ULT)) -> (MOVWconst [0])
+(GreaterEqualU (FlagLT_UGT)) -> (MOVWconst [1])
+(GreaterEqualU (FlagGT_ULT)) -> (MOVWconst [0])
+(GreaterEqualU (FlagGT_UGT)) -> (MOVWconst [1])
+
+// absorb InvertFlags into boolean values
+(Equal (InvertFlags x)) -> (Equal x)
+(NotEqual (InvertFlags x)) -> (NotEqual x)
+(LessThan (InvertFlags x)) -> (GreaterThan x)
+(LessThanU (InvertFlags x)) -> (GreaterThanU x)
+(GreaterThan (InvertFlags x)) -> (LessThan x)
+(GreaterThanU (InvertFlags x)) -> (LessThanU x)
+(LessEqual (InvertFlags x)) -> (GreaterEqual x)
+(LessEqualU (InvertFlags x)) -> (GreaterEqualU x)
+(GreaterEqual (InvertFlags x)) -> (LessEqual x)
+(GreaterEqualU (InvertFlags x)) -> (LessEqualU x)
+
+// absorb flag constants into conditional instructions
+(CMOVWLSconst _ (FlagEQ) [c]) -> (MOVWconst [c])
+(CMOVWLSconst _ (FlagLT_ULT) [c]) -> (MOVWconst [c])
+(CMOVWLSconst x (FlagLT_UGT)) -> x
+(CMOVWLSconst _ (FlagGT_ULT) [c]) -> (MOVWconst [c])
+(CMOVWLSconst x (FlagGT_UGT)) -> x
+
+(CMOVWHSconst _ (FlagEQ) [c]) -> (MOVWconst [c])
+(CMOVWHSconst x (FlagLT_ULT)) -> x
+(CMOVWHSconst _ (FlagLT_UGT) [c]) -> (MOVWconst [c])
+(CMOVWHSconst x (FlagGT_ULT)) -> x
+(CMOVWHSconst _ (FlagGT_UGT) [c]) -> (MOVWconst [c])
+
+(CMOVWLSconst x (InvertFlags flags) [c]) -> (CMOVWHSconst x flags [c])
+(CMOVWHSconst x (InvertFlags flags) [c]) -> (CMOVWLSconst x flags [c])
+
+(SRAcond x _ (FlagEQ)) -> (SRAconst x [31])
+(SRAcond x y (FlagLT_ULT)) -> (SRA x y)
+(SRAcond x _ (FlagLT_UGT)) -> (SRAconst x [31])
+(SRAcond x y (FlagGT_ULT)) -> (SRA x y)
+(SRAcond x _ (FlagGT_UGT)) -> (SRAconst x [31])
+
+// remove redundant *const ops
+(ADDconst [0] x) -> x
+(SUBconst [0] x) -> x
+(ANDconst [0] _) -> (MOVWconst [0])
+(ANDconst [c] x) && int32(c)==-1 -> x
+(ORconst [0] x) -> x
+(ORconst [c] _) && int32(c)==-1 -> (MOVWconst [-1])
+(XORconst [0] x) -> x
+(BICconst [0] x) -> x
+(BICconst [c] _) && int32(c)==-1 -> (MOVWconst [0])
+
+// generic constant folding
+(ADDconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(c+d))])
+(ADDconst [c] (ADDconst [d] x)) -> (ADDconst [int64(int32(c+d))] x)
+(ADDconst [c] (SUBconst [d] x)) -> (ADDconst [int64(int32(c-d))] x)
+(ADDconst [c] (RSBconst [d] x)) -> (RSBconst [int64(int32(c+d))] x)
+(ADCconst [c] (ADDconst [d] x) flags) -> (ADCconst [int64(int32(c+d))] x flags)
+(ADCconst [c] (SUBconst [d] x) flags) -> (ADCconst [int64(int32(c-d))] x flags)
+(SUBconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(d-c))])
+(SUBconst [c] (SUBconst [d] x)) -> (ADDconst [int64(int32(-c-d))] x)
+(SUBconst [c] (ADDconst [d] x)) -> (ADDconst [int64(int32(-c+d))] x)
+(SUBconst [c] (RSBconst [d] x)) -> (RSBconst [int64(int32(-c+d))] x)
+(SBCconst [c] (ADDconst [d] x) flags) -> (SBCconst [int64(int32(c-d))] x flags)
+(SBCconst [c] (SUBconst [d] x) flags) -> (SBCconst [int64(int32(c+d))] x flags)
+(RSBconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(c-d))])
+(RSBconst [c] (RSBconst [d] x)) -> (ADDconst [int64(int32(c-d))] x)
+(RSBconst [c] (ADDconst [d] x)) -> (RSBconst [int64(int32(c-d))] x)
+(RSBconst [c] (SUBconst [d] x)) -> (RSBconst [int64(int32(c+d))] x)
+(RSCconst [c] (ADDconst [d] x) flags) -> (RSCconst [int64(int32(c-d))] x flags)
+(RSCconst [c] (SUBconst [d] x) flags) -> (RSCconst [int64(int32(c+d))] x flags)
+(SLLconst [c] (MOVWconst [d])) -> (MOVWconst [int64(uint32(d)<<uint64(c))])
+(SRLconst [c] (MOVWconst [d])) -> (MOVWconst [int64(uint32(d)>>uint64(c))])
+(SRAconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(d)>>uint64(c))])
+(MUL (MOVWconst [c]) (MOVWconst [d])) -> (MOVWconst [int64(int32(c*d))])
+(MULA (MOVWconst [c]) (MOVWconst [d]) a) -> (ADDconst [int64(int32(c*d))] a)
+(DIV (MOVWconst [c]) (MOVWconst [d])) -> (MOVWconst [int64(int32(c)/int32(d))])
+(DIVU (MOVWconst [c]) (MOVWconst [d])) -> (MOVWconst [int64(uint32(c)/uint32(d))])
+(ANDconst [c] (MOVWconst [d])) -> (MOVWconst [c&d])
+(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x)
+(ORconst [c] (MOVWconst [d])) -> (MOVWconst [c|d])
+(ORconst [c] (ORconst [d] x)) -> (ORconst [c|d] x)
+(XORconst [c] (MOVWconst [d])) -> (MOVWconst [c^d])
+(XORconst [c] (XORconst [d] x)) -> (XORconst [c^d] x)
+(BICconst [c] (MOVWconst [d])) -> (MOVWconst [d&^c])
+(MVN (MOVWconst [c])) -> (MOVWconst [^c])
+(MOVBreg (MOVWconst [c])) -> (MOVWconst [int64(int8(c))])
+(MOVBUreg (MOVWconst [c])) -> (MOVWconst [int64(uint8(c))])
+(MOVHreg (MOVWconst [c])) -> (MOVWconst [int64(int16(c))])
+(MOVHUreg (MOVWconst [c])) -> (MOVWconst [int64(uint16(c))])
+(MOVWreg (MOVWconst [c])) -> (MOVWconst [c])
+
+// absorb shifts into ops
+(ADD x (SLLconst [c] y)) -> (ADDshiftLL x y [c])
+(ADD (SLLconst [c] y) x) -> (ADDshiftLL x y [c])
+(ADD x (SRLconst [c] y)) -> (ADDshiftRL x y [c])
+(ADD (SRLconst [c] y) x) -> (ADDshiftRL x y [c])
+(ADD x (SRAconst [c] y)) -> (ADDshiftRA x y [c])
+(ADD (SRAconst [c] y) x) -> (ADDshiftRA x y [c])
+(ADD x (SLL y z)) -> (ADDshiftLLreg x y z)
+(ADD (SLL y z) x) -> (ADDshiftLLreg x y z)
+(ADD x (SRL y z)) -> (ADDshiftRLreg x y z)
+(ADD (SRL y z) x) -> (ADDshiftRLreg x y z)
+(ADD x (SRA y z)) -> (ADDshiftRAreg x y z)
+(ADD (SRA y z) x) -> (ADDshiftRAreg x y z)
+(ADC x (SLLconst [c] y) flags) -> (ADCshiftLL x y [c] flags)
+(ADC (SLLconst [c] y) x flags) -> (ADCshiftLL x y [c] flags)
+(ADC x (SRLconst [c] y) flags) -> (ADCshiftRL x y [c] flags)
+(ADC (SRLconst [c] y) x flags) -> (ADCshiftRL x y [c] flags)
+(ADC x (SRAconst [c] y) flags) -> (ADCshiftRA x y [c] flags)
+(ADC (SRAconst [c] y) x flags) -> (ADCshiftRA x y [c] flags)
+(ADC x (SLL y z) flags) -> (ADCshiftLLreg x y z flags)
+(ADC (SLL y z) x flags) -> (ADCshiftLLreg x y z flags)
+(ADC x (SRL y z) flags) -> (ADCshiftRLreg x y z flags)
+(ADC (SRL y z) x flags) -> (ADCshiftRLreg x y z flags)
+(ADC x (SRA y z) flags) -> (ADCshiftRAreg x y z flags)
+(ADC (SRA y z) x flags) -> (ADCshiftRAreg x y z flags)
+(ADDS x (SLLconst [c] y)) -> (ADDSshiftLL x y [c])
+(ADDS (SLLconst [c] y) x) -> (ADDSshiftLL x y [c])
+(ADDS x (SRLconst [c] y)) -> (ADDSshiftRL x y [c])
+(ADDS (SRLconst [c] y) x) -> (ADDSshiftRL x y [c])
+(ADDS x (SRAconst [c] y)) -> (ADDSshiftRA x y [c])
+(ADDS (SRAconst [c] y) x) -> (ADDSshiftRA x y [c])
+(ADDS x (SLL y z)) -> (ADDSshiftLLreg x y z)
+(ADDS (SLL y z) x) -> (ADDSshiftLLreg x y z)
+(ADDS x (SRL y z)) -> (ADDSshiftRLreg x y z)
+(ADDS (SRL y z) x) -> (ADDSshiftRLreg x y z)
+(ADDS x (SRA y z)) -> (ADDSshiftRAreg x y z)
+(ADDS (SRA y z) x) -> (ADDSshiftRAreg x y z)
+(SUB x (SLLconst [c] y)) -> (SUBshiftLL x y [c])
+(SUB (SLLconst [c] y) x) -> (RSBshiftLL x y [c])
+(SUB x (SRLconst [c] y)) -> (SUBshiftRL x y [c])
+(SUB (SRLconst [c] y) x) -> (RSBshiftRL x y [c])
+(SUB x (SRAconst [c] y)) -> (SUBshiftRA x y [c])
+(SUB (SRAconst [c] y) x) -> (RSBshiftRA x y [c])
+(SUB x (SLL y z)) -> (SUBshiftLLreg x y z)
+(SUB (SLL y z) x) -> (RSBshiftLLreg x y z)
+(SUB x (SRL y z)) -> (SUBshiftRLreg x y z)
+(SUB (SRL y z) x) -> (RSBshiftRLreg x y z)
+(SUB x (SRA y z)) -> (SUBshiftRAreg x y z)
+(SUB (SRA y z) x) -> (RSBshiftRAreg x y z)
+(SBC x (SLLconst [c] y) flags) -> (SBCshiftLL x y [c] flags)
+(SBC (SLLconst [c] y) x flags) -> (RSCshiftLL x y [c] flags)
+(SBC x (SRLconst [c] y) flags) -> (SBCshiftRL x y [c] flags)
+(SBC (SRLconst [c] y) x flags) -> (RSCshiftRL x y [c] flags)
+(SBC x (SRAconst [c] y) flags) -> (SBCshiftRA x y [c] flags)
+(SBC (SRAconst [c] y) x flags) -> (RSCshiftRA x y [c] flags)
+(SBC x (SLL y z) flags) -> (SBCshiftLLreg x y z flags)
+(SBC (SLL y z) x flags) -> (RSCshiftLLreg x y z flags)
+(SBC x (SRL y z) flags) -> (SBCshiftRLreg x y z flags)
+(SBC (SRL y z) x flags) -> (RSCshiftRLreg x y z flags)
+(SBC x (SRA y z) flags) -> (SBCshiftRAreg x y z flags)
+(SBC (SRA y z) x flags) -> (RSCshiftRAreg x y z flags)
+(SUBS x (SLLconst [c] y)) -> (SUBSshiftLL x y [c])
+(SUBS (SLLconst [c] y) x) -> (RSBSshiftLL x y [c])
+(SUBS x (SRLconst [c] y)) -> (SUBSshiftRL x y [c])
+(SUBS (SRLconst [c] y) x) -> (RSBSshiftRL x y [c])
+(SUBS x (SRAconst [c] y)) -> (SUBSshiftRA x y [c])
+(SUBS (SRAconst [c] y) x) -> (RSBSshiftRA x y [c])
+(SUBS x (SLL y z)) -> (SUBSshiftLLreg x y z)
+(SUBS (SLL y z) x) -> (RSBSshiftLLreg x y z)
+(SUBS x (SRL y z)) -> (SUBSshiftRLreg x y z)
+(SUBS (SRL y z) x) -> (RSBSshiftRLreg x y z)
+(SUBS x (SRA y z)) -> (SUBSshiftRAreg x y z)
+(SUBS (SRA y z) x) -> (RSBSshiftRAreg x y z)
+(RSB x (SLLconst [c] y)) -> (RSBshiftLL x y [c])
+(RSB (SLLconst [c] y) x) -> (SUBshiftLL x y [c])
+(RSB x (SRLconst [c] y)) -> (RSBshiftRL x y [c])
+(RSB (SRLconst [c] y) x) -> (SUBshiftRL x y [c])
+(RSB x (SRAconst [c] y)) -> (RSBshiftRA x y [c])
+(RSB (SRAconst [c] y) x) -> (SUBshiftRA x y [c])
+(RSB x (SLL y z)) -> (RSBshiftLLreg x y z)
+(RSB (SLL y z) x) -> (SUBshiftLLreg x y z)
+(RSB x (SRL y z)) -> (RSBshiftRLreg x y z)
+(RSB (SRL y z) x) -> (SUBshiftRLreg x y z)
+(RSB x (SRA y z)) -> (RSBshiftRAreg x y z)
+(RSB (SRA y z) x) -> (SUBshiftRAreg x y z)
+(AND x (SLLconst [c] y)) -> (ANDshiftLL x y [c])
+(AND (SLLconst [c] y) x) -> (ANDshiftLL x y [c])
+(AND x (SRLconst [c] y)) -> (ANDshiftRL x y [c])
+(AND (SRLconst [c] y) x) -> (ANDshiftRL x y [c])
+(AND x (SRAconst [c] y)) -> (ANDshiftRA x y [c])
+(AND (SRAconst [c] y) x) -> (ANDshiftRA x y [c])
+(AND x (SLL y z)) -> (ANDshiftLLreg x y z)
+(AND (SLL y z) x) -> (ANDshiftLLreg x y z)
+(AND x (SRL y z)) -> (ANDshiftRLreg x y z)
+(AND (SRL y z) x) -> (ANDshiftRLreg x y z)
+(AND x (SRA y z)) -> (ANDshiftRAreg x y z)
+(AND (SRA y z) x) -> (ANDshiftRAreg x y z)
+(OR x (SLLconst [c] y)) -> (ORshiftLL x y [c])
+(OR (SLLconst [c] y) x) -> (ORshiftLL x y [c])
+(OR x (SRLconst [c] y)) -> (ORshiftRL x y [c])
+(OR (SRLconst [c] y) x) -> (ORshiftRL x y [c])
+(OR x (SRAconst [c] y)) -> (ORshiftRA x y [c])
+(OR (SRAconst [c] y) x) -> (ORshiftRA x y [c])
+(OR x (SLL y z)) -> (ORshiftLLreg x y z)
+(OR (SLL y z) x) -> (ORshiftLLreg x y z)
+(OR x (SRL y z)) -> (ORshiftRLreg x y z)
+(OR (SRL y z) x) -> (ORshiftRLreg x y z)
+(OR x (SRA y z)) -> (ORshiftRAreg x y z)
+(OR (SRA y z) x) -> (ORshiftRAreg x y z)
+(XOR x (SLLconst [c] y)) -> (XORshiftLL x y [c])
+(XOR (SLLconst [c] y) x) -> (XORshiftLL x y [c])
+(XOR x (SRLconst [c] y)) -> (XORshiftRL x y [c])
+(XOR (SRLconst [c] y) x) -> (XORshiftRL x y [c])
+(XOR x (SRAconst [c] y)) -> (XORshiftRA x y [c])
+(XOR (SRAconst [c] y) x) -> (XORshiftRA x y [c])
+(XOR x (SLL y z)) -> (XORshiftLLreg x y z)
+(XOR (SLL y z) x) -> (XORshiftLLreg x y z)
+(XOR x (SRL y z)) -> (XORshiftRLreg x y z)
+(XOR (SRL y z) x) -> (XORshiftRLreg x y z)
+(XOR x (SRA y z)) -> (XORshiftRAreg x y z)
+(XOR (SRA y z) x) -> (XORshiftRAreg x y z)
+(BIC x (SLLconst [c] y)) -> (BICshiftLL x y [c])
+(BIC x (SRLconst [c] y)) -> (BICshiftRL x y [c])
+(BIC x (SRAconst [c] y)) -> (BICshiftRA x y [c])
+(BIC x (SLL y z)) -> (BICshiftLLreg x y z)
+(BIC x (SRL y z)) -> (BICshiftRLreg x y z)
+(BIC x (SRA y z)) -> (BICshiftRAreg x y z)
+(MVN (SLLconst [c] x)) -> (MVNshiftLL x [c])
+(MVN (SRLconst [c] x)) -> (MVNshiftRL x [c])
+(MVN (SRAconst [c] x)) -> (MVNshiftRA x [c])
+(MVN (SLL x y)) -> (MVNshiftLLreg x y)
+(MVN (SRL x y)) -> (MVNshiftRLreg x y)
+(MVN (SRA x y)) -> (MVNshiftRAreg x y)
+
+(CMP x (SLLconst [c] y)) -> (CMPshiftLL x y [c])
+(CMP (SLLconst [c] y) x) -> (InvertFlags (CMPshiftLL x y [c]))
+(CMP x (SRLconst [c] y)) -> (CMPshiftRL x y [c])
+(CMP (SRLconst [c] y) x) -> (InvertFlags (CMPshiftRL x y [c]))
+(CMP x (SRAconst [c] y)) -> (CMPshiftRA x y [c])
+(CMP (SRAconst [c] y) x) -> (InvertFlags (CMPshiftRA x y [c]))
+(CMP x (SLL y z)) -> (CMPshiftLLreg x y z)
+(CMP (SLL y z) x) -> (InvertFlags (CMPshiftLLreg x y z))
+(CMP x (SRL y z)) -> (CMPshiftRLreg x y z)
+(CMP (SRL y z) x) -> (InvertFlags (CMPshiftRLreg x y z))
+(CMP x (SRA y z)) -> (CMPshiftRAreg x y z)
+(CMP (SRA y z) x) -> (InvertFlags (CMPshiftRAreg x y z))
+
+// prefer *const ops to *shift ops
+(ADDshiftLL (MOVWconst [c]) x [d]) -> (ADDconst [c] (SLLconst <x.Type> x [d]))
+(ADDshiftRL (MOVWconst [c]) x [d]) -> (ADDconst [c] (SRLconst <x.Type> x [d]))
+(ADDshiftRA (MOVWconst [c]) x [d]) -> (ADDconst [c] (SRAconst <x.Type> x [d]))
+(ADCshiftLL (MOVWconst [c]) x [d] flags) -> (ADCconst [c] (SLLconst <x.Type> x [d]) flags)
+(ADCshiftRL (MOVWconst [c]) x [d] flags) -> (ADCconst [c] (SRLconst <x.Type> x [d]) flags)
+(ADCshiftRA (MOVWconst [c]) x [d] flags) -> (ADCconst [c] (SRAconst <x.Type> x [d]) flags)
+(ADDSshiftLL (MOVWconst [c]) x [d]) -> (ADDSconst [c] (SLLconst <x.Type> x [d]))
+(ADDSshiftRL (MOVWconst [c]) x [d]) -> (ADDSconst [c] (SRLconst <x.Type> x [d]))
+(ADDSshiftRA (MOVWconst [c]) x [d]) -> (ADDSconst [c] (SRAconst <x.Type> x [d]))
+(SUBshiftLL (MOVWconst [c]) x [d]) -> (RSBconst [c] (SLLconst <x.Type> x [d]))
+(SUBshiftRL (MOVWconst [c]) x [d]) -> (RSBconst [c] (SRLconst <x.Type> x [d]))
+(SUBshiftRA (MOVWconst [c]) x [d]) -> (RSBconst [c] (SRAconst <x.Type> x [d]))
+(SBCshiftLL (MOVWconst [c]) x [d] flags) -> (RSCconst [c] (SLLconst <x.Type> x [d]) flags)
+(SBCshiftRL (MOVWconst [c]) x [d] flags) -> (RSCconst [c] (SRLconst <x.Type> x [d]) flags)
+(SBCshiftRA (MOVWconst [c]) x [d] flags) -> (RSCconst [c] (SRAconst <x.Type> x [d]) flags)
+(SUBSshiftLL (MOVWconst [c]) x [d]) -> (RSBSconst [c] (SLLconst <x.Type> x [d]))
+(SUBSshiftRL (MOVWconst [c]) x [d]) -> (RSBSconst [c] (SRLconst <x.Type> x [d]))
+(SUBSshiftRA (MOVWconst [c]) x [d]) -> (RSBSconst [c] (SRAconst <x.Type> x [d]))
+(RSBshiftLL (MOVWconst [c]) x [d]) -> (SUBconst [c] (SLLconst <x.Type> x [d]))
+(RSBshiftRL (MOVWconst [c]) x [d]) -> (SUBconst [c] (SRLconst <x.Type> x [d]))
+(RSBshiftRA (MOVWconst [c]) x [d]) -> (SUBconst [c] (SRAconst <x.Type> x [d]))
+(RSCshiftLL (MOVWconst [c]) x [d] flags) -> (SBCconst [c] (SLLconst <x.Type> x [d]) flags)
+(RSCshiftRL (MOVWconst [c]) x [d] flags) -> (SBCconst [c] (SRLconst <x.Type> x [d]) flags)
+(RSCshiftRA (MOVWconst [c]) x [d] flags) -> (SBCconst [c] (SRAconst <x.Type> x [d]) flags)
+(RSBSshiftLL (MOVWconst [c]) x [d]) -> (SUBSconst [c] (SLLconst <x.Type> x [d]))
+(RSBSshiftRL (MOVWconst [c]) x [d]) -> (SUBSconst [c] (SRLconst <x.Type> x [d]))
+(RSBSshiftRA (MOVWconst [c]) x [d]) -> (SUBSconst [c] (SRAconst <x.Type> x [d]))
+(ANDshiftLL (MOVWconst [c]) x [d]) -> (ANDconst [c] (SLLconst <x.Type> x [d]))
+(ANDshiftRL (MOVWconst [c]) x [d]) -> (ANDconst [c] (SRLconst <x.Type> x [d]))
+(ANDshiftRA (MOVWconst [c]) x [d]) -> (ANDconst [c] (SRAconst <x.Type> x [d]))
+(ORshiftLL (MOVWconst [c]) x [d]) -> (ORconst [c] (SLLconst <x.Type> x [d]))
+(ORshiftRL (MOVWconst [c]) x [d]) -> (ORconst [c] (SRLconst <x.Type> x [d]))
+(ORshiftRA (MOVWconst [c]) x [d]) -> (ORconst [c] (SRAconst <x.Type> x [d]))
+(XORshiftLL (MOVWconst [c]) x [d]) -> (XORconst [c] (SLLconst <x.Type> x [d]))
+(XORshiftRL (MOVWconst [c]) x [d]) -> (XORconst [c] (SRLconst <x.Type> x [d]))
+(XORshiftRA (MOVWconst [c]) x [d]) -> (XORconst [c] (SRAconst <x.Type> x [d]))
+(CMPshiftLL (MOVWconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
+(CMPshiftRL (MOVWconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
+(CMPshiftRA (MOVWconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
+
+(ADDshiftLLreg (MOVWconst [c]) x y) -> (ADDconst [c] (SLL <x.Type> x y))
+(ADDshiftRLreg (MOVWconst [c]) x y) -> (ADDconst [c] (SRL <x.Type> x y))
+(ADDshiftRAreg (MOVWconst [c]) x y) -> (ADDconst [c] (SRA <x.Type> x y))
+(ADCshiftLLreg (MOVWconst [c]) x y flags) -> (ADCconst [c] (SLL <x.Type> x y) flags)
+(ADCshiftRLreg (MOVWconst [c]) x y flags) -> (ADCconst [c] (SRL <x.Type> x y) flags)
+(ADCshiftRAreg (MOVWconst [c]) x y flags) -> (ADCconst [c] (SRA <x.Type> x y) flags)
+(ADDSshiftLLreg (MOVWconst [c]) x y) -> (ADDSconst [c] (SLL <x.Type> x y))
+(ADDSshiftRLreg (MOVWconst [c]) x y) -> (ADDSconst [c] (SRL <x.Type> x y))
+(ADDSshiftRAreg (MOVWconst [c]) x y) -> (ADDSconst [c] (SRA <x.Type> x y))
+(SUBshiftLLreg (MOVWconst [c]) x y) -> (RSBconst [c] (SLL <x.Type> x y))
+(SUBshiftRLreg (MOVWconst [c]) x y) -> (RSBconst [c] (SRL <x.Type> x y))
+(SUBshiftRAreg (MOVWconst [c]) x y) -> (RSBconst [c] (SRA <x.Type> x y))
+(SBCshiftLLreg (MOVWconst [c]) x y flags) -> (RSCconst [c] (SLL <x.Type> x y) flags)
+(SBCshiftRLreg (MOVWconst [c]) x y flags) -> (RSCconst [c] (SRL <x.Type> x y) flags)
+(SBCshiftRAreg (MOVWconst [c]) x y flags) -> (RSCconst [c] (SRA <x.Type> x y) flags)
+(SUBSshiftLLreg (MOVWconst [c]) x y) -> (RSBSconst [c] (SLL <x.Type> x y))
+(SUBSshiftRLreg (MOVWconst [c]) x y) -> (RSBSconst [c] (SRL <x.Type> x y))
+(SUBSshiftRAreg (MOVWconst [c]) x y) -> (RSBSconst [c] (SRA <x.Type> x y))
+(RSBshiftLLreg (MOVWconst [c]) x y) -> (SUBconst [c] (SLL <x.Type> x y))
+(RSBshiftRLreg (MOVWconst [c]) x y) -> (SUBconst [c] (SRL <x.Type> x y))
+(RSBshiftRAreg (MOVWconst [c]) x y) -> (SUBconst [c] (SRA <x.Type> x y))
+(RSCshiftLLreg (MOVWconst [c]) x y flags) -> (SBCconst [c] (SLL <x.Type> x y) flags)
+(RSCshiftRLreg (MOVWconst [c]) x y flags) -> (SBCconst [c] (SRL <x.Type> x y) flags)
+(RSCshiftRAreg (MOVWconst [c]) x y flags) -> (SBCconst [c] (SRA <x.Type> x y) flags)
+(RSBSshiftLLreg (MOVWconst [c]) x y) -> (SUBSconst [c] (SLL <x.Type> x y))
+(RSBSshiftRLreg (MOVWconst [c]) x y) -> (SUBSconst [c] (SRL <x.Type> x y))
+(RSBSshiftRAreg (MOVWconst [c]) x y) -> (SUBSconst [c] (SRA <x.Type> x y))
+(ANDshiftLLreg (MOVWconst [c]) x y) -> (ANDconst [c] (SLL <x.Type> x y))
+(ANDshiftRLreg (MOVWconst [c]) x y) -> (ANDconst [c] (SRL <x.Type> x y))
+(ANDshiftRAreg (MOVWconst [c]) x y) -> (ANDconst [c] (SRA <x.Type> x y))
+(ORshiftLLreg (MOVWconst [c]) x y) -> (ORconst [c] (SLL <x.Type> x y))
+(ORshiftRLreg (MOVWconst [c]) x y) -> (ORconst [c] (SRL <x.Type> x y))
+(ORshiftRAreg (MOVWconst [c]) x y) -> (ORconst [c] (SRA <x.Type> x y))
+(XORshiftLLreg (MOVWconst [c]) x y) -> (XORconst [c] (SLL <x.Type> x y))
+(XORshiftRLreg (MOVWconst [c]) x y) -> (XORconst [c] (SRL <x.Type> x y))
+(XORshiftRAreg (MOVWconst [c]) x y) -> (XORconst [c] (SRA <x.Type> x y))
+(CMPshiftLLreg (MOVWconst [c]) x y) -> (InvertFlags (CMPconst [c] (SLL <x.Type> x y)))
+(CMPshiftRLreg (MOVWconst [c]) x y) -> (InvertFlags (CMPconst [c] (SRL <x.Type> x y)))
+(CMPshiftRAreg (MOVWconst [c]) x y) -> (InvertFlags (CMPconst [c] (SRA <x.Type> x y)))
+
+// constant folding in *shift ops
+(ADDshiftLL x (MOVWconst [c]) [d]) -> (ADDconst x [int64(uint32(c)<<uint64(d))])
+(ADDshiftRL x (MOVWconst [c]) [d]) -> (ADDconst x [int64(uint32(c)>>uint64(d))])
+(ADDshiftRA x (MOVWconst [c]) [d]) -> (ADDconst x [int64(int32(c)>>uint64(d))])
+(ADCshiftLL x (MOVWconst [c]) [d] flags) -> (ADCconst x [int64(uint32(c)<<uint64(d))] flags)
+(ADCshiftRL x (MOVWconst [c]) [d] flags) -> (ADCconst x [int64(uint32(c)>>uint64(d))] flags)
+(ADCshiftRA x (MOVWconst [c]) [d] flags) -> (ADCconst x [int64(int32(c)>>uint64(d))] flags)
+(ADDSshiftLL x (MOVWconst [c]) [d]) -> (ADDSconst x [int64(uint32(c)<<uint64(d))])
+(ADDSshiftRL x (MOVWconst [c]) [d]) -> (ADDSconst x [int64(uint32(c)>>uint64(d))])
+(ADDSshiftRA x (MOVWconst [c]) [d]) -> (ADDSconst x [int64(int32(c)>>uint64(d))])
+(SUBshiftLL x (MOVWconst [c]) [d]) -> (SUBconst x [int64(uint32(c)<<uint64(d))])
+(SUBshiftRL x (MOVWconst [c]) [d]) -> (SUBconst x [int64(uint32(c)>>uint64(d))])
+(SUBshiftRA x (MOVWconst [c]) [d]) -> (SUBconst x [int64(int32(c)>>uint64(d))])
+(SBCshiftLL x (MOVWconst [c]) [d] flags) -> (SBCconst x [int64(uint32(c)<<uint64(d))] flags)
+(SBCshiftRL x (MOVWconst [c]) [d] flags) -> (SBCconst x [int64(uint32(c)>>uint64(d))] flags)
+(SBCshiftRA x (MOVWconst [c]) [d] flags) -> (SBCconst x [int64(int32(c)>>uint64(d))] flags)
+(SUBSshiftLL x (MOVWconst [c]) [d]) -> (SUBSconst x [int64(uint32(c)<<uint64(d))])
+(SUBSshiftRL x (MOVWconst [c]) [d]) -> (SUBSconst x [int64(uint32(c)>>uint64(d))])
+(SUBSshiftRA x (MOVWconst [c]) [d]) -> (SUBSconst x [int64(int32(c)>>uint64(d))])
+(RSBshiftLL x (MOVWconst [c]) [d]) -> (RSBconst x [int64(uint32(c)<<uint64(d))])
+(RSBshiftRL x (MOVWconst [c]) [d]) -> (RSBconst x [int64(uint32(c)>>uint64(d))])
+(RSBshiftRA x (MOVWconst [c]) [d]) -> (RSBconst x [int64(int32(c)>>uint64(d))])
+(RSCshiftLL x (MOVWconst [c]) [d] flags) -> (RSCconst x [int64(uint32(c)<<uint64(d))] flags)
+(RSCshiftRL x (MOVWconst [c]) [d] flags) -> (RSCconst x [int64(uint32(c)>>uint64(d))] flags)
+(RSCshiftRA x (MOVWconst [c]) [d] flags) -> (RSCconst x [int64(int32(c)>>uint64(d))] flags)
+(RSBSshiftLL x (MOVWconst [c]) [d]) -> (RSBSconst x [int64(uint32(c)<<uint64(d))])
+(RSBSshiftRL x (MOVWconst [c]) [d]) -> (RSBSconst x [int64(uint32(c)>>uint64(d))])
+(RSBSshiftRA x (MOVWconst [c]) [d]) -> (RSBSconst x [int64(int32(c)>>uint64(d))])
+(ANDshiftLL x (MOVWconst [c]) [d]) -> (ANDconst x [int64(uint32(c)<<uint64(d))])
+(ANDshiftRL x (MOVWconst [c]) [d]) -> (ANDconst x [int64(uint32(c)>>uint64(d))])
+(ANDshiftRA x (MOVWconst [c]) [d]) -> (ANDconst x [int64(int32(c)>>uint64(d))])
+(ORshiftLL x (MOVWconst [c]) [d]) -> (ORconst x [int64(uint32(c)<<uint64(d))])
+(ORshiftRL x (MOVWconst [c]) [d]) -> (ORconst x [int64(uint32(c)>>uint64(d))])
+(ORshiftRA x (MOVWconst [c]) [d]) -> (ORconst x [int64(int32(c)>>uint64(d))])
+(XORshiftLL x (MOVWconst [c]) [d]) -> (XORconst x [int64(uint32(c)<<uint64(d))])
+(XORshiftRL x (MOVWconst [c]) [d]) -> (XORconst x [int64(uint32(c)>>uint64(d))])
+(XORshiftRA x (MOVWconst [c]) [d]) -> (XORconst x [int64(int32(c)>>uint64(d))])
+(BICshiftLL x (MOVWconst [c]) [d]) -> (BICconst x [int64(uint32(c)<<uint64(d))])
+(BICshiftRL x (MOVWconst [c]) [d]) -> (BICconst x [int64(uint32(c)>>uint64(d))])
+(BICshiftRA x (MOVWconst [c]) [d]) -> (BICconst x [int64(int32(c)>>uint64(d))])
+(MVNshiftLL (MOVWconst [c]) [d]) -> (MOVWconst [^int64(uint32(c)<<uint64(d))])
+(MVNshiftRL (MOVWconst [c]) [d]) -> (MOVWconst [^int64(uint32(c)>>uint64(d))])
+(MVNshiftRA (MOVWconst [c]) [d]) -> (MOVWconst [^int64(int32(c)>>uint64(d))])
+(CMPshiftLL x (MOVWconst [c]) [d]) -> (CMPconst x [int64(uint32(c)<<uint64(d))])
+(CMPshiftRL x (MOVWconst [c]) [d]) -> (CMPconst x [int64(uint32(c)>>uint64(d))])
+(CMPshiftRA x (MOVWconst [c]) [d]) -> (CMPconst x [int64(int32(c)>>uint64(d))])
+
+(ADDshiftLLreg x y (MOVWconst [c])) -> (ADDshiftLL x y [c])
+(ADDshiftRLreg x y (MOVWconst [c])) -> (ADDshiftRL x y [c])
+(ADDshiftRAreg x y (MOVWconst [c])) -> (ADDshiftRA x y [c])
+(ADCshiftLLreg x y (MOVWconst [c]) flags) -> (ADCshiftLL x y [c] flags)
+(ADCshiftRLreg x y (MOVWconst [c]) flags) -> (ADCshiftRL x y [c] flags)
+(ADCshiftRAreg x y (MOVWconst [c]) flags) -> (ADCshiftRA x y [c] flags)
+(ADDSshiftLLreg x y (MOVWconst [c])) -> (ADDSshiftLL x y [c])
+(ADDSshiftRLreg x y (MOVWconst [c])) -> (ADDSshiftRL x y [c])
+(ADDSshiftRAreg x y (MOVWconst [c])) -> (ADDSshiftRA x y [c])
+(SUBshiftLLreg x y (MOVWconst [c])) -> (SUBshiftLL x y [c])
+(SUBshiftRLreg x y (MOVWconst [c])) -> (SUBshiftRL x y [c])
+(SUBshiftRAreg x y (MOVWconst [c])) -> (SUBshiftRA x y [c])
+(SBCshiftLLreg x y (MOVWconst [c]) flags) -> (SBCshiftLL x y [c] flags)
+(SBCshiftRLreg x y (MOVWconst [c]) flags) -> (SBCshiftRL x y [c] flags)
+(SBCshiftRAreg x y (MOVWconst [c]) flags) -> (SBCshiftRA x y [c] flags)
+(SUBSshiftLLreg x y (MOVWconst [c])) -> (SUBSshiftLL x y [c])
+(SUBSshiftRLreg x y (MOVWconst [c])) -> (SUBSshiftRL x y [c])
+(SUBSshiftRAreg x y (MOVWconst [c])) -> (SUBSshiftRA x y [c])
+(RSBshiftLLreg x y (MOVWconst [c])) -> (RSBshiftLL x y [c])
+(RSBshiftRLreg x y (MOVWconst [c])) -> (RSBshiftRL x y [c])
+(RSBshiftRAreg x y (MOVWconst [c])) -> (RSBshiftRA x y [c])
+(RSCshiftLLreg x y (MOVWconst [c]) flags) -> (RSCshiftLL x y [c] flags)
+(RSCshiftRLreg x y (MOVWconst [c]) flags) -> (RSCshiftRL x y [c] flags)
+(RSCshiftRAreg x y (MOVWconst [c]) flags) -> (RSCshiftRA x y [c] flags)
+(RSBSshiftLLreg x y (MOVWconst [c])) -> (RSBSshiftLL x y [c])
+(RSBSshiftRLreg x y (MOVWconst [c])) -> (RSBSshiftRL x y [c])
+(RSBSshiftRAreg x y (MOVWconst [c])) -> (RSBSshiftRA x y [c])
+(ANDshiftLLreg x y (MOVWconst [c])) -> (ANDshiftLL x y [c])
+(ANDshiftRLreg x y (MOVWconst [c])) -> (ANDshiftRL x y [c])
+(ANDshiftRAreg x y (MOVWconst [c])) -> (ANDshiftRA x y [c])
+(ORshiftLLreg x y (MOVWconst [c])) -> (ORshiftLL x y [c])
+(ORshiftRLreg x y (MOVWconst [c])) -> (ORshiftRL x y [c])
+(ORshiftRAreg x y (MOVWconst [c])) -> (ORshiftRA x y [c])
+(XORshiftLLreg x y (MOVWconst [c])) -> (XORshiftLL x y [c])
+(XORshiftRLreg x y (MOVWconst [c])) -> (XORshiftRL x y [c])
+(XORshiftRAreg x y (MOVWconst [c])) -> (XORshiftRA x y [c])
+(BICshiftLLreg x y (MOVWconst [c])) -> (BICshiftLL x y [c])
+(BICshiftRLreg x y (MOVWconst [c])) -> (BICshiftRL x y [c])
+(BICshiftRAreg x y (MOVWconst [c])) -> (BICshiftRA x y [c])
+(MVNshiftLLreg x (MOVWconst [c])) -> (MVNshiftLL x [c])
+(MVNshiftRLreg x (MOVWconst [c])) -> (MVNshiftRL x [c])
+(MVNshiftRAreg x (MOVWconst [c])) -> (MVNshiftRA x [c])
+(CMPshiftLLreg x y (MOVWconst [c])) -> (CMPshiftLL x y [c])
+(CMPshiftRLreg x y (MOVWconst [c])) -> (CMPshiftRL x y [c])
+(CMPshiftRAreg x y (MOVWconst [c])) -> (CMPshiftRA x y [c])
+
+// use indexed loads and stores
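+// (ARM word loads and stores support register-offset addressing, optionally
+// with a shifted index, e.g. LDR Rd, [Rn, Rm, LSL #c] in ARM assembly;
+// NaCl's sandbox does not allow this addressing mode, presumably because it
+// requires masked addresses, hence the !config.nacl guards below.)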
+(MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVWloadidx ptr idx mem)
+(MOVWstore [0] {sym} (ADD ptr idx) val mem) && sym == nil && !config.nacl -> (MOVWstoreidx ptr idx val mem)
+(MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem) && sym == nil && !config.nacl -> (MOVWloadshiftLL ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem) && sym == nil && !config.nacl -> (MOVWloadshiftRL ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem) && sym == nil && !config.nacl -> (MOVWloadshiftRA ptr idx [c] mem)
+(MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem) && sym == nil && !config.nacl -> (MOVWstoreshiftLL ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem) && sym == nil && !config.nacl -> (MOVWstoreshiftRL ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem) && sym == nil && !config.nacl -> (MOVWstoreshiftRA ptr idx [c] val mem)
+
+// constant folding in indexed loads and stores
+(MOVWloadidx ptr (MOVWconst [c]) mem) -> (MOVWload [c] ptr mem)
+(MOVWloadidx (MOVWconst [c]) ptr mem) -> (MOVWload [c] ptr mem)
+
+(MOVWstoreidx ptr (MOVWconst [c]) val mem) -> (MOVWstore [c] ptr val mem)
+(MOVWstoreidx (MOVWconst [c]) ptr val mem) -> (MOVWstore [c] ptr val mem)
+
+(MOVWloadidx ptr (SLLconst idx [c]) mem) -> (MOVWloadshiftLL ptr idx [c] mem)
+(MOVWloadidx (SLLconst idx [c]) ptr mem) -> (MOVWloadshiftLL ptr idx [c] mem)
+(MOVWloadidx ptr (SRLconst idx [c]) mem) -> (MOVWloadshiftRL ptr idx [c] mem)
+(MOVWloadidx (SRLconst idx [c]) ptr mem) -> (MOVWloadshiftRL ptr idx [c] mem)
+(MOVWloadidx ptr (SRAconst idx [c]) mem) -> (MOVWloadshiftRA ptr idx [c] mem)
+(MOVWloadidx (SRAconst idx [c]) ptr mem) -> (MOVWloadshiftRA ptr idx [c] mem)
+
+(MOVWstoreidx ptr (SLLconst idx [c]) val mem) -> (MOVWstoreshiftLL ptr idx [c] val mem)
+(MOVWstoreidx (SLLconst idx [c]) ptr val mem) -> (MOVWstoreshiftLL ptr idx [c] val mem)
+(MOVWstoreidx ptr (SRLconst idx [c]) val mem) -> (MOVWstoreshiftRL ptr idx [c] val mem)
+(MOVWstoreidx (SRLconst idx [c]) ptr val mem) -> (MOVWstoreshiftRL ptr idx [c] val mem)
+(MOVWstoreidx ptr (SRAconst idx [c]) val mem) -> (MOVWstoreshiftRA ptr idx [c] val mem)
+(MOVWstoreidx (SRAconst idx [c]) ptr val mem) -> (MOVWstoreshiftRA ptr idx [c] val mem)
+
+(MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem) -> (MOVWload [int64(uint32(c)<<uint64(d))] ptr mem)
+(MOVWloadshiftRL ptr (MOVWconst [c]) [d] mem) -> (MOVWload [int64(uint32(c)>>uint64(d))] ptr mem)
+(MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem) -> (MOVWload [int64(int32(c)>>uint64(d))] ptr mem)
+
+(MOVWstoreshiftLL ptr (MOVWconst [c]) [d] val mem) -> (MOVWstore [int64(uint32(c)<<uint64(d))] ptr val mem)
+(MOVWstoreshiftRL ptr (MOVWconst [c]) [d] val mem) -> (MOVWstore [int64(uint32(c)>>uint64(d))] ptr val mem)
+(MOVWstoreshiftRA ptr (MOVWconst [c]) [d] val mem) -> (MOVWstore [int64(int32(c)>>uint64(d))] ptr val mem)
+
+// generic simplifications
+(ADD x (RSBconst [0] y)) -> (SUB x y)
+(ADD (RSBconst [0] y) x) -> (SUB x y)
+(SUB x x) -> (MOVWconst [0])
+(RSB x x) -> (MOVWconst [0])
+(AND x x) -> x
+(OR x x) -> x
+(XOR x x) -> (MOVWconst [0])
+(BIC x x) -> (MOVWconst [0])
+
+(ADD (MUL x y) a) -> (MULA x y a)
+(ADD a (MUL x y)) -> (MULA x y a)
+
+(AND x (MVN y)) -> (BIC x y)
+
+// simplification with *shift ops
+(SUBshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(SUBshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(SUBshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(RSBshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(RSBshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(RSBshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(ANDshiftLL x y:(SLLconst x [c]) [d]) && c==d -> y
+(ANDshiftRL x y:(SRLconst x [c]) [d]) && c==d -> y
+(ANDshiftRA x y:(SRAconst x [c]) [d]) && c==d -> y
+(ORshiftLL x y:(SLLconst x [c]) [d]) && c==d -> y
+(ORshiftRL x y:(SRLconst x [c]) [d]) && c==d -> y
+(ORshiftRA x y:(SRAconst x [c]) [d]) && c==d -> y
+(XORshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(XORshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(XORshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(BICshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(BICshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(BICshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVWconst [0])
+(AND x (MVNshiftLL y [c])) -> (BICshiftLL x y [c])
+(AND x (MVNshiftRL y [c])) -> (BICshiftRL x y [c])
+(AND x (MVNshiftRA y [c])) -> (BICshiftRA x y [c])
+
+// floating point optimizations
+(CMPF x (MOVFconst [0])) -> (CMPF0 x)
+(CMPD x (MOVDconst [0])) -> (CMPD0 x)
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules
new file mode 100644
index 0000000000..bc215c56b4
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules
@@ -0,0 +1,1057 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+(AddPtr x y) -> (ADD x y)
+(Add64 x y) -> (ADD x y)
+(Add32 x y) -> (ADD x y)
+(Add16 x y) -> (ADD x y)
+(Add8 x y) -> (ADD x y)
+(Add32F x y) -> (FADDS x y)
+(Add64F x y) -> (FADDD x y)
+
+(SubPtr x y) -> (SUB x y)
+(Sub64 x y) -> (SUB x y)
+(Sub32 x y) -> (SUB x y)
+(Sub16 x y) -> (SUB x y)
+(Sub8 x y) -> (SUB x y)
+(Sub32F x y) -> (FSUBS x y)
+(Sub64F x y) -> (FSUBD x y)
+
+(Mul64 x y) -> (MUL x y)
+(Mul32 x y) -> (MULW x y)
+(Mul16 x y) -> (MULW x y)
+(Mul8 x y) -> (MULW x y)
+(Mul32F x y) -> (FMULS x y)
+(Mul64F x y) -> (FMULD x y)
+
+(Hmul64 x y) -> (MULH x y)
+(Hmul64u x y) -> (UMULH x y)
+(Hmul32 x y) -> (SRAconst (MULL <config.fe.TypeInt64()> x y) [32])
+(Hmul32u x y) -> (SRAconst (UMULL <config.fe.TypeUInt64()> x y) [32])
+(Hmul16 x y) -> (SRAconst (MULW <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
+(Hmul16u x y) -> (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
+(Hmul8 x y) -> (SRAconst (MULW <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
+(Hmul8u x y) -> (SRLconst (MUL <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
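+// (The sub-word high-multiply rules above widen first and keep the upper half
+// of the full product; e.g. Hmul16 computes int16((int32(x)*int32(y)) >> 16).)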
+
+(Div64 x y) -> (DIV x y)
+(Div64u x y) -> (UDIV x y)
+(Div32 x y) -> (DIVW x y)
+(Div32u x y) -> (UDIVW x y)
+(Div16 x y) -> (DIVW (SignExt16to32 x) (SignExt16to32 y))
+(Div16u x y) -> (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Div8 x y) -> (DIVW (SignExt8to32 x) (SignExt8to32 y))
+(Div8u x y) -> (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y))
+(Div32F x y) -> (FDIVS x y)
+(Div64F x y) -> (FDIVD x y)
+
+(Mod64 x y) -> (MOD x y)
+(Mod64u x y) -> (UMOD x y)
+(Mod32 x y) -> (MODW x y)
+(Mod32u x y) -> (UMODW x y)
+(Mod16 x y) -> (MODW (SignExt16to32 x) (SignExt16to32 y))
+(Mod16u x y) -> (UMODW (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Mod8 x y) -> (MODW (SignExt8to32 x) (SignExt8to32 y))
+(Mod8u x y) -> (UMODW (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+(Avg64u <t> x y) -> (ADD (ADD <t> (SRLconst <t> x [1]) (SRLconst <t> y [1])) (AND <t> (AND <t> x y) (MOVDconst [1])))
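+// The Avg64u lowering above uses the overflow-free identity
+// (x+y)/2 == x/2 + y/2 + (x&y&1); an illustrative Go equivalent:
+//
+//	func avg64u(x, y uint64) uint64 {
+//		return (x >> 1) + (y >> 1) + (x & y & 1) // never overflows, unlike (x+y)/2
+//	}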
+
+(And64 x y) -> (AND x y)
+(And32 x y) -> (AND x y)
+(And16 x y) -> (AND x y)
+(And8 x y) -> (AND x y)
+
+(Or64 x y) -> (OR x y)
+(Or32 x y) -> (OR x y)
+(Or16 x y) -> (OR x y)
+(Or8 x y) -> (OR x y)
+
+(Xor64 x y) -> (XOR x y)
+(Xor32 x y) -> (XOR x y)
+(Xor16 x y) -> (XOR x y)
+(Xor8 x y) -> (XOR x y)
+
+// unary ops
+(Neg64 x) -> (NEG x)
+(Neg32 x) -> (NEG x)
+(Neg16 x) -> (NEG x)
+(Neg8 x) -> (NEG x)
+(Neg32F x) -> (FNEGS x)
+(Neg64F x) -> (FNEGD x)
+
+(Com64 x) -> (MVN x)
+(Com32 x) -> (MVN x)
+(Com16 x) -> (MVN x)
+(Com8 x) -> (MVN x)
+
+(Sqrt x) -> (FSQRTD x)
+
+// boolean ops -- booleans are represented with 0=false, 1=true
+(AndB x y) -> (AND x y)
+(OrB x y) -> (OR x y)
+(EqB x y) -> (XOR (MOVDconst [1]) (XOR <config.fe.TypeBool()> x y))
+(NeqB x y) -> (XOR x y)
+(Not x) -> (XOR (MOVDconst [1]) x)
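+// (With this representation, x == y is 1^(x^y), x != y is x^y, and !x is 1^x,
+// which is exactly what the EqB/NeqB/Not rules above encode.)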
+
+// constant shifts
+(Lsh64x64 x (MOVDconst [c])) && uint64(c) < 64 -> (SLLconst x [c])
+(Rsh64x64 x (MOVDconst [c])) && uint64(c) < 64 -> (SRAconst x [c])
+(Rsh64Ux64 x (MOVDconst [c])) && uint64(c) < 64 -> (SRLconst x [c])
+(Lsh32x64 x (MOVDconst [c])) && uint64(c) < 32 -> (SLLconst x [c])
+(Rsh32x64 x (MOVDconst [c])) && uint64(c) < 32 -> (SRAconst (SignExt32to64 x) [c])
+(Rsh32Ux64 x (MOVDconst [c])) && uint64(c) < 32 -> (SRLconst (ZeroExt32to64 x) [c])
+(Lsh16x64 x (MOVDconst [c])) && uint64(c) < 16 -> (SLLconst x [c])
+(Rsh16x64 x (MOVDconst [c])) && uint64(c) < 16 -> (SRAconst (SignExt16to64 x) [c])
+(Rsh16Ux64 x (MOVDconst [c])) && uint64(c) < 16 -> (SRLconst (ZeroExt16to64 x) [c])
+(Lsh8x64 x (MOVDconst [c])) && uint64(c) < 8 -> (SLLconst x [c])
+(Rsh8x64 x (MOVDconst [c])) && uint64(c) < 8 -> (SRAconst (SignExt8to64 x) [c])
+(Rsh8Ux64 x (MOVDconst [c])) && uint64(c) < 8 -> (SRLconst (ZeroExt8to64 x) [c])
+
+// large constant shifts
+(Lsh64x64 _ (MOVDconst [c])) && uint64(c) >= 64 -> (MOVDconst [0])
+(Rsh64Ux64 _ (MOVDconst [c])) && uint64(c) >= 64 -> (MOVDconst [0])
+(Lsh32x64 _ (MOVDconst [c])) && uint64(c) >= 32 -> (MOVDconst [0])
+(Rsh32Ux64 _ (MOVDconst [c])) && uint64(c) >= 32 -> (MOVDconst [0])
+(Lsh16x64 _ (MOVDconst [c])) && uint64(c) >= 16 -> (MOVDconst [0])
+(Rsh16Ux64 _ (MOVDconst [c])) && uint64(c) >= 16 -> (MOVDconst [0])
+(Lsh8x64 _ (MOVDconst [c])) && uint64(c) >= 8 -> (MOVDconst [0])
+(Rsh8Ux64 _ (MOVDconst [c])) && uint64(c) >= 8 -> (MOVDconst [0])
+
+// large constant signed right shifts leave only the sign bit, so clamp the shift to 63
+(Rsh64x64 x (MOVDconst [c])) && uint64(c) >= 64 -> (SRAconst x [63])
+(Rsh32x64 x (MOVDconst [c])) && uint64(c) >= 32 -> (SRAconst (SignExt32to64 x) [63])
+(Rsh16x64 x (MOVDconst [c])) && uint64(c) >= 16 -> (SRAconst (SignExt16to64 x) [63])
+(Rsh8x64 x (MOVDconst [c])) && uint64(c) >= 8 -> (SRAconst (SignExt8to64 x) [63])
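+// (In Go, a signed shift by >= the operand width yields all sign bits:
+// e.g. int32(-8) >> 100 == -1 and int32(8) >> 100 == 0. Clamping the shift
+// of the sign-extended value to 63 reproduces that exactly.)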
+
+// shifts
+// the hardware instruction uses only the low 6 bits of the shift amount;
+// we compare against 64 to preserve Go semantics for large shifts
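+// roughly, the CSELULT form below computes (illustrative Go):
+//
+//	var r uint64
+//	if uint64(y) < 64 {
+//		r = x << y
+//	} else {
+//		r = 0
+//	}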
+(Lsh64x64 <t> x y) -> (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+(Lsh64x32 <t> x y) -> (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Lsh64x16 <t> x y) -> (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Lsh64x8 <t> x y) -> (CSELULT (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Lsh32x64 <t> x y) -> (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+(Lsh32x32 <t> x y) -> (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Lsh32x16 <t> x y) -> (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Lsh32x8 <t> x y) -> (CSELULT (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Lsh16x64 <t> x y) -> (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+(Lsh16x32 <t> x y) -> (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Lsh16x16 <t> x y) -> (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Lsh16x8 <t> x y) -> (CSELULT (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Lsh8x64 <t> x y) -> (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+(Lsh8x32 <t> x y) -> (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Lsh8x16 <t> x y) -> (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Lsh8x8 <t> x y) -> (CSELULT (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Rsh64Ux64 <t> x y) -> (CSELULT (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+(Rsh64Ux32 <t> x y) -> (CSELULT (SRL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Rsh64Ux16 <t> x y) -> (CSELULT (SRL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Rsh64Ux8 <t> x y) -> (CSELULT (SRL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Rsh32Ux64 <t> x y) -> (CSELULT (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+(Rsh32Ux32 <t> x y) -> (CSELULT (SRL <t> (ZeroExt32to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Rsh32Ux16 <t> x y) -> (CSELULT (SRL <t> (ZeroExt32to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Rsh32Ux8 <t> x y) -> (CSELULT (SRL <t> (ZeroExt32to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Rsh16Ux64 <t> x y) -> (CSELULT (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+(Rsh16Ux32 <t> x y) -> (CSELULT (SRL <t> (ZeroExt16to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Rsh16Ux16 <t> x y) -> (CSELULT (SRL <t> (ZeroExt16to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Rsh16Ux8 <t> x y) -> (CSELULT (SRL <t> (ZeroExt16to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Rsh8Ux64 <t> x y) -> (CSELULT (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+(Rsh8Ux32 <t> x y) -> (CSELULT (SRL <t> (ZeroExt8to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Rsh8Ux16 <t> x y) -> (CSELULT (SRL <t> (ZeroExt8to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Rsh8Ux8 <t> x y) -> (CSELULT (SRL <t> (ZeroExt8to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Rsh64x64 x y) -> (SRA x (CSELULT <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+(Rsh64x32 x y) -> (SRA x (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+(Rsh64x16 x y) -> (SRA x (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+(Rsh64x8 x y) -> (SRA x (CSELULT <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+
+(Rsh32x64 x y) -> (SRA (SignExt32to64 x) (CSELULT <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+(Rsh32x32 x y) -> (SRA (SignExt32to64 x) (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+(Rsh32x16 x y) -> (SRA (SignExt32to64 x) (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+(Rsh32x8 x y) -> (SRA (SignExt32to64 x) (CSELULT <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+
+(Rsh16x64 x y) -> (SRA (SignExt16to64 x) (CSELULT <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+(Rsh16x32 x y) -> (SRA (SignExt16to64 x) (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+(Rsh16x16 x y) -> (SRA (SignExt16to64 x) (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+(Rsh16x8 x y) -> (SRA (SignExt16to64 x) (CSELULT <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+
+(Rsh8x64 x y) -> (SRA (SignExt8to64 x) (CSELULT <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+(Rsh8x32 x y) -> (SRA (SignExt8to64 x) (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+(Rsh8x16 x y) -> (SRA (SignExt8to64 x) (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+(Rsh8x8 x y) -> (SRA (SignExt8to64 x) (CSELULT <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+
+(Lrot64 x [c]) -> (RORconst x [64-c&63])
+(Lrot32 x [c]) -> (RORWconst x [32-c&31])
+(Lrot16 <t> x [c]) -> (OR (SLLconst <t> x [c&15]) (SRLconst <t> (ZeroExt16to64 x) [16-c&15]))
+(Lrot8 <t> x [c]) -> (OR (SLLconst <t> x [c&7]) (SRLconst <t> (ZeroExt8to64 x) [8-c&7]))
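+// (In the sub-word rotates above, 16-c&15 parses as 16-(c&15); e.g. a 16-bit
+// rotate left by c is (x << (c&15)) | (zeroExt16(x) >> (16-(c&15))), and the
+// zero-extension keeps the shift by a full 16 bits well-defined: it yields 0.)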
+
+// constants
+(Const64 [val]) -> (MOVDconst [val])
+(Const32 [val]) -> (MOVDconst [val])
+(Const16 [val]) -> (MOVDconst [val])
+(Const8 [val]) -> (MOVDconst [val])
+(Const32F [val]) -> (FMOVSconst [val])
+(Const64F [val]) -> (FMOVDconst [val])
+(ConstNil) -> (MOVDconst [0])
+(ConstBool [b]) -> (MOVDconst [b])
+
+// truncations
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 x) -> x
+(Trunc32to8 x) -> x
+(Trunc32to16 x) -> x
+(Trunc64to8 x) -> x
+(Trunc64to16 x) -> x
+(Trunc64to32 x) -> x
+
+// Zero-/Sign-extensions
+(ZeroExt8to16 x) -> (MOVBUreg x)
+(ZeroExt8to32 x) -> (MOVBUreg x)
+(ZeroExt16to32 x) -> (MOVHUreg x)
+(ZeroExt8to64 x) -> (MOVBUreg x)
+(ZeroExt16to64 x) -> (MOVHUreg x)
+(ZeroExt32to64 x) -> (MOVWUreg x)
+
+(SignExt8to16 x) -> (MOVBreg x)
+(SignExt8to32 x) -> (MOVBreg x)
+(SignExt16to32 x) -> (MOVHreg x)
+(SignExt8to64 x) -> (MOVBreg x)
+(SignExt16to64 x) -> (MOVHreg x)
+(SignExt32to64 x) -> (MOVWreg x)
+
+// float <-> int conversion
+(Cvt32to32F x) -> (SCVTFWS x)
+(Cvt32to64F x) -> (SCVTFWD x)
+(Cvt64to32F x) -> (SCVTFS x)
+(Cvt64to64F x) -> (SCVTFD x)
+(Cvt32Uto32F x) -> (UCVTFWS x)
+(Cvt32Uto64F x) -> (UCVTFWD x)
+//(Cvt64Uto32F x) -> (UCVTFS x)
+//(Cvt64Uto64F x) -> (UCVTFD x)
+(Cvt32Fto32 x) -> (FCVTZSSW x)
+(Cvt64Fto32 x) -> (FCVTZSDW x)
+(Cvt32Fto64 x) -> (FCVTZSS x)
+(Cvt64Fto64 x) -> (FCVTZSD x)
+(Cvt32Fto32U x) -> (FCVTZUSW x)
+(Cvt64Fto32U x) -> (FCVTZUDW x)
+//(Cvt32Fto64U x) -> (FCVTZUS x)
+//(Cvt64Fto64U x) -> (FCVTZUD x)
+(Cvt32Fto64F x) -> (FCVTSD x)
+(Cvt64Fto32F x) -> (FCVTDS x)
+
+// comparisons
+(Eq8 x y) -> (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Eq16 x y) -> (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Eq32 x y) -> (Equal (CMPW x y))
+(Eq64 x y) -> (Equal (CMP x y))
+(EqPtr x y) -> (Equal (CMP x y))
+(Eq32F x y) -> (Equal (FCMPS x y))
+(Eq64F x y) -> (Equal (FCMPD x y))
+
+(Neq8 x y) -> (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Neq16 x y) -> (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Neq32 x y) -> (NotEqual (CMPW x y))
+(Neq64 x y) -> (NotEqual (CMP x y))
+(NeqPtr x y) -> (NotEqual (CMP x y))
+(Neq32F x y) -> (NotEqual (FCMPS x y))
+(Neq64F x y) -> (NotEqual (FCMPD x y))
+
+(Less8 x y) -> (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Less16 x y) -> (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Less32 x y) -> (LessThan (CMPW x y))
+(Less64 x y) -> (LessThan (CMP x y))
+(Less32F x y) -> (GreaterThan (FCMPS y x)) // reverse operands to work around NaN
+(Less64F x y) -> (GreaterThan (FCMPD y x)) // reverse operands to work around NaN
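+// (FCMP reports "unordered" when either operand is NaN, and GreaterThan is
+// false for unordered results; computing x < y as y > x therefore yields
+// false for NaN operands, matching Go's semantics, whereas LessThan would
+// wrongly yield true.)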
+
+(Less8U x y) -> (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Less16U x y) -> (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Less32U x y) -> (LessThanU (CMPW x y))
+(Less64U x y) -> (LessThanU (CMP x y))
+
+(Leq8 x y) -> (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Leq16 x y) -> (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Leq32 x y) -> (LessEqual (CMPW x y))
+(Leq64 x y) -> (LessEqual (CMP x y))
+(Leq32F x y) -> (GreaterEqual (FCMPS y x)) // reverse operands to work around NaN
+(Leq64F x y) -> (GreaterEqual (FCMPD y x)) // reverse operands to work around NaN
+
+(Leq8U x y) -> (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Leq16U x y) -> (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Leq32U x y) -> (LessEqualU (CMPW x y))
+(Leq64U x y) -> (LessEqualU (CMP x y))
+
+(Greater8 x y) -> (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Greater16 x y) -> (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Greater32 x y) -> (GreaterThan (CMPW x y))
+(Greater64 x y) -> (GreaterThan (CMP x y))
+(Greater32F x y) -> (GreaterThan (FCMPS x y))
+(Greater64F x y) -> (GreaterThan (FCMPD x y))
+
+(Greater8U x y) -> (GreaterThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Greater16U x y) -> (GreaterThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Greater32U x y) -> (GreaterThanU (CMPW x y))
+(Greater64U x y) -> (GreaterThanU (CMP x y))
+
+(Geq8 x y) -> (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Geq16 x y) -> (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Geq32 x y) -> (GreaterEqual (CMPW x y))
+(Geq64 x y) -> (GreaterEqual (CMP x y))
+(Geq32F x y) -> (GreaterEqual (FCMPS x y))
+(Geq64F x y) -> (GreaterEqual (FCMPD x y))
+
+(Geq8U x y) -> (GreaterEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Geq16U x y) -> (GreaterEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Geq32U x y) -> (GreaterEqualU (CMPW x y))
+(Geq64U x y) -> (GreaterEqualU (CMP x y))
+
+(OffPtr [off] ptr:(SP)) -> (MOVDaddr [off] ptr)
+(OffPtr [off] ptr) -> (ADDconst [off] ptr)
+
+(Addr {sym} base) -> (MOVDaddr {sym} base)
+
+// loads
+(Load <t> ptr mem) && t.IsBoolean() -> (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && isSigned(t)) -> (MOVBload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && !isSigned(t)) -> (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) -> (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) -> (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && isSigned(t)) -> (MOVWload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && !isSigned(t)) -> (MOVWUload ptr mem)
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVDload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) -> (FMOVSload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)
+
+// stores
+(Store [1] ptr val mem) -> (MOVBstore ptr val mem)
+(Store [2] ptr val mem) -> (MOVHstore ptr val mem)
+(Store [4] ptr val mem) && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
+(Store [8] ptr val mem) && !is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
+(Store [4] ptr val mem) && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
+(Store [8] ptr val mem) && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
+
+// zeroing
+(Zero [s] _ mem) && SizeAndAlign(s).Size() == 0 -> mem
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstore ptr (MOVDconst [0]) mem)
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0 ->
+ (MOVHstore ptr (MOVDconst [0]) mem)
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 2 ->
+ (MOVBstore [1] ptr (MOVDconst [0])
+ (MOVBstore ptr (MOVDconst [0]) mem))
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0 ->
+ (MOVWstore ptr (MOVDconst [0]) mem)
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0 ->
+ (MOVHstore [2] ptr (MOVDconst [0])
+ (MOVHstore ptr (MOVDconst [0]) mem))
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 4 ->
+ (MOVBstore [3] ptr (MOVDconst [0])
+ (MOVBstore [2] ptr (MOVDconst [0])
+ (MOVBstore [1] ptr (MOVDconst [0])
+ (MOVBstore ptr (MOVDconst [0]) mem))))
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0 ->
+ (MOVDstore ptr (MOVDconst [0]) mem)
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0 ->
+ (MOVWstore [4] ptr (MOVDconst [0])
+ (MOVWstore ptr (MOVDconst [0]) mem))
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0 ->
+ (MOVHstore [6] ptr (MOVDconst [0])
+ (MOVHstore [4] ptr (MOVDconst [0])
+ (MOVHstore [2] ptr (MOVDconst [0])
+ (MOVHstore ptr (MOVDconst [0]) mem))))
+
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 3 ->
+ (MOVBstore [2] ptr (MOVDconst [0])
+ (MOVBstore [1] ptr (MOVDconst [0])
+ (MOVBstore ptr (MOVDconst [0]) mem)))
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0 ->
+ (MOVHstore [4] ptr (MOVDconst [0])
+ (MOVHstore [2] ptr (MOVDconst [0])
+ (MOVHstore ptr (MOVDconst [0]) mem)))
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0 ->
+ (MOVWstore [8] ptr (MOVDconst [0])
+ (MOVWstore [4] ptr (MOVDconst [0])
+ (MOVWstore ptr (MOVDconst [0]) mem)))
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0 ->
+ (MOVDstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))
+(Zero [s] ptr mem) && SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0 ->
+ (MOVDstore [16] ptr (MOVDconst [0])
+ (MOVDstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem)))
+
+// medium zeroing uses a Duff's device
+// 4, 8, and 128 are magic constants, see runtime/mkduff.go
+(Zero [s] ptr mem)
+ && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size() > 24 && SizeAndAlign(s).Size() <= 8*128
+ && SizeAndAlign(s).Align()%8 == 0 && !config.noDuffDevice ->
+ (DUFFZERO [4 * (128 - int64(SizeAndAlign(s).Size()/8))] ptr mem)
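+// (Worked example: zeroing 32 bytes needs 32/8 = 4 of the device's 128 stores,
+// so we enter at byte offset 4*(128-4) = 496, assuming each zeroing step
+// encodes as one 4-byte instruction as laid out by runtime/mkduff.go.)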
+
+// large or unaligned zeroing uses a loop
+(Zero [s] ptr mem)
+ && (SizeAndAlign(s).Size() > 8*128 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0 ->
+ (LoweredZero [SizeAndAlign(s).Align()]
+ ptr
+ (ADDconst <ptr.Type> [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)] ptr)
+ mem)
+
+// moves
+(Move [s] _ _ mem) && SizeAndAlign(s).Size() == 0 -> mem
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstore dst (MOVBUload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0 ->
+ (MOVHstore dst (MOVHUload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 2 ->
+ (MOVBstore [1] dst (MOVBUload [1] src mem)
+ (MOVBstore dst (MOVBUload src mem) mem))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0 ->
+ (MOVWstore dst (MOVWUload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0 ->
+ (MOVHstore [2] dst (MOVHUload [2] src mem)
+ (MOVHstore dst (MOVHUload src mem) mem))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 ->
+ (MOVBstore [3] dst (MOVBUload [3] src mem)
+ (MOVBstore [2] dst (MOVBUload [2] src mem)
+ (MOVBstore [1] dst (MOVBUload [1] src mem)
+ (MOVBstore dst (MOVBUload src mem) mem))))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0 ->
+ (MOVDstore dst (MOVDload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0 ->
+ (MOVWstore [4] dst (MOVWUload [4] src mem)
+ (MOVWstore dst (MOVWUload src mem) mem))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0 ->
+ (MOVHstore [6] dst (MOVHUload [6] src mem)
+ (MOVHstore [4] dst (MOVHUload [4] src mem)
+ (MOVHstore [2] dst (MOVHUload [2] src mem)
+ (MOVHstore dst (MOVHUload src mem) mem))))
+
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 3 ->
+ (MOVBstore [2] dst (MOVBUload [2] src mem)
+ (MOVBstore [1] dst (MOVBUload [1] src mem)
+ (MOVBstore dst (MOVBUload src mem) mem)))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0 ->
+ (MOVHstore [4] dst (MOVHUload [4] src mem)
+ (MOVHstore [2] dst (MOVHUload [2] src mem)
+ (MOVHstore dst (MOVHUload src mem) mem)))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0 ->
+ (MOVWstore [8] dst (MOVWUload [8] src mem)
+ (MOVWstore [4] dst (MOVWUload [4] src mem)
+ (MOVWstore dst (MOVWUload src mem) mem)))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0 ->
+ (MOVDstore [8] dst (MOVDload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0 ->
+ (MOVDstore [16] dst (MOVDload [16] src mem)
+ (MOVDstore [8] dst (MOVDload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem)))
+
+// large or unaligned move uses a loop
+// DUFFCOPY is not implemented on ARM64 (TODO)
+(Move [s] dst src mem)
+ && SizeAndAlign(s).Size() > 24 || SizeAndAlign(s).Align()%8 != 0 ->
+ (LoweredMove [SizeAndAlign(s).Align()]
+ dst
+ src
+ (ADDconst <src.Type> src [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)])
+ mem)
+
+// calls
+(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
+(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
+(DeferCall [argwid] mem) -> (CALLdefer [argwid] mem)
+(GoCall [argwid] mem) -> (CALLgo [argwid] mem)
+(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)
+
+// checks
+(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
+(IsNonNil ptr) -> (NotEqual (CMPconst [0] ptr))
+(IsInBounds idx len) -> (LessThanU (CMP idx len))
+(IsSliceInBounds idx len) -> (LessEqualU (CMP idx len))
+
+// pseudo-ops
+(GetClosurePtr) -> (LoweredGetClosurePtr)
+(Convert x mem) -> (MOVDconvert x mem)
+
+// Absorb pseudo-ops into blocks.
+(If (Equal cc) yes no) -> (EQ cc yes no)
+(If (NotEqual cc) yes no) -> (NE cc yes no)
+(If (LessThan cc) yes no) -> (LT cc yes no)
+(If (LessThanU cc) yes no) -> (ULT cc yes no)
+(If (LessEqual cc) yes no) -> (LE cc yes no)
+(If (LessEqualU cc) yes no) -> (ULE cc yes no)
+(If (GreaterThan cc) yes no) -> (GT cc yes no)
+(If (GreaterThanU cc) yes no) -> (UGT cc yes no)
+(If (GreaterEqual cc) yes no) -> (GE cc yes no)
+(If (GreaterEqualU cc) yes no) -> (UGE cc yes no)
+
+(If cond yes no) -> (NE (CMPconst [0] cond) yes no)
+
+// Optimizations
+
+// Absorb boolean tests into block
+(NE (CMPconst [0] (Equal cc)) yes no) -> (EQ cc yes no)
+(NE (CMPconst [0] (NotEqual cc)) yes no) -> (NE cc yes no)
+(NE (CMPconst [0] (LessThan cc)) yes no) -> (LT cc yes no)
+(NE (CMPconst [0] (LessThanU cc)) yes no) -> (ULT cc yes no)
+(NE (CMPconst [0] (LessEqual cc)) yes no) -> (LE cc yes no)
+(NE (CMPconst [0] (LessEqualU cc)) yes no) -> (ULE cc yes no)
+(NE (CMPconst [0] (GreaterThan cc)) yes no) -> (GT cc yes no)
+(NE (CMPconst [0] (GreaterThanU cc)) yes no) -> (UGT cc yes no)
+(NE (CMPconst [0] (GreaterEqual cc)) yes no) -> (GE cc yes no)
+(NE (CMPconst [0] (GreaterEqualU cc)) yes no) -> (UGE cc yes no)
+
+// fold offset into address
+(ADDconst [off1] (MOVDaddr [off2] {sym} ptr)) -> (MOVDaddr [off1+off2] {sym} ptr)
+
+// fold address into load/store
+(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVBload [off1+off2] {sym} ptr mem)
+(MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVBUload [off1+off2] {sym} ptr mem)
+(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVHload [off1+off2] {sym} ptr mem)
+(MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVHUload [off1+off2] {sym} ptr mem)
+(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVWload [off1+off2] {sym} ptr mem)
+(MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVWUload [off1+off2] {sym} ptr mem)
+(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVDload [off1+off2] {sym} ptr mem)
+(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) -> (FMOVSload [off1+off2] {sym} ptr mem)
+(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) -> (FMOVDload [off1+off2] {sym} ptr mem)
+
+(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) -> (MOVBstore [off1+off2] {sym} ptr val mem)
+(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) -> (MOVHstore [off1+off2] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) -> (MOVWstore [off1+off2] {sym} ptr val mem)
+(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) -> (MOVDstore [off1+off2] {sym} ptr val mem)
+(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) -> (FMOVSstore [off1+off2] {sym} ptr val mem)
+(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) -> (FMOVDstore [off1+off2] {sym} ptr val mem)
+(MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVBstorezero [off1+off2] {sym} ptr mem)
+(MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVHstorezero [off1+off2] {sym} ptr mem)
+(MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVWstorezero [off1+off2] {sym} ptr mem)
+(MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVDstorezero [off1+off2] {sym} ptr mem)
+
+(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
+ (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
+ (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
+ (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
+ (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+// store zero
+(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVBstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVHstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVWstorezero [off] {sym} ptr mem)
+(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVDstorezero [off] {sym} ptr mem)
+
+// replace load from same location as preceding store with copy
+(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(MOVWUload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(FMOVSload [off] {sym} ptr (FMOVSstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(FMOVDload [off] {sym} ptr (FMOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+
+(MOVBload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDconst [0])
+(MOVBUload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDconst [0])
+(MOVHload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDconst [0])
+(MOVHUload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDconst [0])
+(MOVWload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDconst [0])
+(MOVWUload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDconst [0])
+(MOVDload [off] {sym} ptr (MOVDstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDconst [0])
+
+// don't extend after a load that already extends (the loaded value is already sign-/zero-extended)
+(MOVBreg x:(MOVBload _ _)) -> (MOVDreg x)
+(MOVBUreg x:(MOVBUload _ _)) -> (MOVDreg x)
+(MOVHreg x:(MOVBload _ _)) -> (MOVDreg x)
+(MOVHreg x:(MOVBUload _ _)) -> (MOVDreg x)
+(MOVHreg x:(MOVHload _ _)) -> (MOVDreg x)
+(MOVHUreg x:(MOVBUload _ _)) -> (MOVDreg x)
+(MOVHUreg x:(MOVHUload _ _)) -> (MOVDreg x)
+(MOVWreg x:(MOVBload _ _)) -> (MOVDreg x)
+(MOVWreg x:(MOVBUload _ _)) -> (MOVDreg x)
+(MOVWreg x:(MOVHload _ _)) -> (MOVDreg x)
+(MOVWreg x:(MOVHUload _ _)) -> (MOVDreg x)
+(MOVWreg x:(MOVWload _ _)) -> (MOVDreg x)
+(MOVWUreg x:(MOVBUload _ _)) -> (MOVDreg x)
+(MOVWUreg x:(MOVHUload _ _)) -> (MOVDreg x)
+(MOVWUreg x:(MOVWUload _ _)) -> (MOVDreg x)
+
+// fold double extensions
+(MOVBreg x:(MOVBreg _)) -> (MOVDreg x)
+(MOVBUreg x:(MOVBUreg _)) -> (MOVDreg x)
+(MOVHreg x:(MOVBreg _)) -> (MOVDreg x)
+(MOVHreg x:(MOVBUreg _)) -> (MOVDreg x)
+(MOVHreg x:(MOVHreg _)) -> (MOVDreg x)
+(MOVHUreg x:(MOVBUreg _)) -> (MOVDreg x)
+(MOVHUreg x:(MOVHUreg _)) -> (MOVDreg x)
+(MOVWreg x:(MOVBreg _)) -> (MOVDreg x)
+(MOVWreg x:(MOVBUreg _)) -> (MOVDreg x)
+(MOVWreg x:(MOVHreg _)) -> (MOVDreg x)
+(MOVWreg x:(MOVHUreg _)) -> (MOVDreg x)
+(MOVWreg x:(MOVWreg _)) -> (MOVDreg x)
+(MOVWUreg x:(MOVBUreg _)) -> (MOVDreg x)
+(MOVWUreg x:(MOVHUreg _)) -> (MOVDreg x)
+(MOVWUreg x:(MOVWUreg _)) -> (MOVDreg x)
+
+// don't extend before store
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
+
+// if a register move has only 1 use, just use the same register without emitting an instruction
+// MOVDnop doesn't emit an instruction; it exists only to pin down the type.
+(MOVDreg x) && x.Uses == 1 -> (MOVDnop x)
+
+// fold constant into arithmetic ops
+(ADD (MOVDconst [c]) x) -> (ADDconst [c] x)
+(ADD x (MOVDconst [c])) -> (ADDconst [c] x)
+(SUB x (MOVDconst [c])) -> (SUBconst [c] x)
+(AND (MOVDconst [c]) x) -> (ANDconst [c] x)
+(AND x (MOVDconst [c])) -> (ANDconst [c] x)
+(OR (MOVDconst [c]) x) -> (ORconst [c] x)
+(OR x (MOVDconst [c])) -> (ORconst [c] x)
+(XOR (MOVDconst [c]) x) -> (XORconst [c] x)
+(XOR x (MOVDconst [c])) -> (XORconst [c] x)
+(BIC x (MOVDconst [c])) -> (BICconst [c] x)
+
+(SLL x (MOVDconst [c])) -> (SLLconst x [c&63]) // Note: I don't think we ever generate bad constant shifts (i.e. c>=64)
+(SRL x (MOVDconst [c])) -> (SRLconst x [c&63])
+(SRA x (MOVDconst [c])) -> (SRAconst x [c&63])
+
+(CMP x (MOVDconst [c])) -> (CMPconst [c] x)
+(CMP (MOVDconst [c]) x) -> (InvertFlags (CMPconst [c] x))
+(CMPW x (MOVDconst [c])) -> (CMPWconst [int64(int32(c))] x)
+(CMPW (MOVDconst [c]) x) -> (InvertFlags (CMPWconst [int64(int32(c))] x))
+
+// mul by constant
+(MUL x (MOVDconst [-1])) -> (NEG x)
+(MUL _ (MOVDconst [0])) -> (MOVDconst [0])
+(MUL x (MOVDconst [1])) -> x
+(MUL x (MOVDconst [c])) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x)
+(MUL x (MOVDconst [c])) && isPowerOfTwo(c-1) && c >= 3 -> (ADDshiftLL x x [log2(c-1)])
+(MUL x (MOVDconst [c])) && isPowerOfTwo(c+1) && c >= 7 -> (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
+(MUL x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) -> (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
+(MUL x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) -> (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
+(MUL x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) -> (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
+(MUL x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) -> (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
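+// For illustration only (not rules): with c=7, isPowerOfTwo(c+1) holds, so
+// x*7 becomes (ADDshiftLL (NEG x) x [3]) = -x + (x<<3). With c=24, c%3 == 0
+// and c/3 = 8 is a power of two, so x*24 becomes
+// (SLLconst [3] (ADDshiftLL x x [1])) = (x + (x<<1)) << 3.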
+
+(MUL (MOVDconst [-1]) x) -> (NEG x)
+(MUL (MOVDconst [0]) _) -> (MOVDconst [0])
+(MUL (MOVDconst [1]) x) -> x
+(MUL (MOVDconst [c]) x) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x)
+(MUL (MOVDconst [c]) x) && isPowerOfTwo(c-1) && c >= 3 -> (ADDshiftLL x x [log2(c-1)])
+(MUL (MOVDconst [c]) x) && isPowerOfTwo(c+1) && c >= 7 -> (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
+(MUL (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) -> (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
+(MUL (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) -> (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
+(MUL (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) -> (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
+(MUL (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) -> (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
+
+(MULW x (MOVDconst [c])) && int32(c)==-1 -> (NEG x)
+(MULW _ (MOVDconst [c])) && int32(c)==0 -> (MOVDconst [0])
+(MULW x (MOVDconst [c])) && int32(c)==1 -> x
+(MULW x (MOVDconst [c])) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x)
+(MULW x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c) >= 3 -> (ADDshiftLL x x [log2(c-1)])
+(MULW x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c) >= 7 -> (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
+(MULW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
+(MULW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
+(MULW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
+(MULW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
+
+(MULW (MOVDconst [c]) x) && int32(c)==-1 -> (NEG x)
+(MULW (MOVDconst [c]) _) && int32(c)==0 -> (MOVDconst [0])
+(MULW (MOVDconst [c]) x) && int32(c)==1 -> x
+(MULW (MOVDconst [c]) x) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x)
+(MULW (MOVDconst [c]) x) && isPowerOfTwo(c-1) && int32(c) >= 3 -> (ADDshiftLL x x [log2(c-1)])
+(MULW (MOVDconst [c]) x) && isPowerOfTwo(c+1) && int32(c) >= 7 -> (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
+(MULW (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
+(MULW (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
+(MULW (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
+(MULW (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
+
+// div by constant
+(UDIV x (MOVDconst [1])) -> x
+(UDIV x (MOVDconst [c])) && isPowerOfTwo(c) -> (SRLconst [log2(c)] x)
+(UDIVW x (MOVDconst [c])) && uint32(c)==1 -> x
+(UDIVW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) -> (SRLconst [log2(c)] x)
+(UMOD _ (MOVDconst [1])) -> (MOVDconst [0])
+(UMOD x (MOVDconst [c])) && isPowerOfTwo(c) -> (ANDconst [c-1] x)
+(UMODW _ (MOVDconst [c])) && uint32(c)==1 -> (MOVDconst [0])
+(UMODW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) -> (ANDconst [c-1] x)
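+// For illustration only (not rules): for an unsigned u, the rules above turn
+// u/8 into u>>3 (SRLconst [3] u) and u%8 into u&7 (ANDconst [7] u).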
+
+// generic simplifications
+(ADD x (NEG y)) -> (SUB x y)
+(ADD (NEG y) x) -> (SUB x y)
+(SUB x x) -> (MOVDconst [0])
+(AND x x) -> x
+(OR x x) -> x
+(XOR x x) -> (MOVDconst [0])
+(BIC x x) -> (MOVDconst [0])
+(AND x (MVN y)) -> (BIC x y)
+(CSELULT x (MOVDconst [0]) flag) -> (CSELULT0 x flag)
+
+// remove redundant *const ops
+(ADDconst [0] x) -> x
+(SUBconst [0] x) -> x
+(ANDconst [0] _) -> (MOVDconst [0])
+(ANDconst [-1] x) -> x
+(ORconst [0] x) -> x
+(ORconst [-1] _) -> (MOVDconst [-1])
+(XORconst [0] x) -> x
+(XORconst [-1] x) -> (MVN x)
+(BICconst [0] x) -> x
+(BICconst [-1] _) -> (MOVDconst [0])
+
+// generic constant folding
+(ADDconst [c] (MOVDconst [d])) -> (MOVDconst [c+d])
+(ADDconst [c] (ADDconst [d] x)) -> (ADDconst [c+d] x)
+(ADDconst [c] (SUBconst [d] x)) -> (ADDconst [c-d] x)
+(SUBconst [c] (MOVDconst [d])) -> (MOVDconst [d-c])
+(SUBconst [c] (SUBconst [d] x)) -> (ADDconst [-c-d] x)
+(SUBconst [c] (ADDconst [d] x)) -> (ADDconst [-c+d] x)
+(SLLconst [c] (MOVDconst [d])) -> (MOVDconst [int64(d)<<uint64(c)])
+(SRLconst [c] (MOVDconst [d])) -> (MOVDconst [int64(uint64(d)>>uint64(c))])
+(SRAconst [c] (MOVDconst [d])) -> (MOVDconst [int64(d)>>uint64(c)])
+(MUL (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c*d])
+(MULW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(int32(c)*int32(d))])
+(DIV (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(c)/int64(d)])
+(UDIV (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(uint64(c)/uint64(d))])
+(DIVW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(int32(c)/int32(d))])
+(UDIVW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(uint32(c)/uint32(d))])
+(MOD (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(c)%int64(d)])
+(UMOD (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(uint64(c)%uint64(d))])
+(MODW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(int32(c)%int32(d))])
+(UMODW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(uint32(c)%uint32(d))])
+(ANDconst [c] (MOVDconst [d])) -> (MOVDconst [c&d])
+(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x)
+(ORconst [c] (MOVDconst [d])) -> (MOVDconst [c|d])
+(ORconst [c] (ORconst [d] x)) -> (ORconst [c|d] x)
+(XORconst [c] (MOVDconst [d])) -> (MOVDconst [c^d])
+(XORconst [c] (XORconst [d] x)) -> (XORconst [c^d] x)
+(BICconst [c] (MOVDconst [d])) -> (MOVDconst [d&^c])
+(MVN (MOVDconst [c])) -> (MOVDconst [^c])
+(NEG (MOVDconst [c])) -> (MOVDconst [-c])
+(MOVBreg (MOVDconst [c])) -> (MOVDconst [int64(int8(c))])
+(MOVBUreg (MOVDconst [c])) -> (MOVDconst [int64(uint8(c))])
+(MOVHreg (MOVDconst [c])) -> (MOVDconst [int64(int16(c))])
+(MOVHUreg (MOVDconst [c])) -> (MOVDconst [int64(uint16(c))])
+(MOVWreg (MOVDconst [c])) -> (MOVDconst [int64(int32(c))])
+(MOVWUreg (MOVDconst [c])) -> (MOVDconst [int64(uint32(c))])
+(MOVDreg (MOVDconst [c])) -> (MOVDconst [c])
+
+// constant comparisons
+(CMPconst (MOVDconst [x]) [y]) && x==y -> (FlagEQ)
+(CMPconst (MOVDconst [x]) [y]) && int64(x)<int64(y) && uint64(x)<uint64(y) -> (FlagLT_ULT)
+(CMPconst (MOVDconst [x]) [y]) && int64(x)<int64(y) && uint64(x)>uint64(y) -> (FlagLT_UGT)
+(CMPconst (MOVDconst [x]) [y]) && int64(x)>int64(y) && uint64(x)<uint64(y) -> (FlagGT_ULT)
+(CMPconst (MOVDconst [x]) [y]) && int64(x)>int64(y) && uint64(x)>uint64(y) -> (FlagGT_UGT)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) && uint32(x)<uint32(y) -> (FlagLT_ULT)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) && uint32(x)>uint32(y) -> (FlagLT_UGT)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) && uint32(x)<uint32(y) -> (FlagGT_ULT)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) && uint32(x)>uint32(y) -> (FlagGT_UGT)
+
+// other known comparisons
+(CMPconst (MOVBUreg _) [c]) && 0xff < c -> (FlagLT_ULT)
+(CMPconst (MOVHUreg _) [c]) && 0xffff < c -> (FlagLT_ULT)
+(CMPconst (MOVWUreg _) [c]) && 0xffffffff < c -> (FlagLT_ULT)
+(CMPconst (ANDconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT_ULT)
+(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n) -> (FlagLT_ULT)
+(CMPWconst (MOVBUreg _) [c]) && 0xff < int32(c) -> (FlagLT_ULT)
+(CMPWconst (MOVHUreg _) [c]) && 0xffff < int32(c) -> (FlagLT_ULT)
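+// The SRLconst rule above is sound because x>>c fits in 64-c bits, hence
+// x>>c < 1<<(64-c); whenever 1<<(64-c) <= n, the comparison against n is
+// known to be less-than, both signed and unsigned (FlagLT_ULT).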
+
+// absorb flag constants into branches
+(EQ (FlagEQ) yes no) -> (First nil yes no)
+(EQ (FlagLT_ULT) yes no) -> (First nil no yes)
+(EQ (FlagLT_UGT) yes no) -> (First nil no yes)
+(EQ (FlagGT_ULT) yes no) -> (First nil no yes)
+(EQ (FlagGT_UGT) yes no) -> (First nil no yes)
+
+(NE (FlagEQ) yes no) -> (First nil no yes)
+(NE (FlagLT_ULT) yes no) -> (First nil yes no)
+(NE (FlagLT_UGT) yes no) -> (First nil yes no)
+(NE (FlagGT_ULT) yes no) -> (First nil yes no)
+(NE (FlagGT_UGT) yes no) -> (First nil yes no)
+
+(LT (FlagEQ) yes no) -> (First nil no yes)
+(LT (FlagLT_ULT) yes no) -> (First nil yes no)
+(LT (FlagLT_UGT) yes no) -> (First nil yes no)
+(LT (FlagGT_ULT) yes no) -> (First nil no yes)
+(LT (FlagGT_UGT) yes no) -> (First nil no yes)
+
+(LE (FlagEQ) yes no) -> (First nil yes no)
+(LE (FlagLT_ULT) yes no) -> (First nil yes no)
+(LE (FlagLT_UGT) yes no) -> (First nil yes no)
+(LE (FlagGT_ULT) yes no) -> (First nil no yes)
+(LE (FlagGT_UGT) yes no) -> (First nil no yes)
+
+(GT (FlagEQ) yes no) -> (First nil no yes)
+(GT (FlagLT_ULT) yes no) -> (First nil no yes)
+(GT (FlagLT_UGT) yes no) -> (First nil no yes)
+(GT (FlagGT_ULT) yes no) -> (First nil yes no)
+(GT (FlagGT_UGT) yes no) -> (First nil yes no)
+
+(GE (FlagEQ) yes no) -> (First nil yes no)
+(GE (FlagLT_ULT) yes no) -> (First nil no yes)
+(GE (FlagLT_UGT) yes no) -> (First nil no yes)
+(GE (FlagGT_ULT) yes no) -> (First nil yes no)
+(GE (FlagGT_UGT) yes no) -> (First nil yes no)
+
+(ULT (FlagEQ) yes no) -> (First nil no yes)
+(ULT (FlagLT_ULT) yes no) -> (First nil yes no)
+(ULT (FlagLT_UGT) yes no) -> (First nil no yes)
+(ULT (FlagGT_ULT) yes no) -> (First nil yes no)
+(ULT (FlagGT_UGT) yes no) -> (First nil no yes)
+
+(ULE (FlagEQ) yes no) -> (First nil yes no)
+(ULE (FlagLT_ULT) yes no) -> (First nil yes no)
+(ULE (FlagLT_UGT) yes no) -> (First nil no yes)
+(ULE (FlagGT_ULT) yes no) -> (First nil yes no)
+(ULE (FlagGT_UGT) yes no) -> (First nil no yes)
+
+(UGT (FlagEQ) yes no) -> (First nil no yes)
+(UGT (FlagLT_ULT) yes no) -> (First nil no yes)
+(UGT (FlagLT_UGT) yes no) -> (First nil yes no)
+(UGT (FlagGT_ULT) yes no) -> (First nil no yes)
+(UGT (FlagGT_UGT) yes no) -> (First nil yes no)
+
+(UGE (FlagEQ) yes no) -> (First nil yes no)
+(UGE (FlagLT_ULT) yes no) -> (First nil no yes)
+(UGE (FlagLT_UGT) yes no) -> (First nil yes no)
+(UGE (FlagGT_ULT) yes no) -> (First nil no yes)
+(UGE (FlagGT_UGT) yes no) -> (First nil yes no)
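+// For illustration: a branch whose flags are a known constant is resolved at
+// compile time. E.g. (EQ (FlagLT_ULT) yes no) -> (First nil no yes) means
+// "branch-on-equal sees strictly-less-than flags", so control always flows
+// to the "no" successor (First takes its first successor).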
+
+// absorb InvertFlags into branches
+(LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
+(ULT (InvertFlags cmp) yes no) -> (UGT cmp yes no)
+(UGT (InvertFlags cmp) yes no) -> (ULT cmp yes no)
+(ULE (InvertFlags cmp) yes no) -> (UGE cmp yes no)
+(UGE (InvertFlags cmp) yes no) -> (ULE cmp yes no)
+(EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) -> (NE cmp yes no)
+
+// absorb flag constants into boolean values
+(Equal (FlagEQ)) -> (MOVDconst [1])
+(Equal (FlagLT_ULT)) -> (MOVDconst [0])
+(Equal (FlagLT_UGT)) -> (MOVDconst [0])
+(Equal (FlagGT_ULT)) -> (MOVDconst [0])
+(Equal (FlagGT_UGT)) -> (MOVDconst [0])
+
+(NotEqual (FlagEQ)) -> (MOVDconst [0])
+(NotEqual (FlagLT_ULT)) -> (MOVDconst [1])
+(NotEqual (FlagLT_UGT)) -> (MOVDconst [1])
+(NotEqual (FlagGT_ULT)) -> (MOVDconst [1])
+(NotEqual (FlagGT_UGT)) -> (MOVDconst [1])
+
+(LessThan (FlagEQ)) -> (MOVDconst [0])
+(LessThan (FlagLT_ULT)) -> (MOVDconst [1])
+(LessThan (FlagLT_UGT)) -> (MOVDconst [1])
+(LessThan (FlagGT_ULT)) -> (MOVDconst [0])
+(LessThan (FlagGT_UGT)) -> (MOVDconst [0])
+
+(LessThanU (FlagEQ)) -> (MOVDconst [0])
+(LessThanU (FlagLT_ULT)) -> (MOVDconst [1])
+(LessThanU (FlagLT_UGT)) -> (MOVDconst [0])
+(LessThanU (FlagGT_ULT)) -> (MOVDconst [1])
+(LessThanU (FlagGT_UGT)) -> (MOVDconst [0])
+
+(LessEqual (FlagEQ)) -> (MOVDconst [1])
+(LessEqual (FlagLT_ULT)) -> (MOVDconst [1])
+(LessEqual (FlagLT_UGT)) -> (MOVDconst [1])
+(LessEqual (FlagGT_ULT)) -> (MOVDconst [0])
+(LessEqual (FlagGT_UGT)) -> (MOVDconst [0])
+
+(LessEqualU (FlagEQ)) -> (MOVDconst [1])
+(LessEqualU (FlagLT_ULT)) -> (MOVDconst [1])
+(LessEqualU (FlagLT_UGT)) -> (MOVDconst [0])
+(LessEqualU (FlagGT_ULT)) -> (MOVDconst [1])
+(LessEqualU (FlagGT_UGT)) -> (MOVDconst [0])
+
+(GreaterThan (FlagEQ)) -> (MOVDconst [0])
+(GreaterThan (FlagLT_ULT)) -> (MOVDconst [0])
+(GreaterThan (FlagLT_UGT)) -> (MOVDconst [0])
+(GreaterThan (FlagGT_ULT)) -> (MOVDconst [1])
+(GreaterThan (FlagGT_UGT)) -> (MOVDconst [1])
+
+(GreaterThanU (FlagEQ)) -> (MOVDconst [0])
+(GreaterThanU (FlagLT_ULT)) -> (MOVDconst [0])
+(GreaterThanU (FlagLT_UGT)) -> (MOVDconst [1])
+(GreaterThanU (FlagGT_ULT)) -> (MOVDconst [0])
+(GreaterThanU (FlagGT_UGT)) -> (MOVDconst [1])
+
+(GreaterEqual (FlagEQ)) -> (MOVDconst [1])
+(GreaterEqual (FlagLT_ULT)) -> (MOVDconst [0])
+(GreaterEqual (FlagLT_UGT)) -> (MOVDconst [0])
+(GreaterEqual (FlagGT_ULT)) -> (MOVDconst [1])
+(GreaterEqual (FlagGT_UGT)) -> (MOVDconst [1])
+
+(GreaterEqualU (FlagEQ)) -> (MOVDconst [1])
+(GreaterEqualU (FlagLT_ULT)) -> (MOVDconst [0])
+(GreaterEqualU (FlagLT_UGT)) -> (MOVDconst [1])
+(GreaterEqualU (FlagGT_ULT)) -> (MOVDconst [0])
+(GreaterEqualU (FlagGT_UGT)) -> (MOVDconst [1])
+
+// absorb InvertFlags into boolean values
+(Equal (InvertFlags x)) -> (Equal x)
+(NotEqual (InvertFlags x)) -> (NotEqual x)
+(LessThan (InvertFlags x)) -> (GreaterThan x)
+(LessThanU (InvertFlags x)) -> (GreaterThanU x)
+(GreaterThan (InvertFlags x)) -> (LessThan x)
+(GreaterThanU (InvertFlags x)) -> (LessThanU x)
+(LessEqual (InvertFlags x)) -> (GreaterEqual x)
+(LessEqualU (InvertFlags x)) -> (GreaterEqualU x)
+(GreaterEqual (InvertFlags x)) -> (LessEqual x)
+(GreaterEqualU (InvertFlags x)) -> (LessEqualU x)
+
+// absorb flag constants into conditional instructions
+(CSELULT _ y (FlagEQ)) -> y
+(CSELULT x _ (FlagLT_ULT)) -> x
+(CSELULT _ y (FlagLT_UGT)) -> y
+(CSELULT x _ (FlagGT_ULT)) -> x
+(CSELULT _ y (FlagGT_UGT)) -> y
+(CSELULT0 _ (FlagEQ)) -> (MOVDconst [0])
+(CSELULT0 x (FlagLT_ULT)) -> x
+(CSELULT0 _ (FlagLT_UGT)) -> (MOVDconst [0])
+(CSELULT0 x (FlagGT_ULT)) -> x
+(CSELULT0 _ (FlagGT_UGT)) -> (MOVDconst [0])
+
+// absorb shifts into ops
+(ADD x (SLLconst [c] y)) -> (ADDshiftLL x y [c])
+(ADD (SLLconst [c] y) x) -> (ADDshiftLL x y [c])
+(ADD x (SRLconst [c] y)) -> (ADDshiftRL x y [c])
+(ADD (SRLconst [c] y) x) -> (ADDshiftRL x y [c])
+(ADD x (SRAconst [c] y)) -> (ADDshiftRA x y [c])
+(ADD (SRAconst [c] y) x) -> (ADDshiftRA x y [c])
+(SUB x (SLLconst [c] y)) -> (SUBshiftLL x y [c])
+(SUB x (SRLconst [c] y)) -> (SUBshiftRL x y [c])
+(SUB x (SRAconst [c] y)) -> (SUBshiftRA x y [c])
+(AND x (SLLconst [c] y)) -> (ANDshiftLL x y [c])
+(AND (SLLconst [c] y) x) -> (ANDshiftLL x y [c])
+(AND x (SRLconst [c] y)) -> (ANDshiftRL x y [c])
+(AND (SRLconst [c] y) x) -> (ANDshiftRL x y [c])
+(AND x (SRAconst [c] y)) -> (ANDshiftRA x y [c])
+(AND (SRAconst [c] y) x) -> (ANDshiftRA x y [c])
+(OR x (SLLconst [c] y)) -> (ORshiftLL x y [c])
+(OR (SLLconst [c] y) x) -> (ORshiftLL x y [c])
+(OR x (SRLconst [c] y)) -> (ORshiftRL x y [c])
+(OR (SRLconst [c] y) x) -> (ORshiftRL x y [c])
+(OR x (SRAconst [c] y)) -> (ORshiftRA x y [c])
+(OR (SRAconst [c] y) x) -> (ORshiftRA x y [c])
+(XOR x (SLLconst [c] y)) -> (XORshiftLL x y [c])
+(XOR (SLLconst [c] y) x) -> (XORshiftLL x y [c])
+(XOR x (SRLconst [c] y)) -> (XORshiftRL x y [c])
+(XOR (SRLconst [c] y) x) -> (XORshiftRL x y [c])
+(XOR x (SRAconst [c] y)) -> (XORshiftRA x y [c])
+(XOR (SRAconst [c] y) x) -> (XORshiftRA x y [c])
+(BIC x (SLLconst [c] y)) -> (BICshiftLL x y [c])
+(BIC x (SRLconst [c] y)) -> (BICshiftRL x y [c])
+(BIC x (SRAconst [c] y)) -> (BICshiftRA x y [c])
+(CMP x (SLLconst [c] y)) -> (CMPshiftLL x y [c])
+(CMP (SLLconst [c] y) x) -> (InvertFlags (CMPshiftLL x y [c]))
+(CMP x (SRLconst [c] y)) -> (CMPshiftRL x y [c])
+(CMP (SRLconst [c] y) x) -> (InvertFlags (CMPshiftRL x y [c]))
+(CMP x (SRAconst [c] y)) -> (CMPshiftRA x y [c])
+(CMP (SRAconst [c] y) x) -> (InvertFlags (CMPshiftRA x y [c]))
+
+// prefer *const ops to *shift ops
+(ADDshiftLL (MOVDconst [c]) x [d]) -> (ADDconst [c] (SLLconst <x.Type> x [d]))
+(ADDshiftRL (MOVDconst [c]) x [d]) -> (ADDconst [c] (SRLconst <x.Type> x [d]))
+(ADDshiftRA (MOVDconst [c]) x [d]) -> (ADDconst [c] (SRAconst <x.Type> x [d]))
+(ANDshiftLL (MOVDconst [c]) x [d]) -> (ANDconst [c] (SLLconst <x.Type> x [d]))
+(ANDshiftRL (MOVDconst [c]) x [d]) -> (ANDconst [c] (SRLconst <x.Type> x [d]))
+(ANDshiftRA (MOVDconst [c]) x [d]) -> (ANDconst [c] (SRAconst <x.Type> x [d]))
+(ORshiftLL (MOVDconst [c]) x [d]) -> (ORconst [c] (SLLconst <x.Type> x [d]))
+(ORshiftRL (MOVDconst [c]) x [d]) -> (ORconst [c] (SRLconst <x.Type> x [d]))
+(ORshiftRA (MOVDconst [c]) x [d]) -> (ORconst [c] (SRAconst <x.Type> x [d]))
+(XORshiftLL (MOVDconst [c]) x [d]) -> (XORconst [c] (SLLconst <x.Type> x [d]))
+(XORshiftRL (MOVDconst [c]) x [d]) -> (XORconst [c] (SRLconst <x.Type> x [d]))
+(XORshiftRA (MOVDconst [c]) x [d]) -> (XORconst [c] (SRAconst <x.Type> x [d]))
+(CMPshiftLL (MOVDconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
+(CMPshiftRL (MOVDconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
+(CMPshiftRA (MOVDconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
+
+// constant folding in *shift ops
+(ADDshiftLL x (MOVDconst [c]) [d]) -> (ADDconst x [int64(uint64(c)<<uint64(d))])
+(ADDshiftRL x (MOVDconst [c]) [d]) -> (ADDconst x [int64(uint64(c)>>uint64(d))])
+(ADDshiftRA x (MOVDconst [c]) [d]) -> (ADDconst x [int64(int64(c)>>uint64(d))])
+(SUBshiftLL x (MOVDconst [c]) [d]) -> (SUBconst x [int64(uint64(c)<<uint64(d))])
+(SUBshiftRL x (MOVDconst [c]) [d]) -> (SUBconst x [int64(uint64(c)>>uint64(d))])
+(SUBshiftRA x (MOVDconst [c]) [d]) -> (SUBconst x [int64(int64(c)>>uint64(d))])
+(ANDshiftLL x (MOVDconst [c]) [d]) -> (ANDconst x [int64(uint64(c)<<uint64(d))])
+(ANDshiftRL x (MOVDconst [c]) [d]) -> (ANDconst x [int64(uint64(c)>>uint64(d))])
+(ANDshiftRA x (MOVDconst [c]) [d]) -> (ANDconst x [int64(int64(c)>>uint64(d))])
+(ORshiftLL x (MOVDconst [c]) [d]) -> (ORconst x [int64(uint64(c)<<uint64(d))])
+(ORshiftRL x (MOVDconst [c]) [d]) -> (ORconst x [int64(uint64(c)>>uint64(d))])
+(ORshiftRA x (MOVDconst [c]) [d]) -> (ORconst x [int64(int64(c)>>uint64(d))])
+(XORshiftLL x (MOVDconst [c]) [d]) -> (XORconst x [int64(uint64(c)<<uint64(d))])
+(XORshiftRL x (MOVDconst [c]) [d]) -> (XORconst x [int64(uint64(c)>>uint64(d))])
+(XORshiftRA x (MOVDconst [c]) [d]) -> (XORconst x [int64(int64(c)>>uint64(d))])
+(BICshiftLL x (MOVDconst [c]) [d]) -> (BICconst x [int64(uint64(c)<<uint64(d))])
+(BICshiftRL x (MOVDconst [c]) [d]) -> (BICconst x [int64(uint64(c)>>uint64(d))])
+(BICshiftRA x (MOVDconst [c]) [d]) -> (BICconst x [int64(int64(c)>>uint64(d))])
+(CMPshiftLL x (MOVDconst [c]) [d]) -> (CMPconst x [int64(uint64(c)<<uint64(d))])
+(CMPshiftRL x (MOVDconst [c]) [d]) -> (CMPconst x [int64(uint64(c)>>uint64(d))])
+(CMPshiftRA x (MOVDconst [c]) [d]) -> (CMPconst x [int64(int64(c)>>uint64(d))])
+
+// simplification with *shift ops
+(SUBshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVDconst [0])
+(SUBshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVDconst [0])
+(SUBshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVDconst [0])
+(ANDshiftLL x y:(SLLconst x [c]) [d]) && c==d -> y
+(ANDshiftRL x y:(SRLconst x [c]) [d]) && c==d -> y
+(ANDshiftRA x y:(SRAconst x [c]) [d]) && c==d -> y
+(ORshiftLL x y:(SLLconst x [c]) [d]) && c==d -> y
+(ORshiftRL x y:(SRLconst x [c]) [d]) && c==d -> y
+(ORshiftRA x y:(SRAconst x [c]) [d]) && c==d -> y
+(XORshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVDconst [0])
+(XORshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVDconst [0])
+(XORshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVDconst [0])
+(BICshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVDconst [0])
+(BICshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVDconst [0])
+(BICshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVDconst [0])
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
new file mode 100644
index 0000000000..b586ec5b57
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
@@ -0,0 +1,455 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - *const instructions may use a constant larger than the instruction can encode.
+//   In this case the assembler expands to multiple instructions and uses the tmp
+//   register (R27).
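+//   For illustration only (a sketch; the exact expansion depends on the
+//   assembler): an op like ADDconst with a large auxInt may assemble roughly as
+//     MOVD $const, R27
+//     ADD  R27, Rx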
+
+// Suffixes encode the bit width of various instructions.
+// D (double word) = 64 bit
+// W (word) = 32 bit
+// H (half word) = 16 bit
+// HU = 16 bit unsigned
+// B (byte) = 8 bit
+// BU = 8 bit unsigned
+// S (single) = 32 bit float
+// D (double) = 64 bit float
+
+// Note: registers not used in regalloc are not included in this list,
+// so that regmask stays within int64
+// Be careful when hand coding regmasks.
+var regNamesARM64 = []string{
+ "R0",
+ "R1",
+ "R2",
+ "R3",
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "R10",
+ "R11",
+ "R12",
+ "R13",
+ "R14",
+ "R15",
+ "R16",
+ "R17",
+ "R18", // platform register, not used
+ "R19",
+ "R20",
+ "R21",
+ "R22",
+ "R23",
+ "R24",
+ "R25",
+ "R26",
+ // R27 = REGTMP not used in regalloc
+ "g", // aka R28
+ "R29", // frame pointer, not used
+ // R30 = REGLINK not used in regalloc
+ "SP", // aka R31
+
+ "F0",
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ "F13",
+ "F14",
+ "F15",
+ "F16",
+ "F17",
+ "F18",
+ "F19",
+ "F20",
+ "F21",
+ "F22",
+ "F23",
+ "F24",
+ "F25",
+ "F26",
+ "F27",
+ "F28", // 0.0
+ "F29", // 0.5
+ "F30", // 1.0
+ "F31", // 2.0
+
+ // pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesARM64) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesARM64 {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
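+	// For example, R0 and R2 map to bits 0 and 2 above, so
+	// buildReg("R0 R2") == regMask(5) (binary 101).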
+
+ // Common individual register masks
+ var (
+ gp = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26")
+ gpg = gp | buildReg("g")
+ gpsp = gp | buildReg("SP")
+ gpspg = gpg | buildReg("SP")
+ gpspsbg = gpspg | buildReg("SB")
+ fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27")
+ callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g
+ )
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
+ gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
+ gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}}
+ gp1flags = regInfo{inputs: []regMask{gpg}}
+ gp1flags1 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
+ gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}}
+ gp2flags = regInfo{inputs: []regMask{gpg, gpg}}
+ gp2flags1 = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}}
+ //gp22 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp, gp}}
+ //gp31 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}}
+ //gp3flags = regInfo{inputs: []regMask{gp, gp, gp}}
+ //gp3flags1 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}}
+ gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}
+ gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}}
+ gpstore0 = regInfo{inputs: []regMask{gpspsbg}}
+ //gp2load = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
+ //gp2store = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}}
+ fp01 = regInfo{inputs: nil, outputs: []regMask{fp}}
+ fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
+ //fp1flags = regInfo{inputs: []regMask{fp}}
+ fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}}
+ gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+ fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}}
+ fpstore = regInfo{inputs: []regMask{gpspsbg, fp}}
+ readflags = regInfo{inputs: nil, outputs: []regMask{gp}}
+ )
+ ops := []opData{
+ // binary ops
+ {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
+ {name: "ADDconst", argLength: 1, reg: gp11sp, asm: "ADD", aux: "Int64"}, // arg0 + auxInt
+ {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0 - arg1
+ {name: "SUBconst", argLength: 1, reg: gp11, asm: "SUB", aux: "Int64"}, // arg0 - auxInt
+ {name: "MUL", argLength: 2, reg: gp21, asm: "MUL", commutative: true}, // arg0 * arg1
+ {name: "MULW", argLength: 2, reg: gp21, asm: "MULW", commutative: true}, // arg0 * arg1, 32-bit
+ {name: "MULH", argLength: 2, reg: gp21, asm: "SMULH", commutative: true}, // (arg0 * arg1) >> 64, signed
+ {name: "UMULH", argLength: 2, reg: gp21, asm: "UMULH", commutative: true}, // (arg0 * arg1) >> 64, unsigned
+ {name: "MULL", argLength: 2, reg: gp21, asm: "SMULL", commutative: true}, // arg0 * arg1, signed, 32-bit mult results in 64-bit
+ {name: "UMULL", argLength: 2, reg: gp21, asm: "UMULL", commutative: true}, // arg0 * arg1, unsigned, 32-bit mult results in 64-bit
+ {name: "DIV", argLength: 2, reg: gp21, asm: "SDIV"}, // arg0 / arg1, signed
+		{name: "UDIV", argLength: 2, reg: gp21, asm: "UDIV"}, // arg0 / arg1, unsigned
+		{name: "DIVW", argLength: 2, reg: gp21, asm: "SDIVW"}, // arg0 / arg1, signed, 32 bit
+		{name: "UDIVW", argLength: 2, reg: gp21, asm: "UDIVW"}, // arg0 / arg1, unsigned, 32 bit
+ {name: "MOD", argLength: 2, reg: gp21, asm: "REM"}, // arg0 % arg1, signed
+ {name: "UMOD", argLength: 2, reg: gp21, asm: "UREM"}, // arg0 % arg1, unsigned
+ {name: "MODW", argLength: 2, reg: gp21, asm: "REMW"}, // arg0 % arg1, signed, 32 bit
+ {name: "UMODW", argLength: 2, reg: gp21, asm: "UREMW"}, // arg0 % arg1, unsigned, 32 bit
+
+ {name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true}, // arg0 + arg1
+ {name: "FADDD", argLength: 2, reg: fp21, asm: "FADDD", commutative: true}, // arg0 + arg1
+ {name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS"}, // arg0 - arg1
+ {name: "FSUBD", argLength: 2, reg: fp21, asm: "FSUBD"}, // arg0 - arg1
+ {name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true}, // arg0 * arg1
+ {name: "FMULD", argLength: 2, reg: fp21, asm: "FMULD", commutative: true}, // arg0 * arg1
+ {name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS"}, // arg0 / arg1
+ {name: "FDIVD", argLength: 2, reg: fp21, asm: "FDIVD"}, // arg0 / arg1
+
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1
+ {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int64"}, // arg0 & auxInt
+ {name: "OR", argLength: 2, reg: gp21, asm: "ORR", commutative: true}, // arg0 | arg1
+ {name: "ORconst", argLength: 1, reg: gp11, asm: "ORR", aux: "Int64"}, // arg0 | auxInt
+ {name: "XOR", argLength: 2, reg: gp21, asm: "EOR", commutative: true}, // arg0 ^ arg1
+ {name: "XORconst", argLength: 1, reg: gp11, asm: "EOR", aux: "Int64"}, // arg0 ^ auxInt
+ {name: "BIC", argLength: 2, reg: gp21, asm: "BIC"}, // arg0 &^ arg1
+ {name: "BICconst", argLength: 1, reg: gp11, asm: "BIC", aux: "Int64"}, // arg0 &^ auxInt
+
+ // unary ops
+ {name: "MVN", argLength: 1, reg: gp11, asm: "MVN"}, // ^arg0
+ {name: "NEG", argLength: 1, reg: gp11, asm: "NEG"}, // -arg0
+ {name: "FNEGS", argLength: 1, reg: fp11, asm: "FNEGS"}, // -arg0, float32
+ {name: "FNEGD", argLength: 1, reg: fp11, asm: "FNEGD"}, // -arg0, float64
+ {name: "FSQRTD", argLength: 1, reg: fp11, asm: "FSQRTD"}, // sqrt(arg0), float64
+
+ // shifts
+ {name: "SLL", argLength: 2, reg: gp21, asm: "LSL"}, // arg0 << arg1, shift amount is mod 64
+ {name: "SLLconst", argLength: 1, reg: gp11, asm: "LSL", aux: "Int64"}, // arg0 << auxInt
+ {name: "SRL", argLength: 2, reg: gp21, asm: "LSR"}, // arg0 >> arg1, unsigned, shift amount is mod 64
+ {name: "SRLconst", argLength: 1, reg: gp11, asm: "LSR", aux: "Int64"}, // arg0 >> auxInt, unsigned
+ {name: "SRA", argLength: 2, reg: gp21, asm: "ASR"}, // arg0 >> arg1, signed, shift amount is mod 64
+ {name: "SRAconst", argLength: 1, reg: gp11, asm: "ASR", aux: "Int64"}, // arg0 >> auxInt, signed
+ {name: "RORconst", argLength: 1, reg: gp11, asm: "ROR", aux: "Int64"}, // arg0 right rotate by auxInt bits
+ {name: "RORWconst", argLength: 1, reg: gp11, asm: "RORW", aux: "Int64"}, // uint32(arg0) right rotate by auxInt bits
+
+ // comparisons
+ {name: "CMP", argLength: 2, reg: gp2flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPconst", argLength: 1, reg: gp1flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to auxInt
+ {name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1, 32 bit
+ {name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", aux: "Int32", typ: "Flags"}, // arg0 compare to auxInt, 32 bit
+ {name: "CMN", argLength: 2, reg: gp2flags, asm: "CMN", typ: "Flags"}, // arg0 compare to -arg1
+ {name: "CMNconst", argLength: 1, reg: gp1flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // arg0 compare to -auxInt
+ {name: "CMNW", argLength: 2, reg: gp2flags, asm: "CMNW", typ: "Flags"}, // arg0 compare to -arg1, 32 bit
+ {name: "CMNWconst", argLength: 1, reg: gp1flags, asm: "CMNW", aux: "Int32", typ: "Flags"}, // arg0 compare to -auxInt, 32 bit
+ {name: "FCMPS", argLength: 2, reg: fp2flags, asm: "FCMPS", typ: "Flags"}, // arg0 compare to arg1, float32
+ {name: "FCMPD", argLength: 2, reg: fp2flags, asm: "FCMPD", typ: "Flags"}, // arg0 compare to arg1, float64
+
+ // shifted ops
+ {name: "ADDshiftLL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1<<auxInt
+ {name: "ADDshiftRL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1>>auxInt, unsigned shift
+ {name: "ADDshiftRA", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1>>auxInt, signed shift
+ {name: "SUBshiftLL", argLength: 2, reg: gp21, asm: "SUB", aux: "Int64"}, // arg0 - arg1<<auxInt
+ {name: "SUBshiftRL", argLength: 2, reg: gp21, asm: "SUB", aux: "Int64"}, // arg0 - arg1>>auxInt, unsigned shift
+ {name: "SUBshiftRA", argLength: 2, reg: gp21, asm: "SUB", aux: "Int64"}, // arg0 - arg1>>auxInt, signed shift
+ {name: "ANDshiftLL", argLength: 2, reg: gp21, asm: "AND", aux: "Int64"}, // arg0 & (arg1<<auxInt)
+ {name: "ANDshiftRL", argLength: 2, reg: gp21, asm: "AND", aux: "Int64"}, // arg0 & (arg1>>auxInt), unsigned shift
+ {name: "ANDshiftRA", argLength: 2, reg: gp21, asm: "AND", aux: "Int64"}, // arg0 & (arg1>>auxInt), signed shift
+ {name: "ORshiftLL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int64"}, // arg0 | arg1<<auxInt
+ {name: "ORshiftRL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int64"}, // arg0 | arg1>>auxInt, unsigned shift
+ {name: "ORshiftRA", argLength: 2, reg: gp21, asm: "ORR", aux: "Int64"}, // arg0 | arg1>>auxInt, signed shift
+ {name: "XORshiftLL", argLength: 2, reg: gp21, asm: "EOR", aux: "Int64"}, // arg0 ^ arg1<<auxInt
+ {name: "XORshiftRL", argLength: 2, reg: gp21, asm: "EOR", aux: "Int64"}, // arg0 ^ arg1>>auxInt, unsigned shift
+ {name: "XORshiftRA", argLength: 2, reg: gp21, asm: "EOR", aux: "Int64"}, // arg0 ^ arg1>>auxInt, signed shift
+ {name: "BICshiftLL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int64"}, // arg0 &^ (arg1<<auxInt)
+ {name: "BICshiftRL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int64"}, // arg0 &^ (arg1>>auxInt), unsigned shift
+ {name: "BICshiftRA", argLength: 2, reg: gp21, asm: "BIC", aux: "Int64"}, // arg0 &^ (arg1>>auxInt), signed shift
+ {name: "CMPshiftLL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1<<auxInt
+ {name: "CMPshiftRL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1>>auxInt, unsigned shift
+ {name: "CMPshiftRA", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1>>auxInt, signed shift
+
+ // moves
+		{name: "MOVDconst", argLength: 0, reg: gp01, aux: "Int64", asm: "MOVD", typ: "UInt64", rematerializeable: true}, // auxint
+ {name: "FMOVSconst", argLength: 0, reg: fp01, aux: "Float64", asm: "FMOVS", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float
+ {name: "FMOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "FMOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float
+
+ {name: "MOVDaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVD", rematerializeable: true}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB
+
+ {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "Int32"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVWU", typ: "UInt32"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVDload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVD", typ: "UInt64"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "FMOVSload", argLength: 2, reg: fpload, aux: "SymOff", asm: "FMOVS", typ: "Float32"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "FMOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "FMOVD", typ: "Float64"}, // load from arg0 + auxInt + aux. arg1=mem.
+
+ {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVDstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVD", typ: "Mem"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "FMOVSstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "FMOVS", typ: "Mem"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "FMOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "FMOVD", typ: "Mem"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+
+ {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem"}, // store 1 byte of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem"}, // store 2 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem"}, // store 4 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+		{name: "MOVDstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVD", typ: "Mem"}, // store 8 bytes of zero to arg0 + auxInt + aux.  arg1=mem.
+
+ // conversions
+		{name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte
+		{name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, zero-extended from byte
+		{name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half
+		{name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, zero-extended from half
+		{name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0, sign-extended from word
+		{name: "MOVWUreg", argLength: 1, reg: gp11, asm: "MOVWU"}, // move from arg0, zero-extended from word
+		{name: "MOVDreg", argLength: 1, reg: gp11, asm: "MOVD"}, // move from arg0
+
+ {name: "MOVDnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register
+
+ {name: "SCVTFWS", argLength: 1, reg: gpfp, asm: "SCVTFWS"}, // int32 -> float32
+ {name: "SCVTFWD", argLength: 1, reg: gpfp, asm: "SCVTFWD"}, // int32 -> float64
+ {name: "UCVTFWS", argLength: 1, reg: gpfp, asm: "UCVTFWS"}, // uint32 -> float32
+ {name: "UCVTFWD", argLength: 1, reg: gpfp, asm: "UCVTFWD"}, // uint32 -> float64
+ {name: "SCVTFS", argLength: 1, reg: gpfp, asm: "SCVTFS"}, // int64 -> float32
+ {name: "SCVTFD", argLength: 1, reg: gpfp, asm: "SCVTFD"}, // int64 -> float64
+ {name: "UCVTFS", argLength: 1, reg: gpfp, asm: "UCVTFS"}, // uint64 -> float32
+ {name: "UCVTFD", argLength: 1, reg: gpfp, asm: "UCVTFD"}, // uint64 -> float64
+ {name: "FCVTZSSW", argLength: 1, reg: fpgp, asm: "FCVTZSSW"}, // float32 -> int32
+ {name: "FCVTZSDW", argLength: 1, reg: fpgp, asm: "FCVTZSDW"}, // float64 -> int32
+ {name: "FCVTZUSW", argLength: 1, reg: fpgp, asm: "FCVTZUSW"}, // float32 -> uint32
+ {name: "FCVTZUDW", argLength: 1, reg: fpgp, asm: "FCVTZUDW"}, // float64 -> uint32
+ {name: "FCVTZSS", argLength: 1, reg: fpgp, asm: "FCVTZSS"}, // float32 -> int64
+ {name: "FCVTZSD", argLength: 1, reg: fpgp, asm: "FCVTZSD"}, // float64 -> int64
+ {name: "FCVTZUS", argLength: 1, reg: fpgp, asm: "FCVTZUS"}, // float32 -> uint64
+ {name: "FCVTZUD", argLength: 1, reg: fpgp, asm: "FCVTZUD"}, // float64 -> uint64
+ {name: "FCVTSD", argLength: 1, reg: fp11, asm: "FCVTSD"}, // float32 -> float64
+ {name: "FCVTDS", argLength: 1, reg: fp11, asm: "FCVTDS"}, // float64 -> float32
+
+ // conditional instructions
+		{name: "CSELULT", argLength: 3, reg: gp2flags1, asm: "CSEL"}, // returns arg0 if flags indicate unsigned LT, arg1 otherwise, arg2=flags
+		{name: "CSELULT0", argLength: 2, reg: gp1flags1, asm: "CSEL"}, // returns arg0 if flags indicate unsigned LT, 0 otherwise, arg1=flags
+
+ // function calls
+ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R26"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLdefer", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "Int64", clobberFlags: true}, // call deferproc. arg0=mem, auxint=argsize, returns mem
+ {name: "CALLgo", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "Int64", clobberFlags: true}, // call newproc. arg0=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // pseudo-ops
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}}, // panic if arg0 is nil. arg1=mem.
+
+		{name: "Equal", argLength: 1, reg: readflags}, // bool, true if flags encode x==y, false otherwise.
+		{name: "NotEqual", argLength: 1, reg: readflags}, // bool, true if flags encode x!=y, false otherwise.
+		{name: "LessThan", argLength: 1, reg: readflags}, // bool, true if flags encode signed x<y, false otherwise.
+		{name: "LessEqual", argLength: 1, reg: readflags}, // bool, true if flags encode signed x<=y, false otherwise.
+		{name: "GreaterThan", argLength: 1, reg: readflags}, // bool, true if flags encode signed x>y, false otherwise.
+		{name: "GreaterEqual", argLength: 1, reg: readflags}, // bool, true if flags encode signed x>=y, false otherwise.
+		{name: "LessThanU", argLength: 1, reg: readflags}, // bool, true if flags encode unsigned x<y, false otherwise.
+		{name: "LessEqualU", argLength: 1, reg: readflags}, // bool, true if flags encode unsigned x<=y, false otherwise.
+		{name: "GreaterThanU", argLength: 1, reg: readflags}, // bool, true if flags encode unsigned x>y, false otherwise.
+		{name: "GreaterEqualU", argLength: 1, reg: readflags}, // bool, true if flags encode unsigned x>=y, false otherwise.
+
+ // duffzero
+ // arg0 = address of memory to zero
+ // arg1 = mem
+ // auxint = offset into duffzero code to start executing
+ // returns mem
+		// R16 aka arm64.REGRT1 is changed as a side effect
+ {
+ name: "DUFFZERO",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{gp},
+ clobbers: buildReg("R16"),
+ },
+ },
+
+ // large zeroing
+ // arg0 = address of memory to zero (in R16 aka arm64.REGRT1, changed as side effect)
+ // arg1 = address of the last element to zero
+ // arg2 = mem
+ // auxint = alignment
+ // returns mem
+ // MOVD.P ZR, 8(R16)
+ // CMP Rarg1, R16
+ // BLE -2(PC)
+		// Note: the-end-of-the-memory may not be a valid pointer. It's a problem if it is spilled.
+		// the-end-of-the-memory - 8 is within the area to zero, ok to spill.
+ {
+ name: "LoweredZero",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R16"), gp},
+ clobbers: buildReg("R16"),
+ },
+ clobberFlags: true,
+ },
+
+ // large move
+ // arg0 = address of dst memory (in R17 aka arm64.REGRT2, changed as side effect)
+ // arg1 = address of src memory (in R16 aka arm64.REGRT1, changed as side effect)
+ // arg2 = address of the last element of src
+ // arg3 = mem
+ // auxint = alignment
+ // returns mem
+ // MOVD.P 8(R16), Rtmp
+ // MOVD.P Rtmp, 8(R17)
+ // CMP Rarg2, R16
+ // BLE -3(PC)
+		// Note: the-end-of-src may not be a valid pointer. It's a problem if it is spilled.
+ // the-end-of-src - 8 is within the area to copy, ok to spill.
+ {
+ name: "LoweredMove",
+ aux: "Int64",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R17"), buildReg("R16"), gp},
+ clobbers: buildReg("R16 R17"),
+ },
+ clobberFlags: true,
+ },
+
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of R26 (arm64.REGCTXT, the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R26")}}},
+
+ // MOVDconvert converts between pointers and integers.
+ // We have a special op for this so as to not confuse GC
+ // (particularly stack maps). It takes a memory arg so it
+ // gets correctly ordered with respect to GC safepoints.
+ // arg0=ptr/int arg1=mem, output=int/ptr
+ {name: "MOVDconvert", argLength: 2, reg: gp11, asm: "MOVD"},
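+		// For illustration (hypothetical Go source, not part of this file):
+		// a conversion like
+		//   u := uintptr(unsafe.Pointer(p))
+		// lowers to MOVDconvert, so the GC never loses track of p's
+		// pointerness while the value is in flight.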
+
+ // Constant flag values. For any comparison, there are 5 possible
+ // outcomes: the three from the signed total order (<,==,>) and the
+ // three from the unsigned total order. The == cases overlap.
+ // Note: there's a sixth "unordered" outcome for floating-point
+ // comparisons, but we don't use such a beast yet.
+ // These ops are for temporary use by rewrite rules. They
+ // cannot appear in the generated assembly.
+ {name: "FlagEQ"}, // equal
+ {name: "FlagLT_ULT"}, // signed < and unsigned <
+ {name: "FlagLT_UGT"}, // signed < and unsigned >
+		{name: "FlagGT_UGT"}, // signed > and unsigned >
+		{name: "FlagGT_ULT"}, // signed > and unsigned <
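+		// For illustration: comparing -5 against 10 yields FlagLT_UGT, since
+		// -5 < 10 as signed values while uint64(-5) > 10 unsigned.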
+
+ // (InvertFlags (CMP a b)) == (CMP b a)
+ // InvertFlags is a pseudo-op which can't appear in assembly output.
+ {name: "InvertFlags", argLength: 1}, // reverse direction of arg0
+ }
+
+ blocks := []blockData{
+ {name: "EQ"},
+ {name: "NE"},
+ {name: "LT"},
+ {name: "LE"},
+ {name: "GT"},
+ {name: "GE"},
+ {name: "ULT"},
+ {name: "ULE"},
+ {name: "UGT"},
+ {name: "UGE"},
+ }
+
+ archs = append(archs, arch{
+ name: "ARM64",
+ pkg: "cmd/internal/obj/arm64",
+ genfile: "../../arm64/ssa.go",
+ ops: ops,
+ blocks: blocks,
+ regnames: regNamesARM64,
+ gpregmask: gp,
+ fpregmask: fp,
+ framepointerreg: -1, // not used
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/ARMOps.go b/src/cmd/compile/internal/ssa/gen/ARMOps.go
index 23e8f63471..f1774c6be0 100644
--- a/src/cmd/compile/internal/ssa/gen/ARMOps.go
+++ b/src/cmd/compile/internal/ssa/gen/ARMOps.go
@@ -6,32 +6,481 @@
package main
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - *const instructions may use a constant larger than the instruction can encode.
+//   In this case the assembler expands to multiple instructions and uses the tmp
+//   register (R11).
+
+// Suffixes encode the bit width of various instructions.
+// W (word) = 32 bit
+// H (half word) = 16 bit
+// HU = 16 bit unsigned
+// B (byte) = 8 bit
+// BU = 8 bit unsigned
+// F (float) = 32 bit float
+// D (double) = 64 bit float
+
+var regNamesARM = []string{
+ "R0",
+ "R1",
+ "R2",
+ "R3",
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "g", // aka R10
+ "R11", // tmp
+ "R12",
+ "SP", // aka R13
+ "R14", // link
+ "R15", // pc
+
+ "F0",
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ "F13",
+ "F14",
+ "F15", // tmp
+
+ // pseudo-registers
+ "SB",
+}
+
func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesARM) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesARM {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ // Common individual register masks
+ var (
+ gp = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12")
+ gpg = gp | buildReg("g")
+ gpsp = gp | buildReg("SP")
+ gpspg = gpg | buildReg("SP")
+ gpspsbg = gpspg | buildReg("SB")
+ fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15")
+ callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g
+ )
+ // Common regInfo
var (
- gp01 = regInfo{inputs: []regMask{}, outputs: []regMask{31}}
- gp11 = regInfo{inputs: []regMask{31}, outputs: []regMask{31}}
- gp21 = regInfo{inputs: []regMask{31, 31}, outputs: []regMask{31}}
- gp2flags = regInfo{inputs: []regMask{31, 31}, outputs: []regMask{32}}
- gpload = regInfo{inputs: []regMask{31}, outputs: []regMask{31}}
- gpstore = regInfo{inputs: []regMask{31, 31}, outputs: []regMask{}}
- flagsgp = regInfo{inputs: []regMask{32}, outputs: []regMask{31}}
- callerSave = regMask(15)
+ gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
+ gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
+ gp11carry = regInfo{inputs: []regMask{gpg}, outputs: []regMask{0, gp}}
+ gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}}
+ gp1flags = regInfo{inputs: []regMask{gpg}}
+ gp1flags1 = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}
+ gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}}
+ gp21carry = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{0, gp}}
+ gp2flags = regInfo{inputs: []regMask{gpg, gpg}}
+ gp2flags1 = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}}
+ gp22 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp, gp}}
+ gp31 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}}
+ gp31carry = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{0, gp}}
+ gp3flags = regInfo{inputs: []regMask{gp, gp, gp}}
+ gp3flags1 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}}
+ gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}
+ gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}}
+ gp2load = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
+ gp2store = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}}
+ fp01 = regInfo{inputs: nil, outputs: []regMask{fp}}
+ fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
+ fp1flags = regInfo{inputs: []regMask{fp}}
+ fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}}
+ gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+ fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}}
+ fpstore = regInfo{inputs: []regMask{gpspsbg, fp}}
+ readflags = regInfo{inputs: nil, outputs: []regMask{gp}}
)
ops := []opData{
- {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
- {name: "ADDconst", argLength: 1, reg: gp11, asm: "ADD", aux: "SymOff"}, // arg0 + auxInt + aux.(*gc.Sym)
+ // binary ops
+ {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
+ {name: "ADDconst", argLength: 1, reg: gp11sp, asm: "ADD", aux: "Int32"}, // arg0 + auxInt
+ {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0 - arg1
+ {name: "SUBconst", argLength: 1, reg: gp11, asm: "SUB", aux: "Int32"}, // arg0 - auxInt
+ {name: "RSB", argLength: 2, reg: gp21, asm: "RSB"}, // arg1 - arg0
+ {name: "RSBconst", argLength: 1, reg: gp11, asm: "RSB", aux: "Int32"}, // auxInt - arg0
+ {name: "MUL", argLength: 2, reg: gp21, asm: "MUL", commutative: true}, // arg0 * arg1
+ {name: "HMUL", argLength: 2, reg: gp21, asm: "MULL", commutative: true}, // (arg0 * arg1) >> 32, signed
+ {name: "HMULU", argLength: 2, reg: gp21, asm: "MULLU", commutative: true}, // (arg0 * arg1) >> 32, unsigned
+ {name: "DIV", argLength: 2, reg: gp21, asm: "DIV", clobberFlags: true}, // arg0 / arg1, signed, soft div clobbers flags
+		{name: "DIVU", argLength: 2, reg: gp21, asm: "DIVU", clobberFlags: true}, // arg0 / arg1, unsigned
+ {name: "MOD", argLength: 2, reg: gp21, asm: "MOD", clobberFlags: true}, // arg0 % arg1, signed
+ {name: "MODU", argLength: 2, reg: gp21, asm: "MODU", clobberFlags: true}, // arg0 % arg1, unsigned
+
+ {name: "ADDS", argLength: 2, reg: gp21carry, asm: "ADD", commutative: true}, // arg0 + arg1, set carry flag
+ {name: "ADDSconst", argLength: 1, reg: gp11carry, asm: "ADD", aux: "Int32"}, // arg0 + auxInt, set carry flag
+ {name: "ADC", argLength: 3, reg: gp2flags1, asm: "ADC", commutative: true}, // arg0 + arg1 + carry, arg2=flags
+ {name: "ADCconst", argLength: 2, reg: gp1flags1, asm: "ADC", aux: "Int32"}, // arg0 + auxInt + carry, arg1=flags
+ {name: "SUBS", argLength: 2, reg: gp21carry, asm: "SUB"}, // arg0 - arg1, set carry flag
+ {name: "SUBSconst", argLength: 1, reg: gp11carry, asm: "SUB", aux: "Int32"}, // arg0 - auxInt, set carry flag
+ {name: "RSBSconst", argLength: 1, reg: gp11carry, asm: "RSB", aux: "Int32"}, // auxInt - arg0, set carry flag
+ {name: "SBC", argLength: 3, reg: gp2flags1, asm: "SBC"}, // arg0 - arg1 - carry, arg2=flags
+ {name: "SBCconst", argLength: 2, reg: gp1flags1, asm: "SBC", aux: "Int32"}, // arg0 - auxInt - carry, arg1=flags
+ {name: "RSCconst", argLength: 2, reg: gp1flags1, asm: "RSC", aux: "Int32"}, // auxInt - arg0 - carry, arg1=flags
+
+ {name: "MULLU", argLength: 2, reg: gp22, asm: "MULLU", commutative: true}, // arg0 * arg1, high 32 bits in out0, low 32 bits in out1
+ {name: "MULA", argLength: 3, reg: gp31, asm: "MULA"}, // arg0 * arg1 + arg2
+
+ {name: "ADDF", argLength: 2, reg: fp21, asm: "ADDF", commutative: true}, // arg0 + arg1
+ {name: "ADDD", argLength: 2, reg: fp21, asm: "ADDD", commutative: true}, // arg0 + arg1
+ {name: "SUBF", argLength: 2, reg: fp21, asm: "SUBF"}, // arg0 - arg1
+ {name: "SUBD", argLength: 2, reg: fp21, asm: "SUBD"}, // arg0 - arg1
+ {name: "MULF", argLength: 2, reg: fp21, asm: "MULF", commutative: true}, // arg0 * arg1
+ {name: "MULD", argLength: 2, reg: fp21, asm: "MULD", commutative: true}, // arg0 * arg1
+ {name: "DIVF", argLength: 2, reg: fp21, asm: "DIVF"}, // arg0 / arg1
+ {name: "DIVD", argLength: 2, reg: fp21, asm: "DIVD"}, // arg0 / arg1
+
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1
+ {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int32"}, // arg0 & auxInt
+ {name: "OR", argLength: 2, reg: gp21, asm: "ORR", commutative: true}, // arg0 | arg1
+ {name: "ORconst", argLength: 1, reg: gp11, asm: "ORR", aux: "Int32"}, // arg0 | auxInt
+ {name: "XOR", argLength: 2, reg: gp21, asm: "EOR", commutative: true}, // arg0 ^ arg1
+ {name: "XORconst", argLength: 1, reg: gp11, asm: "EOR", aux: "Int32"}, // arg0 ^ auxInt
+ {name: "BIC", argLength: 2, reg: gp21, asm: "BIC"}, // arg0 &^ arg1
+ {name: "BICconst", argLength: 1, reg: gp11, asm: "BIC", aux: "Int32"}, // arg0 &^ auxInt
+
+ // unary ops
+ {name: "MVN", argLength: 1, reg: gp11, asm: "MVN"}, // ^arg0
+
+ {name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32
+ {name: "NEGD", argLength: 1, reg: fp11, asm: "NEGD"}, // -arg0, float64
+ {name: "SQRTD", argLength: 1, reg: fp11, asm: "SQRTD"}, // sqrt(arg0), float64
+
+ // shifts
+ {name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << arg1, shift amount is mod 256
+ {name: "SLLconst", argLength: 1, reg: gp11, asm: "SLL", aux: "Int32"}, // arg0 << auxInt
+ {name: "SRL", argLength: 2, reg: gp21, asm: "SRL"}, // arg0 >> arg1, unsigned, shift amount is mod 256
+ {name: "SRLconst", argLength: 1, reg: gp11, asm: "SRL", aux: "Int32"}, // arg0 >> auxInt, unsigned
+ {name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> arg1, signed, shift amount is mod 256
+ {name: "SRAconst", argLength: 1, reg: gp11, asm: "SRA", aux: "Int32"}, // arg0 >> auxInt, signed
+ {name: "SRRconst", argLength: 1, reg: gp11, aux: "Int32"}, // arg0 right rotate by auxInt bits
+
+ {name: "ADDshiftLL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int32"}, // arg0 + arg1<<auxInt
+ {name: "ADDshiftRL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, unsigned shift
+ {name: "ADDshiftRA", argLength: 2, reg: gp21, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, signed shift
+ {name: "SUBshiftLL", argLength: 2, reg: gp21, asm: "SUB", aux: "Int32"}, // arg0 - arg1<<auxInt
+ {name: "SUBshiftRL", argLength: 2, reg: gp21, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, unsigned shift
+ {name: "SUBshiftRA", argLength: 2, reg: gp21, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, signed shift
+ {name: "RSBshiftLL", argLength: 2, reg: gp21, asm: "RSB", aux: "Int32"}, // arg1<<auxInt - arg0
+ {name: "RSBshiftRL", argLength: 2, reg: gp21, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, unsigned shift
+ {name: "RSBshiftRA", argLength: 2, reg: gp21, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, signed shift
+ {name: "ANDshiftLL", argLength: 2, reg: gp21, asm: "AND", aux: "Int32"}, // arg0 & (arg1<<auxInt)
+ {name: "ANDshiftRL", argLength: 2, reg: gp21, asm: "AND", aux: "Int32"}, // arg0 & (arg1>>auxInt), unsigned shift
+ {name: "ANDshiftRA", argLength: 2, reg: gp21, asm: "AND", aux: "Int32"}, // arg0 & (arg1>>auxInt), signed shift
+ {name: "ORshiftLL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int32"}, // arg0 | arg1<<auxInt
+ {name: "ORshiftRL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int32"}, // arg0 | arg1>>auxInt, unsigned shift
+ {name: "ORshiftRA", argLength: 2, reg: gp21, asm: "ORR", aux: "Int32"}, // arg0 | arg1>>auxInt, signed shift
+ {name: "XORshiftLL", argLength: 2, reg: gp21, asm: "EOR", aux: "Int32"}, // arg0 ^ arg1<<auxInt
+ {name: "XORshiftRL", argLength: 2, reg: gp21, asm: "EOR", aux: "Int32"}, // arg0 ^ arg1>>auxInt, unsigned shift
+ {name: "XORshiftRA", argLength: 2, reg: gp21, asm: "EOR", aux: "Int32"}, // arg0 ^ arg1>>auxInt, signed shift
+ {name: "BICshiftLL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int32"}, // arg0 &^ (arg1<<auxInt)
+ {name: "BICshiftRL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int32"}, // arg0 &^ (arg1>>auxInt), unsigned shift
+ {name: "BICshiftRA", argLength: 2, reg: gp21, asm: "BIC", aux: "Int32"}, // arg0 &^ (arg1>>auxInt), signed shift
+ {name: "MVNshiftLL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int32"}, // ^(arg0<<auxInt)
+ {name: "MVNshiftRL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int32"}, // ^(arg0>>auxInt), unsigned shift
+ {name: "MVNshiftRA", argLength: 1, reg: gp11, asm: "MVN", aux: "Int32"}, // ^(arg0>>auxInt), signed shift
+
+ {name: "ADCshiftLL", argLength: 3, reg: gp2flags1, asm: "ADC", aux: "Int32"}, // arg0 + arg1<<auxInt + carry, arg2=flags
+ {name: "ADCshiftRL", argLength: 3, reg: gp2flags1, asm: "ADC", aux: "Int32"}, // arg0 + arg1>>auxInt + carry, unsigned shift, arg2=flags
+ {name: "ADCshiftRA", argLength: 3, reg: gp2flags1, asm: "ADC", aux: "Int32"}, // arg0 + arg1>>auxInt + carry, signed shift, arg2=flags
+ {name: "SBCshiftLL", argLength: 3, reg: gp2flags1, asm: "SBC", aux: "Int32"}, // arg0 - arg1<<auxInt - carry, arg2=flags
+ {name: "SBCshiftRL", argLength: 3, reg: gp2flags1, asm: "SBC", aux: "Int32"}, // arg0 - arg1>>auxInt - carry, unsigned shift, arg2=flags
+ {name: "SBCshiftRA", argLength: 3, reg: gp2flags1, asm: "SBC", aux: "Int32"}, // arg0 - arg1>>auxInt - carry, signed shift, arg2=flags
+ {name: "RSCshiftLL", argLength: 3, reg: gp2flags1, asm: "RSC", aux: "Int32"}, // arg1<<auxInt - arg0 - carry, arg2=flags
+ {name: "RSCshiftRL", argLength: 3, reg: gp2flags1, asm: "RSC", aux: "Int32"}, // arg1>>auxInt - arg0 - carry, unsigned shift, arg2=flags
+ {name: "RSCshiftRA", argLength: 3, reg: gp2flags1, asm: "RSC", aux: "Int32"}, // arg1>>auxInt - arg0 - carry, signed shift, arg2=flags
+
+ {name: "ADDSshiftLL", argLength: 2, reg: gp21carry, asm: "ADD", aux: "Int32"}, // arg0 + arg1<<auxInt, set carry flag
+ {name: "ADDSshiftRL", argLength: 2, reg: gp21carry, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, unsigned shift, set carry flag
+ {name: "ADDSshiftRA", argLength: 2, reg: gp21carry, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, signed shift, set carry flag
+ {name: "SUBSshiftLL", argLength: 2, reg: gp21carry, asm: "SUB", aux: "Int32"}, // arg0 - arg1<<auxInt, set carry flag
+ {name: "SUBSshiftRL", argLength: 2, reg: gp21carry, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, unsigned shift, set carry flag
+ {name: "SUBSshiftRA", argLength: 2, reg: gp21carry, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, signed shift, set carry flag
+ {name: "RSBSshiftLL", argLength: 2, reg: gp21carry, asm: "RSB", aux: "Int32"}, // arg1<<auxInt - arg0, set carry flag
+ {name: "RSBSshiftRL", argLength: 2, reg: gp21carry, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, unsigned shift, set carry flag
+ {name: "RSBSshiftRA", argLength: 2, reg: gp21carry, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, signed shift, set carry flag
+
+ {name: "ADDshiftLLreg", argLength: 3, reg: gp31, asm: "ADD"}, // arg0 + arg1<<arg2
+ {name: "ADDshiftRLreg", argLength: 3, reg: gp31, asm: "ADD"}, // arg0 + arg1>>arg2, unsigned shift
+ {name: "ADDshiftRAreg", argLength: 3, reg: gp31, asm: "ADD"}, // arg0 + arg1>>arg2, signed shift
+ {name: "SUBshiftLLreg", argLength: 3, reg: gp31, asm: "SUB"}, // arg0 - arg1<<arg2
+ {name: "SUBshiftRLreg", argLength: 3, reg: gp31, asm: "SUB"}, // arg0 - arg1>>arg2, unsigned shift
+ {name: "SUBshiftRAreg", argLength: 3, reg: gp31, asm: "SUB"}, // arg0 - arg1>>arg2, signed shift
+ {name: "RSBshiftLLreg", argLength: 3, reg: gp31, asm: "RSB"}, // arg1<<arg2 - arg0
+ {name: "RSBshiftRLreg", argLength: 3, reg: gp31, asm: "RSB"}, // arg1>>arg2 - arg0, unsigned shift
+ {name: "RSBshiftRAreg", argLength: 3, reg: gp31, asm: "RSB"}, // arg1>>arg2 - arg0, signed shift
+ {name: "ANDshiftLLreg", argLength: 3, reg: gp31, asm: "AND"}, // arg0 & (arg1<<arg2)
+ {name: "ANDshiftRLreg", argLength: 3, reg: gp31, asm: "AND"}, // arg0 & (arg1>>arg2), unsigned shift
+ {name: "ANDshiftRAreg", argLength: 3, reg: gp31, asm: "AND"}, // arg0 & (arg1>>arg2), signed shift
+ {name: "ORshiftLLreg", argLength: 3, reg: gp31, asm: "ORR"}, // arg0 | arg1<<arg2
+ {name: "ORshiftRLreg", argLength: 3, reg: gp31, asm: "ORR"}, // arg0 | arg1>>arg2, unsigned shift
+ {name: "ORshiftRAreg", argLength: 3, reg: gp31, asm: "ORR"}, // arg0 | arg1>>arg2, signed shift
+ {name: "XORshiftLLreg", argLength: 3, reg: gp31, asm: "EOR"}, // arg0 ^ arg1<<arg2
+ {name: "XORshiftRLreg", argLength: 3, reg: gp31, asm: "EOR"}, // arg0 ^ arg1>>arg2, unsigned shift
+ {name: "XORshiftRAreg", argLength: 3, reg: gp31, asm: "EOR"}, // arg0 ^ arg1>>arg2, signed shift
+ {name: "BICshiftLLreg", argLength: 3, reg: gp31, asm: "BIC"}, // arg0 &^ (arg1<<arg2)
+ {name: "BICshiftRLreg", argLength: 3, reg: gp31, asm: "BIC"}, // arg0 &^ (arg1>>arg2), unsigned shift
+ {name: "BICshiftRAreg", argLength: 3, reg: gp31, asm: "BIC"}, // arg0 &^ (arg1>>arg2), signed shift
+ {name: "MVNshiftLLreg", argLength: 2, reg: gp21, asm: "MVN"}, // ^(arg0<<arg1)
+ {name: "MVNshiftRLreg", argLength: 2, reg: gp21, asm: "MVN"}, // ^(arg0>>arg1), unsigned shift
+ {name: "MVNshiftRAreg", argLength: 2, reg: gp21, asm: "MVN"}, // ^(arg0>>arg1), signed shift
- {name: "MOVWconst", argLength: 0, reg: gp01, aux: "Int32", asm: "MOVW", rematerializeable: true}, // 32 low bits of auxint
+ {name: "ADCshiftLLreg", argLength: 4, reg: gp3flags1, asm: "ADC"}, // arg0 + arg1<<arg2 + carry, arg3=flags
+ {name: "ADCshiftRLreg", argLength: 4, reg: gp3flags1, asm: "ADC"}, // arg0 + arg1>>arg2 + carry, unsigned shift, arg3=flags
+ {name: "ADCshiftRAreg", argLength: 4, reg: gp3flags1, asm: "ADC"}, // arg0 + arg1>>arg2 + carry, signed shift, arg3=flags
+ {name: "SBCshiftLLreg", argLength: 4, reg: gp3flags1, asm: "SBC"}, // arg0 - arg1<<arg2 - carry, arg3=flags
+ {name: "SBCshiftRLreg", argLength: 4, reg: gp3flags1, asm: "SBC"}, // arg0 - arg1>>arg2 - carry, unsigned shift, arg3=flags
+ {name: "SBCshiftRAreg", argLength: 4, reg: gp3flags1, asm: "SBC"}, // arg0 - arg1>>arg2 - carry, signed shift, arg3=flags
+ {name: "RSCshiftLLreg", argLength: 4, reg: gp3flags1, asm: "RSC"}, // arg1<<arg2 - arg0 - carry, arg3=flags
+ {name: "RSCshiftRLreg", argLength: 4, reg: gp3flags1, asm: "RSC"}, // arg1>>arg2 - arg0 - carry, unsigned shift, arg3=flags
+ {name: "RSCshiftRAreg", argLength: 4, reg: gp3flags1, asm: "RSC"}, // arg1>>arg2 - arg0 - carry, signed shift, arg3=flags
- {name: "CMP", argLength: 2, reg: gp2flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1
+ {name: "ADDSshiftLLreg", argLength: 3, reg: gp31carry, asm: "ADD"}, // arg0 + arg1<<arg2, set carry flag
+ {name: "ADDSshiftRLreg", argLength: 3, reg: gp31carry, asm: "ADD"}, // arg0 + arg1>>arg2, unsigned shift, set carry flag
+ {name: "ADDSshiftRAreg", argLength: 3, reg: gp31carry, asm: "ADD"}, // arg0 + arg1>>arg2, signed shift, set carry flag
+ {name: "SUBSshiftLLreg", argLength: 3, reg: gp31carry, asm: "SUB"}, // arg0 - arg1<<arg2, set carry flag
+ {name: "SUBSshiftRLreg", argLength: 3, reg: gp31carry, asm: "SUB"}, // arg0 - arg1>>arg2, unsigned shift, set carry flag
+ {name: "SUBSshiftRAreg", argLength: 3, reg: gp31carry, asm: "SUB"}, // arg0 - arg1>>arg2, signed shift, set carry flag
+ {name: "RSBSshiftLLreg", argLength: 3, reg: gp31carry, asm: "RSB"}, // arg1<<arg2 - arg0, set carry flag
+ {name: "RSBSshiftRLreg", argLength: 3, reg: gp31carry, asm: "RSB"}, // arg1>>arg2 - arg0, unsigned shift, set carry flag
+ {name: "RSBSshiftRAreg", argLength: 3, reg: gp31carry, asm: "RSB"}, // arg1>>arg2 - arg0, signed shift, set carry flag
- {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW"}, // load from arg0 + auxInt + aux. arg1=mem.
- {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ // comparisons
+ {name: "CMP", argLength: 2, reg: gp2flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPconst", argLength: 1, reg: gp1flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to auxInt
+ {name: "CMN", argLength: 2, reg: gp2flags, asm: "CMN", typ: "Flags"}, // arg0 compare to -arg1
+ {name: "CMNconst", argLength: 1, reg: gp1flags, asm: "CMN", aux: "Int32", typ: "Flags"}, // arg0 compare to -auxInt
+ {name: "TST", argLength: 2, reg: gp2flags, asm: "TST", typ: "Flags", commutative: true}, // arg0 & arg1 compare to 0
+ {name: "TSTconst", argLength: 1, reg: gp1flags, asm: "TST", aux: "Int32", typ: "Flags"}, // arg0 & auxInt compare to 0
+ {name: "TEQ", argLength: 2, reg: gp2flags, asm: "TEQ", typ: "Flags", commutative: true}, // arg0 ^ arg1 compare to 0
+ {name: "TEQconst", argLength: 1, reg: gp1flags, asm: "TEQ", aux: "Int32", typ: "Flags"}, // arg0 ^ auxInt compare to 0
+ {name: "CMPF", argLength: 2, reg: fp2flags, asm: "CMPF", typ: "Flags"}, // arg0 compare to arg1, float32
+ {name: "CMPD", argLength: 2, reg: fp2flags, asm: "CMPD", typ: "Flags"}, // arg0 compare to arg1, float64
- {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff"}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem
+ {name: "CMPshiftLL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to arg1<<auxInt
+ {name: "CMPshiftRL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to arg1>>auxInt, unsigned shift
+ {name: "CMPshiftRA", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to arg1>>auxInt, signed shift
+
+ {name: "CMPshiftLLreg", argLength: 3, reg: gp3flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1<<arg2
+ {name: "CMPshiftRLreg", argLength: 3, reg: gp3flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1>>arg2, unsigned shift
+ {name: "CMPshiftRAreg", argLength: 3, reg: gp3flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1>>arg2, signed shift
+
+ {name: "CMPF0", argLength: 1, reg: fp1flags, asm: "CMPF", typ: "Flags"}, // arg0 compare to 0, float32
+ {name: "CMPD0", argLength: 1, reg: fp1flags, asm: "CMPD", typ: "Flags"}, // arg0 compare to 0, float64
+
+ // moves
+ {name: "MOVWconst", argLength: 0, reg: gp01, aux: "Int32", asm: "MOVW", typ: "UInt32", rematerializeable: true}, // 32 low bits of auxint
+ {name: "MOVFconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVF", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float
+ {name: "MOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float
+
+ {name: "MOVWaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVW", rematerializeable: true}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB
+
+ {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "UInt32"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVFload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVF", typ: "Float32"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVD", typ: "Float64"}, // load from arg0 + auxInt + aux. arg1=mem.
+
+ {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+
+ {name: "MOVWloadidx", argLength: 3, reg: gp2load, asm: "MOVW"}, // load from arg0 + arg1. arg2=mem
+ {name: "MOVWloadshiftLL", argLength: 3, reg: gp2load, asm: "MOVW", aux: "Int32"}, // load from arg0 + arg1<<auxInt. arg2=mem
+ {name: "MOVWloadshiftRL", argLength: 3, reg: gp2load, asm: "MOVW", aux: "Int32"}, // load from arg0 + arg1>>auxInt, unsigned shift. arg2=mem
+ {name: "MOVWloadshiftRA", argLength: 3, reg: gp2load, asm: "MOVW", aux: "Int32"}, // load from arg0 + arg1>>auxInt, signed shift. arg2=mem
+
+ {name: "MOVWstoreidx", argLength: 4, reg: gp2store, asm: "MOVW"}, // store arg2 to arg0 + arg1. arg3=mem
+ {name: "MOVWstoreshiftLL", argLength: 4, reg: gp2store, asm: "MOVW", aux: "Int32"}, // store arg2 to arg0 + arg1<<auxInt. arg3=mem
+ {name: "MOVWstoreshiftRL", argLength: 4, reg: gp2store, asm: "MOVW", aux: "Int32"}, // store arg2 to arg0 + arg1>>auxInt, unsigned shift. arg3=mem
+ {name: "MOVWstoreshiftRA", argLength: 4, reg: gp2store, asm: "MOVW", aux: "Int32"}, // store arg2 to arg0 + arg1>>auxInt, signed shift. arg3=mem
+
+ {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVBS"}, // move from arg0, sign-extended from byte
+ {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte
+ {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVHS"}, // move from arg0, sign-extended from half
+ {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half
+ {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0
+
+ {name: "MOVWnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register
+
+ {name: "MOVWF", argLength: 1, reg: gpfp, asm: "MOVWF"}, // int32 -> float32
+ {name: "MOVWD", argLength: 1, reg: gpfp, asm: "MOVWD"}, // int32 -> float64
+ {name: "MOVWUF", argLength: 1, reg: gpfp, asm: "MOVWF"}, // uint32 -> float32, set U bit in the instruction
+ {name: "MOVWUD", argLength: 1, reg: gpfp, asm: "MOVWD"}, // uint32 -> float64, set U bit in the instruction
+ {name: "MOVFW", argLength: 1, reg: fpgp, asm: "MOVFW"}, // float32 -> int32
+ {name: "MOVDW", argLength: 1, reg: fpgp, asm: "MOVDW"}, // float64 -> int32
+ {name: "MOVFWU", argLength: 1, reg: fpgp, asm: "MOVFW"}, // float32 -> uint32, set U bit in the instruction
+ {name: "MOVDWU", argLength: 1, reg: fpgp, asm: "MOVDW"}, // float64 -> uint32, set U bit in the instruction
+ {name: "MOVFD", argLength: 1, reg: fp11, asm: "MOVFD"}, // float32 -> float64
+ {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32
+
+ // conditional instructions, for lowering shifts
+ {name: "CMOVWHSconst", argLength: 2, reg: gp1flags1, asm: "MOVW", aux: "Int32", resultInArg0: true}, // replace arg0 w/ const if flags indicates HS, arg1=flags
+ {name: "CMOVWLSconst", argLength: 2, reg: gp1flags1, asm: "MOVW", aux: "Int32", resultInArg0: true}, // replace arg0 w/ const if flags indicates LS, arg1=flags
+ {name: "SRAcond", argLength: 3, reg: gp2flags1, asm: "SRA"}, // arg0 >> 31 if flags indicates HS, arg0 >> arg1 otherwise, signed shift, arg2=flags
+
+ // function calls
+ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R7"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLdefer", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "Int64", clobberFlags: true}, // call deferproc. arg0=mem, auxint=argsize, returns mem
+ {name: "CALLgo", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "Int64", clobberFlags: true}, // call newproc. arg0=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
// pseudo-ops
- {name: "LessThan", argLength: 1, reg: flagsgp}, // bool, 1 flags encode x<y 0 otherwise.
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}}, // panic if arg0 is nil. arg1=mem.
+
+ {name: "Equal", argLength: 1, reg: readflags}, // bool, true flags encode x==y false otherwise.
+ {name: "NotEqual", argLength: 1, reg: readflags}, // bool, true flags encode x!=y false otherwise.
+ {name: "LessThan", argLength: 1, reg: readflags}, // bool, true flags encode signed x<y false otherwise.
+ {name: "LessEqual", argLength: 1, reg: readflags}, // bool, true flags encode signed x<=y false otherwise.
+ {name: "GreaterThan", argLength: 1, reg: readflags}, // bool, true flags encode signed x>y false otherwise.
+ {name: "GreaterEqual", argLength: 1, reg: readflags}, // bool, true flags encode signed x>=y false otherwise.
+ {name: "LessThanU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x<y false otherwise.
+ {name: "LessEqualU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x<=y false otherwise.
+ {name: "GreaterThanU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x>y false otherwise.
+ {name: "GreaterEqualU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x>=y false otherwise.
+
+ // duffzero (must be 4-byte aligned)
+ // arg0 = address of memory to zero (in R1, changed as side effect)
+ // arg1 = value to store (always zero)
+ // arg2 = mem
+ // auxint = offset into duffzero code to start executing
+ // returns mem
+ {
+ name: "DUFFZERO",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R1"), buildReg("R0")},
+ clobbers: buildReg("R1"),
+ },
+ },
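+ // The duffzero body is a long run of post-increment zeroing stores;
+ // auxint selects how far into that run execution begins, so smaller
+ // zeroings execute correspondingly fewer stores.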
+
+ // duffcopy (must be 4-byte aligned)
+ // arg0 = address of dst memory (in R2, changed as side effect)
+ // arg1 = address of src memory (in R1, changed as side effect)
+ // arg2 = mem
+ // auxint = offset into duffcopy code to start executing
+ // returns mem
+ {
+ name: "DUFFCOPY",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R2"), buildReg("R1")},
+ clobbers: buildReg("R0 R1 R2"),
+ },
+ },
+
+ // large or unaligned zeroing
+ // arg0 = address of memory to zero (in R1, changed as side effect)
+ // arg1 = address of the last element to zero
+ // arg2 = value to store (always zero)
+ // arg3 = mem
+ // returns mem
+ // MOVW.P Rarg2, 4(R1)
+ // CMP R1, Rarg1
+ // BLE -2(PC)
+ {
+ name: "LoweredZero",
+ aux: "Int64",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R1"), gp, gp},
+ clobbers: buildReg("R1"),
+ },
+ clobberFlags: true,
+ },
+
+ // large or unaligned move
+ // arg0 = address of dst memory (in R2, changed as side effect)
+ // arg1 = address of src memory (in R1, changed as side effect)
+ // arg2 = address of the last element of src
+ // arg3 = mem
+ // returns mem
+ // MOVW.P 4(R1), Rtmp
+ // MOVW.P Rtmp, 4(R2)
+ // CMP R1, Rarg2
+ // BLE -3(PC)
+ {
+ name: "LoweredMove",
+ aux: "Int64",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R2"), buildReg("R1"), gp},
+ clobbers: buildReg("R1 R2"),
+ },
+ clobberFlags: true,
+ },
+
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of R7 (arm.REGCTXT, the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R7")}}},
+
+ // MOVWconvert converts between pointers and integers.
+ // We have a special op for this so as to not confuse GC
+ // (particularly stack maps). It takes a memory arg so it
+ // gets correctly ordered with respect to GC safepoints.
+ // arg0=ptr/int arg1=mem, output=int/ptr
+ {name: "MOVWconvert", argLength: 2, reg: gp11, asm: "MOVW"},
+
+ // Constant flag values. For any comparison, there are 5 possible
+ // outcomes: the three from the signed total order (<,==,>) and the
+ // three from the unsigned total order. The == cases overlap.
+ // Note: there's a sixth "unordered" outcome for floating-point
+ // comparisons, but we don't use such a beast yet.
+ // These ops are for temporary use by rewrite rules. They
+ // cannot appear in the generated assembly.
+ {name: "FlagEQ"}, // equal
+ {name: "FlagLT_ULT"}, // signed < and unsigned <
+ {name: "FlagLT_UGT"}, // signed < and unsigned >
+ {name: "FlagGT_UGT"}, // signed > and unsigned <
+ {name: "FlagGT_ULT"}, // signed > and unsigned >
+
+ // (InvertFlags (CMP a b)) == (CMP b a)
+ // InvertFlags is a pseudo-op which can't appear in assembly output.
+ {name: "InvertFlags", argLength: 1}, // reverse direction of arg0
}
blocks := []blockData{
@@ -47,22 +496,15 @@ func init() {
{name: "UGE"},
}
- regNames := []string{
- "R0",
- "R1",
- "R2",
- "R3",
- "SP",
- "FLAGS",
- "SB",
- }
-
archs = append(archs, arch{
- name: "ARM",
- pkg: "cmd/internal/obj/arm",
- genfile: "../../arm/ssa.go",
- ops: ops,
- blocks: blocks,
- regnames: regNames,
+ name: "ARM",
+ pkg: "cmd/internal/obj/arm",
+ genfile: "../../arm/ssa.go",
+ ops: ops,
+ blocks: blocks,
+ regnames: regNamesARM,
+ gpregmask: gp,
+ fpregmask: fp,
+ framepointerreg: -1, // not used
})
}
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules
new file mode 100644
index 0000000000..2e746e32d4
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules
@@ -0,0 +1,573 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Lowering arithmetic
+(Add64 x y) -> (ADD x y)
+(AddPtr x y) -> (ADD x y)
+(Add32 x y) -> (ADD x y)
+(Add16 x y) -> (ADD x y)
+(Add8 x y) -> (ADD x y)
+(Add64F x y) -> (FADD x y)
+(Add32F x y) -> (FADDS x y)
+
+(Sub64 x y) -> (SUB x y)
+(SubPtr x y) -> (SUB x y)
+(Sub32 x y) -> (SUB x y)
+(Sub16 x y) -> (SUB x y)
+(Sub8 x y) -> (SUB x y)
+(Sub32F x y) -> (FSUBS x y)
+(Sub64F x y) -> (FSUB x y)
+
+(Mod16 x y) -> (Mod32 (SignExt16to32 x) (SignExt16to32 y))
+(Mod16u x y) -> (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Mod8 x y) -> (Mod32 (SignExt8to32 x) (SignExt8to32 y))
+(Mod8u x y) -> (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
+(Mod64 x y) -> (SUB x (MULLD y (DIVD x y)))
+(Mod64u x y) -> (SUB x (MULLD y (DIVDU x y)))
+(Mod32 x y) -> (SUB x (MULLW y (DIVW x y)))
+(Mod32u x y) -> (SUB x (MULLW y (DIVWU x y)))
+
+(Avg64u <t> x y) -> (ADD (ADD <t> (SRD <t> x (MOVDconst <t> [1])) (SRD <t> y (MOVDconst <t> [1]))) (ANDconst <t> (AND <t> x y) [1]))
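+// The rule above uses the identity (x+y)/2 == x/2 + y/2 + (x&y&1):
+// halving each operand first avoids overflow in x+y, and the final term
+// restores the bit lost when both low bits are set.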
+
+(Mul64 x y) -> (MULLD x y)
+(Mul32 x y) -> (MULLW x y)
+(Mul16 x y) -> (MULLW x y)
+(Mul8 x y) -> (MULLW x y)
+
+(Div64 x y) -> (DIVD x y)
+(Div64u x y) -> (DIVDU x y)
+(Div32 x y) -> (DIVW x y)
+(Div32u x y) -> (DIVWU x y)
+(Div16 x y) -> (DIVW (SignExt16to32 x) (SignExt16to32 y))
+(Div16u x y) -> (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Div8 x y) -> (DIVW (SignExt8to32 x) (SignExt8to32 y))
+(Div8u x y) -> (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+(Hmul64 x y) -> (MULHD x y)
+(Hmul64u x y) -> (MULHDU x y)
+(Hmul32 x y) -> (MULHW x y)
+(Hmul32u x y) -> (MULHWU x y)
+(Hmul16 x y) -> (SRAWconst (MULLW <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
+(Hmul16u x y) -> (SRWconst (MULLW <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
+(Hmul8 x y) -> (SRAWconst (MULLW <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
+(Hmul8u x y) -> (SRWconst (MULLW <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
+
+(Mul32F x y) -> (FMULS x y)
+(Mul64F x y) -> (FMUL x y)
+
+(Div32F x y) -> (FDIVS x y)
+(Div64F x y) -> (FDIV x y)
+
+// Lowering float <-> int
+(Cvt32to32F x) -> (FRSP (FCFID (Xi2f64 (SignExt32to64 x))))
+(Cvt32to64F x) -> (FCFID (Xi2f64 (SignExt32to64 x)))
+(Cvt64to32F x) -> (FRSP (FCFID (Xi2f64 x)))
+(Cvt64to64F x) -> (FCFID (Xi2f64 x))
+
+(Cvt32Fto32 x) -> (Xf2i64 (FCTIWZ x))
+(Cvt32Fto64 x) -> (Xf2i64 (FCTIDZ x))
+(Cvt64Fto32 x) -> (Xf2i64 (FCTIWZ x))
+(Cvt64Fto64 x) -> (Xf2i64 (FCTIDZ x))
+
+(Cvt32Fto64F x) -> x // Note x will have the wrong type for patterns dependent on Float32/Float64
+(Cvt64Fto32F x) -> (FRSP x)
+
+(Sqrt x) -> (FSQRT x)
+
+(Rsh64x64 x y) -> (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
+(Rsh64Ux64 x y) -> (SRD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
+(Lsh64x64 x y) -> (SLD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
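+// In these lowerings, (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] y)))
+// evaluates to y when y < 64 and to all ones when y >= 64; an all-ones
+// shift amount makes SRD/SLD produce 0 and SRAD produce all sign bits,
+// matching Go's semantics for out-of-range shift counts. The same pattern
+// with -32/-16/-8 guards the narrower shifts below.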
+
+(Rsh32x64 x y) -> (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
+(Rsh32Ux64 x y) -> (SRW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
+(Lsh32x64 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
+
+(Rsh16x64 x y) -> (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
+(Rsh16Ux64 x y) -> (SRW (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
+(Lsh16x64 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
+
+(Rsh8x64 x y) -> (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
+(Rsh8Ux64 x y) -> (SRW (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
+(Lsh8x64 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
+
+
+(Rsh64x32 x y) -> (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
+(Rsh64Ux32 x y) -> (SRD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
+(Lsh64x32 x y) -> (SLD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
+
+(Rsh32x32 x y) -> (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
+(Rsh32Ux32 x y) -> (SRW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
+(Lsh32x32 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
+
+(Rsh16x32 x y) -> (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
+(Rsh16Ux32 x y) -> (SRW (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
+(Lsh16x32 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
+
+(Rsh8x32 x y) -> (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
+(Rsh8Ux32 x y) -> (SRW (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
+(Lsh8x32 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
+
+
+(Rsh64x16 x y) -> (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
+(Rsh64Ux16 x y) -> (SRD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
+(Lsh64x16 x y) -> (SLD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
+
+(Rsh32x16 x y) -> (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
+(Rsh32Ux16 x y) -> (SRW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
+(Lsh32x16 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
+
+(Rsh16x16 x y) -> (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
+(Rsh16Ux16 x y) -> (SRW (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
+(Lsh16x16 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
+
+(Rsh8x16 x y) -> (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
+(Rsh8Ux16 x y) -> (SRW (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
+(Lsh8x16 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
+
+
+(Rsh64x8 x y) -> (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
+(Rsh64Ux8 x y) -> (SRD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
+(Lsh64x8 x y) -> (SLD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
+
+(Rsh32x8 x y) -> (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
+(Rsh32Ux8 x y) -> (SRW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
+(Lsh32x8 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
+
+(Rsh16x8 x y) -> (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
+(Rsh16Ux8 x y) -> (SRW (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
+(Lsh16x8 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
+
+(Rsh8x8 x y) -> (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
+(Rsh8Ux8 x y) -> (SRW (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
+(Lsh8x8 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
+
+// Potentially useful optimizing rewrites.
+// (ADDconstForCarry [k] c), k < 0 && (c < 0 || k+c >= 0) -> CarrySet
+// (ADDconstForCarry [k] c), k < 0 && (c >= 0 && k+c < 0) -> CarryClear
+// (MaskIfNotCarry CarrySet) -> 0
+// (MaskIfNotCarry CarryClear) -> -1
+
+// Lowering constants
+(Const8 [val]) -> (MOVWconst [val])
+(Const16 [val]) -> (MOVWconst [val])
+(Const32 [val]) -> (MOVWconst [val])
+(Const64 [val]) -> (MOVDconst [val])
+(Const32F [val]) -> (FMOVSconst [val])
+(Const64F [val]) -> (FMOVDconst [val])
+(ConstNil) -> (MOVDconst [0])
+(ConstBool [b]) -> (MOVWconst [b])
+
+(Addr {sym} base) -> (MOVDaddr {sym} base)
+// (Addr {sym} base) -> (ADDconst {sym} base)
+(OffPtr [off] ptr) -> (ADD (MOVDconst <config.Frontend().TypeInt64()> [off]) ptr)
+
+(And64 x y) -> (AND x y)
+(And32 x y) -> (AND x y)
+(And16 x y) -> (AND x y)
+(And8 x y) -> (AND x y)
+
+(Or64 x y) -> (OR x y)
+(Or32 x y) -> (OR x y)
+(Or16 x y) -> (OR x y)
+(Or8 x y) -> (OR x y)
+
+(Xor64 x y) -> (XOR x y)
+(Xor32 x y) -> (XOR x y)
+(Xor16 x y) -> (XOR x y)
+(Xor8 x y) -> (XOR x y)
+
+(Neg64F x) -> (FNEG x)
+(Neg32F x) -> (FNEG x)
+(Neg64 x) -> (NEG x)
+(Neg32 x) -> (NEG x)
+(Neg16 x) -> (NEG x)
+(Neg8 x) -> (NEG x)
+
+(Com64 x) -> (XORconst [-1] x)
+(Com32 x) -> (XORconst [-1] x)
+(Com16 x) -> (XORconst [-1] x)
+(Com8 x) -> (XORconst [-1] x)
+
+// Lowering boolean ops
+(AndB x y) -> (AND x y)
+(OrB x y) -> (OR x y)
+(Not x) -> (XORconst [1] x)
+
+// Lowering comparisons
+(EqB x y) -> (ANDconst [1] (EQV x y))
+(Eq8 x y) -> (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Eq16 x y) -> (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Eq32 x y) -> (Equal (CMPW x y))
+(Eq64 x y) -> (Equal (CMP x y))
+(Eq32F x y) -> (Equal (FCMPU x y))
+(Eq64F x y) -> (Equal (FCMPU x y))
+(EqPtr x y) -> (Equal (CMP x y))
+
+(NeqB x y) -> (XOR x y)
+(Neq8 x y) -> (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Neq16 x y) -> (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Neq32 x y) -> (NotEqual (CMPW x y))
+(Neq64 x y) -> (NotEqual (CMP x y))
+(Neq32F x y) -> (NotEqual (FCMPU x y))
+(Neq64F x y) -> (NotEqual (FCMPU x y))
+(NeqPtr x y) -> (NotEqual (CMP x y))
+
+(Less8 x y) -> (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Less16 x y) -> (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Less32 x y) -> (LessThan (CMPW x y))
+(Less64 x y) -> (LessThan (CMP x y))
+(Less32F x y) -> (FLessThan (FCMPU x y))
+(Less64F x y) -> (FLessThan (FCMPU x y))
+
+(Less8U x y) -> (LessThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Less16U x y) -> (LessThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Less32U x y) -> (LessThan (CMPWU x y))
+(Less64U x y) -> (LessThan (CMPU x y))
+
+(Leq8 x y) -> (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Leq16 x y) -> (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Leq32 x y) -> (LessEqual (CMPW x y))
+(Leq64 x y) -> (LessEqual (CMP x y))
+(Leq32F x y) -> (FLessEqual (FCMPU x y))
+(Leq64F x y) -> (FLessEqual (FCMPU x y))
+
+(Leq8U x y) -> (LessEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Leq16U x y) -> (LessEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Leq32U x y) -> (LessEqual (CMPWU x y))
+(Leq64U x y) -> (LessEqual (CMPU x y))
+
+(Greater8 x y) -> (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Greater16 x y) -> (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Greater32 x y) -> (GreaterThan (CMPW x y))
+(Greater64 x y) -> (GreaterThan (CMP x y))
+(Greater32F x y) -> (FGreaterThan (FCMPU x y))
+(Greater64F x y) -> (FGreaterThan (FCMPU x y))
+
+(Greater8U x y) -> (GreaterThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Greater16U x y) -> (GreaterThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Greater32U x y) -> (GreaterThan (CMPWU x y))
+(Greater64U x y) -> (GreaterThan (CMPU x y))
+
+(Geq8 x y) -> (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Geq16 x y) -> (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Geq32 x y) -> (GreaterEqual (CMPW x y))
+(Geq64 x y) -> (GreaterEqual (CMP x y))
+(Geq32F x y) -> (FGreaterEqual (FCMPU x y))
+(Geq64F x y) -> (FGreaterEqual (FCMPU x y))
+
+(Geq8U x y) -> (GreaterEqual (CMPU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Geq16U x y) -> (GreaterEqual (CMPU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Geq32U x y) -> (GreaterEqual (CMPU x y))
+(Geq64U x y) -> (GreaterEqual (CMPU x y))
+
+// Absorb pseudo-ops into blocks.
+(If (Equal cc) yes no) -> (EQ cc yes no)
+(If (NotEqual cc) yes no) -> (NE cc yes no)
+(If (LessThan cc) yes no) -> (LT cc yes no)
+(If (LessEqual cc) yes no) -> (LE cc yes no)
+(If (GreaterThan cc) yes no) -> (GT cc yes no)
+(If (GreaterEqual cc) yes no) -> (GE cc yes no)
+(If (FLessThan cc) yes no) -> (FLT cc yes no)
+(If (FLessEqual cc) yes no) -> (FLE cc yes no)
+(If (FGreaterThan cc) yes no) -> (FGT cc yes no)
+(If (FGreaterEqual cc) yes no) -> (FGE cc yes no)
+
+(If cond yes no) -> (NE (CMPWconst [0] cond) yes no)
+
+// Absorb boolean tests into block
+(NE (CMPWconst [0] (Equal cc)) yes no) -> (EQ cc yes no)
+(NE (CMPWconst [0] (NotEqual cc)) yes no) -> (NE cc yes no)
+(NE (CMPWconst [0] (LessThan cc)) yes no) -> (LT cc yes no)
+(NE (CMPWconst [0] (LessEqual cc)) yes no) -> (LE cc yes no)
+(NE (CMPWconst [0] (GreaterThan cc)) yes no) -> (GT cc yes no)
+(NE (CMPWconst [0] (GreaterEqual cc)) yes no) -> (GE cc yes no)
+// (NE (CMPWconst [0] (FLessThan cc)) yes no) -> (FLT cc yes no)
+// (NE (CMPWconst [0] (FLessEqual cc)) yes no) -> (FLE cc yes no)
+// (NE (CMPWconst [0] (FGreaterThan cc)) yes no) -> (FGT cc yes no)
+// (NE (CMPWconst [0] (FGreaterEqual cc)) yes no) -> (FGE cc yes no)
+
+// absorb flag constants into branches
+(EQ (FlagEQ) yes no) -> (First nil yes no)
+(EQ (FlagLT) yes no) -> (First nil no yes)
+(EQ (FlagGT) yes no) -> (First nil no yes)
+
+(NE (FlagEQ) yes no) -> (First nil no yes)
+(NE (FlagLT) yes no) -> (First nil yes no)
+(NE (FlagGT) yes no) -> (First nil yes no)
+
+(LT (FlagEQ) yes no) -> (First nil no yes)
+(LT (FlagLT) yes no) -> (First nil yes no)
+(LT (FlagGT) yes no) -> (First nil no yes)
+
+(LE (FlagEQ) yes no) -> (First nil yes no)
+(LE (FlagLT) yes no) -> (First nil yes no)
+(LE (FlagGT) yes no) -> (First nil no yes)
+
+(GT (FlagEQ) yes no) -> (First nil no yes)
+(GT (FlagLT) yes no) -> (First nil no yes)
+(GT (FlagGT) yes no) -> (First nil yes no)
+
+(GE (FlagEQ) yes no) -> (First nil yes no)
+(GE (FlagLT) yes no) -> (First nil no yes)
+(GE (FlagGT) yes no) -> (First nil yes no)
+
+// absorb InvertFlags into branches
+(LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
+(EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) -> (NE cmp yes no)
+
+// (FLT (InvertFlags cmp) yes no) -> (FGT cmp yes no)
+// (FGT (InvertFlags cmp) yes no) -> (FLT cmp yes no)
+// (FLE (InvertFlags cmp) yes no) -> (FGE cmp yes no)
+// (FGE (InvertFlags cmp) yes no) -> (FLE cmp yes no)
+
+// constant comparisons
+(CMPWconst (MOVWconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
+(CMPWconst (MOVWconst [x]) [y]) && int32(x)<int32(y) -> (FlagLT)
+(CMPWconst (MOVWconst [x]) [y]) && int32(x)>int32(y) -> (FlagGT)
+
+(CMPconst (MOVDconst [x]) [y]) && int64(x)==int64(y) -> (FlagEQ)
+(CMPconst (MOVDconst [x]) [y]) && int64(x)<int64(y) -> (FlagLT)
+(CMPconst (MOVDconst [x]) [y]) && int64(x)>int64(y) -> (FlagGT)
+
+(CMPWUconst (MOVWconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
+(CMPWUconst (MOVWconst [x]) [y]) && uint32(x)<uint32(y) -> (FlagLT)
+(CMPWUconst (MOVWconst [x]) [y]) && uint32(x)>uint32(y) -> (FlagGT)
+
+(CMPUconst (MOVDconst [x]) [y]) && int64(x)==int64(y) -> (FlagEQ)
+(CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) -> (FlagLT)
+(CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) -> (FlagGT)
+
+// other known comparisons
+//(CMPconst (MOVBUreg _) [c]) && 0xff < c -> (FlagLT)
+//(CMPconst (MOVHUreg _) [c]) && 0xffff < c -> (FlagLT)
+//(CMPconst (ANDconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT)
+//(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n) -> (FlagLT)
+
+// absorb flag constants into boolean values
+(Equal (FlagEQ)) -> (MOVWconst [1])
+(Equal (FlagLT)) -> (MOVWconst [0])
+(Equal (FlagGT)) -> (MOVWconst [0])
+
+(NotEqual (FlagEQ)) -> (MOVWconst [0])
+(NotEqual (FlagLT)) -> (MOVWconst [1])
+(NotEqual (FlagGT)) -> (MOVWconst [1])
+
+(LessThan (FlagEQ)) -> (MOVWconst [0])
+(LessThan (FlagLT)) -> (MOVWconst [1])
+(LessThan (FlagGT)) -> (MOVWconst [0])
+
+(LessEqual (FlagEQ)) -> (MOVWconst [1])
+(LessEqual (FlagLT)) -> (MOVWconst [1])
+(LessEqual (FlagGT)) -> (MOVWconst [0])
+
+(GreaterThan (FlagEQ)) -> (MOVWconst [0])
+(GreaterThan (FlagLT)) -> (MOVWconst [0])
+(GreaterThan (FlagGT)) -> (MOVWconst [1])
+
+(GreaterEqual (FlagEQ)) -> (MOVWconst [1])
+(GreaterEqual (FlagLT)) -> (MOVWconst [0])
+(GreaterEqual (FlagGT)) -> (MOVWconst [1])
+
+// absorb InvertFlags into boolean values
+(Equal (InvertFlags x)) -> (Equal x)
+(NotEqual (InvertFlags x)) -> (NotEqual x)
+(LessThan (InvertFlags x)) -> (GreaterThan x)
+(GreaterThan (InvertFlags x)) -> (LessThan x)
+(LessEqual (InvertFlags x)) -> (GreaterEqual x)
+(GreaterEqual (InvertFlags x)) -> (LessEqual x)
+(FLessThan (InvertFlags x)) -> (FGreaterThan x)
+(FGreaterThan (InvertFlags x)) -> (FLessThan x)
+(FLessEqual (InvertFlags x)) -> (FGreaterEqual x)
+(FGreaterEqual (InvertFlags x)) -> (FLessEqual x)
+
+
+// Lowering loads
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVDload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) && isSigned(t) -> (MOVWload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) && !isSigned(t) -> (MOVWZload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) && isSigned(t) -> (MOVHload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) && !isSigned(t) -> (MOVHZload ptr mem)
+(Load <t> ptr mem) && (t.IsBoolean() || (is8BitInt(t) && isSigned(t))) -> (MOVBload ptr mem)
+(Load <t> ptr mem) && is8BitInt(t) && !isSigned(t) -> (MOVBZload ptr mem)
+
+(Load <t> ptr mem) && is32BitFloat(t) -> (FMOVSload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)
+
+(Store [8] ptr val mem) && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
+(Store [8] ptr val mem) && is32BitFloat(val.Type) -> (FMOVDstore ptr val mem) // glitch from (Cvt32Fto64F x) -> x -- type is wrong
+(Store [4] ptr val mem) && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
+(Store [8] ptr val mem) && (is64BitInt(val.Type) || isPtr(val.Type)) -> (MOVDstore ptr val mem)
+(Store [4] ptr val mem) && is32BitInt(val.Type) -> (MOVWstore ptr val mem)
+(Store [2] ptr val mem) -> (MOVHstore ptr val mem)
+(Store [1] ptr val mem) -> (MOVBstore ptr val mem)
+
+(Zero [s] _ mem) && SizeAndAlign(s).Size() == 0 -> mem
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstorezero destptr mem)
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0 ->
+ (MOVHstorezero destptr mem)
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 2 ->
+ (MOVBstorezero [1] destptr
+ (MOVBstorezero [0] destptr mem))
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0 ->
+ (MOVWstorezero destptr mem)
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0 ->
+ (MOVHstorezero [2] destptr
+ (MOVHstorezero [0] destptr mem))
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 4 ->
+ (MOVBstorezero [3] destptr
+ (MOVBstorezero [2] destptr
+ (MOVBstorezero [1] destptr
+ (MOVBstorezero [0] destptr mem))))
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0 ->
+ (MOVDstorezero [0] destptr mem)
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0 ->
+ (MOVWstorezero [4] destptr
+ (MOVWstorezero [0] destptr mem))
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0 ->
+ (MOVHstorezero [6] destptr
+ (MOVHstorezero [4] destptr
+ (MOVHstorezero [2] destptr
+ (MOVHstorezero [0] destptr mem))))
+
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 3 ->
+ (MOVBstorezero [2] destptr
+ (MOVBstorezero [1] destptr
+ (MOVBstorezero [0] destptr mem)))
+
+// Zero small numbers of words directly.
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0 ->
+ (MOVDstorezero [8] destptr
+ (MOVDstorezero [0] destptr mem))
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0 ->
+ (MOVDstorezero [16] destptr
+ (MOVDstorezero [8] destptr
+ (MOVDstorezero [0] destptr mem)))
+(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 32 && SizeAndAlign(s).Align()%8 == 0 ->
+ (MOVDstorezero [24] destptr
+ (MOVDstorezero [16] destptr
+ (MOVDstorezero [8] destptr
+ (MOVDstorezero [0] destptr mem))))
+
+// Large zeroing uses a loop
+(Zero [s] ptr mem)
+ && (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0 ->
+ (LoweredZero [SizeAndAlign(s).Align()]
+ ptr
+ (ADDconst <ptr.Type> ptr [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)])
+ mem)
+
+// moves
+(Move [s] _ _ mem) && SizeAndAlign(s).Size() == 0 -> mem
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstore dst (MOVBZload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0 ->
+ (MOVHstore dst (MOVHZload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 2 ->
+ (MOVBstore [1] dst (MOVBZload [1] src mem)
+ (MOVBstore dst (MOVBZload src mem) mem))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0 ->
+ (MOVWstore dst (MOVWload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0 ->
+ (MOVHstore [2] dst (MOVHZload [2] src mem)
+ (MOVHstore dst (MOVHZload src mem) mem))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 ->
+ (MOVBstore [3] dst (MOVBZload [3] src mem)
+ (MOVBstore [2] dst (MOVBZload [2] src mem)
+ (MOVBstore [1] dst (MOVBZload [1] src mem)
+ (MOVBstore dst (MOVBZload src mem) mem))))
+
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0 ->
+ (MOVDstore dst (MOVDload src mem) mem)
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0 ->
+ (MOVWstore [4] dst (MOVWZload [4] src mem)
+ (MOVWstore dst (MOVWZload src mem) mem))
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0->
+ (MOVHstore [6] dst (MOVHZload [6] src mem)
+ (MOVHstore [4] dst (MOVHZload [4] src mem)
+ (MOVHstore [2] dst (MOVHZload [2] src mem)
+ (MOVHstore dst (MOVHZload src mem) mem))))
+
+(Move [s] dst src mem) && SizeAndAlign(s).Size() == 3 ->
+ (MOVBstore [2] dst (MOVBZload [2] src mem)
+ (MOVBstore [1] dst (MOVBZload [1] src mem)
+ (MOVBstore dst (MOVBZload src mem) mem)))
+
+// Large move uses a loop
+(Move [s] dst src mem)
+ && (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0 ->
+ (LoweredMove [SizeAndAlign(s).Align()]
+ dst
+ src
+ (ADDconst <src.Type> src [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)])
+ mem)
+
+// Lowering calls
+(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
+(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
+(DeferCall [argwid] mem) -> (CALLdefer [argwid] mem)
+(GoCall [argwid] mem) -> (CALLgo [argwid] mem)
+(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)
+
+// Miscellaneous
+(Convert <t> x mem) -> (MOVDconvert <t> x mem)
+(GetClosurePtr) -> (LoweredGetClosurePtr)
+(IsNonNil ptr) -> (NotEqual (CMPconst [0] ptr))
+(IsInBounds idx len) -> (LessThan (CMPU idx len))
+(IsSliceInBounds idx len) -> (LessEqual (CMPU idx len))
+(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
+
+// Optimizations
+
+(ADD (MOVDconst [c]) x) && int64(int32(c)) == c -> (ADDconst [c] x)
+(ADD x (MOVDconst [c])) && int64(int32(c)) == c -> (ADDconst [c] x)
+
+// Fold offsets for stores.
+(MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVDstore [off1+off2] {sym} x val mem)
+(MOVWstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} x val mem)
+(MOVHstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVHstore [off1+off2] {sym} x val mem)
+(MOVBstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} x val mem)
+
+// Store of zero -> storezero
+(MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVDstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVWstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVHstorezero [off] {sym} ptr mem)
+(MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVBstorezero [off] {sym} ptr mem)
+
+// Fold offsets for storezero
+(MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
+ (MOVDstorezero [off1+off2] {sym} x mem)
+(MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
+ (MOVWstorezero [off1+off2] {sym} x mem)
+(MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
+ (MOVHstorezero [off1+off2] {sym} x mem)
+(MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
+ (MOVBstorezero [off1+off2] {sym} x mem)
+
+// Lowering extension
+// Note: we always extend to 64 bits even though some ops don't need that many result bits.
+(SignExt8to16 x) -> (MOVBreg x)
+(SignExt8to32 x) -> (MOVBreg x)
+(SignExt8to64 x) -> (MOVBreg x)
+(SignExt16to32 x) -> (MOVHreg x)
+(SignExt16to64 x) -> (MOVHreg x)
+(SignExt32to64 x) -> (MOVWreg x)
+
+(ZeroExt8to16 x) -> (MOVBZreg x)
+(ZeroExt8to32 x) -> (MOVBZreg x)
+(ZeroExt8to64 x) -> (MOVBZreg x)
+(ZeroExt16to32 x) -> (MOVHZreg x)
+(ZeroExt16to64 x) -> (MOVHZreg x)
+(ZeroExt32to64 x) -> (MOVWZreg x)
+
+(Trunc16to8 x) -> (MOVBreg x)
+(Trunc32to8 x) -> (MOVBreg x)
+(Trunc32to16 x) -> (MOVHreg x)
+(Trunc64to8 x) -> (MOVBreg x)
+(Trunc64to16 x) -> (MOVHreg x)
+(Trunc64to32 x) -> (MOVWreg x)
+
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
new file mode 100644
index 0000000000..9f4416a383
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
@@ -0,0 +1,395 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import "strings"
+
+// Notes:
+// - Less-than-64-bit integer types live in the low portion of registers.
+// For now, the upper portion is junk; sign/zero-extension might be optimized in the future, but not yet.
+// - Boolean types are zero or 1; stored in a byte, but loaded with AMOVBZ so the upper bytes of a register are zero.
+// - *const instructions may use a constant larger than the instruction can encode.
+// In this case the assembler expands to multiple instructions and uses the tmp
+// register (R31).
+
+var regNamesPPC64 = []string{
+ // "R0", // REGZERO
+ "SP", // REGSP
+ "SB", // REGSB
+ "R3",
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "R10",
+ "R11", // REGCTXT for closures
+ "R12",
+ "R13", // REGTLS
+ "R14",
+ "R15",
+ "R16",
+ "R17",
+ "R18",
+ "R19",
+ "R20",
+ "R21",
+ "R22",
+ "R23",
+ "R24",
+ "R25",
+ "R26",
+ "R27",
+ "R28",
+ "R29",
+ "g", // REGG. Using name "g" and setting Config.hasGReg makes it "just happen".
+ "R31", // REGTMP
+
+ "F0",
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ "F13",
+ "F14",
+ "F15",
+ "F16",
+ "F17",
+ "F18",
+ "F19",
+ "F20",
+ "F21",
+ "F22",
+ "F23",
+ "F24",
+ "F25",
+ "F26",
+ // "F27", // reserved for "floating conversion constant"
+ // "F28", // 0.0
+ // "F29", // 0.5
+ // "F30", // 1.0
+ // "F31", // 2.0
+
+ // "CR0",
+ // "CR1",
+ // "CR2",
+ // "CR3",
+ // "CR4",
+ // "CR5",
+ // "CR6",
+ // "CR7",
+
+ // "CR",
+ // "XER",
+ // "LR",
+ // "CTR",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesPPC64) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesPPC64 {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
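+ // For example, with the ordering above (SP=0, SB=1, R3=2, R4=3, ...),
+ // buildReg("R3 R4") returns a mask with bits 2 and 3 set.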
+
+ var (
+ gp = buildReg("R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29")
+ fp = buildReg("F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26")
+ sp = buildReg("SP")
+ sb = buildReg("SB")
+ // gr = buildReg("g")
+ // cr = buildReg("CR")
+ // ctr = buildReg("CTR")
+ // lr = buildReg("LR")
+ tmp = buildReg("R31")
+ ctxt = buildReg("R11")
+ // tls = buildReg("R13")
+ gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
+ gp11 = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}
+ gp21 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp}}
+ gp1cr = regInfo{inputs: []regMask{gp | sp | sb}}
+ gp2cr = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}}
+ crgp = regInfo{inputs: nil, outputs: []regMask{gp}}
+ gpload = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}
+ gpstore = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}}
+ gpstorezero = regInfo{inputs: []regMask{gp | sp | sb}} // ppc64.REGZERO is reserved zero value
+ fp01 = regInfo{inputs: nil, outputs: []regMask{fp}}
+ fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
+ fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}}
+ gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
+ fp2cr = regInfo{inputs: []regMask{fp, fp}}
+ fpload = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{fp}}
+ fpstore = regInfo{inputs: []regMask{gp | sp | sb, fp}}
+ callerSave = regMask(gp | fp)
+ )
+ ops := []opData{
+ {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
+ {name: "ADDconst", argLength: 1, reg: gp11, asm: "ADD", aux: "SymOff"}, // arg0 + auxInt + aux.(*gc.Sym)
+ {name: "FADD", argLength: 2, reg: fp21, asm: "FADD", commutative: true}, // arg0+arg1
+ {name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true}, // arg0+arg1
+ {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0-arg1
+ {name: "FSUB", argLength: 2, reg: fp21, asm: "FSUB"}, // arg0-arg1
+ {name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS"}, // arg0-arg1
+
+ {name: "MULLD", argLength: 2, reg: gp21, asm: "MULLD", typ: "Int64", commutative: true}, // arg0*arg1 (signed 64-bit)
+ {name: "MULLW", argLength: 2, reg: gp21, asm: "MULLW", typ: "Int32", commutative: true}, // arg0*arg1 (signed 32-bit)
+
+ {name: "MULHD", argLength: 2, reg: gp21, asm: "MULHD", commutative: true}, // (arg0 * arg1) >> 64, signed
+ {name: "MULHW", argLength: 2, reg: gp21, asm: "MULHW", commutative: true}, // (arg0 * arg1) >> 32, signed
+ {name: "MULHDU", argLength: 2, reg: gp21, asm: "MULHDU", commutative: true}, // (arg0 * arg1) >> 64, unsigned
+ {name: "MULHWU", argLength: 2, reg: gp21, asm: "MULHWU", commutative: true}, // (arg0 * arg1) >> 32, unsigned
+
+ {name: "FMUL", argLength: 2, reg: fp21, asm: "FMUL", commutative: true}, // arg0*arg1
+ {name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true}, // arg0*arg1
+
+ {name: "SRAD", argLength: 2, reg: gp21, asm: "SRAD"}, // arg0 >>a arg1, 64 bits (all sign if arg1 & 64 != 0)
+ {name: "SRAW", argLength: 2, reg: gp21, asm: "SRAW"}, // arg0 >>a arg1, 32 bits (all sign if arg1 & 32 != 0)
+ {name: "SRD", argLength: 2, reg: gp21, asm: "SRD"}, // arg0 >> arg1, 64 bits (0 if arg1 & 64 != 0)
+ {name: "SRW", argLength: 2, reg: gp21, asm: "SRW"}, // arg0 >> arg1, 32 bits (0 if arg1 & 32 != 0)
+ {name: "SLD", argLength: 2, reg: gp21, asm: "SLD"}, // arg0 << arg1, 64 bits (0 if arg1 & 64 != 0)
+ {name: "SLW", argLength: 2, reg: gp21, asm: "SLW"}, // arg0 << arg1, 32 bits (0 if arg1 & 32 != 0)
+
+ {name: "ADDconstForCarry", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, aux: "Int16", asm: "ADDC", typ: "Flags"}, // _, carry := arg0 + aux
+ {name: "MaskIfNotCarry", argLength: 1, reg: crgp, asm: "ADDME", typ: "Int64"}, // carry - 1 (if carry then 0 else -1)
+
+ {name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "Int64"}, // arg0 >>a aux, 64 bits
+ {name: "SRAWconst", argLength: 1, reg: gp11, asm: "SRAW", aux: "Int64"}, // arg0 >>a aux, 32 bits
+ {name: "SRDconst", argLength: 1, reg: gp11, asm: "SRD", aux: "Int64"}, // arg0 >> aux, 64 bits
+ {name: "SRWconst", argLength: 1, reg: gp11, asm: "SRW", aux: "Int64"}, // arg0 >> aux, 32 bits
+ {name: "SLDconst", argLength: 1, reg: gp11, asm: "SLD", aux: "Int64"}, // arg0 << aux, 64 bits
+ {name: "SLWconst", argLength: 1, reg: gp11, asm: "SLW", aux: "Int64"}, // arg0 << aux, 32 bits
+
+ {name: "FDIV", argLength: 2, reg: fp21, asm: "FDIV"}, // arg0/arg1
+ {name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS"}, // arg0/arg1
+
+ {name: "DIVD", argLength: 2, reg: gp21, asm: "DIVD", typ: "Int64"}, // arg0/arg1 (signed 64-bit)
+ {name: "DIVW", argLength: 2, reg: gp21, asm: "DIVW", typ: "Int32"}, // arg0/arg1 (signed 32-bit)
+ {name: "DIVDU", argLength: 2, reg: gp21, asm: "DIVDU", typ: "Int64"}, // arg0/arg1 (unsigned 64-bit)
+ {name: "DIVWU", argLength: 2, reg: gp21, asm: "DIVWU", typ: "Int32"}, // arg0/arg1 (unsigned 32-bit)
+
+ // MOD is implemented as rem := arg0 - (arg0/arg1) * arg1
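The same identity in plain Go, for any b != 0 (Go's / truncates toward zero, so this equals a % b):

func rem64(a, b int64) int64 {
	return a - (a/b)*b
}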
+
+ // Conversions are all float-to-float register operations. "Integer" refers to encoding in the FP register.
+ {name: "FCTIDZ", argLength: 1, reg: fp11, asm: "FCTIDZ", typ: "Float64"}, // convert float to 64-bit int round towards zero
+ {name: "FCTIWZ", argLength: 1, reg: fp11, asm: "FCTIWZ", typ: "Float64"}, // convert float to 32-bit int round towards zero
+ {name: "FCFID", argLength: 1, reg: fp11, asm: "FCFID", typ: "Float64"}, // convert 64-bit integer to float
+ {name: "FRSP", argLength: 1, reg: fp11, asm: "FRSP", typ: "Float64"}, // round float to 32-bit value
+
+ // Movement between float and integer registers with no change in bits; accomplished with stores+loads on PPC.
+ // Because the 32-bit load-literal-bits instructions have impoverished addressability, always widen the
+ // data and use FMOVDload and FMOVDstore instead (this also dodges endianness issues).
+ // There are optimizations that should apply -- (Xi2f64 (MOVWload (not-ADD-ptr+offset))) could use
+ // the word-load instructions, and (Xi2f64 (MOVDload ptr)) can be (FMOVDload ptr).
+
+ {name: "Xf2i64", argLength: 1, reg: fpgp, typ: "Int64"}, // move 64 bits of F register into G register
+ {name: "Xi2f64", argLength: 1, reg: gpfp, typ: "Float64"}, // move 64 bits of G register into F register
+
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0&arg1
+ {name: "ANDN", argLength: 2, reg: gp21, asm: "ANDN"}, // arg0&^arg1
+ {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0|arg1
+ {name: "ORN", argLength: 2, reg: gp21, asm: "ORN"}, // arg0|^arg1
+ {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", typ: "Int64", commutative: true}, // arg0^arg1
+ {name: "EQV", argLength: 2, reg: gp21, asm: "EQV", typ: "Int64", commutative: true}, // arg0^^arg1
+ {name: "NEG", argLength: 1, reg: gp11, asm: "NEG"}, // -arg0 (integer)
+ {name: "FNEG", argLength: 1, reg: fp11, asm: "FNEG"}, // -arg0 (floating point)
+ {name: "FSQRT", argLength: 1, reg: fp11, asm: "FSQRT"}, // sqrt(arg0) (floating point)
+ {name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"}, // sqrt(arg0) (floating point, single precision)
+
+ {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0|aux
+ {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64"}, // arg0^aux
+ {name: "ANDconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}, asm: "ANDCC", aux: "Int64", clobberFlags: true}, // arg0&aux // and-immediate sets CC on PPC, always.
+
+ {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB", typ: "Int64"}, // sign extend int8 to int64
+ {name: "MOVBZreg", argLength: 1, reg: gp11, asm: "MOVBZ", typ: "Int64"}, // zero extend uint8 to uint64
+ {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH", typ: "Int64"}, // sign extend int16 to int64
+ {name: "MOVHZreg", argLength: 1, reg: gp11, asm: "MOVHZ", typ: "Int64"}, // zero extend uint16 to uint64
+ {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW", typ: "Int64"}, // sign extend int32 to int64
+ {name: "MOVWZreg", argLength: 1, reg: gp11, asm: "MOVWZ", typ: "Int64"}, // zero extend uint32 to uint64
+ {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVB", aux: "SymOff", typ: "Int8"}, // sign extend int8 to int64
+ {name: "MOVBZload", argLength: 2, reg: gpload, asm: "MOVBZ", aux: "SymOff", typ: "UInt8"}, // zero extend uint8 to uint64
+ {name: "MOVHload", argLength: 2, reg: gpload, asm: "MOVH", aux: "SymOff", typ: "Int16"}, // sign extend int16 to int64
+ {name: "MOVHZload", argLength: 2, reg: gpload, asm: "MOVHZ", aux: "SymOff", typ: "UInt16"}, // zero extend uint16 to uint64
+ {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVW", aux: "SymOff", typ: "Int32"}, // sign extend int32 to int64
+ {name: "MOVWZload", argLength: 2, reg: gpload, asm: "MOVWZ", aux: "SymOff", typ: "UInt32"}, // zero extend uint32 to uint64
+ {name: "MOVDload", argLength: 2, reg: gpload, asm: "MOVD", aux: "SymOff", typ: "Int64"},
+
+ {name: "FMOVDload", argLength: 2, reg: fpload, asm: "FMOVD", typ: "Float64"},
+ {name: "FMOVSload", argLength: 2, reg: fpload, asm: "FMOVS", typ: "Float32"},
+ {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem"},
+ {name: "MOVHstore", argLength: 3, reg: gpstore, asm: "MOVH", aux: "SymOff", typ: "Mem"},
+ {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem"},
+ {name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOVD", aux: "SymOff", typ: "Mem"},
+ {name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "FMOVD", aux: "SymOff", typ: "Mem"},
+ {name: "FMOVSstore", argLength: 3, reg: fpstore, asm: "FMOVS", aux: "SymOff", typ: "Mem"},
+
+ {name: "MOVBstorezero", argLength: 2, reg: gpstorezero, asm: "MOVB", aux: "SymOff", typ: "Mem"}, // store zero byte to arg0+aux. arg1=mem
+ {name: "MOVHstorezero", argLength: 2, reg: gpstorezero, asm: "MOVH", aux: "SymOff", typ: "Mem"}, // store zero 2 bytes to ...
+ {name: "MOVWstorezero", argLength: 2, reg: gpstorezero, asm: "MOVW", aux: "SymOff", typ: "Mem"}, // store zero 4 bytes to ...
+ {name: "MOVDstorezero", argLength: 2, reg: gpstorezero, asm: "MOVD", aux: "SymOff", typ: "Mem"}, // store zero 8 bytes to ...
+
+ {name: "MOVDaddr", argLength: 1, reg: regInfo{inputs: []regMask{sp | sb}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVD", rematerializeable: true}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB
+
+ {name: "MOVDconst", argLength: 0, reg: gp01, aux: "Int64", asm: "MOVD", rematerializeable: true}, //
+ {name: "MOVWconst", argLength: 0, reg: gp01, aux: "Int32", asm: "MOVW", rematerializeable: true}, // 32 low bits of auxint
+ {name: "FMOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "FMOVD", rematerializeable: true}, //
+ {name: "FMOVSconst", argLength: 0, reg: fp01, aux: "Float32", asm: "FMOVS", rematerializeable: true}, //
+ {name: "FCMPU", argLength: 2, reg: fp2cr, asm: "FCMPU", typ: "Flags"},
+
+ {name: "CMP", argLength: 2, reg: gp2cr, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPU", argLength: 2, reg: gp2cr, asm: "CMPU", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPW", argLength: 2, reg: gp2cr, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPWU", argLength: 2, reg: gp2cr, asm: "CMPWU", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPconst", argLength: 1, reg: gp1cr, asm: "CMP", aux: "Int64", typ: "Flags"},
+ {name: "CMPUconst", argLength: 1, reg: gp1cr, asm: "CMPU", aux: "Int64", typ: "Flags"},
+ {name: "CMPWconst", argLength: 1, reg: gp1cr, asm: "CMPW", aux: "Int32", typ: "Flags"},
+ {name: "CMPWUconst", argLength: 1, reg: gp1cr, asm: "CMPWU", aux: "Int32", typ: "Flags"},
+
+ // pseudo-ops
+ {name: "Equal", argLength: 1, reg: crgp}, // bool, true flags encode x==y false otherwise.
+ {name: "NotEqual", argLength: 1, reg: crgp}, // bool, true flags encode x!=y false otherwise.
+ {name: "LessThan", argLength: 1, reg: crgp}, // bool, true flags encode x<y false otherwise.
+ {name: "FLessThan", argLength: 1, reg: crgp}, // bool, true flags encode x<y false otherwise.
+ {name: "LessEqual", argLength: 1, reg: crgp}, // bool, true flags encode x<=y false otherwise.
+ {name: "FLessEqual", argLength: 1, reg: crgp}, // bool, true flags encode x<=y false otherwise; PPC <= === !> which is wrong for NaN
+ {name: "GreaterThan", argLength: 1, reg: crgp}, // bool, true flags encode x>y false otherwise.
+ {name: "FGreaterThan", argLength: 1, reg: crgp}, // bool, true flags encode x>y false otherwise.
+ {name: "GreaterEqual", argLength: 1, reg: crgp}, // bool, true flags encode x>=y false otherwise.
+ {name: "FGreaterEqual", argLength: 1, reg: crgp}, // bool, true flags encode x>=y false otherwise.; PPC >= === !< which is wrong for NaN
+
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of the closure pointer.
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{ctxt}}},
+
+ // arg0=ptr, arg1=mem, returns void. Faults if ptr is nil.
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, clobberFlags: true},
+
+ // Convert pointer to integer, takes a memory operand for ordering.
+ {name: "MOVDconvert", argLength: 2, reg: gp11, asm: "MOVD"},
+
+ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gp | sp, ctxt, 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLdefer", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "Int64", clobberFlags: true}, // call deferproc. arg0=mem, auxint=argsize, returns mem
+ {name: "CALLgo", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "Int64", clobberFlags: true}, // call newproc. arg0=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // large or unaligned zeroing
+ // arg0 = address of memory to zero (in R3, changed as side effect)
+ // arg1 = address of the last element to zero
+ // arg2 = mem
+ // returns mem
+ // ADD -8,R3,R3 // intermediate value not valid GC ptr, cannot expose to opt+GC
+ // MOVDU R0, 8(R3)
+ // CMP R3, Rarg1
+ // BLE -2(PC)
+ {
+ name: "LoweredZero",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R3"), gp},
+ clobbers: buildReg("R3"),
+ },
+ clobberFlags: true,
+ typ: "Mem",
+ },
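A Go model of the loop shape sketched above; store64 is a hypothetical stand-in for the MOVDU store, and p0 is the start address already decremented by 8:

func loweredZero(p0, end uintptr, store64 func(uintptr, uint64)) {
	p := p0
	for {
		p += 8
		store64(p, 0) // MOVDU R0, 8(R3): store zero and advance the pointer
		if p > end { // CMP R3, Rarg1; BLE loops while p <= end
			break
		}
	}
}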
+
+ // large or unaligned move
+ // arg0 = address of dst memory (in R3, changed as side effect)
+ // arg1 = address of src memory (in R4, changed as side effect)
+ // arg2 = address of the last element of src
+ // arg3 = mem
+ // returns mem
+ // ADD -8,R3,R3 // intermediate value not valid GC ptr, cannot expose to opt+GC
+ // ADD -8,R4,R4 // intermediate value not valid GC ptr, cannot expose to opt+GC
+ // MOVDU 8(R4), Rtmp
+ // MOVDU Rtmp, 8(R3)
+ // CMP R4, Rarg2
+ // BLT -3(PC)
+ {
+ name: "LoweredMove",
+ aux: "Int64",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R3"), buildReg("R4"), gp},
+ clobbers: buildReg("R3 R4"),
+ },
+ clobberFlags: true,
+ typ: "Mem",
+ },
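And the matching Go model for the move loop, again with hypothetical load64/store64 helpers standing in for the MOVDU pair:

func loweredMove(dst0, src0, srcEnd uintptr, load64 func(uintptr) uint64, store64 func(uintptr, uint64)) {
	d, s := dst0, src0 // each already decremented by 8, as in the ADDs above
	for {
		s += 8
		tmp := load64(s) // MOVDU 8(R4), Rtmp
		d += 8
		store64(d, tmp) // MOVDU Rtmp, 8(R3)
		if s >= srcEnd { // CMP R4, Rarg2; BLT loops while s < srcEnd
			break
		}
	}
}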
+
+ // (InvertFlags (CMP a b)) == (CMP b a)
+ // So if we want (LessThan (CMP a b)) but we can't do that because a is a constant,
+ // then we do (LessThan (InvertFlags (CMP b a))) instead.
+ // Rewrites will convert this to (GreaterThan (CMP b a)).
+ // InvertFlags is a pseudo-op which can't appear in assembly output.
+ {name: "InvertFlags", argLength: 1}, // reverse direction of arg0
+
+ // Constant flag values. For any comparison, there are three possible
+ // outcomes: the three from the signed total order (<, ==, >) or the
+ // three from the unsigned total order, depending on which comparison
+ // operation was used (CMP or CMPU -- PPC differs from the other
+ // architectures, which have a single comparison producing both signed
+ // and unsigned comparison results).
+
+ // These ops are for temporary use by rewrite rules. They
+ // cannot appear in the generated assembly.
+ {name: "FlagEQ"}, // equal
+ {name: "FlagLT"}, // signed < or unsigned <
+ {name: "FlagGT"}, // signed > or unsigned >
+
+ }
+
+ blocks := []blockData{
+ {name: "EQ"},
+ {name: "NE"},
+ {name: "LT"},
+ {name: "LE"},
+ {name: "GT"},
+ {name: "GE"},
+ {name: "FLT"},
+ {name: "FLE"},
+ {name: "FGT"},
+ {name: "FGE"},
+ }
+
+ archs = append(archs, arch{
+ name: "PPC64",
+ pkg: "cmd/internal/obj/ppc64",
+ genfile: "../../ppc64/ssa.go",
+ ops: ops,
+ blocks: blocks,
+ regnames: regNamesPPC64,
+ gpregmask: gp,
+ fpregmask: fp,
+ framepointerreg: int8(num["SP"]),
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/dec64.rules b/src/cmd/compile/internal/ssa/gen/dec64.rules
new file mode 100644
index 0000000000..e419c741b6
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/dec64.rules
@@ -0,0 +1,407 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains rules to decompose [u]int64 types on 32-bit
+// architectures. These rules work together with the decomposeBuiltIn
+// pass which handles phis of these types.
+
+(Int64Hi (Int64Make hi _)) -> hi
+(Int64Lo (Int64Make _ lo)) -> lo
+
+// Assuming little endian (we don't support big-endian 32-bit architectures yet)
+(Load <t> ptr mem) && is64BitInt(t) && t.IsSigned() ->
+ (Int64Make
+ (Load <config.fe.TypeInt32()> (OffPtr <config.fe.TypeInt32().PtrTo()> [4] ptr) mem)
+ (Load <config.fe.TypeUInt32()> ptr mem))
+(Load <t> ptr mem) && is64BitInt(t) && !t.IsSigned() ->
+ (Int64Make
+ (Load <config.fe.TypeUInt32()> (OffPtr <config.fe.TypeUInt32().PtrTo()> [4] ptr) mem)
+ (Load <config.fe.TypeUInt32()> ptr mem))
+
+(Store [8] dst (Int64Make hi lo) mem) ->
+ (Store [4]
+ (OffPtr <hi.Type.PtrTo()> [4] dst)
+ hi
+ (Store [4] dst lo mem))
+
+(Arg {n} [off]) && is64BitInt(v.Type) && v.Type.IsSigned() ->
+ (Int64Make
+ (Arg <config.fe.TypeInt32()> {n} [off+4])
+ (Arg <config.fe.TypeUInt32()> {n} [off]))
+(Arg {n} [off]) && is64BitInt(v.Type) && !v.Type.IsSigned() ->
+ (Int64Make
+ (Arg <config.fe.TypeUInt32()> {n} [off+4])
+ (Arg <config.fe.TypeUInt32()> {n} [off]))
+
+(Add64 x y) ->
+ (Int64Make
+ (Add32withcarry <config.fe.TypeInt32()>
+ (Int64Hi x)
+ (Int64Hi y)
+ (Select0 <TypeFlags> (Add32carry (Int64Lo x) (Int64Lo y))))
+ (Select1 <config.fe.TypeUInt32()> (Add32carry (Int64Lo x) (Int64Lo y))))
+
+(Sub64 x y) ->
+ (Int64Make
+ (Sub32withcarry <config.fe.TypeInt32()>
+ (Int64Hi x)
+ (Int64Hi y)
+ (Select0 <TypeFlags> (Sub32carry (Int64Lo x) (Int64Lo y))))
+ (Select1 <config.fe.TypeUInt32()> (Sub32carry (Int64Lo x) (Int64Lo y))))
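The Add64/Sub64 decompositions have a direct Go analogue (function and variable names here are illustrative):

func add64(xhi, xlo, yhi, ylo uint32) (hi, lo uint32) {
	lo = xlo + ylo // Select1 of Add32carry
	var carry uint32
	if lo < xlo { // Select0 of Add32carry: the low-word add wrapped
		carry = 1
	}
	hi = xhi + yhi + carry // Add32withcarry
	return
}

Sub64 mirrors this with a borrow: the borrow is 1 when xlo < ylo, and hi = xhi - yhi - borrow.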
+
+(Mul64 x y) ->
+ (Int64Make
+ (Add32 <config.fe.TypeUInt32()>
+ (Mul32 <config.fe.TypeUInt32()> (Int64Lo x) (Int64Hi y))
+ (Add32 <config.fe.TypeUInt32()>
+ (Mul32 <config.fe.TypeUInt32()> (Int64Hi x) (Int64Lo y))
+ (Select0 <config.fe.TypeUInt32()> (Mul32uhilo (Int64Lo x) (Int64Lo y)))))
+ (Select1 <config.fe.TypeUInt32()> (Mul32uhilo (Int64Lo x) (Int64Lo y))))
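In plain Go, the same schoolbook decomposition; Mul32uhilo corresponds to the full 32x32->64 product, and the xhi*yhi term is dropped because it shifts entirely out of the low 64 bits:

func mul64(xhi, xlo, yhi, ylo uint32) (hi, lo uint32) {
	full := uint64(xlo) * uint64(ylo) // Mul32uhilo(xlo, ylo)
	hi = uint32(full>>32) + xlo*yhi + xhi*ylo // high half plus the two cross terms
	lo = uint32(full)
	return
}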
+
+(And64 x y) ->
+ (Int64Make
+ (And32 <config.fe.TypeUInt32()> (Int64Hi x) (Int64Hi y))
+ (And32 <config.fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
+
+(Or64 x y) ->
+ (Int64Make
+ (Or32 <config.fe.TypeUInt32()> (Int64Hi x) (Int64Hi y))
+ (Or32 <config.fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
+
+(Xor64 x y) ->
+ (Int64Make
+ (Xor32 <config.fe.TypeUInt32()> (Int64Hi x) (Int64Hi y))
+ (Xor32 <config.fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
+
+(Neg64 <t> x) -> (Sub64 (Const64 <t> [0]) x)
+
+(Com64 x) ->
+ (Int64Make
+ (Com32 <config.fe.TypeUInt32()> (Int64Hi x))
+ (Com32 <config.fe.TypeUInt32()> (Int64Lo x)))
+
+(SignExt32to64 x) -> (Int64Make (Signmask x) x)
+(SignExt16to64 x) -> (SignExt32to64 (SignExt16to32 x))
+(SignExt8to64 x) -> (SignExt32to64 (SignExt8to32 x))
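Signmask, and Zeromask used by the shift rules below, have simple Go equivalents matching their descriptions in genericOps.go:

func signmask(x int32) int32 { return x >> 31 } // 0 if x >= 0, -1 if x < 0

func zeromask(x uint32) uint32 { // 0 if x == 0, 0xffffffff otherwise
	if x != 0 {
		return 0xffffffff
	}
	return 0
}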
+
+(ZeroExt32to64 x) -> (Int64Make (Const32 <config.fe.TypeUInt32()> [0]) x)
+(ZeroExt16to64 x) -> (ZeroExt32to64 (ZeroExt16to32 x))
+(ZeroExt8to64 x) -> (ZeroExt32to64 (ZeroExt8to32 x))
+
+(Trunc64to32 (Int64Make _ lo)) -> lo
+(Trunc64to16 (Int64Make _ lo)) -> (Trunc32to16 lo)
+(Trunc64to8 (Int64Make _ lo)) -> (Trunc32to8 lo)
+
+(Lsh32x64 _ (Int64Make (Const32 [c]) _)) && c != 0 -> (Const32 [0])
+(Rsh32x64 x (Int64Make (Const32 [c]) _)) && c != 0 -> (Signmask x)
+(Rsh32Ux64 _ (Int64Make (Const32 [c]) _)) && c != 0 -> (Const32 [0])
+(Lsh16x64 _ (Int64Make (Const32 [c]) _)) && c != 0 -> (Const32 [0])
+(Rsh16x64 x (Int64Make (Const32 [c]) _)) && c != 0 -> (Signmask (SignExt16to32 x))
+(Rsh16Ux64 _ (Int64Make (Const32 [c]) _)) && c != 0 -> (Const32 [0])
+(Lsh8x64 _ (Int64Make (Const32 [c]) _)) && c != 0 -> (Const32 [0])
+(Rsh8x64 x (Int64Make (Const32 [c]) _)) && c != 0 -> (Signmask (SignExt8to32 x))
+(Rsh8Ux64 _ (Int64Make (Const32 [c]) _)) && c != 0 -> (Const32 [0])
+
+(Lsh32x64 x (Int64Make (Const32 [0]) lo)) -> (Lsh32x32 x lo)
+(Rsh32x64 x (Int64Make (Const32 [0]) lo)) -> (Rsh32x32 x lo)
+(Rsh32Ux64 x (Int64Make (Const32 [0]) lo)) -> (Rsh32Ux32 x lo)
+(Lsh16x64 x (Int64Make (Const32 [0]) lo)) -> (Lsh16x32 x lo)
+(Rsh16x64 x (Int64Make (Const32 [0]) lo)) -> (Rsh16x32 x lo)
+(Rsh16Ux64 x (Int64Make (Const32 [0]) lo)) -> (Rsh16Ux32 x lo)
+(Lsh8x64 x (Int64Make (Const32 [0]) lo)) -> (Lsh8x32 x lo)
+(Rsh8x64 x (Int64Make (Const32 [0]) lo)) -> (Rsh8x32 x lo)
+(Rsh8Ux64 x (Int64Make (Const32 [0]) lo)) -> (Rsh8Ux32 x lo)
+
+(Lsh64x64 _ (Int64Make (Const32 [c]) _)) && c != 0 -> (Const64 [0])
+(Rsh64x64 x (Int64Make (Const32 [c]) _)) && c != 0 -> (Int64Make (Signmask (Int64Hi x)) (Signmask (Int64Hi x)))
+(Rsh64Ux64 _ (Int64Make (Const32 [c]) _)) && c != 0 -> (Const64 [0])
+
+(Lsh64x64 x (Int64Make (Const32 [0]) lo)) -> (Lsh64x32 x lo)
+(Rsh64x64 x (Int64Make (Const32 [0]) lo)) -> (Rsh64x32 x lo)
+(Rsh64Ux64 x (Int64Make (Const32 [0]) lo)) -> (Rsh64Ux32 x lo)
+
+// Turn 64-bit-count shifts with a non-constant count into 32-bit-count shifts.
+// If the high 32 bits of the count are nonzero, substitute an out-of-range ("huge") count.
+(Lsh64x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
+ (Lsh64x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+(Rsh64x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
+ (Rsh64x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+(Rsh64Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
+ (Rsh64Ux32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+(Lsh32x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
+ (Lsh32x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+(Rsh32x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
+ (Rsh32x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+(Rsh32Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
+ (Rsh32Ux32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+(Lsh16x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
+ (Lsh16x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+(Rsh16x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
+ (Rsh16x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+(Rsh16Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
+ (Rsh16Ux32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+(Lsh8x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
+ (Lsh8x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+(Rsh8x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
+ (Rsh8x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+(Rsh8Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
+ (Rsh8Ux32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+
+// 64x left shift
+// result.hi = hi<<s | lo>>(32-s) | lo<<(s-32) // >> is unsigned; shifts by 32 or more yield 0
+// result.lo = lo<<s
+(Lsh64x32 (Int64Make hi lo) s) ->
+ (Int64Make
+ (Or32 <config.fe.TypeUInt32()>
+ (Or32 <config.fe.TypeUInt32()>
+ (Lsh32x32 <config.fe.TypeUInt32()> hi s)
+ (Rsh32Ux32 <config.fe.TypeUInt32()>
+ lo
+ (Sub32 <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [32]) s)))
+ (Lsh32x32 <config.fe.TypeUInt32()>
+ lo
+ (Sub32 <config.fe.TypeUInt32()> s (Const32 <config.fe.TypeUInt32()> [32]))))
+ (Lsh32x32 <config.fe.TypeUInt32()> lo s))
+(Lsh64x16 (Int64Make hi lo) s) ->
+ (Int64Make
+ (Or32 <config.fe.TypeUInt32()>
+ (Or32 <config.fe.TypeUInt32()>
+ (Lsh32x16 <config.fe.TypeUInt32()> hi s)
+ (Rsh32Ux16 <config.fe.TypeUInt32()>
+ lo
+ (Sub16 <config.fe.TypeUInt16()> (Const16 <config.fe.TypeUInt16()> [32]) s)))
+ (Lsh32x16 <config.fe.TypeUInt32()>
+ lo
+ (Sub16 <config.fe.TypeUInt16()> s (Const16 <config.fe.TypeUInt16()> [32]))))
+ (Lsh32x16 <config.fe.TypeUInt32()> lo s))
+(Lsh64x8 (Int64Make hi lo) s) ->
+ (Int64Make
+ (Or32 <config.fe.TypeUInt32()>
+ (Or32 <config.fe.TypeUInt32()>
+ (Lsh32x8 <config.fe.TypeUInt32()> hi s)
+ (Rsh32Ux8 <config.fe.TypeUInt32()>
+ lo
+ (Sub8 <config.fe.TypeUInt8()> (Const8 <config.fe.TypeUInt8()> [32]) s)))
+ (Lsh32x8 <config.fe.TypeUInt32()>
+ lo
+ (Sub8 <config.fe.TypeUInt8()> s (Const8 <config.fe.TypeUInt8()> [32]))))
+ (Lsh32x8 <config.fe.TypeUInt32()> lo s))
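Go's shift semantics match the machine behavior these rules rely on (unsigned shifts by 32 or more yield 0), so the identity can be checked directly in Go for counts 0..63; note that 32-s and s-32 wrap to huge counts rather than going negative:

func lsh64(hi, lo uint32, s uint32) (rhi, rlo uint32) {
	rhi = hi<<s | lo>>(32-s) | lo<<(s-32) // each term vanishes when its shift count is >= 32
	rlo = lo << s
	return
}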
+
+// 64x unsigned right shift
+// result.hi = hi>>s
+// result.lo = lo>>s | hi<<(32-s) | hi>>(s-32) // >> is unsigned; shifts by 32 or more yield 0
+(Rsh64Ux32 (Int64Make hi lo) s) ->
+ (Int64Make
+ (Rsh32Ux32 <config.fe.TypeUInt32()> hi s)
+ (Or32 <config.fe.TypeUInt32()>
+ (Or32 <config.fe.TypeUInt32()>
+ (Rsh32Ux32 <config.fe.TypeUInt32()> lo s)
+ (Lsh32x32 <config.fe.TypeUInt32()>
+ hi
+ (Sub32 <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [32]) s)))
+ (Rsh32Ux32 <config.fe.TypeUInt32()>
+ hi
+ (Sub32 <config.fe.TypeUInt32()> s (Const32 <config.fe.TypeUInt32()> [32])))))
+(Rsh64Ux16 (Int64Make hi lo) s) ->
+ (Int64Make
+ (Rsh32Ux16 <config.fe.TypeUInt32()> hi s)
+ (Or32 <config.fe.TypeUInt32()>
+ (Or32 <config.fe.TypeUInt32()>
+ (Rsh32Ux16 <config.fe.TypeUInt32()> lo s)
+ (Lsh32x16 <config.fe.TypeUInt32()>
+ hi
+ (Sub16 <config.fe.TypeUInt16()> (Const16 <config.fe.TypeUInt16()> [32]) s)))
+ (Rsh32Ux16 <config.fe.TypeUInt32()>
+ hi
+ (Sub16 <config.fe.TypeUInt16()> s (Const16 <config.fe.TypeUInt16()> [32])))))
+(Rsh64Ux8 (Int64Make hi lo) s) ->
+ (Int64Make
+ (Rsh32Ux8 <config.fe.TypeUInt32()> hi s)
+ (Or32 <config.fe.TypeUInt32()>
+ (Or32 <config.fe.TypeUInt32()>
+ (Rsh32Ux8 <config.fe.TypeUInt32()> lo s)
+ (Lsh32x8 <config.fe.TypeUInt32()>
+ hi
+ (Sub8 <config.fe.TypeUInt8()> (Const8 <config.fe.TypeUInt8()> [32]) s)))
+ (Rsh32Ux8 <config.fe.TypeUInt32()>
+ hi
+ (Sub8 <config.fe.TypeUInt8()> s (Const8 <config.fe.TypeUInt8()> [32])))))
+
+// 64x signed right shift
+// result.hi = hi>>s
+// result.lo = lo>>s | hi<<(32-s) | (hi>>(s-32))&zeromask(s>>5) // hi>>(s-32) is signed; shifts by 32 or more yield 0/-1
+(Rsh64x32 (Int64Make hi lo) s) ->
+ (Int64Make
+ (Rsh32x32 <config.fe.TypeUInt32()> hi s)
+ (Or32 <config.fe.TypeUInt32()>
+ (Or32 <config.fe.TypeUInt32()>
+ (Rsh32Ux32 <config.fe.TypeUInt32()> lo s)
+ (Lsh32x32 <config.fe.TypeUInt32()>
+ hi
+ (Sub32 <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [32]) s)))
+ (And32 <config.fe.TypeUInt32()>
+ (Rsh32x32 <config.fe.TypeUInt32()>
+ hi
+ (Sub32 <config.fe.TypeUInt32()> s (Const32 <config.fe.TypeUInt32()> [32])))
+ (Zeromask
+ (Rsh32Ux32 <config.fe.TypeUInt32()> s (Const32 <config.fe.TypeUInt32()> [5]))))))
+(Rsh64x16 (Int64Make hi lo) s) ->
+ (Int64Make
+ (Rsh32x16 <config.fe.TypeUInt32()> hi s)
+ (Or32 <config.fe.TypeUInt32()>
+ (Or32 <config.fe.TypeUInt32()>
+ (Rsh32Ux16 <config.fe.TypeUInt32()> lo s)
+ (Lsh32x16 <config.fe.TypeUInt32()>
+ hi
+ (Sub16 <config.fe.TypeUInt16()> (Const16 <config.fe.TypeUInt16()> [32]) s)))
+ (And32 <config.fe.TypeUInt32()>
+ (Rsh32x16 <config.fe.TypeUInt32()>
+ hi
+ (Sub16 <config.fe.TypeUInt16()> s (Const16 <config.fe.TypeUInt16()> [32])))
+ (Zeromask
+ (ZeroExt16to32
+ (Rsh16Ux32 <config.fe.TypeUInt16()> s (Const32 <config.fe.TypeUInt32()> [5])))))))
+(Rsh64x8 (Int64Make hi lo) s) ->
+ (Int64Make
+ (Rsh32x8 <config.fe.TypeUInt32()> hi s)
+ (Or32 <config.fe.TypeUInt32()>
+ (Or32 <config.fe.TypeUInt32()>
+ (Rsh32Ux8 <config.fe.TypeUInt32()> lo s)
+ (Lsh32x8 <config.fe.TypeUInt32()>
+ hi
+ (Sub8 <config.fe.TypeUInt8()> (Const8 <config.fe.TypeUInt8()> [32]) s)))
+ (And32 <config.fe.TypeUInt32()>
+ (Rsh32x8 <config.fe.TypeUInt32()>
+ hi
+ (Sub8 <config.fe.TypeUInt8()> s (Const8 <config.fe.TypeUInt8()> [32])))
+ (Zeromask
+ (ZeroExt8to32
+ (Rsh8Ux32 <config.fe.TypeUInt8()> s (Const32 <config.fe.TypeUInt32()> [5])))))))
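The signed variant in Go, reusing the zeromask helper sketched earlier; Go's signed right shift fills with the sign bit for counts >= 32, matching the comment above:

func rsh64(hi, lo uint32, s uint32) (rhi, rlo uint32) {
	rhi = uint32(int32(hi) >> s) // sign-fills once s >= 32
	rlo = lo>>s | hi<<(32-s) |
		uint32(int32(hi)>>(s-32))&zeromask(s>>5) // last term masked off while s < 32
	return
}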
+
+// 64-bit shifts by a Const32 count
+// we probably do not need these rules -- lateopt may take care of them just fine
+//(Lsh64x32 _ (Const32 [c])) && uint32(c) >= 64 -> (Const64 [0])
+//(Rsh64x32 x (Const32 [c])) && uint32(c) >= 64 -> (Int64Make (Signmask (Int64Hi x)) (Signmask (Int64Hi x)))
+//(Rsh64Ux32 _ (Const32 [c])) && uint32(c) >= 64 -> (Const64 [0])
+//
+//(Lsh64x32 x (Const32 [c])) && c < 64 && c > 32 ->
+// (Int64Make
+// (Lsh32x32 <config.fe.TypeUInt32()> (Int64Lo x) (Const32 <config.fe.TypeUInt32()> [c-32]))
+// (Const32 <config.fe.TypeUInt32()> [0]))
+//(Rsh64x32 x (Const32 [c])) && c < 64 && c > 32 ->
+// (Int64Make
+// (Signmask (Int64Hi x))
+// (Rsh32x32 <config.fe.TypeInt32()> (Int64Hi x) (Const32 <config.fe.TypeUInt32()> [c-32])))
+//(Rsh64Ux32 x (Const32 [c])) && c < 64 && c > 32 ->
+// (Int64Make
+// (Const32 <config.fe.TypeUInt32()> [0])
+// (Rsh32Ux32 <config.fe.TypeUInt32()> (Int64Hi x) (Const32 <config.fe.TypeUInt32()> [c-32])))
+//
+//(Lsh64x32 x (Const32 [32])) -> (Int64Make (Int64Lo x) (Const32 <config.fe.TypeUInt32()> [0]))
+//(Rsh64x32 x (Const32 [32])) -> (Int64Make (Signmask (Int64Hi x)) (Int64Hi x))
+//(Rsh64Ux32 x (Const32 [32])) -> (Int64Make (Const32 <config.fe.TypeUInt32()> [0]) (Int64Hi x))
+//
+//(Lsh64x32 x (Const32 [c])) && c < 32 && c > 0 ->
+// (Int64Make
+// (Or32 <config.fe.TypeUInt32()>
+// (Lsh32x32 <config.fe.TypeUInt32()> (Int64Hi x) (Const32 <config.fe.TypeUInt32()> [c]))
+// (Rsh32Ux32 <config.fe.TypeUInt32()> (Int64Lo x) (Const32 <config.fe.TypeUInt32()> [32-c])))
+// (Lsh32x32 <config.fe.TypeUInt32()> (Int64Lo x) (Const32 <config.fe.TypeUInt32()> [c])))
+//(Rsh64x32 x (Const32 [c])) && c < 32 && c > 0 ->
+// (Int64Make
+// (Rsh32x32 <config.fe.TypeInt32()> (Int64Hi x) (Const32 <config.fe.TypeUInt32()> [c]))
+// (Or32 <config.fe.TypeUInt32()>
+// (Rsh32Ux32 <config.fe.TypeUInt32()> (Int64Lo x) (Const32 <config.fe.TypeUInt32()> [c]))
+// (Lsh32x32 <config.fe.TypeUInt32()> (Int64Hi x) (Const32 <config.fe.TypeUInt32()> [32-c]))))
+//(Rsh64Ux32 x (Const32 [c])) && c < 32 && c > 0 ->
+// (Int64Make
+// (Rsh32Ux32 <config.fe.TypeUInt32()> (Int64Hi x) (Const32 <config.fe.TypeUInt32()> [c]))
+// (Or32 <config.fe.TypeUInt32()>
+// (Rsh32Ux32 <config.fe.TypeUInt32()> (Int64Lo x) (Const32 <config.fe.TypeUInt32()> [c]))
+// (Lsh32x32 <config.fe.TypeUInt32()> (Int64Hi x) (Const32 <config.fe.TypeUInt32()> [32-c]))))
+//
+//(Lsh64x32 x (Const32 [0])) -> x
+//(Rsh64x32 x (Const32 [0])) -> x
+//(Rsh64Ux32 x (Const32 [0])) -> x
+
+(Lrot64 (Int64Make hi lo) [c]) && c <= 32 ->
+ (Int64Make
+ (Or32 <config.fe.TypeUInt32()>
+ (Lsh32x32 <config.fe.TypeUInt32()> hi (Const32 <config.fe.TypeUInt32()> [c]))
+ (Rsh32Ux32 <config.fe.TypeUInt32()> lo (Const32 <config.fe.TypeUInt32()> [32-c])))
+ (Or32 <config.fe.TypeUInt32()>
+ (Lsh32x32 <config.fe.TypeUInt32()> lo (Const32 <config.fe.TypeUInt32()> [c]))
+ (Rsh32Ux32 <config.fe.TypeUInt32()> hi (Const32 <config.fe.TypeUInt32()> [32-c]))))
+(Lrot64 (Int64Make hi lo) [c]) && c > 32 -> (Lrot64 (Int64Make lo hi) [c-32])
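A Go sketch of the rotation identity; the c > 32 case swaps the halves first, exactly as the second rule does:

func lrot64(hi, lo uint32, c uint32) (rhi, rlo uint32) {
	if c > 32 {
		hi, lo, c = lo, hi, c-32 // (Lrot64 (Int64Make lo hi) [c-32])
	}
	rhi = hi<<c | lo>>(32-c)
	rlo = lo<<c | hi>>(32-c)
	return
}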
+
+(Const64 <t> [c]) && t.IsSigned() ->
+ (Int64Make (Const32 <config.fe.TypeInt32()> [c>>32]) (Const32 <config.fe.TypeUInt32()> [int64(int32(c))]))
+(Const64 <t> [c]) && !t.IsSigned() ->
+ (Int64Make (Const32 <config.fe.TypeUInt32()> [c>>32]) (Const32 <config.fe.TypeUInt32()> [int64(int32(c))]))
+
+(Eq64 x y) ->
+ (AndB
+ (Eq32 (Int64Hi x) (Int64Hi y))
+ (Eq32 (Int64Lo x) (Int64Lo y)))
+
+(Neq64 x y) ->
+ (OrB
+ (Neq32 (Int64Hi x) (Int64Hi y))
+ (Neq32 (Int64Lo x) (Int64Lo y)))
+
+(Less64U x y) ->
+ (OrB
+ (Less32U (Int64Hi x) (Int64Hi y))
+ (AndB
+ (Eq32 (Int64Hi x) (Int64Hi y))
+ (Less32U (Int64Lo x) (Int64Lo y))))
+
+(Leq64U x y) ->
+ (OrB
+ (Less32U (Int64Hi x) (Int64Hi y))
+ (AndB
+ (Eq32 (Int64Hi x) (Int64Hi y))
+ (Leq32U (Int64Lo x) (Int64Lo y))))
+
+(Greater64U x y) ->
+ (OrB
+ (Greater32U (Int64Hi x) (Int64Hi y))
+ (AndB
+ (Eq32 (Int64Hi x) (Int64Hi y))
+ (Greater32U (Int64Lo x) (Int64Lo y))))
+
+(Geq64U x y) ->
+ (OrB
+ (Greater32U (Int64Hi x) (Int64Hi y))
+ (AndB
+ (Eq32 (Int64Hi x) (Int64Hi y))
+ (Geq32U (Int64Lo x) (Int64Lo y))))
+
+(Less64 x y) ->
+ (OrB
+ (Less32 (Int64Hi x) (Int64Hi y))
+ (AndB
+ (Eq32 (Int64Hi x) (Int64Hi y))
+ (Less32U (Int64Lo x) (Int64Lo y))))
+
+(Leq64 x y) ->
+ (OrB
+ (Less32 (Int64Hi x) (Int64Hi y))
+ (AndB
+ (Eq32 (Int64Hi x) (Int64Hi y))
+ (Leq32U (Int64Lo x) (Int64Lo y))))
+
+(Greater64 x y) ->
+ (OrB
+ (Greater32 (Int64Hi x) (Int64Hi y))
+ (AndB
+ (Eq32 (Int64Hi x) (Int64Hi y))
+ (Greater32U (Int64Lo x) (Int64Lo y))))
+
+(Geq64 x y) ->
+ (OrB
+ (Greater32 (Int64Hi x) (Int64Hi y))
+ (AndB
+ (Eq32 (Int64Hi x) (Int64Hi y))
+ (Geq32U (Int64Lo x) (Int64Lo y))))
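These comparison decompositions read off directly in Go: compare the high words first (signed or unsigned as appropriate), and fall back to an unsigned low-word comparison on a tie:

func less64(xhi int32, xlo uint32, yhi int32, ylo uint32) bool {
	return xhi < yhi || (xhi == yhi && xlo < ylo)
}

func less64u(xhi, xlo, yhi, ylo uint32) bool {
	return xhi < yhi || (xhi == yhi && xlo < ylo)
}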
diff --git a/src/cmd/compile/internal/ssa/gen/dec64Ops.go b/src/cmd/compile/internal/ssa/gen/dec64Ops.go
new file mode 100644
index 0000000000..8c5883bc56
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/dec64Ops.go
@@ -0,0 +1,20 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+var dec64Ops = []opData{}
+
+var dec64Blocks = []blockData{}
+
+func init() {
+ archs = append(archs, arch{
+ name: "dec64",
+ ops: dec64Ops,
+ blocks: dec64Blocks,
+ generic: true,
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules
index f5d1c98a83..c0fe802aaa 100644
--- a/src/cmd/compile/internal/ssa/gen/generic.rules
+++ b/src/cmd/compile/internal/ssa/gen/generic.rules
@@ -67,6 +67,12 @@
(Const32F [f2i(float64(i2f32(c) * i2f32(d)))])
(Mul64F (Const64F [c]) (Const64F [d])) -> (Const64F [f2i(i2f(c) * i2f(d))])
+// Convert x * -1 to -x. The front-end catches some but not all of these.
+(Mul8 (Const8 [-1]) x) -> (Neg8 x)
+(Mul16 (Const16 [-1]) x) -> (Neg16 x)
+(Mul32 (Const32 [-1]) x) -> (Neg32 x)
+(Mul64 (Const64 [-1]) x) -> (Neg64 x)
+
(Mod8 (Const8 [c]) (Const8 [d])) && d != 0 -> (Const8 [int64(int8(c % d))])
(Mod16 (Const16 [c]) (Const16 [d])) && d != 0 -> (Const16 [int64(int16(c % d))])
(Mod32 (Const32 [c]) (Const32 [d])) && d != 0 -> (Const32 [int64(int32(c % d))])
@@ -625,8 +631,10 @@
(Store [t.FieldType(0).Size()] dst f0 mem))))
// un-SSAable values use mem->mem copies
-(Store [size] dst (Load <t> src mem) mem) && !config.fe.CanSSA(t) -> (Move [size] dst src mem)
-(Store [size] dst (Load <t> src mem) (VarDef {x} mem)) && !config.fe.CanSSA(t) -> (Move [size] dst src (VarDef {x} mem))
+(Store [size] dst (Load <t> src mem) mem) && !config.fe.CanSSA(t) ->
+ (Move [MakeSizeAndAlign(size, t.Alignment()).Int64()] dst src mem)
+(Store [size] dst (Load <t> src mem) (VarDef {x} mem)) && !config.fe.CanSSA(t) ->
+ (Move [MakeSizeAndAlign(size, t.Alignment()).Int64()] dst src (VarDef {x} mem))
// string ops
// Decomposing StringMake and lowering of StringPtr and StringLen
@@ -832,3 +840,23 @@
-> (Sub64 x (Mul64 <t> (Div64 <t> x (Const64 <t> [c])) (Const64 <t> [c])))
(Mod64u <t> x (Const64 [c])) && x.Op != OpConst64 && umagic64ok(c)
-> (Sub64 x (Mul64 <t> (Div64u <t> x (Const64 <t> [c])) (Const64 <t> [c])))
+
+// floating point optimizations
+(Add32F x (Const32F [0])) -> x
+(Add32F (Const32F [0]) x) -> x
+(Add64F x (Const64F [0])) -> x
+(Add64F (Const64F [0]) x) -> x
+(Sub32F x (Const32F [0])) -> x
+(Sub64F x (Const64F [0])) -> x
+(Mul32F x (Const32F [f2i(1)])) -> x
+(Mul32F (Const32F [f2i(1)]) x) -> x
+(Mul64F x (Const64F [f2i(1)])) -> x
+(Mul64F (Const64F [f2i(1)]) x) -> x
+(Mul32F x (Const32F [f2i(-1)])) -> (Neg32F x)
+(Mul32F (Const32F [f2i(-1)]) x) -> (Neg32F x)
+(Mul64F x (Const64F [f2i(-1)])) -> (Neg64F x)
+(Mul64F (Const64F [f2i(-1)]) x) -> (Neg64F x)
+(Div32F x (Const32F [f2i(1)])) -> x
+(Div64F x (Const64F [f2i(1)])) -> x
+(Div32F x (Const32F [f2i(-1)])) -> (Neg32F x)
+(Div64F x (Const64F [f2i(-1)])) -> (Neg64F x)
diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go
index 8388ea8946..c36aea7024 100644
--- a/src/cmd/compile/internal/ssa/gen/genericOps.go
+++ b/src/cmd/compile/internal/ssa/gen/genericOps.go
@@ -173,76 +173,76 @@ var genericOps = []opData{
{name: "Lrot64", argLength: 1, aux: "Int64"},
// 2-input comparisons
- {name: "Eq8", argLength: 2, commutative: true}, // arg0 == arg1
- {name: "Eq16", argLength: 2, commutative: true},
- {name: "Eq32", argLength: 2, commutative: true},
- {name: "Eq64", argLength: 2, commutative: true},
- {name: "EqPtr", argLength: 2, commutative: true},
- {name: "EqInter", argLength: 2}, // arg0 or arg1 is nil; other cases handled by frontend
- {name: "EqSlice", argLength: 2}, // arg0 or arg1 is nil; other cases handled by frontend
- {name: "Eq32F", argLength: 2},
- {name: "Eq64F", argLength: 2},
-
- {name: "Neq8", argLength: 2, commutative: true}, // arg0 != arg1
- {name: "Neq16", argLength: 2, commutative: true},
- {name: "Neq32", argLength: 2, commutative: true},
- {name: "Neq64", argLength: 2, commutative: true},
- {name: "NeqPtr", argLength: 2, commutative: true},
- {name: "NeqInter", argLength: 2}, // arg0 or arg1 is nil; other cases handled by frontend
- {name: "NeqSlice", argLength: 2}, // arg0 or arg1 is nil; other cases handled by frontend
- {name: "Neq32F", argLength: 2},
+ {name: "Eq8", argLength: 2, commutative: true, typ: "Bool"}, // arg0 == arg1
+ {name: "Eq16", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "Eq32", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "Eq64", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "EqPtr", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "EqInter", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
+ {name: "EqSlice", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
+ {name: "Eq32F", argLength: 2, typ: "Bool"},
+ {name: "Eq64F", argLength: 2, typ: "Bool"},
+
+ {name: "Neq8", argLength: 2, commutative: true, typ: "Bool"}, // arg0 != arg1
+ {name: "Neq16", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "Neq32", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "Neq64", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "NeqPtr", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "NeqInter", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
+ {name: "NeqSlice", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
+ {name: "Neq32F", argLength: 2, typ: "Bool"},
{name: "Neq64F", argLength: 2},
- {name: "Less8", argLength: 2}, // arg0 < arg1, signed
- {name: "Less8U", argLength: 2}, // arg0 < arg1, unsigned
- {name: "Less16", argLength: 2},
- {name: "Less16U", argLength: 2},
- {name: "Less32", argLength: 2},
- {name: "Less32U", argLength: 2},
- {name: "Less64", argLength: 2},
- {name: "Less64U", argLength: 2},
- {name: "Less32F", argLength: 2},
- {name: "Less64F", argLength: 2},
-
- {name: "Leq8", argLength: 2}, // arg0 <= arg1, signed
- {name: "Leq8U", argLength: 2}, // arg0 <= arg1, unsigned
- {name: "Leq16", argLength: 2},
- {name: "Leq16U", argLength: 2},
- {name: "Leq32", argLength: 2},
- {name: "Leq32U", argLength: 2},
- {name: "Leq64", argLength: 2},
- {name: "Leq64U", argLength: 2},
- {name: "Leq32F", argLength: 2},
- {name: "Leq64F", argLength: 2},
-
- {name: "Greater8", argLength: 2}, // arg0 > arg1, signed
- {name: "Greater8U", argLength: 2}, // arg0 > arg1, unsigned
- {name: "Greater16", argLength: 2},
- {name: "Greater16U", argLength: 2},
- {name: "Greater32", argLength: 2},
- {name: "Greater32U", argLength: 2},
- {name: "Greater64", argLength: 2},
- {name: "Greater64U", argLength: 2},
- {name: "Greater32F", argLength: 2},
- {name: "Greater64F", argLength: 2},
-
- {name: "Geq8", argLength: 2}, // arg0 <= arg1, signed
- {name: "Geq8U", argLength: 2}, // arg0 <= arg1, unsigned
- {name: "Geq16", argLength: 2},
- {name: "Geq16U", argLength: 2},
- {name: "Geq32", argLength: 2},
- {name: "Geq32U", argLength: 2},
- {name: "Geq64", argLength: 2},
- {name: "Geq64U", argLength: 2},
- {name: "Geq32F", argLength: 2},
- {name: "Geq64F", argLength: 2},
+ {name: "Less8", argLength: 2, typ: "Bool"}, // arg0 < arg1, signed
+ {name: "Less8U", argLength: 2, typ: "Bool"}, // arg0 < arg1, unsigned
+ {name: "Less16", argLength: 2, typ: "Bool"},
+ {name: "Less16U", argLength: 2, typ: "Bool"},
+ {name: "Less32", argLength: 2, typ: "Bool"},
+ {name: "Less32U", argLength: 2, typ: "Bool"},
+ {name: "Less64", argLength: 2, typ: "Bool"},
+ {name: "Less64U", argLength: 2, typ: "Bool"},
+ {name: "Less32F", argLength: 2, typ: "Bool"},
+ {name: "Less64F", argLength: 2, typ: "Bool"},
+
+ {name: "Leq8", argLength: 2, typ: "Bool"}, // arg0 <= arg1, signed
+ {name: "Leq8U", argLength: 2, typ: "Bool"}, // arg0 <= arg1, unsigned
+ {name: "Leq16", argLength: 2, typ: "Bool"},
+ {name: "Leq16U", argLength: 2, typ: "Bool"},
+ {name: "Leq32", argLength: 2, typ: "Bool"},
+ {name: "Leq32U", argLength: 2, typ: "Bool"},
+ {name: "Leq64", argLength: 2, typ: "Bool"},
+ {name: "Leq64U", argLength: 2, typ: "Bool"},
+ {name: "Leq32F", argLength: 2, typ: "Bool"},
+ {name: "Leq64F", argLength: 2, typ: "Bool"},
+
+ {name: "Greater8", argLength: 2, typ: "Bool"}, // arg0 > arg1, signed
+ {name: "Greater8U", argLength: 2, typ: "Bool"}, // arg0 > arg1, unsigned
+ {name: "Greater16", argLength: 2, typ: "Bool"},
+ {name: "Greater16U", argLength: 2, typ: "Bool"},
+ {name: "Greater32", argLength: 2, typ: "Bool"},
+ {name: "Greater32U", argLength: 2, typ: "Bool"},
+ {name: "Greater64", argLength: 2, typ: "Bool"},
+ {name: "Greater64U", argLength: 2, typ: "Bool"},
+ {name: "Greater32F", argLength: 2, typ: "Bool"},
+ {name: "Greater64F", argLength: 2, typ: "Bool"},
+
+ {name: "Geq8", argLength: 2, typ: "Bool"}, // arg0 <= arg1, signed
+ {name: "Geq8U", argLength: 2, typ: "Bool"}, // arg0 <= arg1, unsigned
+ {name: "Geq16", argLength: 2, typ: "Bool"},
+ {name: "Geq16U", argLength: 2, typ: "Bool"},
+ {name: "Geq32", argLength: 2, typ: "Bool"},
+ {name: "Geq32U", argLength: 2, typ: "Bool"},
+ {name: "Geq64", argLength: 2, typ: "Bool"},
+ {name: "Geq64U", argLength: 2, typ: "Bool"},
+ {name: "Geq32F", argLength: 2, typ: "Bool"},
+ {name: "Geq64F", argLength: 2, typ: "Bool"},
// boolean ops
- {name: "AndB", argLength: 2}, // arg0 && arg1 (not shortcircuited)
- {name: "OrB", argLength: 2}, // arg0 || arg1 (not shortcircuited)
- {name: "EqB", argLength: 2}, // arg0 == arg1
- {name: "NeqB", argLength: 2}, // arg0 != arg1
- {name: "Not", argLength: 1}, // !arg0, boolean
+ {name: "AndB", argLength: 2, typ: "Bool"}, // arg0 && arg1 (not shortcircuited)
+ {name: "OrB", argLength: 2, typ: "Bool"}, // arg0 || arg1 (not shortcircuited)
+ {name: "EqB", argLength: 2, typ: "Bool"}, // arg0 == arg1
+ {name: "NeqB", argLength: 2, typ: "Bool"}, // arg0 != arg1
+ {name: "Not", argLength: 1, typ: "Bool"}, // !arg0, boolean
// 1-input ops
{name: "Neg8", argLength: 1}, // -arg0
@@ -312,8 +312,8 @@ var genericOps = []opData{
// Memory operations
{name: "Load", argLength: 2}, // Load from arg0. arg1=memory
{name: "Store", argLength: 3, typ: "Mem", aux: "Int64"}, // Store arg1 to arg0. arg2=memory, auxint=size. Returns memory.
- {name: "Move", argLength: 3, aux: "Int64"}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size. Returns memory.
- {name: "Zero", argLength: 2, aux: "Int64"}, // arg0=destptr, arg1=mem, auxint=size. Returns memory.
+ {name: "Move", argLength: 3, typ: "Mem", aux: "Int64"}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size. Returns memory.
+ {name: "Zero", argLength: 2, typ: "Mem", aux: "Int64"}, // arg0=destptr, arg1=mem, auxint=size. Returns memory.
// Function calls. Arguments to the call have already been written to the stack.
// Return values appear on the stack. The method receiver, if any, is treated
@@ -326,17 +326,17 @@ var genericOps = []opData{
// Conversions: signed extensions, zero (unsigned) extensions, truncations
{name: "SignExt8to16", argLength: 1, typ: "Int16"},
- {name: "SignExt8to32", argLength: 1},
- {name: "SignExt8to64", argLength: 1},
- {name: "SignExt16to32", argLength: 1},
- {name: "SignExt16to64", argLength: 1},
- {name: "SignExt32to64", argLength: 1},
+ {name: "SignExt8to32", argLength: 1, typ: "Int32"},
+ {name: "SignExt8to64", argLength: 1, typ: "Int64"},
+ {name: "SignExt16to32", argLength: 1, typ: "Int32"},
+ {name: "SignExt16to64", argLength: 1, typ: "Int64"},
+ {name: "SignExt32to64", argLength: 1, typ: "Int64"},
{name: "ZeroExt8to16", argLength: 1, typ: "UInt16"},
- {name: "ZeroExt8to32", argLength: 1},
- {name: "ZeroExt8to64", argLength: 1},
- {name: "ZeroExt16to32", argLength: 1},
- {name: "ZeroExt16to64", argLength: 1},
- {name: "ZeroExt32to64", argLength: 1},
+ {name: "ZeroExt8to32", argLength: 1, typ: "UInt32"},
+ {name: "ZeroExt8to64", argLength: 1, typ: "UInt64"},
+ {name: "ZeroExt16to32", argLength: 1, typ: "UInt32"},
+ {name: "ZeroExt16to64", argLength: 1, typ: "UInt64"},
+ {name: "ZeroExt32to64", argLength: 1, typ: "UInt64"},
{name: "Trunc16to8", argLength: 1},
{name: "Trunc32to8", argLength: 1},
{name: "Trunc32to16", argLength: 1},
@@ -416,6 +416,31 @@ var genericOps = []opData{
{name: "VarKill", argLength: 1, aux: "Sym"}, // aux is a *gc.Node of a variable that is known to be dead. arg0=mem, returns mem
{name: "VarLive", argLength: 1, aux: "Sym"}, // aux is a *gc.Node of a variable that must be kept live. arg0=mem, returns mem
{name: "KeepAlive", argLength: 2, typ: "Mem"}, // arg[0] is a value that must be kept alive until this mark. arg[1]=mem, returns mem
+
+ // Ops for breaking 64-bit operations on 32-bit architectures
+ {name: "Int64Make", argLength: 2, typ: "UInt64"}, // arg0=hi, arg1=lo
+ {name: "Int64Hi", argLength: 1, typ: "UInt32"}, // high 32-bit of arg0
+ {name: "Int64Lo", argLength: 1, typ: "UInt32"}, // low 32-bit of arg0
+
+ {name: "Add32carry", argLength: 2, commutative: true, typ: "(Flags,UInt32)"}, // arg0 + arg1, returns (carry, value)
+ {name: "Add32withcarry", argLength: 3, commutative: true}, // arg0 + arg1 + arg2, arg2=carry (0 or 1)
+
+ {name: "Sub32carry", argLength: 2, typ: "(Flags,UInt32)"}, // arg0 - arg1, returns (carry, value)
+ {name: "Sub32withcarry", argLength: 3}, // arg0 - arg1 - arg2, arg2=carry (0 or 1)
+
+ {name: "Mul32uhilo", argLength: 2, typ: "(UInt32,UInt32)"}, // arg0 * arg1, returns (hi, lo)
+
+ {name: "Signmask", argLength: 1, typ: "Int32"}, // 0 if arg0 >= 0, -1 if arg0 < 0
+ {name: "Zeromask", argLength: 1, typ: "UInt32"}, // 0 if arg0 == 0, 0xffffffff if arg0 != 0
+
+ {name: "Cvt32Uto32F", argLength: 1}, // uint32 -> float32, only used on 32-bit arch
+ {name: "Cvt32Uto64F", argLength: 1}, // uint32 -> float64, only used on 32-bit arch
+ {name: "Cvt32Fto32U", argLength: 1}, // float32 -> uint32, only used on 32-bit arch
+ {name: "Cvt64Fto32U", argLength: 1}, // float64 -> uint32, only used on 32-bit arch
+
+ // pseudo-ops for breaking Tuple
+ {name: "Select0", argLength: 1}, // the first component of a tuple
+ {name: "Select1", argLength: 1}, // the second component of a tuple
}
// kind control successors implicit exit
diff --git a/src/cmd/compile/internal/ssa/gen/main.go b/src/cmd/compile/internal/ssa/gen/main.go
index 2aec4a324b..19c1bc7163 100644
--- a/src/cmd/compile/internal/ssa/gen/main.go
+++ b/src/cmd/compile/internal/ssa/gen/main.go
@@ -21,13 +21,16 @@ import (
)
type arch struct {
- name string
- pkg string // obj package to import for this arch.
- genfile string // source file containing opcode code generation.
- ops []opData
- blocks []blockData
- regnames []string
- generic bool
+ name string
+ pkg string // obj package to import for this arch.
+ genfile string // source file containing opcode code generation.
+ ops []opData
+ blocks []blockData
+ regnames []string
+ gpregmask regMask
+ fpregmask regMask
+ framepointerreg int8
+ generic bool
}
type opData struct {
@@ -38,8 +41,9 @@ type opData struct {
aux string
rematerializeable bool
argLength int32 // number of arguments, if -1, then this operation has a variable number of arguments
- commutative bool // this operation is commutative (e.g. addition)
- resultInArg0 bool // v and v.Args[0] must be allocated to the same register
+ commutative bool // this operation is commutative on its first 2 arguments (e.g. addition)
+ resultInArg0 bool // last output of v and v.Args[0] must be allocated to the same register
+ clobberFlags bool // this op clobbers flags register
}
type blockData struct {
@@ -73,6 +77,7 @@ var archs []arch
func main() {
flag.Parse()
+ sort.Sort(ArchsByName(archs))
genOp()
genLower()
}
@@ -155,13 +160,16 @@ func genOp() {
}
if v.resultInArg0 {
fmt.Fprintln(w, "resultInArg0: true,")
- if v.reg.inputs[0] != v.reg.outputs[0] {
- log.Fatalf("input[0] and output registers must be equal for %s", v.name)
+ if v.reg.inputs[0] != v.reg.outputs[len(v.reg.outputs)-1] {
+ log.Fatalf("input[0] and last output register must be equal for %s", v.name)
}
- if v.commutative && v.reg.inputs[1] != v.reg.outputs[0] {
- log.Fatalf("input[1] and output registers must be equal for %s", v.name)
+ if v.commutative && v.reg.inputs[1] != v.reg.outputs[len(v.reg.outputs)-1] {
+ log.Fatalf("input[1] and last output register must be equal for %s", v.name)
}
}
+ if v.clobberFlags {
+ fmt.Fprintln(w, "clobberFlags: true,")
+ }
if a.name == "generic" {
fmt.Fprintln(w, "generic:true,")
fmt.Fprintln(w, "},") // close op
@@ -191,14 +199,22 @@ func genOp() {
}
fmt.Fprintln(w, "},")
}
+
if v.reg.clobbers > 0 {
fmt.Fprintf(w, "clobbers: %d,%s\n", v.reg.clobbers, a.regMaskComment(v.reg.clobbers))
}
+
// reg outputs
- if len(v.reg.outputs) > 0 {
- fmt.Fprintln(w, "outputs: []regMask{")
- for _, r := range v.reg.outputs {
- fmt.Fprintf(w, "%d,%s\n", r, a.regMaskComment(r))
+ s = s[:0]
+ for i, r := range v.reg.outputs {
+ s = append(s, intPair{countRegs(r), i})
+ }
+ if len(s) > 0 {
+ sort.Sort(byKey(s))
+ fmt.Fprintln(w, "outputs: []outputInfo{")
+ for _, p := range s {
+ r := v.reg.outputs[p.val]
+ fmt.Fprintf(w, "{%d,%d},%s\n", p.val, r, a.regMaskComment(r))
}
fmt.Fprintln(w, "},")
}
@@ -223,6 +239,9 @@ func genOp() {
fmt.Fprintf(w, " {%d, \"%s\"},\n", i, r)
}
fmt.Fprintln(w, "}")
+ fmt.Fprintf(w, "var gpRegMask%s = regMask(%d)\n", a.name, a.gpregmask)
+ fmt.Fprintf(w, "var fpRegMask%s = regMask(%d)\n", a.name, a.fpregmask)
+ fmt.Fprintf(w, "var framepointerReg%s = int8(%d)\n", a.name, a.framepointerreg)
}
// gofmt result
@@ -298,3 +317,9 @@ type byKey []intPair
func (a byKey) Len() int { return len(a) }
func (a byKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byKey) Less(i, j int) bool { return a[i].key < a[j].key }
+
+type ArchsByName []arch
+
+func (x ArchsByName) Len() int { return len(x) }
+func (x ArchsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x ArchsByName) Less(i, j int) bool { return x[i].name < x[j].name }
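ArchsByName is a conventional sort.Interface implementation. On toolchains with sort.Slice (Go 1.8 and later) the same ordering could be written inline, which presumably was not yet an option when this was written:

sort.Slice(archs, func(i, j int) bool { return archs[i].name < archs[j].name })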
diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go
index 0fc5749f1d..0cb428b6fa 100644
--- a/src/cmd/compile/internal/ssa/gen/rulegen.go
+++ b/src/cmd/compile/internal/ssa/gen/rulegen.go
@@ -117,15 +117,17 @@ func genRules(arch arch) {
if unbalanced(rule) {
continue
}
- op := strings.Split(rule, " ")[0][1:]
- if op[len(op)-1] == ')' {
- op = op[:len(op)-1] // rule has only opcode, e.g. (ConstNil) -> ...
- }
+
loc := fmt.Sprintf("%s.rules:%d", arch.name, ruleLineno)
- if isBlock(op, arch) {
- blockrules[op] = append(blockrules[op], Rule{rule: rule, loc: loc})
+ r := Rule{rule: rule, loc: loc}
+ if rawop := strings.Split(rule, " ")[0][1:]; isBlock(rawop, arch) {
+ blockrules[rawop] = append(blockrules[rawop], r)
} else {
- oprules[op] = append(oprules[op], Rule{rule: rule, loc: loc})
+ // Do fancier value op matching.
+ match, _, _ := r.parse()
+ op, oparch, _, _, _, _ := parseValue(match, arch, loc)
+ opname := fmt.Sprintf("Op%s%s", oparch, op.name)
+ oprules[opname] = append(oprules[opname], r)
}
rule = ""
ruleLineno = 0
@@ -157,8 +159,8 @@ func genRules(arch arch) {
fmt.Fprintf(w, "func rewriteValue%s(v *Value, config *Config) bool {\n", arch.name)
fmt.Fprintf(w, "switch v.Op {\n")
for _, op := range ops {
- fmt.Fprintf(w, "case %s:\n", opName(op, arch))
- fmt.Fprintf(w, "return rewriteValue%s_%s(v, config)\n", arch.name, opName(op, arch))
+ fmt.Fprintf(w, "case %s:\n", op)
+ fmt.Fprintf(w, "return rewriteValue%s_%s(v, config)\n", arch.name, op)
}
fmt.Fprintf(w, "}\n")
fmt.Fprintf(w, "return false\n")
@@ -167,7 +169,7 @@ func genRules(arch arch) {
// Generate a routine per op. Note that we don't make one giant routine
// because it is too big for some compilers.
for _, op := range ops {
- fmt.Fprintf(w, "func rewriteValue%s_%s(v *Value, config *Config) bool {\n", arch.name, opName(op, arch))
+ fmt.Fprintf(w, "func rewriteValue%s_%s(v *Value, config *Config) bool {\n", arch.name, op)
fmt.Fprintln(w, "b := v.Block")
fmt.Fprintln(w, "_ = b")
var canFail bool
@@ -334,141 +336,108 @@ func genMatch0(w io.Writer, arch arch, match, v string, m map[string]struct{}, t
}
canFail := false
- // split body up into regions. Split by spaces/tabs, except those
- // contained in () or {}.
- s := split(match[1 : len(match)-1]) // remove parens, then split
-
- // Find op record
- var op opData
- for _, x := range genericOps {
- if x.name == s[0] {
- op = x
- break
- }
- }
- for _, x := range arch.ops {
- if x.name == s[0] {
- op = x
- break
- }
- }
- if op.name == "" {
- log.Fatalf("%s: unknown op %s", loc, s[0])
- }
+ op, oparch, typ, auxint, aux, args := parseValue(match, arch, loc)
// check op
if !top {
- fmt.Fprintf(w, "if %s.Op != %s {\nbreak\n}\n", v, opName(s[0], arch))
+ fmt.Fprintf(w, "if %s.Op != Op%s%s {\nbreak\n}\n", v, oparch, op.name)
canFail = true
}
- // check type/aux/args
- argnum := 0
- for _, a := range s[1:] {
- if a[0] == '<' {
- // type restriction
- t := a[1 : len(a)-1] // remove <>
- if !isVariable(t) {
- // code. We must match the results of this code.
- fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, t)
+ if typ != "" {
+ if !isVariable(typ) {
+ // code. We must match the results of this code.
+ fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, typ)
+ canFail = true
+ } else {
+ // variable
+ if _, ok := m[typ]; ok {
+ // must match previous variable
+ fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, typ)
canFail = true
} else {
- // variable
- if _, ok := m[t]; ok {
- // must match previous variable
- fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, t)
- canFail = true
- } else {
- m[t] = struct{}{}
- fmt.Fprintf(w, "%s := %s.Type\n", t, v)
- }
+ m[typ] = struct{}{}
+ fmt.Fprintf(w, "%s := %s.Type\n", typ, v)
}
- } else if a[0] == '[' {
- // auxint restriction
- switch op.aux {
- case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "Float32", "Float64", "SymOff", "SymValAndOff", "SymInt32":
- default:
- log.Fatalf("%s: op %s %s can't have auxint", loc, op.name, op.aux)
- }
- x := a[1 : len(a)-1] // remove []
- if !isVariable(x) {
- // code
- fmt.Fprintf(w, "if %s.AuxInt != %s {\nbreak\n}\n", v, x)
+ }
+ }
+
+ if auxint != "" {
+ if !isVariable(auxint) {
+ // code
+ fmt.Fprintf(w, "if %s.AuxInt != %s {\nbreak\n}\n", v, auxint)
+ canFail = true
+ } else {
+ // variable
+ if _, ok := m[auxint]; ok {
+ fmt.Fprintf(w, "if %s.AuxInt != %s {\nbreak\n}\n", v, auxint)
canFail = true
} else {
- // variable
- if _, ok := m[x]; ok {
- fmt.Fprintf(w, "if %s.AuxInt != %s {\nbreak\n}\n", v, x)
- canFail = true
- } else {
- m[x] = struct{}{}
- fmt.Fprintf(w, "%s := %s.AuxInt\n", x, v)
- }
- }
- } else if a[0] == '{' {
- // aux restriction
- switch op.aux {
- case "String", "Sym", "SymOff", "SymValAndOff", "SymInt32":
- default:
- log.Fatalf("%s: op %s %s can't have aux", loc, op.name, op.aux)
+ m[auxint] = struct{}{}
+ fmt.Fprintf(w, "%s := %s.AuxInt\n", auxint, v)
}
- x := a[1 : len(a)-1] // remove {}
- if !isVariable(x) {
- // code
- fmt.Fprintf(w, "if %s.Aux != %s {\nbreak\n}\n", v, x)
+ }
+ }
+
+ if aux != "" {
+ if !isVariable(aux) {
+ // code
+ fmt.Fprintf(w, "if %s.Aux != %s {\nbreak\n}\n", v, aux)
+ canFail = true
+ } else {
+ // variable
+ if _, ok := m[aux]; ok {
+ fmt.Fprintf(w, "if %s.Aux != %s {\nbreak\n}\n", v, aux)
canFail = true
} else {
- // variable
- if _, ok := m[x]; ok {
- fmt.Fprintf(w, "if %s.Aux != %s {\nbreak\n}\n", v, x)
- canFail = true
- } else {
- m[x] = struct{}{}
- fmt.Fprintf(w, "%s := %s.Aux\n", x, v)
- }
+ m[aux] = struct{}{}
+ fmt.Fprintf(w, "%s := %s.Aux\n", aux, v)
}
- } else if a == "_" {
- argnum++
- } else if !strings.Contains(a, "(") {
+ }
+ }
+
+ for i, arg := range args {
+ if arg == "_" {
+ continue
+ }
+ if !strings.Contains(arg, "(") {
// leaf variable
- if _, ok := m[a]; ok {
+ if _, ok := m[arg]; ok {
// variable already has a definition. Check whether
// the old definition and the new definition match.
// For example, (add x x). Equality is just pointer equality
// on Values (so cse is important to do before lowering).
- fmt.Fprintf(w, "if %s != %s.Args[%d] {\nbreak\n}\n", a, v, argnum)
+ fmt.Fprintf(w, "if %s != %s.Args[%d] {\nbreak\n}\n", arg, v, i)
canFail = true
} else {
// remember that this variable references the given value
- m[a] = struct{}{}
- fmt.Fprintf(w, "%s := %s.Args[%d]\n", a, v, argnum)
+ m[arg] = struct{}{}
+ fmt.Fprintf(w, "%s := %s.Args[%d]\n", arg, v, i)
}
- argnum++
+ continue
+ }
+ // compound sexpr
+ var argname string
+ colon := strings.Index(arg, ":")
+ openparen := strings.Index(arg, "(")
+ if colon >= 0 && openparen >= 0 && colon < openparen {
+ // rule-specified name
+ argname = arg[:colon]
+ arg = arg[colon+1:]
} else {
- // compound sexpr
- var argname string
- colon := strings.Index(a, ":")
- openparen := strings.Index(a, "(")
- if colon >= 0 && openparen >= 0 && colon < openparen {
- // rule-specified name
- argname = a[:colon]
- a = a[colon+1:]
- } else {
- // autogenerated name
- argname = fmt.Sprintf("%s_%d", v, argnum)
- }
- fmt.Fprintf(w, "%s := %s.Args[%d]\n", argname, v, argnum)
- if genMatch0(w, arch, a, argname, m, false, loc) {
- canFail = true
- }
- argnum++
+ // autogenerated name
+ argname = fmt.Sprintf("%s_%d", v, i)
+ }
+ fmt.Fprintf(w, "%s := %s.Args[%d]\n", argname, v, i)
+ if genMatch0(w, arch, arg, argname, m, false, loc) {
+ canFail = true
}
}
+
if op.argLength == -1 {
- fmt.Fprintf(w, "if len(%s.Args) != %d {\nbreak\n}\n", v, argnum)
+ fmt.Fprintf(w, "if len(%s.Args) != %d {\nbreak\n}\n", v, len(args))
canFail = true
- } else if int(op.argLength) != argnum {
- log.Fatalf("%s: op %s should have %d args, has %d", loc, op.name, op.argLength, argnum)
}
return canFail
}
@@ -500,105 +469,44 @@ func genResult0(w io.Writer, arch arch, result string, alloc *int, top, move boo
return result
}
- s := split(result[1 : len(result)-1]) // remove parens, then split
-
- // Find op record
- var op opData
- for _, x := range genericOps {
- if x.name == s[0] {
- op = x
- break
- }
- }
- for _, x := range arch.ops {
- if x.name == s[0] {
- op = x
- break
- }
- }
- if op.name == "" {
- log.Fatalf("%s: unknown op %s", loc, s[0])
- }
+ op, oparch, typ, auxint, aux, args := parseValue(result, arch, loc)
// Find the type of the variable.
- var opType string
- var typeOverride bool
- for _, a := range s[1:] {
- if a[0] == '<' {
- // type restriction
- opType = a[1 : len(a)-1] // remove <>
- typeOverride = true
- break
- }
- }
- if opType == "" {
- // find default type, if any
- for _, op := range arch.ops {
- if op.name == s[0] && op.typ != "" {
- opType = typeName(op.typ)
- break
- }
- }
- }
- if opType == "" {
- for _, op := range genericOps {
- if op.name == s[0] && op.typ != "" {
- opType = typeName(op.typ)
- break
- }
- }
+ typeOverride := typ != ""
+ if typ == "" && op.typ != "" {
+ typ = typeName(op.typ)
}
+
var v string
if top && !move {
v = "v"
- fmt.Fprintf(w, "v.reset(%s)\n", opName(s[0], arch))
+ fmt.Fprintf(w, "v.reset(Op%s%s)\n", oparch, op.name)
if typeOverride {
- fmt.Fprintf(w, "v.Type = %s\n", opType)
+ fmt.Fprintf(w, "v.Type = %s\n", typ)
}
} else {
- if opType == "" {
- log.Fatalf("sub-expression %s (op=%s) must have a type", result, s[0])
+ if typ == "" {
+ log.Fatalf("sub-expression %s (op=Op%s%s) must have a type", result, oparch, op.name)
}
v = fmt.Sprintf("v%d", *alloc)
*alloc++
- fmt.Fprintf(w, "%s := b.NewValue0(v.Line, %s, %s)\n", v, opName(s[0], arch), opType)
+ fmt.Fprintf(w, "%s := b.NewValue0(v.Line, Op%s%s, %s)\n", v, oparch, op.name, typ)
if move && top {
// Rewrite original into a copy
fmt.Fprintf(w, "v.reset(OpCopy)\n")
fmt.Fprintf(w, "v.AddArg(%s)\n", v)
}
}
- argnum := 0
- for _, a := range s[1:] {
- if a[0] == '<' {
- // type restriction, handled above
- } else if a[0] == '[' {
- // auxint restriction
- switch op.aux {
- case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "Float32", "Float64", "SymOff", "SymValAndOff", "SymInt32":
- default:
- log.Fatalf("%s: op %s %s can't have auxint", loc, op.name, op.aux)
- }
- x := a[1 : len(a)-1] // remove []
- fmt.Fprintf(w, "%s.AuxInt = %s\n", v, x)
- } else if a[0] == '{' {
- // aux restriction
- switch op.aux {
- case "String", "Sym", "SymOff", "SymValAndOff", "SymInt32":
- default:
- log.Fatalf("%s: op %s %s can't have aux", loc, op.name, op.aux)
- }
- x := a[1 : len(a)-1] // remove {}
- fmt.Fprintf(w, "%s.Aux = %s\n", v, x)
- } else {
- // regular argument (sexpr or variable)
- x := genResult0(w, arch, a, alloc, false, move, loc)
- fmt.Fprintf(w, "%s.AddArg(%s)\n", v, x)
- argnum++
- }
+
+ if auxint != "" {
+ fmt.Fprintf(w, "%s.AuxInt = %s\n", v, auxint)
}
- if op.argLength != -1 && int(op.argLength) != argnum {
- log.Fatalf("%s: op %s should have %d args, has %d", loc, op.name, op.argLength, argnum)
+ if aux != "" {
+ fmt.Fprintf(w, "%s.Aux = %s\n", v, aux)
+ }
+ for _, arg := range args {
+ x := genResult0(w, arch, arg, alloc, false, move, loc)
+ fmt.Fprintf(w, "%s.AddArg(%s)\n", v, x)
}
return v
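
And a matching sketch for the result side: for a hypothetical top-level rewrite target (ADDLconst <t> [c] x), the code above would emit roughly the following (names illustrative):

v.reset(Op386ADDLconst)
v.Type = t   // emitted only when the rule overrides the type with <t>
v.AuxInt = c
v.AddArg(x)
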
@@ -666,16 +574,102 @@ func isBlock(name string, arch arch) bool {
return false
}
-// opName converts from an op name specified in a rule file to an Op enum.
-// if the name matches a generic op, returns "Op" plus the specified name.
-// Otherwise, returns "Op" plus arch name plus op name.
-func opName(name string, arch arch) string {
- for _, op := range genericOps {
- if op.name == name {
- return "Op" + name
+// parseValue parses a parenthesized value from a rule.
+// The value can be from the match or the result side.
+// It returns the op and the unparsed strings for the typ, auxint, and aux restrictions, and for all args.
+// oparch is the architecture that op is located in, or "" for generic.
+func parseValue(val string, arch arch, loc string) (op opData, oparch string, typ string, auxint string, aux string, args []string) {
+ val = val[1 : len(val)-1] // remove ()
+
+ // Split val up into regions.
+ // Split by spaces/tabs, except those contained in (), {}, [], or <>.
+ s := split(val)
+
+ // Extract restrictions and args.
+ for _, a := range s[1:] {
+ switch a[0] {
+ case '<':
+ typ = a[1 : len(a)-1] // remove <>
+ case '[':
+ auxint = a[1 : len(a)-1] // remove []
+ case '{':
+ aux = a[1 : len(a)-1] // remove {}
+ default:
+ args = append(args, a)
+ }
+ }
+
+ // Resolve the op.
+
+ // match reports whether x is a good op to select.
+ // If strict is true, rule generation might succeed.
+ // If strict is false, rule generation has failed,
+ // but we're trying to generate a useful error.
+ // Doing strict=true then strict=false allows
+ // precise op matching while retaining good error messages.
+ match := func(x opData, strict bool, archname string) bool {
+ if x.name != s[0] {
+ return false
+ }
+ if x.argLength != -1 && int(x.argLength) != len(args) {
+ if strict {
+ return false
+ } else {
+ log.Printf("%s: op %s (%s) should have %d args, has %d", loc, s[0], archname, x.argLength, len(args))
+ }
+ }
+ return true
+ }
+
+ for _, x := range genericOps {
+ if match(x, true, "generic") {
+ op = x
+ break
+ }
+ }
+ if arch.name != "generic" {
+ for _, x := range arch.ops {
+ if match(x, true, arch.name) {
+ if op.name != "" {
+ log.Fatalf("%s: matches for op %s found in both generic and %s", loc, op.name, arch.name)
+ }
+ op = x
+ oparch = arch.name
+ break
+ }
}
}
- return "Op" + arch.name + name
+
+ if op.name == "" {
+ // Failed to find the op.
+ // Run through everything again with strict=false
+ // to generate useful diagnostic messages before failing.
+ for _, x := range genericOps {
+ match(x, false, "generic")
+ }
+ for _, x := range arch.ops {
+ match(x, false, arch.name)
+ }
+ log.Fatalf("%s: unknown op %s", loc, s)
+ }
+
+ // Sanity check aux, auxint.
+ if auxint != "" {
+ switch op.aux {
+ case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "Float32", "Float64", "SymOff", "SymValAndOff", "SymInt32":
+ default:
+ log.Fatalf("%s: op %s %s can't have auxint", loc, op.name, op.aux)
+ }
+ }
+ if aux != "" {
+ switch op.aux {
+ case "String", "Sym", "SymOff", "SymValAndOff", "SymInt32":
+ default:
+ log.Fatalf("%s: op %s %s can't have aux", loc, op.name, op.aux)
+ }
+ }
+
+ return
}
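
A worked example may help; for the hypothetical rule text below, the loop above routes each region by its first character:

// parseValue("(MOVLstore <TypeMem> [off] {sym} ptr val mem)", arch, loc)
// would yield:
//   op     = the opData for MOVLstore (oparch = "386" if resolved there)
//   typ    = "TypeMem"
//   auxint = "off"
//   aux    = "sym"
//   args   = []string{"ptr", "val", "mem"}
// MOVLstore's aux type is SymOff, so both sanity checks above pass.
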
func blockName(name string, arch arch) string {
@@ -689,6 +683,13 @@ func blockName(name string, arch arch) string {
// typeName returns the string to use to generate a type.
func typeName(typ string) string {
+ if typ[0] == '(' {
+ ts := strings.Split(typ[1:len(typ)-1], ",")
+ if len(ts) != 2 {
+ panic("Tuple expect 2 arguments")
+ }
+ return "MakeTuple(" + typeName(ts[0]) + ", " + typeName(ts[1]) + ")"
+ }
switch typ {
case "Flags", "Mem", "Void", "Int128":
return "Type" + typ
diff --git a/src/cmd/compile/internal/ssa/html.go b/src/cmd/compile/internal/ssa/html.go
index 7432a07054..f60887a3e1 100644
--- a/src/cmd/compile/internal/ssa/html.go
+++ b/src/cmd/compile/internal/ssa/html.go
@@ -359,7 +359,7 @@ func (v *Value) LongHTML() string {
}
r := v.Block.Func.RegAlloc
if int(v.ID) < len(r) && r[v.ID] != nil {
- s += " : " + r[v.ID].Name()
+ s += " : " + html.EscapeString(r[v.ID].Name())
}
s += "</span>"
return s
diff --git a/src/cmd/compile/internal/ssa/location.go b/src/cmd/compile/internal/ssa/location.go
index 85f525565b..fc3a19ca35 100644
--- a/src/cmd/compile/internal/ssa/location.go
+++ b/src/cmd/compile/internal/ssa/location.go
@@ -36,3 +36,16 @@ func (s LocalSlot) Name() string {
}
return fmt.Sprintf("%s+%d[%s]", s.N, s.Off, s.Type)
}
+
+type LocPair [2]Location
+
+func (t LocPair) Name() string {
+ n0, n1 := "nil", "nil"
+ if t[0] != nil {
+ n0 = t[0].Name()
+ }
+ if t[1] != nil {
+ n1 = t[1].Name()
+ }
+ return fmt.Sprintf("<%s,%s>", n0, n1)
+}
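
A short sketch of the output shape, assuming the pair holds register locations (as for two-result ops like 386 MULLQU, whose halves land in DX and AX):

// LocPair{dx, ax}.Name()  -> "<DX,AX>"   (dx, ax: hypothetical Locations)
// LocPair{dx, nil}.Name() -> "<DX,nil>"  (a missing half prints as nil)
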
diff --git a/src/cmd/compile/internal/ssa/lower.go b/src/cmd/compile/internal/ssa/lower.go
index e271ed4ef6..0a86345a05 100644
--- a/src/cmd/compile/internal/ssa/lower.go
+++ b/src/cmd/compile/internal/ssa/lower.go
@@ -21,10 +21,15 @@ func checkLower(f *Func) {
continue // lowered
}
switch v.Op {
- case OpSP, OpSB, OpInitMem, OpArg, OpPhi, OpVarDef, OpVarKill, OpVarLive, OpKeepAlive:
+ case OpSP, OpSB, OpInitMem, OpArg, OpPhi, OpVarDef, OpVarKill, OpVarLive, OpKeepAlive, OpSelect0, OpSelect1:
continue // ok not to lower
+ case OpGetG:
+ if f.Config.hasGReg {
+ // has a hardware g register; regalloc takes care of it
+ continue // ok not to lower
+ }
}
- s := "not lowered: " + v.Op.String() + " " + v.Type.SimpleString()
+ s := "not lowered: " + v.String() + ", " + v.Op.String() + " " + v.Type.SimpleString()
for _, a := range v.Args {
s += " " + a.Type.SimpleString()
}
diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go
index cadbc7cd7a..987cbd7b56 100644
--- a/src/cmd/compile/internal/ssa/op.go
+++ b/src/cmd/compile/internal/ssa/op.go
@@ -26,7 +26,8 @@ type opInfo struct {
generic bool // this is a generic (arch-independent) opcode
rematerializeable bool // this op is rematerializeable
commutative bool // this operation is commutative (e.g. addition)
- resultInArg0 bool // v and v.Args[0] must be allocated to the same register
+ resultInArg0 bool // last output of v and v.Args[0] must be allocated to the same register
+ clobberFlags bool // this op clobbers flags register
}
type inputInfo struct {
@@ -34,10 +35,15 @@ type inputInfo struct {
regs regMask // allowed input registers
}
+type outputInfo struct {
+ idx int // index in output tuple
+ regs regMask // allowed output registers
+}
+
type regInfo struct {
inputs []inputInfo // ordered in register allocation order
clobbers regMask
- outputs []regMask // NOTE: values can only have 1 output for now.
+ outputs []outputInfo // ordered in register allocation order
}
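
With indexed outputs, a multi-result op can constrain each result separately. Compare the 386 MULLQU entry in opGen.go below, whose regInfo pins result 0 to DX and result 1 to AX (the DX:AX pair holding the 64-bit product, per the x86 MUL convention):

outputs: []outputInfo{
	{0, 4}, // DX: result 0
	{1, 1}, // AX: result 1
},
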
type auxType int8
@@ -124,3 +130,31 @@ func (x ValAndOff) add(off int64) int64 {
}
return makeValAndOff(x.Val(), x.Off()+off)
}
+
+// SizeAndAlign holds both the size and the alignment of a type,
+// used in Zero and Move ops.
+// The high 8 bits hold the alignment.
+// The low 56 bits hold the size.
+type SizeAndAlign int64
+
+func (x SizeAndAlign) Size() int64 {
+ return int64(x) & (1<<56 - 1)
+}
+func (x SizeAndAlign) Align() int64 {
+ return int64(uint64(x) >> 56)
+}
+func (x SizeAndAlign) Int64() int64 {
+ return int64(x)
+}
+func (x SizeAndAlign) String() string {
+ return fmt.Sprintf("size=%d,align=%d", x.Size(), x.Align())
+}
+func MakeSizeAndAlign(size, align int64) SizeAndAlign {
+ if size&^(1<<56-1) != 0 {
+ panic("size too big in SizeAndAlign")
+ }
+ if align >= 1<<8 {
+ panic("alignment too big in SizeAndAlign")
+ }
+ return SizeAndAlign(size | align<<56)
+}
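
A quick worked example of the packing:

sa := MakeSizeAndAlign(24, 8) // stored as 8<<56 | 24
_ = sa.Size()   // 24: the low 56 bits
_ = sa.Align()  // 8: the high 8 bits
_ = sa.String() // "size=24,align=8"
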
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index 383f1ae5f3..be8cdd60ac 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -6,12 +6,29 @@ package ssa
import (
"cmd/internal/obj"
"cmd/internal/obj/arm"
+ "cmd/internal/obj/arm64"
+ "cmd/internal/obj/ppc64"
"cmd/internal/obj/x86"
)
const (
BlockInvalid BlockKind = iota
+ Block386EQ
+ Block386NE
+ Block386LT
+ Block386LE
+ Block386GT
+ Block386GE
+ Block386ULT
+ Block386ULE
+ Block386UGT
+ Block386UGE
+ Block386EQF
+ Block386NEF
+ Block386ORD
+ Block386NAN
+
BlockAMD64EQ
BlockAMD64NE
BlockAMD64LT
@@ -38,6 +55,28 @@ const (
BlockARMUGT
BlockARMUGE
+ BlockARM64EQ
+ BlockARM64NE
+ BlockARM64LT
+ BlockARM64LE
+ BlockARM64GT
+ BlockARM64GE
+ BlockARM64ULT
+ BlockARM64ULE
+ BlockARM64UGT
+ BlockARM64UGE
+
+ BlockPPC64EQ
+ BlockPPC64NE
+ BlockPPC64LT
+ BlockPPC64LE
+ BlockPPC64GT
+ BlockPPC64GE
+ BlockPPC64FLT
+ BlockPPC64FLE
+ BlockPPC64FGT
+ BlockPPC64FGE
+
BlockPlain
BlockIf
BlockCall
@@ -52,6 +91,21 @@ const (
var blockString = [...]string{
BlockInvalid: "BlockInvalid",
+ Block386EQ: "EQ",
+ Block386NE: "NE",
+ Block386LT: "LT",
+ Block386LE: "LE",
+ Block386GT: "GT",
+ Block386GE: "GE",
+ Block386ULT: "ULT",
+ Block386ULE: "ULE",
+ Block386UGT: "UGT",
+ Block386UGE: "UGE",
+ Block386EQF: "EQF",
+ Block386NEF: "NEF",
+ Block386ORD: "ORD",
+ Block386NAN: "NAN",
+
BlockAMD64EQ: "EQ",
BlockAMD64NE: "NE",
BlockAMD64LT: "LT",
@@ -78,6 +132,28 @@ var blockString = [...]string{
BlockARMUGT: "UGT",
BlockARMUGE: "UGE",
+ BlockARM64EQ: "EQ",
+ BlockARM64NE: "NE",
+ BlockARM64LT: "LT",
+ BlockARM64LE: "LE",
+ BlockARM64GT: "GT",
+ BlockARM64GE: "GE",
+ BlockARM64ULT: "ULT",
+ BlockARM64ULE: "ULE",
+ BlockARM64UGT: "UGT",
+ BlockARM64UGE: "UGE",
+
+ BlockPPC64EQ: "EQ",
+ BlockPPC64NE: "NE",
+ BlockPPC64LT: "LT",
+ BlockPPC64LE: "LE",
+ BlockPPC64GT: "GT",
+ BlockPPC64GE: "GE",
+ BlockPPC64FLT: "FLT",
+ BlockPPC64FLE: "FLE",
+ BlockPPC64FGT: "FGT",
+ BlockPPC64FGE: "FGE",
+
BlockPlain: "Plain",
BlockIf: "If",
BlockCall: "Call",
@@ -94,6 +170,187 @@ func (k BlockKind) String() string { return blockString[k] }
const (
OpInvalid Op = iota
+ Op386ADDSS
+ Op386ADDSD
+ Op386SUBSS
+ Op386SUBSD
+ Op386MULSS
+ Op386MULSD
+ Op386DIVSS
+ Op386DIVSD
+ Op386MOVSSload
+ Op386MOVSDload
+ Op386MOVSSconst
+ Op386MOVSDconst
+ Op386MOVSSloadidx1
+ Op386MOVSSloadidx4
+ Op386MOVSDloadidx1
+ Op386MOVSDloadidx8
+ Op386MOVSSstore
+ Op386MOVSDstore
+ Op386MOVSSstoreidx1
+ Op386MOVSSstoreidx4
+ Op386MOVSDstoreidx1
+ Op386MOVSDstoreidx8
+ Op386ADDL
+ Op386ADDLconst
+ Op386ADDLcarry
+ Op386ADDLconstcarry
+ Op386ADCL
+ Op386ADCLconst
+ Op386SUBL
+ Op386SUBLconst
+ Op386SUBLcarry
+ Op386SUBLconstcarry
+ Op386SBBL
+ Op386SBBLconst
+ Op386MULL
+ Op386MULLconst
+ Op386HMULL
+ Op386HMULLU
+ Op386HMULW
+ Op386HMULB
+ Op386HMULWU
+ Op386HMULBU
+ Op386MULLQU
+ Op386DIVL
+ Op386DIVW
+ Op386DIVLU
+ Op386DIVWU
+ Op386MODL
+ Op386MODW
+ Op386MODLU
+ Op386MODWU
+ Op386ANDL
+ Op386ANDLconst
+ Op386ORL
+ Op386ORLconst
+ Op386XORL
+ Op386XORLconst
+ Op386CMPL
+ Op386CMPW
+ Op386CMPB
+ Op386CMPLconst
+ Op386CMPWconst
+ Op386CMPBconst
+ Op386UCOMISS
+ Op386UCOMISD
+ Op386TESTL
+ Op386TESTW
+ Op386TESTB
+ Op386TESTLconst
+ Op386TESTWconst
+ Op386TESTBconst
+ Op386SHLL
+ Op386SHLLconst
+ Op386SHRL
+ Op386SHRW
+ Op386SHRB
+ Op386SHRLconst
+ Op386SHRWconst
+ Op386SHRBconst
+ Op386SARL
+ Op386SARW
+ Op386SARB
+ Op386SARLconst
+ Op386SARWconst
+ Op386SARBconst
+ Op386ROLLconst
+ Op386ROLWconst
+ Op386ROLBconst
+ Op386NEGL
+ Op386NOTL
+ Op386BSFL
+ Op386BSFW
+ Op386BSRL
+ Op386BSRW
+ Op386BSWAPL
+ Op386SQRTSD
+ Op386SBBLcarrymask
+ Op386SETEQ
+ Op386SETNE
+ Op386SETL
+ Op386SETLE
+ Op386SETG
+ Op386SETGE
+ Op386SETB
+ Op386SETBE
+ Op386SETA
+ Op386SETAE
+ Op386SETEQF
+ Op386SETNEF
+ Op386SETORD
+ Op386SETNAN
+ Op386SETGF
+ Op386SETGEF
+ Op386MOVBLSX
+ Op386MOVBLZX
+ Op386MOVWLSX
+ Op386MOVWLZX
+ Op386MOVLconst
+ Op386CVTTSD2SL
+ Op386CVTTSS2SL
+ Op386CVTSL2SS
+ Op386CVTSL2SD
+ Op386CVTSD2SS
+ Op386CVTSS2SD
+ Op386PXOR
+ Op386LEAL
+ Op386LEAL1
+ Op386LEAL2
+ Op386LEAL4
+ Op386LEAL8
+ Op386MOVBload
+ Op386MOVBLSXload
+ Op386MOVWload
+ Op386MOVWLSXload
+ Op386MOVLload
+ Op386MOVBstore
+ Op386MOVWstore
+ Op386MOVLstore
+ Op386MOVBloadidx1
+ Op386MOVWloadidx1
+ Op386MOVWloadidx2
+ Op386MOVLloadidx1
+ Op386MOVLloadidx4
+ Op386MOVBstoreidx1
+ Op386MOVWstoreidx1
+ Op386MOVWstoreidx2
+ Op386MOVLstoreidx1
+ Op386MOVLstoreidx4
+ Op386MOVBstoreconst
+ Op386MOVWstoreconst
+ Op386MOVLstoreconst
+ Op386MOVBstoreconstidx1
+ Op386MOVWstoreconstidx1
+ Op386MOVWstoreconstidx2
+ Op386MOVLstoreconstidx1
+ Op386MOVLstoreconstidx4
+ Op386DUFFZERO
+ Op386REPSTOSL
+ Op386CALLstatic
+ Op386CALLclosure
+ Op386CALLdefer
+ Op386CALLgo
+ Op386CALLinter
+ Op386DUFFCOPY
+ Op386REPMOVSL
+ Op386InvertFlags
+ Op386LoweredGetG
+ Op386LoweredGetClosurePtr
+ Op386LoweredNilCheck
+ Op386MOVLconvert
+ Op386FlagEQ
+ Op386FlagLT_ULT
+ Op386FlagLT_UGT
+ Op386FlagGT_UGT
+ Op386FlagGT_ULT
+ Op386FCHS
+ Op386MOVSSconst1
+ Op386MOVSDconst1
+ Op386MOVSSconst2
+ Op386MOVSDconst2
+
OpAMD64ADDSS
OpAMD64ADDSD
OpAMD64SUBSS
@@ -143,12 +400,6 @@ const (
OpAMD64DIVQU
OpAMD64DIVLU
OpAMD64DIVWU
- OpAMD64MODQ
- OpAMD64MODL
- OpAMD64MODW
- OpAMD64MODQU
- OpAMD64MODLU
- OpAMD64MODWU
OpAMD64ANDQ
OpAMD64ANDL
OpAMD64ANDQconst
@@ -264,6 +515,7 @@ const (
OpAMD64LEAQ2
OpAMD64LEAQ4
OpAMD64LEAQ8
+ OpAMD64LEAL
OpAMD64MOVBload
OpAMD64MOVBQSXload
OpAMD64MOVWload
@@ -317,6 +569,7 @@ const (
OpAMD64LoweredGetClosurePtr
OpAMD64LoweredNilCheck
OpAMD64MOVQconvert
+ OpAMD64MOVLconvert
OpAMD64FlagEQ
OpAMD64FlagLT_ULT
OpAMD64FlagLT_UGT
@@ -325,12 +578,504 @@ const (
OpARMADD
OpARMADDconst
- OpARMMOVWconst
+ OpARMSUB
+ OpARMSUBconst
+ OpARMRSB
+ OpARMRSBconst
+ OpARMMUL
+ OpARMHMUL
+ OpARMHMULU
+ OpARMDIV
+ OpARMDIVU
+ OpARMMOD
+ OpARMMODU
+ OpARMADDS
+ OpARMADDSconst
+ OpARMADC
+ OpARMADCconst
+ OpARMSUBS
+ OpARMSUBSconst
+ OpARMRSBSconst
+ OpARMSBC
+ OpARMSBCconst
+ OpARMRSCconst
+ OpARMMULLU
+ OpARMMULA
+ OpARMADDF
+ OpARMADDD
+ OpARMSUBF
+ OpARMSUBD
+ OpARMMULF
+ OpARMMULD
+ OpARMDIVF
+ OpARMDIVD
+ OpARMAND
+ OpARMANDconst
+ OpARMOR
+ OpARMORconst
+ OpARMXOR
+ OpARMXORconst
+ OpARMBIC
+ OpARMBICconst
+ OpARMMVN
+ OpARMNEGF
+ OpARMNEGD
+ OpARMSQRTD
+ OpARMSLL
+ OpARMSLLconst
+ OpARMSRL
+ OpARMSRLconst
+ OpARMSRA
+ OpARMSRAconst
+ OpARMSRRconst
+ OpARMADDshiftLL
+ OpARMADDshiftRL
+ OpARMADDshiftRA
+ OpARMSUBshiftLL
+ OpARMSUBshiftRL
+ OpARMSUBshiftRA
+ OpARMRSBshiftLL
+ OpARMRSBshiftRL
+ OpARMRSBshiftRA
+ OpARMANDshiftLL
+ OpARMANDshiftRL
+ OpARMANDshiftRA
+ OpARMORshiftLL
+ OpARMORshiftRL
+ OpARMORshiftRA
+ OpARMXORshiftLL
+ OpARMXORshiftRL
+ OpARMXORshiftRA
+ OpARMBICshiftLL
+ OpARMBICshiftRL
+ OpARMBICshiftRA
+ OpARMMVNshiftLL
+ OpARMMVNshiftRL
+ OpARMMVNshiftRA
+ OpARMADCshiftLL
+ OpARMADCshiftRL
+ OpARMADCshiftRA
+ OpARMSBCshiftLL
+ OpARMSBCshiftRL
+ OpARMSBCshiftRA
+ OpARMRSCshiftLL
+ OpARMRSCshiftRL
+ OpARMRSCshiftRA
+ OpARMADDSshiftLL
+ OpARMADDSshiftRL
+ OpARMADDSshiftRA
+ OpARMSUBSshiftLL
+ OpARMSUBSshiftRL
+ OpARMSUBSshiftRA
+ OpARMRSBSshiftLL
+ OpARMRSBSshiftRL
+ OpARMRSBSshiftRA
+ OpARMADDshiftLLreg
+ OpARMADDshiftRLreg
+ OpARMADDshiftRAreg
+ OpARMSUBshiftLLreg
+ OpARMSUBshiftRLreg
+ OpARMSUBshiftRAreg
+ OpARMRSBshiftLLreg
+ OpARMRSBshiftRLreg
+ OpARMRSBshiftRAreg
+ OpARMANDshiftLLreg
+ OpARMANDshiftRLreg
+ OpARMANDshiftRAreg
+ OpARMORshiftLLreg
+ OpARMORshiftRLreg
+ OpARMORshiftRAreg
+ OpARMXORshiftLLreg
+ OpARMXORshiftRLreg
+ OpARMXORshiftRAreg
+ OpARMBICshiftLLreg
+ OpARMBICshiftRLreg
+ OpARMBICshiftRAreg
+ OpARMMVNshiftLLreg
+ OpARMMVNshiftRLreg
+ OpARMMVNshiftRAreg
+ OpARMADCshiftLLreg
+ OpARMADCshiftRLreg
+ OpARMADCshiftRAreg
+ OpARMSBCshiftLLreg
+ OpARMSBCshiftRLreg
+ OpARMSBCshiftRAreg
+ OpARMRSCshiftLLreg
+ OpARMRSCshiftRLreg
+ OpARMRSCshiftRAreg
+ OpARMADDSshiftLLreg
+ OpARMADDSshiftRLreg
+ OpARMADDSshiftRAreg
+ OpARMSUBSshiftLLreg
+ OpARMSUBSshiftRLreg
+ OpARMSUBSshiftRAreg
+ OpARMRSBSshiftLLreg
+ OpARMRSBSshiftRLreg
+ OpARMRSBSshiftRAreg
OpARMCMP
+ OpARMCMPconst
+ OpARMCMN
+ OpARMCMNconst
+ OpARMTST
+ OpARMTSTconst
+ OpARMTEQ
+ OpARMTEQconst
+ OpARMCMPF
+ OpARMCMPD
+ OpARMCMPshiftLL
+ OpARMCMPshiftRL
+ OpARMCMPshiftRA
+ OpARMCMPshiftLLreg
+ OpARMCMPshiftRLreg
+ OpARMCMPshiftRAreg
+ OpARMCMPF0
+ OpARMCMPD0
+ OpARMMOVWconst
+ OpARMMOVFconst
+ OpARMMOVDconst
+ OpARMMOVWaddr
+ OpARMMOVBload
+ OpARMMOVBUload
+ OpARMMOVHload
+ OpARMMOVHUload
OpARMMOVWload
+ OpARMMOVFload
+ OpARMMOVDload
+ OpARMMOVBstore
+ OpARMMOVHstore
OpARMMOVWstore
+ OpARMMOVFstore
+ OpARMMOVDstore
+ OpARMMOVWloadidx
+ OpARMMOVWloadshiftLL
+ OpARMMOVWloadshiftRL
+ OpARMMOVWloadshiftRA
+ OpARMMOVWstoreidx
+ OpARMMOVWstoreshiftLL
+ OpARMMOVWstoreshiftRL
+ OpARMMOVWstoreshiftRA
+ OpARMMOVBreg
+ OpARMMOVBUreg
+ OpARMMOVHreg
+ OpARMMOVHUreg
+ OpARMMOVWreg
+ OpARMMOVWnop
+ OpARMMOVWF
+ OpARMMOVWD
+ OpARMMOVWUF
+ OpARMMOVWUD
+ OpARMMOVFW
+ OpARMMOVDW
+ OpARMMOVFWU
+ OpARMMOVDWU
+ OpARMMOVFD
+ OpARMMOVDF
+ OpARMCMOVWHSconst
+ OpARMCMOVWLSconst
+ OpARMSRAcond
OpARMCALLstatic
+ OpARMCALLclosure
+ OpARMCALLdefer
+ OpARMCALLgo
+ OpARMCALLinter
+ OpARMLoweredNilCheck
+ OpARMEqual
+ OpARMNotEqual
OpARMLessThan
+ OpARMLessEqual
+ OpARMGreaterThan
+ OpARMGreaterEqual
+ OpARMLessThanU
+ OpARMLessEqualU
+ OpARMGreaterThanU
+ OpARMGreaterEqualU
+ OpARMDUFFZERO
+ OpARMDUFFCOPY
+ OpARMLoweredZero
+ OpARMLoweredMove
+ OpARMLoweredGetClosurePtr
+ OpARMMOVWconvert
+ OpARMFlagEQ
+ OpARMFlagLT_ULT
+ OpARMFlagLT_UGT
+ OpARMFlagGT_UGT
+ OpARMFlagGT_ULT
+ OpARMInvertFlags
+
+ OpARM64ADD
+ OpARM64ADDconst
+ OpARM64SUB
+ OpARM64SUBconst
+ OpARM64MUL
+ OpARM64MULW
+ OpARM64MULH
+ OpARM64UMULH
+ OpARM64MULL
+ OpARM64UMULL
+ OpARM64DIV
+ OpARM64UDIV
+ OpARM64DIVW
+ OpARM64UDIVW
+ OpARM64MOD
+ OpARM64UMOD
+ OpARM64MODW
+ OpARM64UMODW
+ OpARM64FADDS
+ OpARM64FADDD
+ OpARM64FSUBS
+ OpARM64FSUBD
+ OpARM64FMULS
+ OpARM64FMULD
+ OpARM64FDIVS
+ OpARM64FDIVD
+ OpARM64AND
+ OpARM64ANDconst
+ OpARM64OR
+ OpARM64ORconst
+ OpARM64XOR
+ OpARM64XORconst
+ OpARM64BIC
+ OpARM64BICconst
+ OpARM64MVN
+ OpARM64NEG
+ OpARM64FNEGS
+ OpARM64FNEGD
+ OpARM64FSQRTD
+ OpARM64SLL
+ OpARM64SLLconst
+ OpARM64SRL
+ OpARM64SRLconst
+ OpARM64SRA
+ OpARM64SRAconst
+ OpARM64RORconst
+ OpARM64RORWconst
+ OpARM64CMP
+ OpARM64CMPconst
+ OpARM64CMPW
+ OpARM64CMPWconst
+ OpARM64CMN
+ OpARM64CMNconst
+ OpARM64CMNW
+ OpARM64CMNWconst
+ OpARM64FCMPS
+ OpARM64FCMPD
+ OpARM64ADDshiftLL
+ OpARM64ADDshiftRL
+ OpARM64ADDshiftRA
+ OpARM64SUBshiftLL
+ OpARM64SUBshiftRL
+ OpARM64SUBshiftRA
+ OpARM64ANDshiftLL
+ OpARM64ANDshiftRL
+ OpARM64ANDshiftRA
+ OpARM64ORshiftLL
+ OpARM64ORshiftRL
+ OpARM64ORshiftRA
+ OpARM64XORshiftLL
+ OpARM64XORshiftRL
+ OpARM64XORshiftRA
+ OpARM64BICshiftLL
+ OpARM64BICshiftRL
+ OpARM64BICshiftRA
+ OpARM64CMPshiftLL
+ OpARM64CMPshiftRL
+ OpARM64CMPshiftRA
+ OpARM64MOVDconst
+ OpARM64FMOVSconst
+ OpARM64FMOVDconst
+ OpARM64MOVDaddr
+ OpARM64MOVBload
+ OpARM64MOVBUload
+ OpARM64MOVHload
+ OpARM64MOVHUload
+ OpARM64MOVWload
+ OpARM64MOVWUload
+ OpARM64MOVDload
+ OpARM64FMOVSload
+ OpARM64FMOVDload
+ OpARM64MOVBstore
+ OpARM64MOVHstore
+ OpARM64MOVWstore
+ OpARM64MOVDstore
+ OpARM64FMOVSstore
+ OpARM64FMOVDstore
+ OpARM64MOVBstorezero
+ OpARM64MOVHstorezero
+ OpARM64MOVWstorezero
+ OpARM64MOVDstorezero
+ OpARM64MOVBreg
+ OpARM64MOVBUreg
+ OpARM64MOVHreg
+ OpARM64MOVHUreg
+ OpARM64MOVWreg
+ OpARM64MOVWUreg
+ OpARM64MOVDreg
+ OpARM64MOVDnop
+ OpARM64SCVTFWS
+ OpARM64SCVTFWD
+ OpARM64UCVTFWS
+ OpARM64UCVTFWD
+ OpARM64SCVTFS
+ OpARM64SCVTFD
+ OpARM64UCVTFS
+ OpARM64UCVTFD
+ OpARM64FCVTZSSW
+ OpARM64FCVTZSDW
+ OpARM64FCVTZUSW
+ OpARM64FCVTZUDW
+ OpARM64FCVTZSS
+ OpARM64FCVTZSD
+ OpARM64FCVTZUS
+ OpARM64FCVTZUD
+ OpARM64FCVTSD
+ OpARM64FCVTDS
+ OpARM64CSELULT
+ OpARM64CSELULT0
+ OpARM64CALLstatic
+ OpARM64CALLclosure
+ OpARM64CALLdefer
+ OpARM64CALLgo
+ OpARM64CALLinter
+ OpARM64LoweredNilCheck
+ OpARM64Equal
+ OpARM64NotEqual
+ OpARM64LessThan
+ OpARM64LessEqual
+ OpARM64GreaterThan
+ OpARM64GreaterEqual
+ OpARM64LessThanU
+ OpARM64LessEqualU
+ OpARM64GreaterThanU
+ OpARM64GreaterEqualU
+ OpARM64DUFFZERO
+ OpARM64LoweredZero
+ OpARM64LoweredMove
+ OpARM64LoweredGetClosurePtr
+ OpARM64MOVDconvert
+ OpARM64FlagEQ
+ OpARM64FlagLT_ULT
+ OpARM64FlagLT_UGT
+ OpARM64FlagGT_UGT
+ OpARM64FlagGT_ULT
+ OpARM64InvertFlags
+
+ OpPPC64ADD
+ OpPPC64ADDconst
+ OpPPC64FADD
+ OpPPC64FADDS
+ OpPPC64SUB
+ OpPPC64FSUB
+ OpPPC64FSUBS
+ OpPPC64MULLD
+ OpPPC64MULLW
+ OpPPC64MULHD
+ OpPPC64MULHW
+ OpPPC64MULHDU
+ OpPPC64MULHWU
+ OpPPC64FMUL
+ OpPPC64FMULS
+ OpPPC64SRAD
+ OpPPC64SRAW
+ OpPPC64SRD
+ OpPPC64SRW
+ OpPPC64SLD
+ OpPPC64SLW
+ OpPPC64ADDconstForCarry
+ OpPPC64MaskIfNotCarry
+ OpPPC64SRADconst
+ OpPPC64SRAWconst
+ OpPPC64SRDconst
+ OpPPC64SRWconst
+ OpPPC64SLDconst
+ OpPPC64SLWconst
+ OpPPC64FDIV
+ OpPPC64FDIVS
+ OpPPC64DIVD
+ OpPPC64DIVW
+ OpPPC64DIVDU
+ OpPPC64DIVWU
+ OpPPC64FCTIDZ
+ OpPPC64FCTIWZ
+ OpPPC64FCFID
+ OpPPC64FRSP
+ OpPPC64Xf2i64
+ OpPPC64Xi2f64
+ OpPPC64AND
+ OpPPC64ANDN
+ OpPPC64OR
+ OpPPC64ORN
+ OpPPC64XOR
+ OpPPC64EQV
+ OpPPC64NEG
+ OpPPC64FNEG
+ OpPPC64FSQRT
+ OpPPC64FSQRTS
+ OpPPC64ORconst
+ OpPPC64XORconst
+ OpPPC64ANDconst
+ OpPPC64MOVBreg
+ OpPPC64MOVBZreg
+ OpPPC64MOVHreg
+ OpPPC64MOVHZreg
+ OpPPC64MOVWreg
+ OpPPC64MOVWZreg
+ OpPPC64MOVBload
+ OpPPC64MOVBZload
+ OpPPC64MOVHload
+ OpPPC64MOVHZload
+ OpPPC64MOVWload
+ OpPPC64MOVWZload
+ OpPPC64MOVDload
+ OpPPC64FMOVDload
+ OpPPC64FMOVSload
+ OpPPC64MOVBstore
+ OpPPC64MOVHstore
+ OpPPC64MOVWstore
+ OpPPC64MOVDstore
+ OpPPC64FMOVDstore
+ OpPPC64FMOVSstore
+ OpPPC64MOVBstorezero
+ OpPPC64MOVHstorezero
+ OpPPC64MOVWstorezero
+ OpPPC64MOVDstorezero
+ OpPPC64MOVDaddr
+ OpPPC64MOVDconst
+ OpPPC64MOVWconst
+ OpPPC64FMOVDconst
+ OpPPC64FMOVSconst
+ OpPPC64FCMPU
+ OpPPC64CMP
+ OpPPC64CMPU
+ OpPPC64CMPW
+ OpPPC64CMPWU
+ OpPPC64CMPconst
+ OpPPC64CMPUconst
+ OpPPC64CMPWconst
+ OpPPC64CMPWUconst
+ OpPPC64Equal
+ OpPPC64NotEqual
+ OpPPC64LessThan
+ OpPPC64FLessThan
+ OpPPC64LessEqual
+ OpPPC64FLessEqual
+ OpPPC64GreaterThan
+ OpPPC64FGreaterThan
+ OpPPC64GreaterEqual
+ OpPPC64FGreaterEqual
+ OpPPC64LoweredGetClosurePtr
+ OpPPC64LoweredNilCheck
+ OpPPC64MOVDconvert
+ OpPPC64CALLstatic
+ OpPPC64CALLclosure
+ OpPPC64CALLdefer
+ OpPPC64CALLgo
+ OpPPC64CALLinter
+ OpPPC64LoweredZero
+ OpPPC64LoweredMove
+ OpPPC64InvertFlags
+ OpPPC64FlagEQ
+ OpPPC64FlagLT
+ OpPPC64FlagGT
OpAdd8
OpAdd16
@@ -618,6 +1363,22 @@ const (
OpVarKill
OpVarLive
OpKeepAlive
+ OpInt64Make
+ OpInt64Hi
+ OpInt64Lo
+ OpAdd32carry
+ OpAdd32withcarry
+ OpSub32carry
+ OpSub32withcarry
+ OpMul32uhilo
+ OpSignmask
+ OpZeromask
+ OpCvt32Uto32F
+ OpCvt32Uto64F
+ OpCvt32Fto32U
+ OpCvt64Fto32U
+ OpSelect0
+ OpSelect1
)
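
A note on reading the regMask comments that annotate every entry below: each bit selects one register in the architecture's numbering, and the generator prints the names of the set bits. On 386, the recurring masks decode as:

//   255   = 0x000FF -> AX CX DX BX SP BP SI DI (bits 0-7)
//   239   = 0x000EF -> the same minus SP (bit 4 clear)
//   65280 = 0x0FF00 -> X0 X1 X2 X3 X4 X5 X6 X7 (bits 8-15)
//   65791 = 0x100FF -> bits 0-7 plus SB (bit 16)
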
var opcodeTable = [...]opInfo{
@@ -631,11 +1392,2404 @@ var opcodeTable = [...]opInfo{
asm: x86.AADDSS,
reg: regInfo{
inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "ADDSD",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AADDSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "SUBSS",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ASUBSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 32512}, // X0 X1 X2 X3 X4 X5 X6
+ {1, 32512}, // X0 X1 X2 X3 X4 X5 X6
+ },
+ clobbers: 32768, // X7
+ outputs: []outputInfo{
+ {0, 32512}, // X0 X1 X2 X3 X4 X5 X6
+ },
+ },
+ },
+ {
+ name: "SUBSD",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ASUBSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 32512}, // X0 X1 X2 X3 X4 X5 X6
+ {1, 32512}, // X0 X1 X2 X3 X4 X5 X6
+ },
+ clobbers: 32768, // X7
+ outputs: []outputInfo{
+ {0, 32512}, // X0 X1 X2 X3 X4 X5 X6
+ },
+ },
+ },
+ {
+ name: "MULSS",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AMULSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MULSD",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AMULSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "DIVSS",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ADIVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 32512}, // X0 X1 X2 X3 X4 X5 X6
+ {1, 32512}, // X0 X1 X2 X3 X4 X5 X6
+ },
+ clobbers: 32768, // X7
+ outputs: []outputInfo{
+ {0, 32512}, // X0 X1 X2 X3 X4 X5 X6
+ },
+ },
+ },
+ {
+ name: "DIVSD",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ADIVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 32512}, // X0 X1 X2 X3 X4 X5 X6
+ {1, 32512}, // X0 X1 X2 X3 X4 X5 X6
+ },
+ clobbers: 32768, // X7
+ outputs: []outputInfo{
+ {0, 32512}, // X0 X1 X2 X3 X4 X5 X6
+ },
+ },
+ },
+ {
+ name: "MOVSSload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSSconst",
+ auxType: auxFloat32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSSloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSSloadidx4",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSDloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSDloadidx8",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSSstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVSDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVSSstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVSSstoreidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVSDstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVSDstoreidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ADDL",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 239}, // AX CX DX BX BP SI DI
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADDLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADDLcarry",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADDLconstcarry",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADCL",
+ argLen: 3,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AADCL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADCLconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AADCL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SUBL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SUBLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SUBLcarry",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SUBLconstcarry",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SBBL",
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASBBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SBBLconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASBBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MULL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AIMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MULLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AIMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "HMULL",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "HMULLU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "HMULW",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIMULW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "HMULB",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIMULB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "HMULWU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AMULW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "HMULBU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AMULB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "MULLQU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ {1, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "DIVL",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "DIVW",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "DIVLU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "DIVWU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "MODL",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "MODW",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "MODLU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "MODWU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "ANDL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ANDLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ORL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ORLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "XORL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "XORLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPL",
+ argLen: 2,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPW",
+ argLen: 2,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPB",
+ argLen: 2,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "UCOMISS",
+ argLen: 2,
+ asm: x86.AUCOMISS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "UCOMISD",
+ argLen: 2,
+ asm: x86.AUCOMISD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "TESTL",
+ argLen: 2,
+ asm: x86.ATESTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "TESTW",
+ argLen: 2,
+ asm: x86.ATESTW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "TESTB",
+ argLen: 2,
+ asm: x86.ATESTB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "TESTLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: x86.ATESTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "TESTWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ asm: x86.ATESTW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "TESTBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: x86.ATESTB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHLL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHLLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHRL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHRW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHRB",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHRLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHRWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHRBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SARL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SARW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SARB",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SARLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SARWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SARBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ROLLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ROLWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ROLBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "NEGL",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ANEGL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "NOTL",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ANOTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "BSFL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSFL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "BSFW",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSFW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "BSRL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "BSRW",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "BSWAPL",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABSWAPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SQRTSD",
+ argLen: 1,
+ asm: x86.ASQRTSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "SBBLcarrymask",
+ argLen: 1,
+ asm: x86.ASBBL,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETEQ",
+ argLen: 1,
+ asm: x86.ASETEQ,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETNE",
+ argLen: 1,
+ asm: x86.ASETNE,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETL",
+ argLen: 1,
+ asm: x86.ASETLT,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETLE",
+ argLen: 1,
+ asm: x86.ASETLE,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETG",
+ argLen: 1,
+ asm: x86.ASETGT,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETGE",
+ argLen: 1,
+ asm: x86.ASETGE,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETB",
+ argLen: 1,
+ asm: x86.ASETCS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETBE",
+ argLen: 1,
+ asm: x86.ASETLS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETA",
+ argLen: 1,
+ asm: x86.ASETHI,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETAE",
+ argLen: 1,
+ asm: x86.ASETCC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETEQF",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ASETEQ,
+ reg: regInfo{
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 238}, // CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETNEF",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ASETNE,
+ reg: regInfo{
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 238}, // CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETORD",
+ argLen: 1,
+ asm: x86.ASETPC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETNAN",
+ argLen: 1,
+ asm: x86.ASETPS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETGF",
+ argLen: 1,
+ asm: x86.ASETHI,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETGEF",
+ argLen: 1,
+ asm: x86.ASETCC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVBLSX",
+ argLen: 1,
+ asm: x86.AMOVBLSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVBLZX",
+ argLen: 1,
+ asm: x86.AMOVBLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVWLSX",
+ argLen: 1,
+ asm: x86.AMOVWLSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVWLZX",
+ argLen: 1,
+ asm: x86.AMOVWLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVLconst",
+ auxType: auxInt32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "CVTTSD2SL",
+ argLen: 1,
+ asm: x86.ACVTTSD2SL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "CVTTSS2SL",
+ argLen: 1,
+ asm: x86.ACVTTSS2SL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "CVTSL2SS",
+ argLen: 1,
+ asm: x86.ACVTSL2SS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "CVTSL2SD",
+ argLen: 1,
+ asm: x86.ACVTSL2SD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "CVTSD2SS",
+ argLen: 1,
+ asm: x86.ACVTSD2SS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "CVTSS2SD",
+ argLen: 1,
+ asm: x86.ACVTSS2SD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "PXOR",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.APXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "LEAL",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LEAL1",
+ auxType: auxSymOff,
+ argLen: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LEAL2",
+ auxType: auxSymOff,
+ argLen: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LEAL4",
+ auxType: auxSymOff,
+ argLen: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LEAL8",
+ auxType: auxSymOff,
+ argLen: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: x86.AMOVBLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVBLSXload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: x86.AMOVBLSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: x86.AMOVWLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVWLSXload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: x86.AMOVWLSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVLload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVLstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVBloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: x86.AMOVBLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: x86.AMOVWLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx2",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: x86.AMOVWLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVLloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVLloadidx4",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVBstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx2",
+ auxType: auxSymOff,
+ argLen: 4,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVBstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVBstoreconstidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreconstidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreconstidx2",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreconstidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreconstidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "DUFFZERO",
+ auxType: auxInt64,
+ argLen: 3,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 1}, // AX
+ },
+ clobbers: 130, // CX DI
+ },
+ },
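DUFFZERO pins its operands: the destination pointer must arrive in DI and the zero source in AX, the auxInt selects how far into the unrolled zeroing routine to jump, and CX and DI come back clobbered. A minimal sketch of the Duff's-device idea that the auxInt offset encodes (illustrative only, not the runtime's actual duffzero):

package main

// duffZero zeroes p by "jumping into the middle" of an unrolled loop:
// the remainder stores run first via fallthrough, then the loop takes
// full four-element strides. The machine op's auxInt plays the role of
// the switch here: it is the entry offset into the unrolled routine.
func duffZero(p []byte) {
	i, n := 0, len(p)
	switch n % 4 {
	case 3:
		p[i] = 0
		i++
		fallthrough
	case 2:
		p[i] = 0
		i++
		fallthrough
	case 1:
		p[i] = 0
		i++
	}
	for ; i < n; i += 4 {
		p[i], p[i+1], p[i+2], p[i+3] = 0, 0, 0, 0
	}
}

func main() {
	buf := []byte{1, 2, 3, 4, 5, 6, 7}
	duffZero(buf)
	println(buf[0], buf[6]) // 0 0
}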
+ {
+ name: "REPSTOSL",
+ argLen: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 2}, // CX
+ {2, 1}, // AX
+ },
+ clobbers: 130, // CX DI
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxSymOff,
+ argLen: 1,
+ clobberFlags: true,
+ reg: regInfo{
+ clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 4}, // DX
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ {
+ name: "CALLdefer",
+ auxType: auxInt64,
+ argLen: 1,
+ clobberFlags: true,
+ reg: regInfo{
+ clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ {
+ name: "CALLgo",
+ auxType: auxInt64,
+ argLen: 1,
+ clobberFlags: true,
+ reg: regInfo{
+ clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxInt64,
+ argLen: 2,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ {
+ name: "DUFFCOPY",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 64}, // SI
+ },
+ clobbers: 194, // CX SI DI
+ },
+ },
+ {
+ name: "REPMOVSL",
+ argLen: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 64}, // SI
+ {2, 2}, // CX
+ },
+ clobbers: 194, // CX SI DI
+ },
+ },
+ {
+ name: "InvertFlags",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "LoweredGetG",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVLconvert",
+ argLen: 2,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "FlagEQ",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT_ULT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT_UGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT_UGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT_ULT",
+ argLen: 0,
+ reg: regInfo{},
+ },
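The five Flag* entries are zero-argument pseudo-ops standing for a comparison whose outcome is already known at compile time: each name fixes both the signed ordering (EQ, LT, GT) and the unsigned one (ULT, UGT), so a single value can feed any conditional branch or SETcc. A minimal sketch of the classification that constant-folding rules perform on a CMP of two constants (illustrative helper, not the generated rewrite code):

package main

import "fmt"

// flagConstant names the flag pseudo-op for a comparison of two known
// 32-bit constants: signed ordering first, unsigned ordering second.
func flagConstant(x, y int32) string {
	switch {
	case x == y:
		return "FlagEQ"
	case x < y && uint32(x) < uint32(y):
		return "FlagLT_ULT"
	case x < y:
		return "FlagLT_UGT"
	case uint32(x) < uint32(y):
		return "FlagGT_ULT"
	default:
		return "FlagGT_UGT"
	}
}

func main() {
	fmt.Println(flagConstant(-1, 1)) // FlagLT_UGT: less than signed, greater unsigned
}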
+ {
+ name: "FCHS",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSSconst1",
+ auxType: auxFloat32,
+ argLen: 0,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVSDconst1",
+ auxType: auxFloat64,
+ argLen: 0,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVSSconst2",
+ argLen: 1,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSDconst2",
+ argLen: 1,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
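Every mask in these tables is a bit set over the backend's register numbering, expanded by the trailing comment: on 386, 239 (0xEF) is the general-purpose set minus SP, 65280 (0xFF00) covers X0-X7 in bits 8-15, and 65791 (0x100FF) adds the pseudo-register SB at bit 16. A minimal decoding sketch, assuming a register ordering that matches those comments (the authoritative names live in the generated tables, not here):

package main

import "fmt"

// regNames386 mirrors the bit order implied by the mask comments above.
var regNames386 = []string{
	"AX", "CX", "DX", "BX", "SP", "BP", "SI", "DI", // bits 0-7
	"X0", "X1", "X2", "X3", "X4", "X5", "X6", "X7", // bits 8-15
	"SB", // bit 16
}

// decodeMask expands a regMask-style bit set into register names.
func decodeMask(mask uint64) []string {
	var regs []string
	for i, name := range regNames386 {
		if mask&(1<<uint(i)) != 0 {
			regs = append(regs, name)
		}
	}
	return regs
}

func main() {
	fmt.Println(decodeMask(239))   // [AX CX DX BX BP SI DI]
	fmt.Println(decodeMask(65280)) // [X0 X1 X2 X3 X4 X5 X6 X7]
	fmt.Println(decodeMask(65791)) // [AX CX DX BX SP BP SI DI SB]
}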
+
+ {
+ name: "ADDSS",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AADDSS,
+ reg: regInfo{
+ inputs: []inputInfo{
{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
- outputs: []regMask{
- 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
},
@@ -650,8 +3804,8 @@ var opcodeTable = [...]opInfo{
{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
- outputs: []regMask{
- 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
},
@@ -666,8 +3820,8 @@ var opcodeTable = [...]opInfo{
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
clobbers: 2147483648, // X15
- outputs: []regMask{
- 2147418112, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
},
@@ -682,8 +3836,8 @@ var opcodeTable = [...]opInfo{
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
clobbers: 2147483648, // X15
- outputs: []regMask{
- 2147418112, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
},
@@ -698,8 +3852,8 @@ var opcodeTable = [...]opInfo{
{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
- outputs: []regMask{
- 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
},
@@ -714,8 +3868,8 @@ var opcodeTable = [...]opInfo{
{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
- outputs: []regMask{
- 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
},
@@ -730,8 +3884,8 @@ var opcodeTable = [...]opInfo{
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
clobbers: 2147483648, // X15
- outputs: []regMask{
- 2147418112, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
},
@@ -746,8 +3900,8 @@ var opcodeTable = [...]opInfo{
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
clobbers: 2147483648, // X15
- outputs: []regMask{
- 2147418112, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
},
},
},
@@ -760,8 +3914,8 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
},
@@ -774,8 +3928,8 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
},
@@ -786,8 +3940,8 @@ var opcodeTable = [...]opInfo{
rematerializeable: true,
asm: x86.AMOVSS,
reg: regInfo{
- outputs: []regMask{
- 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
},
@@ -798,8 +3952,8 @@ var opcodeTable = [...]opInfo{
rematerializeable: true,
asm: x86.AMOVSD,
reg: regInfo{
- outputs: []regMask{
- 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
},
@@ -813,8 +3967,8 @@ var opcodeTable = [...]opInfo{
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
},
@@ -828,8 +3982,8 @@ var opcodeTable = [...]opInfo{
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
},
@@ -843,8 +3997,8 @@ var opcodeTable = [...]opInfo{
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
},
@@ -858,8 +4012,8 @@ var opcodeTable = [...]opInfo{
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
},
@@ -940,64 +4094,64 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "ADDQ",
- argLen: 2,
- commutative: true,
- asm: x86.AADDQ,
+ name: "ADDQ",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AADDQ,
reg: regInfo{
inputs: []inputInfo{
{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
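From here down the change is largely mechanical: the FLAGS pseudo-register (old mask bit 33, value 8589934592) drops out of every clobber set in favor of a clobberFlags marker on the opInfo, and outputs move from a bare []regMask to []outputInfo so each result carries an explicit index. A sketch of the shapes involved, assuming field layouts consistent with the literals in this table (the real definitions live in cmd/compile/internal/ssa/op.go):

package ssa

type regMask uint64

// inputInfo and outputInfo pair an argument or result index with the
// registers it may occupy. Making outputs indexed is what lets an op
// such as DIVQ, further down, publish two results at once.
type inputInfo struct {
	idx  int
	regs regMask
}

type outputInfo struct {
	idx  int
	regs regMask
}

type regInfo struct {
	inputs   []inputInfo
	clobbers regMask
	outputs  []outputInfo
}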
{
- name: "ADDL",
- argLen: 2,
- commutative: true,
- asm: x86.AADDL,
+ name: "ADDL",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AADDL,
reg: regInfo{
inputs: []inputInfo{
{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
{
- name: "ADDQconst",
- auxType: auxInt64,
- argLen: 1,
- asm: x86.AADDQ,
+ name: "ADDQconst",
+ auxType: auxInt64,
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.AADDQ,
reg: regInfo{
inputs: []inputInfo{
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
{
- name: "ADDLconst",
- auxType: auxInt32,
- argLen: 1,
- asm: x86.AADDL,
+ name: "ADDLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.AADDL,
reg: regInfo{
inputs: []inputInfo{
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1005,15 +4159,15 @@ var opcodeTable = [...]opInfo{
name: "SUBQ",
argLen: 2,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ASUBQ,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1021,15 +4175,15 @@ var opcodeTable = [...]opInfo{
name: "SUBL",
argLen: 2,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ASUBL,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1038,14 +4192,14 @@ var opcodeTable = [...]opInfo{
auxType: auxInt64,
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ASUBQ,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1054,14 +4208,14 @@ var opcodeTable = [...]opInfo{
auxType: auxInt32,
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ASUBL,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1070,15 +4224,15 @@ var opcodeTable = [...]opInfo{
argLen: 2,
commutative: true,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.AIMULQ,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1087,15 +4241,15 @@ var opcodeTable = [...]opInfo{
argLen: 2,
commutative: true,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.AIMULL,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1104,14 +4258,14 @@ var opcodeTable = [...]opInfo{
auxType: auxInt64,
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.AIMULQ,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1120,134 +4274,142 @@ var opcodeTable = [...]opInfo{
auxType: auxInt32,
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.AIMULL,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
{
- name: "HMULQ",
- argLen: 2,
- asm: x86.AIMULQ,
+ name: "HMULQ",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIMULQ,
reg: regInfo{
inputs: []inputInfo{
{0, 1}, // AX
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934593, // AX FLAGS
- outputs: []regMask{
- 4, // DX
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
},
},
},
{
- name: "HMULL",
- argLen: 2,
- asm: x86.AIMULL,
+ name: "HMULL",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIMULL,
reg: regInfo{
inputs: []inputInfo{
{0, 1}, // AX
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934593, // AX FLAGS
- outputs: []regMask{
- 4, // DX
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
},
},
},
{
- name: "HMULW",
- argLen: 2,
- asm: x86.AIMULW,
+ name: "HMULW",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIMULW,
reg: regInfo{
inputs: []inputInfo{
{0, 1}, // AX
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934593, // AX FLAGS
- outputs: []regMask{
- 4, // DX
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
},
},
},
{
- name: "HMULB",
- argLen: 2,
- asm: x86.AIMULB,
+ name: "HMULB",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIMULB,
reg: regInfo{
inputs: []inputInfo{
{0, 1}, // AX
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934593, // AX FLAGS
- outputs: []regMask{
- 4, // DX
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
},
},
},
{
- name: "HMULQU",
- argLen: 2,
- asm: x86.AMULQ,
+ name: "HMULQU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AMULQ,
reg: regInfo{
inputs: []inputInfo{
{0, 1}, // AX
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934593, // AX FLAGS
- outputs: []regMask{
- 4, // DX
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
},
},
},
{
- name: "HMULLU",
- argLen: 2,
- asm: x86.AMULL,
+ name: "HMULLU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AMULL,
reg: regInfo{
inputs: []inputInfo{
{0, 1}, // AX
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934593, // AX FLAGS
- outputs: []regMask{
- 4, // DX
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
},
},
},
{
- name: "HMULWU",
- argLen: 2,
- asm: x86.AMULW,
+ name: "HMULWU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AMULW,
reg: regInfo{
inputs: []inputInfo{
{0, 1}, // AX
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934593, // AX FLAGS
- outputs: []regMask{
- 4, // DX
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
},
},
},
{
- name: "HMULBU",
- argLen: 2,
- asm: x86.AMULB,
+ name: "HMULBU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AMULB,
reg: regInfo{
inputs: []inputInfo{
{0, 1}, // AX
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934593, // AX FLAGS
- outputs: []regMask{
- 4, // DX
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
},
},
},
@@ -1256,194 +4418,110 @@ var opcodeTable = [...]opInfo{
argLen: 2,
commutative: true,
resultInArg0: true,
+ clobberFlags: true,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
- },
- },
- },
- {
- name: "DIVQ",
- argLen: 2,
- asm: x86.AIDIVQ,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 1}, // AX
- {1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
- },
- clobbers: 8589934596, // DX FLAGS
- outputs: []regMask{
- 1, // AX
- },
- },
- },
- {
- name: "DIVL",
- argLen: 2,
- asm: x86.AIDIVL,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 1}, // AX
- {1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
- },
- clobbers: 8589934596, // DX FLAGS
- outputs: []regMask{
- 1, // AX
- },
- },
- },
- {
- name: "DIVW",
- argLen: 2,
- asm: x86.AIDIVW,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 1}, // AX
- {1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
- },
- clobbers: 8589934596, // DX FLAGS
- outputs: []regMask{
- 1, // AX
- },
- },
- },
- {
- name: "DIVQU",
- argLen: 2,
- asm: x86.ADIVQ,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 1}, // AX
- {1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
- },
- clobbers: 8589934596, // DX FLAGS
- outputs: []regMask{
- 1, // AX
- },
- },
- },
- {
- name: "DIVLU",
- argLen: 2,
- asm: x86.ADIVL,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 1}, // AX
- {1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
- },
- clobbers: 8589934596, // DX FLAGS
- outputs: []regMask{
- 1, // AX
- },
- },
- },
- {
- name: "DIVWU",
- argLen: 2,
- asm: x86.ADIVW,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 1}, // AX
- {1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
- },
- clobbers: 8589934596, // DX FLAGS
- outputs: []regMask{
- 1, // AX
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
{
- name: "MODQ",
- argLen: 2,
- asm: x86.AIDIVQ,
+ name: "DIVQ",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVQ,
reg: regInfo{
inputs: []inputInfo{
{0, 1}, // AX
{1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934593, // AX FLAGS
- outputs: []regMask{
- 4, // DX
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
},
},
},
{
- name: "MODL",
- argLen: 2,
- asm: x86.AIDIVL,
+ name: "DIVL",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVL,
reg: regInfo{
inputs: []inputInfo{
{0, 1}, // AX
{1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934593, // AX FLAGS
- outputs: []regMask{
- 4, // DX
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
},
},
},
{
- name: "MODW",
- argLen: 2,
- asm: x86.AIDIVW,
+ name: "DIVW",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVW,
reg: regInfo{
inputs: []inputInfo{
{0, 1}, // AX
{1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934593, // AX FLAGS
- outputs: []regMask{
- 4, // DX
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
},
},
},
{
- name: "MODQU",
- argLen: 2,
- asm: x86.ADIVQ,
+ name: "DIVQU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVQ,
reg: regInfo{
inputs: []inputInfo{
{0, 1}, // AX
{1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934593, // AX FLAGS
- outputs: []regMask{
- 4, // DX
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
},
},
},
{
- name: "MODLU",
- argLen: 2,
- asm: x86.ADIVL,
+ name: "DIVLU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVL,
reg: regInfo{
inputs: []inputInfo{
{0, 1}, // AX
{1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934593, // AX FLAGS
- outputs: []regMask{
- 4, // DX
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
},
},
},
{
- name: "MODWU",
- argLen: 2,
- asm: x86.ADIVW,
+ name: "DIVWU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVW,
reg: regInfo{
inputs: []inputInfo{
{0, 1}, // AX
{1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934593, // AX FLAGS
- outputs: []regMask{
- 4, // DX
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
},
},
},
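The hunk above is more than a renaming pass: the standalone MODQ/MODL/MODW ops and their unsigned variants are deleted, and each DIV op now declares two outputs, {0, 1} for the quotient in AX and {1, 4} for the remainder in DX. Generic Div and Mod can then share one instruction and pick a result by index; a hedged sketch of that correspondence in the backend's rules notation (assumed shape, the generated rules are authoritative):

// (Div64 x y) -> (Select0 (DIVQ x y))   // quotient, AX
// (Mod64 x y) -> (Select1 (DIVQ x y))   // remainder, DX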
@@ -1452,15 +4530,15 @@ var opcodeTable = [...]opInfo{
argLen: 2,
commutative: true,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.AANDQ,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1469,15 +4547,15 @@ var opcodeTable = [...]opInfo{
argLen: 2,
commutative: true,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.AANDL,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1486,14 +4564,14 @@ var opcodeTable = [...]opInfo{
auxType: auxInt64,
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.AANDQ,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1502,14 +4580,14 @@ var opcodeTable = [...]opInfo{
auxType: auxInt32,
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.AANDL,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1518,15 +4596,15 @@ var opcodeTable = [...]opInfo{
argLen: 2,
commutative: true,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.AORQ,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1535,15 +4613,15 @@ var opcodeTable = [...]opInfo{
argLen: 2,
commutative: true,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.AORL,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1552,14 +4630,14 @@ var opcodeTable = [...]opInfo{
auxType: auxInt64,
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.AORQ,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1568,14 +4646,14 @@ var opcodeTable = [...]opInfo{
auxType: auxInt32,
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.AORL,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1584,15 +4662,15 @@ var opcodeTable = [...]opInfo{
argLen: 2,
commutative: true,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.AXORQ,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1601,15 +4679,15 @@ var opcodeTable = [...]opInfo{
argLen: 2,
commutative: true,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.AXORL,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1618,14 +4696,14 @@ var opcodeTable = [...]opInfo{
auxType: auxInt64,
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.AXORQ,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1634,14 +4712,14 @@ var opcodeTable = [...]opInfo{
auxType: auxInt32,
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.AXORL,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1654,9 +4732,6 @@ var opcodeTable = [...]opInfo{
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 8589934592, // FLAGS
- },
},
},
{
@@ -1668,9 +4743,6 @@ var opcodeTable = [...]opInfo{
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 8589934592, // FLAGS
- },
},
},
{
@@ -1682,9 +4754,6 @@ var opcodeTable = [...]opInfo{
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 8589934592, // FLAGS
- },
},
},
{
@@ -1696,9 +4765,6 @@ var opcodeTable = [...]opInfo{
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 8589934592, // FLAGS
- },
},
},
{
@@ -1710,9 +4776,6 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 8589934592, // FLAGS
- },
},
},
{
@@ -1724,9 +4787,6 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 8589934592, // FLAGS
- },
},
},
{
@@ -1738,9 +4798,6 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 8589934592, // FLAGS
- },
},
},
{
@@ -1752,9 +4809,6 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 8589934592, // FLAGS
- },
},
},
{
@@ -1766,9 +4820,6 @@ var opcodeTable = [...]opInfo{
{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
- outputs: []regMask{
- 8589934592, // FLAGS
- },
},
},
{
@@ -1780,9 +4831,6 @@ var opcodeTable = [...]opInfo{
{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
- outputs: []regMask{
- 8589934592, // FLAGS
- },
},
},
{
@@ -1794,9 +4842,6 @@ var opcodeTable = [...]opInfo{
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 8589934592, // FLAGS
- },
},
},
{
@@ -1808,9 +4853,6 @@ var opcodeTable = [...]opInfo{
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 8589934592, // FLAGS
- },
},
},
{
@@ -1822,9 +4864,6 @@ var opcodeTable = [...]opInfo{
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 8589934592, // FLAGS
- },
},
},
{
@@ -1836,9 +4875,6 @@ var opcodeTable = [...]opInfo{
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 8589934592, // FLAGS
- },
},
},
{
@@ -1850,9 +4886,6 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 8589934592, // FLAGS
- },
},
},
{
@@ -1864,9 +4897,6 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 8589934592, // FLAGS
- },
},
},
{
@@ -1878,9 +4908,6 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 8589934592, // FLAGS
- },
},
},
{
@@ -1892,24 +4919,21 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 8589934592, // FLAGS
- },
},
},
{
name: "SHLQ",
argLen: 2,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ASHLQ,
reg: regInfo{
inputs: []inputInfo{
{1, 2}, // CX
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1917,15 +4941,15 @@ var opcodeTable = [...]opInfo{
name: "SHLL",
argLen: 2,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ASHLL,
reg: regInfo{
inputs: []inputInfo{
{1, 2}, // CX
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1934,14 +4958,14 @@ var opcodeTable = [...]opInfo{
auxType: auxInt64,
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ASHLQ,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1950,14 +4974,14 @@ var opcodeTable = [...]opInfo{
auxType: auxInt32,
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ASHLL,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1965,15 +4989,15 @@ var opcodeTable = [...]opInfo{
name: "SHRQ",
argLen: 2,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ASHRQ,
reg: regInfo{
inputs: []inputInfo{
{1, 2}, // CX
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1981,15 +5005,15 @@ var opcodeTable = [...]opInfo{
name: "SHRL",
argLen: 2,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ASHRL,
reg: regInfo{
inputs: []inputInfo{
{1, 2}, // CX
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -1997,15 +5021,15 @@ var opcodeTable = [...]opInfo{
name: "SHRW",
argLen: 2,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ASHRW,
reg: regInfo{
inputs: []inputInfo{
{1, 2}, // CX
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2013,15 +5037,15 @@ var opcodeTable = [...]opInfo{
name: "SHRB",
argLen: 2,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ASHRB,
reg: regInfo{
inputs: []inputInfo{
{1, 2}, // CX
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2030,14 +5054,14 @@ var opcodeTable = [...]opInfo{
auxType: auxInt64,
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ASHRQ,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2046,14 +5070,14 @@ var opcodeTable = [...]opInfo{
auxType: auxInt32,
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ASHRL,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2062,14 +5086,14 @@ var opcodeTable = [...]opInfo{
auxType: auxInt16,
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ASHRW,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2078,14 +5102,14 @@ var opcodeTable = [...]opInfo{
auxType: auxInt8,
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ASHRB,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2093,15 +5117,15 @@ var opcodeTable = [...]opInfo{
name: "SARQ",
argLen: 2,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ASARQ,
reg: regInfo{
inputs: []inputInfo{
{1, 2}, // CX
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2109,15 +5133,15 @@ var opcodeTable = [...]opInfo{
name: "SARL",
argLen: 2,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ASARL,
reg: regInfo{
inputs: []inputInfo{
{1, 2}, // CX
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2125,15 +5149,15 @@ var opcodeTable = [...]opInfo{
name: "SARW",
argLen: 2,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ASARW,
reg: regInfo{
inputs: []inputInfo{
{1, 2}, // CX
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2141,15 +5165,15 @@ var opcodeTable = [...]opInfo{
name: "SARB",
argLen: 2,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ASARB,
reg: regInfo{
inputs: []inputInfo{
{1, 2}, // CX
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2158,14 +5182,14 @@ var opcodeTable = [...]opInfo{
auxType: auxInt64,
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ASARQ,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2174,14 +5198,14 @@ var opcodeTable = [...]opInfo{
auxType: auxInt32,
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ASARL,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2190,14 +5214,14 @@ var opcodeTable = [...]opInfo{
auxType: auxInt16,
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ASARW,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2206,14 +5230,14 @@ var opcodeTable = [...]opInfo{
auxType: auxInt8,
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ASARB,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2222,14 +5246,14 @@ var opcodeTable = [...]opInfo{
auxType: auxInt64,
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.AROLQ,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2238,14 +5262,14 @@ var opcodeTable = [...]opInfo{
auxType: auxInt32,
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.AROLL,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2254,14 +5278,14 @@ var opcodeTable = [...]opInfo{
auxType: auxInt16,
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.AROLW,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2270,14 +5294,14 @@ var opcodeTable = [...]opInfo{
auxType: auxInt8,
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.AROLB,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2285,14 +5309,14 @@ var opcodeTable = [...]opInfo{
name: "NEGQ",
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ANEGQ,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2300,14 +5324,14 @@ var opcodeTable = [...]opInfo{
name: "NEGL",
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ANEGL,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2315,14 +5339,14 @@ var opcodeTable = [...]opInfo{
name: "NOTQ",
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ANOTQ,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2330,98 +5354,98 @@ var opcodeTable = [...]opInfo{
name: "NOTL",
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ANOTL,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
{
- name: "BSFQ",
- argLen: 1,
- asm: x86.ABSFQ,
+ name: "BSFQ",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSFQ,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
{
- name: "BSFL",
- argLen: 1,
- asm: x86.ABSFL,
+ name: "BSFL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSFL,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
{
- name: "BSFW",
- argLen: 1,
- asm: x86.ABSFW,
+ name: "BSFW",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSFW,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
{
- name: "BSRQ",
- argLen: 1,
- asm: x86.ABSRQ,
+ name: "BSRQ",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSRQ,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
{
- name: "BSRL",
- argLen: 1,
- asm: x86.ABSRL,
+ name: "BSRL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSRL,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
{
- name: "BSRW",
- argLen: 1,
- asm: x86.ABSRW,
+ name: "BSRW",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSRW,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2430,15 +5454,15 @@ var opcodeTable = [...]opInfo{
auxType: auxInt64,
argLen: 2,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ACMOVQEQ,
reg: regInfo{
inputs: []inputInfo{
- {1, 8589934592}, // FLAGS
- {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934593, // AX FLAGS
- outputs: []regMask{
- 65518, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
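The same retirement of FLAGS shows up on the consumer side here: the CMOV ops lose their explicit {1, 8589934592} FLAGS input, and below them SBB and the SETcc family lose their only input, just as the CMP and TEST producers earlier lost their FLAGS output. Flags become an implicit dependency: clobberFlags marks the ops that disturb them, and the flag-allocation pass recomputes flag values where needed rather than the register allocator spilling a "FLAGS register". In before/after form:

// before: inputs contain {1, 8589934592} // FLAGS; clobbers 8589934593 // AX FLAGS
// after:  flags are implicit: clobberFlags: true, clobbers 1 // AX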
@@ -2447,15 +5471,15 @@ var opcodeTable = [...]opInfo{
auxType: auxInt32,
argLen: 2,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ACMOVLEQ,
reg: regInfo{
inputs: []inputInfo{
- {1, 8589934592}, // FLAGS
- {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934593, // AX FLAGS
- outputs: []regMask{
- 65518, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2464,15 +5488,15 @@ var opcodeTable = [...]opInfo{
auxType: auxInt16,
argLen: 2,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ACMOVLEQ,
reg: regInfo{
inputs: []inputInfo{
- {1, 8589934592}, // FLAGS
- {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934593, // AX FLAGS
- outputs: []regMask{
- 65518, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2481,15 +5505,15 @@ var opcodeTable = [...]opInfo{
auxType: auxInt64,
argLen: 2,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ACMOVQNE,
reg: regInfo{
inputs: []inputInfo{
- {1, 8589934592}, // FLAGS
- {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934593, // AX FLAGS
- outputs: []regMask{
- 65518, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2498,15 +5522,15 @@ var opcodeTable = [...]opInfo{
auxType: auxInt32,
argLen: 2,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ACMOVLNE,
reg: regInfo{
inputs: []inputInfo{
- {1, 8589934592}, // FLAGS
- {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934593, // AX FLAGS
- outputs: []regMask{
- 65518, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2515,15 +5539,15 @@ var opcodeTable = [...]opInfo{
auxType: auxInt16,
argLen: 2,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ACMOVLNE,
reg: regInfo{
inputs: []inputInfo{
- {1, 8589934592}, // FLAGS
- {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934593, // AX FLAGS
- outputs: []regMask{
- 65518, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2531,14 +5555,14 @@ var opcodeTable = [...]opInfo{
name: "BSWAPQ",
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ABSWAPQ,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2546,14 +5570,14 @@ var opcodeTable = [...]opInfo{
name: "BSWAPL",
argLen: 1,
resultInArg0: true,
+ clobberFlags: true,
asm: x86.ABSWAPL,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2565,8 +5589,8 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
- outputs: []regMask{
- 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
},
@@ -2575,11 +5599,8 @@ var opcodeTable = [...]opInfo{
argLen: 1,
asm: x86.ASBBQ,
reg: regInfo{
- inputs: []inputInfo{
- {0, 8589934592}, // FLAGS
- },
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2588,11 +5609,8 @@ var opcodeTable = [...]opInfo{
argLen: 1,
asm: x86.ASBBL,
reg: regInfo{
- inputs: []inputInfo{
- {0, 8589934592}, // FLAGS
- },
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2601,11 +5619,8 @@ var opcodeTable = [...]opInfo{
argLen: 1,
asm: x86.ASETEQ,
reg: regInfo{
- inputs: []inputInfo{
- {0, 8589934592}, // FLAGS
- },
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2614,11 +5629,8 @@ var opcodeTable = [...]opInfo{
argLen: 1,
asm: x86.ASETNE,
reg: regInfo{
- inputs: []inputInfo{
- {0, 8589934592}, // FLAGS
- },
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2627,11 +5639,8 @@ var opcodeTable = [...]opInfo{
argLen: 1,
asm: x86.ASETLT,
reg: regInfo{
- inputs: []inputInfo{
- {0, 8589934592}, // FLAGS
- },
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2640,11 +5649,8 @@ var opcodeTable = [...]opInfo{
argLen: 1,
asm: x86.ASETLE,
reg: regInfo{
- inputs: []inputInfo{
- {0, 8589934592}, // FLAGS
- },
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2653,11 +5659,8 @@ var opcodeTable = [...]opInfo{
argLen: 1,
asm: x86.ASETGT,
reg: regInfo{
- inputs: []inputInfo{
- {0, 8589934592}, // FLAGS
- },
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2666,11 +5669,8 @@ var opcodeTable = [...]opInfo{
argLen: 1,
asm: x86.ASETGE,
reg: regInfo{
- inputs: []inputInfo{
- {0, 8589934592}, // FLAGS
- },
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2679,11 +5679,8 @@ var opcodeTable = [...]opInfo{
argLen: 1,
asm: x86.ASETCS,
reg: regInfo{
- inputs: []inputInfo{
- {0, 8589934592}, // FLAGS
- },
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2692,11 +5689,8 @@ var opcodeTable = [...]opInfo{
argLen: 1,
asm: x86.ASETLS,
reg: regInfo{
- inputs: []inputInfo{
- {0, 8589934592}, // FLAGS
- },
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2705,11 +5699,8 @@ var opcodeTable = [...]opInfo{
argLen: 1,
asm: x86.ASETHI,
reg: regInfo{
- inputs: []inputInfo{
- {0, 8589934592}, // FLAGS
- },
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2718,39 +5709,32 @@ var opcodeTable = [...]opInfo{
argLen: 1,
asm: x86.ASETCC,
reg: regInfo{
- inputs: []inputInfo{
- {0, 8589934592}, // FLAGS
- },
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
{
- name: "SETEQF",
- argLen: 1,
- asm: x86.ASETEQ,
+ name: "SETEQF",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ASETEQ,
reg: regInfo{
- inputs: []inputInfo{
- {0, 8589934592}, // FLAGS
- },
- clobbers: 8589934593, // AX FLAGS
- outputs: []regMask{
- 65518, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
{
- name: "SETNEF",
- argLen: 1,
- asm: x86.ASETNE,
+ name: "SETNEF",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ASETNE,
reg: regInfo{
- inputs: []inputInfo{
- {0, 8589934592}, // FLAGS
- },
- clobbers: 8589934593, // AX FLAGS
- outputs: []regMask{
- 65518, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2759,11 +5743,8 @@ var opcodeTable = [...]opInfo{
argLen: 1,
asm: x86.ASETPC,
reg: regInfo{
- inputs: []inputInfo{
- {0, 8589934592}, // FLAGS
- },
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2772,11 +5753,8 @@ var opcodeTable = [...]opInfo{
argLen: 1,
asm: x86.ASETPS,
reg: regInfo{
- inputs: []inputInfo{
- {0, 8589934592}, // FLAGS
- },
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2785,11 +5763,8 @@ var opcodeTable = [...]opInfo{
argLen: 1,
asm: x86.ASETHI,
reg: regInfo{
- inputs: []inputInfo{
- {0, 8589934592}, // FLAGS
- },
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2798,11 +5773,8 @@ var opcodeTable = [...]opInfo{
argLen: 1,
asm: x86.ASETCC,
reg: regInfo{
- inputs: []inputInfo{
- {0, 8589934592}, // FLAGS
- },
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2812,10 +5784,10 @@ var opcodeTable = [...]opInfo{
asm: x86.AMOVBQSX,
reg: regInfo{
inputs: []inputInfo{
- {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2825,10 +5797,10 @@ var opcodeTable = [...]opInfo{
asm: x86.AMOVBQZX,
reg: regInfo{
inputs: []inputInfo{
- {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2838,10 +5810,10 @@ var opcodeTable = [...]opInfo{
asm: x86.AMOVWQSX,
reg: regInfo{
inputs: []inputInfo{
- {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2851,10 +5823,10 @@ var opcodeTable = [...]opInfo{
asm: x86.AMOVWQZX,
reg: regInfo{
inputs: []inputInfo{
- {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2864,10 +5836,10 @@ var opcodeTable = [...]opInfo{
asm: x86.AMOVLQSX,
reg: regInfo{
inputs: []inputInfo{
- {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2877,10 +5849,10 @@ var opcodeTable = [...]opInfo{
asm: x86.AMOVLQZX,
reg: regInfo{
inputs: []inputInfo{
- {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2891,8 +5863,8 @@ var opcodeTable = [...]opInfo{
rematerializeable: true,
asm: x86.AMOVL,
reg: regInfo{
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2903,8 +5875,8 @@ var opcodeTable = [...]opInfo{
rematerializeable: true,
asm: x86.AMOVQ,
reg: regInfo{
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2916,8 +5888,8 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2929,8 +5901,8 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2942,8 +5914,8 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2955,8 +5927,8 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -2968,8 +5940,8 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
},
@@ -2981,8 +5953,8 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
},
@@ -2994,8 +5966,8 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
},
@@ -3007,8 +5979,8 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
},
@@ -3020,8 +5992,8 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
- outputs: []regMask{
- 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
},
@@ -3033,8 +6005,8 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
- outputs: []regMask{
- 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
},
@@ -3049,8 +6021,8 @@ var opcodeTable = [...]opInfo{
{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
- outputs: []regMask{
- 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
},
@@ -3059,12 +6031,13 @@ var opcodeTable = [...]opInfo{
auxType: auxSymOff,
argLen: 1,
rematerializeable: true,
+ asm: x86.ALEAQ,
reg: regInfo{
inputs: []inputInfo{
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -3077,8 +6050,8 @@ var opcodeTable = [...]opInfo{
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -3091,8 +6064,8 @@ var opcodeTable = [...]opInfo{
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -3105,8 +6078,8 @@ var opcodeTable = [...]opInfo{
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -3119,8 +6092,23 @@ var opcodeTable = [...]opInfo{
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LEAL",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ asm: x86.ALEAL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -3133,8 +6121,8 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -3147,8 +6135,8 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -3161,8 +6149,8 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -3175,8 +6163,8 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -3189,8 +6177,8 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -3203,8 +6191,8 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -3217,8 +6205,8 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -3279,8 +6267,8 @@ var opcodeTable = [...]opInfo{
inputs: []inputInfo{
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
},
@@ -3306,8 +6294,8 @@ var opcodeTable = [...]opInfo{
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -3321,8 +6309,8 @@ var opcodeTable = [...]opInfo{
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -3336,8 +6324,8 @@ var opcodeTable = [...]opInfo{
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -3351,8 +6339,8 @@ var opcodeTable = [...]opInfo{
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -3366,8 +6354,8 @@ var opcodeTable = [...]opInfo{
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -3381,8 +6369,8 @@ var opcodeTable = [...]opInfo{
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -3396,8 +6384,8 @@ var opcodeTable = [...]opInfo{
{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -3621,15 +6609,16 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "DUFFZERO",
- auxType: auxInt64,
- argLen: 3,
+ name: "DUFFZERO",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
reg: regInfo{
inputs: []inputInfo{
{0, 128}, // DI
{1, 65536}, // X0
},
- clobbers: 8589934720, // DI FLAGS
+ clobbers: 128, // DI
},
},
{
@@ -3638,8 +6627,8 @@ var opcodeTable = [...]opInfo{
argLen: 0,
rematerializeable: true,
reg: regInfo{
- outputs: []regMask{
- 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
},
@@ -3656,62 +6645,68 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "CALLstatic",
- auxType: auxSymOff,
- argLen: 1,
+ name: "CALLstatic",
+ auxType: auxSymOff,
+ argLen: 1,
+ clobberFlags: true,
reg: regInfo{
- clobbers: 12884901871, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 FLAGS
+ clobbers: 4294967279, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
{
- name: "CALLclosure",
- auxType: auxInt64,
- argLen: 3,
+ name: "CALLclosure",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
reg: regInfo{
inputs: []inputInfo{
{1, 4}, // DX
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 12884901871, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 FLAGS
+ clobbers: 4294967279, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
{
- name: "CALLdefer",
- auxType: auxInt64,
- argLen: 1,
+ name: "CALLdefer",
+ auxType: auxInt64,
+ argLen: 1,
+ clobberFlags: true,
reg: regInfo{
- clobbers: 12884901871, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 FLAGS
+ clobbers: 4294967279, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
{
- name: "CALLgo",
- auxType: auxInt64,
- argLen: 1,
+ name: "CALLgo",
+ auxType: auxInt64,
+ argLen: 1,
+ clobberFlags: true,
reg: regInfo{
- clobbers: 12884901871, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 FLAGS
+ clobbers: 4294967279, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
{
- name: "CALLinter",
- auxType: auxInt64,
- argLen: 2,
+ name: "CALLinter",
+ auxType: auxInt64,
+ argLen: 2,
+ clobberFlags: true,
reg: regInfo{
inputs: []inputInfo{
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 12884901871, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 FLAGS
+ clobbers: 4294967279, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
},
},
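// Editor's note (assumption-flagged sketch): the CALL* clobber mask shrinks
// from 12884901871 (0x2_FFFF_FFEF) to 4294967279 (0xFFFF_FFEF); the
// difference is exactly the FLAGS bit (1<<33), which now travels via
// clobberFlags instead of the mask. The remaining value is every
// allocatable register -- bits 0..31 minus bit 4 (SP) -- i.e. all GP
// registers except SP plus X0..X15, matching the comment:
//
//	const callClobbers = 1<<32 - 1 - 1<<4 // == 4294967279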
{
- name: "DUFFCOPY",
- auxType: auxInt64,
- argLen: 3,
+ name: "DUFFCOPY",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
reg: regInfo{
inputs: []inputInfo{
{0, 128}, // DI
{1, 64}, // SI
},
- clobbers: 8590000320, // SI DI X0 FLAGS
+ clobbers: 65728, // SI DI X0
},
},
{
@@ -3735,8 +6730,8 @@ var opcodeTable = [...]opInfo{
name: "LoweredGetG",
argLen: 1,
reg: regInfo{
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -3744,19 +6739,19 @@ var opcodeTable = [...]opInfo{
name: "LoweredGetClosurePtr",
argLen: 0,
reg: regInfo{
- outputs: []regMask{
- 4, // DX
+ outputs: []outputInfo{
+ {0, 4}, // DX
},
},
},
{
- name: "LoweredNilCheck",
- argLen: 2,
+ name: "LoweredNilCheck",
+ argLen: 2,
+ clobberFlags: true,
reg: regInfo{
inputs: []inputInfo{
{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- clobbers: 8589934592, // FLAGS
},
},
{
@@ -3765,10 +6760,23 @@ var opcodeTable = [...]opInfo{
asm: x86.AMOVQ,
reg: regInfo{
inputs: []inputInfo{
- {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVLconvert",
+ argLen: 2,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
- outputs: []regMask{
- 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
@@ -3805,25 +6813,2220 @@ var opcodeTable = [...]opInfo{
asm: arm.AADD,
reg: regInfo{
inputs: []inputInfo{
- {0, 31}, // R0 R1 R2 R3 SP
- {1, 31}, // R0 R1 R2 R3 SP
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
},
- outputs: []regMask{
- 31, // R0 R1 R2 R3 SP
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
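// Editor's note (hedged): the new ARM entries use the same bit-per-register
// scheme with the numbering implied by the comments: bits 0-9 are R0-R9,
// bit 10 is g, bit 12 is R12, bit 13 is SP, and bits 16-31 are F0-F15
// (bit 11, presumably R11, the assembler scratch register, never appears
// in an allocatable mask). Hence the recurring values:
//
//	6143       == 0x17FF      // R0-R9, g, R12 (inputs may read g)
//	5119       == 0x13FF      // R0-R9, R12    (outputs must avoid g)
//	14335      == 0x37FF      // the above plus SP, e.g. for ADDconst
//	4294901760 == 0xFFFF0000  // F0-F15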
{
name: "ADDconst",
- auxType: auxSymOff,
+ auxType: auxInt32,
argLen: 1,
asm: arm.AADD,
reg: regInfo{
inputs: []inputInfo{
- {0, 31}, // R0 R1 R2 R3 SP
+ {0, 14335}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUB",
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSB",
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MUL",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "HMUL",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "HMULU",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AMULLU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "DIV",
+ argLen: 2,
+ clobberFlags: true,
+ asm: arm.ADIV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "DIVU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: arm.ADIVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MOD",
+ argLen: 2,
+ clobberFlags: true,
+ asm: arm.AMOD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MODU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: arm.AMODU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDS",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDSconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADC",
+ argLen: 3,
+ commutative: true,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADCconst",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBS",
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBSconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBSconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SBC",
+ argLen: 3,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SBCconst",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSCconst",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MULLU",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AMULLU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MULA",
+ argLen: 3,
+ asm: arm.AMULA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDF",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AADDF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "ADDD",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "SUBF",
+ argLen: 2,
+ asm: arm.ASUBF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "SUBD",
+ argLen: 2,
+ asm: arm.ASUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MULF",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AMULF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MULD",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "DIVF",
+ argLen: 2,
+ asm: arm.ADIVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "DIVD",
+ argLen: 2,
+ asm: arm.ADIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ANDconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ORconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "XORconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "BIC",
+ argLen: 2,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "BICconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MVN",
+ argLen: 1,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "NEGF",
+ argLen: 1,
+ asm: arm.ANEGF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "NEGD",
+ argLen: 1,
+ asm: arm.ANEGD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "SQRTD",
+ argLen: 1,
+ asm: arm.ASQRTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "SLL",
+ argLen: 2,
+ asm: arm.ASLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SLLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ASLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SRL",
+ argLen: 2,
+ asm: arm.ASRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SRLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ASRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SRA",
+ argLen: 2,
+ asm: arm.ASRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SRAconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ASRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SRRconst",
+ auxType: auxInt32,
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ANDshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ANDshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ANDshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ORshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ORshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ORshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "XORshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "XORshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "XORshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "BICshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "BICshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "BICshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MVNshiftLL",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MVNshiftRL",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MVNshiftRA",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADCshiftLL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADCshiftRL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADCshiftRA",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SBCshiftLL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SBCshiftRL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SBCshiftRA",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSCshiftLL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSCshiftRL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSCshiftRA",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDSshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDSshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDSshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBSshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBSshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBSshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBSshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBSshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBSshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDshiftLLreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDshiftRLreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDshiftRAreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBshiftLLreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBshiftRLreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBshiftRAreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBshiftLLreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBshiftRLreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBshiftRAreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ANDshiftLLreg",
+ argLen: 3,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ANDshiftRLreg",
+ argLen: 3,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ANDshiftRAreg",
+ argLen: 3,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ORshiftLLreg",
+ argLen: 3,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ORshiftRLreg",
+ argLen: 3,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ORshiftRAreg",
+ argLen: 3,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "XORshiftLLreg",
+ argLen: 3,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "XORshiftRLreg",
+ argLen: 3,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "XORshiftRAreg",
+ argLen: 3,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "BICshiftLLreg",
+ argLen: 3,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "BICshiftRLreg",
+ argLen: 3,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "BICshiftRAreg",
+ argLen: 3,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MVNshiftLLreg",
+ argLen: 2,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
},
- outputs: []regMask{
- 31, // R0 R1 R2 R3 SP
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MVNshiftRLreg",
+ argLen: 2,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MVNshiftRAreg",
+ argLen: 2,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADCshiftLLreg",
+ argLen: 4,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADCshiftRLreg",
+ argLen: 4,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADCshiftRAreg",
+ argLen: 4,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SBCshiftLLreg",
+ argLen: 4,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SBCshiftRLreg",
+ argLen: 4,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SBCshiftRAreg",
+ argLen: 4,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSCshiftLLreg",
+ argLen: 4,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSCshiftRLreg",
+ argLen: 4,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSCshiftRAreg",
+ argLen: 4,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDSshiftLLreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDSshiftRLreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "ADDSshiftRAreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBSshiftLLreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBSshiftRLreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SUBSshiftRAreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBSshiftLLreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBSshiftRLreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "RSBSshiftRAreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 0},
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "CMP",
+ argLen: 2,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ },
+ },
+ {
+ name: "CMPconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ },
+ },
+ {
+ name: "CMN",
+ argLen: 2,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ },
+ },
+ {
+ name: "CMNconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ },
+ },
+ {
+ name: "TST",
+ argLen: 2,
+ commutative: true,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ },
+ },
+ {
+ name: "TSTconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ },
+ },
+ {
+ name: "TEQ",
+ argLen: 2,
+ commutative: true,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ },
+ },
+ {
+ name: "TEQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ },
+ },
+ {
+ name: "CMPF",
+ argLen: 2,
+ asm: arm.ACMPF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CMPD",
+ argLen: 2,
+ asm: arm.ACMPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CMPshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ },
+ },
+ {
+ name: "CMPshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ },
+ },
+ {
+ name: "CMPshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ },
+ },
+ {
+ name: "CMPshiftLLreg",
+ argLen: 3,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "CMPshiftRLreg",
+ argLen: 3,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "CMPshiftRAreg",
+ argLen: 3,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "CMPF0",
+ argLen: 1,
+ asm: arm.ACMPF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CMPD0",
+ argLen: 1,
+ asm: arm.ACMPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
},
@@ -3834,22 +9037,103 @@ var opcodeTable = [...]opInfo{
rematerializeable: true,
asm: arm.AMOVW,
reg: regInfo{
- outputs: []regMask{
- 31, // R0 R1 R2 R3 SP
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
- name: "CMP",
- argLen: 2,
- asm: arm.ACMP,
+ name: "MOVFconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm.AMOVF,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm.AMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVWaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294975488}, // SP SB
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294981631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MOVBUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294981631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294981631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MOVHUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm.AMOVHU,
reg: regInfo{
inputs: []inputInfo{
- {0, 31}, // R0 R1 R2 R3 SP
- {1, 31}, // R0 R1 R2 R3 SP
+ {0, 4294981631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
},
- outputs: []regMask{
- 32, // FLAGS
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
@@ -3860,10 +9144,62 @@ var opcodeTable = [...]opInfo{
asm: arm.AMOVW,
reg: regInfo{
inputs: []inputInfo{
- {0, 31}, // R0 R1 R2 R3 SP
+ {0, 4294981631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MOVFload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294981631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294981631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: arm.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {0, 4294981631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
},
- outputs: []regMask{
- 31, // R0 R1 R2 R3 SP
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: arm.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {0, 4294981631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
},
},
},
@@ -3874,31 +9210,4092 @@ var opcodeTable = [...]opInfo{
asm: arm.AMOVW,
reg: regInfo{
inputs: []inputInfo{
- {0, 31}, // R0 R1 R2 R3 SP
- {1, 31}, // R0 R1 R2 R3 SP
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {0, 4294981631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVFstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: arm.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294981631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
},
{
- name: "CALLstatic",
+ name: "MOVDstore",
auxType: auxSymOff,
+ argLen: 3,
+ asm: arm.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294981631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx",
+ argLen: 3,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {0, 4294981631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MOVWloadshiftLL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {0, 4294981631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MOVWloadshiftRL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {0, 4294981631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MOVWloadshiftRA",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {0, 4294981631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx",
+ argLen: 4,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {2, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {0, 4294981631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreshiftLL",
+ auxType: auxInt32,
+ argLen: 4,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {2, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {0, 4294981631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreshiftRL",
+ auxType: auxInt32,
+ argLen: 4,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {2, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {0, 4294981631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreshiftRA",
+ auxType: auxInt32,
+ argLen: 4,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {2, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ {0, 4294981631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: arm.AMOVBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MOVBUreg",
+ argLen: 1,
+ asm: arm.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: arm.AMOVHS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MOVHUreg",
+ argLen: 1,
+ asm: arm.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MOVWnop",
+ argLen: 1,
+ resultInArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MOVWF",
+ argLen: 1,
+ asm: arm.AMOVWF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVWD",
+ argLen: 1,
+ asm: arm.AMOVWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVWUF",
+ argLen: 1,
+ asm: arm.AMOVWF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVWUD",
+ argLen: 1,
+ asm: arm.AMOVWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVFW",
+ argLen: 1,
+ asm: arm.AMOVFW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MOVDW",
+ argLen: 1,
+ asm: arm.AMOVDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MOVFWU",
+ argLen: 1,
+ asm: arm.AMOVFW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MOVDWU",
+ argLen: 1,
+ asm: arm.AMOVDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "MOVFD",
+ argLen: 1,
+ asm: arm.AMOVFD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVDF",
+ argLen: 1,
+ asm: arm.AMOVDF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CMOVWHSconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultInArg0: true,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "CMOVWLSconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultInArg0: true,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "SRAcond",
+ argLen: 3,
+ asm: arm.ASRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxSymOff,
+ argLen: 1,
+ clobberFlags: true,
+ reg: regInfo{
+ clobbers: 4294907903, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 128}, // R7
+ {0, 13311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP
+ },
+ clobbers: 4294907903, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "CALLdefer",
+ auxType: auxInt64,
+ argLen: 1,
+ clobberFlags: true,
+ reg: regInfo{
+ clobbers: 4294907903, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "CALLgo",
+ auxType: auxInt64,
+ argLen: 1,
+ clobberFlags: true,
+ reg: regInfo{
+ clobbers: 4294907903, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxInt64,
+ argLen: 2,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ clobbers: 4294907903, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ },
+ },
+ {
+ name: "Equal",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "NotEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "LessThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "LessEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "GreaterThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "GreaterEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "LessThanU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "LessEqualU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "GreaterThanU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "GreaterEqualU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "DUFFZERO",
+ auxType: auxInt64,
+ argLen: 3,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 1}, // R0
+ },
+ clobbers: 2, // R1
+ },
+ },
+ {
+ name: "DUFFCOPY",
+ auxType: auxInt64,
+ argLen: 3,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 2}, // R1
+ },
+ clobbers: 7, // R0 R1 R2
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt64,
+ argLen: 4,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ clobbers: 2, // R1
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt64,
+ argLen: 4,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 2}, // R1
+ {2, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ clobbers: 6, // R1 R2
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 128}, // R7
+ },
+ },
+ },
+ {
+ name: "MOVWconvert",
+ argLen: 2,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12
+ },
+ outputs: []outputInfo{
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ },
+ },
+ },
+ {
+ name: "FlagEQ",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT_ULT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT_UGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT_UGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT_ULT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "InvertFlags",
+ argLen: 1,
+ reg: regInfo{},
+ },
+
+ {
+ name: "ADD",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "ADDconst",
+ auxType: auxInt64,
argLen: 1,
+ asm: arm64.AADD,
reg: regInfo{
- clobbers: 15, // R0 R1 R2 R3
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g SP
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "SUB",
+ argLen: 2,
+ asm: arm64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "SUBconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "MUL",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "MULW",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AMULW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "MULH",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.ASMULH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "UMULH",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AUMULH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "MULL",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.ASMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "UMULL",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AUMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "DIV",
+ argLen: 2,
+ asm: arm64.ASDIV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "UDIV",
+ argLen: 2,
+ asm: arm64.AUDIV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "DIVW",
+ argLen: 2,
+ asm: arm64.ASDIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "UDIVW",
+ argLen: 2,
+ asm: arm64.AUDIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "MOD",
+ argLen: 2,
+ asm: arm64.AREM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "UMOD",
+ argLen: 2,
+ asm: arm64.AUREM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "MODW",
+ argLen: 2,
+ asm: arm64.AREMW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "UMODW",
+ argLen: 2,
+ asm: arm64.AUREMW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "FADDS",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ {1, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FADDD",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ {1, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FSUBS",
+ argLen: 2,
+ asm: arm64.AFSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ {1, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FSUBD",
+ argLen: 2,
+ asm: arm64.AFSUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ {1, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FMULS",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFMULS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ {1, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FMULD",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ {1, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FDIVS",
+ argLen: 2,
+ asm: arm64.AFDIVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ {1, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FDIVD",
+ argLen: 2,
+ asm: arm64.AFDIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ {1, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "ANDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "ORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "XORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "BIC",
+ argLen: 2,
+ asm: arm64.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "BICconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "MVN",
+ argLen: 1,
+ asm: arm64.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "NEG",
+ argLen: 1,
+ asm: arm64.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "FNEGS",
+ argLen: 1,
+ asm: arm64.AFNEGS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FNEGD",
+ argLen: 1,
+ asm: arm64.AFNEGD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FSQRTD",
+ argLen: 1,
+ asm: arm64.AFSQRTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "SLL",
+ argLen: 2,
+ asm: arm64.ALSL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "SLLconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ALSL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "SRL",
+ argLen: 2,
+ asm: arm64.ALSR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "SRLconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ALSR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "SRA",
+ argLen: 2,
+ asm: arm64.AASR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "SRAconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AASR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "RORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AROR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "RORWconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ARORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "CMP",
+ argLen: 2,
+ asm: arm64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ },
+ },
+ {
+ name: "CMPconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ },
+ },
+ {
+ name: "CMPW",
+ argLen: 2,
+ asm: arm64.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ },
+ },
+ {
+ name: "CMPWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm64.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ },
+ },
+ {
+ name: "CMN",
+ argLen: 2,
+ asm: arm64.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ },
+ },
+ {
+ name: "CMNconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ },
+ },
+ {
+ name: "CMNW",
+ argLen: 2,
+ asm: arm64.ACMNW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ },
+ },
+ {
+ name: "CMNWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm64.ACMNW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ },
+ },
+ {
+ name: "FCMPS",
+ argLen: 2,
+ asm: arm64.AFCMPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ {1, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FCMPD",
+ argLen: 2,
+ asm: arm64.AFCMPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ {1, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "ADDshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "ADDshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "ADDshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "SUBshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "SUBshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "SUBshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "ANDshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "ANDshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "ANDshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "ORshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "ORshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "ORshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "XORshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "XORshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "XORshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "BICshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "BICshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "BICshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "CMPshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ },
+ },
+ {
+ name: "CMPshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ },
+ },
+ {
+ name: "CMPshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxInt64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "FMOVSconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FMOVDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "MOVDaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018964258816}, // SP SB
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019232432127}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g SP SB
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "MOVBUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm64.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019232432127}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g SP SB
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019232432127}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g SP SB
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "MOVHUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm64.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019232432127}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g SP SB
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019232432127}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g SP SB
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "MOVWUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm64.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019232432127}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g SP SB
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019232432127}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g SP SB
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "FMOVSload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019232432127}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g SP SB
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FMOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019232432127}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g SP SB
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {0, 4611686019232432127}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g SP SB
+ },
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {0, 4611686019232432127}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {0, 4611686019232432127}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g SP SB
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ {0, 4611686019232432127}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g SP SB
+ },
+ },
+ },
+ {
+ name: "FMOVSstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ {0, 4611686019232432127}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g SP SB
+ },
+ },
+ },
+ {
+ name: "FMOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ {0, 4611686019232432127}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g SP SB
+ },
+ },
+ },
+ {
+ name: "MOVBstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019232432127}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g SP SB
+ },
+ },
+ },
+ {
+ name: "MOVHstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019232432127}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019232432127}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g SP SB
+ },
+ },
+ },
+ {
+ name: "MOVDstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019232432127}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g SP SB
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "MOVBUreg",
+ argLen: 1,
+ asm: arm64.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "MOVHUreg",
+ argLen: 1,
+ asm: arm64.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "MOVWUreg",
+ argLen: 1,
+ asm: arm64.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "MOVDreg",
+ argLen: 1,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "MOVDnop",
+ argLen: 1,
+ resultInArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "SCVTFWS",
+ argLen: 1,
+ asm: arm64.ASCVTFWS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "SCVTFWD",
+ argLen: 1,
+ asm: arm64.ASCVTFWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "UCVTFWS",
+ argLen: 1,
+ asm: arm64.AUCVTFWS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "UCVTFWD",
+ argLen: 1,
+ asm: arm64.AUCVTFWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "SCVTFS",
+ argLen: 1,
+ asm: arm64.ASCVTFS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "SCVTFD",
+ argLen: 1,
+ asm: arm64.ASCVTFD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "UCVTFS",
+ argLen: 1,
+ asm: arm64.AUCVTFS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "UCVTFD",
+ argLen: 1,
+ asm: arm64.AUCVTFD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FCVTZSSW",
+ argLen: 1,
+ asm: arm64.AFCVTZSSW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "FCVTZSDW",
+ argLen: 1,
+ asm: arm64.AFCVTZSDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "FCVTZUSW",
+ argLen: 1,
+ asm: arm64.AFCVTZUSW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "FCVTZUDW",
+ argLen: 1,
+ asm: arm64.AFCVTZUDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "FCVTZSS",
+ argLen: 1,
+ asm: arm64.AFCVTZSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "FCVTZSD",
+ argLen: 1,
+ asm: arm64.AFCVTZSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "FCVTZUS",
+ argLen: 1,
+ asm: arm64.AFCVTZUS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "FCVTZUD",
+ argLen: 1,
+ asm: arm64.AFCVTZUD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "FCVTSD",
+ argLen: 1,
+ asm: arm64.AFCVTSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FCVTDS",
+ argLen: 1,
+ asm: arm64.AFCVTDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "CSELULT",
+ argLen: 3,
+ asm: arm64.ACSEL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ {1, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "CSELULT0",
+ argLen: 2,
+ asm: arm64.ACSEL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxSymOff,
+ argLen: 1,
+ clobberFlags: true,
+ reg: regInfo{
+ clobbers: 288230375346143231, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 67108864}, // R26
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 SP
+ },
+ clobbers: 288230375346143231, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ {
+ name: "CALLdefer",
+ auxType: auxInt64,
+ argLen: 1,
+ clobberFlags: true,
+ reg: regInfo{
+ clobbers: 288230375346143231, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ {
+ name: "CALLgo",
+ auxType: auxInt64,
+ argLen: 1,
+ clobberFlags: true,
+ reg: regInfo{
+ clobbers: 288230375346143231, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxInt64,
+ argLen: 2,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ clobbers: 288230375346143231, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ },
+ },
+ {
+ name: "Equal",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "NotEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "LessThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "LessEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "GreaterThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "GreaterEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "LessThanU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "LessEqualU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "GreaterThanU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "GreaterEqualU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "DUFFZERO",
+ auxType: auxInt64,
+ argLen: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ clobbers: 65536, // R16
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65536}, // R16
+ {1, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ clobbers: 65536, // R16
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt64,
+ argLen: 4,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 131072}, // R17
+ {1, 65536}, // R16
+ {2, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ clobbers: 196608, // R16 R17
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 67108864}, // R26
+ },
+ },
+ },
+ {
+ name: "MOVDconvert",
+ argLen: 2,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268173311}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g
+ },
+ outputs: []outputInfo{
+ {0, 133955583}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26
+ },
+ },
+ },
+ {
+ name: "FlagEQ",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT_ULT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT_UGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT_UGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT_ULT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "InvertFlags",
+ argLen: 1,
+ reg: regInfo{},
+ },
+
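(Editorial aside, not part of the diff: the integer in each inputInfo/outputInfo pair above is a register-mask bitmap. Bit i stands for register number i in the architecture's register numbering, and the trailing // comment is the generated expansion of that set; for ARM64, bit 18 (R18, the platform register) is left out of the allocatable masks and bit 27 is the g register. The following is a minimal self-contained sketch of that decoding, assuming an abbreviated ARM64 general-purpose name table for illustration only; it is not code from this commit, and it does not cover the SP/SB or floating-point bits, which sit higher in the word.)

package main

import "fmt"

// regNames is an assumed, abbreviated ARM64 numbering for this sketch:
// index 0..17 are R0..R17, index 18 is R18 (platform register, never in
// the allocatable masks above), 19..26 are R19..R26, and index 27 is g.
var regNames = []string{
	"R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
	"R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
	"R16", "R17", "R18", "R19", "R20", "R21", "R22", "R23",
	"R24", "R25", "R26", "g",
}

// decodeMask expands a register-mask bitmap into the names of the
// registers whose bits are set, mirroring the generated // comments.
func decodeMask(mask uint64) []string {
	var out []string
	for i, name := range regNames {
		if mask&(uint64(1)<<uint(i)) != 0 {
			out = append(out, name)
		}
	}
	return out
}

func main() {
	// 133955583 is the general-purpose output mask used throughout the
	// ARM64 entries: R0-R26 minus R18, without g.
	fmt.Println(decodeMask(133955583))
	// 268173311 is the corresponding input mask, which also admits g.
	fmt.Println(decodeMask(268173311))
}

(The same scheme explains the larger constants: 4611686018964258816 sets only the SP and SB bits, and 288230375077969920 sets the F0-F27 block, so a clobbers value such as 288230375346143231 is simply the union of the integer-plus-g mask and the floating-point mask.)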
+ {
+ name: "ADD",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ADDconst",
+ auxType: auxSymOff,
+ argLen: 1,
+ asm: ppc64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FADD",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AFADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FADDS",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AFADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "SUB",
+ argLen: 2,
+ asm: ppc64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FSUB",
+ argLen: 2,
+ asm: ppc64.AFSUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FSUBS",
+ argLen: 2,
+ asm: ppc64.AFSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "MULLD",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AMULLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULLW",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AMULLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULHD",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AMULHD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULHW",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AMULHW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULHDU",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AMULHDU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULHWU",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AMULHWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMUL",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AFMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FMULS",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AFMULS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "SRAD",
+ argLen: 2,
+ asm: ppc64.ASRAD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRAW",
+ argLen: 2,
+ asm: ppc64.ASRAW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRD",
+ argLen: 2,
+ asm: ppc64.ASRD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRW",
+ argLen: 2,
+ asm: ppc64.ASRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SLD",
+ argLen: 2,
+ asm: ppc64.ASLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SLW",
+ argLen: 2,
+ asm: ppc64.ASLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ADDconstForCarry",
+ auxType: auxInt16,
+ argLen: 1,
+ asm: ppc64.AADDC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ clobbers: 1073741824, // R31
+ },
+ },
+ {
+ name: "MaskIfNotCarry",
+ argLen: 1,
+ asm: ppc64.AADDME,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRADconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASRAD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRAWconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASRAW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASRD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRWconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SLDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SLWconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FDIV",
+ argLen: 2,
+ asm: ppc64.AFDIV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FDIVS",
+ argLen: 2,
+ asm: ppc64.AFDIVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "DIVD",
+ argLen: 2,
+ asm: ppc64.ADIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "DIVW",
+ argLen: 2,
+ asm: ppc64.ADIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "DIVDU",
+ argLen: 2,
+ asm: ppc64.ADIVDU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "DIVWU",
+ argLen: 2,
+ asm: ppc64.ADIVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FCTIDZ",
+ argLen: 1,
+ asm: ppc64.AFCTIDZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FCTIWZ",
+ argLen: 1,
+ asm: ppc64.AFCTIWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FCFID",
+ argLen: 1,
+ asm: ppc64.AFCFID,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FRSP",
+ argLen: 1,
+ asm: ppc64.AFRSP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "Xf2i64",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "Xi2f64",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ANDN",
+ argLen: 2,
+ asm: ppc64.AANDN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ORN",
+ argLen: 2,
+ asm: ppc64.AORN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "EQV",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AEQV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "NEG",
+ argLen: 1,
+ asm: ppc64.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FNEG",
+ argLen: 1,
+ asm: ppc64.AFNEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FSQRT",
+ argLen: 1,
+ asm: ppc64.AFSQRT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FSQRTS",
+ argLen: 1,
+ asm: ppc64.AFSQRTS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "ORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "XORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ANDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ clobberFlags: true,
+ asm: ppc64.AANDCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: ppc64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVBZreg",
+ argLen: 1,
+ asm: ppc64.AMOVBZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: ppc64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHZreg",
+ argLen: 1,
+ asm: ppc64.AMOVHZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: ppc64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWZreg",
+ argLen: 1,
+ asm: ppc64.AMOVWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: ppc64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVBZload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: ppc64.AMOVBZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: ppc64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHZload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: ppc64.AMOVHZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: ppc64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWZload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: ppc64.AMOVWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVDload",
+ argLen: 2,
+ asm: ppc64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FMOVSload",
+ argLen: 2,
+ asm: ppc64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: ppc64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: ppc64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: ppc64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: ppc64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVSstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: ppc64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVBstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: ppc64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: ppc64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: ppc64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 3}, // SP SB
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxInt64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWconst",
+ auxType: auxInt32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: ppc64.AMOVW,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: ppc64.AFMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FMOVSconst",
+ auxType: auxFloat32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: ppc64.AFMOVS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FCMPU",
+ argLen: 2,
+ asm: ppc64.AFCMPU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 288230371856744448}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "CMP",
+ argLen: 2,
+ asm: ppc64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPU",
+ argLen: 2,
+ asm: ppc64.ACMPU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPW",
+ argLen: 2,
+ asm: ppc64.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPWU",
+ argLen: 2,
+ asm: ppc64.ACMPWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPUconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ACMPU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPWUconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.ACMPWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "Equal",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "NotEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
},
},
{
name: "LessThan",
argLen: 1,
reg: regInfo{
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FLessThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LessEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FLessEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "GreaterThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FGreaterThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "GreaterEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FGreaterEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1024}, // R11
+ },
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ clobbers: 1073741824, // R31
+ },
+ },
+ {
+ name: "MOVDconvert",
+ argLen: 2,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866815}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxSymOff,
+ argLen: 1,
+ clobberFlags: true,
+ reg: regInfo{
+ clobbers: 288230372393611260, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1024}, // R11
+ {0, 536866813}, // SP R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ clobbers: 288230372393611260, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ {
+ name: "CALLdefer",
+ auxType: auxInt64,
+ argLen: 1,
+ clobberFlags: true,
+ reg: regInfo{
+ clobbers: 288230372393611260, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ {
+ name: "CALLgo",
+ auxType: auxInt64,
+ argLen: 1,
+ clobberFlags: true,
+ reg: regInfo{
+ clobbers: 288230372393611260, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxInt64,
+ argLen: 2,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ clobbers: 288230372393611260, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
+ reg: regInfo{
inputs: []inputInfo{
- {0, 32}, // FLAGS
+ {0, 4}, // R3
+ {1, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
- outputs: []regMask{
- 31, // R0 R1 R2 R3 SP
+ clobbers: 4, // R3
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt64,
+ argLen: 4,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R3
+ {1, 8}, // R4
+ {2, 536866812}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
},
+ clobbers: 12, // R3 R4
},
},
+ {
+ name: "InvertFlags",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagEQ",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
{
name: "Add8",
@@ -5390,11 +14787,115 @@ var opcodeTable = [...]opInfo{
argLen: 2,
generic: true,
},
+ {
+ name: "Int64Make",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Int64Hi",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Int64Lo",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Add32carry",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Add32withcarry",
+ argLen: 3,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Sub32carry",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Sub32withcarry",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "Mul32uhilo",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Signmask",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Zeromask",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32Uto32F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32Uto64F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32Fto32U",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64Fto32U",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Select0",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Select1",
+ argLen: 1,
+ generic: true,
+ },
}
func (o Op) Asm() obj.As { return opcodeTable[o].asm }
func (o Op) String() string { return opcodeTable[o].name }
+var registers386 = [...]Register{
+ {0, "AX"},
+ {1, "CX"},
+ {2, "DX"},
+ {3, "BX"},
+ {4, "SP"},
+ {5, "BP"},
+ {6, "SI"},
+ {7, "DI"},
+ {8, "X0"},
+ {9, "X1"},
+ {10, "X2"},
+ {11, "X3"},
+ {12, "X4"},
+ {13, "X5"},
+ {14, "X6"},
+ {15, "X7"},
+ {16, "SB"},
+}
+var gpRegMask386 = regMask(239)
+var fpRegMask386 = regMask(65280)
+var framepointerReg386 = int8(5)
var registersAMD64 = [...]Register{
{0, "AX"},
{1, "CX"},
@@ -5429,14 +14930,176 @@ var registersAMD64 = [...]Register{
{30, "X14"},
{31, "X15"},
{32, "SB"},
- {33, "FLAGS"},
}
+var gpRegMaskAMD64 = regMask(65519)
+var fpRegMaskAMD64 = regMask(4294901760)
+var framepointerRegAMD64 = int8(5)
var registersARM = [...]Register{
{0, "R0"},
{1, "R1"},
{2, "R2"},
{3, "R3"},
- {4, "SP"},
- {5, "FLAGS"},
- {6, "SB"},
+ {4, "R4"},
+ {5, "R5"},
+ {6, "R6"},
+ {7, "R7"},
+ {8, "R8"},
+ {9, "R9"},
+ {10, "g"},
+ {11, "R11"},
+ {12, "R12"},
+ {13, "SP"},
+ {14, "R14"},
+ {15, "R15"},
+ {16, "F0"},
+ {17, "F1"},
+ {18, "F2"},
+ {19, "F3"},
+ {20, "F4"},
+ {21, "F5"},
+ {22, "F6"},
+ {23, "F7"},
+ {24, "F8"},
+ {25, "F9"},
+ {26, "F10"},
+ {27, "F11"},
+ {28, "F12"},
+ {29, "F13"},
+ {30, "F14"},
+ {31, "F15"},
+ {32, "SB"},
+}
+var gpRegMaskARM = regMask(5119)
+var fpRegMaskARM = regMask(4294901760)
+var framepointerRegARM = int8(-1)
+var registersARM64 = [...]Register{
+ {0, "R0"},
+ {1, "R1"},
+ {2, "R2"},
+ {3, "R3"},
+ {4, "R4"},
+ {5, "R5"},
+ {6, "R6"},
+ {7, "R7"},
+ {8, "R8"},
+ {9, "R9"},
+ {10, "R10"},
+ {11, "R11"},
+ {12, "R12"},
+ {13, "R13"},
+ {14, "R14"},
+ {15, "R15"},
+ {16, "R16"},
+ {17, "R17"},
+ {18, "R18"},
+ {19, "R19"},
+ {20, "R20"},
+ {21, "R21"},
+ {22, "R22"},
+ {23, "R23"},
+ {24, "R24"},
+ {25, "R25"},
+ {26, "R26"},
+ {27, "g"},
+ {28, "R29"},
+ {29, "SP"},
+ {30, "F0"},
+ {31, "F1"},
+ {32, "F2"},
+ {33, "F3"},
+ {34, "F4"},
+ {35, "F5"},
+ {36, "F6"},
+ {37, "F7"},
+ {38, "F8"},
+ {39, "F9"},
+ {40, "F10"},
+ {41, "F11"},
+ {42, "F12"},
+ {43, "F13"},
+ {44, "F14"},
+ {45, "F15"},
+ {46, "F16"},
+ {47, "F17"},
+ {48, "F18"},
+ {49, "F19"},
+ {50, "F20"},
+ {51, "F21"},
+ {52, "F22"},
+ {53, "F23"},
+ {54, "F24"},
+ {55, "F25"},
+ {56, "F26"},
+ {57, "F27"},
+ {58, "F28"},
+ {59, "F29"},
+ {60, "F30"},
+ {61, "F31"},
+ {62, "SB"},
+}
+var gpRegMaskARM64 = regMask(133955583)
+var fpRegMaskARM64 = regMask(288230375077969920)
+var framepointerRegARM64 = int8(-1)
+var registersPPC64 = [...]Register{
+ {0, "SP"},
+ {1, "SB"},
+ {2, "R3"},
+ {3, "R4"},
+ {4, "R5"},
+ {5, "R6"},
+ {6, "R7"},
+ {7, "R8"},
+ {8, "R9"},
+ {9, "R10"},
+ {10, "R11"},
+ {11, "R12"},
+ {12, "R13"},
+ {13, "R14"},
+ {14, "R15"},
+ {15, "R16"},
+ {16, "R17"},
+ {17, "R18"},
+ {18, "R19"},
+ {19, "R20"},
+ {20, "R21"},
+ {21, "R22"},
+ {22, "R23"},
+ {23, "R24"},
+ {24, "R25"},
+ {25, "R26"},
+ {26, "R27"},
+ {27, "R28"},
+ {28, "R29"},
+ {29, "g"},
+ {30, "R31"},
+ {31, "F0"},
+ {32, "F1"},
+ {33, "F2"},
+ {34, "F3"},
+ {35, "F4"},
+ {36, "F5"},
+ {37, "F6"},
+ {38, "F7"},
+ {39, "F8"},
+ {40, "F9"},
+ {41, "F10"},
+ {42, "F11"},
+ {43, "F12"},
+ {44, "F13"},
+ {45, "F14"},
+ {46, "F15"},
+ {47, "F16"},
+ {48, "F17"},
+ {49, "F18"},
+ {50, "F19"},
+ {51, "F20"},
+ {52, "F21"},
+ {53, "F22"},
+ {54, "F23"},
+ {55, "F24"},
+ {56, "F25"},
+ {57, "F26"},
}
+var gpRegMaskPPC64 = regMask(536866812)
+var fpRegMaskPPC64 = regMask(288230371856744448)
+var framepointerRegPPC64 = int8(0)
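
Each {n, mask} pair and clobbers value in the opcode table above is a bitmask over the per-architecture register array that follows: bit i selects the register whose Num is i, which is how the inline register-name comments were produced. The FP registers occupy the high bits (F0 is bit 31 on PPC64), which is why fpRegMaskPPC64 is such a large constant. A standalone sketch, assuming nothing beyond the registersPPC64 table above, that reproduces those comments for the PPC64 GP masks:

    package main

    import "fmt"

    // Names for PPC64 register numbers 0..30, copied from the
    // registersPPC64 table above (F0.. start at index 31).
    var ppc64GP = []string{
        "SP", "SB", "R3", "R4", "R5", "R6", "R7", "R8", "R9", "R10",
        "R11", "R12", "R13", "R14", "R15", "R16", "R17", "R18", "R19", "R20",
        "R21", "R22", "R23", "R24", "R25", "R26", "R27", "R28", "R29", "g",
        "R31",
    }

    // decode lists the registers selected by mask m, one per set bit.
    func decode(m uint64) (names []string) {
        for i, name := range ppc64GP {
            if m>>uint(i)&1 != 0 {
                names = append(names, name)
            }
        }
        return names
    }

    func main() {
        fmt.Println(decode(536866812)) // gpRegMaskPPC64: R3-R12, R14-R29
        fmt.Println(decode(536866815)) // input masks: SP and SB plus the above
    }

Note that bit 12 (R13), bit 29 (g), and bit 30 (R31) are absent from the GP output mask, matching the reserved registers in the table comments.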
diff --git a/src/cmd/compile/internal/ssa/opt.go b/src/cmd/compile/internal/ssa/opt.go
index caf8cca1e6..f211488cd7 100644
--- a/src/cmd/compile/internal/ssa/opt.go
+++ b/src/cmd/compile/internal/ssa/opt.go
@@ -11,4 +11,7 @@ func opt(f *Func) {
func dec(f *Func) {
applyRewrite(f, rewriteBlockdec, rewriteValuedec)
+ if f.Config.IntSize == 4 && f.Config.arch != "amd64p32" {
+ applyRewrite(f, rewriteBlockdec64, rewriteValuedec64)
+ }
}
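
On 32-bit targets (Config.IntSize == 4) this extra pass applies the new dec64 rules, which split every 64-bit value into 32-bit halves using the generic ops added above (Int64Make, Int64Hi, Int64Lo, Add32carry, Add32withcarry, and friends). As a sketch of the arithmetic behind a decomposed 64-bit add, using only plain uint32 operations:

    // Sketch only: what (Add64 x y) lowers to after dec64 on a
    // 32-bit target. Add32carry produces the low half plus a carry;
    // Add32withcarry folds that carry into the high halves.
    type int64parts struct{ hi, lo uint32 }

    func add64(x, y int64parts) int64parts {
        lo := x.lo + y.lo
        var carry uint32
        if lo < x.lo { // unsigned overflow of the low halves
            carry = 1
        }
        hi := x.hi + y.hi + carry
        return int64parts{hi, lo}
    }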
diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go
index 1eecd49c40..e2c7fe1067 100644
--- a/src/cmd/compile/internal/ssa/regalloc.go
+++ b/src/cmd/compile/internal/ssa/regalloc.go
@@ -206,6 +206,7 @@ type regAllocState struct {
numRegs register
SPReg register
SBReg register
+ GReg register
allocatable regMask
// for each block, its primary predecessor.
@@ -332,14 +333,14 @@ func (s *regAllocState) assignReg(r register, v *Value, c *Value) {
s.f.setHome(c, &s.registers[r])
}
-// allocReg chooses a register for v from the set of registers in mask.
+// allocReg chooses a register from the set of registers in mask.
// If there is no unused register, a Value will be kicked out of
// a register to make room.
-func (s *regAllocState) allocReg(v *Value, mask regMask) register {
+func (s *regAllocState) allocReg(mask regMask, v *Value) register {
mask &= s.allocatable
mask &^= s.nospill
if mask == 0 {
- s.f.Fatalf("no register available")
+ s.f.Fatalf("no register available for %s", v)
}
// Pick an unused register if one is available.
@@ -400,7 +401,7 @@ func (s *regAllocState) allocValToReg(v *Value, mask regMask, nospill bool, line
}
// Allocate a register.
- r := s.allocReg(v, mask)
+ r := s.allocReg(mask, v)
// Allocate v to the new register.
var c *Value
@@ -438,28 +439,76 @@ func (s *regAllocState) allocValToReg(v *Value, mask regMask, nospill bool, line
func (s *regAllocState) init(f *Func) {
s.f = f
s.registers = f.Config.registers
- s.numRegs = register(len(s.registers))
- if s.numRegs > noRegister || s.numRegs > register(unsafe.Sizeof(regMask(0))*8) {
- panic("too many registers")
+ if nr := len(s.registers); nr == 0 || nr > int(noRegister) || nr > int(unsafe.Sizeof(regMask(0))*8) {
+ s.f.Fatalf("bad number of registers: %d", nr)
+ } else {
+ s.numRegs = register(nr)
}
+ // Locate SP, SB, and g registers.
+ s.SPReg = noRegister
+ s.SBReg = noRegister
+ s.GReg = noRegister
for r := register(0); r < s.numRegs; r++ {
- if s.registers[r].Name() == "SP" {
+ switch s.registers[r].Name() {
+ case "SP":
s.SPReg = r
- }
- if s.registers[r].Name() == "SB" {
+ case "SB":
s.SBReg = r
+ case "g":
+ s.GReg = r
+ }
+ }
+ // Make sure we found all required registers.
+ switch noRegister {
+ case s.SPReg:
+ s.f.Fatalf("no SP register found")
+ case s.SBReg:
+ s.f.Fatalf("no SB register found")
+ case s.GReg:
+ if f.Config.hasGReg {
+ s.f.Fatalf("no g register found")
}
}
// Figure out which registers we're allowed to use.
- s.allocatable = regMask(1)<<s.numRegs - 1
+ s.allocatable = s.f.Config.gpRegMask | s.f.Config.fpRegMask
s.allocatable &^= 1 << s.SPReg
s.allocatable &^= 1 << s.SBReg
- if s.f.Config.ctxt.Framepointer_enabled {
- s.allocatable &^= 1 << 5 // BP
+ if s.f.Config.hasGReg {
+ s.allocatable &^= 1 << s.GReg
+ }
+ if s.f.Config.ctxt.Framepointer_enabled && s.f.Config.FPReg >= 0 {
+ s.allocatable &^= 1 << uint(s.f.Config.FPReg)
}
if s.f.Config.ctxt.Flag_dynlink {
- s.allocatable &^= 1 << 15 // R15
+ switch s.f.Config.arch {
+ case "amd64":
+ s.allocatable &^= 1 << 15 // R15
+ case "arm":
+ s.allocatable &^= 1 << 9 // R9
+ case "arm64":
+ // nothing to do?
+ case "386":
+ // nothing to do.
+ // Note that for Flag_shared (position independent code)
+ // we do need to be careful, but that carefulness is hidden
+ // in the rewrite rules so we always have a free register
+ // available for global load/stores. See gen/386.rules (search for Flag_shared).
+ default:
+ s.f.Config.fe.Unimplementedf(0, "arch %s not implemented", s.f.Config.arch)
+ }
+ }
+ if s.f.Config.nacl {
+ switch s.f.Config.arch {
+ case "arm":
+ s.allocatable &^= 1 << 9 // R9 is "thread pointer" on nacl/arm
+ case "amd64p32":
+ s.allocatable &^= 1 << 5 // BP - reserved for nacl
+ s.allocatable &^= 1 << 15 // R15 - reserved for nacl
+ }
+ }
+ if s.f.Config.use387 {
+ s.allocatable &^= 1 << 15 // X7 disallowed (one 387 register is used as scratch space during SSE->387 generation in ../x86/387.go)
}
s.regs = make([]regState, s.numRegs)
@@ -467,11 +516,13 @@ func (s *regAllocState) init(f *Func) {
s.orig = make([]*Value, f.NumValues())
for _, b := range f.Blocks {
for _, v := range b.Values {
- if !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() {
+ if !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && !v.Type.IsTuple() {
s.values[v.ID].needReg = true
s.values[v.ID].rematerializeable = v.rematerializeable()
s.orig[v.ID] = v
}
+ // Note: needReg is false for values returning Tuple types.
+ // Instead, we mark the corresponding Selects as needReg.
}
}
s.computeLive()
@@ -564,9 +615,9 @@ func (s *regAllocState) setState(regs []endReg) {
func (s *regAllocState) compatRegs(t Type) regMask {
var m regMask
if t.IsFloat() || t == TypeInt128 {
- m = 0xffff << 16 // X0-X15
+ m = s.f.Config.fpRegMask
} else {
- m = 0xffff << 0 // AX-R15
+ m = s.f.Config.gpRegMask
}
return m & s.allocatable
}
@@ -786,6 +837,9 @@ func (s *regAllocState) regalloc(f *Func) {
if phiRegs[i] != noRegister {
continue
}
+ if s.f.Config.use387 && v.Type.IsFloat() {
+ continue // 387 can't handle floats in registers between blocks
+ }
m := s.compatRegs(v.Type) &^ phiUsed &^ s.used
if m != 0 {
r := pickReg(m)
@@ -915,6 +969,7 @@ func (s *regAllocState) regalloc(f *Func) {
if s.f.pass.debug > regDebug {
fmt.Printf(" processing %s\n", v.LongString())
}
+ regspec := opcodeTable[v.Op].reg
if v.Op == OpPhi {
f.Fatalf("phi %s not at start of block", v)
}
@@ -930,6 +985,28 @@ func (s *regAllocState) regalloc(f *Func) {
s.advanceUses(v)
continue
}
+ if v.Op == OpSelect0 || v.Op == OpSelect1 {
+ if s.values[v.ID].needReg {
+ var i = 0
+ if v.Op == OpSelect1 {
+ i = 1
+ }
+ s.assignReg(register(s.f.getHome(v.Args[0].ID).(LocPair)[i].(*Register).Num), v, v)
+ }
+ b.Values = append(b.Values, v)
+ s.advanceUses(v)
+ goto issueSpill
+ }
+ if v.Op == OpGetG && s.f.Config.hasGReg {
+ // use hardware g register
+ if s.regs[s.GReg].v != nil {
+ s.freeReg(s.GReg) // kick out the old value
+ }
+ s.assignReg(s.GReg, v, v)
+ b.Values = append(b.Values, v)
+ s.advanceUses(v)
+ goto issueSpill
+ }
if v.Op == OpArg {
// Args are "pre-spilled" values. We don't allocate
// any register here. We just set up the spill pointer to
@@ -957,7 +1034,6 @@ func (s *regAllocState) regalloc(f *Func) {
b.Values = append(b.Values, v)
continue
}
- regspec := opcodeTable[v.Op].reg
if len(regspec.inputs) == 0 && len(regspec.outputs) == 0 {
// No register allocation required (or none specified yet)
s.freeRegs(regspec.clobbers)
@@ -1002,10 +1078,6 @@ func (s *regAllocState) regalloc(f *Func) {
args = append(args[:0], v.Args...)
for _, i := range regspec.inputs {
mask := i.regs
- if mask == flagRegMask {
- // TODO: remove flag input from regspec.inputs.
- continue
- }
if mask&s.values[args[i.idx].ID].regs == 0 {
// Need a new register for the input.
mask &= s.allocatable
@@ -1115,49 +1187,73 @@ func (s *regAllocState) regalloc(f *Func) {
// Dump any registers which will be clobbered
s.freeRegs(regspec.clobbers)
- // Pick register for output.
- if s.values[v.ID].needReg {
- mask := regspec.outputs[0] & s.allocatable
- if opcodeTable[v.Op].resultInArg0 {
- if !opcodeTable[v.Op].commutative {
- // Output must use the same register as input 0.
- r := register(s.f.getHome(args[0].ID).(*Register).Num)
- mask = regMask(1) << r
- } else {
- // Output must use the same register as input 0 or 1.
- r0 := register(s.f.getHome(args[0].ID).(*Register).Num)
- r1 := register(s.f.getHome(args[1].ID).(*Register).Num)
- // Check r0 and r1 for desired output register.
- found := false
- for _, r := range dinfo[idx].out {
- if (r == r0 || r == r1) && (mask&^s.used)>>r&1 != 0 {
- mask = regMask(1) << r
- found = true
- if r == r1 {
- args[0], args[1] = args[1], args[0]
+ // Pick registers for outputs.
+ {
+ outRegs := [2]register{noRegister, noRegister}
+ var used regMask
+ for _, out := range regspec.outputs {
+ mask := out.regs & s.allocatable &^ used
+ if mask == 0 {
+ continue
+ }
+ if opcodeTable[v.Op].resultInArg0 && out.idx == len(regspec.outputs)-1 {
+ if !opcodeTable[v.Op].commutative {
+ // Output must use the same register as input 0.
+ r := register(s.f.getHome(args[0].ID).(*Register).Num)
+ mask = regMask(1) << r
+ } else {
+ // Output must use the same register as input 0 or 1.
+ r0 := register(s.f.getHome(args[0].ID).(*Register).Num)
+ r1 := register(s.f.getHome(args[1].ID).(*Register).Num)
+ // Check r0 and r1 for desired output register.
+ found := false
+ for _, r := range dinfo[idx].out {
+ if (r == r0 || r == r1) && (mask&^s.used)>>r&1 != 0 {
+ mask = regMask(1) << r
+ found = true
+ if r == r1 {
+ args[0], args[1] = args[1], args[0]
+ }
+ break
}
- break
+ }
+ if !found {
+ // Neither are desired, pick r0.
+ mask = regMask(1) << r0
}
}
- if !found {
- // Neither are desired, pick r0.
- mask = regMask(1) << r0
+ }
+ for _, r := range dinfo[idx].out {
+ if r != noRegister && (mask&^s.used)>>r&1 != 0 {
+ // Desired register is allowed and unused.
+ mask = regMask(1) << r
+ break
}
}
- }
- for _, r := range dinfo[idx].out {
- if r != noRegister && (mask&^s.used)>>r&1 != 0 {
- // Desired register is allowed and unused.
- mask = regMask(1) << r
- break
+ // Avoid registers we're saving for other values.
+ if mask&^desired.avoid != 0 {
+ mask &^= desired.avoid
+ }
+ r := s.allocReg(mask, v)
+ outRegs[out.idx] = r
+ used |= regMask(1) << r
+ }
+ // Record register choices
+ if v.Type.IsTuple() {
+ var outLocs LocPair
+ if r := outRegs[0]; r != noRegister {
+ outLocs[0] = &s.registers[r]
+ }
+ if r := outRegs[1]; r != noRegister {
+ outLocs[1] = &s.registers[r]
+ }
+ s.f.setHome(v, outLocs)
+ // Note that subsequent SelectX instructions will do the assignReg calls.
+ } else {
+ if r := outRegs[0]; r != noRegister {
+ s.assignReg(r, v, v)
}
}
- // Avoid registers we're saving for other values.
- if mask&^desired.avoid != 0 {
- mask &^= desired.avoid
- }
- r := s.allocReg(v, mask)
- s.assignReg(r, v, v)
}
// Issue the Value itself.
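
This block is the heart of the new multi-output support: for a tuple-typed value the allocator picks up to two registers, records them as a LocPair, and leaves the per-result assignReg calls to the later Select0/Select1 values, as noted in the comments above. The value-level semantics being modeled, as a plain-Go sketch assuming Mul32uhilo's (hi, lo) result order:

    // Sketch: a two-result op and its selects. In SSA form this is
    //   t  = Mul32uhilo x y
    //   hi = Select0 t
    //   lo = Select1 t
    // and regalloc gives t a LocPair rather than a single register.
    func mul32uhilo(x, y uint32) (hi, lo uint32) {
        t := uint64(x) * uint64(y)
        return uint32(t >> 32), uint32(t)
    }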
@@ -1176,6 +1272,7 @@ func (s *regAllocState) regalloc(f *Func) {
// f()
// }
// It would be good to have both spill and restore inside the IF.
+ issueSpill:
if s.values[v.ID].needReg {
spill := b.NewValue1(v.Line, OpStoreReg, v.Type, v)
s.setOrig(spill, v)
@@ -1194,9 +1291,10 @@ func (s *regAllocState) regalloc(f *Func) {
if s.f.pass.debug > regDebug {
fmt.Printf(" processing control %s\n", v.LongString())
}
- // TODO: regspec for block control values, instead of using
- // register set from the control op's output.
- s.allocValToReg(v, opcodeTable[v.Op].reg.outputs[0], false, b.Line)
+ // We assume that a control input can be passed in any
+ // type-compatible register. If this turns out not to be true,
+ // we'll need to introduce a regspec for a block's control value.
+ s.allocValToReg(v, s.compatRegs(v.Type), false, b.Line)
// Remove this use from the uses list.
vi := &s.values[v.ID]
u := vi.uses
@@ -1208,6 +1306,11 @@ func (s *regAllocState) regalloc(f *Func) {
s.freeUseRecords = u
}
+ // Spill any values that can't live across basic block boundaries.
+ if s.f.Config.use387 {
+ s.freeRegs(s.f.Config.fpRegMask)
+ }
+
// If we are approaching a merge point and we are the primary
// predecessor of it, find live values that we use soon after
// the merge point and promote them to registers now.
@@ -1231,6 +1334,9 @@ func (s *regAllocState) regalloc(f *Func) {
continue
}
v := s.orig[vid]
+ if s.f.Config.use387 && v.Type.IsFloat() {
+ continue // 387 can't handle floats in registers between blocks
+ }
m := s.compatRegs(v.Type) &^ s.used
if m&^desired.avoid != 0 {
m &^= desired.avoid
@@ -1769,6 +1875,9 @@ func (e *edgeState) processDest(loc Location, vid ID, splice **Value) bool {
(*splice).Uses--
*splice = occupant.c
occupant.c.Uses++
+ if occupant.c.Op == OpStoreReg {
+ e.s.lateSpillUse(vid)
+ }
}
// Note: if splice==nil then c will appear dead. This is
// non-SSA formed code, so be careful after this pass not to run
@@ -2010,6 +2119,8 @@ func (e *edgeState) findRegFor(typ Type) Location {
return nil
}
+// rematerializeable reports whether the register allocator should recompute
+// a value instead of spilling/restoring it.
func (v *Value) rematerializeable() bool {
if !opcodeTable[v.Op].rematerializeable {
return false
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index 61d4234c65..1c4815dc81 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -205,6 +205,11 @@ func is32Bit(n int64) bool {
return n == int64(int32(n))
}
+// is16Bit reports whether n can be represented as a signed 16 bit integer.
+func is16Bit(n int64) bool {
+ return n == int64(int16(n))
+}
+
// b2i translates a boolean value to 0 or 1 for assigning to auxInt.
func b2i(b bool) int64 {
if b {
@@ -254,6 +259,19 @@ func isSamePtr(p1, p2 *Value) bool {
return false
}
+// moveSize returns the number of bytes an aligned MOV instruction moves.
+func moveSize(align int64, c *Config) int64 {
+ switch {
+ case align%8 == 0 && c.IntSize == 8:
+ return 8
+ case align%4 == 0:
+ return 4
+ case align%2 == 0:
+ return 2
+ }
+ return 1
+}
+
// mergePoint finds a block among a's blocks which dominates b and is itself
// dominated by all of a's blocks. Returns nil if it can't find one.
// Might return nil even if one does exist.
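
Both helpers added to rewrite.go above feed the new back ends: is16Bit guards immediates that must fit a signed 16-bit field, and moveSize picks the widest aligned move for Zero/Move lowering. A quick standalone check of the expected behavior, with intSize standing in for the Config.IntSize field the real moveSize consults:

    package main

    import "fmt"

    // Copies of the helpers above, pulled out so they can be
    // exercised without the rest of the ssa package.
    func is16Bit(n int64) bool { return n == int64(int16(n)) }

    func moveSize(align int64, intSize int64) int64 {
        switch {
        case align%8 == 0 && intSize == 8:
            return 8
        case align%4 == 0:
            return 4
        case align%2 == 0:
            return 2
        }
        return 1
    }

    func main() {
        fmt.Println(is16Bit(32767), is16Bit(-32768), is16Bit(32768)) // true true false
        fmt.Println(moveSize(8, 8), moveSize(8, 4), moveSize(6, 8))  // 8 4 2
    }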
diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go
new file mode 100644
index 0000000000..eda37e787e
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewrite386.go
@@ -0,0 +1,14822 @@
+// autogenerated from gen/386.rules: do not edit!
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "math"
+
+var _ = math.MinInt8 // in case not otherwise used
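
The rest of the file is a generated dispatcher: one case per op, each delegating to a per-op rewrite function produced from gen/386.rules by rulegen.go. For orientation, a hypothetical example in the generated style, for a rule shaped like (ADDLconst [c] (ADDLconst [d] x)) -> (ADDLconst [c+d] x); the exact body for any given op is whatever rulegen.go emits:

    // Illustrative sketch only. Each rule becomes one for{} block:
    // match the operand tree, rewrite v in place, and return true;
    // otherwise break out and fall through to the next rule,
    // returning false when nothing matched.
    func rewriteValue386_Op386ADDLconst_sketch(v *Value, config *Config) bool {
        for {
            c := v.AuxInt
            v_0 := v.Args[0]
            if v_0.Op != Op386ADDLconst {
                break
            }
            d := v_0.AuxInt
            x := v_0.Args[0]
            v.reset(Op386ADDLconst)
            v.AuxInt = c + d
            v.AddArg(x)
            return true
        }
        return false
    }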
+func rewriteValue386(v *Value, config *Config) bool {
+ switch v.Op {
+ case Op386ADCL:
+ return rewriteValue386_Op386ADCL(v, config)
+ case Op386ADDL:
+ return rewriteValue386_Op386ADDL(v, config)
+ case Op386ADDLcarry:
+ return rewriteValue386_Op386ADDLcarry(v, config)
+ case Op386ADDLconst:
+ return rewriteValue386_Op386ADDLconst(v, config)
+ case Op386ANDL:
+ return rewriteValue386_Op386ANDL(v, config)
+ case Op386ANDLconst:
+ return rewriteValue386_Op386ANDLconst(v, config)
+ case Op386CMPB:
+ return rewriteValue386_Op386CMPB(v, config)
+ case Op386CMPBconst:
+ return rewriteValue386_Op386CMPBconst(v, config)
+ case Op386CMPL:
+ return rewriteValue386_Op386CMPL(v, config)
+ case Op386CMPLconst:
+ return rewriteValue386_Op386CMPLconst(v, config)
+ case Op386CMPW:
+ return rewriteValue386_Op386CMPW(v, config)
+ case Op386CMPWconst:
+ return rewriteValue386_Op386CMPWconst(v, config)
+ case Op386LEAL:
+ return rewriteValue386_Op386LEAL(v, config)
+ case Op386LEAL1:
+ return rewriteValue386_Op386LEAL1(v, config)
+ case Op386LEAL2:
+ return rewriteValue386_Op386LEAL2(v, config)
+ case Op386LEAL4:
+ return rewriteValue386_Op386LEAL4(v, config)
+ case Op386LEAL8:
+ return rewriteValue386_Op386LEAL8(v, config)
+ case Op386MOVBLSX:
+ return rewriteValue386_Op386MOVBLSX(v, config)
+ case Op386MOVBLSXload:
+ return rewriteValue386_Op386MOVBLSXload(v, config)
+ case Op386MOVBLZX:
+ return rewriteValue386_Op386MOVBLZX(v, config)
+ case Op386MOVBload:
+ return rewriteValue386_Op386MOVBload(v, config)
+ case Op386MOVBloadidx1:
+ return rewriteValue386_Op386MOVBloadidx1(v, config)
+ case Op386MOVBstore:
+ return rewriteValue386_Op386MOVBstore(v, config)
+ case Op386MOVBstoreconst:
+ return rewriteValue386_Op386MOVBstoreconst(v, config)
+ case Op386MOVBstoreconstidx1:
+ return rewriteValue386_Op386MOVBstoreconstidx1(v, config)
+ case Op386MOVBstoreidx1:
+ return rewriteValue386_Op386MOVBstoreidx1(v, config)
+ case Op386MOVLload:
+ return rewriteValue386_Op386MOVLload(v, config)
+ case Op386MOVLloadidx1:
+ return rewriteValue386_Op386MOVLloadidx1(v, config)
+ case Op386MOVLloadidx4:
+ return rewriteValue386_Op386MOVLloadidx4(v, config)
+ case Op386MOVLstore:
+ return rewriteValue386_Op386MOVLstore(v, config)
+ case Op386MOVLstoreconst:
+ return rewriteValue386_Op386MOVLstoreconst(v, config)
+ case Op386MOVLstoreconstidx1:
+ return rewriteValue386_Op386MOVLstoreconstidx1(v, config)
+ case Op386MOVLstoreconstidx4:
+ return rewriteValue386_Op386MOVLstoreconstidx4(v, config)
+ case Op386MOVLstoreidx1:
+ return rewriteValue386_Op386MOVLstoreidx1(v, config)
+ case Op386MOVLstoreidx4:
+ return rewriteValue386_Op386MOVLstoreidx4(v, config)
+ case Op386MOVSDconst:
+ return rewriteValue386_Op386MOVSDconst(v, config)
+ case Op386MOVSDload:
+ return rewriteValue386_Op386MOVSDload(v, config)
+ case Op386MOVSDloadidx1:
+ return rewriteValue386_Op386MOVSDloadidx1(v, config)
+ case Op386MOVSDloadidx8:
+ return rewriteValue386_Op386MOVSDloadidx8(v, config)
+ case Op386MOVSDstore:
+ return rewriteValue386_Op386MOVSDstore(v, config)
+ case Op386MOVSDstoreidx1:
+ return rewriteValue386_Op386MOVSDstoreidx1(v, config)
+ case Op386MOVSDstoreidx8:
+ return rewriteValue386_Op386MOVSDstoreidx8(v, config)
+ case Op386MOVSSconst:
+ return rewriteValue386_Op386MOVSSconst(v, config)
+ case Op386MOVSSload:
+ return rewriteValue386_Op386MOVSSload(v, config)
+ case Op386MOVSSloadidx1:
+ return rewriteValue386_Op386MOVSSloadidx1(v, config)
+ case Op386MOVSSloadidx4:
+ return rewriteValue386_Op386MOVSSloadidx4(v, config)
+ case Op386MOVSSstore:
+ return rewriteValue386_Op386MOVSSstore(v, config)
+ case Op386MOVSSstoreidx1:
+ return rewriteValue386_Op386MOVSSstoreidx1(v, config)
+ case Op386MOVSSstoreidx4:
+ return rewriteValue386_Op386MOVSSstoreidx4(v, config)
+ case Op386MOVWLSX:
+ return rewriteValue386_Op386MOVWLSX(v, config)
+ case Op386MOVWLSXload:
+ return rewriteValue386_Op386MOVWLSXload(v, config)
+ case Op386MOVWLZX:
+ return rewriteValue386_Op386MOVWLZX(v, config)
+ case Op386MOVWload:
+ return rewriteValue386_Op386MOVWload(v, config)
+ case Op386MOVWloadidx1:
+ return rewriteValue386_Op386MOVWloadidx1(v, config)
+ case Op386MOVWloadidx2:
+ return rewriteValue386_Op386MOVWloadidx2(v, config)
+ case Op386MOVWstore:
+ return rewriteValue386_Op386MOVWstore(v, config)
+ case Op386MOVWstoreconst:
+ return rewriteValue386_Op386MOVWstoreconst(v, config)
+ case Op386MOVWstoreconstidx1:
+ return rewriteValue386_Op386MOVWstoreconstidx1(v, config)
+ case Op386MOVWstoreconstidx2:
+ return rewriteValue386_Op386MOVWstoreconstidx2(v, config)
+ case Op386MOVWstoreidx1:
+ return rewriteValue386_Op386MOVWstoreidx1(v, config)
+ case Op386MOVWstoreidx2:
+ return rewriteValue386_Op386MOVWstoreidx2(v, config)
+ case Op386MULL:
+ return rewriteValue386_Op386MULL(v, config)
+ case Op386MULLconst:
+ return rewriteValue386_Op386MULLconst(v, config)
+ case Op386NEGL:
+ return rewriteValue386_Op386NEGL(v, config)
+ case Op386NOTL:
+ return rewriteValue386_Op386NOTL(v, config)
+ case Op386ORL:
+ return rewriteValue386_Op386ORL(v, config)
+ case Op386ORLconst:
+ return rewriteValue386_Op386ORLconst(v, config)
+ case Op386ROLBconst:
+ return rewriteValue386_Op386ROLBconst(v, config)
+ case Op386ROLLconst:
+ return rewriteValue386_Op386ROLLconst(v, config)
+ case Op386ROLWconst:
+ return rewriteValue386_Op386ROLWconst(v, config)
+ case Op386SARB:
+ return rewriteValue386_Op386SARB(v, config)
+ case Op386SARBconst:
+ return rewriteValue386_Op386SARBconst(v, config)
+ case Op386SARL:
+ return rewriteValue386_Op386SARL(v, config)
+ case Op386SARLconst:
+ return rewriteValue386_Op386SARLconst(v, config)
+ case Op386SARW:
+ return rewriteValue386_Op386SARW(v, config)
+ case Op386SARWconst:
+ return rewriteValue386_Op386SARWconst(v, config)
+ case Op386SBBL:
+ return rewriteValue386_Op386SBBL(v, config)
+ case Op386SBBLcarrymask:
+ return rewriteValue386_Op386SBBLcarrymask(v, config)
+ case Op386SETA:
+ return rewriteValue386_Op386SETA(v, config)
+ case Op386SETAE:
+ return rewriteValue386_Op386SETAE(v, config)
+ case Op386SETB:
+ return rewriteValue386_Op386SETB(v, config)
+ case Op386SETBE:
+ return rewriteValue386_Op386SETBE(v, config)
+ case Op386SETEQ:
+ return rewriteValue386_Op386SETEQ(v, config)
+ case Op386SETG:
+ return rewriteValue386_Op386SETG(v, config)
+ case Op386SETGE:
+ return rewriteValue386_Op386SETGE(v, config)
+ case Op386SETL:
+ return rewriteValue386_Op386SETL(v, config)
+ case Op386SETLE:
+ return rewriteValue386_Op386SETLE(v, config)
+ case Op386SETNE:
+ return rewriteValue386_Op386SETNE(v, config)
+ case Op386SHLL:
+ return rewriteValue386_Op386SHLL(v, config)
+ case Op386SHRB:
+ return rewriteValue386_Op386SHRB(v, config)
+ case Op386SHRL:
+ return rewriteValue386_Op386SHRL(v, config)
+ case Op386SHRW:
+ return rewriteValue386_Op386SHRW(v, config)
+ case Op386SUBL:
+ return rewriteValue386_Op386SUBL(v, config)
+ case Op386SUBLcarry:
+ return rewriteValue386_Op386SUBLcarry(v, config)
+ case Op386SUBLconst:
+ return rewriteValue386_Op386SUBLconst(v, config)
+ case Op386XORL:
+ return rewriteValue386_Op386XORL(v, config)
+ case Op386XORLconst:
+ return rewriteValue386_Op386XORLconst(v, config)
+ case OpAdd16:
+ return rewriteValue386_OpAdd16(v, config)
+ case OpAdd32:
+ return rewriteValue386_OpAdd32(v, config)
+ case OpAdd32F:
+ return rewriteValue386_OpAdd32F(v, config)
+ case OpAdd32carry:
+ return rewriteValue386_OpAdd32carry(v, config)
+ case OpAdd32withcarry:
+ return rewriteValue386_OpAdd32withcarry(v, config)
+ case OpAdd64F:
+ return rewriteValue386_OpAdd64F(v, config)
+ case OpAdd8:
+ return rewriteValue386_OpAdd8(v, config)
+ case OpAddPtr:
+ return rewriteValue386_OpAddPtr(v, config)
+ case OpAddr:
+ return rewriteValue386_OpAddr(v, config)
+ case OpAnd16:
+ return rewriteValue386_OpAnd16(v, config)
+ case OpAnd32:
+ return rewriteValue386_OpAnd32(v, config)
+ case OpAnd8:
+ return rewriteValue386_OpAnd8(v, config)
+ case OpAndB:
+ return rewriteValue386_OpAndB(v, config)
+ case OpBswap32:
+ return rewriteValue386_OpBswap32(v, config)
+ case OpClosureCall:
+ return rewriteValue386_OpClosureCall(v, config)
+ case OpCom16:
+ return rewriteValue386_OpCom16(v, config)
+ case OpCom32:
+ return rewriteValue386_OpCom32(v, config)
+ case OpCom8:
+ return rewriteValue386_OpCom8(v, config)
+ case OpConst16:
+ return rewriteValue386_OpConst16(v, config)
+ case OpConst32:
+ return rewriteValue386_OpConst32(v, config)
+ case OpConst32F:
+ return rewriteValue386_OpConst32F(v, config)
+ case OpConst64F:
+ return rewriteValue386_OpConst64F(v, config)
+ case OpConst8:
+ return rewriteValue386_OpConst8(v, config)
+ case OpConstBool:
+ return rewriteValue386_OpConstBool(v, config)
+ case OpConstNil:
+ return rewriteValue386_OpConstNil(v, config)
+ case OpConvert:
+ return rewriteValue386_OpConvert(v, config)
+ case OpCvt32Fto32:
+ return rewriteValue386_OpCvt32Fto32(v, config)
+ case OpCvt32Fto64F:
+ return rewriteValue386_OpCvt32Fto64F(v, config)
+ case OpCvt32to32F:
+ return rewriteValue386_OpCvt32to32F(v, config)
+ case OpCvt32to64F:
+ return rewriteValue386_OpCvt32to64F(v, config)
+ case OpCvt64Fto32:
+ return rewriteValue386_OpCvt64Fto32(v, config)
+ case OpCvt64Fto32F:
+ return rewriteValue386_OpCvt64Fto32F(v, config)
+ case OpDeferCall:
+ return rewriteValue386_OpDeferCall(v, config)
+ case OpDiv16:
+ return rewriteValue386_OpDiv16(v, config)
+ case OpDiv16u:
+ return rewriteValue386_OpDiv16u(v, config)
+ case OpDiv32:
+ return rewriteValue386_OpDiv32(v, config)
+ case OpDiv32F:
+ return rewriteValue386_OpDiv32F(v, config)
+ case OpDiv32u:
+ return rewriteValue386_OpDiv32u(v, config)
+ case OpDiv64F:
+ return rewriteValue386_OpDiv64F(v, config)
+ case OpDiv8:
+ return rewriteValue386_OpDiv8(v, config)
+ case OpDiv8u:
+ return rewriteValue386_OpDiv8u(v, config)
+ case OpEq16:
+ return rewriteValue386_OpEq16(v, config)
+ case OpEq32:
+ return rewriteValue386_OpEq32(v, config)
+ case OpEq32F:
+ return rewriteValue386_OpEq32F(v, config)
+ case OpEq64F:
+ return rewriteValue386_OpEq64F(v, config)
+ case OpEq8:
+ return rewriteValue386_OpEq8(v, config)
+ case OpEqB:
+ return rewriteValue386_OpEqB(v, config)
+ case OpEqPtr:
+ return rewriteValue386_OpEqPtr(v, config)
+ case OpGeq16:
+ return rewriteValue386_OpGeq16(v, config)
+ case OpGeq16U:
+ return rewriteValue386_OpGeq16U(v, config)
+ case OpGeq32:
+ return rewriteValue386_OpGeq32(v, config)
+ case OpGeq32F:
+ return rewriteValue386_OpGeq32F(v, config)
+ case OpGeq32U:
+ return rewriteValue386_OpGeq32U(v, config)
+ case OpGeq64F:
+ return rewriteValue386_OpGeq64F(v, config)
+ case OpGeq8:
+ return rewriteValue386_OpGeq8(v, config)
+ case OpGeq8U:
+ return rewriteValue386_OpGeq8U(v, config)
+ case OpGetClosurePtr:
+ return rewriteValue386_OpGetClosurePtr(v, config)
+ case OpGetG:
+ return rewriteValue386_OpGetG(v, config)
+ case OpGoCall:
+ return rewriteValue386_OpGoCall(v, config)
+ case OpGreater16:
+ return rewriteValue386_OpGreater16(v, config)
+ case OpGreater16U:
+ return rewriteValue386_OpGreater16U(v, config)
+ case OpGreater32:
+ return rewriteValue386_OpGreater32(v, config)
+ case OpGreater32F:
+ return rewriteValue386_OpGreater32F(v, config)
+ case OpGreater32U:
+ return rewriteValue386_OpGreater32U(v, config)
+ case OpGreater64F:
+ return rewriteValue386_OpGreater64F(v, config)
+ case OpGreater8:
+ return rewriteValue386_OpGreater8(v, config)
+ case OpGreater8U:
+ return rewriteValue386_OpGreater8U(v, config)
+ case OpHmul16:
+ return rewriteValue386_OpHmul16(v, config)
+ case OpHmul16u:
+ return rewriteValue386_OpHmul16u(v, config)
+ case OpHmul32:
+ return rewriteValue386_OpHmul32(v, config)
+ case OpHmul32u:
+ return rewriteValue386_OpHmul32u(v, config)
+ case OpHmul8:
+ return rewriteValue386_OpHmul8(v, config)
+ case OpHmul8u:
+ return rewriteValue386_OpHmul8u(v, config)
+ case OpInterCall:
+ return rewriteValue386_OpInterCall(v, config)
+ case OpIsInBounds:
+ return rewriteValue386_OpIsInBounds(v, config)
+ case OpIsNonNil:
+ return rewriteValue386_OpIsNonNil(v, config)
+ case OpIsSliceInBounds:
+ return rewriteValue386_OpIsSliceInBounds(v, config)
+ case OpLeq16:
+ return rewriteValue386_OpLeq16(v, config)
+ case OpLeq16U:
+ return rewriteValue386_OpLeq16U(v, config)
+ case OpLeq32:
+ return rewriteValue386_OpLeq32(v, config)
+ case OpLeq32F:
+ return rewriteValue386_OpLeq32F(v, config)
+ case OpLeq32U:
+ return rewriteValue386_OpLeq32U(v, config)
+ case OpLeq64F:
+ return rewriteValue386_OpLeq64F(v, config)
+ case OpLeq8:
+ return rewriteValue386_OpLeq8(v, config)
+ case OpLeq8U:
+ return rewriteValue386_OpLeq8U(v, config)
+ case OpLess16:
+ return rewriteValue386_OpLess16(v, config)
+ case OpLess16U:
+ return rewriteValue386_OpLess16U(v, config)
+ case OpLess32:
+ return rewriteValue386_OpLess32(v, config)
+ case OpLess32F:
+ return rewriteValue386_OpLess32F(v, config)
+ case OpLess32U:
+ return rewriteValue386_OpLess32U(v, config)
+ case OpLess64F:
+ return rewriteValue386_OpLess64F(v, config)
+ case OpLess8:
+ return rewriteValue386_OpLess8(v, config)
+ case OpLess8U:
+ return rewriteValue386_OpLess8U(v, config)
+ case OpLoad:
+ return rewriteValue386_OpLoad(v, config)
+ case OpLrot16:
+ return rewriteValue386_OpLrot16(v, config)
+ case OpLrot32:
+ return rewriteValue386_OpLrot32(v, config)
+ case OpLrot8:
+ return rewriteValue386_OpLrot8(v, config)
+ case OpLsh16x16:
+ return rewriteValue386_OpLsh16x16(v, config)
+ case OpLsh16x32:
+ return rewriteValue386_OpLsh16x32(v, config)
+ case OpLsh16x64:
+ return rewriteValue386_OpLsh16x64(v, config)
+ case OpLsh16x8:
+ return rewriteValue386_OpLsh16x8(v, config)
+ case OpLsh32x16:
+ return rewriteValue386_OpLsh32x16(v, config)
+ case OpLsh32x32:
+ return rewriteValue386_OpLsh32x32(v, config)
+ case OpLsh32x64:
+ return rewriteValue386_OpLsh32x64(v, config)
+ case OpLsh32x8:
+ return rewriteValue386_OpLsh32x8(v, config)
+ case OpLsh8x16:
+ return rewriteValue386_OpLsh8x16(v, config)
+ case OpLsh8x32:
+ return rewriteValue386_OpLsh8x32(v, config)
+ case OpLsh8x64:
+ return rewriteValue386_OpLsh8x64(v, config)
+ case OpLsh8x8:
+ return rewriteValue386_OpLsh8x8(v, config)
+ case OpMod16:
+ return rewriteValue386_OpMod16(v, config)
+ case OpMod16u:
+ return rewriteValue386_OpMod16u(v, config)
+ case OpMod32:
+ return rewriteValue386_OpMod32(v, config)
+ case OpMod32u:
+ return rewriteValue386_OpMod32u(v, config)
+ case OpMod8:
+ return rewriteValue386_OpMod8(v, config)
+ case OpMod8u:
+ return rewriteValue386_OpMod8u(v, config)
+ case OpMove:
+ return rewriteValue386_OpMove(v, config)
+ case OpMul16:
+ return rewriteValue386_OpMul16(v, config)
+ case OpMul32:
+ return rewriteValue386_OpMul32(v, config)
+ case OpMul32F:
+ return rewriteValue386_OpMul32F(v, config)
+ case OpMul32uhilo:
+ return rewriteValue386_OpMul32uhilo(v, config)
+ case OpMul64F:
+ return rewriteValue386_OpMul64F(v, config)
+ case OpMul8:
+ return rewriteValue386_OpMul8(v, config)
+ case OpNeg16:
+ return rewriteValue386_OpNeg16(v, config)
+ case OpNeg32:
+ return rewriteValue386_OpNeg32(v, config)
+ case OpNeg32F:
+ return rewriteValue386_OpNeg32F(v, config)
+ case OpNeg64F:
+ return rewriteValue386_OpNeg64F(v, config)
+ case OpNeg8:
+ return rewriteValue386_OpNeg8(v, config)
+ case OpNeq16:
+ return rewriteValue386_OpNeq16(v, config)
+ case OpNeq32:
+ return rewriteValue386_OpNeq32(v, config)
+ case OpNeq32F:
+ return rewriteValue386_OpNeq32F(v, config)
+ case OpNeq64F:
+ return rewriteValue386_OpNeq64F(v, config)
+ case OpNeq8:
+ return rewriteValue386_OpNeq8(v, config)
+ case OpNeqB:
+ return rewriteValue386_OpNeqB(v, config)
+ case OpNeqPtr:
+ return rewriteValue386_OpNeqPtr(v, config)
+ case OpNilCheck:
+ return rewriteValue386_OpNilCheck(v, config)
+ case OpNot:
+ return rewriteValue386_OpNot(v, config)
+ case OpOffPtr:
+ return rewriteValue386_OpOffPtr(v, config)
+ case OpOr16:
+ return rewriteValue386_OpOr16(v, config)
+ case OpOr32:
+ return rewriteValue386_OpOr32(v, config)
+ case OpOr8:
+ return rewriteValue386_OpOr8(v, config)
+ case OpOrB:
+ return rewriteValue386_OpOrB(v, config)
+ case OpRsh16Ux16:
+ return rewriteValue386_OpRsh16Ux16(v, config)
+ case OpRsh16Ux32:
+ return rewriteValue386_OpRsh16Ux32(v, config)
+ case OpRsh16Ux64:
+ return rewriteValue386_OpRsh16Ux64(v, config)
+ case OpRsh16Ux8:
+ return rewriteValue386_OpRsh16Ux8(v, config)
+ case OpRsh16x16:
+ return rewriteValue386_OpRsh16x16(v, config)
+ case OpRsh16x32:
+ return rewriteValue386_OpRsh16x32(v, config)
+ case OpRsh16x64:
+ return rewriteValue386_OpRsh16x64(v, config)
+ case OpRsh16x8:
+ return rewriteValue386_OpRsh16x8(v, config)
+ case OpRsh32Ux16:
+ return rewriteValue386_OpRsh32Ux16(v, config)
+ case OpRsh32Ux32:
+ return rewriteValue386_OpRsh32Ux32(v, config)
+ case OpRsh32Ux64:
+ return rewriteValue386_OpRsh32Ux64(v, config)
+ case OpRsh32Ux8:
+ return rewriteValue386_OpRsh32Ux8(v, config)
+ case OpRsh32x16:
+ return rewriteValue386_OpRsh32x16(v, config)
+ case OpRsh32x32:
+ return rewriteValue386_OpRsh32x32(v, config)
+ case OpRsh32x64:
+ return rewriteValue386_OpRsh32x64(v, config)
+ case OpRsh32x8:
+ return rewriteValue386_OpRsh32x8(v, config)
+ case OpRsh8Ux16:
+ return rewriteValue386_OpRsh8Ux16(v, config)
+ case OpRsh8Ux32:
+ return rewriteValue386_OpRsh8Ux32(v, config)
+ case OpRsh8Ux64:
+ return rewriteValue386_OpRsh8Ux64(v, config)
+ case OpRsh8Ux8:
+ return rewriteValue386_OpRsh8Ux8(v, config)
+ case OpRsh8x16:
+ return rewriteValue386_OpRsh8x16(v, config)
+ case OpRsh8x32:
+ return rewriteValue386_OpRsh8x32(v, config)
+ case OpRsh8x64:
+ return rewriteValue386_OpRsh8x64(v, config)
+ case OpRsh8x8:
+ return rewriteValue386_OpRsh8x8(v, config)
+ case OpSignExt16to32:
+ return rewriteValue386_OpSignExt16to32(v, config)
+ case OpSignExt8to16:
+ return rewriteValue386_OpSignExt8to16(v, config)
+ case OpSignExt8to32:
+ return rewriteValue386_OpSignExt8to32(v, config)
+ case OpSignmask:
+ return rewriteValue386_OpSignmask(v, config)
+ case OpSqrt:
+ return rewriteValue386_OpSqrt(v, config)
+ case OpStaticCall:
+ return rewriteValue386_OpStaticCall(v, config)
+ case OpStore:
+ return rewriteValue386_OpStore(v, config)
+ case OpSub16:
+ return rewriteValue386_OpSub16(v, config)
+ case OpSub32:
+ return rewriteValue386_OpSub32(v, config)
+ case OpSub32F:
+ return rewriteValue386_OpSub32F(v, config)
+ case OpSub32carry:
+ return rewriteValue386_OpSub32carry(v, config)
+ case OpSub32withcarry:
+ return rewriteValue386_OpSub32withcarry(v, config)
+ case OpSub64F:
+ return rewriteValue386_OpSub64F(v, config)
+ case OpSub8:
+ return rewriteValue386_OpSub8(v, config)
+ case OpSubPtr:
+ return rewriteValue386_OpSubPtr(v, config)
+ case OpTrunc16to8:
+ return rewriteValue386_OpTrunc16to8(v, config)
+ case OpTrunc32to16:
+ return rewriteValue386_OpTrunc32to16(v, config)
+ case OpTrunc32to8:
+ return rewriteValue386_OpTrunc32to8(v, config)
+ case OpXor16:
+ return rewriteValue386_OpXor16(v, config)
+ case OpXor32:
+ return rewriteValue386_OpXor32(v, config)
+ case OpXor8:
+ return rewriteValue386_OpXor8(v, config)
+ case OpZero:
+ return rewriteValue386_OpZero(v, config)
+ case OpZeroExt16to32:
+ return rewriteValue386_OpZeroExt16to32(v, config)
+ case OpZeroExt8to16:
+ return rewriteValue386_OpZeroExt8to16(v, config)
+ case OpZeroExt8to32:
+ return rewriteValue386_OpZeroExt8to32(v, config)
+ case OpZeromask:
+ return rewriteValue386_OpZeromask(v, config)
+ }
+ return false
+}
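+// For illustration: this switch and everything below it is generated by
+// gen/rulegen.go from the patterns in gen/386.rules; each opcode with at
+// least one rewrite rule gets a case dispatching to its own
+// rewriteValue386_Op* function. A rules-file line such as
+//
+//	(ADDL x (MOVLconst [c])) -> (ADDLconst [c] x)
+//
+// expands into one "match" loop in the corresponding function, which
+// breaks (falling through to the next rule) as soon as any sub-pattern
+// or condition fails to match.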
+func rewriteValue386_Op386ADCL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADCL x (MOVLconst [c]) f)
+ // cond:
+ // result: (ADCLconst [c] x f)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ f := v.Args[2]
+ v.reset(Op386ADCLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(f)
+ return true
+ }
+ // match: (ADCL (MOVLconst [c]) x f)
+ // cond:
+ // result: (ADCLconst [c] x f)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ f := v.Args[2]
+ v.reset(Op386ADCLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(f)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ADDL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDL x (MOVLconst [c]))
+ // cond:
+ // result: (ADDLconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(Op386ADDLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDL (MOVLconst [c]) x)
+ // cond:
+ // result: (ADDLconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(Op386ADDLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDL x (SHLLconst [3] y))
+ // cond:
+ // result: (LEAL8 x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SHLLconst {
+ break
+ }
+ if v_1.AuxInt != 3 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL8)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADDL x (SHLLconst [2] y))
+ // cond:
+ // result: (LEAL4 x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SHLLconst {
+ break
+ }
+ if v_1.AuxInt != 2 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL4)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADDL x (SHLLconst [1] y))
+ // cond:
+ // result: (LEAL2 x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SHLLconst {
+ break
+ }
+ if v_1.AuxInt != 1 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL2)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADDL x (ADDL y y))
+ // cond:
+ // result: (LEAL2 x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDL {
+ break
+ }
+ y := v_1.Args[0]
+ if y != v_1.Args[1] {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADDL x (ADDL x y))
+ // cond:
+ // result: (LEAL2 y x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDL {
+ break
+ }
+ if x != v_1.Args[0] {
+ break
+ }
+ y := v_1.Args[1]
+ v.reset(Op386LEAL2)
+ v.AddArg(y)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDL x (ADDL y x))
+ // cond:
+ // result: (LEAL2 y x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDL {
+ break
+ }
+ y := v_1.Args[0]
+ if x != v_1.Args[1] {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v.AddArg(y)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDL (ADDLconst [c] x) y)
+ // cond:
+ // result: (LEAL1 [c] x y)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ y := v.Args[1]
+ v.reset(Op386LEAL1)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADDL x (ADDLconst [c] y))
+ // cond:
+ // result: (LEAL1 [c] x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(Op386LEAL1)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADDL x (LEAL [c] {s} y))
+ // cond: x.Op != OpSB && y.Op != OpSB
+ // result: (LEAL1 [c] {s} x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ c := v_1.AuxInt
+ s := v_1.Aux
+ y := v_1.Args[0]
+ if !(x.Op != OpSB && y.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADDL (LEAL [c] {s} x) y)
+ // cond: x.Op != OpSB && y.Op != OpSB
+ // result: (LEAL1 [c] {s} x y)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ c := v_0.AuxInt
+ s := v_0.Aux
+ x := v_0.Args[0]
+ y := v.Args[1]
+ if !(x.Op != OpSB && y.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADDL x (NEGL y))
+ // cond:
+ // result: (SUBL x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386NEGL {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386SUBL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
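+// Note: the LEAL fusion rules in ADDL above exploit x86 scaled-index
+// addressing. Roughly, for Go source like
+//
+//	p := base + 4*i
+//
+// the generic lowering yields (ADDL base (SHLLconst [2] i)), which
+// (ADDL x (SHLLconst [2] y)) -> (LEAL4 x y) folds into a single LEAL
+// with a *4 index; the ADDLconst rules below can still merge a constant
+// displacement into the same instruction afterwards.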
+func rewriteValue386_Op386ADDLcarry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDLcarry x (MOVLconst [c]))
+ // cond:
+ // result: (ADDLconstcarry [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(Op386ADDLconstcarry)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDLcarry (MOVLconst [c]) x)
+ // cond:
+ // result: (ADDLconstcarry [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(Op386ADDLconstcarry)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
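+// Note: ADDLcarry and ADCL exist for the 64-bit arithmetic decomposition
+// (see gen/dec64.rules): on 386 an Add64 is roughly split into a 32-bit
+// add of the low words that produces a carry (lowered to ADDLcarry) and
+// a 32-bit add-with-carry of the high words (lowered to the three-operand
+// ADCL rewritten above). The constant folds here mirror the plain ADDL
+// ones, passing the extra carry argument f through unchanged.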
+func rewriteValue386_Op386ADDLconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDLconst [c] (ADDL x y))
+ // cond:
+ // result: (LEAL1 [c] x y)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDL {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(Op386LEAL1)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL [d] {s} x))
+ // cond: is32Bit(c+d)
+ // result: (LEAL [c+d] {s} x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ d := v_0.AuxInt
+ s := v_0.Aux
+ x := v_0.Args[0]
+ if !(is32Bit(c + d)) {
+ break
+ }
+ v.reset(Op386LEAL)
+ v.AuxInt = c + d
+ v.Aux = s
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL1 [d] {s} x y))
+ // cond: is32Bit(c+d)
+ // result: (LEAL1 [c+d] {s} x y)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL1 {
+ break
+ }
+ d := v_0.AuxInt
+ s := v_0.Aux
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ if !(is32Bit(c + d)) {
+ break
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = c + d
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL2 [d] {s} x y))
+ // cond: is32Bit(c+d)
+ // result: (LEAL2 [c+d] {s} x y)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL2 {
+ break
+ }
+ d := v_0.AuxInt
+ s := v_0.Aux
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ if !(is32Bit(c + d)) {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = c + d
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL4 [d] {s} x y))
+ // cond: is32Bit(c+d)
+ // result: (LEAL4 [c+d] {s} x y)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL4 {
+ break
+ }
+ d := v_0.AuxInt
+ s := v_0.Aux
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ if !(is32Bit(c + d)) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v.AuxInt = c + d
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL8 [d] {s} x y))
+ // cond: is32Bit(c+d)
+ // result: (LEAL8 [c+d] {s} x y)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL8 {
+ break
+ }
+ d := v_0.AuxInt
+ s := v_0.Aux
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ if !(is32Bit(c + d)) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v.AuxInt = c + d
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADDLconst [c] x)
+ // cond: int32(c)==0
+ // result: x
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDLconst [c] (MOVLconst [d]))
+ // cond:
+ // result: (MOVLconst [int64(int32(c+d))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int64(int32(c + d))
+ return true
+ }
+ // match: (ADDLconst [c] (ADDLconst [d] x))
+ // cond:
+ // result: (ADDLconst [int64(int32(c+d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(Op386ADDLconst)
+ v.AuxInt = int64(int32(c + d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
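+// Note on the int64(int32(c+d)) folds above: AuxInt is an int64, so
+// 32-bit constants are kept canonical by truncating the sum to int32 and
+// sign-extending back. For example, c = 0x7fffffff and d = 1 fold to
+// int64(int32(0x80000000)) = -0x80000000, exactly what the 32-bit
+// hardware add would produce.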
+func rewriteValue386_Op386ANDL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ANDL x (MOVLconst [c]))
+ // cond:
+ // result: (ANDLconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(Op386ANDLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDL (MOVLconst [c]) x)
+ // cond:
+ // result: (ANDLconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(Op386ANDLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDL x x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ANDLconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ANDLconst [c] (ANDLconst [d] x))
+ // cond:
+ // result: (ANDLconst [c & d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(Op386ANDLconst)
+ v.AuxInt = c & d
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDLconst [c] _)
+ // cond: int32(c)==0
+ // result: (MOVLconst [0])
+ for {
+ c := v.AuxInt
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (ANDLconst [c] x)
+ // cond: int32(c)==-1
+ // result: x
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDLconst [c] (MOVLconst [d]))
+ // cond:
+ // result: (MOVLconst [c&d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(Op386MOVLconst)
+ v.AuxInt = c & d
+ return true
+ }
+ return false
+}
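+// Note: the ANDLconst folds are plain mask algebra: nested masks combine,
+// e.g. (ANDLconst [0xff] (ANDLconst [0xf0] x)) -> (ANDLconst [0xf0] x)
+// since 0xff & 0xf0 = 0xf0; an all-zero mask (int32(c) == 0) collapses to
+// (MOVLconst [0]); and an all-ones mask (int32(c) == -1) is a no-op,
+// replaced by a copy of x.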
+func rewriteValue386_Op386CMPB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPB x (MOVLconst [c]))
+ // cond:
+ // result: (CMPBconst x [int64(int8(c))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(Op386CMPBconst)
+ v.AuxInt = int64(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPB (MOVLconst [c]) x)
+ // cond:
+ // result: (InvertFlags (CMPBconst x [int64(int8(c))]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v0.AuxInt = int64(int8(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
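+// Note: CMPB is not commutative, so a constant in the first operand
+// cannot simply be moved into CMPBconst. The second rule above instead
+// swaps the operands and wraps the result in (InvertFlags ...); rules
+// elsewhere that consume flags (SETcc and conditional branches) flip the
+// sense of the comparison when they see InvertFlags.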
+func rewriteValue386_Op386CMPBconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)==int8(y)
+ // result: (FlagEQ)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int8(x) == int8(y)) {
+ break
+ }
+ v.reset(Op386FlagEQ)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)<int8(y) && uint8(x)<uint8(y)
+ // result: (FlagLT_ULT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)<int8(y) && uint8(x)>uint8(y)
+ // result: (FlagLT_UGT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
+ break
+ }
+ v.reset(Op386FlagLT_UGT)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)>int8(y) && uint8(x)<uint8(y)
+ // result: (FlagGT_ULT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
+ break
+ }
+ v.reset(Op386FlagGT_ULT)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)>int8(y) && uint8(x)>uint8(y)
+ // result: (FlagGT_UGT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
+ break
+ }
+ v.reset(Op386FlagGT_UGT)
+ return true
+ }
+ // match: (CMPBconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= int8(m) && int8(m) < int8(n)
+ // result: (FlagLT_ULT)
+ for {
+ n := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ m := v_0.AuxInt
+ if !(0 <= int8(m) && int8(m) < int8(n)) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPBconst (ANDL x y) [0])
+ // cond:
+ // result: (TESTB x y)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ANDL {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(Op386TESTB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMPBconst (ANDLconst [c] x) [0])
+ // cond:
+ // result: (TESTBconst [int64(int8(c))] x)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(Op386TESTBconst)
+ v.AuxInt = int64(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPBconst x [0])
+ // cond:
+ // result: (TESTB x x)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(Op386TESTB)
+ v.AddArg(x)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
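+// Note: with both comparison operands constant, the rules above evaluate
+// the flags at compile time into FlagEQ, FlagLT_ULT, FlagLT_UGT,
+// FlagGT_ULT or FlagGT_UGT, recording the signed and unsigned orderings
+// separately. For example x = -1, y = 1 gives int8(x) < int8(y) signed
+// but uint8(x) = 0xff > uint8(y) unsigned, hence FlagLT_UGT.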
+func rewriteValue386_Op386CMPL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPL x (MOVLconst [c]))
+ // cond:
+ // result: (CMPLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(Op386CMPLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPL (MOVLconst [c]) x)
+ // cond:
+ // result: (InvertFlags (CMPLconst x [c]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v0.AuxInt = c
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPLconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: int32(x)==int32(y)
+ // result: (FlagEQ)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int32(x) == int32(y)) {
+ break
+ }
+ v.reset(Op386FlagEQ)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: int32(x)<int32(y) && uint32(x)<uint32(y)
+ // result: (FlagLT_ULT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: int32(x)<int32(y) && uint32(x)>uint32(y)
+ // result: (FlagLT_UGT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(Op386FlagLT_UGT)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: int32(x)>int32(y) && uint32(x)<uint32(y)
+ // result: (FlagGT_ULT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(Op386FlagGT_ULT)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: int32(x)>int32(y) && uint32(x)>uint32(y)
+ // result: (FlagGT_UGT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(Op386FlagGT_UGT)
+ return true
+ }
+ // match: (CMPLconst (SHRLconst _ [c]) [n])
+ // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
+ // result: (FlagLT_ULT)
+ for {
+ n := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386SHRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPLconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= int32(m) && int32(m) < int32(n)
+ // result: (FlagLT_ULT)
+ for {
+ n := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ m := v_0.AuxInt
+ if !(0 <= int32(m) && int32(m) < int32(n)) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPLconst (ANDL x y) [0])
+ // cond:
+ // result: (TESTL x y)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ANDL {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(Op386TESTL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMPLconst (ANDLconst [c] x) [0])
+ // cond:
+ // result: (TESTLconst [c] x)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(Op386TESTLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPLconst x [0])
+ // cond:
+ // result: (TESTL x x)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(Op386TESTL)
+ v.AddArg(x)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
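+// Note on the SHRLconst rule above: after a logical right shift by c the
+// value is known to be below 1<<(32-c), so a comparison against any
+// n >= 1<<(32-c) must be unsigned-less-than. For example
+// (CMPLconst (SHRLconst _ [24]) [256]) folds to FlagLT_ULT, since a
+// value shifted right by 24 fits in 8 bits and is therefore < 256.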
+func rewriteValue386_Op386CMPW(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPW x (MOVLconst [c]))
+ // cond:
+ // result: (CMPWconst x [int64(int16(c))])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(Op386CMPWconst)
+ v.AuxInt = int64(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPW (MOVLconst [c]) x)
+ // cond:
+ // result: (InvertFlags (CMPWconst x [int64(int16(c))]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v0.AuxInt = int64(int16(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPWconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)==int16(y)
+ // result: (FlagEQ)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int16(x) == int16(y)) {
+ break
+ }
+ v.reset(Op386FlagEQ)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)<int16(y) && uint16(x)<uint16(y)
+ // result: (FlagLT_ULT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)<int16(y) && uint16(x)>uint16(y)
+ // result: (FlagLT_UGT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
+ break
+ }
+ v.reset(Op386FlagLT_UGT)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)>int16(y) && uint16(x)<uint16(y)
+ // result: (FlagGT_ULT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
+ break
+ }
+ v.reset(Op386FlagGT_ULT)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)>int16(y) && uint16(x)>uint16(y)
+ // result: (FlagGT_UGT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
+ break
+ }
+ v.reset(Op386FlagGT_UGT)
+ return true
+ }
+ // match: (CMPWconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= int16(m) && int16(m) < int16(n)
+ // result: (FlagLT_ULT)
+ for {
+ n := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ m := v_0.AuxInt
+ if !(0 <= int16(m) && int16(m) < int16(n)) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPWconst (ANDL x y) [0])
+ // cond:
+ // result: (TESTW x y)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ANDL {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(Op386TESTW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMPWconst (ANDLconst [c] x) [0])
+ // cond:
+ // result: (TESTWconst [int64(int16(c))] x)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(Op386TESTWconst)
+ v.AuxInt = int64(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPWconst x [0])
+ // cond:
+ // result: (TESTW x x)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(Op386TESTW)
+ v.AddArg(x)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386LEAL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (LEAL [c] {s} (ADDLconst [d] x))
+ // cond: is32Bit(c+d)
+ // result: (LEAL [c+d] {s} x)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(is32Bit(c + d)) {
+ break
+ }
+ v.reset(Op386LEAL)
+ v.AuxInt = c + d
+ v.Aux = s
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAL [c] {s} (ADDL x y))
+ // cond: x.Op != OpSB && y.Op != OpSB
+ // result: (LEAL1 [c] {s} x y)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDL {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ if !(x.Op != OpSB && y.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAL [off1] {sym1} (LEAL [off2] {sym2} x))
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (LEAL [off1+off2] {mergeSym(sym1,sym2)} x)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ x := v_0.Args[0]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386LEAL)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAL [off1] {sym1} (LEAL1 [off2] {sym2} x y))
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL1 {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAL [off1] {sym1} (LEAL2 [off2] {sym2} x y))
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL2 {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAL [off1] {sym1} (LEAL4 [off2] {sym2} x y))
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL4 {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y))
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL8 {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
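+// Note: the LEAL-into-LEAL folds lean on two helpers from rewrite.go:
+// is32Bit checks that the combined displacement still fits the
+// instruction's 32-bit offset field, and canMergeSym/mergeSym permit the
+// fold only when at most one of the two addresses carries a symbol,
+// since the merged address can reference at most one.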
+func rewriteValue386_Op386LEAL1(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(c+d) && x.Op != OpSB
+ // result: (LEAL1 [c+d] {s} x y)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ y := v.Args[1]
+ if !(is32Bit(c+d) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = c + d
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAL1 [c] {s} x (ADDLconst [d] y))
+ // cond: is32Bit(c+d) && y.Op != OpSB
+ // result: (LEAL1 [c+d] {s} x y)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := v_1.AuxInt
+ y := v_1.Args[0]
+ if !(is32Bit(c+d) && y.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = c + d
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAL1 [c] {s} x (SHLLconst [1] y))
+ // cond:
+ // result: (LEAL2 [c] {s} x y)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SHLLconst {
+ break
+ }
+ if v_1.AuxInt != 1 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL2)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAL1 [c] {s} (SHLLconst [1] x) y)
+ // cond:
+ // result: (LEAL2 [c] {s} y x)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386SHLLconst {
+ break
+ }
+ if v_0.AuxInt != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ y := v.Args[1]
+ v.reset(Op386LEAL2)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(y)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAL1 [c] {s} x (SHLLconst [2] y))
+ // cond:
+ // result: (LEAL4 [c] {s} x y)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SHLLconst {
+ break
+ }
+ if v_1.AuxInt != 2 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL4)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAL1 [c] {s} (SHLLconst [2] x) y)
+ // cond:
+ // result: (LEAL4 [c] {s} y x)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386SHLLconst {
+ break
+ }
+ if v_0.AuxInt != 2 {
+ break
+ }
+ x := v_0.Args[0]
+ y := v.Args[1]
+ v.reset(Op386LEAL4)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(y)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAL1 [c] {s} x (SHLLconst [3] y))
+ // cond:
+ // result: (LEAL8 [c] {s} x y)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SHLLconst {
+ break
+ }
+ if v_1.AuxInt != 3 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL8)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAL1 [c] {s} (SHLLconst [3] x) y)
+ // cond:
+ // result: (LEAL8 [c] {s} y x)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386SHLLconst {
+ break
+ }
+ if v_0.AuxInt != 3 {
+ break
+ }
+ x := v_0.Args[0]
+ y := v.Args[1]
+ v.reset(Op386LEAL8)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(y)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ x := v_0.Args[0]
+ y := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAL1 [off1] {sym1} x (LEAL [off2] {sym2} y))
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB
+ // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ y := v_1.Args[0]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386LEAL2(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(c+d) && x.Op != OpSB
+ // result: (LEAL2 [c+d] {s} x y)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ y := v.Args[1]
+ if !(is32Bit(c+d) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = c + d
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAL2 [c] {s} x (ADDLconst [d] y))
+ // cond: is32Bit(c+2*d) && y.Op != OpSB
+ // result: (LEAL2 [c+2*d] {s} x y)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := v_1.AuxInt
+ y := v_1.Args[0]
+ if !(is32Bit(c+2*d) && y.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = c + 2*d
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAL2 [c] {s} x (SHLLconst [1] y))
+ // cond:
+ // result: (LEAL4 [c] {s} x y)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SHLLconst {
+ break
+ }
+ if v_1.AuxInt != 1 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL4)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAL2 [c] {s} x (SHLLconst [2] y))
+ // cond:
+ // result: (LEAL8 [c] {s} x y)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SHLLconst {
+ break
+ }
+ if v_1.AuxInt != 2 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL8)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ x := v_0.Args[0]
+ y := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386LEAL4(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(c+d) && x.Op != OpSB
+ // result: (LEAL4 [c+d] {s} x y)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ y := v.Args[1]
+ if !(is32Bit(c+d) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v.AuxInt = c + d
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAL4 [c] {s} x (ADDLconst [d] y))
+ // cond: is32Bit(c+4*d) && y.Op != OpSB
+ // result: (LEAL4 [c+4*d] {s} x y)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := v_1.AuxInt
+ y := v_1.Args[0]
+ if !(is32Bit(c+4*d) && y.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v.AuxInt = c + 4*d
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAL4 [c] {s} x (SHLLconst [1] y))
+ // cond:
+ // result: (LEAL8 [c] {s} x y)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SHLLconst {
+ break
+ }
+ if v_1.AuxInt != 1 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL8)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ x := v_0.Args[0]
+ y := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386LEAL8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(c+d) && x.Op != OpSB
+ // result: (LEAL8 [c+d] {s} x y)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ y := v.Args[1]
+ if !(is32Bit(c+d) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v.AuxInt = c + d
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAL8 [c] {s} x (ADDLconst [d] y))
+ // cond: is32Bit(c+8*d) && y.Op != OpSB
+ // result: (LEAL8 [c+8*d] {s} x y)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := v_1.AuxInt
+ y := v_1.Args[0]
+ if !(is32Bit(c+8*d) && y.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v.AuxInt = c + 8*d
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ x := v_0.Args[0]
+ y := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
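+// Note: in the LEAL1/2/4/8 folds a constant added to the index operand is
+// scaled before being merged into the displacement: LEAL8 computes
+// c + x + 8*y, so substituting y+d gives c + x + 8*(y+d) =
+// (c+8*d) + x + 8*y, which is why the rule above writes [c+8*d]
+// (LEAL2 and LEAL4 use 2*d and 4*d in the same way).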
+func rewriteValue386_Op386MOVBLSX(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBLSX x:(MOVBload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBLSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v.Args[0]
+ if x.Op != Op386MOVBload {
+ break
+ }
+ off := x.AuxInt
+ sym := x.Aux
+ ptr := x.Args[0]
+ mem := x.Args[1]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Line, Op386MOVBLSXload, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ return true
+ }
+ // match: (MOVBLSX (ANDLconst [c] x))
+ // cond: c & 0x80 == 0
+ // result: (ANDLconst [c & 0x7f] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c&0x80 == 0) {
+ break
+ }
+ v.reset(Op386ANDLconst)
+ v.AuxInt = c & 0x7f
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
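+// Note: the "@x.Block" rules, like the MOVBLSX-of-MOVBload fold above,
+// build the replacement in the block of the original load rather than
+// the block of v: b is rebound to x.Block, the MOVBLSXload is created
+// there, and v itself becomes an OpCopy of it. The x.Uses == 1 &&
+// clobber(x) condition ensures the old load has no other users and can
+// be thrown away.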
+func rewriteValue386_Op386MOVBLSXload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVBLSXload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVBLZX(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBLZX x:(MOVBload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v.Args[0]
+ if x.Op != Op386MOVBload {
+ break
+ }
+ off := x.AuxInt
+ sym := x.Aux
+ ptr := x.Args[0]
+ mem := x.Args[1]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Line, Op386MOVBload, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ return true
+ }
+ // match: (MOVBLZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
+ for {
+ x := v.Args[0]
+ if x.Op != Op386MOVBloadidx1 {
+ break
+ }
+ off := x.AuxInt
+ sym := x.Aux
+ ptr := x.Args[0]
+ idx := x.Args[1]
+ mem := x.Args[2]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Line, Op386MOVBloadidx1, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(idx)
+ v0.AddArg(mem)
+ return true
+ }
+ // match: (MOVBLZX (ANDLconst [c] x))
+ // cond:
+ // result: (ANDLconst [c & 0xff] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(Op386ANDLconst)
+ v.AuxInt = c & 0xff
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVBload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVBstore {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(Op386MOVBload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVBload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL1 {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386MOVBloadidx1)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBload [off] {sym} (ADDL ptr idx) mem)
+ // cond: ptr.Op != OpSB
+ // result: (MOVBloadidx1 [off] {sym} ptr idx mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDL {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(ptr.Op != OpSB) {
+ break
+ }
+ v.reset(Op386MOVBloadidx1)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
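+// Note: the first MOVBload rule is store-to-load forwarding: a byte load
+// whose memory argument is a store to the same location (same symbol,
+// same offset, isSamePtr pointers) just reuses the stored value x.
+// Roughly, for
+//
+//	*p = b
+//	x := *p
+//
+// the second statement becomes x = b with no memory access at all.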
+func rewriteValue386_Op386MOVBloadidx1(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
+ // cond:
+ // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ v.reset(Op386MOVBloadidx1)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
+ // cond:
+ // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := v_1.AuxInt
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(Op386MOVBloadidx1)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVBstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBstore [off] {sym} ptr (MOVBLSX x) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVBLSX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(Op386MOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBLZX x) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVBLZX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(Op386MOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(Op386MOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
+ // cond: validOff(off)
+ // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(validOff(off)) {
+ break
+ }
+ v.reset(Op386MOVBstoreconst)
+ v.AuxInt = makeValAndOff(int64(int8(c)), off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL1 {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386MOVBstoreidx1)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} (ADDL ptr idx) val mem)
+ // cond: ptr.Op != OpSB
+ // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDL {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(ptr.Op != OpSB) {
+ break
+ }
+ v.reset(Op386MOVBstoreidx1)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-1] {s} p w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ p := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SHRLconst {
+ break
+ }
+ if v_1.AuxInt != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v.Args[2]
+ if x.Op != Op386MOVBstore {
+ break
+ }
+ if x.AuxInt != i-1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ if p != x.Args[0] {
+ break
+ }
+ if w != x.Args[1] {
+ break
+ }
+ mem := x.Args[2]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = i - 1
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-1] {s} p w0 mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ p := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SHRLconst {
+ break
+ }
+ j := v_1.AuxInt
+ w := v_1.Args[0]
+ x := v.Args[2]
+ if x.Op != Op386MOVBstore {
+ break
+ }
+ if x.AuxInt != i-1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != Op386SHRLconst {
+ break
+ }
+ if w0.AuxInt != j-8 {
+ break
+ }
+ if w != w0.Args[0] {
+ break
+ }
+ mem := x.Args[2]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = i - 1
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(w0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
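+// Note: the last two MOVBstore rules merge adjacent byte stores into one
+// word store. Storing w at [i-1] and (SHRLconst [8] w) at [i] writes the
+// low 16 bits of w little-endian, so the pair becomes a single
+// (MOVWstore [i-1] p w mem); the (SHRLconst [j]) / (SHRLconst [j-8])
+// variant handles the same pattern anywhere inside a wider value.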
+func rewriteValue386_Op386MOVBstoreconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd(off)
+ // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+ for {
+ sc := v.AuxInt
+ s := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(sc).canAdd(off)) {
+ break
+ }
+ v.reset(Op386MOVBstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = s
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVBstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem)
+ // cond: canMergeSym(sym1, sym2)
+ // result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
+ for {
+ x := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL1 {
+ break
+ }
+ off := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386MOVBstoreconstidx1)
+ v.AuxInt = ValAndOff(x).add(off)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreconst [x] {sym} (ADDL ptr idx) mem)
+ // cond:
+ // result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
+ for {
+ x := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDL {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ v.reset(Op386MOVBstoreconstidx1)
+ v.AuxInt = x
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
+ // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
+ // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ p := v.Args[0]
+ x := v.Args[1]
+ if x.Op != Op386MOVBstoreconst {
+ break
+ }
+ a := x.AuxInt
+ if x.Aux != s {
+ break
+ }
+ if p != x.Args[0] {
+ break
+ }
+ mem := x.Args[1]
+ if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
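+// MOVBstoreconst packs both the constant and the address offset into one
+// AuxInt via ValAndOff. A minimal model of that encoding, assuming the usual
+// high-32/low-32 split (the authoritative definitions live in the ssa
+// package):
+//
+//	type ValAndOff int64
+//
+//	func (x ValAndOff) Val() int64 { return int64(x) >> 32 }
+//	func (x ValAndOff) Off() int64 { return int64(int32(x)) }
+//
+// Under that reading, the final rule above fuses the byte constants a and c
+// at offsets off and off+1 into a single 16-bit constant (low byte from a,
+// high byte from c) stored at off.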
+func rewriteValue386_Op386MOVBstoreconstidx1(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem)
+ // cond:
+ // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+ for {
+ x := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ c := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ v.reset(Op386MOVBstoreconstidx1)
+ v.AuxInt = ValAndOff(x).add(c)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem)
+ // cond:
+ // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+ for {
+ x := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ c := v_1.AuxInt
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(Op386MOVBstoreconstidx1)
+ v.AuxInt = ValAndOff(x).add(c)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
+ // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
+ // result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ p := v.Args[0]
+ i := v.Args[1]
+ x := v.Args[2]
+ if x.Op != Op386MOVBstoreconstidx1 {
+ break
+ }
+ a := x.AuxInt
+ if x.Aux != s {
+ break
+ }
+ if p != x.Args[0] {
+ break
+ }
+ if i != x.Args[1] {
+ break
+ }
+ mem := x.Args[2]
+ if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstoreconstidx1)
+ v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(i)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVBstoreidx1(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
+ // cond:
+ // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(Op386MOVBstoreidx1)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
+ // cond:
+ // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := v_1.AuxInt
+ idx := v_1.Args[0]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(Op386MOVBstoreidx1)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstoreidx1 [i-1] {s} p idx w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ p := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != Op386SHRLconst {
+ break
+ }
+ if v_2.AuxInt != 8 {
+ break
+ }
+ w := v_2.Args[0]
+ x := v.Args[3]
+ if x.Op != Op386MOVBstoreidx1 {
+ break
+ }
+ if x.AuxInt != i-1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ if p != x.Args[0] {
+ break
+ }
+ if idx != x.Args[1] {
+ break
+ }
+ if w != x.Args[2] {
+ break
+ }
+ mem := x.Args[3]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstoreidx1)
+ v.AuxInt = i - 1
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(idx)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRLconst [j-8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ p := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != Op386SHRLconst {
+ break
+ }
+ j := v_2.AuxInt
+ w := v_2.Args[0]
+ x := v.Args[3]
+ if x.Op != Op386MOVBstoreidx1 {
+ break
+ }
+ if x.AuxInt != i-1 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ if p != x.Args[0] {
+ break
+ }
+ if idx != x.Args[1] {
+ break
+ }
+ w0 := x.Args[2]
+ if w0.Op != Op386SHRLconst {
+ break
+ }
+ if w0.AuxInt != j-8 {
+ break
+ }
+ if w != w0.Args[0] {
+ break
+ }
+ mem := x.Args[3]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstoreidx1)
+ v.AuxInt = i - 1
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(idx)
+ v.AddArg(w0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
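+// The MOVBstoreidx1 rules mirror the plain MOVBstore ones with an extra
+// index operand: constants folded out of either ptr or idx land in AuxInt at
+// scale 1, and adjacent byte stores of w and w>>8 (or w>>j and w>>(j-8))
+// coalesce into a single MOVWstoreidx1.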
+func rewriteValue386_Op386MOVLload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLstore {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVLload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(Op386MOVLload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVLload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL1 {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386MOVLloadidx1)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL4 {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386MOVLloadidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLload [off] {sym} (ADDL ptr idx) mem)
+ // cond: ptr.Op != OpSB
+ // result: (MOVLloadidx1 [off] {sym} ptr idx mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDL {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(ptr.Op != OpSB) {
+ break
+ }
+ v.reset(Op386MOVLloadidx1)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
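+// The first MOVLload rule is store-to-load forwarding: a load at the same
+// {sym, off, ptr} as an immediately preceding MOVLstore becomes a copy of
+// the stored value, roughly (hypothetical source):
+//
+//	*p = x
+//	y := *p // y is just x; the load disappears
+//
+// The remaining rules fold ADDLconst/LEAL/LEAL1/LEAL4 address arithmetic into
+// the load's offset, symbol, and indexed addressing modes, guarded by the
+// is32Bit overflow check and, for SB-relative symbols, by !Flag_shared.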
+func rewriteValue386_Op386MOVLloadidx1(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVLloadidx1 [c] {sym} ptr (SHLLconst [2] idx) mem)
+ // cond:
+ // result: (MOVLloadidx4 [c] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SHLLconst {
+ break
+ }
+ if v_1.AuxInt != 2 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(Op386MOVLloadidx4)
+ v.AuxInt = c
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
+ // cond:
+ // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ v.reset(Op386MOVLloadidx1)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
+ // cond:
+ // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := v_1.AuxInt
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(Op386MOVLloadidx1)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVLloadidx4(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVLloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem)
+ // cond:
+ // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ v.reset(Op386MOVLloadidx4)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem)
+ // cond:
+ // result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := v_1.AuxInt
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(Op386MOVLloadidx4)
+ v.AuxInt = c + 4*d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
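+// In the idx4 form the effective address is ptr + 4*idx + AuxInt, so a
+// constant peeled off the pointer moves AuxInt by d while one peeled off the
+// index moves it by 4*d:
+//
+//	ptr + 4*(idx+d) + c == ptr + 4*idx + (c + 4*d)
+//
+// The idx2 and idx8 variants later in this file scale by 2 and 8 the same way.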
+func rewriteValue386_Op386MOVLstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVLstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
+ // cond: validOff(off)
+ // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(validOff(off)) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = makeValAndOff(int64(int32(c)), off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL1 {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386MOVLstoreidx1)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL4 {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386MOVLstoreidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} (ADDL ptr idx) val mem)
+ // cond: ptr.Op != OpSB
+ // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDL {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(ptr.Op != OpSB) {
+ break
+ }
+ v.reset(Op386MOVLstoreidx1)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
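+// MOVLstore of a MOVLconst becomes MOVLstoreconst; int64(int32(c)) truncates
+// the constant to the 32 bits that actually reach memory before packing it
+// with the offset, and validOff checks that the offset fits in the 32-bit
+// offset half of the ValAndOff encoding.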
+func rewriteValue386_Op386MOVLstoreconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd(off)
+ // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+ for {
+ sc := v.AuxInt
+ s := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(sc).canAdd(off)) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = s
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem)
+ // cond: canMergeSym(sym1, sym2)
+ // result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
+ for {
+ x := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL1 {
+ break
+ }
+ off := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386MOVLstoreconstidx1)
+ v.AuxInt = ValAndOff(x).add(off)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreconst [x] {sym1} (LEAL4 [off] {sym2} ptr idx) mem)
+ // cond: canMergeSym(sym1, sym2)
+ // result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
+ for {
+ x := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL4 {
+ break
+ }
+ off := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386MOVLstoreconstidx4)
+ v.AuxInt = ValAndOff(x).add(off)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreconst [x] {sym} (ADDL ptr idx) mem)
+ // cond:
+ // result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
+ for {
+ x := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDL {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ v.reset(Op386MOVLstoreconstidx1)
+ v.AuxInt = x
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
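+// ValAndOff(sc).canAdd(off) verifies that the packed offset plus off still
+// fits before add(off) rebuilds the AuxInt; it is the packed-AuxInt
+// counterpart of the is32Bit(off1+off2) guard used by the non-constant
+// store rules.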
+func rewriteValue386_Op386MOVLstoreconstidx1(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLLconst [2] idx) mem)
+ // cond:
+ // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SHLLconst {
+ break
+ }
+ if v_1.AuxInt != 2 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(Op386MOVLstoreconstidx4)
+ v.AuxInt = c
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem)
+ // cond:
+ // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+ for {
+ x := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ c := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ v.reset(Op386MOVLstoreconstidx1)
+ v.AuxInt = ValAndOff(x).add(c)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem)
+ // cond:
+ // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+ for {
+ x := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ c := v_1.AuxInt
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(Op386MOVLstoreconstidx1)
+ v.AuxInt = ValAndOff(x).add(c)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVLstoreconstidx4(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVLstoreconstidx4 [x] {sym} (ADDLconst [c] ptr) idx mem)
+ // cond:
+ // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+ for {
+ x := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ c := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ v.reset(Op386MOVLstoreconstidx4)
+ v.AuxInt = ValAndOff(x).add(c)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDLconst [c] idx) mem)
+ // cond:
+ // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
+ for {
+ x := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ c := v_1.AuxInt
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(Op386MOVLstoreconstidx4)
+ v.AuxInt = ValAndOff(x).add(4 * c)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVLstoreidx1(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVLstoreidx1 [c] {sym} ptr (SHLLconst [2] idx) val mem)
+ // cond:
+ // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SHLLconst {
+ break
+ }
+ if v_1.AuxInt != 2 {
+ break
+ }
+ idx := v_1.Args[0]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(Op386MOVLstoreidx4)
+ v.AuxInt = c
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
+ // cond:
+ // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(Op386MOVLstoreidx1)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
+ // cond:
+ // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := v_1.AuxInt
+ idx := v_1.Args[0]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(Op386MOVLstoreidx1)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVLstoreidx4(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVLstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem)
+ // cond:
+ // result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(Op386MOVLstoreidx4)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem)
+ // cond:
+ // result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := v_1.AuxInt
+ idx := v_1.Args[0]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(Op386MOVLstoreidx4)
+ v.AuxInt = c + 4*d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSDconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVSDconst [c])
+ // cond: config.ctxt.Flag_shared
+ // result: (MOVSDconst2 (MOVSDconst1 [c]))
+ for {
+ c := v.AuxInt
+ if !(config.ctxt.Flag_shared) {
+ break
+ }
+ v.reset(Op386MOVSDconst2)
+ v0 := b.NewValue0(v.Line, Op386MOVSDconst1, config.fe.TypeUInt32())
+ v0.AuxInt = c
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
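+// Under -shared (position-independent code) a float constant cannot be read
+// through an absolute address. This rule appears to split the
+// materialization in two: MOVSDconst1 yields the constant's address as a
+// uint32 and MOVSDconst2 loads through it, letting the address computation
+// go via the PIC base register. MOVSSconst below gets the same treatment.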
+func rewriteValue386_Op386MOVSDload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVSDload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVSDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(Op386MOVSDload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVSDload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVSDload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVSDload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL1 {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386MOVSDloadidx1)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVSDload [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL8 {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386MOVSDloadidx8)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVSDload [off] {sym} (ADDL ptr idx) mem)
+ // cond: ptr.Op != OpSB
+ // result: (MOVSDloadidx1 [off] {sym} ptr idx mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDL {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(ptr.Op != OpSB) {
+ break
+ }
+ v.reset(Op386MOVSDloadidx1)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSDloadidx1(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVSDloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
+ // cond:
+ // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ v.reset(Op386MOVSDloadidx1)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVSDloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
+ // cond:
+ // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := v_1.AuxInt
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(Op386MOVSDloadidx1)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSDloadidx8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVSDloadidx8 [c] {sym} (ADDLconst [d] ptr) idx mem)
+ // cond:
+ // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ v.reset(Op386MOVSDloadidx8)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVSDloadidx8 [c] {sym} ptr (ADDLconst [d] idx) mem)
+ // cond:
+ // result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := v_1.AuxInt
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(Op386MOVSDloadidx8)
+ v.AuxInt = c + 8*d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSDstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVSDstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVSDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(Op386MOVSDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVSDstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVSDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVSDstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL1 {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386MOVSDstoreidx1)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVSDstore [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL8 {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386MOVSDstoreidx8)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVSDstore [off] {sym} (ADDL ptr idx) val mem)
+ // cond: ptr.Op != OpSB
+ // result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDL {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(ptr.Op != OpSB) {
+ break
+ }
+ v.reset(Op386MOVSDstoreidx1)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSDstoreidx1(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVSDstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
+ // cond:
+ // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(Op386MOVSDstoreidx1)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
+ // cond:
+ // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := v_1.AuxInt
+ idx := v_1.Args[0]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(Op386MOVSDstoreidx1)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSDstoreidx8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVSDstoreidx8 [c] {sym} (ADDLconst [d] ptr) idx val mem)
+ // cond:
+ // result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(Op386MOVSDstoreidx8)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDLconst [d] idx) val mem)
+ // cond:
+ // result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := v_1.AuxInt
+ idx := v_1.Args[0]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(Op386MOVSDstoreidx8)
+ v.AuxInt = c + 8*d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSSconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVSSconst [c])
+ // cond: config.ctxt.Flag_shared
+ // result: (MOVSSconst2 (MOVSSconst1 [c]))
+ for {
+ c := v.AuxInt
+ if !(config.ctxt.Flag_shared) {
+ break
+ }
+ v.reset(Op386MOVSSconst2)
+ v0 := b.NewValue0(v.Line, Op386MOVSSconst1, config.fe.TypeUInt32())
+ v0.AuxInt = c
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSSload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVSSload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVSSload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(Op386MOVSSload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVSSload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVSSload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVSSload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL1 {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386MOVSSloadidx1)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVSSload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL4 {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386MOVSSloadidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVSSload [off] {sym} (ADDL ptr idx) mem)
+ // cond: ptr.Op != OpSB
+ // result: (MOVSSloadidx1 [off] {sym} ptr idx mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDL {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(ptr.Op != OpSB) {
+ break
+ }
+ v.reset(Op386MOVSSloadidx1)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSSloadidx1(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVSSloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
+ // cond:
+ // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ v.reset(Op386MOVSSloadidx1)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVSSloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
+ // cond:
+ // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := v_1.AuxInt
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(Op386MOVSSloadidx1)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSSloadidx4(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVSSloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem)
+ // cond:
+ // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ v.reset(Op386MOVSSloadidx4)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVSSloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem)
+ // cond:
+ // result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := v_1.AuxInt
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(Op386MOVSSloadidx4)
+ v.AuxInt = c + 4*d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSSstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVSSstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVSSstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(Op386MOVSSstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVSSstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVSSstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVSSstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL1 {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386MOVSSstoreidx1)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVSSstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL4 {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386MOVSSstoreidx4)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVSSstore [off] {sym} (ADDL ptr idx) val mem)
+ // cond: ptr.Op != OpSB
+ // result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDL {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(ptr.Op != OpSB) {
+ break
+ }
+ v.reset(Op386MOVSSstoreidx1)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSSstoreidx1(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVSSstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
+ // cond:
+ // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(Op386MOVSSstoreidx1)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
+ // cond:
+ // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := v_1.AuxInt
+ idx := v_1.Args[0]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(Op386MOVSSstoreidx1)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSSstoreidx4(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVSSstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem)
+ // cond:
+ // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(Op386MOVSSstoreidx4)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem)
+ // cond:
+ // result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := v_1.AuxInt
+ idx := v_1.Args[0]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(Op386MOVSSstoreidx4)
+ v.AuxInt = c + 4*d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWLSX(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWLSX x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWLSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v.Args[0]
+ if x.Op != Op386MOVWload {
+ break
+ }
+ off := x.AuxInt
+ sym := x.Aux
+ ptr := x.Args[0]
+ mem := x.Args[1]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Line, Op386MOVWLSXload, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ return true
+ }
+ // match: (MOVWLSX (ANDLconst [c] x))
+ // cond: c & 0x8000 == 0
+ // result: (ANDLconst [c & 0x7fff] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c&0x8000 == 0) {
+ break
+ }
+ v.reset(Op386ANDLconst)
+ v.AuxInt = c & 0x7fff
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
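+// The MOVWLSX (ANDLconst) rule rests on a small bit-level argument: when
+// c&0x8000 == 0, bit 15 of x&c is zero, so sign-extending the low 16 bits
+// equals zero-extending them, and
+//
+//	signext16(x & c) == x & c & 0xffff == x & (c & 0x7fff)
+//
+// where the last step uses the fact that bit 15 of c is already clear. The
+// sign extension therefore vanishes into a plain 32-bit mask.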
+func rewriteValue386_Op386MOVWLSXload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVWLSXload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWLZX(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWLZX x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v.Args[0]
+ if x.Op != Op386MOVWload {
+ break
+ }
+ off := x.AuxInt
+ sym := x.Aux
+ ptr := x.Args[0]
+ mem := x.Args[1]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Line, Op386MOVWload, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ return true
+ }
+ // match: (MOVWLZX x:(MOVWloadidx1 [off] {sym} ptr idx mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
+ for {
+ x := v.Args[0]
+ if x.Op != Op386MOVWloadidx1 {
+ break
+ }
+ off := x.AuxInt
+ sym := x.Aux
+ ptr := x.Args[0]
+ idx := x.Args[1]
+ mem := x.Args[2]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Line, Op386MOVWloadidx1, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(idx)
+ v0.AddArg(mem)
+ return true
+ }
+ // match: (MOVWLZX x:(MOVWloadidx2 [off] {sym} ptr idx mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem)
+ for {
+ x := v.Args[0]
+ if x.Op != Op386MOVWloadidx2 {
+ break
+ }
+ off := x.AuxInt
+ sym := x.Aux
+ ptr := x.Args[0]
+ idx := x.Args[1]
+ mem := x.Args[2]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Line, Op386MOVWloadidx2, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(idx)
+ v0.AddArg(mem)
+ return true
+ }
+ // match: (MOVWLZX (ANDLconst [c] x))
+ // cond:
+ // result: (ANDLconst [c & 0xffff] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(Op386ANDLconst)
+ v.AuxInt = c & 0xffff
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
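+// The @x.Block results above follow a rulegen convention: the replacement
+// load is built in the block of the matched load (b = x.Block) and v is
+// reset to an OpCopy of it, so the memory operand stays anchored where the
+// original load executed. On 386 the narrow loads are defined to
+// zero-extend, so MOVWLZX of a MOVWload/MOVWloadidx1/MOVWloadidx2 is simply
+// the load itself; the last rule likewise narrows the zero extension of an
+// ANDLconst to masking with c & 0xffff.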
+func rewriteValue386_Op386MOVWload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVWstore {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(Op386MOVWload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVWload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL1 {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386MOVWloadidx1)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL2 {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386MOVWloadidx2)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} (ADDL ptr idx) mem)
+ // cond: ptr.Op != OpSB
+ // result: (MOVWloadidx1 [off] {sym} ptr idx mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDL {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(ptr.Op != OpSB) {
+ break
+ }
+ v.reset(Op386MOVWloadidx1)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWloadidx1(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWloadidx1 [c] {sym} ptr (SHLLconst [1] idx) mem)
+ // cond:
+ // result: (MOVWloadidx2 [c] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SHLLconst {
+ break
+ }
+ if v_1.AuxInt != 1 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(Op386MOVWloadidx2)
+ v.AuxInt = c
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
+ // cond:
+ // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ v.reset(Op386MOVWloadidx1)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
+ // cond:
+ // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := v_1.AuxInt
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(Op386MOVWloadidx1)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWloadidx2(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWloadidx2 [c] {sym} (ADDLconst [d] ptr) idx mem)
+ // cond:
+ // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ v.reset(Op386MOVWloadidx2)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWloadidx2 [c] {sym} ptr (ADDLconst [d] idx) mem)
+ // cond:
+ // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := v_1.AuxInt
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(Op386MOVWloadidx2)
+ v.AuxInt = c + 2*d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWstore [off] {sym} ptr (MOVWLSX x) mem)
+ // cond:
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVWLSX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(Op386MOVWstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWLZX x) mem)
+ // cond:
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVWLZX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(Op386MOVWstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
+ // cond: validOff(off)
+ // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(validOff(off)) {
+ break
+ }
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = makeValAndOff(int64(int16(c)), off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL1 {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386MOVWstoreidx1)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL2 {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386MOVWstoreidx2)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} (ADDL ptr idx) val mem)
+ // cond: ptr.Op != OpSB
+ // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDL {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(ptr.Op != OpSB) {
+ break
+ }
+ v.reset(Op386MOVWstoreidx1)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVLstore [i-2] {s} p w mem)
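+	// (Two adjacent 16-bit stores of the low and high halves of w merge into one 32-bit store;
+	// x86 is little-endian, so the store at the lower address i-2 holds the low half.)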
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ p := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SHRLconst {
+ break
+ }
+ if v_1.AuxInt != 16 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v.Args[2]
+ if x.Op != Op386MOVWstore {
+ break
+ }
+ if x.AuxInt != i-2 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ if p != x.Args[0] {
+ break
+ }
+ if w != x.Args[1] {
+ break
+ }
+ mem := x.Args[2]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = i - 2
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVLstore [i-2] {s} p w0 mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ p := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SHRLconst {
+ break
+ }
+ j := v_1.AuxInt
+ w := v_1.Args[0]
+ x := v.Args[2]
+ if x.Op != Op386MOVWstore {
+ break
+ }
+ if x.AuxInt != i-2 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != Op386SHRLconst {
+ break
+ }
+ if w0.AuxInt != j-16 {
+ break
+ }
+ if w != w0.Args[0] {
+ break
+ }
+ mem := x.Args[2]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = i - 2
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(w0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWstoreconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd(off)
+ // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+ for {
+ sc := v.AuxInt
+ s := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(sc).canAdd(off)) {
+ break
+ }
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = s
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem)
+ // cond: canMergeSym(sym1, sym2)
+ // result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
+ for {
+ x := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL1 {
+ break
+ }
+ off := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386MOVWstoreconstidx1)
+ v.AuxInt = ValAndOff(x).add(off)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreconst [x] {sym1} (LEAL2 [off] {sym2} ptr idx) mem)
+ // cond: canMergeSym(sym1, sym2)
+ // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
+ for {
+ x := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386LEAL2 {
+ break
+ }
+ off := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386MOVWstoreconstidx2)
+ v.AuxInt = ValAndOff(x).add(off)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreconst [x] {sym} (ADDL ptr idx) mem)
+ // cond:
+ // result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
+ for {
+ x := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDL {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ v.reset(Op386MOVWstoreconstidx1)
+ v.AuxInt = x
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
+ // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
+ // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
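+	// (Two adjacent 16-bit constant stores merge into one 32-bit constant store;
+	// the value at the lower offset a supplies the low 16 bits.)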
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ p := v.Args[0]
+ x := v.Args[1]
+ if x.Op != Op386MOVWstoreconst {
+ break
+ }
+ a := x.AuxInt
+ if x.Aux != s {
+ break
+ }
+ if p != x.Args[0] {
+ break
+ }
+ mem := x.Args[1]
+ if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWstoreconstidx1(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLLconst [1] idx) mem)
+ // cond:
+ // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SHLLconst {
+ break
+ }
+ if v_1.AuxInt != 1 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(Op386MOVWstoreconstidx2)
+ v.AuxInt = c
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem)
+ // cond:
+ // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+ for {
+ x := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ c := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ v.reset(Op386MOVWstoreconstidx1)
+ v.AuxInt = ValAndOff(x).add(c)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem)
+ // cond:
+ // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+ for {
+ x := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ c := v_1.AuxInt
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(Op386MOVWstoreconstidx1)
+ v.AuxInt = ValAndOff(x).add(c)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
+ // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
+ // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ p := v.Args[0]
+ i := v.Args[1]
+ x := v.Args[2]
+ if x.Op != Op386MOVWstoreconstidx1 {
+ break
+ }
+ a := x.AuxInt
+ if x.Aux != s {
+ break
+ }
+ if p != x.Args[0] {
+ break
+ }
+ if i != x.Args[1] {
+ break
+ }
+ mem := x.Args[2]
+ if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstoreconstidx1)
+ v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(i)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWstoreconstidx2(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWstoreconstidx2 [x] {sym} (ADDLconst [c] ptr) idx mem)
+ // cond:
+ // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+ for {
+ x := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ c := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ v.reset(Op386MOVWstoreconstidx2)
+ v.AuxInt = ValAndOff(x).add(c)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDLconst [c] idx) mem)
+ // cond:
+ // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
+ for {
+ x := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ c := v_1.AuxInt
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(Op386MOVWstoreconstidx2)
+ v.AuxInt = ValAndOff(x).add(2 * c)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
+ // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
+ // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLLconst <i.Type> [1] i) mem)
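+	// (The merged 32-bit store uses a 1-scaled index, so the former 2x scale is made explicit
+	// by shifting the index left by one.)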
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ p := v.Args[0]
+ i := v.Args[1]
+ x := v.Args[2]
+ if x.Op != Op386MOVWstoreconstidx2 {
+ break
+ }
+ a := x.AuxInt
+ if x.Aux != s {
+ break
+ }
+ if p != x.Args[0] {
+ break
+ }
+ if i != x.Args[1] {
+ break
+ }
+ mem := x.Args[2]
+ if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstoreconstidx1)
+ v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
+ v.Aux = s
+ v.AddArg(p)
+ v0 := b.NewValue0(v.Line, Op386SHLLconst, i.Type)
+ v0.AuxInt = 1
+ v0.AddArg(i)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWstoreidx1(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWstoreidx1 [c] {sym} ptr (SHLLconst [1] idx) val mem)
+ // cond:
+ // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SHLLconst {
+ break
+ }
+ if v_1.AuxInt != 1 {
+ break
+ }
+ idx := v_1.Args[0]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(Op386MOVWstoreidx2)
+ v.AuxInt = c
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
+ // cond:
+ // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(Op386MOVWstoreidx1)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
+ // cond:
+ // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := v_1.AuxInt
+ idx := v_1.Args[0]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(Op386MOVWstoreidx1)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVLstoreidx1 [i-2] {s} p idx w mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ p := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != Op386SHRLconst {
+ break
+ }
+ if v_2.AuxInt != 16 {
+ break
+ }
+ w := v_2.Args[0]
+ x := v.Args[3]
+ if x.Op != Op386MOVWstoreidx1 {
+ break
+ }
+ if x.AuxInt != i-2 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ if p != x.Args[0] {
+ break
+ }
+ if idx != x.Args[1] {
+ break
+ }
+ if w != x.Args[2] {
+ break
+ }
+ mem := x.Args[3]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstoreidx1)
+ v.AuxInt = i - 2
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(idx)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ p := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != Op386SHRLconst {
+ break
+ }
+ j := v_2.AuxInt
+ w := v_2.Args[0]
+ x := v.Args[3]
+ if x.Op != Op386MOVWstoreidx1 {
+ break
+ }
+ if x.AuxInt != i-2 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ if p != x.Args[0] {
+ break
+ }
+ if idx != x.Args[1] {
+ break
+ }
+ w0 := x.Args[2]
+ if w0.Op != Op386SHRLconst {
+ break
+ }
+ if w0.AuxInt != j-16 {
+ break
+ }
+ if w != w0.Args[0] {
+ break
+ }
+ mem := x.Args[3]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstoreidx1)
+ v.AuxInt = i - 2
+ v.Aux = s
+ v.AddArg(p)
+ v.AddArg(idx)
+ v.AddArg(w0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWstoreidx2(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWstoreidx2 [c] {sym} (ADDLconst [d] ptr) idx val mem)
+ // cond:
+ // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(Op386MOVWstoreidx2)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreidx2 [c] {sym} ptr (ADDLconst [d] idx) val mem)
+ // cond:
+ // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
+ for {
+ c := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := v_1.AuxInt
+ idx := v_1.Args[0]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(Op386MOVWstoreidx2)
+ v.AuxInt = c + 2*d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVLstoreidx1 [i-2] {s} p (SHLLconst <idx.Type> [1] idx) w mem)
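+	// (As in the constant-store case, the 2x index scale becomes an explicit SHLLconst [1]
+	// for the 1-scaled 32-bit store.)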
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ p := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != Op386SHRLconst {
+ break
+ }
+ if v_2.AuxInt != 16 {
+ break
+ }
+ w := v_2.Args[0]
+ x := v.Args[3]
+ if x.Op != Op386MOVWstoreidx2 {
+ break
+ }
+ if x.AuxInt != i-2 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ if p != x.Args[0] {
+ break
+ }
+ if idx != x.Args[1] {
+ break
+ }
+ if w != x.Args[2] {
+ break
+ }
+ mem := x.Args[3]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstoreidx1)
+ v.AuxInt = i - 2
+ v.Aux = s
+ v.AddArg(p)
+ v0 := b.NewValue0(v.Line, Op386SHLLconst, idx.Type)
+ v0.AuxInt = 1
+ v0.AddArg(idx)
+ v.AddArg(v0)
+ v.AddArg(w)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVLstoreidx1 [i-2] {s} p (SHLLconst <idx.Type> [1] idx) w0 mem)
+ for {
+ i := v.AuxInt
+ s := v.Aux
+ p := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != Op386SHRLconst {
+ break
+ }
+ j := v_2.AuxInt
+ w := v_2.Args[0]
+ x := v.Args[3]
+ if x.Op != Op386MOVWstoreidx2 {
+ break
+ }
+ if x.AuxInt != i-2 {
+ break
+ }
+ if x.Aux != s {
+ break
+ }
+ if p != x.Args[0] {
+ break
+ }
+ if idx != x.Args[1] {
+ break
+ }
+ w0 := x.Args[2]
+ if w0.Op != Op386SHRLconst {
+ break
+ }
+ if w0.AuxInt != j-16 {
+ break
+ }
+ if w != w0.Args[0] {
+ break
+ }
+ mem := x.Args[3]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstoreidx1)
+ v.AuxInt = i - 2
+ v.Aux = s
+ v.AddArg(p)
+ v0 := b.NewValue0(v.Line, Op386SHLLconst, idx.Type)
+ v0.AuxInt = 1
+ v0.AddArg(idx)
+ v.AddArg(v0)
+ v.AddArg(w0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MULL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MULL x (MOVLconst [c]))
+ // cond:
+ // result: (MULLconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(Op386MULLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULL (MOVLconst [c]) x)
+ // cond:
+ // result: (MULLconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(Op386MULLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MULLconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MULLconst [c] (MULLconst [d] x))
+ // cond:
+ // result: (MULLconst [int64(int32(c * d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MULLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(Op386MULLconst)
+ v.AuxInt = int64(int32(c * d))
+ v.AddArg(x)
+ return true
+ }
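+	// The rules below strength-reduce multiplication by small constants.
+	// LEALn a b computes a + n*b, so for example (LEAL2 x x) is 3*x.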
+ // match: (MULLconst [-1] x)
+ // cond:
+ // result: (NEGL x)
+ for {
+ if v.AuxInt != -1 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(Op386NEGL)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLconst [0] _)
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (MULLconst [1] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 1 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLconst [3] x)
+ // cond:
+ // result: (LEAL2 x x)
+ for {
+ if v.AuxInt != 3 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(Op386LEAL2)
+ v.AddArg(x)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLconst [5] x)
+ // cond:
+ // result: (LEAL4 x x)
+ for {
+ if v.AuxInt != 5 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(Op386LEAL4)
+ v.AddArg(x)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLconst [7] x)
+ // cond:
+ // result: (LEAL8 (NEGL <v.Type> x) x)
+ for {
+ if v.AuxInt != 7 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(Op386LEAL8)
+ v0 := b.NewValue0(v.Line, Op386NEGL, v.Type)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLconst [9] x)
+ // cond:
+ // result: (LEAL8 x x)
+ for {
+ if v.AuxInt != 9 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(Op386LEAL8)
+ v.AddArg(x)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLconst [11] x)
+ // cond:
+ // result: (LEAL2 x (LEAL4 <v.Type> x x))
+ for {
+ if v.AuxInt != 11 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(Op386LEAL2)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386LEAL4, v.Type)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [13] x)
+ // cond:
+ // result: (LEAL4 x (LEAL2 <v.Type> x x))
+ for {
+ if v.AuxInt != 13 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(Op386LEAL4)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386LEAL2, v.Type)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [21] x)
+ // cond:
+ // result: (LEAL4 x (LEAL4 <v.Type> x x))
+ for {
+ if v.AuxInt != 21 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(Op386LEAL4)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386LEAL4, v.Type)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [25] x)
+ // cond:
+ // result: (LEAL8 x (LEAL2 <v.Type> x x))
+ for {
+ if v.AuxInt != 25 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(Op386LEAL8)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386LEAL2, v.Type)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [37] x)
+ // cond:
+ // result: (LEAL4 x (LEAL8 <v.Type> x x))
+ for {
+ if v.AuxInt != 37 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(Op386LEAL4)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386LEAL8, v.Type)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [41] x)
+ // cond:
+ // result: (LEAL8 x (LEAL4 <v.Type> x x))
+ for {
+ if v.AuxInt != 41 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(Op386LEAL8)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386LEAL4, v.Type)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [73] x)
+ // cond:
+ // result: (LEAL8 x (LEAL8 <v.Type> x x))
+ for {
+ if v.AuxInt != 73 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(Op386LEAL8)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386LEAL8, v.Type)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo(c)
+ // result: (SHLLconst [log2(c)] x)
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = log2(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo(c+1) && c >= 15
+ // result: (SUBL (SHLLconst <v.Type> [log2(c+1)] x) x)
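+	// ((2^k - 1)*x == (x<<k) - x, with k = log2(c+1).)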
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(isPowerOfTwo(c+1) && c >= 15) {
+ break
+ }
+ v.reset(Op386SUBL)
+ v0 := b.NewValue0(v.Line, Op386SHLLconst, v.Type)
+ v0.AuxInt = log2(c + 1)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo(c-1) && c >= 17
+ // result: (LEAL1 (SHLLconst <v.Type> [log2(c-1)] x) x)
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(isPowerOfTwo(c-1) && c >= 17) {
+ break
+ }
+ v.reset(Op386LEAL1)
+ v0 := b.NewValue0(v.Line, Op386SHLLconst, v.Type)
+ v0.AuxInt = log2(c - 1)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo(c-2) && c >= 34
+ // result: (LEAL2 (SHLLconst <v.Type> [log2(c-2)] x) x)
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(isPowerOfTwo(c-2) && c >= 34) {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v0 := b.NewValue0(v.Line, Op386SHLLconst, v.Type)
+ v0.AuxInt = log2(c - 2)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo(c-4) && c >= 68
+ // result: (LEAL4 (SHLLconst <v.Type> [log2(c-4)] x) x)
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(isPowerOfTwo(c-4) && c >= 68) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v0 := b.NewValue0(v.Line, Op386SHLLconst, v.Type)
+ v0.AuxInt = log2(c - 4)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo(c-8) && c >= 136
+ // result: (LEAL8 (SHLLconst <v.Type> [log2(c-8)] x) x)
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(isPowerOfTwo(c-8) && c >= 136) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v0 := b.NewValue0(v.Line, Op386SHLLconst, v.Type)
+ v0.AuxInt = log2(c - 8)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: c%3 == 0 && isPowerOfTwo(c/3)
+ // result: (SHLLconst [log2(c/3)] (LEAL2 <v.Type> x x))
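+	// (c = 3*2^k: multiply by 3 with LEAL2, then shift left by k.
+	// The c%5 and c%9 rules below are analogous.)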
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(c%3 == 0 && isPowerOfTwo(c/3)) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = log2(c / 3)
+ v0 := b.NewValue0(v.Line, Op386LEAL2, v.Type)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: c%5 == 0 && isPowerOfTwo(c/5)
+ // result: (SHLLconst [log2(c/5)] (LEAL4 <v.Type> x x))
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(c%5 == 0 && isPowerOfTwo(c/5)) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = log2(c / 5)
+ v0 := b.NewValue0(v.Line, Op386LEAL4, v.Type)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: c%9 == 0 && isPowerOfTwo(c/9)
+ // result: (SHLLconst [log2(c/9)] (LEAL8 <v.Type> x x))
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(c%9 == 0 && isPowerOfTwo(c/9)) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = log2(c / 9)
+ v0 := b.NewValue0(v.Line, Op386LEAL8, v.Type)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [c] (MOVLconst [d]))
+ // cond:
+ // result: (MOVLconst [int64(int32(c*d))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int64(int32(c * d))
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386NEGL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NEGL (MOVLconst [c]))
+ // cond:
+ // result: (MOVLconst [int64(int32(-c))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int64(int32(-c))
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386NOTL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NOTL (MOVLconst [c]))
+ // cond:
+ // result: (MOVLconst [^c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(Op386MOVLconst)
+ v.AuxInt = ^c
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ORL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ORL x (MOVLconst [c]))
+ // cond:
+ // result: (ORLconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(Op386ORLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORL (MOVLconst [c]) x)
+ // cond:
+ // result: (ORLconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(Op386ORLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORL x x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORL x0:(MOVBload [i] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)
+ // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem)
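+	// (Little-endian byte order: the byte at i is the low byte, so b0 | b1<<8 is exactly a
+	// 16-bit load. mergePoint picks a block where both byte loads are available.)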
+ for {
+ x0 := v.Args[0]
+ if x0.Op != Op386MOVBload {
+ break
+ }
+ i := x0.AuxInt
+ s := x0.Aux
+ p := x0.Args[0]
+ mem := x0.Args[1]
+ s0 := v.Args[1]
+ if s0.Op != Op386SHLLconst {
+ break
+ }
+ if s0.AuxInt != 8 {
+ break
+ }
+ x1 := s0.Args[0]
+ if x1.Op != Op386MOVBload {
+ break
+ }
+ if x1.AuxInt != i+1 {
+ break
+ }
+ if x1.Aux != s {
+ break
+ }
+ if p != x1.Args[0] {
+ break
+ }
+ if mem != x1.Args[1] {
+ break
+ }
+ if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
+ break
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = i
+ v0.Aux = s
+ v0.AddArg(p)
+ v0.AddArg(mem)
+ return true
+ }
+ // match: (ORL o0:(ORL o1:(ORL x0:(MOVBload [i] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) s1:(SHLLconst [16] x2:(MOVBload [i+2] {s} p mem))) s2:(SHLLconst [24] x3:(MOVBload [i+3] {s} p mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)
+ // result: @mergePoint(b,x0,x1,x2,x3) (MOVLload [i] {s} p mem)
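+	// (The same combining extended to four adjacent byte loads, yielding a single 32-bit load.)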
+ for {
+ o0 := v.Args[0]
+ if o0.Op != Op386ORL {
+ break
+ }
+ o1 := o0.Args[0]
+ if o1.Op != Op386ORL {
+ break
+ }
+ x0 := o1.Args[0]
+ if x0.Op != Op386MOVBload {
+ break
+ }
+ i := x0.AuxInt
+ s := x0.Aux
+ p := x0.Args[0]
+ mem := x0.Args[1]
+ s0 := o1.Args[1]
+ if s0.Op != Op386SHLLconst {
+ break
+ }
+ if s0.AuxInt != 8 {
+ break
+ }
+ x1 := s0.Args[0]
+ if x1.Op != Op386MOVBload {
+ break
+ }
+ if x1.AuxInt != i+1 {
+ break
+ }
+ if x1.Aux != s {
+ break
+ }
+ if p != x1.Args[0] {
+ break
+ }
+ if mem != x1.Args[1] {
+ break
+ }
+ s1 := o0.Args[1]
+ if s1.Op != Op386SHLLconst {
+ break
+ }
+ if s1.AuxInt != 16 {
+ break
+ }
+ x2 := s1.Args[0]
+ if x2.Op != Op386MOVBload {
+ break
+ }
+ if x2.AuxInt != i+2 {
+ break
+ }
+ if x2.Aux != s {
+ break
+ }
+ if p != x2.Args[0] {
+ break
+ }
+ if mem != x2.Args[1] {
+ break
+ }
+ s2 := v.Args[1]
+ if s2.Op != Op386SHLLconst {
+ break
+ }
+ if s2.AuxInt != 24 {
+ break
+ }
+ x3 := s2.Args[0]
+ if x3.Op != Op386MOVBload {
+ break
+ }
+ if x3.AuxInt != i+3 {
+ break
+ }
+ if x3.Aux != s {
+ break
+ }
+ if p != x3.Args[0] {
+ break
+ }
+ if mem != x3.Args[1] {
+ break
+ }
+ if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2, x3)
+ v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = i
+ v0.Aux = s
+ v0.AddArg(p)
+ v0.AddArg(mem)
+ return true
+ }
+ // match: (ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)
+ // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i] {s} p idx mem)
+ for {
+ x0 := v.Args[0]
+ if x0.Op != Op386MOVBloadidx1 {
+ break
+ }
+ i := x0.AuxInt
+ s := x0.Aux
+ p := x0.Args[0]
+ idx := x0.Args[1]
+ mem := x0.Args[2]
+ s0 := v.Args[1]
+ if s0.Op != Op386SHLLconst {
+ break
+ }
+ if s0.AuxInt != 8 {
+ break
+ }
+ x1 := s0.Args[0]
+ if x1.Op != Op386MOVBloadidx1 {
+ break
+ }
+ if x1.AuxInt != i+1 {
+ break
+ }
+ if x1.Aux != s {
+ break
+ }
+ if p != x1.Args[0] {
+ break
+ }
+ if idx != x1.Args[1] {
+ break
+ }
+ if mem != x1.Args[2] {
+ break
+ }
+ if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
+ break
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(v.Line, Op386MOVWloadidx1, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = i
+ v0.Aux = s
+ v0.AddArg(p)
+ v0.AddArg(idx)
+ v0.AddArg(mem)
+ return true
+ }
+ // match: (ORL o0:(ORL o1:(ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) s1:(SHLLconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) s2:(SHLLconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)
+ // result: @mergePoint(b,x0,x1,x2,x3) (MOVLloadidx1 <v.Type> [i] {s} p idx mem)
+ for {
+ o0 := v.Args[0]
+ if o0.Op != Op386ORL {
+ break
+ }
+ o1 := o0.Args[0]
+ if o1.Op != Op386ORL {
+ break
+ }
+ x0 := o1.Args[0]
+ if x0.Op != Op386MOVBloadidx1 {
+ break
+ }
+ i := x0.AuxInt
+ s := x0.Aux
+ p := x0.Args[0]
+ idx := x0.Args[1]
+ mem := x0.Args[2]
+ s0 := o1.Args[1]
+ if s0.Op != Op386SHLLconst {
+ break
+ }
+ if s0.AuxInt != 8 {
+ break
+ }
+ x1 := s0.Args[0]
+ if x1.Op != Op386MOVBloadidx1 {
+ break
+ }
+ if x1.AuxInt != i+1 {
+ break
+ }
+ if x1.Aux != s {
+ break
+ }
+ if p != x1.Args[0] {
+ break
+ }
+ if idx != x1.Args[1] {
+ break
+ }
+ if mem != x1.Args[2] {
+ break
+ }
+ s1 := o0.Args[1]
+ if s1.Op != Op386SHLLconst {
+ break
+ }
+ if s1.AuxInt != 16 {
+ break
+ }
+ x2 := s1.Args[0]
+ if x2.Op != Op386MOVBloadidx1 {
+ break
+ }
+ if x2.AuxInt != i+2 {
+ break
+ }
+ if x2.Aux != s {
+ break
+ }
+ if p != x2.Args[0] {
+ break
+ }
+ if idx != x2.Args[1] {
+ break
+ }
+ if mem != x2.Args[2] {
+ break
+ }
+ s2 := v.Args[1]
+ if s2.Op != Op386SHLLconst {
+ break
+ }
+ if s2.AuxInt != 24 {
+ break
+ }
+ x3 := s2.Args[0]
+ if x3.Op != Op386MOVBloadidx1 {
+ break
+ }
+ if x3.AuxInt != i+3 {
+ break
+ }
+ if x3.Aux != s {
+ break
+ }
+ if p != x3.Args[0] {
+ break
+ }
+ if idx != x3.Args[1] {
+ break
+ }
+ if mem != x3.Args[2] {
+ break
+ }
+ if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2, x3)
+ v0 := b.NewValue0(v.Line, Op386MOVLloadidx1, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = i
+ v0.Aux = s
+ v0.AddArg(p)
+ v0.AddArg(idx)
+ v0.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ORLconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ORLconst [c] x)
+ // cond: int32(c)==0
+ // result: x
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORLconst [c] _)
+ // cond: int32(c)==-1
+ // result: (MOVLconst [-1])
+ for {
+ c := v.AuxInt
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = -1
+ return true
+ }
+ // match: (ORLconst [c] (MOVLconst [d]))
+ // cond:
+ // result: (MOVLconst [c|d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(Op386MOVLconst)
+ v.AuxInt = c | d
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ROLBconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ROLBconst [c] (ROLBconst [d] x))
+ // cond:
+	// result: (ROLBconst [(c+d)&7] x)
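+	// (Consecutive rotates combine; the count is reduced mod 8, the width of a byte.)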
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ROLBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(Op386ROLBconst)
+ v.AuxInt = (c + d) & 7
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLBconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ROLLconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ROLLconst [c] (ROLLconst [d] x))
+ // cond:
+ // result: (ROLLconst [(c+d)&31] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ROLLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(Op386ROLLconst)
+ v.AuxInt = (c + d) & 31
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLLconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ROLWconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ROLWconst [c] (ROLWconst [d] x))
+ // cond:
+ // result: (ROLWconst [(c+d)&15] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386ROLWconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(Op386ROLWconst)
+ v.AuxInt = (c + d) & 15
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLWconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SARB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SARB x (MOVLconst [c]))
+ // cond:
+ // result: (SARBconst [c&31] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(Op386SARBconst)
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SARBconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SARBconst [c] (MOVLconst [d]))
+ // cond:
+ // result: (MOVLconst [d>>uint64(c)])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(Op386MOVLconst)
+ v.AuxInt = d >> uint64(c)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SARL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SARL x (MOVLconst [c]))
+ // cond:
+ // result: (SARLconst [c&31] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(Op386SARLconst)
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARL x (ANDLconst [31] y))
+ // cond:
+ // result: (SARL x y)
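+	// (Hardware shifts already use only the low 5 bits of the count, so masking the count
+	// with 31 is redundant.)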
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ANDLconst {
+ break
+ }
+ if v_1.AuxInt != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386SARL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SARLconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SARLconst [c] (MOVLconst [d]))
+ // cond:
+ // result: (MOVLconst [d>>uint64(c)])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(Op386MOVLconst)
+ v.AuxInt = d >> uint64(c)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SARW(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SARW x (MOVLconst [c]))
+ // cond:
+ // result: (SARWconst [c&31] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(Op386SARWconst)
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SARWconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SARWconst [c] (MOVLconst [d]))
+ // cond:
+ // result: (MOVLconst [d>>uint64(c)])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(Op386MOVLconst)
+ v.AuxInt = d >> uint64(c)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SBBL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SBBL x (MOVLconst [c]) f)
+ // cond:
+ // result: (SBBLconst [c] x f)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ f := v.Args[2]
+ v.reset(Op386SBBLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(f)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SBBLcarrymask(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
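+	// SBBLcarrymask computes 0 - carry: all ones if the carry flag is set, zero otherwise.
+	// With the flags known, it folds to -1 for the unsigned-less-than cases and 0 for the rest.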
+ // match: (SBBLcarrymask (FlagEQ))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (SBBLcarrymask (FlagLT_ULT))
+ // cond:
+ // result: (MOVLconst [-1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = -1
+ return true
+ }
+ // match: (SBBLcarrymask (FlagLT_UGT))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (SBBLcarrymask (FlagGT_ULT))
+ // cond:
+ // result: (MOVLconst [-1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = -1
+ return true
+ }
+ // match: (SBBLcarrymask (FlagGT_UGT))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SETA (InvertFlags x))
+ // cond:
+ // result: (SETB x)
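+	// (InvertFlags represents the flags of a comparison with swapped operands, so
+	// unsigned-above becomes unsigned-below. The same swap applies to the SET* rules
+	// that follow; SETEQ and SETNE are unchanged by it.)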
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETB)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETA (FlagEQ))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (SETA (FlagLT_ULT))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (SETA (FlagLT_UGT))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (SETA (FlagGT_ULT))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (SETA (FlagGT_UGT))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETAE(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SETAE (InvertFlags x))
+ // cond:
+ // result: (SETBE x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETBE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETAE (FlagEQ))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (SETAE (FlagLT_ULT))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (SETAE (FlagLT_UGT))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (SETAE (FlagGT_ULT))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (SETAE (FlagGT_UGT))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SETB (InvertFlags x))
+ // cond:
+ // result: (SETA x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETA)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETB (FlagEQ))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (SETB (FlagLT_ULT))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (SETB (FlagLT_UGT))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (SETB (FlagGT_ULT))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (SETB (FlagGT_UGT))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETBE(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SETBE (InvertFlags x))
+ // cond:
+ // result: (SETAE x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETAE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETBE (FlagEQ))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (SETBE (FlagLT_ULT))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (SETBE (FlagLT_UGT))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (SETBE (FlagGT_ULT))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (SETBE (FlagGT_UGT))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETEQ(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SETEQ (InvertFlags x))
+ // cond:
+ // result: (SETEQ x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETEQ)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETEQ (FlagEQ))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (SETEQ (FlagLT_ULT))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (SETEQ (FlagLT_UGT))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (SETEQ (FlagGT_ULT))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (SETEQ (FlagGT_UGT))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETG(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SETG (InvertFlags x))
+ // cond:
+ // result: (SETL x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETL)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETG (FlagEQ))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (SETG (FlagLT_ULT))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (SETG (FlagLT_UGT))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (SETG (FlagGT_ULT))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (SETG (FlagGT_UGT))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETGE(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SETGE (InvertFlags x))
+ // cond:
+ // result: (SETLE x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETLE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETGE (FlagEQ))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (SETGE (FlagLT_ULT))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (SETGE (FlagLT_UGT))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (SETGE (FlagGT_ULT))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (SETGE (FlagGT_UGT))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SETL (InvertFlags x))
+ // cond:
+ // result: (SETG x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETG)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETL (FlagEQ))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (SETL (FlagLT_ULT))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (SETL (FlagLT_UGT))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (SETL (FlagGT_ULT))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (SETL (FlagGT_UGT))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETLE(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SETLE (InvertFlags x))
+ // cond:
+ // result: (SETGE x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETGE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETLE (FlagEQ))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (SETLE (FlagLT_ULT))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (SETLE (FlagLT_UGT))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (SETLE (FlagGT_ULT))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (SETLE (FlagGT_UGT))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETNE(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SETNE (InvertFlags x))
+ // cond:
+ // result: (SETNE x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETNE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETNE (FlagEQ))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (SETNE (FlagLT_ULT))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (SETNE (FlagLT_UGT))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (SETNE (FlagGT_ULT))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (SETNE (FlagGT_UGT))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SHLL x (MOVLconst [c]))
+ // cond:
+ // result: (SHLLconst [c&31] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(Op386SHLLconst)
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLL x (ANDLconst [31] y))
+ // cond:
+ // result: (SHLL x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ANDLconst {
+ break
+ }
+ if v_1.AuxInt != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386SHLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHRB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SHRB x (MOVLconst [c]))
+ // cond:
+ // result: (SHRBconst [c&31] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(Op386SHRBconst)
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SHRL x (MOVLconst [c]))
+ // cond:
+ // result: (SHRLconst [c&31] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(Op386SHRLconst)
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRL x (ANDLconst [31] y))
+ // cond:
+ // result: (SHRL x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386ANDLconst {
+ break
+ }
+ if v_1.AuxInt != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386SHRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHRW(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SHRW x (MOVLconst [c]))
+ // cond:
+ // result: (SHRWconst [c&31] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(Op386SHRWconst)
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBL x (MOVLconst [c]))
+ // cond:
+ // result: (SUBLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(Op386SUBLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBL (MOVLconst [c]) x)
+ // cond:
+ // result: (NEGL (SUBLconst <v.Type> x [c]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(Op386NEGL)
+ v0 := b.NewValue0(v.Line, Op386SUBLconst, v.Type)
+ v0.AuxInt = c
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBL x x)
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBLcarry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBLcarry x (MOVLconst [c]))
+ // cond:
+ // result: (SUBLconstcarry [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(Op386SUBLconstcarry)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
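+// SUBLconst never survives rewriting: a zero constant becomes a copy
+// and anything else becomes an ADDLconst of the negated value, so
+// later rules need only match the ADD form.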
+func rewriteValue386_Op386SUBLconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBLconst [c] x)
+ // cond: int32(c) == 0
+ // result: x
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBLconst [c] x)
+ // cond:
+ // result: (ADDLconst [int64(int32(-c))] x)
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(Op386ADDLconst)
+ v.AuxInt = int64(int32(-c))
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_Op386XORL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORL x (MOVLconst [c]))
+ // cond:
+ // result: (XORLconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(Op386XORLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORL (MOVLconst [c]) x)
+ // cond:
+ // result: (XORLconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(Op386XORLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORL x x)
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386XORLconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORLconst [c] (XORLconst [d] x))
+ // cond:
+ // result: (XORLconst [c ^ d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386XORLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(Op386XORLconst)
+ v.AuxInt = c ^ d
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [c] x)
+ // cond: int32(c)==0
+ // result: x
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [c] (MOVLconst [d]))
+ // cond:
+ // result: (MOVLconst [c^d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(Op386MOVLconst)
+ v.AuxInt = c ^ d
+ return true
+ }
+ return false
+}
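+// The remaining functions lower the machine-independent ops (Add16,
+// Div32, Move, ...) to 386 instructions. Narrow integer arithmetic
+// reuses the 32-bit instructions, since only the low-order result
+// bits are observed.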
+func rewriteValue386_OpAdd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add16 x y)
+ // cond:
+ // result: (ADDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ADDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAdd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32 x y)
+ // cond:
+ // result: (ADDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ADDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAdd32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32F x y)
+ // cond:
+ // result: (ADDSS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ADDSS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAdd32carry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32carry x y)
+ // cond:
+ // result: (ADDLcarry x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ADDLcarry)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAdd32withcarry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32withcarry x y c)
+ // cond:
+ // result: (ADCL x y c)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ c := v.Args[2]
+ v.reset(Op386ADCL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(c)
+ return true
+ }
+}
+func rewriteValue386_OpAdd64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add64F x y)
+ // cond:
+ // result: (ADDSD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ADDSD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAdd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add8 x y)
+ // cond:
+ // result: (ADDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ADDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAddPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AddPtr x y)
+ // cond:
+ // result: (ADDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ADDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAddr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Addr {sym} base)
+ // cond:
+ // result: (LEAL {sym} base)
+ for {
+ sym := v.Aux
+ base := v.Args[0]
+ v.reset(Op386LEAL)
+ v.Aux = sym
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValue386_OpAnd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And16 x y)
+ // cond:
+ // result: (ANDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAnd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And32 x y)
+ // cond:
+ // result: (ANDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAnd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And8 x y)
+ // cond:
+ // result: (ANDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAndB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AndB x y)
+ // cond:
+ // result: (ANDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpBswap32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Bswap32 x)
+ // cond:
+ // result: (BSWAPL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386BSWAPL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpClosureCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ClosureCall [argwid] entry closure mem)
+ // cond:
+ // result: (CALLclosure [argwid] entry closure mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ closure := v.Args[1]
+ mem := v.Args[2]
+ v.reset(Op386CALLclosure)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(closure)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValue386_OpCom16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com16 x)
+ // cond:
+ // result: (NOTL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386NOTL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpCom32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com32 x)
+ // cond:
+ // result: (NOTL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386NOTL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpCom8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com8 x)
+ // cond:
+ // result: (NOTL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386NOTL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpConst16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const16 [val])
+ // cond:
+ // result: (MOVLconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(Op386MOVLconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValue386_OpConst32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32 [val])
+ // cond:
+ // result: (MOVLconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(Op386MOVLconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValue386_OpConst32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32F [val])
+ // cond:
+ // result: (MOVSSconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(Op386MOVSSconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValue386_OpConst64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const64F [val])
+ // cond:
+ // result: (MOVSDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(Op386MOVSDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValue386_OpConst8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const8 [val])
+ // cond:
+ // result: (MOVLconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(Op386MOVLconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValue386_OpConstBool(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstBool [b])
+ // cond:
+ // result: (MOVLconst [b])
+ for {
+ b := v.AuxInt
+ v.reset(Op386MOVLconst)
+ v.AuxInt = b
+ return true
+ }
+}
+func rewriteValue386_OpConstNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstNil)
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+}
+func rewriteValue386_OpConvert(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Convert <t> x mem)
+ // cond:
+ // result: (MOVLconvert <t> x mem)
+ for {
+ t := v.Type
+ x := v.Args[0]
+ mem := v.Args[1]
+ v.reset(Op386MOVLconvert)
+ v.Type = t
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValue386_OpCvt32Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto32 x)
+ // cond:
+ // result: (CVTTSS2SL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386CVTTSS2SL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpCvt32Fto64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto64F x)
+ // cond:
+ // result: (CVTSS2SD x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386CVTSS2SD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpCvt32to32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to32F x)
+ // cond:
+ // result: (CVTSL2SS x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386CVTSL2SS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpCvt32to64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to64F x)
+ // cond:
+ // result: (CVTSL2SD x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386CVTSL2SD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpCvt64Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32 x)
+ // cond:
+ // result: (CVTTSD2SL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386CVTTSD2SL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpCvt64Fto32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32F x)
+ // cond:
+ // result: (CVTSD2SS x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386CVTSD2SS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpDeferCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (DeferCall [argwid] mem)
+ // cond:
+ // result: (CALLdefer [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(Op386CALLdefer)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValue386_OpDiv16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16 x y)
+ // cond:
+ // result: (DIVW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386DIVW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpDiv16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16u x y)
+ // cond:
+ // result: (DIVWU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386DIVWU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpDiv32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32 x y)
+ // cond:
+ // result: (DIVL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386DIVL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpDiv32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32F x y)
+ // cond:
+ // result: (DIVSS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386DIVSS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpDiv32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32u x y)
+ // cond:
+ // result: (DIVLU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386DIVLU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpDiv64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64F x y)
+ // cond:
+ // result: (DIVSD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386DIVSD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
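+// There are no 8-bit divide rules as such: the operands are sign- or
+// zero-extended to 16 bits so that the DIVW/DIVWU lowerings can be
+// reused.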
+func rewriteValue386_OpDiv8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8 x y)
+ // cond:
+ // result: (DIVW (SignExt8to16 x) (SignExt8to16 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386DIVW)
+ v0 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpDiv8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8u x y)
+ // cond:
+ // result: (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386DIVWU)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
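+// Comparisons lower to a flag-producing CMP (or UCOMIS for floats)
+// consumed by a SETcc that materializes the boolean. The F-suffixed
+// SETcc variants also handle the unordered (NaN) outcome of the
+// floating-point compares.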
+func rewriteValue386_OpEq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq16 x y)
+ // cond:
+ // result: (SETEQ (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32 x y)
+ // cond:
+ // result: (SETEQ (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32F x y)
+ // cond:
+ // result: (SETEQF (UCOMISS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETEQF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq64F x y)
+ // cond:
+ // result: (SETEQF (UCOMISD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETEQF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq8 x y)
+ // cond:
+ // result: (SETEQ (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqB x y)
+ // cond:
+ // result: (SETEQ (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqPtr x y)
+ // cond:
+ // result: (SETEQ (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16 x y)
+ // cond:
+ // result: (SETGE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGE)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16U x y)
+ // cond:
+ // result: (SETAE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETAE)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32 x y)
+ // cond:
+ // result: (SETGE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGE)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32F x y)
+ // cond:
+ // result: (SETGEF (UCOMISS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGEF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32U x y)
+ // cond:
+ // result: (SETAE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETAE)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64F x y)
+ // cond:
+ // result: (SETGEF (UCOMISD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGEF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8 x y)
+ // cond:
+ // result: (SETGE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGE)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8U x y)
+ // cond:
+ // result: (SETAE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETAE)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGetClosurePtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GetClosurePtr)
+ // cond:
+ // result: (LoweredGetClosurePtr)
+ for {
+ v.reset(Op386LoweredGetClosurePtr)
+ return true
+ }
+}
+func rewriteValue386_OpGetG(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GetG mem)
+ // cond:
+ // result: (LoweredGetG mem)
+ for {
+ mem := v.Args[0]
+ v.reset(Op386LoweredGetG)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValue386_OpGoCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GoCall [argwid] mem)
+ // cond:
+ // result: (CALLgo [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(Op386CALLgo)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValue386_OpGreater16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16 x y)
+ // cond:
+ // result: (SETG (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETG)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGreater16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16U x y)
+ // cond:
+ // result: (SETA (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETA)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGreater32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32 x y)
+ // cond:
+ // result: (SETG (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETG)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGreater32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32F x y)
+ // cond:
+ // result: (SETGF (UCOMISS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGreater32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32U x y)
+ // cond:
+ // result: (SETA (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETA)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGreater64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64F x y)
+ // cond:
+ // result: (SETGF (UCOMISD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGreater8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8 x y)
+ // cond:
+ // result: (SETG (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETG)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGreater8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8U x y)
+ // cond:
+ // result: (SETA (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETA)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
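+// The Hmul ops want the high half of the double-width product, which
+// the HMULx instructions compute directly.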
+func rewriteValue386_OpHmul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16 x y)
+ // cond:
+ // result: (HMULW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386HMULW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpHmul16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16u x y)
+ // cond:
+ // result: (HMULWU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386HMULWU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpHmul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32 x y)
+ // cond:
+ // result: (HMULL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386HMULL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpHmul32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32u x y)
+ // cond:
+ // result: (HMULLU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386HMULLU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpHmul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8 x y)
+ // cond:
+ // result: (HMULB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386HMULB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpHmul8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8u x y)
+ // cond:
+ // result: (HMULBU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386HMULBU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpInterCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (InterCall [argwid] entry mem)
+ // cond:
+ // result: (CALLinter [argwid] entry mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ mem := v.Args[1]
+ v.reset(Op386CALLinter)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValue386_OpIsInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsInBounds idx len)
+ // cond:
+ // result: (SETB (CMPL idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(Op386SETB)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpIsNonNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsNonNil p)
+ // cond:
+ // result: (SETNE (TESTL p p))
+ for {
+ p := v.Args[0]
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Line, Op386TESTL, TypeFlags)
+ v0.AddArg(p)
+ v0.AddArg(p)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpIsSliceInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsSliceInBounds idx len)
+ // cond:
+ // result: (SETBE (CMPL idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(Op386SETBE)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16 x y)
+ // cond:
+ // result: (SETLE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETLE)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16U x y)
+ // cond:
+ // result: (SETBE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETBE)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32 x y)
+ // cond:
+ // result: (SETLE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETLE)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
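+// Floating-point <= and < are expressed as >= and > with the operands
+// swapped, so only the SETGEF and SETGF forms are needed.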
+func rewriteValue386_OpLeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32F x y)
+ // cond:
+ // result: (SETGEF (UCOMISS y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGEF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32U x y)
+ // cond:
+ // result: (SETBE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETBE)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64F x y)
+ // cond:
+ // result: (SETGEF (UCOMISD y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGEF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8 x y)
+ // cond:
+ // result: (SETLE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETLE)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8U x y)
+ // cond:
+ // result: (SETBE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETBE)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16 x y)
+ // cond:
+ // result: (SETL (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETL)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16U x y)
+ // cond:
+ // result: (SETB (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETB)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32 x y)
+ // cond:
+ // result: (SETL (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETL)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32F x y)
+ // cond:
+ // result: (SETGF (UCOMISS y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32U x y)
+ // cond:
+ // result: (SETB (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETB)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64F x y)
+ // cond:
+ // result: (SETGF (UCOMISD y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8 x y)
+ // cond:
+ // result: (SETL (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETL)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8U x y)
+ // cond:
+ // result: (SETB (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETB)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
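+// Load dispatches on the type: 32-bit integers and pointers use
+// MOVLload, narrower integers and booleans use MOVWload/MOVBload, and
+// floats use the SSE loads.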
+func rewriteValue386_OpLoad(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) || isPtr(t))
+ // result: (MOVLload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(Op386MOVLload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t)
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(Op386MOVWload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (t.IsBoolean() || is8BitInt(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(t.IsBoolean() || is8BitInt(t)) {
+ break
+ }
+ v.reset(Op386MOVBload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVSSload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(Op386MOVSSload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVSDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(Op386MOVSDload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
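+// Constant rotates map directly onto ROL, with the count reduced
+// modulo the operand width.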
+func rewriteValue386_OpLrot16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot16 <t> x [c])
+ // cond:
+ // result: (ROLWconst <t> [c&15] x)
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(Op386ROLWconst)
+ v.Type = t
+ v.AuxInt = c & 15
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpLrot32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot32 <t> x [c])
+ // cond:
+ // result: (ROLLconst <t> [c&31] x)
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(Op386ROLLconst)
+ v.Type = t
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpLrot8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot8 <t> x [c])
+ // cond:
+ // result: (ROLBconst <t> [c&7] x)
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(Op386ROLBconst)
+ v.Type = t
+ v.AuxInt = c & 7
+ v.AddArg(x)
+ return true
+ }
+}
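+// Go shifts must yield 0 once the count reaches the operand width,
+// but 386 shifts use only the low 5 bits of the count. The rules
+// below AND the raw shift with (SBBLcarrymask (CMPconst y [32])):
+// the compare sets the carry flag when the count is below 32, and a
+// subtract-with-borrow of a register from itself then gives all ones
+// (count in range) or all zeros (count out of range). For the 8- and
+// 16-bit left shifts, counts of 16..31 already clear the low-order
+// bits, so comparing against 32 is sufficient (the unsigned right
+// shifts later in the file compare against the true width instead);
+// constant 64-bit counts are folded directly.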
+func rewriteValue386_OpLsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x16 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpLsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x32 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpLsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SHLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh16x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh16x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x8 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpLsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x16 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpLsh32x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x32 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpLsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SHLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh32x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x8 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpLsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x16 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpLsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x32 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpLsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SHLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh8x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x8 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpMod16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16 x y)
+ // cond:
+ // result: (MODW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MODW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMod16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16u x y)
+ // cond:
+ // result: (MODWU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MODWU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMod32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32 x y)
+ // cond:
+ // result: (MODL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MODL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMod32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32u x y)
+ // cond:
+ // result: (MODLU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MODLU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMod8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8 x y)
+ // cond:
+ // result: (MODW (SignExt8to16 x) (SignExt8to16 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MODW)
+ v0 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpMod8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8u x y)
+ // cond:
+ // result: (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MODWU)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
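+// Move is lowered by size: small copies become explicit load/store
+// pairs (with one overlapping pair for sizes such as 7), a size that
+// is not a multiple of 4 first moves 4 bytes and recurses on the
+// aligned remainder, mid-sized aligned copies use Duff's device, and
+// anything larger (or with Duff's device disabled) falls back to
+// REP MOVSL.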
+func rewriteValue386_OpMove(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Move [s] _ _ mem)
+ // cond: SizeAndAlign(s).Size() == 0
+ // result: mem
+ for {
+ s := v.AuxInt
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = mem.Type
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 1
+ // result: (MOVBstore dst (MOVBload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 1) {
+ break
+ }
+ v.reset(Op386MOVBstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, Op386MOVBload, config.fe.TypeUInt8())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 2
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 2) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 4
+ // result: (MOVLstore dst (MOVLload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 4) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 3
+ // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 3) {
+ break
+ }
+ v.reset(Op386MOVBstore)
+ v.AuxInt = 2
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, Op386MOVBload, config.fe.TypeUInt8())
+ v0.AuxInt = 2
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386MOVWstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 5
+ // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 5) {
+ break
+ }
+ v.reset(Op386MOVBstore)
+ v.AuxInt = 4
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, Op386MOVBload, config.fe.TypeUInt8())
+ v0.AuxInt = 4
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 6
+ // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 6) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = 4
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
+ v0.AuxInt = 4
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 7
+ // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 7) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = 3
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+ v0.AuxInt = 3
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 8
+ // result: (MOVLstore [4] dst (MOVLload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 8) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = 4
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+ v0.AuxInt = 4
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size()%4 != 0
+ // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%4] (ADDLconst <dst.Type> dst [SizeAndAlign(s).Size()%4]) (ADDLconst <src.Type> src [SizeAndAlign(s).Size()%4]) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size()%4 != 0) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%4
+ v0 := b.NewValue0(v.Line, Op386ADDLconst, dst.Type)
+ v0.AuxInt = SizeAndAlign(s).Size() % 4
+ v0.AddArg(dst)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386ADDLconst, src.Type)
+ v1.AuxInt = SizeAndAlign(s).Size() % 4
+ v1.AddArg(src)
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+ v3.AddArg(src)
+ v3.AddArg(mem)
+ v2.AddArg(v3)
+ v2.AddArg(mem)
+ v.AddArg(v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() <= 4*128 && SizeAndAlign(s).Size()%4 == 0 && !config.noDuffDevice
+ // result: (DUFFCOPY [10*(128-SizeAndAlign(s).Size()/4)] dst src mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() <= 4*128 && SizeAndAlign(s).Size()%4 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(Op386DUFFCOPY)
+ v.AuxInt = 10 * (128 - SizeAndAlign(s).Size()/4)
+ v.AddArg(dst)
+ v.AddArg(src)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: (SizeAndAlign(s).Size() > 4*128 || config.noDuffDevice) && SizeAndAlign(s).Size()%4 == 0
+ // result: (REPMOVSL dst src (MOVLconst [SizeAndAlign(s).Size()/4]) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !((SizeAndAlign(s).Size() > 4*128 || config.noDuffDevice) && SizeAndAlign(s).Size()%4 == 0) {
+ break
+ }
+ v.reset(Op386REPMOVSL)
+ v.AddArg(dst)
+ v.AddArg(src)
+ v0 := b.NewValue0(v.Line, Op386MOVLconst, config.fe.TypeUInt32())
+ v0.AuxInt = SizeAndAlign(s).Size() / 4
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpMul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul16 x y)
+ // cond:
+ // result: (MULL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MULL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32 x y)
+ // cond:
+ // result: (MULL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MULL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMul32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32F x y)
+ // cond:
+ // result: (MULSS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MULSS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMul32uhilo(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32uhilo x y)
+ // cond:
+ // result: (MULLQU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MULLQU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMul64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul64F x y)
+ // cond:
+ // result: (MULSD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MULSD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul8 x y)
+ // cond:
+ // result: (MULL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MULL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpNeg16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg16 x)
+ // cond:
+ // result: (NEGL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386NEGL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpNeg32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg32 x)
+ // cond:
+ // result: (NEGL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386NEGL)
+ v.AddArg(x)
+ return true
+ }
+}
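+// With SSE, floating-point negation just flips the sign bit by
+// XORing with -0.0; with the 387 stack, the dedicated FCHS
+// instruction is used instead.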
+func rewriteValue386_OpNeg32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg32F x)
+ // cond: !config.use387
+ // result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
+ for {
+ x := v.Args[0]
+ if !(!config.use387) {
+ break
+ }
+ v.reset(Op386PXOR)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386MOVSSconst, config.Frontend().TypeFloat32())
+ v0.AuxInt = f2i(math.Copysign(0, -1))
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Neg32F x)
+ // cond: config.use387
+ // result: (FCHS x)
+ for {
+ x := v.Args[0]
+ if !(config.use387) {
+ break
+ }
+ v.reset(Op386FCHS)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpNeg64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg64F x)
+ // cond: !config.use387
+ // result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
+ for {
+ x := v.Args[0]
+ if !(!config.use387) {
+ break
+ }
+ v.reset(Op386PXOR)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386MOVSDconst, config.Frontend().TypeFloat64())
+ v0.AuxInt = f2i(math.Copysign(0, -1))
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Neg64F x)
+ // cond: config.use387
+ // result: (FCHS x)
+ for {
+ x := v.Args[0]
+ if !(config.use387) {
+ break
+ }
+ v.reset(Op386FCHS)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpNeg8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg8 x)
+ // cond:
+ // result: (NEGL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386NEGL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpNeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq16 x y)
+ // cond:
+ // result: (SETNE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32 x y)
+ // cond:
+ // result: (SETNE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32F x y)
+ // cond:
+ // result: (SETNEF (UCOMISS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETNEF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq64F x y)
+ // cond:
+ // result: (SETNEF (UCOMISD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETNEF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq8 x y)
+ // cond:
+ // result: (SETNE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqB x y)
+ // cond:
+ // result: (SETNE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqPtr x y)
+ // cond:
+ // result: (SETNE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNilCheck(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NilCheck ptr mem)
+ // cond:
+ // result: (LoweredNilCheck ptr mem)
+ for {
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ v.reset(Op386LoweredNilCheck)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValue386_OpNot(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Not x)
+ // cond:
+ // result: (XORLconst [1] x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386XORLconst)
+ v.AuxInt = 1
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpOffPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OffPtr [off] ptr)
+ // cond:
+ // result: (ADDLconst [off] ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ v.reset(Op386ADDLconst)
+ v.AuxInt = off
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValue386_OpOr16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or16 x y)
+ // cond:
+ // result: (ORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpOr32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or32 x y)
+ // cond:
+ // result: (ORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpOr8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or8 x y)
+ // cond:
+ // result: (ORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpOrB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OrB x y)
+ // cond:
+ // result: (ORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
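+// The variable-count shift rules below implement the Go spec's shift
+// semantics on top of x86 shifts, which truncate the count. For an
+// unsigned right shift the result must be 0 once the count reaches the
+// operand width, e.g.:
+//
+//	var x uint16 = 0xFFFF
+//	var s uint32 = 20
+//	_ = x >> s // must be 0 in Go; a raw SHRW would use s&31
+//
+// SBBLcarrymask(CMPWconst y [16]) materializes -1 when y < 16 (unsigned)
+// and 0 otherwise, so ANDing it with the hardware shift result gives the
+// spec-conforming answer without a branch. The compare width (CMPW,
+// CMPL, CMPB) tracks the type of the shift count y.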
+func rewriteValue386_OpRsh16Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux16 <t> x y)
+ // cond:
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh16Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux32 <t> x y)
+ // cond:
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh16Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SHRWconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(Op386SHRWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh16Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
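+// When the shift count is a known constant the mask is unnecessary:
+// counts below the operand width become a plain *const shift, and
+// oversized counts of unsigned shifts fold directly to a zero constant.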
+func rewriteValue386_OpRsh16Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux8 <t> x y)
+ // cond:
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
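+// Signed right shifts must instead fill with the sign bit when the count
+// is too large. The rules below build an effective count
+// y | ^SBBLcarrymask(CMP...), which leaves y unchanged when y < width
+// and becomes all ones otherwise. The SAR instruction truncates its
+// count to 5 bits, so the all-ones count shifts by 31, replicating the
+// sign bit across the result; oversized constant counts are clamped to
+// width-1 for the same reason (e.g. SARWconst [15] below).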
+func rewriteValue386_OpRsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x16 <t> x y)
+ // cond:
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARW)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v3.AuxInt = 16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpRsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x32 <t> x y)
+ // cond:
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARW)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v3.AuxInt = 16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpRsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SARWconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(Op386SARWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (SARWconst x [15])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(Op386SARWconst)
+ v.AuxInt = 15
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x8 <t> x y)
+ // cond:
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARW)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v3.AuxInt = 16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpRsh32Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux16 <t> x y)
+ // cond:
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh32Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux32 <t> x y)
+ // cond:
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh32Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SHRLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(Op386SHRLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux8 <t> x y)
+ // cond:
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x16 <t> x y)
+ // cond:
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARL)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v3.AuxInt = 32
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpRsh32x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x32 <t> x y)
+ // cond:
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARL)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v3.AuxInt = 32
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpRsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SARLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(Op386SARLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (SARLconst x [31])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(Op386SARLconst)
+ v.AuxInt = 31
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x8 <t> x y)
+ // cond:
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARL)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v3.AuxInt = 32
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpRsh8Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux16 <t> x y)
+ // cond:
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRB, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh8Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux32 <t> x y)
+ // cond:
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRB, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh8Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SHRBconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(Op386SHRBconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh8Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux8 <t> x y)
+ // cond:
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRB, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x16 <t> x y)
+ // cond:
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARB)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v3.AuxInt = 8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpRsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x32 <t> x y)
+ // cond:
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARB)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v3.AuxInt = 8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpRsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SARBconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(Op386SARBconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (SARBconst x [7])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(Op386SARBconst)
+ v.AuxInt = 7
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x8 <t> x y)
+ // cond:
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARB)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v3.AuxInt = 8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpSignExt16to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt16to32 x)
+ // cond:
+ // result: (MOVWLSX x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386MOVWLSX)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpSignExt8to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt8to16 x)
+ // cond:
+ // result: (MOVBLSX x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386MOVBLSX)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpSignExt8to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt8to32 x)
+ // cond:
+ // result: (MOVBLSX x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386MOVBLSX)
+ v.AddArg(x)
+ return true
+ }
+}
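+// Signmask(x) spreads the sign bit of x across all 32 bits: an
+// arithmetic right shift by 31 yields 0 for non-negative x and -1 for
+// negative x.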
+func rewriteValue386_OpSignmask(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Signmask x)
+ // cond:
+ // result: (SARLconst x [31])
+ for {
+ x := v.Args[0]
+ v.reset(Op386SARLconst)
+ v.AuxInt = 31
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpSqrt(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sqrt x)
+ // cond:
+ // result: (SQRTSD x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386SQRTSD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpStaticCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (StaticCall [argwid] {target} mem)
+ // cond:
+ // result: (CALLstatic [argwid] {target} mem)
+ for {
+ argwid := v.AuxInt
+ target := v.Aux
+ mem := v.Args[0]
+ v.reset(Op386CALLstatic)
+ v.AuxInt = argwid
+ v.Aux = target
+ v.AddArg(mem)
+ return true
+ }
+}
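+// Store dispatches on the store width held in AuxInt. The generated
+// rules are tried in order, so the float-typed matches for widths 8 and
+// 4 are checked first, and the unconditional 4-byte MOVLstore only
+// catches the remaining integer and pointer stores.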
+func rewriteValue386_OpStore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Store [8] ptr val mem)
+ // cond: is64BitFloat(val.Type)
+ // result: (MOVSDstore ptr val mem)
+ for {
+ if v.AuxInt != 8 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(Op386MOVSDstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [4] ptr val mem)
+ // cond: is32BitFloat(val.Type)
+ // result: (MOVSSstore ptr val mem)
+ for {
+ if v.AuxInt != 4 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(Op386MOVSSstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [4] ptr val mem)
+ // cond:
+ // result: (MOVLstore ptr val mem)
+ for {
+ if v.AuxInt != 4 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(Op386MOVLstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [2] ptr val mem)
+ // cond:
+ // result: (MOVWstore ptr val mem)
+ for {
+ if v.AuxInt != 2 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(Op386MOVWstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [1] ptr val mem)
+ // cond:
+ // result: (MOVBstore ptr val mem)
+ for {
+ if v.AuxInt != 1 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(Op386MOVBstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpSub16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub16 x y)
+ // cond:
+ // result: (SUBL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SUBL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpSub32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32 x y)
+ // cond:
+ // result: (SUBL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SUBL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpSub32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32F x y)
+ // cond:
+ // result: (SUBSS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SUBSS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpSub32carry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32carry x y)
+ // cond:
+ // result: (SUBLcarry x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SUBLcarry)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpSub32withcarry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32withcarry x y c)
+ // cond:
+ // result: (SBBL x y c)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ c := v.Args[2]
+ v.reset(Op386SBBL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(c)
+ return true
+ }
+}
+func rewriteValue386_OpSub64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub64F x y)
+ // cond:
+ // result: (SUBSD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SUBSD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpSub8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub8 x y)
+ // cond:
+ // result: (SUBL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SUBL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpSubPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SubPtr x y)
+ // cond:
+ // result: (SUBL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SUBL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
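+// Truncations are no-ops on 386: subword values already live in full
+// registers, so Trunc* rewrites to a Copy of its argument and consumers
+// simply ignore the high bits.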
+func rewriteValue386_OpTrunc16to8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc16to8 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpTrunc32to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc32to16 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpTrunc32to8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc32to8 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpXor16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Xor16 x y)
+ // cond:
+ // result: (XORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386XORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpXor32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Xor32 x y)
+ // cond:
+ // result: (XORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386XORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpXor8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Xor8 x y)
+ // cond:
+ // result: (XORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386XORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
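+// Zero clears a block of memory, choosing a strategy by size. Sizes up
+// to 16 bytes use chains of store-constant ops whose AuxInt packs both
+// value and offset via makeValAndOff; a 12-byte Zero, for instance,
+// becomes three MOVL $0 stores at offsets 8, 4 and 0. Unaligned sizes
+// above 4 store 4 zero bytes at the base, advance the pointer by size%4,
+// and recurse on the now 4-byte-aligned remainder. Mid-sized blocks
+// (more than 16 and at most 4*128 = 512 bytes, a multiple of 4) jump
+// into Duff's device, with the AuxInt selecting the entry point
+// (apparently one byte of device code per 4-byte store, hence the 1*
+// factor). Anything larger, or any target with noDuffDevice set, falls
+// back to REPSTOSL with the count in 4-byte words.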
+func rewriteValue386_OpZero(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Zero [s] _ mem)
+ // cond: SizeAndAlign(s).Size() == 0
+ // result: mem
+ for {
+ s := v.AuxInt
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = mem.Type
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 1
+ // result: (MOVBstoreconst [0] destptr mem)
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 1) {
+ break
+ }
+ v.reset(Op386MOVBstoreconst)
+ v.AuxInt = 0
+ v.AddArg(destptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 2
+ // result: (MOVWstoreconst [0] destptr mem)
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 2) {
+ break
+ }
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = 0
+ v.AddArg(destptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 4
+ // result: (MOVLstoreconst [0] destptr mem)
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 4) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = 0
+ v.AddArg(destptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 3
+ // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem))
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 3) {
+ break
+ }
+ v.reset(Op386MOVBstoreconst)
+ v.AuxInt = makeValAndOff(0, 2)
+ v.AddArg(destptr)
+ v0 := b.NewValue0(v.Line, Op386MOVWstoreconst, TypeMem)
+ v0.AuxInt = 0
+ v0.AddArg(destptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 5
+ // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 5) {
+ break
+ }
+ v.reset(Op386MOVBstoreconst)
+ v.AuxInt = makeValAndOff(0, 4)
+ v.AddArg(destptr)
+ v0 := b.NewValue0(v.Line, Op386MOVLstoreconst, TypeMem)
+ v0.AuxInt = 0
+ v0.AddArg(destptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 6
+ // result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 6) {
+ break
+ }
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = makeValAndOff(0, 4)
+ v.AddArg(destptr)
+ v0 := b.NewValue0(v.Line, Op386MOVLstoreconst, TypeMem)
+ v0.AuxInt = 0
+ v0.AddArg(destptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 7
+ // result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem))
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 7) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = makeValAndOff(0, 3)
+ v.AddArg(destptr)
+ v0 := b.NewValue0(v.Line, Op386MOVLstoreconst, TypeMem)
+ v0.AuxInt = 0
+ v0.AddArg(destptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size()%4 != 0 && SizeAndAlign(s).Size() > 4
+ // result: (Zero [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%4] (ADDLconst destptr [SizeAndAlign(s).Size()%4]) (MOVLstoreconst [0] destptr mem))
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size()%4 != 0 && SizeAndAlign(s).Size() > 4) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%4
+ v0 := b.NewValue0(v.Line, Op386ADDLconst, config.fe.TypeUInt32())
+ v0.AuxInt = SizeAndAlign(s).Size() % 4
+ v0.AddArg(destptr)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386MOVLstoreconst, TypeMem)
+ v1.AuxInt = 0
+ v1.AddArg(destptr)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 8
+ // result: (MOVLstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 8) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = makeValAndOff(0, 4)
+ v.AddArg(destptr)
+ v0 := b.NewValue0(v.Line, Op386MOVLstoreconst, TypeMem)
+ v0.AuxInt = 0
+ v0.AddArg(destptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 12
+ // result: (MOVLstoreconst [makeValAndOff(0,8)] destptr (MOVLstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem)))
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 12) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = makeValAndOff(0, 8)
+ v.AddArg(destptr)
+ v0 := b.NewValue0(v.Line, Op386MOVLstoreconst, TypeMem)
+ v0.AuxInt = makeValAndOff(0, 4)
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Line, Op386MOVLstoreconst, TypeMem)
+ v1.AuxInt = 0
+ v1.AddArg(destptr)
+ v1.AddArg(mem)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 16
+ // result: (MOVLstoreconst [makeValAndOff(0,12)] destptr (MOVLstoreconst [makeValAndOff(0,8)] destptr (MOVLstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))))
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 16) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = makeValAndOff(0, 12)
+ v.AddArg(destptr)
+ v0 := b.NewValue0(v.Line, Op386MOVLstoreconst, TypeMem)
+ v0.AuxInt = makeValAndOff(0, 8)
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Line, Op386MOVLstoreconst, TypeMem)
+ v1.AuxInt = makeValAndOff(0, 4)
+ v1.AddArg(destptr)
+ v2 := b.NewValue0(v.Line, Op386MOVLstoreconst, TypeMem)
+ v2.AuxInt = 0
+ v2.AddArg(destptr)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size() <= 4*128 && SizeAndAlign(s).Size()%4 == 0 && !config.noDuffDevice
+ // result: (DUFFZERO [1*(128-SizeAndAlign(s).Size()/4)] destptr (MOVLconst [0]) mem)
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size() <= 4*128 && SizeAndAlign(s).Size()%4 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(Op386DUFFZERO)
+ v.AuxInt = 1 * (128 - SizeAndAlign(s).Size()/4)
+ v.AddArg(destptr)
+ v0 := b.NewValue0(v.Line, Op386MOVLconst, config.fe.TypeUInt32())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: (SizeAndAlign(s).Size() > 4*128 || (config.noDuffDevice && SizeAndAlign(s).Size() > 16)) && SizeAndAlign(s).Size()%4 == 0
+ // result: (REPSTOSL destptr (MOVLconst [SizeAndAlign(s).Size()/4]) (MOVLconst [0]) mem)
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !((SizeAndAlign(s).Size() > 4*128 || (config.noDuffDevice && SizeAndAlign(s).Size() > 16)) && SizeAndAlign(s).Size()%4 == 0) {
+ break
+ }
+ v.reset(Op386REPSTOSL)
+ v.AddArg(destptr)
+ v0 := b.NewValue0(v.Line, Op386MOVLconst, config.fe.TypeUInt32())
+ v0.AuxInt = SizeAndAlign(s).Size() / 4
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386MOVLconst, config.fe.TypeUInt32())
+ v1.AuxInt = 0
+ v.AddArg(v1)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpZeroExt16to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt16to32 x)
+ // cond:
+ // result: (MOVWLZX x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386MOVWLZX)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpZeroExt8to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt8to16 x)
+ // cond:
+ // result: (MOVBLZX x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386MOVBLZX)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpZeroExt8to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt8to32 x)
+ // cond:
+ // result: (MOVBLZX x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386MOVBLZX)
+ v.AddArg(x)
+ return true
+ }
+}
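+// Zeromask(x) is (x != 0) ? -1 : 0. CMPL x, $1 sets the carry flag
+// exactly when x == 0 (unsigned x < 1), SBBLcarrymask turns that carry
+// into 0 or -1, and the final XOR with -1 inverts the mask.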
+func rewriteValue386_OpZeromask(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Zeromask <t> x)
+ // cond:
+ // result: (XORLconst [-1] (SBBLcarrymask <t> (CMPL x (MOVLconst [1]))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v.reset(Op386XORLconst)
+ v.AuxInt = -1
+ v0 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v1 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Line, Op386MOVLconst, config.fe.TypeUInt32())
+ v2.AuxInt = 1
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
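+// rewriteBlock386 optimizes control flow. Three patterns recur in each
+// conditional block kind: an InvertFlags control flips the block to the
+// mirrored condition (EQ stays EQ, GE becomes LE, UGT becomes ULT, and
+// so on); a constant Flag* control — the names such as FlagLT_ULT give
+// the signed and unsigned outcomes of a constant-folded compare —
+// resolves the branch statically to a BlockFirst, calling
+// swapSuccessors when the condition is false; and a BlockIf over a
+// SETcc value branches on the underlying flags directly.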
+func rewriteBlock386(b *Block) bool {
+ switch b.Kind {
+ case Block386EQ:
+ // match: (EQ (InvertFlags cmp) yes no)
+ // cond:
+ // result: (EQ cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386EQ
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (EQ (FlagEQ) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (EQ (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (EQ (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (EQ (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (EQ (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ case Block386GE:
+ // match: (GE (InvertFlags cmp) yes no)
+ // cond:
+ // result: (LE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386LE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GE (FlagEQ) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GE (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GE (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GE (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GE (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ case Block386GT:
+ // match: (GT (InvertFlags cmp) yes no)
+ // cond:
+ // result: (LT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386LT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GT (FlagEQ) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GT (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GT (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GT (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GT (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockIf:
+ // match: (If (SETL cmp) yes no)
+ // cond:
+ // result: (LT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386SETL {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386LT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (SETLE cmp) yes no)
+ // cond:
+ // result: (LE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386SETLE {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386LE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (SETG cmp) yes no)
+ // cond:
+ // result: (GT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386SETG {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386GT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (SETGE cmp) yes no)
+ // cond:
+ // result: (GE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386SETGE {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386GE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (SETEQ cmp) yes no)
+ // cond:
+ // result: (EQ cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386SETEQ {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386EQ
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (SETNE cmp) yes no)
+ // cond:
+ // result: (NE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386SETNE {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386NE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (SETB cmp) yes no)
+ // cond:
+ // result: (ULT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386SETB {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386ULT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (SETBE cmp) yes no)
+ // cond:
+ // result: (ULE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386SETBE {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386ULE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (SETA cmp) yes no)
+ // cond:
+ // result: (UGT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386SETA {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386UGT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (SETAE cmp) yes no)
+ // cond:
+ // result: (UGE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386SETAE {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386UGE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (SETGF cmp) yes no)
+ // cond:
+ // result: (UGT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386SETGF {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386UGT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (SETGEF cmp) yes no)
+ // cond:
+ // result: (UGE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386SETGEF {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386UGE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (SETEQF cmp) yes no)
+ // cond:
+ // result: (EQF cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386SETEQF {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386EQF
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (SETNEF cmp) yes no)
+ // cond:
+ // result: (NEF cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386SETNEF {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386NEF
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
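+		// An arbitrary boolean control falls back to an explicit test:
+		// TESTB cond, cond sets ZF from cond&cond, so branching on NE
+		// takes the yes edge exactly when cond != 0.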
+ // match: (If cond yes no)
+ // cond:
+ // result: (NE (TESTB cond cond) yes no)
+ for {
+ v := b.Control
+ cond := b.Control
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386NE
+ v0 := b.NewValue0(v.Line, Op386TESTB, TypeFlags)
+ v0.AddArg(cond)
+ v0.AddArg(cond)
+ b.SetControl(v0)
+ _ = yes
+ _ = no
+ return true
+ }
+ case Block386LE:
+ // match: (LE (InvertFlags cmp) yes no)
+ // cond:
+ // result: (GE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386GE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LE (FlagEQ) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LE (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LE (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LE (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (LE (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ case Block386LT:
+ // match: (LT (InvertFlags cmp) yes no)
+ // cond:
+ // result: (GT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386GT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LT (FlagEQ) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (LT (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LT (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LT (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (LT (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ case Block386NE:
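+		// The TESTB patterns below undo the generic If fallback above:
+		// when both TESTB operands are the same SETcc of a single
+		// compare, the block can branch on that compare's flags
+		// directly.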
+ // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
+ // cond:
+ // result: (LT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386TESTB {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != Op386SETL {
+ break
+ }
+ cmp := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SETL {
+ break
+ }
+ if cmp != v_1.Args[0] {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386LT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
+ // cond:
+ // result: (LE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386TESTB {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != Op386SETLE {
+ break
+ }
+ cmp := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SETLE {
+ break
+ }
+ if cmp != v_1.Args[0] {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386LE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
+ // cond:
+ // result: (GT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386TESTB {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != Op386SETG {
+ break
+ }
+ cmp := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SETG {
+ break
+ }
+ if cmp != v_1.Args[0] {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386GT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
+ // cond:
+ // result: (GE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386TESTB {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != Op386SETGE {
+ break
+ }
+ cmp := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SETGE {
+ break
+ }
+ if cmp != v_1.Args[0] {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386GE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
+ // cond:
+ // result: (EQ cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386TESTB {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != Op386SETEQ {
+ break
+ }
+ cmp := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SETEQ {
+ break
+ }
+ if cmp != v_1.Args[0] {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386EQ
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
+ // cond:
+ // result: (NE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386TESTB {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != Op386SETNE {
+ break
+ }
+ cmp := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SETNE {
+ break
+ }
+ if cmp != v_1.Args[0] {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386NE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
+ // cond:
+ // result: (ULT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386TESTB {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != Op386SETB {
+ break
+ }
+ cmp := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SETB {
+ break
+ }
+ if cmp != v_1.Args[0] {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386ULT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
+ // cond:
+ // result: (ULE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386TESTB {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != Op386SETBE {
+ break
+ }
+ cmp := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SETBE {
+ break
+ }
+ if cmp != v_1.Args[0] {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386ULE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
+ // cond:
+ // result: (UGT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386TESTB {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != Op386SETA {
+ break
+ }
+ cmp := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SETA {
+ break
+ }
+ if cmp != v_1.Args[0] {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386UGT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
+ // cond:
+ // result: (UGE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386TESTB {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != Op386SETAE {
+ break
+ }
+ cmp := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SETAE {
+ break
+ }
+ if cmp != v_1.Args[0] {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386UGE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
+ // cond:
+ // result: (UGT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386TESTB {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != Op386SETGF {
+ break
+ }
+ cmp := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SETGF {
+ break
+ }
+ if cmp != v_1.Args[0] {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386UGT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
+ // cond:
+ // result: (UGE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386TESTB {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != Op386SETGEF {
+ break
+ }
+ cmp := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SETGEF {
+ break
+ }
+ if cmp != v_1.Args[0] {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386UGE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
+ // cond:
+ // result: (EQF cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386TESTB {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != Op386SETEQF {
+ break
+ }
+ cmp := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SETEQF {
+ break
+ }
+ if cmp != v_1.Args[0] {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386EQF
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
+ // cond:
+ // result: (NEF cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386TESTB {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != Op386SETNEF {
+ break
+ }
+ cmp := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386SETNEF {
+ break
+ }
+ if cmp != v_1.Args[0] {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386NEF
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (InvertFlags cmp) yes no)
+ // cond:
+ // result: (NE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386NE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (FlagEQ) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (NE (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ case Block386UGE:
+ // match: (UGE (InvertFlags cmp) yes no)
+ // cond:
+ // result: (ULE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386ULE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (UGE (FlagEQ) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (UGE (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (UGE (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (UGE (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (UGE (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ case Block386UGT:
+ // match: (UGT (InvertFlags cmp) yes no)
+ // cond:
+ // result: (ULT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386ULT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (UGT (FlagEQ) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (UGT (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (UGT (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (UGT (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (UGT (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ case Block386ULE:
+ // match: (ULE (InvertFlags cmp) yes no)
+ // cond:
+ // result: (UGE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386UGE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (ULE (FlagEQ) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (ULE (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (ULE (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (ULE (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (ULE (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ case Block386ULT:
+ // match: (ULT (InvertFlags cmp) yes no)
+ // cond:
+ // result: (UGT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = Block386UGT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (ULT (FlagEQ) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (ULT (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (ULT (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (ULT (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (ULT (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != Op386FlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ }
+ return false
+}
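
Editor's note: every case in the generated block-rewrite function above follows one of two templates. An InvertFlags control value flips the block to the mirrored condition (UGE becomes ULE, UGT becomes ULT, and so on) and keeps the branch; a Flag* constant control decides the branch statically, so the block is demoted to BlockFirst with a nil control, falling through to Succs[0], and b.swapSuccessors() is called first exactly when the flag constant falsifies the condition. The truth table driving the unsigned cases, as a runnable sketch (condTaken and rewrite are illustrative names, not compiler API):

package main

import "fmt"

type Flag int

const (
	FlagEQ Flag = iota
	FlagLT_ULT
	FlagLT_UGT
	FlagGT_ULT
	FlagGT_UGT
)

// condTaken reports whether each unsigned branch condition holds for a
// statically known flag constant; it reproduces the (First nil yes no)
// versus (First nil no yes) outcomes of the cases above.
var condTaken = map[string]map[Flag]bool{
	"UGE": {FlagEQ: true, FlagLT_ULT: false, FlagLT_UGT: true, FlagGT_ULT: false, FlagGT_UGT: true},
	"UGT": {FlagEQ: false, FlagLT_ULT: false, FlagLT_UGT: true, FlagGT_ULT: false, FlagGT_UGT: true},
	"ULE": {FlagEQ: true, FlagLT_ULT: true, FlagLT_UGT: false, FlagGT_ULT: true, FlagGT_UGT: false},
	"ULT": {FlagEQ: false, FlagLT_ULT: true, FlagLT_UGT: false, FlagGT_ULT: true, FlagGT_UGT: false},
}

// rewrite mirrors the generated template: a true condition keeps the
// successor order; a false one corresponds to b.swapSuccessors().
func rewrite(kind string, f Flag) string {
	if condTaken[kind][f] {
		return "First nil yes no"
	}
	return "First nil no yes"
}

func main() {
	fmt.Println(rewrite("UGE", FlagLT_ULT)) // First nil no yes
	fmt.Println(rewrite("ULE", FlagEQ))     // First nil yes no
}
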
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index a2b9e15a4f..d09e706fdd 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -24,6 +24,252 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
return rewriteValueAMD64_OpAMD64ANDQ(v, config)
case OpAMD64ANDQconst:
return rewriteValueAMD64_OpAMD64ANDQconst(v, config)
+ case OpAMD64CMOVLEQconst:
+ return rewriteValueAMD64_OpAMD64CMOVLEQconst(v, config)
+ case OpAMD64CMOVQEQconst:
+ return rewriteValueAMD64_OpAMD64CMOVQEQconst(v, config)
+ case OpAMD64CMOVWEQconst:
+ return rewriteValueAMD64_OpAMD64CMOVWEQconst(v, config)
+ case OpAMD64CMPB:
+ return rewriteValueAMD64_OpAMD64CMPB(v, config)
+ case OpAMD64CMPBconst:
+ return rewriteValueAMD64_OpAMD64CMPBconst(v, config)
+ case OpAMD64CMPL:
+ return rewriteValueAMD64_OpAMD64CMPL(v, config)
+ case OpAMD64CMPLconst:
+ return rewriteValueAMD64_OpAMD64CMPLconst(v, config)
+ case OpAMD64CMPQ:
+ return rewriteValueAMD64_OpAMD64CMPQ(v, config)
+ case OpAMD64CMPQconst:
+ return rewriteValueAMD64_OpAMD64CMPQconst(v, config)
+ case OpAMD64CMPW:
+ return rewriteValueAMD64_OpAMD64CMPW(v, config)
+ case OpAMD64CMPWconst:
+ return rewriteValueAMD64_OpAMD64CMPWconst(v, config)
+ case OpAMD64LEAL:
+ return rewriteValueAMD64_OpAMD64LEAL(v, config)
+ case OpAMD64LEAQ:
+ return rewriteValueAMD64_OpAMD64LEAQ(v, config)
+ case OpAMD64LEAQ1:
+ return rewriteValueAMD64_OpAMD64LEAQ1(v, config)
+ case OpAMD64LEAQ2:
+ return rewriteValueAMD64_OpAMD64LEAQ2(v, config)
+ case OpAMD64LEAQ4:
+ return rewriteValueAMD64_OpAMD64LEAQ4(v, config)
+ case OpAMD64LEAQ8:
+ return rewriteValueAMD64_OpAMD64LEAQ8(v, config)
+ case OpAMD64MOVBQSX:
+ return rewriteValueAMD64_OpAMD64MOVBQSX(v, config)
+ case OpAMD64MOVBQSXload:
+ return rewriteValueAMD64_OpAMD64MOVBQSXload(v, config)
+ case OpAMD64MOVBQZX:
+ return rewriteValueAMD64_OpAMD64MOVBQZX(v, config)
+ case OpAMD64MOVBload:
+ return rewriteValueAMD64_OpAMD64MOVBload(v, config)
+ case OpAMD64MOVBloadidx1:
+ return rewriteValueAMD64_OpAMD64MOVBloadidx1(v, config)
+ case OpAMD64MOVBstore:
+ return rewriteValueAMD64_OpAMD64MOVBstore(v, config)
+ case OpAMD64MOVBstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVBstoreconst(v, config)
+ case OpAMD64MOVBstoreconstidx1:
+ return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v, config)
+ case OpAMD64MOVBstoreidx1:
+ return rewriteValueAMD64_OpAMD64MOVBstoreidx1(v, config)
+ case OpAMD64MOVLQSX:
+ return rewriteValueAMD64_OpAMD64MOVLQSX(v, config)
+ case OpAMD64MOVLQSXload:
+ return rewriteValueAMD64_OpAMD64MOVLQSXload(v, config)
+ case OpAMD64MOVLQZX:
+ return rewriteValueAMD64_OpAMD64MOVLQZX(v, config)
+ case OpAMD64MOVLload:
+ return rewriteValueAMD64_OpAMD64MOVLload(v, config)
+ case OpAMD64MOVLloadidx1:
+ return rewriteValueAMD64_OpAMD64MOVLloadidx1(v, config)
+ case OpAMD64MOVLloadidx4:
+ return rewriteValueAMD64_OpAMD64MOVLloadidx4(v, config)
+ case OpAMD64MOVLstore:
+ return rewriteValueAMD64_OpAMD64MOVLstore(v, config)
+ case OpAMD64MOVLstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVLstoreconst(v, config)
+ case OpAMD64MOVLstoreconstidx1:
+ return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v, config)
+ case OpAMD64MOVLstoreconstidx4:
+ return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v, config)
+ case OpAMD64MOVLstoreidx1:
+ return rewriteValueAMD64_OpAMD64MOVLstoreidx1(v, config)
+ case OpAMD64MOVLstoreidx4:
+ return rewriteValueAMD64_OpAMD64MOVLstoreidx4(v, config)
+ case OpAMD64MOVOload:
+ return rewriteValueAMD64_OpAMD64MOVOload(v, config)
+ case OpAMD64MOVOstore:
+ return rewriteValueAMD64_OpAMD64MOVOstore(v, config)
+ case OpAMD64MOVQload:
+ return rewriteValueAMD64_OpAMD64MOVQload(v, config)
+ case OpAMD64MOVQloadidx1:
+ return rewriteValueAMD64_OpAMD64MOVQloadidx1(v, config)
+ case OpAMD64MOVQloadidx8:
+ return rewriteValueAMD64_OpAMD64MOVQloadidx8(v, config)
+ case OpAMD64MOVQstore:
+ return rewriteValueAMD64_OpAMD64MOVQstore(v, config)
+ case OpAMD64MOVQstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVQstoreconst(v, config)
+ case OpAMD64MOVQstoreconstidx1:
+ return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v, config)
+ case OpAMD64MOVQstoreconstidx8:
+ return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v, config)
+ case OpAMD64MOVQstoreidx1:
+ return rewriteValueAMD64_OpAMD64MOVQstoreidx1(v, config)
+ case OpAMD64MOVQstoreidx8:
+ return rewriteValueAMD64_OpAMD64MOVQstoreidx8(v, config)
+ case OpAMD64MOVSDload:
+ return rewriteValueAMD64_OpAMD64MOVSDload(v, config)
+ case OpAMD64MOVSDloadidx1:
+ return rewriteValueAMD64_OpAMD64MOVSDloadidx1(v, config)
+ case OpAMD64MOVSDloadidx8:
+ return rewriteValueAMD64_OpAMD64MOVSDloadidx8(v, config)
+ case OpAMD64MOVSDstore:
+ return rewriteValueAMD64_OpAMD64MOVSDstore(v, config)
+ case OpAMD64MOVSDstoreidx1:
+ return rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v, config)
+ case OpAMD64MOVSDstoreidx8:
+ return rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v, config)
+ case OpAMD64MOVSSload:
+ return rewriteValueAMD64_OpAMD64MOVSSload(v, config)
+ case OpAMD64MOVSSloadidx1:
+ return rewriteValueAMD64_OpAMD64MOVSSloadidx1(v, config)
+ case OpAMD64MOVSSloadidx4:
+ return rewriteValueAMD64_OpAMD64MOVSSloadidx4(v, config)
+ case OpAMD64MOVSSstore:
+ return rewriteValueAMD64_OpAMD64MOVSSstore(v, config)
+ case OpAMD64MOVSSstoreidx1:
+ return rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v, config)
+ case OpAMD64MOVSSstoreidx4:
+ return rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v, config)
+ case OpAMD64MOVWQSX:
+ return rewriteValueAMD64_OpAMD64MOVWQSX(v, config)
+ case OpAMD64MOVWQSXload:
+ return rewriteValueAMD64_OpAMD64MOVWQSXload(v, config)
+ case OpAMD64MOVWQZX:
+ return rewriteValueAMD64_OpAMD64MOVWQZX(v, config)
+ case OpAMD64MOVWload:
+ return rewriteValueAMD64_OpAMD64MOVWload(v, config)
+ case OpAMD64MOVWloadidx1:
+ return rewriteValueAMD64_OpAMD64MOVWloadidx1(v, config)
+ case OpAMD64MOVWloadidx2:
+ return rewriteValueAMD64_OpAMD64MOVWloadidx2(v, config)
+ case OpAMD64MOVWstore:
+ return rewriteValueAMD64_OpAMD64MOVWstore(v, config)
+ case OpAMD64MOVWstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVWstoreconst(v, config)
+ case OpAMD64MOVWstoreconstidx1:
+ return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v, config)
+ case OpAMD64MOVWstoreconstidx2:
+ return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v, config)
+ case OpAMD64MOVWstoreidx1:
+ return rewriteValueAMD64_OpAMD64MOVWstoreidx1(v, config)
+ case OpAMD64MOVWstoreidx2:
+ return rewriteValueAMD64_OpAMD64MOVWstoreidx2(v, config)
+ case OpAMD64MULL:
+ return rewriteValueAMD64_OpAMD64MULL(v, config)
+ case OpAMD64MULLconst:
+ return rewriteValueAMD64_OpAMD64MULLconst(v, config)
+ case OpAMD64MULQ:
+ return rewriteValueAMD64_OpAMD64MULQ(v, config)
+ case OpAMD64MULQconst:
+ return rewriteValueAMD64_OpAMD64MULQconst(v, config)
+ case OpAMD64NEGL:
+ return rewriteValueAMD64_OpAMD64NEGL(v, config)
+ case OpAMD64NEGQ:
+ return rewriteValueAMD64_OpAMD64NEGQ(v, config)
+ case OpAMD64NOTL:
+ return rewriteValueAMD64_OpAMD64NOTL(v, config)
+ case OpAMD64NOTQ:
+ return rewriteValueAMD64_OpAMD64NOTQ(v, config)
+ case OpAMD64ORL:
+ return rewriteValueAMD64_OpAMD64ORL(v, config)
+ case OpAMD64ORLconst:
+ return rewriteValueAMD64_OpAMD64ORLconst(v, config)
+ case OpAMD64ORQ:
+ return rewriteValueAMD64_OpAMD64ORQ(v, config)
+ case OpAMD64ORQconst:
+ return rewriteValueAMD64_OpAMD64ORQconst(v, config)
+ case OpAMD64ROLBconst:
+ return rewriteValueAMD64_OpAMD64ROLBconst(v, config)
+ case OpAMD64ROLLconst:
+ return rewriteValueAMD64_OpAMD64ROLLconst(v, config)
+ case OpAMD64ROLQconst:
+ return rewriteValueAMD64_OpAMD64ROLQconst(v, config)
+ case OpAMD64ROLWconst:
+ return rewriteValueAMD64_OpAMD64ROLWconst(v, config)
+ case OpAMD64SARB:
+ return rewriteValueAMD64_OpAMD64SARB(v, config)
+ case OpAMD64SARBconst:
+ return rewriteValueAMD64_OpAMD64SARBconst(v, config)
+ case OpAMD64SARL:
+ return rewriteValueAMD64_OpAMD64SARL(v, config)
+ case OpAMD64SARLconst:
+ return rewriteValueAMD64_OpAMD64SARLconst(v, config)
+ case OpAMD64SARQ:
+ return rewriteValueAMD64_OpAMD64SARQ(v, config)
+ case OpAMD64SARQconst:
+ return rewriteValueAMD64_OpAMD64SARQconst(v, config)
+ case OpAMD64SARW:
+ return rewriteValueAMD64_OpAMD64SARW(v, config)
+ case OpAMD64SARWconst:
+ return rewriteValueAMD64_OpAMD64SARWconst(v, config)
+ case OpAMD64SBBLcarrymask:
+ return rewriteValueAMD64_OpAMD64SBBLcarrymask(v, config)
+ case OpAMD64SBBQcarrymask:
+ return rewriteValueAMD64_OpAMD64SBBQcarrymask(v, config)
+ case OpAMD64SETA:
+ return rewriteValueAMD64_OpAMD64SETA(v, config)
+ case OpAMD64SETAE:
+ return rewriteValueAMD64_OpAMD64SETAE(v, config)
+ case OpAMD64SETB:
+ return rewriteValueAMD64_OpAMD64SETB(v, config)
+ case OpAMD64SETBE:
+ return rewriteValueAMD64_OpAMD64SETBE(v, config)
+ case OpAMD64SETEQ:
+ return rewriteValueAMD64_OpAMD64SETEQ(v, config)
+ case OpAMD64SETG:
+ return rewriteValueAMD64_OpAMD64SETG(v, config)
+ case OpAMD64SETGE:
+ return rewriteValueAMD64_OpAMD64SETGE(v, config)
+ case OpAMD64SETL:
+ return rewriteValueAMD64_OpAMD64SETL(v, config)
+ case OpAMD64SETLE:
+ return rewriteValueAMD64_OpAMD64SETLE(v, config)
+ case OpAMD64SETNE:
+ return rewriteValueAMD64_OpAMD64SETNE(v, config)
+ case OpAMD64SHLL:
+ return rewriteValueAMD64_OpAMD64SHLL(v, config)
+ case OpAMD64SHLQ:
+ return rewriteValueAMD64_OpAMD64SHLQ(v, config)
+ case OpAMD64SHRB:
+ return rewriteValueAMD64_OpAMD64SHRB(v, config)
+ case OpAMD64SHRL:
+ return rewriteValueAMD64_OpAMD64SHRL(v, config)
+ case OpAMD64SHRQ:
+ return rewriteValueAMD64_OpAMD64SHRQ(v, config)
+ case OpAMD64SHRW:
+ return rewriteValueAMD64_OpAMD64SHRW(v, config)
+ case OpAMD64SUBL:
+ return rewriteValueAMD64_OpAMD64SUBL(v, config)
+ case OpAMD64SUBLconst:
+ return rewriteValueAMD64_OpAMD64SUBLconst(v, config)
+ case OpAMD64SUBQ:
+ return rewriteValueAMD64_OpAMD64SUBQ(v, config)
+ case OpAMD64SUBQconst:
+ return rewriteValueAMD64_OpAMD64SUBQconst(v, config)
+ case OpAMD64XORL:
+ return rewriteValueAMD64_OpAMD64XORL(v, config)
+ case OpAMD64XORLconst:
+ return rewriteValueAMD64_OpAMD64XORLconst(v, config)
+ case OpAMD64XORQ:
+ return rewriteValueAMD64_OpAMD64XORQ(v, config)
+ case OpAMD64XORQconst:
+ return rewriteValueAMD64_OpAMD64XORQconst(v, config)
case OpAdd16:
return rewriteValueAMD64_OpAdd16(v, config)
case OpAdd32:
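
Editor's note: the cases added in this hunk are not new behavior — the same opcode-to-helper mappings are deleted from their old positions further down, so the hunk only reorders the dispatch switch. The new order is consistent with a plain byte-wise sort of the opcode identifiers, under which the uppercase run in OpAMD64* collates before the lowercase letters of generic names like OpAdd16. A quick check of that collation with the standard library (an illustration of the ordering, not generator code):

package main

import (
	"fmt"
	"sort"
)

func main() {
	ops := []string{"OpAdd16", "OpBswap64", "OpAMD64XORQconst", "OpAMD64CMPB"}
	sort.Strings(ops) // byte-wise: 'M' (0x4D) sorts before 'd' (0x64)
	fmt.Println(ops)  // [OpAMD64CMPB OpAMD64XORQconst OpAdd16 OpBswap64]
}
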
@@ -56,28 +302,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
return rewriteValueAMD64_OpBswap32(v, config)
case OpBswap64:
return rewriteValueAMD64_OpBswap64(v, config)
- case OpAMD64CMOVLEQconst:
- return rewriteValueAMD64_OpAMD64CMOVLEQconst(v, config)
- case OpAMD64CMOVQEQconst:
- return rewriteValueAMD64_OpAMD64CMOVQEQconst(v, config)
- case OpAMD64CMOVWEQconst:
- return rewriteValueAMD64_OpAMD64CMOVWEQconst(v, config)
- case OpAMD64CMPB:
- return rewriteValueAMD64_OpAMD64CMPB(v, config)
- case OpAMD64CMPBconst:
- return rewriteValueAMD64_OpAMD64CMPBconst(v, config)
- case OpAMD64CMPL:
- return rewriteValueAMD64_OpAMD64CMPL(v, config)
- case OpAMD64CMPLconst:
- return rewriteValueAMD64_OpAMD64CMPLconst(v, config)
- case OpAMD64CMPQ:
- return rewriteValueAMD64_OpAMD64CMPQ(v, config)
- case OpAMD64CMPQconst:
- return rewriteValueAMD64_OpAMD64CMPQconst(v, config)
- case OpAMD64CMPW:
- return rewriteValueAMD64_OpAMD64CMPW(v, config)
- case OpAMD64CMPWconst:
- return rewriteValueAMD64_OpAMD64CMPWconst(v, config)
case OpClosureCall:
return rewriteValueAMD64_OpClosureCall(v, config)
case OpCom16:
@@ -232,8 +456,8 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
return rewriteValueAMD64_OpHmul8(v, config)
case OpHmul8u:
return rewriteValueAMD64_OpHmul8u(v, config)
- case OpITab:
- return rewriteValueAMD64_OpITab(v, config)
+ case OpInt64Hi:
+ return rewriteValueAMD64_OpInt64Hi(v, config)
case OpInterCall:
return rewriteValueAMD64_OpInterCall(v, config)
case OpIsInBounds:
@@ -242,16 +466,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
return rewriteValueAMD64_OpIsNonNil(v, config)
case OpIsSliceInBounds:
return rewriteValueAMD64_OpIsSliceInBounds(v, config)
- case OpAMD64LEAQ:
- return rewriteValueAMD64_OpAMD64LEAQ(v, config)
- case OpAMD64LEAQ1:
- return rewriteValueAMD64_OpAMD64LEAQ1(v, config)
- case OpAMD64LEAQ2:
- return rewriteValueAMD64_OpAMD64LEAQ2(v, config)
- case OpAMD64LEAQ4:
- return rewriteValueAMD64_OpAMD64LEAQ4(v, config)
- case OpAMD64LEAQ8:
- return rewriteValueAMD64_OpAMD64LEAQ8(v, config)
case OpLeq16:
return rewriteValueAMD64_OpLeq16(v, config)
case OpLeq16U:
@@ -334,126 +548,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
return rewriteValueAMD64_OpLsh8x64(v, config)
case OpLsh8x8:
return rewriteValueAMD64_OpLsh8x8(v, config)
- case OpAMD64MOVBQSX:
- return rewriteValueAMD64_OpAMD64MOVBQSX(v, config)
- case OpAMD64MOVBQSXload:
- return rewriteValueAMD64_OpAMD64MOVBQSXload(v, config)
- case OpAMD64MOVBQZX:
- return rewriteValueAMD64_OpAMD64MOVBQZX(v, config)
- case OpAMD64MOVBload:
- return rewriteValueAMD64_OpAMD64MOVBload(v, config)
- case OpAMD64MOVBloadidx1:
- return rewriteValueAMD64_OpAMD64MOVBloadidx1(v, config)
- case OpAMD64MOVBstore:
- return rewriteValueAMD64_OpAMD64MOVBstore(v, config)
- case OpAMD64MOVBstoreconst:
- return rewriteValueAMD64_OpAMD64MOVBstoreconst(v, config)
- case OpAMD64MOVBstoreconstidx1:
- return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v, config)
- case OpAMD64MOVBstoreidx1:
- return rewriteValueAMD64_OpAMD64MOVBstoreidx1(v, config)
- case OpAMD64MOVLQSX:
- return rewriteValueAMD64_OpAMD64MOVLQSX(v, config)
- case OpAMD64MOVLQSXload:
- return rewriteValueAMD64_OpAMD64MOVLQSXload(v, config)
- case OpAMD64MOVLQZX:
- return rewriteValueAMD64_OpAMD64MOVLQZX(v, config)
- case OpAMD64MOVLload:
- return rewriteValueAMD64_OpAMD64MOVLload(v, config)
- case OpAMD64MOVLloadidx1:
- return rewriteValueAMD64_OpAMD64MOVLloadidx1(v, config)
- case OpAMD64MOVLloadidx4:
- return rewriteValueAMD64_OpAMD64MOVLloadidx4(v, config)
- case OpAMD64MOVLstore:
- return rewriteValueAMD64_OpAMD64MOVLstore(v, config)
- case OpAMD64MOVLstoreconst:
- return rewriteValueAMD64_OpAMD64MOVLstoreconst(v, config)
- case OpAMD64MOVLstoreconstidx1:
- return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v, config)
- case OpAMD64MOVLstoreconstidx4:
- return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v, config)
- case OpAMD64MOVLstoreidx1:
- return rewriteValueAMD64_OpAMD64MOVLstoreidx1(v, config)
- case OpAMD64MOVLstoreidx4:
- return rewriteValueAMD64_OpAMD64MOVLstoreidx4(v, config)
- case OpAMD64MOVOload:
- return rewriteValueAMD64_OpAMD64MOVOload(v, config)
- case OpAMD64MOVOstore:
- return rewriteValueAMD64_OpAMD64MOVOstore(v, config)
- case OpAMD64MOVQload:
- return rewriteValueAMD64_OpAMD64MOVQload(v, config)
- case OpAMD64MOVQloadidx1:
- return rewriteValueAMD64_OpAMD64MOVQloadidx1(v, config)
- case OpAMD64MOVQloadidx8:
- return rewriteValueAMD64_OpAMD64MOVQloadidx8(v, config)
- case OpAMD64MOVQstore:
- return rewriteValueAMD64_OpAMD64MOVQstore(v, config)
- case OpAMD64MOVQstoreconst:
- return rewriteValueAMD64_OpAMD64MOVQstoreconst(v, config)
- case OpAMD64MOVQstoreconstidx1:
- return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v, config)
- case OpAMD64MOVQstoreconstidx8:
- return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v, config)
- case OpAMD64MOVQstoreidx1:
- return rewriteValueAMD64_OpAMD64MOVQstoreidx1(v, config)
- case OpAMD64MOVQstoreidx8:
- return rewriteValueAMD64_OpAMD64MOVQstoreidx8(v, config)
- case OpAMD64MOVSDload:
- return rewriteValueAMD64_OpAMD64MOVSDload(v, config)
- case OpAMD64MOVSDloadidx1:
- return rewriteValueAMD64_OpAMD64MOVSDloadidx1(v, config)
- case OpAMD64MOVSDloadidx8:
- return rewriteValueAMD64_OpAMD64MOVSDloadidx8(v, config)
- case OpAMD64MOVSDstore:
- return rewriteValueAMD64_OpAMD64MOVSDstore(v, config)
- case OpAMD64MOVSDstoreidx1:
- return rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v, config)
- case OpAMD64MOVSDstoreidx8:
- return rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v, config)
- case OpAMD64MOVSSload:
- return rewriteValueAMD64_OpAMD64MOVSSload(v, config)
- case OpAMD64MOVSSloadidx1:
- return rewriteValueAMD64_OpAMD64MOVSSloadidx1(v, config)
- case OpAMD64MOVSSloadidx4:
- return rewriteValueAMD64_OpAMD64MOVSSloadidx4(v, config)
- case OpAMD64MOVSSstore:
- return rewriteValueAMD64_OpAMD64MOVSSstore(v, config)
- case OpAMD64MOVSSstoreidx1:
- return rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v, config)
- case OpAMD64MOVSSstoreidx4:
- return rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v, config)
- case OpAMD64MOVWQSX:
- return rewriteValueAMD64_OpAMD64MOVWQSX(v, config)
- case OpAMD64MOVWQSXload:
- return rewriteValueAMD64_OpAMD64MOVWQSXload(v, config)
- case OpAMD64MOVWQZX:
- return rewriteValueAMD64_OpAMD64MOVWQZX(v, config)
- case OpAMD64MOVWload:
- return rewriteValueAMD64_OpAMD64MOVWload(v, config)
- case OpAMD64MOVWloadidx1:
- return rewriteValueAMD64_OpAMD64MOVWloadidx1(v, config)
- case OpAMD64MOVWloadidx2:
- return rewriteValueAMD64_OpAMD64MOVWloadidx2(v, config)
- case OpAMD64MOVWstore:
- return rewriteValueAMD64_OpAMD64MOVWstore(v, config)
- case OpAMD64MOVWstoreconst:
- return rewriteValueAMD64_OpAMD64MOVWstoreconst(v, config)
- case OpAMD64MOVWstoreconstidx1:
- return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v, config)
- case OpAMD64MOVWstoreconstidx2:
- return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v, config)
- case OpAMD64MOVWstoreidx1:
- return rewriteValueAMD64_OpAMD64MOVWstoreidx1(v, config)
- case OpAMD64MOVWstoreidx2:
- return rewriteValueAMD64_OpAMD64MOVWstoreidx2(v, config)
- case OpAMD64MULL:
- return rewriteValueAMD64_OpAMD64MULL(v, config)
- case OpAMD64MULLconst:
- return rewriteValueAMD64_OpAMD64MULLconst(v, config)
- case OpAMD64MULQ:
- return rewriteValueAMD64_OpAMD64MULQ(v, config)
- case OpAMD64MULQconst:
- return rewriteValueAMD64_OpAMD64MULQconst(v, config)
case OpMod16:
return rewriteValueAMD64_OpMod16(v, config)
case OpMod16u:
@@ -484,14 +578,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
return rewriteValueAMD64_OpMul64F(v, config)
case OpMul8:
return rewriteValueAMD64_OpMul8(v, config)
- case OpAMD64NEGL:
- return rewriteValueAMD64_OpAMD64NEGL(v, config)
- case OpAMD64NEGQ:
- return rewriteValueAMD64_OpAMD64NEGQ(v, config)
- case OpAMD64NOTL:
- return rewriteValueAMD64_OpAMD64NOTL(v, config)
- case OpAMD64NOTQ:
- return rewriteValueAMD64_OpAMD64NOTQ(v, config)
case OpNeg16:
return rewriteValueAMD64_OpNeg16(v, config)
case OpNeg32:
@@ -524,14 +610,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
return rewriteValueAMD64_OpNilCheck(v, config)
case OpNot:
return rewriteValueAMD64_OpNot(v, config)
- case OpAMD64ORL:
- return rewriteValueAMD64_OpAMD64ORL(v, config)
- case OpAMD64ORLconst:
- return rewriteValueAMD64_OpAMD64ORLconst(v, config)
- case OpAMD64ORQ:
- return rewriteValueAMD64_OpAMD64ORQ(v, config)
- case OpAMD64ORQconst:
- return rewriteValueAMD64_OpAMD64ORQconst(v, config)
case OpOffPtr:
return rewriteValueAMD64_OpOffPtr(v, config)
case OpOr16:
@@ -608,66 +686,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
return rewriteValueAMD64_OpRsh8x64(v, config)
case OpRsh8x8:
return rewriteValueAMD64_OpRsh8x8(v, config)
- case OpAMD64SARB:
- return rewriteValueAMD64_OpAMD64SARB(v, config)
- case OpAMD64SARBconst:
- return rewriteValueAMD64_OpAMD64SARBconst(v, config)
- case OpAMD64SARL:
- return rewriteValueAMD64_OpAMD64SARL(v, config)
- case OpAMD64SARLconst:
- return rewriteValueAMD64_OpAMD64SARLconst(v, config)
- case OpAMD64SARQ:
- return rewriteValueAMD64_OpAMD64SARQ(v, config)
- case OpAMD64SARQconst:
- return rewriteValueAMD64_OpAMD64SARQconst(v, config)
- case OpAMD64SARW:
- return rewriteValueAMD64_OpAMD64SARW(v, config)
- case OpAMD64SARWconst:
- return rewriteValueAMD64_OpAMD64SARWconst(v, config)
- case OpAMD64SBBLcarrymask:
- return rewriteValueAMD64_OpAMD64SBBLcarrymask(v, config)
- case OpAMD64SBBQcarrymask:
- return rewriteValueAMD64_OpAMD64SBBQcarrymask(v, config)
- case OpAMD64SETA:
- return rewriteValueAMD64_OpAMD64SETA(v, config)
- case OpAMD64SETAE:
- return rewriteValueAMD64_OpAMD64SETAE(v, config)
- case OpAMD64SETB:
- return rewriteValueAMD64_OpAMD64SETB(v, config)
- case OpAMD64SETBE:
- return rewriteValueAMD64_OpAMD64SETBE(v, config)
- case OpAMD64SETEQ:
- return rewriteValueAMD64_OpAMD64SETEQ(v, config)
- case OpAMD64SETG:
- return rewriteValueAMD64_OpAMD64SETG(v, config)
- case OpAMD64SETGE:
- return rewriteValueAMD64_OpAMD64SETGE(v, config)
- case OpAMD64SETL:
- return rewriteValueAMD64_OpAMD64SETL(v, config)
- case OpAMD64SETLE:
- return rewriteValueAMD64_OpAMD64SETLE(v, config)
- case OpAMD64SETNE:
- return rewriteValueAMD64_OpAMD64SETNE(v, config)
- case OpAMD64SHLL:
- return rewriteValueAMD64_OpAMD64SHLL(v, config)
- case OpAMD64SHLQ:
- return rewriteValueAMD64_OpAMD64SHLQ(v, config)
- case OpAMD64SHRB:
- return rewriteValueAMD64_OpAMD64SHRB(v, config)
- case OpAMD64SHRL:
- return rewriteValueAMD64_OpAMD64SHRL(v, config)
- case OpAMD64SHRQ:
- return rewriteValueAMD64_OpAMD64SHRQ(v, config)
- case OpAMD64SHRW:
- return rewriteValueAMD64_OpAMD64SHRW(v, config)
- case OpAMD64SUBL:
- return rewriteValueAMD64_OpAMD64SUBL(v, config)
- case OpAMD64SUBLconst:
- return rewriteValueAMD64_OpAMD64SUBLconst(v, config)
- case OpAMD64SUBQ:
- return rewriteValueAMD64_OpAMD64SUBQ(v, config)
- case OpAMD64SUBQconst:
- return rewriteValueAMD64_OpAMD64SUBQconst(v, config)
case OpSignExt16to32:
return rewriteValueAMD64_OpSignExt16to32(v, config)
case OpSignExt16to64:
@@ -712,14 +730,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
return rewriteValueAMD64_OpTrunc64to32(v, config)
case OpTrunc64to8:
return rewriteValueAMD64_OpTrunc64to8(v, config)
- case OpAMD64XORL:
- return rewriteValueAMD64_OpAMD64XORL(v, config)
- case OpAMD64XORLconst:
- return rewriteValueAMD64_OpAMD64XORLconst(v, config)
- case OpAMD64XORQ:
- return rewriteValueAMD64_OpAMD64XORQ(v, config)
- case OpAMD64XORQconst:
- return rewriteValueAMD64_OpAMD64XORQconst(v, config)
case OpXor16:
return rewriteValueAMD64_OpXor16(v, config)
case OpXor32:
@@ -842,6 +852,27 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (ADDLconst [c] (LEAL [d] {s} x))
+ // cond: is32Bit(c+d)
+ // result: (LEAL [c+d] {s} x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ d := v_0.AuxInt
+ s := v_0.Aux
+ x := v_0.Args[0]
+ if !(is32Bit(c + d)) {
+ break
+ }
+ v.reset(OpAMD64LEAL)
+ v.AuxInt = c + d
+ v.Aux = s
+ v.AddArg(x)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64ADDQ(v *Value, config *Config) bool {
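
Editor's note: unlike the reorderings around it, the hunk above adds a genuinely new rule: an ADDLconst whose argument is a LEAL folds its constant into the LEAL displacement, guarded by is32Bit(c+d) so the combined offset still fits a signed 32-bit immediate. A minimal standalone sketch of that guard (is32Bit is defined this way in the compiler's rewrite helpers):

package main

import "fmt"

// is32Bit reports whether n is representable as a signed 32-bit
// immediate; (ADDLconst [c] (LEAL [d] {s} x)) -> (LEAL [c+d] {s} x)
// only fires when is32Bit(c+d) holds, keeping the folded
// displacement encodable.
func is32Bit(n int64) bool {
	return n == int64(int32(n))
}

func main() {
	fmt.Println(is32Bit(1<<31 - 1)) // true: maximum int32, folding allowed
	fmt.Println(is32Bit(1 << 31))   // false: would overflow, the rule does not fire
}
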
@@ -1525,242 +1556,6 @@ func rewriteValueAMD64_OpAMD64ANDQconst(v *Value, config *Config) bool {
}
return false
}
-func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add16 x y)
- // cond:
- // result: (ADDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAdd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32 x y)
- // cond:
- // result: (ADDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAdd32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32F x y)
- // cond:
- // result: (ADDSS x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDSS)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAdd64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add64 x y)
- // cond:
- // result: (ADDQ x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDQ)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAdd64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add64F x y)
- // cond:
- // result: (ADDSD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDSD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAdd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add8 x y)
- // cond:
- // result: (ADDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAddPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (AddPtr x y)
- // cond:
- // result: (ADDQ x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDQ)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAddr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Addr {sym} base)
- // cond:
- // result: (LEAQ {sym} base)
- for {
- sym := v.Aux
- base := v.Args[0]
- v.reset(OpAMD64LEAQ)
- v.Aux = sym
- v.AddArg(base)
- return true
- }
-}
-func rewriteValueAMD64_OpAnd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And16 x y)
- // cond:
- // result: (ANDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAnd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And32 x y)
- // cond:
- // result: (ANDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAnd64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And64 x y)
- // cond:
- // result: (ANDQ x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAnd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And8 x y)
- // cond:
- // result: (ANDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAndB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (AndB x y)
- // cond:
- // result: (ANDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAvg64u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Avg64u x y)
- // cond:
- // result: (AVGQU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64AVGQU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpBswap32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Bswap32 x)
- // cond:
- // result: (BSWAPL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64BSWAPL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpBswap64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Bswap64 x)
- // cond:
- // result: (BSWAPQ x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64BSWAPQ)
- v.AddArg(x)
- return true
- }
-}
func rewriteValueAMD64_OpAMD64CMOVLEQconst(v *Value, config *Config) bool {
b := v.Block
_ = b
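
Editor's note: the block of deletions that just ended (OpAdd16 through OpBswap64) is pure relocation — the regenerated file emits helper functions in the same order as the reordered dispatch switch, so these lowering bodies reappear unchanged later in the file. Each one is the mechanical translation of a one-line rule in gen/AMD64.rules, e.g. "(Add16 x y) -> (ADDL x y)". A self-contained toy translation of that rule, using stand-in types rather than the compiler's:

package main

import "fmt"

type Value struct {
	Op   string
	Args []*Value
}

// lowerAdd16 has the shape of rewriteValueAMD64_OpAdd16: the rule is
// unconditional, so it always fires and returns true.
func lowerAdd16(v *Value) bool {
	x, y := v.Args[0], v.Args[1]
	v.Op = "ADDL" // v.reset(OpAMD64ADDL) in the generated code
	v.Args = []*Value{x, y}
	return true
}

func main() {
	v := &Value{Op: "Add16", Args: []*Value{{Op: "x"}, {Op: "y"}}}
	fmt.Println(lowerAdd16(v), v.Op) // true ADDL
}
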
@@ -1768,28 +1563,28 @@ func rewriteValueAMD64_OpAMD64CMOVLEQconst(v *Value, config *Config) bool {
// cond:
// result: (CMOVLNEconst x y [c])
for {
+ c := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64InvertFlags {
break
}
y := v_1.Args[0]
- c := v.AuxInt
v.reset(OpAMD64CMOVLNEconst)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (CMOVLEQconst _ (FlagEQ) [c])
// cond:
// result: (Const32 [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpAMD64FlagEQ {
break
}
- c := v.AuxInt
v.reset(OpConst32)
v.AuxInt = c
return true
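
Editor's note: nearly every hunk from here on makes the same mechanical change: the regenerated matcher reads a value's fields in a fixed canonical order — AuxInt first, then Aux, then Args — both when matching and when building the replacement (v.AuxInt is assigned before AddArg calls). Cheap scalar guards therefore fail fast before any argument is examined. A runnable sketch of that ordering, with a stand-in Value type:

package main

import "fmt"

type Value struct {
	AuxInt int64
	Args   []*Value
}

// matchNew applies the canonical field order: the AuxInt guard runs
// before Args are touched, so a failed constant check bails out
// without ever reading an argument.
func matchNew(v *Value) bool {
	if v.AuxInt != 0 { // cheap guard first
		return false
	}
	x := v.Args[0] // arguments only after all scalar checks
	_ = x
	return true
}

func main() {
	fmt.Println(matchNew(&Value{AuxInt: 7}))                      // false; Args never touched
	fmt.Println(matchNew(&Value{AuxInt: 0, Args: []*Value{nil}})) // true
}
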
@@ -1859,28 +1654,28 @@ func rewriteValueAMD64_OpAMD64CMOVQEQconst(v *Value, config *Config) bool {
// cond:
// result: (CMOVQNEconst x y [c])
for {
+ c := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64InvertFlags {
break
}
y := v_1.Args[0]
- c := v.AuxInt
v.reset(OpAMD64CMOVQNEconst)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (CMOVQEQconst _ (FlagEQ) [c])
// cond:
// result: (Const64 [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpAMD64FlagEQ {
break
}
- c := v.AuxInt
v.reset(OpConst64)
v.AuxInt = c
return true
@@ -1950,28 +1745,28 @@ func rewriteValueAMD64_OpAMD64CMOVWEQconst(v *Value, config *Config) bool {
// cond:
// result: (CMOVWNEconst x y [c])
for {
+ c := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64InvertFlags {
break
}
y := v_1.Args[0]
- c := v.AuxInt
v.reset(OpAMD64CMOVWNEconst)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (CMOVWEQconst _ (FlagEQ) [c])
// cond:
// result: (Const16 [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpAMD64FlagEQ {
break
}
- c := v.AuxInt
v.reset(OpConst16)
v.AuxInt = c
return true
@@ -2048,8 +1843,8 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value, config *Config) bool {
}
c := v_1.AuxInt
v.reset(OpAMD64CMPBconst)
- v.AddArg(x)
v.AuxInt = int64(int8(c))
+ v.AddArg(x)
return true
}
// match: (CMPB (MOVLconst [c]) x)
@@ -2064,8 +1859,8 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value, config *Config) bool {
x := v.Args[1]
v.reset(OpAMD64InvertFlags)
v0 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v0.AddArg(x)
v0.AuxInt = int64(int8(c))
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
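
Editor's note: the second CMPB rule above shows why InvertFlags exists: the x86 compare-with-immediate form only takes the constant as its second operand, so (CMPB (MOVLconst [c]) x) is emitted as CMPBconst x [c] wrapped in InvertFlags, and downstream consumers — the block rewrites seen earlier, e.g. (ULT (InvertFlags cmp)) -> (UGT cmp) — read the mirrored condition. The mirroring on flag constants, as a runnable sketch (the invert helper is an illustrative stand-in):

package main

import "fmt"

// invert models how an InvertFlags wrapper is consumed: comparing
// c against x yields the mirror image of comparing x against c,
// with equality its own mirror.
func invert(f string) string {
	switch f {
	case "FlagLT_ULT":
		return "FlagGT_UGT"
	case "FlagLT_UGT":
		return "FlagGT_ULT"
	case "FlagGT_ULT":
		return "FlagLT_UGT"
	case "FlagGT_UGT":
		return "FlagLT_ULT"
	default: // FlagEQ
		return f
	}
}

func main() {
	fmt.Println(invert("FlagLT_ULT")) // FlagGT_UGT
	fmt.Println(invert("FlagEQ"))     // FlagEQ
}
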
@@ -2078,12 +1873,12 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
// cond: int8(x)==int8(y)
// result: (FlagEQ)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int8(x) == int8(y)) {
break
}
@@ -2094,12 +1889,12 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
// cond: int8(x)<int8(y) && uint8(x)<uint8(y)
// result: (FlagLT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
break
}
@@ -2110,12 +1905,12 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
// cond: int8(x)<int8(y) && uint8(x)>uint8(y)
// result: (FlagLT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
break
}
@@ -2126,12 +1921,12 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
// cond: int8(x)>int8(y) && uint8(x)<uint8(y)
// result: (FlagGT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
break
}
@@ -2142,12 +1937,12 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
// cond: int8(x)>int8(y) && uint8(x)>uint8(y)
// result: (FlagGT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
break
}
@@ -2158,12 +1953,12 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
// cond: 0 <= int8(m) && int8(m) < int8(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDLconst {
break
}
m := v_0.AuxInt
- n := v.AuxInt
if !(0 <= int8(m) && int8(m) < int8(n)) {
break
}
@@ -2174,15 +1969,15 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
// cond:
// result: (TESTB x y)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDL {
break
}
x := v_0.Args[0]
y := v_0.Args[1]
- if v.AuxInt != 0 {
- break
- }
v.reset(OpAMD64TESTB)
v.AddArg(x)
v.AddArg(y)
@@ -2192,15 +1987,15 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
// cond:
// result: (TESTBconst [int64(int8(c))] x)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDLconst {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
- if v.AuxInt != 0 {
- break
- }
v.reset(OpAMD64TESTBconst)
v.AuxInt = int64(int8(c))
v.AddArg(x)
@@ -2210,10 +2005,10 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
// cond:
// result: (TESTB x x)
for {
- x := v.Args[0]
if v.AuxInt != 0 {
break
}
+ x := v.Args[0]
v.reset(OpAMD64TESTB)
v.AddArg(x)
v.AddArg(x)
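
Editor's note: taken together, the CMPBconst hunks normalize one matcher idiom: a rule such as (CMPBconst (ANDL x y) [0]) -> (TESTB x y) compiles to an AuxInt guard followed by argument destructuring, now in that order. A self-contained toy of the whole match-and-rewrite step, using stand-in Value/Op types rather than the compiler's:

package main

import "fmt"

type Op string

type Value struct {
	Op     Op
	AuxInt int64
	Args   []*Value
}

// rewriteCMPBconst applies the single rule
//   (CMPBconst (ANDL x y) [0]) -> (TESTB x y)
// checking the cheap AuxInt guard before touching Args, as the
// regenerated matcher does.
func rewriteCMPBconst(v *Value) bool {
	if v.Op != "CMPBconst" || v.AuxInt != 0 {
		return false
	}
	and := v.Args[0]
	if and.Op != "ANDL" {
		return false
	}
	x, y := and.Args[0], and.Args[1]
	v.Op = "TESTB"
	v.Args = []*Value{x, y}
	return true
}

func main() {
	x, y := &Value{Op: "x"}, &Value{Op: "y"}
	v := &Value{Op: "CMPBconst", Args: []*Value{{Op: "ANDL", Args: []*Value{x, y}}}}
	fmt.Println(rewriteCMPBconst(v), v.Op) // true TESTB
}
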
@@ -2235,8 +2030,8 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value, config *Config) bool {
}
c := v_1.AuxInt
v.reset(OpAMD64CMPLconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (CMPL (MOVLconst [c]) x)
@@ -2251,8 +2046,8 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value, config *Config) bool {
x := v.Args[1]
v.reset(OpAMD64InvertFlags)
v0 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v0.AddArg(x)
v0.AuxInt = c
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2265,12 +2060,12 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
// cond: int32(x)==int32(y)
// result: (FlagEQ)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) == int32(y)) {
break
}
@@ -2281,12 +2076,12 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
// result: (FlagLT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
break
}
@@ -2297,12 +2092,12 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
// result: (FlagLT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
break
}
@@ -2313,12 +2108,12 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
// result: (FlagGT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
break
}
@@ -2329,12 +2124,12 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
// result: (FlagGT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
break
}
@@ -2345,12 +2140,12 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64SHRLconst {
break
}
c := v_0.AuxInt
- n := v.AuxInt
if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
break
}
@@ -2361,12 +2156,12 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
// cond: 0 <= int32(m) && int32(m) < int32(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDLconst {
break
}
m := v_0.AuxInt
- n := v.AuxInt
if !(0 <= int32(m) && int32(m) < int32(n)) {
break
}
@@ -2377,15 +2172,15 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
// cond:
// result: (TESTL x y)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDL {
break
}
x := v_0.Args[0]
y := v_0.Args[1]
- if v.AuxInt != 0 {
- break
- }
v.reset(OpAMD64TESTL)
v.AddArg(x)
v.AddArg(y)
@@ -2395,15 +2190,15 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
// cond:
// result: (TESTLconst [c] x)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDLconst {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
- if v.AuxInt != 0 {
- break
- }
v.reset(OpAMD64TESTLconst)
v.AuxInt = c
v.AddArg(x)
@@ -2413,10 +2208,10 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
// cond:
// result: (TESTL x x)
for {
- x := v.Args[0]
if v.AuxInt != 0 {
break
}
+ x := v.Args[0]
v.reset(OpAMD64TESTL)
v.AddArg(x)
v.AddArg(x)
@@ -2441,8 +2236,8 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value, config *Config) bool {
break
}
v.reset(OpAMD64CMPQconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (CMPQ (MOVQconst [c]) x)
@@ -2460,8 +2255,8 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value, config *Config) bool {
}
v.reset(OpAMD64InvertFlags)
v0 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v0.AddArg(x)
v0.AuxInt = c
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2474,12 +2269,12 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond: x==y
// result: (FlagEQ)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVQconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(x == y) {
break
}
@@ -2490,12 +2285,12 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond: x<y && uint64(x)<uint64(y)
// result: (FlagLT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVQconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(x < y && uint64(x) < uint64(y)) {
break
}
@@ -2506,12 +2301,12 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond: x<y && uint64(x)>uint64(y)
// result: (FlagLT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVQconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(x < y && uint64(x) > uint64(y)) {
break
}
@@ -2522,12 +2317,12 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond: x>y && uint64(x)<uint64(y)
// result: (FlagGT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVQconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(x > y && uint64(x) < uint64(y)) {
break
}
@@ -2538,12 +2333,12 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond: x>y && uint64(x)>uint64(y)
// result: (FlagGT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVQconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(x > y && uint64(x) > uint64(y)) {
break
}
@@ -2554,11 +2349,11 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond: 0xFF < c
// result: (FlagLT_ULT)
for {
+ c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVBQZX {
break
}
- c := v.AuxInt
if !(0xFF < c) {
break
}
@@ -2569,11 +2364,11 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond: 0xFFFF < c
// result: (FlagLT_ULT)
for {
+ c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVWQZX {
break
}
- c := v.AuxInt
if !(0xFFFF < c) {
break
}
@@ -2584,11 +2379,11 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond: 0xFFFFFFFF < c
// result: (FlagLT_ULT)
for {
+ c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLQZX {
break
}
- c := v.AuxInt
if !(0xFFFFFFFF < c) {
break
}
@@ -2599,12 +2394,12 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64SHRQconst {
break
}
c := v_0.AuxInt
- n := v.AuxInt
if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
break
}
@@ -2615,12 +2410,12 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond: 0 <= m && m < n
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDQconst {
break
}
m := v_0.AuxInt
- n := v.AuxInt
if !(0 <= m && m < n) {
break
}
@@ -2631,15 +2426,15 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond:
// result: (TESTQ x y)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDQ {
break
}
x := v_0.Args[0]
y := v_0.Args[1]
- if v.AuxInt != 0 {
- break
- }
v.reset(OpAMD64TESTQ)
v.AddArg(x)
v.AddArg(y)
@@ -2649,15 +2444,15 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond:
// result: (TESTQconst [c] x)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDQconst {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
- if v.AuxInt != 0 {
- break
- }
v.reset(OpAMD64TESTQconst)
v.AuxInt = c
v.AddArg(x)
@@ -2667,10 +2462,10 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
// cond:
// result: (TESTQ x x)
for {
- x := v.Args[0]
if v.AuxInt != 0 {
break
}
+ x := v.Args[0]
v.reset(OpAMD64TESTQ)
v.AddArg(x)
v.AddArg(x)
@@ -2692,8 +2487,8 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value, config *Config) bool {
}
c := v_1.AuxInt
v.reset(OpAMD64CMPWconst)
- v.AddArg(x)
v.AuxInt = int64(int16(c))
+ v.AddArg(x)
return true
}
// match: (CMPW (MOVLconst [c]) x)
@@ -2708,8 +2503,8 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value, config *Config) bool {
x := v.Args[1]
v.reset(OpAMD64InvertFlags)
v0 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v0.AddArg(x)
v0.AuxInt = int64(int16(c))
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2722,12 +2517,12 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
// cond: int16(x)==int16(y)
// result: (FlagEQ)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int16(x) == int16(y)) {
break
}
@@ -2738,12 +2533,12 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
// cond: int16(x)<int16(y) && uint16(x)<uint16(y)
// result: (FlagLT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
break
}
@@ -2754,12 +2549,12 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
// cond: int16(x)<int16(y) && uint16(x)>uint16(y)
// result: (FlagLT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
break
}
@@ -2770,12 +2565,12 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
// cond: int16(x)>int16(y) && uint16(x)<uint16(y)
// result: (FlagGT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
break
}
@@ -2786,12 +2581,12 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
// cond: int16(x)>int16(y) && uint16(x)>uint16(y)
// result: (FlagGT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
break
}
@@ -2802,12 +2597,12 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
// cond: 0 <= int16(m) && int16(m) < int16(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDLconst {
break
}
m := v_0.AuxInt
- n := v.AuxInt
if !(0 <= int16(m) && int16(m) < int16(n)) {
break
}
@@ -2818,15 +2613,15 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
// cond:
// result: (TESTW x y)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDL {
break
}
x := v_0.Args[0]
y := v_0.Args[1]
- if v.AuxInt != 0 {
- break
- }
v.reset(OpAMD64TESTW)
v.AddArg(x)
v.AddArg(y)
@@ -2836,15 +2631,15 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
// cond:
// result: (TESTWconst [int64(int16(c))] x)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDLconst {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
- if v.AuxInt != 0 {
- break
- }
v.reset(OpAMD64TESTWconst)
v.AuxInt = int64(int16(c))
v.AddArg(x)
@@ -2854,10 +2649,10 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
// cond:
// result: (TESTW x x)
for {
- x := v.Args[0]
if v.AuxInt != 0 {
break
}
+ x := v.Args[0]
v.reset(OpAMD64TESTW)
v.AddArg(x)
v.AddArg(x)
@@ -2865,1285 +2660,32 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
}
return false
}
-func rewriteValueAMD64_OpClosureCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ClosureCall [argwid] entry closure mem)
- // cond:
- // result: (CALLclosure [argwid] entry closure mem)
- for {
- argwid := v.AuxInt
- entry := v.Args[0]
- closure := v.Args[1]
- mem := v.Args[2]
- v.reset(OpAMD64CALLclosure)
- v.AuxInt = argwid
- v.AddArg(entry)
- v.AddArg(closure)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueAMD64_OpCom16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com16 x)
- // cond:
- // result: (NOTL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64NOTL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCom32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com32 x)
- // cond:
- // result: (NOTL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64NOTL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCom64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com64 x)
- // cond:
- // result: (NOTQ x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64NOTQ)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCom8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com8 x)
- // cond:
- // result: (NOTL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64NOTL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpConst16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const16 [val])
- // cond:
- // result: (MOVLconst [val])
- for {
- val := v.AuxInt
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueAMD64_OpConst32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const32 [val])
- // cond:
- // result: (MOVLconst [val])
- for {
- val := v.AuxInt
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueAMD64_OpConst32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const32F [val])
- // cond:
- // result: (MOVSSconst [val])
- for {
- val := v.AuxInt
- v.reset(OpAMD64MOVSSconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueAMD64_OpConst64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const64 [val])
- // cond:
- // result: (MOVQconst [val])
- for {
- val := v.AuxInt
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueAMD64_OpConst64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const64F [val])
- // cond:
- // result: (MOVSDconst [val])
- for {
- val := v.AuxInt
- v.reset(OpAMD64MOVSDconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueAMD64_OpConst8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const8 [val])
- // cond:
- // result: (MOVLconst [val])
- for {
- val := v.AuxInt
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueAMD64_OpConstBool(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ConstBool [b])
- // cond:
- // result: (MOVLconst [b])
- for {
- b := v.AuxInt
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = b
- return true
- }
-}
-func rewriteValueAMD64_OpConstNil(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ConstNil)
- // cond:
- // result: (MOVQconst [0])
- for {
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueAMD64_OpConvert(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Convert <t> x mem)
- // cond:
- // result: (MOVQconvert <t> x mem)
- for {
- t := v.Type
- x := v.Args[0]
- mem := v.Args[1]
- v.reset(OpAMD64MOVQconvert)
- v.Type = t
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueAMD64_OpCtz16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Ctz16 <t> x)
- // cond:
- // result: (CMOVWEQconst (BSFW <t> x) (CMPWconst x [0]) [16])
- for {
- t := v.Type
- x := v.Args[0]
- v.reset(OpAMD64CMOVWEQconst)
- v0 := b.NewValue0(v.Line, OpAMD64BSFW, t)
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v1.AddArg(x)
- v1.AuxInt = 0
- v.AddArg(v1)
- v.AuxInt = 16
- return true
- }
-}
-func rewriteValueAMD64_OpCtz32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Ctz32 <t> x)
- // cond:
- // result: (CMOVLEQconst (BSFL <t> x) (CMPLconst x [0]) [32])
- for {
- t := v.Type
- x := v.Args[0]
- v.reset(OpAMD64CMOVLEQconst)
- v0 := b.NewValue0(v.Line, OpAMD64BSFL, t)
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v1.AddArg(x)
- v1.AuxInt = 0
- v.AddArg(v1)
- v.AuxInt = 32
- return true
- }
-}
-func rewriteValueAMD64_OpCtz64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Ctz64 <t> x)
- // cond:
- // result: (CMOVQEQconst (BSFQ <t> x) (CMPQconst x [0]) [64])
- for {
- t := v.Type
- x := v.Args[0]
- v.reset(OpAMD64CMOVQEQconst)
- v0 := b.NewValue0(v.Line, OpAMD64BSFQ, t)
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v1.AddArg(x)
- v1.AuxInt = 0
- v.AddArg(v1)
- v.AuxInt = 64
- return true
- }
-}
-func rewriteValueAMD64_OpCvt32Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Fto32 x)
- // cond:
- // result: (CVTTSS2SL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTTSS2SL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt32Fto64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Fto64 x)
- // cond:
- // result: (CVTTSS2SQ x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTTSS2SQ)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt32Fto64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Fto64F x)
- // cond:
- // result: (CVTSS2SD x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTSS2SD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt32to32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32to32F x)
- // cond:
- // result: (CVTSL2SS x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTSL2SS)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt32to64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32to64F x)
- // cond:
- // result: (CVTSL2SD x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTSL2SD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt64Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64Fto32 x)
- // cond:
- // result: (CVTTSD2SL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTTSD2SL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt64Fto32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64Fto32F x)
- // cond:
- // result: (CVTSD2SS x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTSD2SS)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt64Fto64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64Fto64 x)
- // cond:
- // result: (CVTTSD2SQ x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTTSD2SQ)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt64to32F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64LEAL(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Cvt64to32F x)
- // cond:
- // result: (CVTSQ2SS x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTSQ2SS)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt64to64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64to64F x)
- // cond:
- // result: (CVTSQ2SD x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTSQ2SD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpDeferCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (DeferCall [argwid] mem)
- // cond:
- // result: (CALLdefer [argwid] mem)
- for {
- argwid := v.AuxInt
- mem := v.Args[0]
- v.reset(OpAMD64CALLdefer)
- v.AuxInt = argwid
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div16 x y)
- // cond:
- // result: (DIVW x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64DIVW)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div16u x y)
- // cond:
- // result: (DIVWU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64DIVWU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div32 x y)
- // cond:
- // result: (DIVL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64DIVL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div32F x y)
- // cond:
- // result: (DIVSS x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64DIVSS)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div32u x y)
- // cond:
- // result: (DIVLU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64DIVLU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div64 x y)
- // cond:
- // result: (DIVQ x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64DIVQ)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div64F x y)
- // cond:
- // result: (DIVSD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64DIVSD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv64u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div64u x y)
- // cond:
- // result: (DIVQU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64DIVQU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div8 x y)
- // cond:
- // result: (DIVW (SignExt8to16 x) (SignExt8to16 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64DIVW)
- v0 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv8u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div8u x y)
- // cond:
- // result: (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64DIVWU)
- v0 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
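The Div8 and Div8u lowerings above use no byte-wide divide of their own: both operands are widened to 16 bits (sign-extended for the signed case, zero-extended for the unsigned one) so the 16-bit DIVW/DIVWU ops can be reused; the Mod8/Mod8u rules later in this file apply the same widening. A minimal scalar sketch of the signed case, assuming Go's truncating integer division:

func div8(x, y int8) int8 {
	// SignExt8to16 both operands, DIVW, then truncate back to 8 bits.
	return int8(int16(x) / int16(y))
}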
-func rewriteValueAMD64_OpEq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq16 x y)
- // cond:
- // result: (SETEQ (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETEQ)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpEq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq32 x y)
- // cond:
- // result: (SETEQ (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETEQ)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpEq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq32F x y)
- // cond:
- // result: (SETEQF (UCOMISS x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETEQF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpEq64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq64 x y)
- // cond:
- // result: (SETEQ (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETEQ)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpEq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq64F x y)
- // cond:
- // result: (SETEQF (UCOMISD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETEQF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpEq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq8 x y)
- // cond:
- // result: (SETEQ (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETEQ)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpEqB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (EqB x y)
- // cond:
- // result: (SETEQ (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETEQ)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpEqPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (EqPtr x y)
- // cond:
- // result: (SETEQ (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETEQ)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq16 x y)
- // cond:
- // result: (SETGE (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq16U x y)
- // cond:
- // result: (SETAE (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq32 x y)
- // cond:
- // result: (SETGE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq32F x y)
- // cond:
- // result: (SETGEF (UCOMISS x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGEF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq32U x y)
- // cond:
- // result: (SETAE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq64 x y)
- // cond:
- // result: (SETGE (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq64F x y)
- // cond:
- // result: (SETGEF (UCOMISD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGEF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq64U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq64U x y)
- // cond:
- // result: (SETAE (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq8 x y)
- // cond:
- // result: (SETGE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq8U x y)
- // cond:
- // result: (SETAE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGetClosurePtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (GetClosurePtr)
- // cond:
- // result: (LoweredGetClosurePtr)
- for {
- v.reset(OpAMD64LoweredGetClosurePtr)
- return true
- }
-}
-func rewriteValueAMD64_OpGetG(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (GetG mem)
- // cond:
- // result: (LoweredGetG mem)
- for {
- mem := v.Args[0]
- v.reset(OpAMD64LoweredGetG)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueAMD64_OpGoCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (GoCall [argwid] mem)
- // cond:
- // result: (CALLgo [argwid] mem)
- for {
- argwid := v.AuxInt
- mem := v.Args[0]
- v.reset(OpAMD64CALLgo)
- v.AuxInt = argwid
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater16 x y)
- // cond:
- // result: (SETG (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETG)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater16U x y)
- // cond:
- // result: (SETA (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETA)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater32 x y)
- // cond:
- // result: (SETG (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETG)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater32F x y)
- // cond:
- // result: (SETGF (UCOMISS x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater32U x y)
- // cond:
- // result: (SETA (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETA)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater64 x y)
- // cond:
- // result: (SETG (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETG)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater64F x y)
- // cond:
- // result: (SETGF (UCOMISD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater64U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater64U x y)
- // cond:
- // result: (SETA (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETA)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater8 x y)
- // cond:
- // result: (SETG (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETG)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater8U x y)
- // cond:
- // result: (SETA (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETA)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpHmul16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul16 x y)
- // cond:
- // result: (HMULW x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64HMULW)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpHmul16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul16u x y)
- // cond:
- // result: (HMULWU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64HMULWU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpHmul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul32 x y)
- // cond:
- // result: (HMULL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64HMULL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpHmul32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul32u x y)
- // cond:
- // result: (HMULLU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64HMULLU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpHmul64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul64 x y)
- // cond:
- // result: (HMULQ x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64HMULQ)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpHmul64u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul64u x y)
- // cond:
- // result: (HMULQU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64HMULQU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpHmul8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul8 x y)
- // cond:
- // result: (HMULB x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64HMULB)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpHmul8u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul8u x y)
- // cond:
- // result: (HMULBU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64HMULBU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
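The Hmul lowerings map Go's high-multiply ops 1:1 onto HMUL instructions, which yield the upper half of the double-width product. A scalar model of the signed 32-bit case:

func hmul32(x, y int32) int32 {
	// Compute the full 64-bit product, then keep the high 32 bits.
	return int32((int64(x) * int64(y)) >> 32)
}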
-func rewriteValueAMD64_OpITab(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ITab (Load ptr mem))
- // cond:
- // result: (MOVQload ptr mem)
+ // match: (LEAL [c] {s} (ADDLconst [d] x))
+ // cond: is32Bit(c+d)
+ // result: (LEAL [c+d] {s} x)
for {
+ c := v.AuxInt
+ s := v.Aux
v_0 := v.Args[0]
- if v_0.Op != OpLoad {
+ if v_0.Op != OpAMD64ADDLconst {
break
}
- ptr := v_0.Args[0]
- mem := v_0.Args[1]
- v.reset(OpAMD64MOVQload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(is32Bit(c + d)) {
+ break
+ }
+ v.reset(OpAMD64LEAL)
+ v.AuxInt = c + d
+ v.Aux = s
+ v.AddArg(x)
return true
}
return false
}
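The new LEAL rule folds a constant add into the address computation: (LEAL [c] {s} (ADDLconst [d] x)) becomes a single (LEAL [c+d] {s} x), guarded by is32Bit(c+d) so the combined displacement still fits the instruction's signed 32-bit offset field. A standalone sketch of that guard, matching the round-trip test the compiler's helper uses:

func is32Bit(n int64) bool {
	// True iff n survives a round trip through int32,
	// i.e. it fits in a signed 32-bit displacement.
	return n == int64(int32(n))
}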
-func rewriteValueAMD64_OpInterCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (InterCall [argwid] entry mem)
- // cond:
- // result: (CALLinter [argwid] entry mem)
- for {
- argwid := v.AuxInt
- entry := v.Args[0]
- mem := v.Args[1]
- v.reset(OpAMD64CALLinter)
- v.AuxInt = argwid
- v.AddArg(entry)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueAMD64_OpIsInBounds(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsInBounds idx len)
- // cond:
- // result: (SETB (CMPQ idx len))
- for {
- idx := v.Args[0]
- len := v.Args[1]
- v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(idx)
- v0.AddArg(len)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpIsNonNil(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsNonNil p)
- // cond:
- // result: (SETNE (TESTQ p p))
- for {
- p := v.Args[0]
- v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Line, OpAMD64TESTQ, TypeFlags)
- v0.AddArg(p)
- v0.AddArg(p)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpIsSliceInBounds(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsSliceInBounds idx len)
- // cond:
- // result: (SETBE (CMPQ idx len))
- for {
- idx := v.Args[0]
- len := v.Args[1]
- v.reset(OpAMD64SETBE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(idx)
- v0.AddArg(len)
- v.AddArg(v0)
- return true
- }
-}
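IsInBounds and IsSliceInBounds compile to unsigned comparisons (SETB/SETBE) deliberately: a negative index, reinterpreted as an unsigned value, is enormous and fails idx < len in the same branch, so one compare covers both the lower and upper bound. IsNonNil uses TESTQ p p, which sets the flags from p AND p without needing a constant operand. A scalar model of the index check:

func isInBounds(idx, length int64) bool {
	// SETB (CMPQ idx len): unsigned below catches idx < 0 too,
	// because a negative idx becomes a huge uint64.
	return uint64(idx) < uint64(length)
}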
func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -4832,893 +3374,6 @@ func rewriteValueAMD64_OpAMD64LEAQ8(v *Value, config *Config) bool {
}
return false
}
-func rewriteValueAMD64_OpLeq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq16 x y)
- // cond:
- // result: (SETLE (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETLE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq16U x y)
- // cond:
- // result: (SETBE (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETBE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq32 x y)
- // cond:
- // result: (SETLE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETLE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq32F x y)
- // cond:
- // result: (SETGEF (UCOMISS y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGEF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq32U x y)
- // cond:
- // result: (SETBE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETBE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq64 x y)
- // cond:
- // result: (SETLE (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETLE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq64F x y)
- // cond:
- // result: (SETGEF (UCOMISD y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGEF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq64U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq64U x y)
- // cond:
- // result: (SETBE (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETBE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq8 x y)
- // cond:
- // result: (SETLE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETLE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq8U x y)
- // cond:
- // result: (SETBE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETBE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less16 x y)
- // cond:
- // result: (SETL (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETL)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less16U x y)
- // cond:
- // result: (SETB (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less32 x y)
- // cond:
- // result: (SETL (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETL)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less32F x y)
- // cond:
- // result: (SETGF (UCOMISS y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less32U x y)
- // cond:
- // result: (SETB (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less64 x y)
- // cond:
- // result: (SETL (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETL)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less64F x y)
- // cond:
- // result: (SETGF (UCOMISD y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess64U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less64U x y)
- // cond:
- // result: (SETB (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less8 x y)
- // cond:
- // result: (SETL (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETL)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less8U x y)
- // cond:
- // result: (SETB (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
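Note the operand swap in the floating-point Less/Leq rules above: Less32F x y lowers to SETGF (UCOMISS y x) rather than a below-style test on (UCOMISS x y). UCOMIS reports an unordered result (a NaN operand) with flags that the below/below-or-equal conditions would read as true; computing y > x with the greater-flavored pseudo-ops instead makes every comparison involving NaN come out false, as Go requires. A scalar statement of the identity being used:

func less32F(x, y float32) bool {
	// Lowered as SETGF (UCOMISS y x): x < y iff y > x,
	// and both sides are false when either operand is NaN.
	return y > x
}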
-func rewriteValueAMD64_OpLoad(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Load <t> ptr mem)
- // cond: (is64BitInt(t) || isPtr(t))
- // result: (MOVQload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is64BitInt(t) || isPtr(t)) {
- break
- }
- v.reset(OpAMD64MOVQload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: is32BitInt(t)
- // result: (MOVLload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is32BitInt(t)) {
- break
- }
- v.reset(OpAMD64MOVLload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: is16BitInt(t)
- // result: (MOVWload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is16BitInt(t)) {
- break
- }
- v.reset(OpAMD64MOVWload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: (t.IsBoolean() || is8BitInt(t))
- // result: (MOVBload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(t.IsBoolean() || is8BitInt(t)) {
- break
- }
- v.reset(OpAMD64MOVBload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: is32BitFloat(t)
- // result: (MOVSSload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is32BitFloat(t)) {
- break
- }
- v.reset(OpAMD64MOVSSload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: is64BitFloat(t)
- // result: (MOVSDload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is64BitFloat(t)) {
- break
- }
- v.reset(OpAMD64MOVSDload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
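The Load lowering above dispatches purely on the value's type: 64-bit integers and pointers take MOVQload, then each narrower integer width and the two float widths get their own op. A compact model of that dispatch, with a hypothetical helper name and string op names used purely for illustration:

func loadOp(size int64, isFloat bool) string {
	switch {
	case isFloat && size == 4:
		return "MOVSSload"
	case isFloat && size == 8:
		return "MOVSDload"
	case size == 8:
		return "MOVQload" // also pointers
	case size == 4:
		return "MOVLload"
	case size == 2:
		return "MOVWload"
	default:
		return "MOVBload" // bytes and booleans
	}
}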
-func rewriteValueAMD64_OpLrot16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot16 <t> x [c])
- // cond:
- // result: (ROLWconst <t> [c&15] x)
- for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(OpAMD64ROLWconst)
- v.Type = t
- v.AuxInt = c & 15
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpLrot32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot32 <t> x [c])
- // cond:
- // result: (ROLLconst <t> [c&31] x)
- for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(OpAMD64ROLLconst)
- v.Type = t
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpLrot64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot64 <t> x [c])
- // cond:
- // result: (ROLQconst <t> [c&63] x)
- for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(OpAMD64ROLQconst)
- v.Type = t
- v.AuxInt = c & 63
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpLrot8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot8 <t> x [c])
- // cond:
- // result: (ROLBconst <t> [c&7] x)
- for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(OpAMD64ROLBconst)
- v.Type = t
- v.AuxInt = c & 7
- v.AddArg(x)
- return true
- }
-}
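The Lrot rules mask the rotate count down to the operand width (c&7, c&15, c&31, c&63) before emitting a ROLconst, since a rotate by the full width is the identity. A 16-bit scalar model:

func rot16(x uint16, c uint) uint16 {
	c &= 15 // ROLWconst [c&15]
	// c == 0 is safe in Go: x>>16 is 0, leaving x<<0 == x.
	return x<<c | x>>(16-c)
}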
-func rewriteValueAMD64_OpLsh16x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x16 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh16x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x32 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh16x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x64 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh16x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x8 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh32x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x16 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh32x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x32 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh32x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x64 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh32x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x8 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh64x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh64x16 <t> x y)
- // cond:
- // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 64
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh64x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh64x32 <t> x y)
- // cond:
- // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 64
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh64x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh64x64 <t> x y)
- // cond:
- // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 64
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh64x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh64x8 <t> x y)
- // cond:
- // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 64
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh8x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x16 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh8x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x32 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh8x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x64 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh8x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x8 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
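The block of Lsh rules removed above exists because Go and x86 disagree about large shift counts: Go defines x << y as 0 once y reaches the operand width, while SHL masks the count mod 32 (or 64). The lowering therefore ANDs the raw SHL result with an SBBcarrymask, which materializes all-ones when the count is in range and zero otherwise. A scalar model of the 32-bit pattern:

func lsh32(x, y uint32) uint32 {
	shifted := x << (y & 31) // what SHLL actually computes
	var mask uint32
	if y < 32 { // SBBLcarrymask (CMPLconst y [32])
		mask = ^uint32(0)
	}
	return shifted & mask // ANDL: forces 0 once y >= 32
}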
func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -5991,6 +3646,53 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value, config *Config) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: canMergeSym(sym1, sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
return false
}
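The two rules just added to MOVBload are the 32-bit-pointer counterparts of the existing LEAQ/ADDQconst folds, and the same pair recurs below for MOVBstore and for the L, Q, and W widths: a LEAL feeding a load merges its symbol and offset into the load when canMergeSym allows it, and an ADDLconst folds into the displacement when the sum stays within 32 bits. A sketch of the symbol-merging guard, assuming the usual rule that at most one side may carry a symbol (these are hypothetical stand-ins, not the compiler's exact helpers):

func canMergeSym(x, y interface{}) bool {
	// Two references merge only if at least one has no symbol,
	// so the merged address still names a single symbol.
	return x == nil || y == nil
}

func mergeSym(x, y interface{}) interface{} {
	if x == nil {
		return y
	}
	return x
}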
func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value, config *Config) bool {
@@ -6306,6 +4008,57 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: canMergeSym(sym1, sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value, config *Config) bool {
@@ -6434,6 +4187,53 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value, config *Config) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
+ // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd(off)
+ // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+ for {
+ sc := v.AuxInt
+ s := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(sc).canAdd(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = s
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
return false
}
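The storeconst rules lean on ValAndOff, which packs the constant value and the displacement into the single AuxInt field: value in the high 32 bits, offset in the low 32. canAdd checks that bumping the offset keeps it representable, and add returns the repacked pair. A simplified sketch of that encoding (it mirrors the shape of the real ssa-package type, not its exact API):

type ValAndOff int64

func (x ValAndOff) Val() int64 { return int64(x) >> 32 }
func (x ValAndOff) Off() int64 { return int64(int32(x)) } // sign-extended low half

func (x ValAndOff) canAdd(off int64) bool {
	newOff := x.Off() + off
	return newOff == int64(int32(newOff))
}

func (x ValAndOff) add(off int64) int64 {
	return x.Val()<<32 | int64(uint32(x.Off()+off))
}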
func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value, config *Config) bool {
@@ -6998,6 +4798,53 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value, config *Config) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: canMergeSym(sym1, sym2)
+ // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVLload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value, config *Config) bool {
@@ -7414,6 +5261,57 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value, config *Config) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: canMergeSym(sym1, sym2)
+ // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVLstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value, config *Config) bool {
@@ -7571,6 +5469,53 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value, config *Config) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
+ // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd(off)
+ // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+ for {
+ sc := v.AuxInt
+ s := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(sc).canAdd(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = s
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value, config *Config) bool {
@@ -8369,6 +6314,53 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value, config *Config) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: canMergeSym(sym1, sym2)
+ // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVQload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVQload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64MOVQload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value, config *Config) bool {
@@ -8652,6 +6644,57 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value, config *Config) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: canMergeSym(sym1, sym2)
+ // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVQstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value, config *Config) bool {
@@ -8777,6 +6820,53 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value, config *Config) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
+ // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd(off)
+ // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+ for {
+ sc := v.AuxInt
+ s := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(sc).canAdd(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = s
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value, config *Config) bool {
@@ -10298,6 +8388,53 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value, config *Config) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: canMergeSym(sym1, sym2)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVWload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64MOVWload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value, config *Config) bool {
@@ -10714,6 +8851,57 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: canMergeSym(sym1, sym2)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value, config *Config) bool {
@@ -10868,6 +9056,53 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value, config *Config) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
+ // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd(off)
+ // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+ for {
+ sc := v.AuxInt
+ s := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(sc).canAdd(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = ValAndOff(sc).add(off)
+ v.Aux = s
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value, config *Config) bool {
@@ -11441,6 +9676,22 @@ func rewriteValueAMD64_OpAMD64MULL(v *Value, config *Config) bool {
func rewriteValueAMD64_OpAMD64MULLconst(v *Value, config *Config) bool {
b := v.Block
_ = b
+ // match: (MULLconst [c] (MULLconst [d] x))
+ // cond:
+ // result: (MULLconst [int64(int32(c * d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MULLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64MULLconst)
+ v.AuxInt = int64(int32(c * d))
+ v.AddArg(x)
+ return true
+ }
// match: (MULLconst [c] (MOVLconst [d]))
// cond:
// result: (MOVLconst [int64(int32(c*d))])
@@ -11501,6 +9752,22 @@ func rewriteValueAMD64_OpAMD64MULQ(v *Value, config *Config) bool {
func rewriteValueAMD64_OpAMD64MULQconst(v *Value, config *Config) bool {
b := v.Block
_ = b
+ // match: (MULQconst [c] (MULQconst [d] x))
+ // cond:
+ // result: (MULQconst [c * d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MULQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64MULQconst)
+ v.AuxInt = c * d
+ v.AddArg(x)
+ return true
+ }
// match: (MULQconst [-1] x)
// cond:
// result: (NEGQ x)
@@ -11869,578 +10136,6 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value, config *Config) bool {
}
return false
}
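The two new rules at the top of MULLconst and MULQconst collapse stacked constant multiplies into one. The 32-bit version truncates the product through int64(int32(c*d)) so the AuxInt holds the value a real 32-bit multiply would produce; the 64-bit version multiplies the AuxInts directly. A worked example of the truncation:

func foldMULLconst(c, d int64) int64 {
	// Matches the rule's result: (MULLconst [int64(int32(c * d))] x).
	return int64(int32(c * d))
}

// foldMULLconst(1<<20, 1<<20) == 0: the 2^40 product wraps mod 2^32,
// exactly as back-to-back 32-bit multiplies would.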
-func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod16 x y)
- // cond:
- // result: (MODW x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MODW)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpMod16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod16u x y)
- // cond:
- // result: (MODWU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MODWU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpMod32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod32 x y)
- // cond:
- // result: (MODL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MODL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpMod32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod32u x y)
- // cond:
- // result: (MODLU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MODLU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpMod64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod64 x y)
- // cond:
- // result: (MODQ x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MODQ)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpMod64u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod64u x y)
- // cond:
- // result: (MODQU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MODQU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpMod8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod8 x y)
- // cond:
- // result: (MODW (SignExt8to16 x) (SignExt8to16 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MODW)
- v0 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpMod8u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod8u x y)
- // cond:
- // result: (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MODWU)
- v0 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpMove(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Move [0] _ _ mem)
- // cond:
- // result: mem
- for {
- if v.AuxInt != 0 {
- break
- }
- mem := v.Args[2]
- v.reset(OpCopy)
- v.Type = mem.Type
- v.AddArg(mem)
- return true
- }
- // match: (Move [1] dst src mem)
- // cond:
- // result: (MOVBstore dst (MOVBload src mem) mem)
- for {
- if v.AuxInt != 1 {
- break
- }
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- v.reset(OpAMD64MOVBstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [2] dst src mem)
- // cond:
- // result: (MOVWstore dst (MOVWload src mem) mem)
- for {
- if v.AuxInt != 2 {
- break
- }
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- v.reset(OpAMD64MOVWstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [4] dst src mem)
- // cond:
- // result: (MOVLstore dst (MOVLload src mem) mem)
- for {
- if v.AuxInt != 4 {
- break
- }
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- v.reset(OpAMD64MOVLstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [8] dst src mem)
- // cond:
- // result: (MOVQstore dst (MOVQload src mem) mem)
- for {
- if v.AuxInt != 8 {
- break
- }
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- v.reset(OpAMD64MOVQstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [16] dst src mem)
- // cond:
- // result: (MOVOstore dst (MOVOload src mem) mem)
- for {
- if v.AuxInt != 16 {
- break
- }
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- v.reset(OpAMD64MOVOstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128)
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [3] dst src mem)
- // cond:
- // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
- for {
- if v.AuxInt != 3 {
- break
- }
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- v.reset(OpAMD64MOVBstore)
- v.AuxInt = 2
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
- v0.AuxInt = 2
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64MOVWstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [5] dst src mem)
- // cond:
- // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
- for {
- if v.AuxInt != 5 {
- break
- }
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- v.reset(OpAMD64MOVBstore)
- v.AuxInt = 4
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
- v0.AuxInt = 4
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [6] dst src mem)
- // cond:
- // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
- for {
- if v.AuxInt != 6 {
- break
- }
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- v.reset(OpAMD64MOVWstore)
- v.AuxInt = 4
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
- v0.AuxInt = 4
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [7] dst src mem)
- // cond:
- // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
- for {
- if v.AuxInt != 7 {
- break
- }
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- v.reset(OpAMD64MOVLstore)
- v.AuxInt = 3
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
- v0.AuxInt = 3
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [size] dst src mem)
- // cond: size > 8 && size < 16
- // result: (MOVQstore [size-8] dst (MOVQload [size-8] src mem) (MOVQstore dst (MOVQload src mem) mem))
- for {
- size := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(size > 8 && size < 16) {
- break
- }
- v.reset(OpAMD64MOVQstore)
- v.AuxInt = size - 8
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
- v0.AuxInt = size - 8
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [size] dst src mem)
- // cond: size > 16 && size%16 != 0 && size%16 <= 8
- // result: (Move [size-size%16] (ADDQconst <dst.Type> dst [size%16]) (ADDQconst <src.Type> src [size%16]) (MOVQstore dst (MOVQload src mem) mem))
- for {
- size := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(size > 16 && size%16 != 0 && size%16 <= 8) {
- break
- }
- v.reset(OpMove)
- v.AuxInt = size - size%16
- v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, dst.Type)
- v0.AddArg(dst)
- v0.AuxInt = size % 16
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64ADDQconst, src.Type)
- v1.AddArg(src)
- v1.AuxInt = size % 16
- v.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
- v2.AddArg(dst)
- v3 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
- v3.AddArg(src)
- v3.AddArg(mem)
- v2.AddArg(v3)
- v2.AddArg(mem)
- v.AddArg(v2)
- return true
- }
- // match: (Move [size] dst src mem)
- // cond: size > 16 && size%16 != 0 && size%16 > 8
- // result: (Move [size-size%16] (ADDQconst <dst.Type> dst [size%16]) (ADDQconst <src.Type> src [size%16]) (MOVOstore dst (MOVOload src mem) mem))
- for {
- size := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(size > 16 && size%16 != 0 && size%16 > 8) {
- break
- }
- v.reset(OpMove)
- v.AuxInt = size - size%16
- v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, dst.Type)
- v0.AddArg(dst)
- v0.AuxInt = size % 16
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64ADDQconst, src.Type)
- v1.AddArg(src)
- v1.AuxInt = size % 16
- v.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpAMD64MOVOstore, TypeMem)
- v2.AddArg(dst)
- v3 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128)
- v3.AddArg(src)
- v3.AddArg(mem)
- v2.AddArg(v3)
- v2.AddArg(mem)
- v.AddArg(v2)
- return true
- }
- // match: (Move [size] dst src mem)
- // cond: size >= 32 && size <= 16*64 && size%16 == 0 && !config.noDuffDevice
- // result: (DUFFCOPY [14*(64-size/16)] dst src mem)
- for {
- size := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(size >= 32 && size <= 16*64 && size%16 == 0 && !config.noDuffDevice) {
- break
- }
- v.reset(OpAMD64DUFFCOPY)
- v.AuxInt = 14 * (64 - size/16)
- v.AddArg(dst)
- v.AddArg(src)
- v.AddArg(mem)
- return true
- }
- // match: (Move [size] dst src mem)
- // cond: (size > 16*64 || config.noDuffDevice) && size%8 == 0
- // result: (REPMOVSQ dst src (MOVQconst [size/8]) mem)
- for {
- size := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !((size > 16*64 || config.noDuffDevice) && size%8 == 0) {
- break
- }
- v.reset(OpAMD64REPMOVSQ)
- v.AddArg(dst)
- v.AddArg(src)
- v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
- v0.AuxInt = size / 8
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- return false
-}
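
Taken together, the Move rules form a size ladder: power-of-two sizes up to 16 become a single load/store pair, other small sizes peel a (possibly overlapping) tail store, larger unaligned sizes peel size%16 bytes and recurse, 16-byte multiples up to 1 KB jump into the runtime's Duff-device copy, and everything else falls back to REP MOVSQ. The DUFFCOPY AuxInt of 14*(64-size/16) is an entry offset: each 16-byte chunk of the amd64 duffcopy body assembled to 14 bytes of code at the time, so skipping 64-size/16 chunks leaves exactly size/16 of them to execute. A sketch of the dispatch — moveStrategy is a hypothetical helper mirroring the conditions above, not compiler API:

	package main

	import "fmt"

	func moveStrategy(size int64, noDuffDevice bool) string {
		switch {
		case size == 0:
			return "no-op"
		case size == 1 || size == 2 || size == 4 || size == 8 || size == 16:
			return "single load/store pair"
		case size < 16: // 3, 5, 6, 7 and 9..15: second pair may overlap the first
			return "two load/store pairs"
		case size%16 != 0: // peel the size%16 tail, then Move the aligned rest
			return "peel tail, then Move again"
		case size <= 16*64 && !noDuffDevice:
			return fmt.Sprintf("DUFFCOPY, entry offset %d", 14*(64-size/16))
		default:
			return "REP MOVSQ"
		}
	}

	func main() {
		for _, s := range []int64{1, 7, 13, 48, 2048} {
			fmt.Println(s, "->", moveStrategy(s, false))
		}
	}
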
-func rewriteValueAMD64_OpMul16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul16 x y)
- // cond:
- // result: (MULL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MULL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpMul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul32 x y)
- // cond:
- // result: (MULL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MULL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpMul32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul32F x y)
- // cond:
- // result: (MULSS x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MULSS)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpMul64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul64 x y)
- // cond:
- // result: (MULQ x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MULQ)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpMul64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul64F x y)
- // cond:
- // result: (MULSD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MULSD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpMul8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul8 x y)
- // cond:
- // result: (MULL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MULL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
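
All the integer multiplies at or below 32 bits share MULL: a truncated multiply only needs the low-order bits of its inputs to determine the low-order bits of its result, so whatever sits in the upper register bits is harmless. Concretely:

	package main

	import "fmt"

	func main() {
		x, y := int8(-100), int8(57)
		narrow := x * y                   // 8-bit multiply, wraps mod 2^8
		wide := int8(int32(x) * int32(y)) // what lowering Mul8 to MULL computes
		fmt.Println(narrow == wide, narrow, wide) // true -68 -68
	}
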
func rewriteValueAMD64_OpAMD64NEGL(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -12513,255 +10208,6 @@ func rewriteValueAMD64_OpAMD64NOTQ(v *Value, config *Config) bool {
}
return false
}
-func rewriteValueAMD64_OpNeg16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg16 x)
- // cond:
- // result: (NEGL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64NEGL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpNeg32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg32 x)
- // cond:
- // result: (NEGL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64NEGL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpNeg32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg32F x)
- // cond:
- // result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
- for {
- x := v.Args[0]
- v.reset(OpAMD64PXOR)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64MOVSSconst, config.Frontend().TypeFloat32())
- v0.AuxInt = f2i(math.Copysign(0, -1))
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeg64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg64 x)
- // cond:
- // result: (NEGQ x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64NEGQ)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpNeg64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg64F x)
- // cond:
- // result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
- for {
- x := v.Args[0]
- v.reset(OpAMD64PXOR)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64MOVSDconst, config.Frontend().TypeFloat64())
- v0.AuxInt = f2i(math.Copysign(0, -1))
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeg8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg8 x)
- // cond:
- // result: (NEGL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64NEGL)
- v.AddArg(x)
- return true
- }
-}
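
Neg32F and Neg64F do not subtract from zero; they XOR the sign bit, using f2i to pack the bit pattern of Copysign(0, -1) into the AuxInt. Flipping only the sign bit is exact for every value, including ±0, ±Inf, and NaN, whereas 0-x would get -0 wrong (0 - (+0) yields +0, not -0). A plain-Go rendering of the same trick:

	package main

	import (
		"fmt"
		"math"
	)

	func negate64(x float64) float64 {
		signBit := math.Float64bits(math.Copysign(0, -1)) // 0x8000000000000000
		return math.Float64frombits(math.Float64bits(x) ^ signBit)
	}

	func main() {
		for _, x := range []float64{1.5, math.Copysign(0, -1), math.Inf(1)} {
			fmt.Println(x, "->", negate64(x))
		}
	}
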
-func rewriteValueAMD64_OpNeq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq16 x y)
- // cond:
- // result: (SETNE (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq32 x y)
- // cond:
- // result: (SETNE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq32F x y)
- // cond:
- // result: (SETNEF (UCOMISS x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETNEF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeq64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq64 x y)
- // cond:
- // result: (SETNE (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq64F x y)
- // cond:
- // result: (SETNEF (UCOMISD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETNEF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq8 x y)
- // cond:
- // result: (SETNE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeqB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NeqB x y)
- // cond:
- // result: (SETNE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeqPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NeqPtr x y)
- // cond:
- // result: (SETNE (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNilCheck(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NilCheck ptr mem)
- // cond:
- // result: (LoweredNilCheck ptr mem)
- for {
- ptr := v.Args[0]
- mem := v.Args[1]
- v.reset(OpAMD64LoweredNilCheck)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueAMD64_OpNot(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Not x)
- // cond:
- // result: (XORLconst [1] x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64XORLconst)
- v.AuxInt = 1
- v.AddArg(x)
- return true
- }
-}
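
Not can lower to XORLconst [1] only because the backend materializes booleans strictly as the integers 0 and 1, so toggling the lowest bit is logical negation:

	package main

	import "fmt"

	func main() {
		for _, b := range []uint8{0, 1} {
			fmt.Println(b, "^ 1 =", b^1) // 0 -> 1, 1 -> 0
		}
	}
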
func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -13711,927 +11157,141 @@ func rewriteValueAMD64_OpAMD64ORQconst(v *Value, config *Config) bool {
}
return false
}
-func rewriteValueAMD64_OpOffPtr(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64ROLBconst(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (OffPtr [off] ptr)
- // cond: is32Bit(off)
- // result: (ADDQconst [off] ptr)
+ // match: (ROLBconst [c] (ROLBconst [d] x))
+ // cond:
+	// result: (ROLBconst [(c+d)&7] x)
for {
- off := v.AuxInt
- ptr := v.Args[0]
- if !(is32Bit(off)) {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ROLBconst {
break
}
- v.reset(OpAMD64ADDQconst)
- v.AuxInt = off
- v.AddArg(ptr)
- return true
- }
- // match: (OffPtr [off] ptr)
- // cond:
- // result: (ADDQ (MOVQconst [off]) ptr)
- for {
- off := v.AuxInt
- ptr := v.Args[0]
- v.reset(OpAMD64ADDQ)
- v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
- v0.AuxInt = off
- v.AddArg(v0)
- v.AddArg(ptr)
- return true
- }
-}
-func rewriteValueAMD64_OpOr16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Or16 x y)
- // cond:
- // result: (ORL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ORL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpOr32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Or32 x y)
- // cond:
- // result: (ORL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ORL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpOr64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Or64 x y)
- // cond:
- // result: (ORQ x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ORQ)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpOr8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Or8 x y)
- // cond:
- // result: (ORL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ORL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpOrB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (OrB x y)
- // cond:
- // result: (ORL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ORL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh16Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux16 <t> x y)
- // cond:
- // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 16
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh16Ux32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux32 <t> x y)
- // cond:
- // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 16
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh16Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux64 <t> x y)
- // cond:
- // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 16
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh16Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux8 <t> x y)
- // cond:
- // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 16
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
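
Go requires x >> y to be 0 once y reaches the operand width, but the x86 shift instructions mask the count instead. The unsigned Rsh rules above therefore AND the raw hardware shift with a mask built from a compare: SBBLcarrymask turns the carry of CMP y, 16 into all ones (y < 16) or zero (y >= 16). Behaviourally, with rsh16Ux as a hand-written stand-in:

	package main

	import "fmt"

	func rsh16Ux(x uint16, y uint) uint16 {
		hw := x >> (y & 31) // SHRW: hardware masks the count to 5 bits
		var mask uint16     // SBBLcarrymask(CMPWconst y [16]): all ones iff y < 16
		if y < 16 {
			mask = 0xffff
		}
		return hw & mask // ANDL
	}

	func main() {
		fmt.Printf("%#x %#x\n", rsh16Ux(0x8000, 3), rsh16Ux(0x8000, 17)) // 0x1000 0x0
	}
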
-func rewriteValueAMD64_OpRsh16x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x16 <t> x y)
- // cond:
- // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARW)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 16
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh16x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x32 <t> x y)
- // cond:
- // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARW)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 16
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh16x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x64 <t> x y)
- // cond:
- // result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARW)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 16
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh16x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x8 <t> x y)
- // cond:
- // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARW)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 16
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
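
The signed Rsh rules cannot mask the result to zero, because an over-long arithmetic shift must fill with copies of the sign bit. Instead they saturate the count: ORing y with the complement of the carry mask leaves y untouched when y < 16 and produces an all-ones count otherwise, and SARW by such a count sign-fills the whole result. In plain Go terms (rsh16x is illustrative only):

	package main

	import "fmt"

	func rsh16x(x int16, y uint) int16 {
		count := y // (ORL y (NOTL (SBBLcarrymask (CMPWconst y [16]))))
		if y >= 16 {
			count = 31 // all-ones count as seen through SARW's 5-bit mask
		}
		return x >> count // SARW: sign-fills for large counts
	}

	func main() {
		fmt.Println(rsh16x(-32768, 3), rsh16x(-32768, 100)) // -4096 -1
	}
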
-func rewriteValueAMD64_OpRsh32Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux16 <t> x y)
- // cond:
- // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh32Ux32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux32 <t> x y)
- // cond:
- // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh32Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux64 <t> x y)
- // cond:
- // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh32Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux8 <t> x y)
- // cond:
- // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh32x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x16 <t> x y)
- // cond:
- // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARL)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 32
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh32x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x32 <t> x y)
- // cond:
- // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARL)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 32
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh32x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x64 <t> x y)
- // cond:
- // result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARL)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 32
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh32x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x8 <t> x y)
- // cond:
- // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARL)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 32
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh64Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh64Ux16 <t> x y)
- // cond:
- // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 64
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh64Ux32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh64Ux32 <t> x y)
- // cond:
- // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 64
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh64Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh64Ux64 <t> x y)
- // cond:
- // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 64
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh64Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh64Ux8 <t> x y)
- // cond:
- // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 64
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh64x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh64x16 <t> x y)
- // cond:
- // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARQ)
- v.Type = t
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ROLBconst)
+ v.AuxInt = (c + d) & 7
v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 64
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
return true
}
-}
-func rewriteValueAMD64_OpRsh64x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh64x32 <t> x y)
+ // match: (ROLBconst [0] x)
// cond:
- // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
+ // result: x
for {
- t := v.Type
+ if v.AuxInt != 0 {
+ break
+ }
x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARQ)
- v.Type = t
+ v.reset(OpCopy)
+ v.Type = x.Type
v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 64
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
return true
}
+ return false
}
-func rewriteValueAMD64_OpRsh64x64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64ROLLconst(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Rsh64x64 <t> x y)
+ // match: (ROLLconst [c] (ROLLconst [d] x))
// cond:
- // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
+ // result: (ROLLconst [(c+d)&31] x)
for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARQ)
- v.Type = t
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ROLLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ROLLconst)
+ v.AuxInt = (c + d) & 31
v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 64
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
return true
}
-}
-func rewriteValueAMD64_OpRsh64x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh64x8 <t> x y)
+ // match: (ROLLconst [0] x)
// cond:
- // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
+ // result: x
for {
- t := v.Type
+ if v.AuxInt != 0 {
+ break
+ }
x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARQ)
- v.Type = t
+ v.reset(OpCopy)
+ v.Type = x.Type
v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 64
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh8Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux16 <t> x y)
- // cond:
- // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 8
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh8Ux32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux32 <t> x y)
- // cond:
- // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 8
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh8Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux64 <t> x y)
- // cond:
- // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 8
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh8Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux8 <t> x y)
- // cond:
- // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 8
- v1.AddArg(v2)
- v.AddArg(v1)
return true
}
+ return false
}
-func rewriteValueAMD64_OpRsh8x16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64ROLQconst(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Rsh8x16 <t> x y)
+ // match: (ROLQconst [c] (ROLQconst [d] x))
// cond:
- // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
+ // result: (ROLQconst [(c+d)&63] x)
for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARB)
- v.Type = t
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ROLQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ROLQconst)
+ v.AuxInt = (c + d) & 63
v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 8
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
return true
}
-}
-func rewriteValueAMD64_OpRsh8x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x32 <t> x y)
+ // match: (ROLQconst [0] x)
// cond:
- // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
+ // result: x
for {
- t := v.Type
+ if v.AuxInt != 0 {
+ break
+ }
x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARB)
- v.Type = t
+ v.reset(OpCopy)
+ v.Type = x.Type
v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 8
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
return true
}
+ return false
}
-func rewriteValueAMD64_OpRsh8x64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64ROLWconst(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Rsh8x64 <t> x y)
+ // match: (ROLWconst [c] (ROLWconst [d] x))
// cond:
- // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
+ // result: (ROLWconst [(c+d)&15] x)
for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARB)
- v.Type = t
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ROLWconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ROLWconst)
+ v.AuxInt = (c + d) & 15
v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 8
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
return true
}
-}
-func rewriteValueAMD64_OpRsh8x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x8 <t> x y)
+ // match: (ROLWconst [0] x)
// cond:
- // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
+ // result: x
for {
- t := v.Type
+ if v.AuxInt != 0 {
+ break
+ }
x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARB)
- v.Type = t
+ v.reset(OpCopy)
+ v.Type = x.Type
v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 8
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
return true
}
+ return false
}
func rewriteValueAMD64_OpAMD64SARB(v *Value, config *Config) bool {
b := v.Block
@@ -16091,8 +12751,8 @@ func rewriteValueAMD64_OpAMD64SUBL(v *Value, config *Config) bool {
}
c := v_1.AuxInt
v.reset(OpAMD64SUBLconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (SUBL (MOVLconst [c]) x)
@@ -16107,8 +12767,8 @@ func rewriteValueAMD64_OpAMD64SUBL(v *Value, config *Config) bool {
x := v.Args[1]
v.reset(OpAMD64NEGL)
v0 := b.NewValue0(v.Line, OpAMD64SUBLconst, v.Type)
- v0.AddArg(x)
v0.AuxInt = c
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -16172,8 +12832,8 @@ func rewriteValueAMD64_OpAMD64SUBQ(v *Value, config *Config) bool {
break
}
v.reset(OpAMD64SUBQconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (SUBQ (MOVQconst [c]) x)
@@ -16191,8 +12851,8 @@ func rewriteValueAMD64_OpAMD64SUBQ(v *Value, config *Config) bool {
}
v.reset(OpAMD64NEGQ)
v0 := b.NewValue0(v.Line, OpAMD64SUBQconst, v.Type)
- v0.AddArg(x)
v0.AuxInt = c
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -16244,12 +12904,12 @@ func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool {
// cond:
// result: (MOVQconst [d-c])
for {
+ c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVQconst {
break
}
d := v_0.AuxInt
- c := v.AuxInt
v.reset(OpAMD64MOVQconst)
v.AuxInt = d - c
return true
@@ -16258,13 +12918,13 @@ func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool {
// cond: is32Bit(-c-d)
// result: (ADDQconst [-c-d] x)
for {
+ c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64SUBQconst {
break
}
- x := v_0.Args[0]
d := v_0.AuxInt
- c := v.AuxInt
+ x := v_0.Args[0]
if !(is32Bit(-c - d)) {
break
}
@@ -16275,6 +12935,4532 @@ func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool {
}
return false
}
+func rewriteValueAMD64_OpAMD64XORL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORL x (MOVLconst [c]))
+ // cond:
+ // result: (XORLconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORL (MOVLconst [c]) x)
+ // cond:
+ // result: (XORLconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORL x x)
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORLconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORLconst [c] (XORLconst [d] x))
+ // cond:
+ // result: (XORLconst [c ^ d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64XORLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = c ^ d
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [c] x)
+ // cond: int32(c)==0
+ // result: x
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [c] (MOVLconst [d]))
+ // cond:
+ // result: (MOVLconst [c^d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = c ^ d
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORQ(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (XORQconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORQ (MOVQconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (XORQconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORQ x x)
+ // cond:
+ // result: (MOVQconst [0])
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORQconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORQconst [c] (XORQconst [d] x))
+ // cond:
+ // result: (XORQconst [c ^ d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64XORQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = c ^ d
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORQconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORQconst [c] (MOVQconst [d]))
+ // cond:
+ // result: (MOVQconst [c^d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = c ^ d
+ return true
+ }
+ return false
+}
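
The new XOR rules encode three identities: chained constants fold (c^(d^x) == (c^d)^x), XOR with a constant whose relevant bits are all zero is the identity, and x^x is zero — the last being the conventional way to materialize a zero register on x86. All three are easy to spot-check:

	package main

	import "fmt"

	func main() {
		x := uint32(0xdeadbeef)
		c, d := uint32(0xff00ff00), uint32(0x0f0f0f0f)
		fmt.Println(x^d^c == x^(c^d)) // (XORLconst [c] (XORLconst [d] x)) -> (XORLconst [c^d] x)
		fmt.Println(x^0 == x, x^x == 0)
	}
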
+func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add16 x y)
+ // cond:
+ // result: (ADDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ADDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAdd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32 x y)
+ // cond:
+ // result: (ADDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ADDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAdd32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32F x y)
+ // cond:
+ // result: (ADDSS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ADDSS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAdd64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add64 x y)
+ // cond:
+ // result: (ADDQ x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ADDQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAdd64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add64F x y)
+ // cond:
+ // result: (ADDSD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ADDSD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAdd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add8 x y)
+ // cond:
+ // result: (ADDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ADDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAddPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AddPtr x y)
+ // cond: config.PtrSize == 8
+ // result: (ADDQ x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpAMD64ADDQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (AddPtr x y)
+ // cond: config.PtrSize == 4
+ // result: (ADDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpAMD64ADDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAddr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Addr {sym} base)
+ // cond: config.PtrSize == 8
+ // result: (LEAQ {sym} base)
+ for {
+ sym := v.Aux
+ base := v.Args[0]
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.Aux = sym
+ v.AddArg(base)
+ return true
+ }
+ // match: (Addr {sym} base)
+ // cond: config.PtrSize == 4
+ // result: (LEAL {sym} base)
+ for {
+ sym := v.Aux
+ base := v.Args[0]
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpAMD64LEAL)
+ v.Aux = sym
+ v.AddArg(base)
+ return true
+ }
+ return false
+}
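
AddPtr and Addr (and ConstNil and Convert further down) branch on config.PtrSize rather than assuming 8-byte pointers, presumably so this one file can also serve the amd64p32 port, where pointers are 32 bits wide inside 64-bit registers. The dispatch reduces to the following (pointerOp is a hypothetical illustration):

	package main

	import "fmt"

	func pointerOp(ptrSize int) string {
		if ptrSize == 8 {
			return "ADDQ / LEAQ" // full 64-bit pointers
		}
		return "ADDL / LEAL" // 32-bit pointers (amd64p32)
	}

	func main() {
		fmt.Println(pointerOp(8), pointerOp(4))
	}
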
+func rewriteValueAMD64_OpAnd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And16 x y)
+ // cond:
+ // result: (ANDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAnd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And32 x y)
+ // cond:
+ // result: (ANDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAnd64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And64 x y)
+ // cond:
+ // result: (ANDQ x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAnd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And8 x y)
+ // cond:
+ // result: (ANDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAndB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AndB x y)
+ // cond:
+ // result: (ANDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAvg64u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Avg64u x y)
+ // cond:
+ // result: (AVGQU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64AVGQU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpBswap32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Bswap32 x)
+ // cond:
+ // result: (BSWAPL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64BSWAPL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpBswap64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Bswap64 x)
+ // cond:
+ // result: (BSWAPQ x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64BSWAPQ)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpClosureCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ClosureCall [argwid] entry closure mem)
+ // cond:
+ // result: (CALLclosure [argwid] entry closure mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ closure := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpAMD64CALLclosure)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(closure)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCom16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com16 x)
+ // cond:
+ // result: (NOTL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64NOTL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCom32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com32 x)
+ // cond:
+ // result: (NOTL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64NOTL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCom64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com64 x)
+ // cond:
+ // result: (NOTQ x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64NOTQ)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCom8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com8 x)
+ // cond:
+ // result: (NOTL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64NOTL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpConst16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const16 [val])
+ // cond:
+ // result: (MOVLconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueAMD64_OpConst32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32 [val])
+ // cond:
+ // result: (MOVLconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueAMD64_OpConst32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32F [val])
+ // cond:
+ // result: (MOVSSconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpAMD64MOVSSconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueAMD64_OpConst64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const64 [val])
+ // cond:
+ // result: (MOVQconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueAMD64_OpConst64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const64F [val])
+ // cond:
+ // result: (MOVSDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpAMD64MOVSDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueAMD64_OpConst8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const8 [val])
+ // cond:
+ // result: (MOVLconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueAMD64_OpConstBool(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstBool [b])
+ // cond:
+ // result: (MOVLconst [b])
+ for {
+ b := v.AuxInt
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = b
+ return true
+ }
+}
+func rewriteValueAMD64_OpConstNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstNil)
+ // cond: config.PtrSize == 8
+ // result: (MOVQconst [0])
+ for {
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (ConstNil)
+ // cond: config.PtrSize == 4
+ // result: (MOVLconst [0])
+ for {
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpConvert(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Convert <t> x mem)
+ // cond: config.PtrSize == 8
+ // result: (MOVQconvert <t> x mem)
+ for {
+ t := v.Type
+ x := v.Args[0]
+ mem := v.Args[1]
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpAMD64MOVQconvert)
+ v.Type = t
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Convert <t> x mem)
+ // cond: config.PtrSize == 4
+ // result: (MOVLconvert <t> x mem)
+ for {
+ t := v.Type
+ x := v.Args[0]
+ mem := v.Args[1]
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpAMD64MOVLconvert)
+ v.Type = t
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpCtz16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Ctz16 <t> x)
+ // cond:
+ // result: (CMOVWEQconst (BSFW <t> x) (CMPWconst x [0]) [16])
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v.reset(OpAMD64CMOVWEQconst)
+ v.AuxInt = 16
+ v0 := b.NewValue0(v.Line, OpAMD64BSFW, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v1.AuxInt = 0
+ v1.AddArg(x)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCtz32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Ctz32 <t> x)
+ // cond:
+ // result: (CMOVLEQconst (BSFL <t> x) (CMPLconst x [0]) [32])
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v.reset(OpAMD64CMOVLEQconst)
+ v.AuxInt = 32
+ v0 := b.NewValue0(v.Line, OpAMD64BSFL, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v1.AuxInt = 0
+ v1.AddArg(x)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCtz64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Ctz64 <t> x)
+ // cond:
+ // result: (CMOVQEQconst (BSFQ <t> x) (CMPQconst x [0]) [64])
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v.reset(OpAMD64CMOVQEQconst)
+ v.AuxInt = 64
+ v0 := b.NewValue0(v.Line, OpAMD64BSFQ, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v1.AuxInt = 0
+ v1.AddArg(x)
+ v.AddArg(v1)
+ return true
+ }
+}
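
BSF leaves its destination undefined when the source is zero, so each Ctz rule pairs the bit scan with a compare against zero and a conditional move that substitutes the operand width (the AuxInt of 16, 32, or 64). The net behaviour, written out by hand:

	package main

	import "fmt"

	func ctz32(x uint32) int {
		if x == 0 { // CMPLconst x [0]; CMOVLEQconst picks the width
			return 32
		}
		n := 0 // BSFL: index of the lowest set bit
		for x&1 == 0 {
			x >>= 1
			n++
		}
		return n
	}

	func main() {
		fmt.Println(ctz32(0), ctz32(8), ctz32(1)) // 32 3 0
	}
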
+func rewriteValueAMD64_OpCvt32Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto32 x)
+ // cond:
+ // result: (CVTTSS2SL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTTSS2SL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt32Fto64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto64 x)
+ // cond:
+ // result: (CVTTSS2SQ x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTTSS2SQ)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt32Fto64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto64F x)
+ // cond:
+ // result: (CVTSS2SD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTSS2SD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt32to32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to32F x)
+ // cond:
+ // result: (CVTSL2SS x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTSL2SS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt32to64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to64F x)
+ // cond:
+ // result: (CVTSL2SD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTSL2SD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt64Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32 x)
+ // cond:
+ // result: (CVTTSD2SL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTTSD2SL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt64Fto32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32F x)
+ // cond:
+ // result: (CVTSD2SS x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTSD2SS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt64Fto64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto64 x)
+ // cond:
+ // result: (CVTTSD2SQ x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTTSD2SQ)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt64to32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64to32F x)
+ // cond:
+ // result: (CVTSQ2SS x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTSQ2SS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt64to64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64to64F x)
+ // cond:
+ // result: (CVTSQ2SD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTSQ2SD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDeferCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (DeferCall [argwid] mem)
+ // cond:
+ // result: (CALLdefer [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(OpAMD64CALLdefer)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16 x y)
+ // cond:
+ // result: (Select0 (DIVW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16u x y)
+ // cond:
+ // result: (Select0 (DIVWU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32 x y)
+ // cond:
+ // result: (Select0 (DIVL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32F x y)
+ // cond:
+ // result: (DIVSS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64DIVSS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32u x y)
+ // cond:
+ // result: (Select0 (DIVLU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64 x y)
+ // cond:
+ // result: (Select0 (DIVQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64F x y)
+ // cond:
+ // result: (DIVSD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64DIVSD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv64u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64u x y)
+ // cond:
+ // result: (Select0 (DIVQU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8 x y)
+ // cond:
+ // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
+ v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8u x y)
+ // cond:
+ // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
+ v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
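+// Rather than model the 8-bit hardware divide, whose remainder comes back in
+// AH, Div8 and Div8u above widen their operands to 16 bits (sign-extending
+// for the signed form, zero-extending for the unsigned one) and reuse the
+// 16-bit divide.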
+func rewriteValueAMD64_OpEq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq16 x y)
+ // cond:
+ // result: (SETEQ (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32 x y)
+ // cond:
+ // result: (SETEQ (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32F x y)
+ // cond:
+ // result: (SETEQF (UCOMISS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETEQF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq64 x y)
+ // cond:
+ // result: (SETEQ (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq64F x y)
+ // cond:
+ // result: (SETEQF (UCOMISD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETEQF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
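+// SETEQF and SETNEF are pseudo-ops rather than single instructions: UCOMIS*
+// reports a NaN operand as "unordered" in the parity flag, so the code
+// generator later combines the equality condition with a parity check to get
+// Go's semantics, where NaN compares unequal to everything, itself included.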
+func rewriteValueAMD64_OpEq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq8 x y)
+ // cond:
+ // result: (SETEQ (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqB x y)
+ // cond:
+ // result: (SETEQ (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqPtr x y)
+ // cond: config.PtrSize == 8
+ // result: (SETEQ (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (EqPtr x y)
+ // cond: config.PtrSize == 4
+ // result: (SETEQ (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
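+// Pointer comparisons depend on config.PtrSize because the same lowering
+// serves both regular amd64 and the 4-byte-pointer amd64p32 variant; EqPtr
+// here (and NeqPtr, IsNonNil, OffPtr below) choose between the Q and L
+// instruction forms accordingly.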
+func rewriteValueAMD64_OpGeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16 x y)
+ // cond:
+ // result: (SETGE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16U x y)
+ // cond:
+ // result: (SETAE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32 x y)
+ // cond:
+ // result: (SETGE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32F x y)
+ // cond:
+ // result: (SETGEF (UCOMISS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGEF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32U x y)
+ // cond:
+ // result: (SETAE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64 x y)
+ // cond:
+ // result: (SETGE (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64F x y)
+ // cond:
+ // result: (SETGEF (UCOMISD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGEF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64U x y)
+ // cond:
+ // result: (SETAE (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8 x y)
+ // cond:
+ // result: (SETGE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8U x y)
+ // cond:
+ // result: (SETAE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
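+// The condition codes in these comparison lowerings follow x86 convention:
+// signed comparisons use the G/GE/L/LE conditions (SETG, SETGE, and so on),
+// while their unsigned counterparts use A/AE/B/BE ("above"/"below"), which
+// read the carry flag instead of the sign and overflow flags.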
+func rewriteValueAMD64_OpGetClosurePtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GetClosurePtr)
+ // cond:
+ // result: (LoweredGetClosurePtr)
+ for {
+ v.reset(OpAMD64LoweredGetClosurePtr)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGetG(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GetG mem)
+ // cond:
+ // result: (LoweredGetG mem)
+ for {
+ mem := v.Args[0]
+ v.reset(OpAMD64LoweredGetG)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGoCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GoCall [argwid] mem)
+ // cond:
+ // result: (CALLgo [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(OpAMD64CALLgo)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16 x y)
+ // cond:
+ // result: (SETG (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETG)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16U x y)
+ // cond:
+ // result: (SETA (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETA)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32 x y)
+ // cond:
+ // result: (SETG (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETG)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32F x y)
+ // cond:
+ // result: (SETGF (UCOMISS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32U x y)
+ // cond:
+ // result: (SETA (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETA)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64 x y)
+ // cond:
+ // result: (SETG (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETG)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64F x y)
+ // cond:
+ // result: (SETGF (UCOMISD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64U x y)
+ // cond:
+ // result: (SETA (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETA)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8 x y)
+ // cond:
+ // result: (SETG (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETG)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8U x y)
+ // cond:
+ // result: (SETA (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETA)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpHmul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16 x y)
+ // cond:
+ // result: (HMULW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64HMULW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpHmul16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16u x y)
+ // cond:
+ // result: (HMULWU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64HMULWU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpHmul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32 x y)
+ // cond:
+ // result: (HMULL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64HMULL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpHmul32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32u x y)
+ // cond:
+ // result: (HMULLU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64HMULLU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpHmul64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul64 x y)
+ // cond:
+ // result: (HMULQ x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64HMULQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpHmul64u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul64u x y)
+ // cond:
+ // result: (HMULQU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64HMULQU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpHmul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8 x y)
+ // cond:
+ // result: (HMULB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64HMULB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpHmul8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8u x y)
+ // cond:
+ // result: (HMULBU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64HMULBU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
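+// The Hmul ops return the high half of a full double-width product. They
+// lower to the HMUL pseudo-ops, wrappers around the one-operand x86 MUL/IMUL
+// forms, which leave that high half in a fixed register (DX for the wider
+// widths) while the low half is discarded.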
+func rewriteValueAMD64_OpInt64Hi(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Int64Hi x)
+ // cond:
+ // result: (SHRQconst [32] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64SHRQconst)
+ v.AuxInt = 32
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpInterCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (InterCall [argwid] entry mem)
+ // cond:
+ // result: (CALLinter [argwid] entry mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpAMD64CALLinter)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpIsInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsInBounds idx len)
+ // cond:
+ // result: (SETB (CMPQ idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpIsNonNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsNonNil p)
+ // cond: config.PtrSize == 8
+ // result: (SETNE (TESTQ p p))
+ for {
+ p := v.Args[0]
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Line, OpAMD64TESTQ, TypeFlags)
+ v0.AddArg(p)
+ v0.AddArg(p)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (IsNonNil p)
+ // cond: config.PtrSize == 4
+ // result: (SETNE (TESTL p p))
+ for {
+ p := v.Args[0]
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Line, OpAMD64TESTL, TypeFlags)
+ v0.AddArg(p)
+ v0.AddArg(p)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
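+// TESTQ p p ANDs the pointer with itself, setting the zero flag exactly when
+// p is nil; SETNE then materializes the boolean directly, with no need to
+// load a zero constant to compare against.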
+func rewriteValueAMD64_OpIsSliceInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsSliceInBounds idx len)
+ // cond:
+ // result: (SETBE (CMPQ idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
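+// Bounds checks lean on unsigned comparison: a negative index, viewed as an
+// unsigned number, is huge and fails the check just as an oversized one
+// does. IsInBounds uses the strict SETB since a valid index must be below
+// the length; IsSliceInBounds uses SETBE because a slice bound equal to the
+// length is legal.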
+func rewriteValueAMD64_OpLeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16 x y)
+ // cond:
+ // result: (SETLE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16U x y)
+ // cond:
+ // result: (SETBE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32 x y)
+ // cond:
+ // result: (SETLE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32F x y)
+ // cond:
+ // result: (SETGEF (UCOMISS y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGEF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
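+// The floating-point Less and Leq rules reverse their operands and test with
+// SETGF/SETGEF instead of a "less than" condition: after a UCOMIS* compare
+// involving NaN, the below-style conditions read as true, while the
+// above-style ones used here stay false, matching Go's rule that every
+// ordered comparison with a NaN operand is false.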
+func rewriteValueAMD64_OpLeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32U x y)
+ // cond:
+ // result: (SETBE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64 x y)
+ // cond:
+ // result: (SETLE (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64F x y)
+ // cond:
+ // result: (SETGEF (UCOMISD y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGEF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64U x y)
+ // cond:
+ // result: (SETBE (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8 x y)
+ // cond:
+ // result: (SETLE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8U x y)
+ // cond:
+ // result: (SETBE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16 x y)
+ // cond:
+ // result: (SETL (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETL)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16U x y)
+ // cond:
+ // result: (SETB (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32 x y)
+ // cond:
+ // result: (SETL (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETL)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32F x y)
+ // cond:
+ // result: (SETGF (UCOMISS y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32U x y)
+ // cond:
+ // result: (SETB (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64 x y)
+ // cond:
+ // result: (SETL (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETL)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64F x y)
+ // cond:
+ // result: (SETGF (UCOMISD y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64U x y)
+ // cond:
+ // result: (SETB (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8 x y)
+ // cond:
+ // result: (SETL (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETL)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8U x y)
+ // cond:
+ // result: (SETB (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLoad(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8)
+ // result: (MOVQload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpAMD64MOVQload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4)
+ // result: (MOVLload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpAMD64MOVLload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t)
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVWload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (t.IsBoolean() || is8BitInt(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(t.IsBoolean() || is8BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVBload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVSSload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVSSload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVSDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVSDload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
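+// Load carries no size of its own, so the rules above dispatch entirely on
+// the Go type being loaded: integer width (with pointers folded into the
+// 8-byte or 4-byte case by config.PtrSize), booleans as single bytes, and
+// the two float widths via the SSE MOVSS/MOVSD loads.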
+func rewriteValueAMD64_OpLrot16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot16 <t> x [c])
+ // cond:
+ // result: (ROLWconst <t> [c&15] x)
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpAMD64ROLWconst)
+ v.Type = t
+ v.AuxInt = c & 15
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLrot32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot32 <t> x [c])
+ // cond:
+ // result: (ROLLconst <t> [c&31] x)
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpAMD64ROLLconst)
+ v.Type = t
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLrot64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot64 <t> x [c])
+ // cond:
+ // result: (ROLQconst <t> [c&63] x)
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpAMD64ROLQconst)
+ v.Type = t
+ v.AuxInt = c & 63
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLrot8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot8 <t> x [c])
+ // cond:
+ // result: (ROLBconst <t> [c&7] x)
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpAMD64ROLBconst)
+ v.Type = t
+ v.AuxInt = c & 7
+ v.AddArg(x)
+ return true
+ }
+}
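+// Constant left-rotate counts are reduced modulo the operand width (c&63,
+// c&31, c&15, c&7) before being encoded as the ROL*const immediate, so any
+// constant rotation amount is accepted.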
+func rewriteValueAMD64_OpLsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x16 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
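+// This rule and the other Lsh lowerings below share one masking trick. Go
+// requires a shift count of at least the operand width to yield 0, but the
+// hardware shift uses only the low bits of the count. CMP*const y [32] sets
+// the carry flag when y is unsigned-below 32, SBBLcarrymask expands that
+// carry into an all-ones or all-zero word, and the final AND either keeps
+// the SHLL result or forces it to 0. Comparing against 32 rather than 16 is
+// sufficient for the narrow shifts: a left shift of a 16-bit value by 16
+// through 31 already leaves its low 16 bits zero.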
+func rewriteValueAMD64_OpLsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x32 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x64 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh16x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x8 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x16 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh32x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x32 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x64 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x8 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh64x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x16 <t> x y)
+ // cond:
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh64x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x32 <t> x y)
+ // cond:
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh64x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x64 <t> x y)
+ // cond:
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh64x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x8 <t> x y)
+ // cond:
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x16 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x32 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x64 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x8 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16 x y)
+ // cond:
+ // result: (Select1 (DIVW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16u x y)
+ // cond:
+ // result: (Select1 (DIVWU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32 x y)
+ // cond:
+ // result: (Select1 (DIVL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32u x y)
+ // cond:
+ // result: (Select1 (DIVLU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod64 x y)
+ // cond:
+ // result: (Select1 (DIVQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod64u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod64u x y)
+ // cond:
+ // result: (Select1 (DIVQU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8 x y)
+ // cond:
+ // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
+ v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8u x y)
+ // cond:
+ // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
+ v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
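+// The Mod rules mirror the Div rules exactly, picking the remainder half of
+// the divide tuple with Select1 where Div picks the quotient with Select0;
+// common-subexpression elimination can then fold a matching division and
+// modulus into one hardware divide.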
+func rewriteValueAMD64_OpMove(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Move [s] _ _ mem)
+ // cond: SizeAndAlign(s).Size() == 0
+ // result: mem
+ for {
+ s := v.AuxInt
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = mem.Type
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 1
+ // result: (MOVBstore dst (MOVBload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 1) {
+ break
+ }
+ v.reset(OpAMD64MOVBstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 2
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 2) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 4
+ // result: (MOVLstore dst (MOVLload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 4) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 8
+ // result: (MOVQstore dst (MOVQload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 8) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 16
+ // result: (MOVOstore dst (MOVOload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 16) {
+ break
+ }
+ v.reset(OpAMD64MOVOstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128)
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 3
+ // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 3) {
+ break
+ }
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = 2
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
+ v0.AuxInt = 2
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64MOVWstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 5
+ // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 5) {
+ break
+ }
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = 4
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
+ v0.AuxInt = 4
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 6
+ // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 6) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = 4
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
+ v0.AuxInt = 4
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 7
+ // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 7) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = 3
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
+ v0.AuxInt = 3
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16
+ // result: (MOVQstore [SizeAndAlign(s).Size()-8] dst (MOVQload [SizeAndAlign(s).Size()-8] src mem) (MOVQstore dst (MOVQload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = SizeAndAlign(s).Size() - 8
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
+ v0.AuxInt = SizeAndAlign(s).Size() - 8
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8
+ // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16]) (OffPtr <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVQstore dst (MOVQload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16
+ v0 := b.NewValue0(v.Line, OpOffPtr, dst.Type)
+ v0.AuxInt = SizeAndAlign(s).Size() % 16
+ v0.AddArg(dst)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpOffPtr, src.Type)
+ v1.AuxInt = SizeAndAlign(s).Size() % 16
+ v1.AddArg(src)
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
+ v3.AddArg(src)
+ v3.AddArg(mem)
+ v2.AddArg(v3)
+ v2.AddArg(mem)
+ v.AddArg(v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8
+ // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16]) (OffPtr <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVOstore dst (MOVOload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16
+ v0 := b.NewValue0(v.Line, OpOffPtr, dst.Type)
+ v0.AuxInt = SizeAndAlign(s).Size() % 16
+ v0.AddArg(dst)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpOffPtr, src.Type)
+ v1.AuxInt = SizeAndAlign(s).Size() % 16
+ v1.AddArg(src)
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpAMD64MOVOstore, TypeMem)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128)
+ v3.AddArg(src)
+ v3.AddArg(mem)
+ v2.AddArg(v3)
+ v2.AddArg(mem)
+ v.AddArg(v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice
+ // result: (DUFFCOPY [14*(64-SizeAndAlign(s).Size()/16)] dst src mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(OpAMD64DUFFCOPY)
+ v.AuxInt = 14 * (64 - SizeAndAlign(s).Size()/16)
+ v.AddArg(dst)
+ v.AddArg(src)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: (SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0
+ // result: (REPMOVSQ dst src (MOVQconst [SizeAndAlign(s).Size()/8]) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !((SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0) {
+ break
+ }
+ v.reset(OpAMD64REPMOVSQ)
+ v.AddArg(dst)
+ v.AddArg(src)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
+ v0.AuxInt = SizeAndAlign(s).Size() / 8
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
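+// Taken together, the Move rules form a strategy ladder. Sizes up to 16
+// bytes become one or two load/store pairs (odd sizes use a split or an
+// overlapping pair). Larger ragged sizes copy 8 or 16 leading bytes, advance
+// both pointers with OffPtr, and recurse on the remaining multiple of 16.
+// Aligned copies from 32 bytes up to 16*64 jump into Duff's device at an
+// offset computed from the size, and anything larger, or any configuration
+// with noDuffDevice set, falls back to REP MOVSQ.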
+func rewriteValueAMD64_OpMul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul16 x y)
+ // cond:
+ // result: (MULL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64MULL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32 x y)
+ // cond:
+ // result: (MULL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64MULL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMul32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32F x y)
+ // cond:
+ // result: (MULSS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64MULSS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMul64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul64 x y)
+ // cond:
+ // result: (MULQ x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64MULQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMul64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul64F x y)
+ // cond:
+ // result: (MULSD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64MULSD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul8 x y)
+ // cond:
+ // result: (MULL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64MULL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
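+// Mul16 and Mul8 lower to the 32-bit MULL: the low bits of a wider multiply
+// agree with the narrow product, so no 16-bit or 8-bit multiply is needed.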
+func rewriteValueAMD64_OpNeg16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg16 x)
+ // cond:
+ // result: (NEGL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64NEGL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeg32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg32 x)
+ // cond:
+ // result: (NEGL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64NEGL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeg32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg32F x)
+ // cond:
+ // result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64PXOR)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVSSconst, config.Frontend().TypeFloat32())
+ v0.AuxInt = f2i(math.Copysign(0, -1))
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeg64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg64 x)
+ // cond:
+ // result: (NEGQ x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64NEGQ)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeg64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg64F x)
+ // cond:
+ // result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64PXOR)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVSDconst, config.Frontend().TypeFloat64())
+ v0.AuxInt = f2i(math.Copysign(0, -1))
+ v.AddArg(v0)
+ return true
+ }
+}
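+// Float negation must flip only the sign bit, so that negating 0 gives -0
+// and NaNs stay NaNs; integer NEG would compute the wrong bit pattern. PXOR
+// against f2i(math.Copysign(0, -1)), a constant whose only set bit is the
+// sign bit, does exactly that.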
+func rewriteValueAMD64_OpNeg8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg8 x)
+ // cond:
+ // result: (NEGL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64NEGL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq16 x y)
+ // cond:
+ // result: (SETNE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32 x y)
+ // cond:
+ // result: (SETNE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32F x y)
+ // cond:
+ // result: (SETNEF (UCOMISS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETNEF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq64 x y)
+ // cond:
+ // result: (SETNE (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq64F x y)
+ // cond:
+ // result: (SETNEF (UCOMISD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETNEF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq8 x y)
+ // cond:
+ // result: (SETNE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqB x y)
+ // cond:
+ // result: (SETNE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqPtr x y)
+ // cond: config.PtrSize == 8
+ // result: (SETNE (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (NeqPtr x y)
+ // cond: config.PtrSize == 4
+ // result: (SETNE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
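+// Pointer comparisons now dispatch on config.PtrSize: CMPQ when pointers
+// are 8 bytes wide and CMPL when they are 4, presumably so the same
+// backend can serve ports with 32-bit pointers such as amd64p32.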
+func rewriteValueAMD64_OpNilCheck(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NilCheck ptr mem)
+ // cond:
+ // result: (LoweredNilCheck ptr mem)
+ for {
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpAMD64LoweredNilCheck)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNot(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Not x)
+ // cond:
+ // result: (XORLconst [1] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = 1
+ v.AddArg(x)
+ return true
+ }
+}
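+// Booleans are kept as the values 0 and 1, so logical negation is a
+// single XORLconst [1] with no compare needed.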
+func rewriteValueAMD64_OpOffPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OffPtr [off] ptr)
+ // cond: config.PtrSize == 8 && is32Bit(off)
+ // result: (ADDQconst [off] ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ if !(config.PtrSize == 8 && is32Bit(off)) {
+ break
+ }
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = off
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // cond: config.PtrSize == 8
+ // result: (ADDQ (MOVQconst [off]) ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpAMD64ADDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
+ v0.AuxInt = off
+ v.AddArg(v0)
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // cond: config.PtrSize == 4
+ // result: (ADDLconst [off] ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpAMD64ADDLconst)
+ v.AuxInt = off
+ v.AddArg(ptr)
+ return true
+ }
+ return false
+}
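+// OffPtr lowers in three tiers, and order matters because the first rule
+// that matches wins: with 8-byte pointers, an offset fitting a signed
+// 32-bit immediate becomes ADDQconst; an offset that does not fit must be
+// materialized through MOVQconst and added with ADDQ; with 4-byte
+// pointers every offset fits an ADDLconst.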
+func rewriteValueAMD64_OpOr16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or16 x y)
+ // cond:
+ // result: (ORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpOr32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or32 x y)
+ // cond:
+ // result: (ORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpOr64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or64 x y)
+ // cond:
+ // result: (ORQ x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ORQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpOr8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or8 x y)
+ // cond:
+ // result: (ORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpOrB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OrB x y)
+ // cond:
+ // result: (ORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
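+// Or8/Or16/Or32 and OrB all lower to the 32-bit ORL: the bits above a
+// narrow value's width are dead at this point, so performing the OR at
+// 32 bits is safe and lets one instruction serve every sub-word width.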
+func rewriteValueAMD64_OpRsh16Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux16 <t> x y)
+ // cond:
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
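+// Go requires an unsigned shift by >= the operand width to produce 0,
+// but the x86 shift instructions mask their count. Instead of a branch,
+// the pattern builds a mask: CMPWconst y [16] sets the carry flag exactly
+// when y < 16 (unsigned), and SBBLcarrymask expands that flag into all
+// ones (carry set) or all zeros (carry clear). Roughly, the machine
+// computes:
+//	mask := uint16(0)
+//	if y < 16 {
+//		mask = 0xFFFF
+//	}
+//	result := (x >> y) & mask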
+func rewriteValueAMD64_OpRsh16Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux32 <t> x y)
+ // cond:
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh16Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux64 <t> x y)
+ // cond:
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh16Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux8 <t> x y)
+ // cond:
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x16 <t> x y)
+ // cond:
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARW)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v3.AuxInt = 16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
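+// Signed shifts saturate instead of zeroing: a shift by >= the width must
+// fill the result with the sign bit. When y >= 16 the carry mask is zero,
+// its NOTL is all ones, and ORing that into y produces a huge count; SARW
+// masks the count to five bits, and a 16-bit arithmetic shift by 31
+// leaves only copies of the sign bit, exactly what the spec asks for.
+// When y < 16 the OR term is zero and y passes through unchanged.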
+func rewriteValueAMD64_OpRsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x32 <t> x y)
+ // cond:
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARW)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v3.AuxInt = 16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x64 <t> x y)
+ // cond:
+ // result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARW)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v3.AuxInt = 16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh16x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x8 <t> x y)
+ // cond:
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARW)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v3.AuxInt = 16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh32Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux16 <t> x y)
+ // cond:
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh32Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux32 <t> x y)
+ // cond:
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh32Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux64 <t> x y)
+ // cond:
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh32Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux8 <t> x y)
+ // cond:
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x16 <t> x y)
+ // cond:
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARL)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v3.AuxInt = 32
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh32x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x32 <t> x y)
+ // cond:
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARL)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v3.AuxInt = 32
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x64 <t> x y)
+ // cond:
+ // result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARL)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v3.AuxInt = 32
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x8 <t> x y)
+ // cond:
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARL)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v3.AuxInt = 32
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh64Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64Ux16 <t> x y)
+ // cond:
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh64Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64Ux32 <t> x y)
+ // cond:
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh64Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64Ux64 <t> x y)
+ // cond:
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh64Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64Ux8 <t> x y)
+ // cond:
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh64x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x16 <t> x y)
+ // cond:
+ // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARQ)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh64x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x32 <t> x y)
+ // cond:
+ // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARQ)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh64x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x64 <t> x y)
+ // cond:
+ // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARQ)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh64x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x8 <t> x y)
+ // cond:
+ // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARQ)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh8Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux16 <t> x y)
+ // cond:
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh8Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux32 <t> x y)
+ // cond:
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh8Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux64 <t> x y)
+ // cond:
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh8Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux8 <t> x y)
+ // cond:
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x16 <t> x y)
+ // cond:
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARB)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v3.AuxInt = 8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x32 <t> x y)
+ // cond:
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARB)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v3.AuxInt = 8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x64 <t> x y)
+ // cond:
+ // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARB)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v3.AuxInt = 8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x8 <t> x y)
+ // cond:
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARB)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v3.AuxInt = 8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
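+// The same two templates repeat for every operand width: unsigned rules
+// pair SHRB/SHRW/SHRL/SHRQ with a carry mask compared against 8, 16, 32,
+// or 64, and signed rules wrap SARB/SARW/SARL/SARQ in the saturating
+// OR/NOT form; only the 64-bit instances switch to the Q-suffixed ANDQ,
+// ORQ, NOTQ, and SBBQcarrymask helpers.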
func rewriteValueAMD64_OpSignExt16to32(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -16584,16 +17770,34 @@ func rewriteValueAMD64_OpSubPtr(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (SubPtr x y)
- // cond:
- // result: (SUBQ x y)
+ // cond: config.PtrSize == 8
+ // result: (SUBQ x y)
for {
x := v.Args[0]
y := v.Args[1]
+ if !(config.PtrSize == 8) {
+ break
+ }
v.reset(OpAMD64SUBQ)
v.AddArg(x)
v.AddArg(y)
return true
}
+ // match: (SubPtr x y)
+ // cond: config.PtrSize == 4
+ // result: (SUBL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpAMD64SUBL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
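+// SubPtr gets the same pointer-size dispatch as NeqPtr: SUBQ under an
+// 8-byte config.PtrSize and SUBL under a 4-byte one, with the trailing
+// "return false" emitted by the generator for the case where no guarded
+// rule fires.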
func rewriteValueAMD64_OpTrunc16to8(v *Value, config *Config) bool {
b := v.Block
@@ -16679,171 +17883,6 @@ func rewriteValueAMD64_OpTrunc64to8(v *Value, config *Config) bool {
return true
}
}
-func rewriteValueAMD64_OpAMD64XORL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORL x (MOVLconst [c]))
- // cond:
- // result: (XORLconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVLconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64XORLconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XORL (MOVLconst [c]) x)
- // cond:
- // result: (XORLconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpAMD64XORLconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XORL x x)
- // cond:
- // result: (MOVLconst [0])
- for {
- x := v.Args[0]
- if x != v.Args[1] {
- break
- }
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64XORLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORLconst [c] x)
- // cond: int32(c)==0
- // result: x
- for {
- c := v.AuxInt
- x := v.Args[0]
- if !(int32(c) == 0) {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (XORLconst [c] (MOVLconst [d]))
- // cond:
- // result: (MOVLconst [c^d])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
- break
- }
- d := v_0.AuxInt
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = c ^ d
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64XORQ(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORQ x (MOVQconst [c]))
- // cond: is32Bit(c)
- // result: (XORQconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVQconst {
- break
- }
- c := v_1.AuxInt
- if !(is32Bit(c)) {
- break
- }
- v.reset(OpAMD64XORQconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XORQ (MOVQconst [c]) x)
- // cond: is32Bit(c)
- // result: (XORQconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVQconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- if !(is32Bit(c)) {
- break
- }
- v.reset(OpAMD64XORQconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XORQ x x)
- // cond:
- // result: (MOVQconst [0])
- for {
- x := v.Args[0]
- if x != v.Args[1] {
- break
- }
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64XORQconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORQconst [0] x)
- // cond:
- // result: x
- for {
- if v.AuxInt != 0 {
- break
- }
- x := v.Args[0]
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (XORQconst [c] (MOVQconst [d]))
- // cond:
- // result: (MOVQconst [c^d])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVQconst {
- break
- }
- d := v_0.AuxInt
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = c ^ d
- return true
- }
- return false
-}
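+// The OpAMD64XORL, OpAMD64XORLconst, OpAMD64XORQ, and OpAMD64XORQconst
+// rewrites disappear from this position only; the regenerated file seems
+// to order its functions by opcode name, which places the machine-level
+// OpAMD64* rewrites ahead of the generic Xor16/Xor32 lowerings below.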
func rewriteValueAMD64_OpXor16(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -16907,88 +17946,94 @@ func rewriteValueAMD64_OpXor8(v *Value, config *Config) bool {
func rewriteValueAMD64_OpZero(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Zero [0] _ mem)
- // cond:
+ // match: (Zero [s] _ mem)
+ // cond: SizeAndAlign(s).Size() == 0
// result: mem
for {
- if v.AuxInt != 0 {
+ s := v.AuxInt
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 0) {
break
}
- mem := v.Args[1]
v.reset(OpCopy)
v.Type = mem.Type
v.AddArg(mem)
return true
}
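+	// Zero's AuxInt no longer carries a bare byte count. SizeAndAlign(s)
+	// decodes a value packing both the size and the alignment of the
+	// region, so each rule here matches a generic [s] and tests
+	// SizeAndAlign(s).Size() in its condition instead of matching a
+	// literal AuxInt.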
- // match: (Zero [1] destptr mem)
- // cond:
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 1
// result: (MOVBstoreconst [0] destptr mem)
for {
- if v.AuxInt != 1 {
- break
- }
+ s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 1) {
+ break
+ }
v.reset(OpAMD64MOVBstoreconst)
v.AuxInt = 0
v.AddArg(destptr)
v.AddArg(mem)
return true
}
- // match: (Zero [2] destptr mem)
- // cond:
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 2
// result: (MOVWstoreconst [0] destptr mem)
for {
- if v.AuxInt != 2 {
- break
- }
+ s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 2) {
+ break
+ }
v.reset(OpAMD64MOVWstoreconst)
v.AuxInt = 0
v.AddArg(destptr)
v.AddArg(mem)
return true
}
- // match: (Zero [4] destptr mem)
- // cond:
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 4
// result: (MOVLstoreconst [0] destptr mem)
for {
- if v.AuxInt != 4 {
- break
- }
+ s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 4) {
+ break
+ }
v.reset(OpAMD64MOVLstoreconst)
v.AuxInt = 0
v.AddArg(destptr)
v.AddArg(mem)
return true
}
- // match: (Zero [8] destptr mem)
- // cond:
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 8
// result: (MOVQstoreconst [0] destptr mem)
for {
- if v.AuxInt != 8 {
- break
- }
+ s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 8) {
+ break
+ }
v.reset(OpAMD64MOVQstoreconst)
v.AuxInt = 0
v.AddArg(destptr)
v.AddArg(mem)
return true
}
- // match: (Zero [3] destptr mem)
- // cond:
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 3
// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem))
for {
- if v.AuxInt != 3 {
- break
- }
+ s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 3) {
+ break
+ }
v.reset(OpAMD64MOVBstoreconst)
v.AuxInt = makeValAndOff(0, 2)
v.AddArg(destptr)
@@ -16999,15 +18044,16 @@ func rewriteValueAMD64_OpZero(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
- // match: (Zero [5] destptr mem)
- // cond:
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 5
// result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
for {
- if v.AuxInt != 5 {
- break
- }
+ s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 5) {
+ break
+ }
v.reset(OpAMD64MOVBstoreconst)
v.AuxInt = makeValAndOff(0, 4)
v.AddArg(destptr)
@@ -17018,15 +18064,16 @@ func rewriteValueAMD64_OpZero(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
- // match: (Zero [6] destptr mem)
- // cond:
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 6
// result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
for {
- if v.AuxInt != 6 {
- break
- }
+ s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 6) {
+ break
+ }
v.reset(OpAMD64MOVWstoreconst)
v.AuxInt = makeValAndOff(0, 4)
v.AddArg(destptr)
@@ -17037,15 +18084,16 @@ func rewriteValueAMD64_OpZero(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
- // match: (Zero [7] destptr mem)
- // cond:
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 7
// result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem))
for {
- if v.AuxInt != 7 {
- break
- }
+ s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 7) {
+ break
+ }
v.reset(OpAMD64MOVLstoreconst)
v.AuxInt = makeValAndOff(0, 3)
v.AddArg(destptr)
@@ -17056,21 +18104,21 @@ func rewriteValueAMD64_OpZero(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
- // match: (Zero [size] destptr mem)
- // cond: size%8 != 0 && size > 8
- // result: (Zero [size-size%8] (ADDQconst destptr [size%8]) (MOVQstoreconst [0] destptr mem))
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8
+ // result: (Zero [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8] (OffPtr <destptr.Type> destptr [SizeAndAlign(s).Size()%8]) (MOVQstoreconst [0] destptr mem))
for {
- size := v.AuxInt
+ s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
- if !(size%8 != 0 && size > 8) {
+ if !(SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8) {
break
}
v.reset(OpZero)
- v.AuxInt = size - size%8
- v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, config.fe.TypeUInt64())
+ v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%8
+ v0 := b.NewValue0(v.Line, OpOffPtr, destptr.Type)
+ v0.AuxInt = SizeAndAlign(s).Size() % 8
v0.AddArg(destptr)
- v0.AuxInt = size % 8
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
v1.AuxInt = 0
@@ -17079,15 +18127,16 @@ func rewriteValueAMD64_OpZero(v *Value, config *Config) bool {
v.AddArg(v1)
return true
}
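+	// The recursive rules now step through memory with a generic OffPtr
+	// of destptr's own type instead of an ADDQconst typed as uint64; the
+	// OffPtr lowering added earlier picks ADDQconst, ADDQ, or ADDLconst
+	// per config.PtrSize, and the address keeps its pointer type for
+	// later passes.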
- // match: (Zero [16] destptr mem)
- // cond:
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 16
// result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))
for {
- if v.AuxInt != 16 {
- break
- }
+ s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 16) {
+ break
+ }
v.reset(OpAMD64MOVQstoreconst)
v.AuxInt = makeValAndOff(0, 8)
v.AddArg(destptr)
@@ -17098,15 +18147,16 @@ func rewriteValueAMD64_OpZero(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
- // match: (Zero [24] destptr mem)
- // cond:
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 24
// result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))
for {
- if v.AuxInt != 24 {
- break
- }
+ s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 24) {
+ break
+ }
v.reset(OpAMD64MOVQstoreconst)
v.AuxInt = makeValAndOff(0, 16)
v.AddArg(destptr)
@@ -17121,15 +18171,16 @@ func rewriteValueAMD64_OpZero(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
- // match: (Zero [32] destptr mem)
- // cond:
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 32
// result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))))
for {
- if v.AuxInt != 32 {
- break
- }
+ s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 32) {
+ break
+ }
v.reset(OpAMD64MOVQstoreconst)
v.AuxInt = makeValAndOff(0, 24)
v.AddArg(destptr)
@@ -17148,19 +18199,19 @@ func rewriteValueAMD64_OpZero(v *Value, config *Config) bool {
v.AddArg(v0)
return true
}
- // match: (Zero [size] destptr mem)
- // cond: size <= 1024 && size%8 == 0 && size%16 != 0 && !config.noDuffDevice
- // result: (Zero [size-8] (ADDQconst [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size()%16 != 0 && !config.noDuffDevice
+ // result: (Zero [SizeAndAlign(s).Size()-8] (OffPtr <destptr.Type> [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
for {
- size := v.AuxInt
+ s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
- if !(size <= 1024 && size%8 == 0 && size%16 != 0 && !config.noDuffDevice) {
+ if !(SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size()%16 != 0 && !config.noDuffDevice) {
break
}
v.reset(OpZero)
- v.AuxInt = size - 8
- v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, config.fe.TypeUInt64())
+ v.AuxInt = SizeAndAlign(s).Size() - 8
+ v0 := b.NewValue0(v.Line, OpOffPtr, destptr.Type)
v0.AuxInt = 8
v0.AddArg(destptr)
v.AddArg(v0)
@@ -17173,18 +18224,18 @@ func rewriteValueAMD64_OpZero(v *Value, config *Config) bool {
v.AddArg(v1)
return true
}
- // match: (Zero [size] destptr mem)
- // cond: size <= 1024 && size%16 == 0 && !config.noDuffDevice
- // result: (DUFFZERO [size] destptr (MOVOconst [0]) mem)
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice
+ // result: (DUFFZERO [SizeAndAlign(s).Size()] destptr (MOVOconst [0]) mem)
for {
- size := v.AuxInt
+ s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
- if !(size <= 1024 && size%16 == 0 && !config.noDuffDevice) {
+ if !(SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice) {
break
}
v.reset(OpAMD64DUFFZERO)
- v.AuxInt = size
+ v.AuxInt = SizeAndAlign(s).Size()
v.AddArg(destptr)
v0 := b.NewValue0(v.Line, OpAMD64MOVOconst, TypeInt128)
v0.AuxInt = 0
@@ -17192,20 +18243,20 @@ func rewriteValueAMD64_OpZero(v *Value, config *Config) bool {
v.AddArg(mem)
return true
}
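+	// Zeroings that are multiples of 16 bytes up to 1024 go through the
+	// Duff's-device entry point DUFFZERO, seeded with a 128-bit zero in
+	// an SSE register via MOVOconst; the following rule falls back to
+	// REP STOSQ for larger regions or when config.noDuffDevice is set.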
- // match: (Zero [size] destptr mem)
- // cond: (size > 1024 || (config.noDuffDevice && size > 32)) && size%8 == 0
- // result: (REPSTOSQ destptr (MOVQconst [size/8]) (MOVQconst [0]) mem)
+ // match: (Zero [s] destptr mem)
+ // cond: (SizeAndAlign(s).Size() > 1024 || (config.noDuffDevice && SizeAndAlign(s).Size() > 32)) && SizeAndAlign(s).Size()%8 == 0
+ // result: (REPSTOSQ destptr (MOVQconst [SizeAndAlign(s).Size()/8]) (MOVQconst [0]) mem)
for {
- size := v.AuxInt
+ s := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
- if !((size > 1024 || (config.noDuffDevice && size > 32)) && size%8 == 0) {
+ if !((SizeAndAlign(s).Size() > 1024 || (config.noDuffDevice && SizeAndAlign(s).Size() > 32)) && SizeAndAlign(s).Size()%8 == 0) {
break
}
v.reset(OpAMD64REPSTOSQ)
v.AddArg(destptr)
v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
- v0.AuxInt = size / 8
+ v0.AuxInt = SizeAndAlign(s).Size() / 8
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
v1.AuxInt = 0
diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go
index b57283e9bc..a4659e40bd 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM.go
@@ -8,28 +8,1295 @@ import "math"
var _ = math.MinInt8 // in case not otherwise used
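+// Go refuses to compile a file with an unused import, and the generator
+// always emits the math import, so the blank use of math.MinInt8 above
+// keeps architectures whose rules never call into math building cleanly.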
func rewriteValueARM(v *Value, config *Config) bool {
switch v.Op {
+ case OpARMADC:
+ return rewriteValueARM_OpARMADC(v, config)
+ case OpARMADCconst:
+ return rewriteValueARM_OpARMADCconst(v, config)
+ case OpARMADCshiftLL:
+ return rewriteValueARM_OpARMADCshiftLL(v, config)
+ case OpARMADCshiftLLreg:
+ return rewriteValueARM_OpARMADCshiftLLreg(v, config)
+ case OpARMADCshiftRA:
+ return rewriteValueARM_OpARMADCshiftRA(v, config)
+ case OpARMADCshiftRAreg:
+ return rewriteValueARM_OpARMADCshiftRAreg(v, config)
+ case OpARMADCshiftRL:
+ return rewriteValueARM_OpARMADCshiftRL(v, config)
+ case OpARMADCshiftRLreg:
+ return rewriteValueARM_OpARMADCshiftRLreg(v, config)
case OpARMADD:
return rewriteValueARM_OpARMADD(v, config)
+ case OpARMADDS:
+ return rewriteValueARM_OpARMADDS(v, config)
+ case OpARMADDSshiftLL:
+ return rewriteValueARM_OpARMADDSshiftLL(v, config)
+ case OpARMADDSshiftLLreg:
+ return rewriteValueARM_OpARMADDSshiftLLreg(v, config)
+ case OpARMADDSshiftRA:
+ return rewriteValueARM_OpARMADDSshiftRA(v, config)
+ case OpARMADDSshiftRAreg:
+ return rewriteValueARM_OpARMADDSshiftRAreg(v, config)
+ case OpARMADDSshiftRL:
+ return rewriteValueARM_OpARMADDSshiftRL(v, config)
+ case OpARMADDSshiftRLreg:
+ return rewriteValueARM_OpARMADDSshiftRLreg(v, config)
+ case OpARMADDconst:
+ return rewriteValueARM_OpARMADDconst(v, config)
+ case OpARMADDshiftLL:
+ return rewriteValueARM_OpARMADDshiftLL(v, config)
+ case OpARMADDshiftLLreg:
+ return rewriteValueARM_OpARMADDshiftLLreg(v, config)
+ case OpARMADDshiftRA:
+ return rewriteValueARM_OpARMADDshiftRA(v, config)
+ case OpARMADDshiftRAreg:
+ return rewriteValueARM_OpARMADDshiftRAreg(v, config)
+ case OpARMADDshiftRL:
+ return rewriteValueARM_OpARMADDshiftRL(v, config)
+ case OpARMADDshiftRLreg:
+ return rewriteValueARM_OpARMADDshiftRLreg(v, config)
+ case OpARMAND:
+ return rewriteValueARM_OpARMAND(v, config)
+ case OpARMANDconst:
+ return rewriteValueARM_OpARMANDconst(v, config)
+ case OpARMANDshiftLL:
+ return rewriteValueARM_OpARMANDshiftLL(v, config)
+ case OpARMANDshiftLLreg:
+ return rewriteValueARM_OpARMANDshiftLLreg(v, config)
+ case OpARMANDshiftRA:
+ return rewriteValueARM_OpARMANDshiftRA(v, config)
+ case OpARMANDshiftRAreg:
+ return rewriteValueARM_OpARMANDshiftRAreg(v, config)
+ case OpARMANDshiftRL:
+ return rewriteValueARM_OpARMANDshiftRL(v, config)
+ case OpARMANDshiftRLreg:
+ return rewriteValueARM_OpARMANDshiftRLreg(v, config)
+ case OpARMBIC:
+ return rewriteValueARM_OpARMBIC(v, config)
+ case OpARMBICconst:
+ return rewriteValueARM_OpARMBICconst(v, config)
+ case OpARMBICshiftLL:
+ return rewriteValueARM_OpARMBICshiftLL(v, config)
+ case OpARMBICshiftLLreg:
+ return rewriteValueARM_OpARMBICshiftLLreg(v, config)
+ case OpARMBICshiftRA:
+ return rewriteValueARM_OpARMBICshiftRA(v, config)
+ case OpARMBICshiftRAreg:
+ return rewriteValueARM_OpARMBICshiftRAreg(v, config)
+ case OpARMBICshiftRL:
+ return rewriteValueARM_OpARMBICshiftRL(v, config)
+ case OpARMBICshiftRLreg:
+ return rewriteValueARM_OpARMBICshiftRLreg(v, config)
+ case OpARMCMOVWHSconst:
+ return rewriteValueARM_OpARMCMOVWHSconst(v, config)
+ case OpARMCMOVWLSconst:
+ return rewriteValueARM_OpARMCMOVWLSconst(v, config)
+ case OpARMCMP:
+ return rewriteValueARM_OpARMCMP(v, config)
+ case OpARMCMPD:
+ return rewriteValueARM_OpARMCMPD(v, config)
+ case OpARMCMPF:
+ return rewriteValueARM_OpARMCMPF(v, config)
+ case OpARMCMPconst:
+ return rewriteValueARM_OpARMCMPconst(v, config)
+ case OpARMCMPshiftLL:
+ return rewriteValueARM_OpARMCMPshiftLL(v, config)
+ case OpARMCMPshiftLLreg:
+ return rewriteValueARM_OpARMCMPshiftLLreg(v, config)
+ case OpARMCMPshiftRA:
+ return rewriteValueARM_OpARMCMPshiftRA(v, config)
+ case OpARMCMPshiftRAreg:
+ return rewriteValueARM_OpARMCMPshiftRAreg(v, config)
+ case OpARMCMPshiftRL:
+ return rewriteValueARM_OpARMCMPshiftRL(v, config)
+ case OpARMCMPshiftRLreg:
+ return rewriteValueARM_OpARMCMPshiftRLreg(v, config)
+ case OpARMDIV:
+ return rewriteValueARM_OpARMDIV(v, config)
+ case OpARMDIVU:
+ return rewriteValueARM_OpARMDIVU(v, config)
+ case OpARMEqual:
+ return rewriteValueARM_OpARMEqual(v, config)
+ case OpARMGreaterEqual:
+ return rewriteValueARM_OpARMGreaterEqual(v, config)
+ case OpARMGreaterEqualU:
+ return rewriteValueARM_OpARMGreaterEqualU(v, config)
+ case OpARMGreaterThan:
+ return rewriteValueARM_OpARMGreaterThan(v, config)
+ case OpARMGreaterThanU:
+ return rewriteValueARM_OpARMGreaterThanU(v, config)
+ case OpARMLessEqual:
+ return rewriteValueARM_OpARMLessEqual(v, config)
+ case OpARMLessEqualU:
+ return rewriteValueARM_OpARMLessEqualU(v, config)
+ case OpARMLessThan:
+ return rewriteValueARM_OpARMLessThan(v, config)
+ case OpARMLessThanU:
+ return rewriteValueARM_OpARMLessThanU(v, config)
+ case OpARMMOVBUload:
+ return rewriteValueARM_OpARMMOVBUload(v, config)
+ case OpARMMOVBUreg:
+ return rewriteValueARM_OpARMMOVBUreg(v, config)
+ case OpARMMOVBload:
+ return rewriteValueARM_OpARMMOVBload(v, config)
+ case OpARMMOVBreg:
+ return rewriteValueARM_OpARMMOVBreg(v, config)
+ case OpARMMOVBstore:
+ return rewriteValueARM_OpARMMOVBstore(v, config)
+ case OpARMMOVDload:
+ return rewriteValueARM_OpARMMOVDload(v, config)
+ case OpARMMOVDstore:
+ return rewriteValueARM_OpARMMOVDstore(v, config)
+ case OpARMMOVFload:
+ return rewriteValueARM_OpARMMOVFload(v, config)
+ case OpARMMOVFstore:
+ return rewriteValueARM_OpARMMOVFstore(v, config)
+ case OpARMMOVHUload:
+ return rewriteValueARM_OpARMMOVHUload(v, config)
+ case OpARMMOVHUreg:
+ return rewriteValueARM_OpARMMOVHUreg(v, config)
+ case OpARMMOVHload:
+ return rewriteValueARM_OpARMMOVHload(v, config)
+ case OpARMMOVHreg:
+ return rewriteValueARM_OpARMMOVHreg(v, config)
+ case OpARMMOVHstore:
+ return rewriteValueARM_OpARMMOVHstore(v, config)
+ case OpARMMOVWload:
+ return rewriteValueARM_OpARMMOVWload(v, config)
+ case OpARMMOVWloadidx:
+ return rewriteValueARM_OpARMMOVWloadidx(v, config)
+ case OpARMMOVWloadshiftLL:
+ return rewriteValueARM_OpARMMOVWloadshiftLL(v, config)
+ case OpARMMOVWloadshiftRA:
+ return rewriteValueARM_OpARMMOVWloadshiftRA(v, config)
+ case OpARMMOVWloadshiftRL:
+ return rewriteValueARM_OpARMMOVWloadshiftRL(v, config)
+ case OpARMMOVWreg:
+ return rewriteValueARM_OpARMMOVWreg(v, config)
+ case OpARMMOVWstore:
+ return rewriteValueARM_OpARMMOVWstore(v, config)
+ case OpARMMOVWstoreidx:
+ return rewriteValueARM_OpARMMOVWstoreidx(v, config)
+ case OpARMMOVWstoreshiftLL:
+ return rewriteValueARM_OpARMMOVWstoreshiftLL(v, config)
+ case OpARMMOVWstoreshiftRA:
+ return rewriteValueARM_OpARMMOVWstoreshiftRA(v, config)
+ case OpARMMOVWstoreshiftRL:
+ return rewriteValueARM_OpARMMOVWstoreshiftRL(v, config)
+ case OpARMMUL:
+ return rewriteValueARM_OpARMMUL(v, config)
+ case OpARMMULA:
+ return rewriteValueARM_OpARMMULA(v, config)
+ case OpARMMVN:
+ return rewriteValueARM_OpARMMVN(v, config)
+ case OpARMMVNshiftLL:
+ return rewriteValueARM_OpARMMVNshiftLL(v, config)
+ case OpARMMVNshiftLLreg:
+ return rewriteValueARM_OpARMMVNshiftLLreg(v, config)
+ case OpARMMVNshiftRA:
+ return rewriteValueARM_OpARMMVNshiftRA(v, config)
+ case OpARMMVNshiftRAreg:
+ return rewriteValueARM_OpARMMVNshiftRAreg(v, config)
+ case OpARMMVNshiftRL:
+ return rewriteValueARM_OpARMMVNshiftRL(v, config)
+ case OpARMMVNshiftRLreg:
+ return rewriteValueARM_OpARMMVNshiftRLreg(v, config)
+ case OpARMNotEqual:
+ return rewriteValueARM_OpARMNotEqual(v, config)
+ case OpARMOR:
+ return rewriteValueARM_OpARMOR(v, config)
+ case OpARMORconst:
+ return rewriteValueARM_OpARMORconst(v, config)
+ case OpARMORshiftLL:
+ return rewriteValueARM_OpARMORshiftLL(v, config)
+ case OpARMORshiftLLreg:
+ return rewriteValueARM_OpARMORshiftLLreg(v, config)
+ case OpARMORshiftRA:
+ return rewriteValueARM_OpARMORshiftRA(v, config)
+ case OpARMORshiftRAreg:
+ return rewriteValueARM_OpARMORshiftRAreg(v, config)
+ case OpARMORshiftRL:
+ return rewriteValueARM_OpARMORshiftRL(v, config)
+ case OpARMORshiftRLreg:
+ return rewriteValueARM_OpARMORshiftRLreg(v, config)
+ case OpARMRSB:
+ return rewriteValueARM_OpARMRSB(v, config)
+ case OpARMRSBSshiftLL:
+ return rewriteValueARM_OpARMRSBSshiftLL(v, config)
+ case OpARMRSBSshiftLLreg:
+ return rewriteValueARM_OpARMRSBSshiftLLreg(v, config)
+ case OpARMRSBSshiftRA:
+ return rewriteValueARM_OpARMRSBSshiftRA(v, config)
+ case OpARMRSBSshiftRAreg:
+ return rewriteValueARM_OpARMRSBSshiftRAreg(v, config)
+ case OpARMRSBSshiftRL:
+ return rewriteValueARM_OpARMRSBSshiftRL(v, config)
+ case OpARMRSBSshiftRLreg:
+ return rewriteValueARM_OpARMRSBSshiftRLreg(v, config)
+ case OpARMRSBconst:
+ return rewriteValueARM_OpARMRSBconst(v, config)
+ case OpARMRSBshiftLL:
+ return rewriteValueARM_OpARMRSBshiftLL(v, config)
+ case OpARMRSBshiftLLreg:
+ return rewriteValueARM_OpARMRSBshiftLLreg(v, config)
+ case OpARMRSBshiftRA:
+ return rewriteValueARM_OpARMRSBshiftRA(v, config)
+ case OpARMRSBshiftRAreg:
+ return rewriteValueARM_OpARMRSBshiftRAreg(v, config)
+ case OpARMRSBshiftRL:
+ return rewriteValueARM_OpARMRSBshiftRL(v, config)
+ case OpARMRSBshiftRLreg:
+ return rewriteValueARM_OpARMRSBshiftRLreg(v, config)
+ case OpARMRSCconst:
+ return rewriteValueARM_OpARMRSCconst(v, config)
+ case OpARMRSCshiftLL:
+ return rewriteValueARM_OpARMRSCshiftLL(v, config)
+ case OpARMRSCshiftLLreg:
+ return rewriteValueARM_OpARMRSCshiftLLreg(v, config)
+ case OpARMRSCshiftRA:
+ return rewriteValueARM_OpARMRSCshiftRA(v, config)
+ case OpARMRSCshiftRAreg:
+ return rewriteValueARM_OpARMRSCshiftRAreg(v, config)
+ case OpARMRSCshiftRL:
+ return rewriteValueARM_OpARMRSCshiftRL(v, config)
+ case OpARMRSCshiftRLreg:
+ return rewriteValueARM_OpARMRSCshiftRLreg(v, config)
+ case OpARMSBC:
+ return rewriteValueARM_OpARMSBC(v, config)
+ case OpARMSBCconst:
+ return rewriteValueARM_OpARMSBCconst(v, config)
+ case OpARMSBCshiftLL:
+ return rewriteValueARM_OpARMSBCshiftLL(v, config)
+ case OpARMSBCshiftLLreg:
+ return rewriteValueARM_OpARMSBCshiftLLreg(v, config)
+ case OpARMSBCshiftRA:
+ return rewriteValueARM_OpARMSBCshiftRA(v, config)
+ case OpARMSBCshiftRAreg:
+ return rewriteValueARM_OpARMSBCshiftRAreg(v, config)
+ case OpARMSBCshiftRL:
+ return rewriteValueARM_OpARMSBCshiftRL(v, config)
+ case OpARMSBCshiftRLreg:
+ return rewriteValueARM_OpARMSBCshiftRLreg(v, config)
+ case OpARMSLL:
+ return rewriteValueARM_OpARMSLL(v, config)
+ case OpARMSLLconst:
+ return rewriteValueARM_OpARMSLLconst(v, config)
+ case OpARMSRA:
+ return rewriteValueARM_OpARMSRA(v, config)
+ case OpARMSRAcond:
+ return rewriteValueARM_OpARMSRAcond(v, config)
+ case OpARMSRAconst:
+ return rewriteValueARM_OpARMSRAconst(v, config)
+ case OpARMSRL:
+ return rewriteValueARM_OpARMSRL(v, config)
+ case OpARMSRLconst:
+ return rewriteValueARM_OpARMSRLconst(v, config)
+ case OpARMSUB:
+ return rewriteValueARM_OpARMSUB(v, config)
+ case OpARMSUBS:
+ return rewriteValueARM_OpARMSUBS(v, config)
+ case OpARMSUBSshiftLL:
+ return rewriteValueARM_OpARMSUBSshiftLL(v, config)
+ case OpARMSUBSshiftLLreg:
+ return rewriteValueARM_OpARMSUBSshiftLLreg(v, config)
+ case OpARMSUBSshiftRA:
+ return rewriteValueARM_OpARMSUBSshiftRA(v, config)
+ case OpARMSUBSshiftRAreg:
+ return rewriteValueARM_OpARMSUBSshiftRAreg(v, config)
+ case OpARMSUBSshiftRL:
+ return rewriteValueARM_OpARMSUBSshiftRL(v, config)
+ case OpARMSUBSshiftRLreg:
+ return rewriteValueARM_OpARMSUBSshiftRLreg(v, config)
+ case OpARMSUBconst:
+ return rewriteValueARM_OpARMSUBconst(v, config)
+ case OpARMSUBshiftLL:
+ return rewriteValueARM_OpARMSUBshiftLL(v, config)
+ case OpARMSUBshiftLLreg:
+ return rewriteValueARM_OpARMSUBshiftLLreg(v, config)
+ case OpARMSUBshiftRA:
+ return rewriteValueARM_OpARMSUBshiftRA(v, config)
+ case OpARMSUBshiftRAreg:
+ return rewriteValueARM_OpARMSUBshiftRAreg(v, config)
+ case OpARMSUBshiftRL:
+ return rewriteValueARM_OpARMSUBshiftRL(v, config)
+ case OpARMSUBshiftRLreg:
+ return rewriteValueARM_OpARMSUBshiftRLreg(v, config)
+ case OpARMXOR:
+ return rewriteValueARM_OpARMXOR(v, config)
+ case OpARMXORconst:
+ return rewriteValueARM_OpARMXORconst(v, config)
+ case OpARMXORshiftLL:
+ return rewriteValueARM_OpARMXORshiftLL(v, config)
+ case OpARMXORshiftLLreg:
+ return rewriteValueARM_OpARMXORshiftLLreg(v, config)
+ case OpARMXORshiftRA:
+ return rewriteValueARM_OpARMXORshiftRA(v, config)
+ case OpARMXORshiftRAreg:
+ return rewriteValueARM_OpARMXORshiftRAreg(v, config)
+ case OpARMXORshiftRL:
+ return rewriteValueARM_OpARMXORshiftRL(v, config)
+ case OpARMXORshiftRLreg:
+ return rewriteValueARM_OpARMXORshiftRLreg(v, config)
+ case OpAdd16:
+ return rewriteValueARM_OpAdd16(v, config)
case OpAdd32:
return rewriteValueARM_OpAdd32(v, config)
+ case OpAdd32F:
+ return rewriteValueARM_OpAdd32F(v, config)
+ case OpAdd32carry:
+ return rewriteValueARM_OpAdd32carry(v, config)
+ case OpAdd32withcarry:
+ return rewriteValueARM_OpAdd32withcarry(v, config)
+ case OpAdd64F:
+ return rewriteValueARM_OpAdd64F(v, config)
+ case OpAdd8:
+ return rewriteValueARM_OpAdd8(v, config)
+ case OpAddPtr:
+ return rewriteValueARM_OpAddPtr(v, config)
case OpAddr:
return rewriteValueARM_OpAddr(v, config)
+ case OpAnd16:
+ return rewriteValueARM_OpAnd16(v, config)
+ case OpAnd32:
+ return rewriteValueARM_OpAnd32(v, config)
+ case OpAnd8:
+ return rewriteValueARM_OpAnd8(v, config)
+ case OpAndB:
+ return rewriteValueARM_OpAndB(v, config)
+ case OpClosureCall:
+ return rewriteValueARM_OpClosureCall(v, config)
+ case OpCom16:
+ return rewriteValueARM_OpCom16(v, config)
+ case OpCom32:
+ return rewriteValueARM_OpCom32(v, config)
+ case OpCom8:
+ return rewriteValueARM_OpCom8(v, config)
+ case OpConst16:
+ return rewriteValueARM_OpConst16(v, config)
case OpConst32:
return rewriteValueARM_OpConst32(v, config)
+ case OpConst32F:
+ return rewriteValueARM_OpConst32F(v, config)
+ case OpConst64F:
+ return rewriteValueARM_OpConst64F(v, config)
+ case OpConst8:
+ return rewriteValueARM_OpConst8(v, config)
+ case OpConstBool:
+ return rewriteValueARM_OpConstBool(v, config)
+ case OpConstNil:
+ return rewriteValueARM_OpConstNil(v, config)
+ case OpConvert:
+ return rewriteValueARM_OpConvert(v, config)
+ case OpCvt32Fto32:
+ return rewriteValueARM_OpCvt32Fto32(v, config)
+ case OpCvt32Fto32U:
+ return rewriteValueARM_OpCvt32Fto32U(v, config)
+ case OpCvt32Fto64F:
+ return rewriteValueARM_OpCvt32Fto64F(v, config)
+ case OpCvt32Uto32F:
+ return rewriteValueARM_OpCvt32Uto32F(v, config)
+ case OpCvt32Uto64F:
+ return rewriteValueARM_OpCvt32Uto64F(v, config)
+ case OpCvt32to32F:
+ return rewriteValueARM_OpCvt32to32F(v, config)
+ case OpCvt32to64F:
+ return rewriteValueARM_OpCvt32to64F(v, config)
+ case OpCvt64Fto32:
+ return rewriteValueARM_OpCvt64Fto32(v, config)
+ case OpCvt64Fto32F:
+ return rewriteValueARM_OpCvt64Fto32F(v, config)
+ case OpCvt64Fto32U:
+ return rewriteValueARM_OpCvt64Fto32U(v, config)
+ case OpDeferCall:
+ return rewriteValueARM_OpDeferCall(v, config)
+ case OpDiv16:
+ return rewriteValueARM_OpDiv16(v, config)
+ case OpDiv16u:
+ return rewriteValueARM_OpDiv16u(v, config)
+ case OpDiv32:
+ return rewriteValueARM_OpDiv32(v, config)
+ case OpDiv32F:
+ return rewriteValueARM_OpDiv32F(v, config)
+ case OpDiv32u:
+ return rewriteValueARM_OpDiv32u(v, config)
+ case OpDiv64F:
+ return rewriteValueARM_OpDiv64F(v, config)
+ case OpDiv8:
+ return rewriteValueARM_OpDiv8(v, config)
+ case OpDiv8u:
+ return rewriteValueARM_OpDiv8u(v, config)
+ case OpEq16:
+ return rewriteValueARM_OpEq16(v, config)
+ case OpEq32:
+ return rewriteValueARM_OpEq32(v, config)
+ case OpEq32F:
+ return rewriteValueARM_OpEq32F(v, config)
+ case OpEq64F:
+ return rewriteValueARM_OpEq64F(v, config)
+ case OpEq8:
+ return rewriteValueARM_OpEq8(v, config)
+ case OpEqB:
+ return rewriteValueARM_OpEqB(v, config)
+ case OpEqPtr:
+ return rewriteValueARM_OpEqPtr(v, config)
+ case OpGeq16:
+ return rewriteValueARM_OpGeq16(v, config)
+ case OpGeq16U:
+ return rewriteValueARM_OpGeq16U(v, config)
+ case OpGeq32:
+ return rewriteValueARM_OpGeq32(v, config)
+ case OpGeq32F:
+ return rewriteValueARM_OpGeq32F(v, config)
+ case OpGeq32U:
+ return rewriteValueARM_OpGeq32U(v, config)
+ case OpGeq64F:
+ return rewriteValueARM_OpGeq64F(v, config)
+ case OpGeq8:
+ return rewriteValueARM_OpGeq8(v, config)
+ case OpGeq8U:
+ return rewriteValueARM_OpGeq8U(v, config)
+ case OpGetClosurePtr:
+ return rewriteValueARM_OpGetClosurePtr(v, config)
+ case OpGoCall:
+ return rewriteValueARM_OpGoCall(v, config)
+ case OpGreater16:
+ return rewriteValueARM_OpGreater16(v, config)
+ case OpGreater16U:
+ return rewriteValueARM_OpGreater16U(v, config)
+ case OpGreater32:
+ return rewriteValueARM_OpGreater32(v, config)
+ case OpGreater32F:
+ return rewriteValueARM_OpGreater32F(v, config)
+ case OpGreater32U:
+ return rewriteValueARM_OpGreater32U(v, config)
+ case OpGreater64F:
+ return rewriteValueARM_OpGreater64F(v, config)
+ case OpGreater8:
+ return rewriteValueARM_OpGreater8(v, config)
+ case OpGreater8U:
+ return rewriteValueARM_OpGreater8U(v, config)
+ case OpHmul16:
+ return rewriteValueARM_OpHmul16(v, config)
+ case OpHmul16u:
+ return rewriteValueARM_OpHmul16u(v, config)
+ case OpHmul32:
+ return rewriteValueARM_OpHmul32(v, config)
+ case OpHmul32u:
+ return rewriteValueARM_OpHmul32u(v, config)
+ case OpHmul8:
+ return rewriteValueARM_OpHmul8(v, config)
+ case OpHmul8u:
+ return rewriteValueARM_OpHmul8u(v, config)
+ case OpInterCall:
+ return rewriteValueARM_OpInterCall(v, config)
+ case OpIsInBounds:
+ return rewriteValueARM_OpIsInBounds(v, config)
+ case OpIsNonNil:
+ return rewriteValueARM_OpIsNonNil(v, config)
+ case OpIsSliceInBounds:
+ return rewriteValueARM_OpIsSliceInBounds(v, config)
+ case OpLeq16:
+ return rewriteValueARM_OpLeq16(v, config)
+ case OpLeq16U:
+ return rewriteValueARM_OpLeq16U(v, config)
+ case OpLeq32:
+ return rewriteValueARM_OpLeq32(v, config)
+ case OpLeq32F:
+ return rewriteValueARM_OpLeq32F(v, config)
+ case OpLeq32U:
+ return rewriteValueARM_OpLeq32U(v, config)
+ case OpLeq64F:
+ return rewriteValueARM_OpLeq64F(v, config)
+ case OpLeq8:
+ return rewriteValueARM_OpLeq8(v, config)
+ case OpLeq8U:
+ return rewriteValueARM_OpLeq8U(v, config)
+ case OpLess16:
+ return rewriteValueARM_OpLess16(v, config)
+ case OpLess16U:
+ return rewriteValueARM_OpLess16U(v, config)
case OpLess32:
return rewriteValueARM_OpLess32(v, config)
+ case OpLess32F:
+ return rewriteValueARM_OpLess32F(v, config)
+ case OpLess32U:
+ return rewriteValueARM_OpLess32U(v, config)
+ case OpLess64F:
+ return rewriteValueARM_OpLess64F(v, config)
+ case OpLess8:
+ return rewriteValueARM_OpLess8(v, config)
+ case OpLess8U:
+ return rewriteValueARM_OpLess8U(v, config)
case OpLoad:
return rewriteValueARM_OpLoad(v, config)
- case OpARMMOVWload:
- return rewriteValueARM_OpARMMOVWload(v, config)
- case OpARMMOVWstore:
- return rewriteValueARM_OpARMMOVWstore(v, config)
+ case OpLrot16:
+ return rewriteValueARM_OpLrot16(v, config)
+ case OpLrot32:
+ return rewriteValueARM_OpLrot32(v, config)
+ case OpLrot8:
+ return rewriteValueARM_OpLrot8(v, config)
+ case OpLsh16x16:
+ return rewriteValueARM_OpLsh16x16(v, config)
+ case OpLsh16x32:
+ return rewriteValueARM_OpLsh16x32(v, config)
+ case OpLsh16x64:
+ return rewriteValueARM_OpLsh16x64(v, config)
+ case OpLsh16x8:
+ return rewriteValueARM_OpLsh16x8(v, config)
+ case OpLsh32x16:
+ return rewriteValueARM_OpLsh32x16(v, config)
+ case OpLsh32x32:
+ return rewriteValueARM_OpLsh32x32(v, config)
+ case OpLsh32x64:
+ return rewriteValueARM_OpLsh32x64(v, config)
+ case OpLsh32x8:
+ return rewriteValueARM_OpLsh32x8(v, config)
+ case OpLsh8x16:
+ return rewriteValueARM_OpLsh8x16(v, config)
+ case OpLsh8x32:
+ return rewriteValueARM_OpLsh8x32(v, config)
+ case OpLsh8x64:
+ return rewriteValueARM_OpLsh8x64(v, config)
+ case OpLsh8x8:
+ return rewriteValueARM_OpLsh8x8(v, config)
+ case OpMod16:
+ return rewriteValueARM_OpMod16(v, config)
+ case OpMod16u:
+ return rewriteValueARM_OpMod16u(v, config)
+ case OpMod32:
+ return rewriteValueARM_OpMod32(v, config)
+ case OpMod32u:
+ return rewriteValueARM_OpMod32u(v, config)
+ case OpMod8:
+ return rewriteValueARM_OpMod8(v, config)
+ case OpMod8u:
+ return rewriteValueARM_OpMod8u(v, config)
+ case OpMove:
+ return rewriteValueARM_OpMove(v, config)
+ case OpMul16:
+ return rewriteValueARM_OpMul16(v, config)
+ case OpMul32:
+ return rewriteValueARM_OpMul32(v, config)
+ case OpMul32F:
+ return rewriteValueARM_OpMul32F(v, config)
+ case OpMul32uhilo:
+ return rewriteValueARM_OpMul32uhilo(v, config)
+ case OpMul64F:
+ return rewriteValueARM_OpMul64F(v, config)
+ case OpMul8:
+ return rewriteValueARM_OpMul8(v, config)
+ case OpNeg16:
+ return rewriteValueARM_OpNeg16(v, config)
+ case OpNeg32:
+ return rewriteValueARM_OpNeg32(v, config)
+ case OpNeg32F:
+ return rewriteValueARM_OpNeg32F(v, config)
+ case OpNeg64F:
+ return rewriteValueARM_OpNeg64F(v, config)
+ case OpNeg8:
+ return rewriteValueARM_OpNeg8(v, config)
+ case OpNeq16:
+ return rewriteValueARM_OpNeq16(v, config)
+ case OpNeq32:
+ return rewriteValueARM_OpNeq32(v, config)
+ case OpNeq32F:
+ return rewriteValueARM_OpNeq32F(v, config)
+ case OpNeq64F:
+ return rewriteValueARM_OpNeq64F(v, config)
+ case OpNeq8:
+ return rewriteValueARM_OpNeq8(v, config)
+ case OpNeqB:
+ return rewriteValueARM_OpNeqB(v, config)
+ case OpNeqPtr:
+ return rewriteValueARM_OpNeqPtr(v, config)
+ case OpNilCheck:
+ return rewriteValueARM_OpNilCheck(v, config)
+ case OpNot:
+ return rewriteValueARM_OpNot(v, config)
case OpOffPtr:
return rewriteValueARM_OpOffPtr(v, config)
+ case OpOr16:
+ return rewriteValueARM_OpOr16(v, config)
+ case OpOr32:
+ return rewriteValueARM_OpOr32(v, config)
+ case OpOr8:
+ return rewriteValueARM_OpOr8(v, config)
+ case OpOrB:
+ return rewriteValueARM_OpOrB(v, config)
+ case OpRsh16Ux16:
+ return rewriteValueARM_OpRsh16Ux16(v, config)
+ case OpRsh16Ux32:
+ return rewriteValueARM_OpRsh16Ux32(v, config)
+ case OpRsh16Ux64:
+ return rewriteValueARM_OpRsh16Ux64(v, config)
+ case OpRsh16Ux8:
+ return rewriteValueARM_OpRsh16Ux8(v, config)
+ case OpRsh16x16:
+ return rewriteValueARM_OpRsh16x16(v, config)
+ case OpRsh16x32:
+ return rewriteValueARM_OpRsh16x32(v, config)
+ case OpRsh16x64:
+ return rewriteValueARM_OpRsh16x64(v, config)
+ case OpRsh16x8:
+ return rewriteValueARM_OpRsh16x8(v, config)
+ case OpRsh32Ux16:
+ return rewriteValueARM_OpRsh32Ux16(v, config)
+ case OpRsh32Ux32:
+ return rewriteValueARM_OpRsh32Ux32(v, config)
+ case OpRsh32Ux64:
+ return rewriteValueARM_OpRsh32Ux64(v, config)
+ case OpRsh32Ux8:
+ return rewriteValueARM_OpRsh32Ux8(v, config)
+ case OpRsh32x16:
+ return rewriteValueARM_OpRsh32x16(v, config)
+ case OpRsh32x32:
+ return rewriteValueARM_OpRsh32x32(v, config)
+ case OpRsh32x64:
+ return rewriteValueARM_OpRsh32x64(v, config)
+ case OpRsh32x8:
+ return rewriteValueARM_OpRsh32x8(v, config)
+ case OpRsh8Ux16:
+ return rewriteValueARM_OpRsh8Ux16(v, config)
+ case OpRsh8Ux32:
+ return rewriteValueARM_OpRsh8Ux32(v, config)
+ case OpRsh8Ux64:
+ return rewriteValueARM_OpRsh8Ux64(v, config)
+ case OpRsh8Ux8:
+ return rewriteValueARM_OpRsh8Ux8(v, config)
+ case OpRsh8x16:
+ return rewriteValueARM_OpRsh8x16(v, config)
+ case OpRsh8x32:
+ return rewriteValueARM_OpRsh8x32(v, config)
+ case OpRsh8x64:
+ return rewriteValueARM_OpRsh8x64(v, config)
+ case OpRsh8x8:
+ return rewriteValueARM_OpRsh8x8(v, config)
+ case OpSignExt16to32:
+ return rewriteValueARM_OpSignExt16to32(v, config)
+ case OpSignExt8to16:
+ return rewriteValueARM_OpSignExt8to16(v, config)
+ case OpSignExt8to32:
+ return rewriteValueARM_OpSignExt8to32(v, config)
+ case OpSignmask:
+ return rewriteValueARM_OpSignmask(v, config)
+ case OpSqrt:
+ return rewriteValueARM_OpSqrt(v, config)
case OpStaticCall:
return rewriteValueARM_OpStaticCall(v, config)
case OpStore:
return rewriteValueARM_OpStore(v, config)
+ case OpSub16:
+ return rewriteValueARM_OpSub16(v, config)
+ case OpSub32:
+ return rewriteValueARM_OpSub32(v, config)
+ case OpSub32F:
+ return rewriteValueARM_OpSub32F(v, config)
+ case OpSub32carry:
+ return rewriteValueARM_OpSub32carry(v, config)
+ case OpSub32withcarry:
+ return rewriteValueARM_OpSub32withcarry(v, config)
+ case OpSub64F:
+ return rewriteValueARM_OpSub64F(v, config)
+ case OpSub8:
+ return rewriteValueARM_OpSub8(v, config)
+ case OpSubPtr:
+ return rewriteValueARM_OpSubPtr(v, config)
+ case OpTrunc16to8:
+ return rewriteValueARM_OpTrunc16to8(v, config)
+ case OpTrunc32to16:
+ return rewriteValueARM_OpTrunc32to16(v, config)
+ case OpTrunc32to8:
+ return rewriteValueARM_OpTrunc32to8(v, config)
+ case OpXor16:
+ return rewriteValueARM_OpXor16(v, config)
+ case OpXor32:
+ return rewriteValueARM_OpXor32(v, config)
+ case OpXor8:
+ return rewriteValueARM_OpXor8(v, config)
+ case OpZero:
+ return rewriteValueARM_OpZero(v, config)
+ case OpZeroExt16to32:
+ return rewriteValueARM_OpZeroExt16to32(v, config)
+ case OpZeroExt8to16:
+ return rewriteValueARM_OpZeroExt8to16(v, config)
+ case OpZeroExt8to32:
+ return rewriteValueARM_OpZeroExt8to32(v, config)
+ case OpZeromask:
+ return rewriteValueARM_OpZeromask(v, config)
+ }
+ return false
+}
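+// rewriteValueARM_OpARMADC optimizes ADC (add with carry), which the
+// 64-bit decomposition rules emit for the high word of a 32-bit pair.
+// A constant operand folds into ADCconst, and a shifted operand (SLL,
+// SRL or SRA by constant or register) merges into an ADCshift* form so
+// the barrel shifter performs the shift within the same instruction.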
+func rewriteValueARM_OpARMADC(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADC (MOVWconst [c]) x flags)
+ // cond:
+ // result: (ADCconst [c] x flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMADCconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADC x (MOVWconst [c]) flags)
+ // cond:
+ // result: (ADCconst [c] x flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMADCconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADC x (SLLconst [c] y) flags)
+ // cond:
+ // result: (ADCshiftLL x y [c] flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ flags := v.Args[2]
+ v.reset(OpARMADCshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADC (SLLconst [c] y) x flags)
+ // cond:
+ // result: (ADCshiftLL x y [c] flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMADCshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADC x (SRLconst [c] y) flags)
+ // cond:
+ // result: (ADCshiftRL x y [c] flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ flags := v.Args[2]
+ v.reset(OpARMADCshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADC (SRLconst [c] y) x flags)
+ // cond:
+ // result: (ADCshiftRL x y [c] flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMADCshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADC x (SRAconst [c] y) flags)
+ // cond:
+ // result: (ADCshiftRA x y [c] flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ flags := v.Args[2]
+ v.reset(OpARMADCshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADC (SRAconst [c] y) x flags)
+ // cond:
+ // result: (ADCshiftRA x y [c] flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMADCshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADC x (SLL y z) flags)
+ // cond:
+ // result: (ADCshiftLLreg x y z flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMADCshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADC (SLL y z) x flags)
+ // cond:
+ // result: (ADCshiftLLreg x y z flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMADCshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADC x (SRL y z) flags)
+ // cond:
+ // result: (ADCshiftRLreg x y z flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMADCshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADC (SRL y z) x flags)
+ // cond:
+ // result: (ADCshiftRLreg x y z flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMADCshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADC x (SRA y z) flags)
+ // cond:
+ // result: (ADCshiftRAreg x y z flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMADCshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADC (SRA y z) x flags)
+ // cond:
+ // result: (ADCshiftRAreg x y z flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMADCshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
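+// ADCconst folds a constant adjustment of its operand (ADDconst or
+// SUBconst) into its own constant; int64(int32(...)) truncates the sum
+// to ARM's 32-bit word and sign-extends it back for storage in AuxInt.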
+func rewriteValueARM_OpARMADCconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADCconst [c] (ADDconst [d] x) flags)
+ // cond:
+ // result: (ADCconst [int64(int32(c+d))] x flags)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ flags := v.Args[1]
+ v.reset(OpARMADCconst)
+ v.AuxInt = int64(int32(c + d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADCconst [c] (SUBconst [d] x) flags)
+ // cond:
+ // result: (ADCconst [int64(int32(c-d))] x flags)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ flags := v.Args[1]
+ v.reset(OpARMADCconst)
+ v.AuxInt = int64(int32(c - d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
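+// The shift-form rewrites below follow a pattern repeated for every
+// shifted-operand op in this file: a constant shiftee is folded into the
+// const form, a constant in the non-shifted slot is rewritten to the
+// const form with an explicit shift value, and a *shiftreg op whose
+// shift amount is constant degrades to the immediate-shift form.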
+func rewriteValueARM_OpARMADCshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADCshiftLL (MOVWconst [c]) x [d] flags)
+ // cond:
+ // result: (ADCconst [c] (SLLconst <x.Type> x [d]) flags)
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMADCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADCshiftLL x (MOVWconst [c]) [d] flags)
+ // cond:
+ // result: (ADCconst x [int64(uint32(c)<<uint64(d))] flags)
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMADCconst)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADCshiftLLreg (MOVWconst [c]) x y flags)
+ // cond:
+ // result: (ADCconst [c] (SLL <x.Type> x y) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ flags := v.Args[3]
+ v.reset(OpARMADCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADCshiftLLreg x y (MOVWconst [c]) flags)
+ // cond:
+ // result: (ADCshiftLL x y [c] flags)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ flags := v.Args[3]
+ v.reset(OpARMADCshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADCshiftRA (MOVWconst [c]) x [d] flags)
+ // cond:
+ // result: (ADCconst [c] (SRAconst <x.Type> x [d]) flags)
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMADCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADCshiftRA x (MOVWconst [c]) [d] flags)
+ // cond:
+ // result: (ADCconst x [int64(int32(c)>>uint64(d))] flags)
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMADCconst)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADCshiftRAreg (MOVWconst [c]) x y flags)
+ // cond:
+ // result: (ADCconst [c] (SRA <x.Type> x y) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ flags := v.Args[3]
+ v.reset(OpARMADCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADCshiftRAreg x y (MOVWconst [c]) flags)
+ // cond:
+ // result: (ADCshiftRA x y [c] flags)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ flags := v.Args[3]
+ v.reset(OpARMADCshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADCshiftRL (MOVWconst [c]) x [d] flags)
+ // cond:
+ // result: (ADCconst [c] (SRLconst <x.Type> x [d]) flags)
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMADCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADCshiftRL x (MOVWconst [c]) [d] flags)
+ // cond:
+ // result: (ADCconst x [int64(uint32(c)>>uint64(d))] flags)
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMADCconst)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADCshiftRLreg (MOVWconst [c]) x y flags)
+ // cond:
+ // result: (ADCconst [c] (SRL <x.Type> x y) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ flags := v.Args[3]
+ v.reset(OpARMADCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (ADCshiftRLreg x y (MOVWconst [c]) flags)
+ // cond:
+ // result: (ADCshiftRL x y [c] flags)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ flags := v.Args[3]
+ v.reset(OpARMADCshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
}
return false
}
@@ -66,83 +1333,4891 @@ func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
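+	// The remaining ADD rules fold a shifted operand into an ADDshift*
+	// or ADDshift*reg form, turn an add of a negation (RSBconst [0]) into
+	// SUB, and fuse an add of a product into the multiply-add MULA.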
+ // match: (ADD x (SLLconst [c] y))
+ // cond:
+ // result: (ADDshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMADDshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADD (SLLconst [c] y) x)
+ // cond:
+ // result: (ADDshiftLL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMADDshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADD x (SRLconst [c] y))
+ // cond:
+ // result: (ADDshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMADDshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADD (SRLconst [c] y) x)
+ // cond:
+ // result: (ADDshiftRL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMADDshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADD x (SRAconst [c] y))
+ // cond:
+ // result: (ADDshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMADDshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADD (SRAconst [c] y) x)
+ // cond:
+ // result: (ADDshiftRA x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMADDshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADD x (SLL y z))
+ // cond:
+ // result: (ADDshiftLLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMADDshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (ADD (SLL y z) x)
+ // cond:
+ // result: (ADDshiftLLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMADDshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (ADD x (SRL y z))
+ // cond:
+ // result: (ADDshiftRLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMADDshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (ADD (SRL y z) x)
+ // cond:
+ // result: (ADDshiftRLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMADDshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (ADD x (SRA y z))
+ // cond:
+ // result: (ADDshiftRAreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMADDshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (ADD (SRA y z) x)
+ // cond:
+ // result: (ADDshiftRAreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMADDshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (ADD x (RSBconst [0] y))
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMRSBconst {
+ break
+ }
+ if v_1.AuxInt != 0 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpARMSUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADD (RSBconst [0] y) x)
+ // cond:
+ // result: (SUB x y)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMRSBconst {
+ break
+ }
+ if v_0.AuxInt != 0 {
+ break
+ }
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMSUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADD (MUL x y) a)
+ // cond:
+ // result: (MULA x y a)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMUL {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ a := v.Args[1]
+ v.reset(OpARMMULA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(a)
+ return true
+ }
+ // match: (ADD a (MUL x y))
+ // cond:
+ // result: (MULA x y a)
+ for {
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMUL {
+ break
+ }
+ x := v_1.Args[0]
+ y := v_1.Args[1]
+ v.reset(OpARMMULA)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(a)
+ return true
+ }
return false
}
-func rewriteValueARM_OpAdd32(v *Value, config *Config) bool {
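+// ADDS is an ADD that also sets the carry flag; the 64-bit decomposition
+// uses it for the low word of a 32-bit pair. Its rewrites mirror the
+// plain ADD rules above.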
+func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Add32 x y)
+ // match: (ADDS (MOVWconst [c]) x)
// cond:
- // result: (ADD x y)
+ // result: (ADDSconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMADDSconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDS x (MOVWconst [c]))
+ // cond:
+ // result: (ADDSconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMADDSconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDS x (SLLconst [c] y))
+ // cond:
+ // result: (ADDSshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMADDSshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADDS (SLLconst [c] y) x)
+ // cond:
+ // result: (ADDSshiftLL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMADDSshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADDS x (SRLconst [c] y))
+ // cond:
+ // result: (ADDSshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMADDSshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADDS (SRLconst [c] y) x)
+ // cond:
+ // result: (ADDSshiftRL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMADDSshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADDS x (SRAconst [c] y))
+ // cond:
+ // result: (ADDSshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMADDSshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADDS (SRAconst [c] y) x)
+ // cond:
+ // result: (ADDSshiftRA x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMADDSshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADDS x (SLL y z))
+ // cond:
+ // result: (ADDSshiftLLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMADDSshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (ADDS (SLL y z) x)
+ // cond:
+ // result: (ADDSshiftLLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMADDSshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (ADDS x (SRL y z))
+ // cond:
+ // result: (ADDSshiftRLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMADDSshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (ADDS (SRL y z) x)
+ // cond:
+ // result: (ADDSshiftRLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMADDSshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (ADDS x (SRA y z))
+ // cond:
+ // result: (ADDSshiftRAreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMADDSshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (ADDS (SRA y z) x)
+ // cond:
+ // result: (ADDSshiftRAreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMADDSshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDSshiftLL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (ADDSconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMADDSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ADDSconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDSshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ADDSconst [c] (SLL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMADDSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ADDSshiftLL x y [c])
for {
x := v.Args[0]
y := v.Args[1]
- v.reset(OpARMADD)
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMADDSshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
return true
}
+ return false
}
-func rewriteValueARM_OpAddr(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDSshiftRA(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Addr {sym} base)
+ // match: (ADDSshiftRA (MOVWconst [c]) x [d])
// cond:
- // result: (ADDconst {sym} base)
+ // result: (ADDSconst [c] (SRAconst <x.Type> x [d]))
for {
- sym := v.Aux
- base := v.Args[0]
- v.reset(OpARMADDconst)
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMADDSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ADDSconst x [int64(int32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDSshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ADDSconst [c] (SRA <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMADDSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ADDSshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMADDSshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDSshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (ADDSconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMADDSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ADDSconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDSshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ADDSconst [c] (SRL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMADDSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ADDSshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMADDSshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
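+// ADDconst folds offsets into MOVWaddr address computations, drops
+// additions of zero, evaluates additions of constants, and reassociates
+// chains of ADDconst/SUBconst/RSBconst into a single constant op.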
+func rewriteValueARM_OpARMADDconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDconst [off1] (MOVWaddr [off2] {sym} ptr))
+ // cond:
+ // result: (MOVWaddr [off1+off2] {sym} ptr)
+ for {
+ off1 := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym := v_0.Aux
+ ptr := v_0.Args[0]
+ v.reset(OpARMMOVWaddr)
+ v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (ADDconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
return true
}
+ // match: (ADDconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [int64(int32(c+d))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(int32(c + d))
+ return true
+ }
+ // match: (ADDconst [c] (ADDconst [d] x))
+ // cond:
+ // result: (ADDconst [int64(int32(c+d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int64(int32(c + d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (SUBconst [d] x))
+ // cond:
+ // result: (ADDconst [int64(int32(c-d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int64(int32(c - d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (RSBconst [d] x))
+ // cond:
+ // result: (RSBconst [int64(int32(c+d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMRSBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int64(int32(c + d))
+ v.AddArg(x)
+ return true
+ }
+ return false
}
-func rewriteValueARM_OpConst32(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDshiftLL(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Const32 [val])
+ // match: (ADDshiftLL (MOVWconst [c]) x [d])
// cond:
- // result: (MOVWconst [val])
+ // result: (ADDconst [c] (SLLconst <x.Type> x [d]))
for {
- val := v.AuxInt
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMADDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ADDconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMADDconst)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ADDconst [c] (SLL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMADDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ADDshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMADDshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDshiftRA (MOVWconst [c]) x [d])
+ // cond:
+ // result: (ADDconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMADDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ADDconst x [int64(int32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMADDconst)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ADDconst [c] (SRA <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMADDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ADDshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMADDshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (ADDconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMADDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ADDconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMADDconst)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ADDconst [c] (SRL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMADDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ADDshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMADDshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
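+// AND gets the usual constant and shifted-operand folds, plus identities:
+// (AND x x) is x, and an AND with a complemented operand (MVN or a
+// shifted MVN) becomes BIC, ARM's and-not instruction.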
+func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AND (MOVWconst [c]) x)
+ // cond:
+ // result: (ANDconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMANDconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (AND x (MOVWconst [c]))
+ // cond:
+ // result: (ANDconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMANDconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (AND x (SLLconst [c] y))
+ // cond:
+ // result: (ANDshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMANDshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (AND (SLLconst [c] y) x)
+ // cond:
+ // result: (ANDshiftLL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMANDshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (AND x (SRLconst [c] y))
+ // cond:
+ // result: (ANDshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMANDshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (AND (SRLconst [c] y) x)
+ // cond:
+ // result: (ANDshiftRL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMANDshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (AND x (SRAconst [c] y))
+ // cond:
+ // result: (ANDshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMANDshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (AND (SRAconst [c] y) x)
+ // cond:
+ // result: (ANDshiftRA x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMANDshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (AND x (SLL y z))
+ // cond:
+ // result: (ANDshiftLLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMANDshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (AND (SLL y z) x)
+ // cond:
+ // result: (ANDshiftLLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMANDshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (AND x (SRL y z))
+ // cond:
+ // result: (ANDshiftRLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMANDshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (AND (SRL y z) x)
+ // cond:
+ // result: (ANDshiftRLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMANDshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (AND x (SRA y z))
+ // cond:
+ // result: (ANDshiftRAreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMANDshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (AND (SRA y z) x)
+ // cond:
+ // result: (ANDshiftRAreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMANDshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (AND x x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (AND x (MVN y))
+ // cond:
+ // result: (BIC x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMVN {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpARMBIC)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (AND x (MVNshiftLL y [c]))
+ // cond:
+ // result: (BICshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMVNshiftLL {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (AND x (MVNshiftRL y [c]))
+ // cond:
+ // result: (BICshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMVNshiftRL {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (AND x (MVNshiftRA y [c]))
+ // cond:
+ // result: (BICshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMVNshiftRA {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
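+// ANDconst: a zero mask yields the constant 0, an all-ones mask
+// (int32(c) == -1) yields the operand unchanged, and nested constant
+// masks combine with &.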
+func rewriteValueARM_OpARMANDconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ANDconst [0] _)
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
v.reset(OpARMMOVWconst)
- v.AuxInt = val
+ v.AuxInt = 0
+ return true
+ }
+ // match: (ANDconst [c] x)
+ // cond: int32(c)==-1
+ // result: x
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
return true
}
+ // match: (ANDconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [c&d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = c & d
+ return true
+ }
+ // match: (ANDconst [c] (ANDconst [d] x))
+ // cond:
+ // result: (ANDconst [c&d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMANDconst)
+ v.AuxInt = c & d
+ v.AddArg(x)
+ return true
+ }
+ return false
}
-func rewriteValueARM_OpLess32(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMANDshiftLL(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Less32 x y)
+ // match: (ANDshiftLL (MOVWconst [c]) x [d])
// cond:
- // result: (LessThan (CMP x y))
+ // result: (ANDconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMANDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ANDconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMANDconst)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+	// match: (ANDshiftLL x:(SLLconst y [c]) y [d])
+	// cond: c==d
+	// result: x
	for {
+		d := v.AuxInt
	x := v.Args[0]
	y := v.Args[1]
-	v.reset(OpARMLessThan)
-	v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+		if x.Op != OpARMSLLconst {
+			break
+		}
+		c := x.AuxInt
+		if y != x.Args[0] {
+			break
+		}
+		if !(c == d) {
+			break
+		}
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ANDshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ANDconst [c] (SLL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMANDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
+ // match: (ANDshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ANDshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMANDshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
}
-func rewriteValueARM_OpLoad(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMANDshiftRA(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Load <t> ptr mem)
- // cond: is32BitInt(t)
- // result: (MOVWload ptr mem)
+ // match: (ANDshiftRA (MOVWconst [c]) x [d])
+ // cond:
+ // result: (ANDconst [c] (SRAconst <x.Type> x [d]))
for {
- t := v.Type
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMANDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ANDconst x [int64(int32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMANDconst)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+	// match: (ANDshiftRA x:(SRAconst y [c]) y [d])
+	// cond: c==d
+	// result: x
+	for {
+		d := v.AuxInt
+		x := v.Args[0]
+		y := v.Args[1]
+		if x.Op != OpARMSRAconst {
+			break
+		}
+		c := x.AuxInt
+		if y != x.Args[0] {
+			break
+		}
+		if !(c == d) {
+			break
+		}
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ANDshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ANDconst [c] (SRA <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMANDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ANDshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMANDshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ANDshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (ANDconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMANDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ANDconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMANDconst)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+	// match: (ANDshiftRL x:(SRLconst y [c]) y [d])
+	// cond: c==d
+	// result: x
+	for {
+		d := v.AuxInt
+		x := v.Args[0]
+		y := v.Args[1]
+		if x.Op != OpARMSRLconst {
+			break
+		}
+		c := x.AuxInt
+		if y != x.Args[0] {
+			break
+		}
+		if !(c == d) {
+			break
+		}
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ANDshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ANDconst [c] (SRL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMANDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ANDshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMANDshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
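+// BIC computes x &^ y. Only the second operand is matched for constant
+// and shift folds because BIC, unlike AND, is not commutative; and
+// (BIC x x) clears every bit, yielding the constant 0.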
+func rewriteValueARM_OpARMBIC(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (BIC x (MOVWconst [c]))
+ // cond:
+ // result: (BICconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMBICconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (BIC x (SLLconst [c] y))
+ // cond:
+ // result: (BICshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (BIC x (SRLconst [c] y))
+ // cond:
+ // result: (BICshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (BIC x (SRAconst [c] y))
+ // cond:
+ // result: (BICshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (BIC x (SLL y z))
+ // cond:
+ // result: (BICshiftLLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMBICshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (BIC x (SRL y z))
+ // cond:
+ // result: (BICshiftRLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMBICshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (BIC x (SRA y z))
+ // cond:
+ // result: (BICshiftRAreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMBICshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (BIC x x)
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
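+// BICconst mirrors ANDconst with the mask complemented: clearing no
+// bits returns the operand, clearing all bits returns 0, and a constant
+// operand folds to d &^ c.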
+func rewriteValueARM_OpARMBICconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (BICconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICconst [c] _)
+ // cond: int32(c)==-1
+ // result: (MOVWconst [0])
+ for {
+ c := v.AuxInt
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (BICconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [d&^c])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = d &^ c
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (BICshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (BICconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMBICconst)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+	// match: (BICshiftLL (SLLconst x [c]) x [d])
+	// cond: c==d
+	// result: (MOVWconst [0])
+	for {
+		d := v.AuxInt
+		v_0 := v.Args[0]
+		if v_0.Op != OpARMSLLconst {
+			break
+		}
+		c := v_0.AuxInt
+		x := v_0.Args[0]
+		if x != v.Args[1] {
+			break
+		}
+		if !(c == d) {
+			break
+		}
+		v.reset(OpARMMOVWconst)
+		v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (BICshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (BICshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMBICshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (BICshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (BICconst x [int64(int32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMBICconst)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+	// match: (BICshiftRA (SRAconst x [c]) x [d])
+	// cond: c==d
+	// result: (MOVWconst [0])
+	for {
+		d := v.AuxInt
+		v_0 := v.Args[0]
+		if v_0.Op != OpARMSRAconst {
+			break
+		}
+		c := v_0.AuxInt
+		x := v_0.Args[0]
+		if x != v.Args[1] {
+			break
+		}
+		if !(c == d) {
+			break
+		}
+		v.reset(OpARMMOVWconst)
+		v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (BICshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (BICshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMBICshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (BICshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (BICconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMBICconst)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+	// match: (BICshiftRL (SRLconst x [c]) x [d])
+	// cond: c==d
+	// result: (MOVWconst [0])
+	for {
+		d := v.AuxInt
+		v_0 := v.Args[0]
+		if v_0.Op != OpARMSRLconst {
+			break
+		}
+		c := v_0.AuxInt
+		x := v_0.Args[0]
+		if x != v.Args[1] {
+			break
+		}
+		if !(c == d) {
+			break
+		}
+		v.reset(OpARMMOVWconst)
+		v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (BICshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (BICshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMBICshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
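+// Note (illustrative; not rulegen output): the BICshift*reg forms take
+// the shift amount from a register. Each rule above demotes them to the
+// immediate-shift form once that register is known to hold a constant:
+//   (BICshiftLLreg x y (MOVWconst [c])) -> (BICshiftLL x y [c])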
+func rewriteValueARM_OpARMCMOVWHSconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMOVWHSconst _ (FlagEQ) [c])
+ // cond:
+ // result: (MOVWconst [c])
+ for {
+ c := v.AuxInt
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMFlagEQ {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = c
+ return true
+ }
+ // match: (CMOVWHSconst x (FlagLT_ULT))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMFlagLT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVWHSconst _ (FlagLT_UGT) [c])
+ // cond:
+ // result: (MOVWconst [c])
+ for {
+ c := v.AuxInt
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMFlagLT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = c
+ return true
+ }
+ // match: (CMOVWHSconst x (FlagGT_ULT))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMFlagGT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVWHSconst _ (FlagGT_UGT) [c])
+ // cond:
+ // result: (MOVWconst [c])
+ for {
+ c := v.AuxInt
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMFlagGT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = c
+ return true
+ }
+ // match: (CMOVWHSconst x (InvertFlags flags) [c])
+ // cond:
+ // result: (CMOVWLSconst x flags [c])
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMInvertFlags {
+ break
+ }
+ flags := v_1.Args[0]
+ v.reset(OpARMCMOVWLSconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMOVWLSconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMOVWLSconst _ (FlagEQ) [c])
+ // cond:
+ // result: (MOVWconst [c])
+ for {
+ c := v.AuxInt
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMFlagEQ {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = c
+ return true
+ }
+ // match: (CMOVWLSconst _ (FlagLT_ULT) [c])
+ // cond:
+ // result: (MOVWconst [c])
+ for {
+ c := v.AuxInt
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMFlagLT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = c
+ return true
+ }
+ // match: (CMOVWLSconst x (FlagLT_UGT))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMFlagLT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVWLSconst _ (FlagGT_ULT) [c])
+ // cond:
+ // result: (MOVWconst [c])
+ for {
+ c := v.AuxInt
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMFlagGT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = c
+ return true
+ }
+ // match: (CMOVWLSconst x (FlagGT_UGT))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMFlagGT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMOVWLSconst x (InvertFlags flags) [c])
+ // cond:
+ // result: (CMOVWHSconst x flags [c])
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMInvertFlags {
+ break
+ }
+ flags := v_1.Args[0]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
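+// Note (illustrative; not rulegen output): CMOVWHSconst x flags [c]
+// selects the constant c when the unsigned higher-or-same (HS)
+// condition holds and keeps x otherwise; CMOVWLSconst is its
+// lower-or-same (LS) counterpart. Against one of the five flag
+// constants the selection is decided here at compile time, and
+// (InvertFlags flags) swaps the two forms, since inverting the
+// comparison operands turns HS into LS.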
+func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMP x (MOVWconst [c]))
+ // cond:
+ // result: (CMPconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMCMPconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMP (MOVWconst [c]) x)
+ // cond:
+ // result: (InvertFlags (CMPconst [c] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v0.AuxInt = c
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SLLconst [c] y))
+ // cond:
+ // result: (CMPshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMCMPshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMP (SLLconst [c] y) x)
+ // cond:
+ // result: (InvertFlags (CMPshiftLL x y [c]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPshiftLL, TypeFlags)
+ v0.AuxInt = c
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SRLconst [c] y))
+ // cond:
+ // result: (CMPshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMCMPshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMP (SRLconst [c] y) x)
+ // cond:
+ // result: (InvertFlags (CMPshiftRL x y [c]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPshiftRL, TypeFlags)
+ v0.AuxInt = c
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SRAconst [c] y))
+ // cond:
+ // result: (CMPshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMCMPshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMP (SRAconst [c] y) x)
+ // cond:
+ // result: (InvertFlags (CMPshiftRA x y [c]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPshiftRA, TypeFlags)
+ v0.AuxInt = c
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SLL y z))
+ // cond:
+ // result: (CMPshiftLLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMCMPshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (CMP (SLL y z) x)
+ // cond:
+ // result: (InvertFlags (CMPshiftLLreg x y z))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPshiftLLreg, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v0.AddArg(z)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SRL y z))
+ // cond:
+ // result: (CMPshiftRLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMCMPshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (CMP (SRL y z) x)
+ // cond:
+ // result: (InvertFlags (CMPshiftRLreg x y z))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPshiftRLreg, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v0.AddArg(z)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SRA y z))
+ // cond:
+ // result: (CMPshiftRAreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMCMPshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (CMP (SRA y z) x)
+ // cond:
+ // result: (InvertFlags (CMPshiftRAreg x y z))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPshiftRAreg, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v0.AddArg(z)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
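+// Note (illustrative; not rulegen output): ARM's CMP takes a register
+// as its first operand, with immediates and shifted registers allowed
+// only in the second, so a constant or shift in the first slot is moved
+// to the second and the flags wrapped in InvertFlags, which later rules
+// use to flip the sense of the condition:
+//   (CMP (MOVWconst [c]) x) -> (InvertFlags (CMPconst [c] x))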
+func rewriteValueARM_OpARMCMPD(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPD x (MOVDconst [0]))
+ // cond:
+ // result: (CMPD0 x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVDconst {
+ break
+ }
+ if v_1.AuxInt != 0 {
+ break
+ }
+ v.reset(OpARMCMPD0)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPF(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPF x (MOVFconst [0]))
+ // cond:
+ // result: (CMPF0 x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVFconst {
+ break
+ }
+ if v_1.AuxInt != 0 {
+ break
+ }
+ v.reset(OpARMCMPF0)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
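+// Note (illustrative; not rulegen output): floating-point compares
+// against a literal 0 use the dedicated one-operand forms, e.g.
+//   (CMPF x (MOVFconst [0])) -> (CMPF0 x)
+// which avoids materializing the zero constant in a register.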
+func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPconst (MOVWconst [x]) [y])
+ // cond: int32(x)==int32(y)
+ // result: (FlagEQ)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int32(x) == int32(y)) {
+ break
+ }
+ v.reset(OpARMFlagEQ)
+ return true
+ }
+ // match: (CMPconst (MOVWconst [x]) [y])
+ // cond: int32(x)<int32(y) && uint32(x)<uint32(y)
+ // result: (FlagLT_ULT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(OpARMFlagLT_ULT)
+ return true
+ }
+ // match: (CMPconst (MOVWconst [x]) [y])
+ // cond: int32(x)<int32(y) && uint32(x)>uint32(y)
+ // result: (FlagLT_UGT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(OpARMFlagLT_UGT)
+ return true
+ }
+ // match: (CMPconst (MOVWconst [x]) [y])
+ // cond: int32(x)>int32(y) && uint32(x)<uint32(y)
+ // result: (FlagGT_ULT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(OpARMFlagGT_ULT)
+ return true
+ }
+ // match: (CMPconst (MOVWconst [x]) [y])
+ // cond: int32(x)>int32(y) && uint32(x)>uint32(y)
+ // result: (FlagGT_UGT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(OpARMFlagGT_UGT)
+ return true
+ }
+ // match: (CMPconst (MOVBUreg _) [c])
+ // cond: 0xff < c
+ // result: (FlagLT_ULT)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVBUreg {
+ break
+ }
+ if !(0xff < c) {
+ break
+ }
+ v.reset(OpARMFlagLT_ULT)
+ return true
+ }
+ // match: (CMPconst (MOVHUreg _) [c])
+ // cond: 0xffff < c
+ // result: (FlagLT_ULT)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVHUreg {
+ break
+ }
+ if !(0xffff < c) {
+ break
+ }
+ v.reset(OpARMFlagLT_ULT)
+ return true
+ }
+ // match: (CMPconst (ANDconst _ [m]) [n])
+ // cond: 0 <= int32(m) && int32(m) < int32(n)
+ // result: (FlagLT_ULT)
+ for {
+ n := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ m := v_0.AuxInt
+ if !(0 <= int32(m) && int32(m) < int32(n)) {
+ break
+ }
+ v.reset(OpARMFlagLT_ULT)
+ return true
+ }
+ // match: (CMPconst (SRLconst _ [c]) [n])
+ // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)
+ // result: (FlagLT_ULT)
+ for {
+ n := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ if !(0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)) {
+ break
+ }
+ v.reset(OpARMFlagLT_ULT)
+ return true
+ }
+ return false
+}
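+// Note (illustrative; not rulegen output): when both sides of a
+// CMPconst are known, the compare folds into one of five flag
+// pseudo-values recording the signed and unsigned orderings together.
+// Worked example, assuming x=-1 and y=1: int32(-1) < 1 but
+// uint32(0xFFFFFFFF) > 1, so the result is FlagLT_UGT. The remaining
+// rules derive FlagLT_ULT from range facts alone, e.g. a zero-extended
+// byte is always unsigned-below any constant larger than 0xff.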
+func rewriteValueARM_OpARMCMPshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPshiftLL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v0.AuxInt = c
+ v1 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v1.AuxInt = d
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (CMPconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMCMPconst)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (InvertFlags (CMPconst [c] (SLL <x.Type> x y)))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v0.AuxInt = c
+ v1 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v1.AddArg(x)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (CMPshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMCMPshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPshiftRA (MOVWconst [c]) x [d])
+ // cond:
+ // result: (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v0.AuxInt = c
+ v1 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v1.AuxInt = d
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (CMPconst x [int64(int32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMCMPconst)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (InvertFlags (CMPconst [c] (SRA <x.Type> x y)))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v0.AuxInt = c
+ v1 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v1.AddArg(x)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (CMPshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMCMPshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v0.AuxInt = c
+ v1 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v1.AuxInt = d
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (CMPconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMCMPconst)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (InvertFlags (CMPconst [c] (SRL <x.Type> x y)))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v0.AuxInt = c
+ v1 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v1.AddArg(x)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (CMPshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMCMPshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
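+// Note (illustrative; not rulegen output): the CMPshift rules mirror
+// the CMP ones. A constant in the first slot forces the shift to be
+// re-materialized and the compare inverted; a constant in the shifted
+// slot folds away entirely, e.g. assuming c=1 and d=3:
+//   (CMPshiftLL x (MOVWconst [1]) [3]) -> (CMPconst x [8])
+// because uint32(1)<<3 == 8.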
+func rewriteValueARM_OpARMDIV(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (DIV (MOVWconst [c]) (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [int64(int32(c)/int32(d))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_1.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(int32(c) / int32(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMDIVU(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (DIVU x (MOVWconst [1]))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ if v_1.AuxInt != 1 {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (DIVU x (MOVWconst [c]))
+ // cond: isPowerOfTwo(c)
+ // result: (SRLconst [log2(c)] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v.AuxInt = log2(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (DIVU (MOVWconst [c]) (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [int64(uint32(c)/uint32(d))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_1.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(uint32(c) / uint32(d))
+ return true
+ }
+ return false
+}
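+// Note (illustrative; not rulegen output): unsigned division is
+// strength-reduced. Dividing by 1 is the identity, dividing by a power
+// of two becomes a logical right shift (e.g. c=8 gives (SRLconst [3] x),
+// since log2(8)==3), and two constants fold outright. Signed DIV above
+// only folds the all-constant case.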
+func rewriteValueARM_OpARMEqual(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Equal (FlagEQ))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagEQ {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (Equal (FlagLT_ULT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Equal (FlagLT_UGT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Equal (FlagGT_ULT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Equal (FlagGT_UGT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Equal (InvertFlags x))
+ // cond:
+ // result: (Equal x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMGreaterEqual(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GreaterEqual (FlagEQ))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagEQ {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterEqual (FlagLT_ULT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterEqual (FlagLT_UGT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterEqual (FlagGT_ULT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterEqual (FlagGT_UGT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterEqual (InvertFlags x))
+ // cond:
+ // result: (LessEqual x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMLessEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMGreaterEqualU(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GreaterEqualU (FlagEQ))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagEQ {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterEqualU (FlagLT_ULT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterEqualU (FlagLT_UGT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterEqualU (FlagGT_ULT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterEqualU (FlagGT_UGT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterEqualU (InvertFlags x))
+ // cond:
+ // result: (LessEqualU x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMLessEqualU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMGreaterThan(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GreaterThan (FlagEQ))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagEQ {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterThan (FlagLT_ULT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterThan (FlagLT_UGT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterThan (FlagGT_ULT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterThan (FlagGT_UGT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterThan (InvertFlags x))
+ // cond:
+ // result: (LessThan x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMLessThan)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMGreaterThanU(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GreaterThanU (FlagEQ))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagEQ {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterThanU (FlagLT_ULT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterThanU (FlagLT_UGT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterThanU (FlagGT_ULT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterThanU (FlagGT_UGT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterThanU (InvertFlags x))
+ // cond:
+ // result: (LessThanU x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMLessThanU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMLessEqual(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (LessEqual (FlagEQ))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagEQ {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (LessEqual (FlagLT_ULT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (LessEqual (FlagLT_UGT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (LessEqual (FlagGT_ULT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (LessEqual (FlagGT_UGT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (LessEqual (InvertFlags x))
+ // cond:
+ // result: (GreaterEqual x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMGreaterEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMLessEqualU(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (LessEqualU (FlagEQ))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagEQ {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (LessEqualU (FlagLT_ULT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (LessEqualU (FlagLT_UGT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (LessEqualU (FlagGT_ULT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (LessEqualU (FlagGT_UGT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (LessEqualU (InvertFlags x))
+ // cond:
+ // result: (GreaterEqualU x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMGreaterEqualU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMLessThan(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (LessThan (FlagEQ))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagEQ {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (LessThan (FlagLT_ULT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (LessThan (FlagLT_UGT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (LessThan (FlagGT_ULT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (LessThan (FlagGT_UGT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (LessThan (InvertFlags x))
+ // cond:
+ // result: (GreaterThan x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMGreaterThan)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMLessThanU(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (LessThanU (FlagEQ))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagEQ {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (LessThanU (FlagLT_ULT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (LessThanU (FlagLT_UGT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (LessThanU (FlagGT_ULT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (LessThanU (FlagGT_UGT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (LessThanU (InvertFlags x))
+ // cond:
+ // result: (GreaterThanU x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMGreaterThanU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
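+// Note (illustrative; not rulegen output): the nine predicate
+// pseudo-ops above (Equal through LessThanU) materialize a 0/1 result
+// from flags. Against the five flag constants the answer is immediate;
+// e.g. GreaterThanU(FlagLT_UGT) is 1 because the unsigned ordering
+// recorded there is "greater". The final rule of each maps
+// (InvertFlags x) to the operand-swapped dual: GreaterThan becomes
+// LessThan, LessEqualU becomes GreaterEqualU, and so on.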
+func rewriteValueARM_OpARMMOVBUload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVBUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMMOVBUload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVBUload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)
+ // result: x
+ for {
+ off := v.AuxInt
+ sym := v.Aux
ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVBstore {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
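+// Note (illustrative; not rulegen output): the load rules share three
+// shapes: an ADDconst base folds into the offset, a MOVWaddr base
+// merges symbol and offset when canMergeSym allows, and a load that
+// exactly shadows a previous store to the same pointer forwards the
+// stored value. For sub-word loads the forwarding also checks
+// signedness, so this unsigned byte load requires !isSigned(x.Type).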
+func rewriteValueARM_OpARMMOVBUreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBUreg x:(MOVBUload _ _))
+ // cond:
+ // result: (MOVWreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARMMOVBUload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (ANDconst [c] x))
+ // cond:
+ // result: (ANDconst [c&0xff] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMANDconst)
+ v.AuxInt = c & 0xff
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUreg _))
+ // cond:
+ // result: (MOVWreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARMMOVBUreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (MOVWconst [c]))
+ // cond:
+ // result: (MOVWconst [int64(uint8(c))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(uint8(c))
+ return true
+ }
+ return false
+}
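+// Note (illustrative; not rulegen output): MOVBUreg zero-extends a
+// byte. Unsigned byte loads and prior zero-extensions already guarantee
+// the upper bits, so they collapse to a plain MOVWreg; an ANDconst mask
+// narrows to the low byte; and a constant folds through uint8, e.g.
+// assuming c=0x1FF the result is (MOVWconst [0xFF]).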
+func rewriteValueARM_OpARMMOVBload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMMOVBload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
mem := v.Args[1]
- if !(is32BitInt(t)) {
+ if !(canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpARMMOVWload)
+ v.reset(OpARMMOVBload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)
+ // result: x
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVBstore {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBreg x:(MOVBload _ _))
+ // cond:
+ // result: (MOVWreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARMMOVBload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (ANDconst [c] x))
+ // cond: c & 0x80 == 0
+ // result: (ANDconst [c&0x7f] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c&0x80 == 0) {
+ break
+ }
+ v.reset(OpARMANDconst)
+ v.AuxInt = c & 0x7f
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBreg _))
+ // cond:
+ // result: (MOVWreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARMMOVBreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (MOVWconst [c]))
+ // cond:
+ // result: (MOVWconst [int64(int8(c))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(int8(c))
+ return true
+ }
+ return false
+}
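+// Note (illustrative; not rulegen output): MOVBreg is the
+// sign-extending dual. An AND mask with bit 7 clear already yields a
+// non-negative byte, so only the mask shrinks (c&0x7f); a constant
+// folds through int8, e.g. assuming c=0xFF the result is
+// (MOVWconst [-1]).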
+func rewriteValueARM_OpARMMOVBstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVBUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
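+// Note (illustrative; not rulegen output): a byte store writes only the
+// low 8 bits, so any sign- or zero-extension feeding it (MOVBreg,
+// MOVBUreg, MOVHreg, MOVHUreg) is redundant and the rules above store
+// the unextended value directly.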
+func rewriteValueARM_OpARMMOVDload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMMOVDload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVDload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVDstore {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVDstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (MOVDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVFload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVFload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVFload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMMOVFload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVFload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVFstore {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVFstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVFstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (MOVFstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVFstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVFstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHUload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVHUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMMOVHUload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVHUload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)
+ // result: x
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVHstore {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHUreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHUreg x:(MOVBUload _ _))
+ // cond:
+ // result: (MOVWreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARMMOVBUload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUload _ _))
+ // cond:
+ // result: (MOVWreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARMMOVHUload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (ANDconst [c] x))
+ // cond:
+ // result: (ANDconst [c&0xffff] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMANDconst)
+ v.AuxInt = c & 0xffff
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVBUreg _))
+ // cond:
+ // result: (MOVWreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARMMOVBUreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUreg _))
+ // cond:
+ // result: (MOVWreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARMMOVHUreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (MOVWconst [c]))
+ // cond:
+ // result: (MOVWconst [int64(uint16(c))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(uint16(c))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVHload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMMOVHload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVHload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)
+ // result: x
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVHstore {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHreg x:(MOVBload _ _))
+ // cond:
+ // result: (MOVWreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARMMOVBload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUload _ _))
+ // cond:
+ // result: (MOVWreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARMMOVBUload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHload _ _))
+ // cond:
+ // result: (MOVWreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARMMOVHload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (ANDconst [c] x))
+ // cond: c & 0x8000 == 0
+ // result: (ANDconst [c&0x7fff] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c&0x8000 == 0) {
+ break
+ }
+ v.reset(OpARMANDconst)
+ v.AuxInt = c & 0x7fff
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBreg _))
+ // cond:
+ // result: (MOVWreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARMMOVBreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUreg _))
+ // cond:
+ // result: (MOVWreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARMMOVBUreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHreg _))
+ // cond:
+ // result: (MOVWreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARMMOVHreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (MOVWconst [c]))
+ // cond:
+ // result: (MOVWconst [int64(int16(c))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(int16(c))
+ return true
+ }
+ return false
+}
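+// Note (illustrative; not rulegen output): as with bytes, a halfword
+// extension of a value whose upper bits are already correct degrades to
+// MOVWreg. MOVHreg of a zero-extended byte (MOVBUreg) qualifies because
+// bits 8-31 are zero, so bit 15 in particular is clear. The constant
+// case folds through int16.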
+func rewriteValueARM_OpARMMOVHstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (MOVHstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // cond:
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
+ // cond:
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
v.AddArg(mem)
return true
}
@@ -151,14 +6226,34 @@ func rewriteValueARM_OpLoad(v *Value, config *Config) bool {
func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (MOVWload [off1] {sym1} (ADDconst [off2] {sym2} ptr) mem)
+ // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMMOVWload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
v_0 := v.Args[0]
- if v_0.Op != OpARMADDconst {
+ if v_0.Op != OpARMMOVWaddr {
break
}
off2 := v_0.AuxInt
@@ -175,19 +6270,516 @@ func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWstore {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWload [0] {sym} (ADD ptr idx) mem)
+ // cond: sym == nil && !config.nacl
+ // result: (MOVWloadidx ptr idx mem)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADD {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(sym == nil && !config.nacl) {
+ break
+ }
+ v.reset(OpARMMOVWloadidx)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem)
+ // cond: sym == nil && !config.nacl
+ // result: (MOVWloadshiftLL ptr idx [c] mem)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDshiftLL {
+ break
+ }
+ c := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(sym == nil && !config.nacl) {
+ break
+ }
+ v.reset(OpARMMOVWloadshiftLL)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem)
+ // cond: sym == nil && !config.nacl
+ // result: (MOVWloadshiftRL ptr idx [c] mem)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDshiftRL {
+ break
+ }
+ c := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(sym == nil && !config.nacl) {
+ break
+ }
+ v.reset(OpARMMOVWloadshiftRL)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem)
+ // cond: sym == nil && !config.nacl
+ // result: (MOVWloadshiftRA ptr idx [c] mem)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDshiftRA {
+ break
+ }
+ c := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(sym == nil && !config.nacl) {
+ break
+ }
+ v.reset(OpARMMOVWloadshiftRA)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
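+// Note (illustrative; not rulegen output): beyond offset folding and
+// store-to-load forwarding, a word load at offset 0 with no symbol can
+// absorb an ADD or ADDshift* address computation into the indexed load
+// forms (MOVWloadidx, MOVWloadshiftLL/RL/RA). The !config.nacl guard
+// keeps these addressing modes off Native Client, whose sandbox
+// constrains how addresses may be formed.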
+func rewriteValueARM_OpARMMOVWloadidx(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWloadidx ptr idx (MOVWstoreidx ptr2 idx x _))
+ // cond: isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWstoreidx {
+ break
+ }
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] {
+ break
+ }
+ x := v_2.Args[2]
+ if !(isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWloadidx ptr (MOVWconst [c]) mem)
+ // cond:
+ // result: (MOVWload [c] ptr mem)
+ for {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ v.reset(OpARMMOVWload)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWloadidx (MOVWconst [c]) ptr mem)
+ // cond:
+ // result: (MOVWload [c] ptr mem)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ ptr := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVWload)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
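+ // An index that is itself a constant shift of a register moves the
+ // shift into the load's addressing mode instead.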
+ // match: (MOVWloadidx ptr (SLLconst idx [c]) mem)
+ // cond:
+ // result: (MOVWloadshiftLL ptr idx [c] mem)
+ for {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARMMOVWloadshiftLL)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWloadidx (SLLconst idx [c]) ptr mem)
+ // cond:
+ // result: (MOVWloadshiftLL ptr idx [c] mem)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ idx := v_0.Args[0]
+ ptr := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVWloadshiftLL)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWloadidx ptr (SRLconst idx [c]) mem)
+ // cond:
+ // result: (MOVWloadshiftRL ptr idx [c] mem)
+ for {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARMMOVWloadshiftRL)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWloadidx (SRLconst idx [c]) ptr mem)
+ // cond:
+ // result: (MOVWloadshiftRL ptr idx [c] mem)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ idx := v_0.Args[0]
+ ptr := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVWloadshiftRL)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWloadidx ptr (SRAconst idx [c]) mem)
+ // cond:
+ // result: (MOVWloadshiftRA ptr idx [c] mem)
+ for {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARMMOVWloadshiftRA)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWloadidx (SRAconst idx [c]) ptr mem)
+ // cond:
+ // result: (MOVWloadshiftRA ptr idx [c] mem)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ idx := v_0.Args[0]
+ ptr := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVWloadshiftRA)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWloadshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWloadshiftLL ptr idx [c] (MOVWstoreshiftLL ptr2 idx [d] x _))
+ // cond: c==d && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ c := v.AuxInt
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWstoreshiftLL {
+ break
+ }
+ d := v_2.AuxInt
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] {
+ break
+ }
+ x := v_2.Args[2]
+ if !(c == d && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
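+ // With a constant index the shift is evaluated at compile time: for
+ // example c=3, d=2 gives the fixed offset 3<<2 = 12, turning the
+ // shifted-index load into an ordinary offset load.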
+ // match: (MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem)
+ // cond:
+ // result: (MOVWload [int64(uint32(c)<<uint64(d))] ptr mem)
+ for {
+ d := v.AuxInt
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWloadshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWloadshiftRA ptr idx [c] (MOVWstoreshiftRA ptr2 idx [d] x _))
+ // cond: c==d && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ c := v.AuxInt
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWstoreshiftRA {
+ break
+ }
+ d := v_2.AuxInt
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] {
+ break
+ }
+ x := v_2.Args[2]
+ if !(c == d && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem)
+ // cond:
+ // result: (MOVWload [int64(int32(c)>>uint64(d))] ptr mem)
+ for {
+ d := v.AuxInt
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWloadshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWloadshiftRL ptr idx [c] (MOVWstoreshiftRL ptr2 idx [d] x _))
+ // cond: c==d && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ c := v.AuxInt
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWstoreshiftRL {
+ break
+ }
+ d := v_2.AuxInt
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] {
+ break
+ }
+ x := v_2.Args[2]
+ if !(c == d && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWloadshiftRL ptr (MOVWconst [c]) [d] mem)
+ // cond:
+ // result: (MOVWload [int64(uint32(c)>>uint64(d))] ptr mem)
+ for {
+ d := v.AuxInt
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
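+ // MOVWreg is a register-to-register word move: when it is the sole
+ // use of its argument it degrades to a no-op, and a constant source
+ // simply propagates through.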
+ // match: (MOVWreg x)
+ // cond: x.Uses == 1
+ // result: (MOVWnop x)
+ for {
+ x := v.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARMMOVWnop)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (MOVWconst [c]))
+ // cond:
+ // result: (MOVWconst [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = c
+ return true
+ }
return false
}
func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (MOVWstore [off1] {sym1} (ADDconst [off2] {sym2} ptr) val mem)
+ // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
v_0 := v.Args[0]
- if v_0.Op != OpARMADDconst {
+ if v_0.Op != OpARMMOVWaddr {
break
}
off2 := v_0.AuxInt
@@ -206,25 +6798,9415 @@ func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
v.AddArg(mem)
return true
}
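+ // The store rules below mirror the load rules above: a zero-offset
+ // store through an ADD, possibly with a shifted index, becomes a
+ // register-indexed or shifted-index store, again disabled on NaCl.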
+ // match: (MOVWstore [0] {sym} (ADD ptr idx) val mem)
+ // cond: sym == nil && !config.nacl
+ // result: (MOVWstoreidx ptr idx val mem)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADD {
+ break
+ }
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(sym == nil && !config.nacl) {
+ break
+ }
+ v.reset(OpARMMOVWstoreidx)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem)
+ // cond: sym == nil && !config.nacl
+ // result: (MOVWstoreshiftLL ptr idx [c] val mem)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDshiftLL {
+ break
+ }
+ c := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(sym == nil && !config.nacl) {
+ break
+ }
+ v.reset(OpARMMOVWstoreshiftLL)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem)
+ // cond: sym == nil && !config.nacl
+ // result: (MOVWstoreshiftRL ptr idx [c] val mem)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDshiftRL {
+ break
+ }
+ c := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(sym == nil && !config.nacl) {
+ break
+ }
+ v.reset(OpARMMOVWstoreshiftRL)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem)
+ // cond: sym == nil && !config.nacl
+ // result: (MOVWstoreshiftRA ptr idx [c] val mem)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDshiftRA {
+ break
+ }
+ c := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(sym == nil && !config.nacl) {
+ break
+ }
+ v.reset(OpARMMOVWstoreshiftRA)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWstoreidx(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
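+ // Indexed stores fold constant and shifted indexes exactly as the
+ // indexed loads above do.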
+ // match: (MOVWstoreidx ptr (MOVWconst [c]) val mem)
+ // cond:
+ // result: (MOVWstore [c] ptr val mem)
+ for {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreidx (MOVWconst [c]) ptr val mem)
+ // cond:
+ // result: (MOVWstore [c] ptr val mem)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ ptr := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr (SLLconst idx [c]) val mem)
+ // cond:
+ // result: (MOVWstoreshiftLL ptr idx [c] val mem)
+ for {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ idx := v_1.Args[0]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpARMMOVWstoreshiftLL)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreidx (SLLconst idx [c]) ptr val mem)
+ // cond:
+ // result: (MOVWstoreshiftLL ptr idx [c] val mem)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ idx := v_0.Args[0]
+ ptr := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpARMMOVWstoreshiftLL)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr (SRLconst idx [c]) val mem)
+ // cond:
+ // result: (MOVWstoreshiftRL ptr idx [c] val mem)
+ for {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ idx := v_1.Args[0]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpARMMOVWstoreshiftRL)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreidx (SRLconst idx [c]) ptr val mem)
+ // cond:
+ // result: (MOVWstoreshiftRL ptr idx [c] val mem)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ idx := v_0.Args[0]
+ ptr := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpARMMOVWstoreshiftRL)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr (SRAconst idx [c]) val mem)
+ // cond:
+ // result: (MOVWstoreshiftRA ptr idx [c] val mem)
+ for {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ idx := v_1.Args[0]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpARMMOVWstoreshiftRA)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreidx (SRAconst idx [c]) ptr val mem)
+ // cond:
+ // result: (MOVWstoreshiftRA ptr idx [c] val mem)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ idx := v_0.Args[0]
+ ptr := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpARMMOVWstoreshiftRA)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWstoreshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWstoreshiftLL ptr (MOVWconst [c]) [d] val mem)
+ // cond:
+ // result: (MOVWstore [int64(uint32(c)<<uint64(d))] ptr val mem)
+ for {
+ d := v.AuxInt
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWstoreshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWstoreshiftRA ptr (MOVWconst [c]) [d] val mem)
+ // cond:
+ // result: (MOVWstore [int64(int32(c)>>uint64(d))] ptr val mem)
+ for {
+ d := v.AuxInt
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWstoreshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWstoreshiftRL ptr (MOVWconst [c]) [d] val mem)
+ // cond:
+ // result: (MOVWstore [int64(uint32(c)>>uint64(d))] ptr val mem)
+ for {
+ d := v.AuxInt
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ val := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
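+ // The MUL rules strength-reduce multiplication by a constant: -1
+ // negates via reverse-subtract from 0, 0 and 1 are trivial, powers of
+ // two become shifts, constants adjacent to a power of two use an add
+ // or reverse-subtract with a shifted operand, and multiples of 3, 5,
+ // 7, or 9 by a power of two are built from shift-and-add pairs,
+ // avoiding the multiplier entirely.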
+ // match: (MUL x (MOVWconst [c]))
+ // cond: int32(c) == -1
+ // result: (RSBconst [0] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARMRSBconst)
+ v.AuxInt = 0
+ v.AddArg(x)
+ return true
+ }
+ // match: (MUL _ (MOVWconst [0]))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ if v_1.AuxInt != 0 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (MUL x (MOVWconst [1]))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ if v_1.AuxInt != 1 {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: isPowerOfTwo(c)
+ // result: (SLLconst [log2(c)] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = log2(c)
+ v.AddArg(x)
+ return true
+ }
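+ // For example x*5: c-1 = 4 is a power of two, so
+ // x*5 == x + (x<<2), one ADD with a shifted operand.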
+ // match: (MUL x (MOVWconst [c]))
+ // cond: isPowerOfTwo(c-1) && int32(c) >= 3
+ // result: (ADDshiftLL x x [log2(c-1)])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARMADDshiftLL)
+ v.AuxInt = log2(c - 1)
+ v.AddArg(x)
+ v.AddArg(x)
+ return true
+ }
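+ // For example x*7: c+1 = 8 is a power of two, so
+ // x*7 == (x<<3) - x, one reverse-subtract with a shifted operand.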
+ // match: (MUL x (MOVWconst [c]))
+ // cond: isPowerOfTwo(c+1) && int32(c) >= 7
+ // result: (RSBshiftLL x x [log2(c+1)])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARMRSBshiftLL)
+ v.AuxInt = log2(c + 1)
+ v.AddArg(x)
+ v.AddArg(x)
+ return true
+ }
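+ // The remaining cases factor out a power of two, for example
+ // x*12 == (x*3)<<2 == (x + (x<<1))<<2: two instructions in total.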
+ // match: (MUL x (MOVWconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+ // result: (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = log2(c / 3)
+ v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = 1
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+ // result: (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = log2(c / 5)
+ v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = 2
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+ // result: (SLLconst [log2(c/7)] (RSBshiftLL <x.Type> x x [3]))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = log2(c / 7)
+ v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+ // result: (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = log2(c / 9)
+ v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MUL (MOVWconst [c]) x)
+ // cond: int32(c) == -1
+ // result: (RSBconst [0] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARMRSBconst)
+ v.AuxInt = 0
+ v.AddArg(x)
+ return true
+ }
+ // match: (MUL (MOVWconst [0]) _)
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ if v_0.AuxInt != 0 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (MUL (MOVWconst [1]) x)
+ // cond:
+ // result: x
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ if v_0.AuxInt != 1 {
+ break
+ }
+ x := v.Args[1]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MUL (MOVWconst [c]) x)
+ // cond: isPowerOfTwo(c)
+ // result: (SLLconst [log2(c)] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = log2(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MUL (MOVWconst [c]) x)
+ // cond: isPowerOfTwo(c-1) && int32(c) >= 3
+ // result: (ADDshiftLL x x [log2(c-1)])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARMADDshiftLL)
+ v.AuxInt = log2(c - 1)
+ v.AddArg(x)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MUL (MOVWconst [c]) x)
+ // cond: isPowerOfTwo(c+1) && int32(c) >= 7
+ // result: (RSBshiftLL x x [log2(c+1)])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARMRSBshiftLL)
+ v.AuxInt = log2(c + 1)
+ v.AddArg(x)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MUL (MOVWconst [c]) x)
+ // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+ // result: (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = log2(c / 3)
+ v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = 1
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MUL (MOVWconst [c]) x)
+ // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+ // result: (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = log2(c / 5)
+ v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = 2
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MUL (MOVWconst [c]) x)
+ // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+ // result: (SLLconst [log2(c/7)] (RSBshiftLL <x.Type> x x [3]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = log2(c / 7)
+ v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MUL (MOVWconst [c]) x)
+ // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+ // result: (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = log2(c / 9)
+ v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MUL (MOVWconst [c]) (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [int64(int32(c*d))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_1.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(int32(c * d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
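+ // MULA computes x*c + a. These rules apply the same strength
+ // reduction as for MUL, folding the accumulator into a final ADD;
+ // c = -1 becomes a plain SUB, since x*-1 + a == a - x.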
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: int32(c) == -1
+ // result: (SUB a x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ a := v.Args[2]
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARMSUB)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULA _ (MOVWconst [0]) a)
+ // cond:
+ // result: a
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ if v_1.AuxInt != 0 {
+ break
+ }
+ a := v.Args[2]
+ v.reset(OpCopy)
+ v.Type = a.Type
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [1]) a)
+ // cond:
+ // result: (ADD x a)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ if v_1.AuxInt != 1 {
+ break
+ }
+ a := v.Args[2]
+ v.reset(OpARMADD)
+ v.AddArg(x)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: isPowerOfTwo(c)
+ // result: (ADD (SLLconst <x.Type> [log2(c)] x) a)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ a := v.Args[2]
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = log2(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: isPowerOfTwo(c-1) && int32(c) >= 3
+ // result: (ADD (ADDshiftLL <x.Type> x x [log2(c-1)]) a)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ a := v.Args[2]
+ if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = log2(c - 1)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: isPowerOfTwo(c+1) && int32(c) >= 7
+ // result: (ADD (RSBshiftLL <x.Type> x x [log2(c+1)]) a)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ a := v.Args[2]
+ if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = log2(c + 1)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+ // result: (ADD (SLLconst <x.Type> [log2(c/3)] (ADDshiftLL <x.Type> x x [1])) a)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ a := v.Args[2]
+ if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = log2(c / 3)
+ v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = 1
+ v1.AddArg(x)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+ // result: (ADD (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])) a)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ a := v.Args[2]
+ if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = log2(c / 5)
+ v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = 2
+ v1.AddArg(x)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+ // result: (ADD (SLLconst <x.Type> [log2(c/7)] (RSBshiftLL <x.Type> x x [3])) a)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ a := v.Args[2]
+ if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = log2(c / 7)
+ v1 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+ v1.AuxInt = 3
+ v1.AddArg(x)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+ // result: (ADD (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])) a)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ a := v.Args[2]
+ if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = log2(c / 9)
+ v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = 3
+ v1.AddArg(x)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: int32(c) == -1
+ // result: (SUB a x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ a := v.Args[2]
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARMSUB)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULA (MOVWconst [0]) _ a)
+ // cond:
+ // result: a
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ if v_0.AuxInt != 0 {
+ break
+ }
+ a := v.Args[2]
+ v.reset(OpCopy)
+ v.Type = a.Type
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA (MOVWconst [1]) x a)
+ // cond:
+ // result: (ADD x a)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ if v_0.AuxInt != 1 {
+ break
+ }
+ x := v.Args[1]
+ a := v.Args[2]
+ v.reset(OpARMADD)
+ v.AddArg(x)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: isPowerOfTwo(c)
+ // result: (ADD (SLLconst <x.Type> [log2(c)] x) a)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ a := v.Args[2]
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = log2(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: isPowerOfTwo(c-1) && int32(c) >= 3
+ // result: (ADD (ADDshiftLL <x.Type> x x [log2(c-1)]) a)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ a := v.Args[2]
+ if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = log2(c - 1)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: isPowerOfTwo(c+1) && int32(c) >= 7
+ // result: (ADD (RSBshiftLL <x.Type> x x [log2(c+1)]) a)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ a := v.Args[2]
+ if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = log2(c + 1)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+ // result: (ADD (SLLconst <x.Type> [log2(c/3)] (ADDshiftLL <x.Type> x x [1])) a)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ a := v.Args[2]
+ if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = log2(c / 3)
+ v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = 1
+ v1.AddArg(x)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+ // result: (ADD (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])) a)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ a := v.Args[2]
+ if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = log2(c / 5)
+ v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = 2
+ v1.AddArg(x)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+ // result: (ADD (SLLconst <x.Type> [log2(c/7)] (RSBshiftLL <x.Type> x x [3])) a)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ a := v.Args[2]
+ if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = log2(c / 7)
+ v1 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+ v1.AuxInt = 3
+ v1.AddArg(x)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+ // result: (ADD (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])) a)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ a := v.Args[2]
+ if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = log2(c / 9)
+ v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = 3
+ v1.AddArg(x)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) (MOVWconst [d]) a)
+ // cond:
+ // result: (ADDconst [int64(int32(c*d))] a)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_1.AuxInt
+ a := v.Args[2]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int64(int32(c * d))
+ v.AddArg(a)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVN(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
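+ // MVN is bitwise NOT: a constant operand folds to ^c, and a shifted
+ // operand folds into MVN's shifted-operand forms, whether the shift
+ // amount is a constant or a register.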
+ // match: (MVN (MOVWconst [c]))
+ // cond:
+ // result: (MOVWconst [^c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = ^c
+ return true
+ }
+ // match: (MVN (SLLconst [c] x))
+ // cond:
+ // result: (MVNshiftLL x [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMMVNshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (MVN (SRLconst [c] x))
+ // cond:
+ // result: (MVNshiftRL x [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMMVNshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (MVN (SRAconst [c] x))
+ // cond:
+ // result: (MVNshiftRA x [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMMVNshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (MVN (SLL x y))
+ // cond:
+ // result: (MVNshiftLLreg x y)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpARMMVNshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (MVN (SRL x y))
+ // cond:
+ // result: (MVNshiftRLreg x y)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpARMMVNshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (MVN (SRA x y))
+ // cond:
+ // result: (MVNshiftRAreg x y)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpARMMVNshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MVNshiftLL (MOVWconst [c]) [d])
+ // cond:
+ // result: (MOVWconst [^int64(uint32(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = ^int64(uint32(c) << uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MVNshiftLLreg x (MOVWconst [c]))
+ // cond:
+ // result: (MVNshiftLL x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMMVNshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MVNshiftRA (MOVWconst [c]) [d])
+ // cond:
+ // result: (MOVWconst [^int64(int32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = ^int64(int32(c) >> uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MVNshiftRAreg x (MOVWconst [c]))
+ // cond:
+ // result: (MVNshiftRA x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMMVNshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MVNshiftRL (MOVWconst [c]) [d])
+ // cond:
+ // result: (MOVWconst [^int64(uint32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = ^int64(uint32(c) >> uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MVNshiftRLreg x (MOVWconst [c]))
+ // cond:
+ // result: (MVNshiftRL x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMMVNshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMNotEqual(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
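+ // NotEqual materializes a boolean from comparison flags: only FlagEQ
+ // yields 0, each of the four less/greater flag states yields 1, and
+ // InvertFlags (the comparison with operands swapped) can be stripped,
+ // since swapping operands does not change (in)equality.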
+ // match: (NotEqual (FlagEQ))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagEQ {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (NotEqual (FlagLT_ULT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (NotEqual (FlagLT_UGT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (NotEqual (FlagGT_ULT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (NotEqual (FlagGT_UGT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (NotEqual (InvertFlags x))
+ // cond:
+ // result: (NotEqual x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMNotEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMOR(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
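+ // OR folds a constant operand into ORconst, and a shifted operand,
+ // whether shifted by a constant or by a register, into ORR's flexible
+ // second operand; x|x is x itself.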
+ // match: (OR (MOVWconst [c]) x)
+ // cond:
+ // result: (ORconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMORconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (OR x (MOVWconst [c]))
+ // cond:
+ // result: (ORconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMORconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (OR x (SLLconst [c] y))
+ // cond:
+ // result: (ORshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (OR (SLLconst [c] y) x)
+ // cond:
+ // result: (ORshiftLL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMORshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (OR x (SRLconst [c] y))
+ // cond:
+ // result: (ORshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (OR (SRLconst [c] y) x)
+ // cond:
+ // result: (ORshiftRL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMORshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (OR x (SRAconst [c] y))
+ // cond:
+ // result: (ORshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (OR (SRAconst [c] y) x)
+ // cond:
+ // result: (ORshiftRA x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMORshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (OR x (SLL y z))
+ // cond:
+ // result: (ORshiftLLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMORshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (OR (SLL y z) x)
+ // cond:
+ // result: (ORshiftLLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMORshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (OR x (SRL y z))
+ // cond:
+ // result: (ORshiftRLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMORshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (OR (SRL y z) x)
+ // cond:
+ // result: (ORshiftRLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMORshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (OR x (SRA y z))
+ // cond:
+ // result: (ORshiftRAreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMORshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (OR (SRA y z) x)
+ // cond:
+ // result: (ORshiftRAreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMORshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (OR x x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
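+ // ORconst identities: x|0 == x, x|-1 == -1, two constants fold, and
+ // nested ORconst combine because (x|c)|d == x|(c|d).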
+ // match: (ORconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORconst [c] _)
+ // cond: int32(c)==-1
+ // result: (MOVWconst [-1])
+ for {
+ c := v.AuxInt
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = -1
+ return true
+ }
+ // match: (ORconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [c|d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = c | d
+ return true
+ }
+ // match: (ORconst [c] (ORconst [d] x))
+ // cond:
+ // result: (ORconst [c|d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMORconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMORconst)
+ v.AuxInt = c | d
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
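+ // ORshiftLL x y [d] computes x | (y<<d). A constant x folds into
+ // ORconst of an explicit shift, a constant y lets the shifted value
+ // be computed at compile time, and y | (x<<d) where y == x<<c and
+ // c == d collapses to y, since ORR is idempotent.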
+ // match: (ORshiftLL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (ORconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ORconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMORconst)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftLL y:(SLLconst x [c]) x [d])
+ // cond: c==d
+ // result: y
+ for {
+ d := v.AuxInt
+ y := v.Args[0]
+ if y.Op != OpARMSLLconst {
+ break
+ }
+ c := y.AuxInt
+ x := y.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ORshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ORconst [c] (SLL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ORshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMORshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ORshiftRA (MOVWconst [c]) x [d])
+ // cond:
+ // result: (ORconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ORconst x [int64(int32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMORconst)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftRA y:(SRAconst x [c]) x [d])
+ // cond: c==d
+ // result: y
+ for {
+ d := v.AuxInt
+ y := v.Args[0]
+ if y.Op != OpARMSRAconst {
+ break
+ }
+ c := y.AuxInt
+ x := y.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ORshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ORconst [c] (SRA <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ORshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMORshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ORshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (ORconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ORconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMORconst)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftRL y:(SRLconst x [c]) x [d])
+ // cond: c==d
+ // result: y
+ for {
+ d := v.AuxInt
+ y := v.Args[0]
+ if y.Op != OpARMSRLconst {
+ break
+ }
+ c := y.AuxInt
+ x := y.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ORshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ORconst [c] (SRL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ORshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMORshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
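+ // RSB (reverse subtract) computes arg1 - arg0, so a constant first
+ // operand folds to SUBconst and a constant second operand to
+ // RSBconst. Shifted operands fold into the RSBshift or SUBshift
+ // forms depending on which side they appear on, and x - x is 0.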
+ // match: (RSB (MOVWconst [c]) x)
+ // cond:
+ // result: (SUBconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMSUBconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSB x (MOVWconst [c]))
+ // cond:
+ // result: (RSBconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMRSBconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSB x (SLLconst [c] y))
+ // cond:
+ // result: (RSBshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (RSB (SLLconst [c] y) x)
+ // cond:
+ // result: (SUBshiftLL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMSUBshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (RSB x (SRLconst [c] y))
+ // cond:
+ // result: (RSBshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (RSB (SRLconst [c] y) x)
+ // cond:
+ // result: (SUBshiftRL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMSUBshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (RSB x (SRAconst [c] y))
+ // cond:
+ // result: (RSBshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (RSB (SRAconst [c] y) x)
+ // cond:
+ // result: (SUBshiftRA x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMSUBshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (RSB x (SLL y z))
+ // cond:
+ // result: (RSBshiftLLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMRSBshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (RSB (SLL y z) x)
+ // cond:
+ // result: (SUBshiftLLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMSUBshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (RSB x (SRL y z))
+ // cond:
+ // result: (RSBshiftRLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMRSBshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (RSB (SRL y z) x)
+ // cond:
+ // result: (SUBshiftRLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMSUBshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (RSB x (SRA y z))
+ // cond:
+ // result: (RSBshiftRAreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMRSBshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (RSB (SRA y z) x)
+ // cond:
+ // result: (SUBshiftRAreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMSUBshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (RSB x x)
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
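+ // The RSBSshift* rules below are the flag-setting (S-suffix)
+ // counterparts of the plain RSBshift* rules and fold constants the
+ // same way.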
+ // match: (RSBSshiftLL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (SUBSconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (RSBSconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBSshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (SUBSconst [c] (SLL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (RSBSshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMRSBSshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBSshiftRA (MOVWconst [c]) x [d])
+ // cond:
+ // result: (SUBSconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (RSBSconst x [int64(int32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBSshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (SUBSconst [c] (SRA <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (RSBSshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMRSBSshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBSshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (SUBSconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (RSBSconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBSshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (SUBSconst [c] (SRL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (RSBSshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMRSBSshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [int64(int32(c-d))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(int32(c - d))
+ return true
+ }
+ // match: (RSBconst [c] (RSBconst [d] x))
+ // cond:
+ // result: (ADDconst [int64(int32(c-d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMRSBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int64(int32(c - d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSBconst [c] (ADDconst [d] x))
+ // cond:
+ // result: (RSBconst [int64(int32(c-d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int64(int32(c - d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSBconst [c] (SUBconst [d] x))
+ // cond:
+ // result: (RSBconst [int64(int32(c+d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int64(int32(c + d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
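+// Worked example for the RSBconst folds above: RSBconst [c] x computes
+// c-x (see gen/ARMOps.go), so with c=1, d=3 the first rule folds to
+// int64(int32(1-3)) = -2, and the second uses c-(d-x) = (c-d)+x to
+// collapse a reverse-subtract of a reverse-subtract into one ADDconst.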
+func rewriteValueARM_OpARMRSBshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBshiftLL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (SUBconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMSUBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (RSBconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+	// match: (RSBshiftLL (SLLconst x [c]) x [c])
+	// cond:
+	// result: (MOVWconst [0])
+	for {
+		c := v.AuxInt
+		v_0 := v.Args[0]
+		if v_0.Op != OpARMSLLconst {
+			break
+		}
+		if v_0.AuxInt != c {
+			break
+		}
+		x := v_0.Args[0]
+		if x != v.Args[1] {
+			break
+		}
+		v.reset(OpARMMOVWconst)
+		v.AuxInt = 0
+		return true
+	}
+ return false
+}
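+// Note on the zero identity above: RSBshiftLL computes arg1<<auxInt - arg0,
+// so the result is zero exactly when arg0 is the same shift of the same
+// value, i.e. the SLLconst must be matched as the first argument with its
+// constant equal to the auxInt. Matching it as arg1 would cancel x against
+// (x<<c)<<d instead. The SUBshift* and XORshift* identities below follow
+// the same operand discipline.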
+func rewriteValueARM_OpARMRSBshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (SUBconst [c] (SLL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMSUBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (RSBshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMRSBshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBshiftRA (MOVWconst [c]) x [d])
+ // cond:
+ // result: (SUBconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMSUBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (RSBconst x [int64(int32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+	// match: (RSBshiftRA (SRAconst x [c]) x [c])
+	// cond:
+	// result: (MOVWconst [0])
+	for {
+		c := v.AuxInt
+		v_0 := v.Args[0]
+		if v_0.Op != OpARMSRAconst {
+			break
+		}
+		if v_0.AuxInt != c {
+			break
+		}
+		x := v_0.Args[0]
+		if x != v.Args[1] {
+			break
+		}
+		v.reset(OpARMMOVWconst)
+		v.AuxInt = 0
+		return true
+	}
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (SUBconst [c] (SRA <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMSUBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (RSBshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMRSBshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (SUBconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMSUBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (RSBconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+	// match: (RSBshiftRL (SRLconst x [c]) x [c])
+	// cond:
+	// result: (MOVWconst [0])
+	for {
+		c := v.AuxInt
+		v_0 := v.Args[0]
+		if v_0.Op != OpARMSRLconst {
+			break
+		}
+		if v_0.AuxInt != c {
+			break
+		}
+		x := v_0.Args[0]
+		if x != v.Args[1] {
+			break
+		}
+		v.reset(OpARMMOVWconst)
+		v.AuxInt = 0
+		return true
+	}
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (SUBconst [c] (SRL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMSUBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (RSBshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMRSBshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSCconst [c] (ADDconst [d] x) flags)
+ // cond:
+ // result: (RSCconst [int64(int32(c-d))] x flags)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ flags := v.Args[1]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int64(int32(c - d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (RSCconst [c] (SUBconst [d] x) flags)
+ // cond:
+ // result: (RSCconst [int64(int32(c+d))] x flags)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ flags := v.Args[1]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int64(int32(c + d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSCshiftLL (MOVWconst [c]) x [d] flags)
+ // cond:
+ // result: (SBCconst [c] (SLLconst <x.Type> x [d]) flags)
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (RSCshiftLL x (MOVWconst [c]) [d] flags)
+ // cond:
+ // result: (RSCconst x [int64(uint32(c)<<uint64(d))] flags)
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSCshiftLLreg (MOVWconst [c]) x y flags)
+ // cond:
+ // result: (SBCconst [c] (SLL <x.Type> x y) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ flags := v.Args[3]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (RSCshiftLLreg x y (MOVWconst [c]) flags)
+ // cond:
+ // result: (RSCshiftLL x y [c] flags)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ flags := v.Args[3]
+ v.reset(OpARMRSCshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSCshiftRA (MOVWconst [c]) x [d] flags)
+ // cond:
+ // result: (SBCconst [c] (SRAconst <x.Type> x [d]) flags)
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (RSCshiftRA x (MOVWconst [c]) [d] flags)
+ // cond:
+ // result: (RSCconst x [int64(int32(c)>>uint64(d))] flags)
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSCshiftRAreg (MOVWconst [c]) x y flags)
+ // cond:
+ // result: (SBCconst [c] (SRA <x.Type> x y) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ flags := v.Args[3]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (RSCshiftRAreg x y (MOVWconst [c]) flags)
+ // cond:
+ // result: (RSCshiftRA x y [c] flags)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ flags := v.Args[3]
+ v.reset(OpARMRSCshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSCshiftRL (MOVWconst [c]) x [d] flags)
+ // cond:
+ // result: (SBCconst [c] (SRLconst <x.Type> x [d]) flags)
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (RSCshiftRL x (MOVWconst [c]) [d] flags)
+ // cond:
+ // result: (RSCconst x [int64(uint32(c)>>uint64(d))] flags)
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSCshiftRLreg (MOVWconst [c]) x y flags)
+ // cond:
+ // result: (SBCconst [c] (SRL <x.Type> x y) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ flags := v.Args[3]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (RSCshiftRLreg x y (MOVWconst [c]) flags)
+ // cond:
+ // result: (RSCshiftRL x y [c] flags)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ flags := v.Args[3]
+ v.reset(OpARMRSCshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SBC (MOVWconst [c]) x flags)
+ // cond:
+ // result: (RSCconst [c] x flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC x (MOVWconst [c]) flags)
+ // cond:
+ // result: (SBCconst [c] x flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC x (SLLconst [c] y) flags)
+ // cond:
+ // result: (SBCshiftLL x y [c] flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ flags := v.Args[2]
+ v.reset(OpARMSBCshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC (SLLconst [c] y) x flags)
+ // cond:
+ // result: (RSCshiftLL x y [c] flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC x (SRLconst [c] y) flags)
+ // cond:
+ // result: (SBCshiftRL x y [c] flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ flags := v.Args[2]
+ v.reset(OpARMSBCshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC (SRLconst [c] y) x flags)
+ // cond:
+ // result: (RSCshiftRL x y [c] flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC x (SRAconst [c] y) flags)
+ // cond:
+ // result: (SBCshiftRA x y [c] flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ flags := v.Args[2]
+ v.reset(OpARMSBCshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC (SRAconst [c] y) x flags)
+ // cond:
+ // result: (RSCshiftRA x y [c] flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC x (SLL y z) flags)
+ // cond:
+ // result: (SBCshiftLLreg x y z flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMSBCshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC (SLL y z) x flags)
+ // cond:
+ // result: (RSCshiftLLreg x y z flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC x (SRL y z) flags)
+ // cond:
+ // result: (SBCshiftRLreg x y z flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMSBCshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC (SRL y z) x flags)
+ // cond:
+ // result: (RSCshiftRLreg x y z flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC x (SRA y z) flags)
+ // cond:
+ // result: (SBCshiftRAreg x y z flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMSBCshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC (SRA y z) x flags)
+ // cond:
+ // result: (RSCshiftRAreg x y z flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
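+// SBC (subtract with carry) consumes the borrow that SUBS produces when
+// 64-bit subtraction is decomposed for 32-bit targets (see dec64.rules).
+// Since subtraction is not commutative, a constant or shifted value in the
+// first operand cannot simply be swapped across; the rules above instead
+// re-express those cases through the reverse forms RSCconst and RSCshift*.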
+func rewriteValueARM_OpARMSBCconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SBCconst [c] (ADDconst [d] x) flags)
+ // cond:
+ // result: (SBCconst [int64(int32(c-d))] x flags)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ flags := v.Args[1]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int64(int32(c - d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBCconst [c] (SUBconst [d] x) flags)
+ // cond:
+ // result: (SBCconst [int64(int32(c+d))] x flags)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ flags := v.Args[1]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int64(int32(c + d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SBCshiftLL (MOVWconst [c]) x [d] flags)
+ // cond:
+ // result: (RSCconst [c] (SLLconst <x.Type> x [d]) flags)
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBCshiftLL x (MOVWconst [c]) [d] flags)
+ // cond:
+ // result: (SBCconst x [int64(uint32(c)<<uint64(d))] flags)
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SBCshiftLLreg (MOVWconst [c]) x y flags)
+ // cond:
+ // result: (RSCconst [c] (SLL <x.Type> x y) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ flags := v.Args[3]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBCshiftLLreg x y (MOVWconst [c]) flags)
+ // cond:
+ // result: (SBCshiftLL x y [c] flags)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ flags := v.Args[3]
+ v.reset(OpARMSBCshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SBCshiftRA (MOVWconst [c]) x [d] flags)
+ // cond:
+ // result: (RSCconst [c] (SRAconst <x.Type> x [d]) flags)
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBCshiftRA x (MOVWconst [c]) [d] flags)
+ // cond:
+ // result: (SBCconst x [int64(int32(c)>>uint64(d))] flags)
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SBCshiftRAreg (MOVWconst [c]) x y flags)
+ // cond:
+ // result: (RSCconst [c] (SRA <x.Type> x y) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ flags := v.Args[3]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBCshiftRAreg x y (MOVWconst [c]) flags)
+ // cond:
+ // result: (SBCshiftRA x y [c] flags)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ flags := v.Args[3]
+ v.reset(OpARMSBCshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SBCshiftRL (MOVWconst [c]) x [d] flags)
+ // cond:
+ // result: (RSCconst [c] (SRLconst <x.Type> x [d]) flags)
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBCshiftRL x (MOVWconst [c]) [d] flags)
+ // cond:
+ // result: (SBCconst x [int64(uint32(c)>>uint64(d))] flags)
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SBCshiftRLreg (MOVWconst [c]) x y flags)
+ // cond:
+ // result: (RSCconst [c] (SRL <x.Type> x y) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ flags := v.Args[3]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBCshiftRLreg x y (MOVWconst [c]) flags)
+ // cond:
+ // result: (SBCshiftRL x y [c] flags)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ flags := v.Args[3]
+ v.reset(OpARMSBCshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SLL x (MOVWconst [c]))
+ // cond:
+ // result: (SLLconst x [c&31])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSLLconst)
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
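+// The &31 mask keeps the immediate in the 0-31 range an ARM immediate
+// shift can encode. Constant counts of 32 or more are expected to have
+// been folded to zero before lowering (Go's shift semantics are enforced
+// earlier), so in practice the mask only normalizes in-range counts.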
+func rewriteValueARM_OpARMSLLconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SLLconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [int64(uint32(d)<<uint64(c))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(uint32(d) << uint64(c))
+ return true
+ }
+ return false
+}
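+// Constant-fold example: the shift happens on the low 32 bits and the
+// result is stored zero-extended, so with d=1, c=31 the rule produces
+// int64(uint32(1)<<31) = 0x80000000 = 2147483648 rather than -2147483648.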
+func rewriteValueARM_OpARMSRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SRA x (MOVWconst [c]))
+ // cond:
+ // result: (SRAconst x [c&31])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSRAconst)
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRAcond(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SRAcond x _ (FlagEQ))
+ // cond:
+ // result: (SRAconst x [31])
+ for {
+ x := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMFlagEQ {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 31
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAcond x y (FlagLT_ULT))
+ // cond:
+ // result: (SRA x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMFlagLT_ULT {
+ break
+ }
+ v.reset(OpARMSRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SRAcond x _ (FlagLT_UGT))
+ // cond:
+ // result: (SRAconst x [31])
+ for {
+ x := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMFlagLT_UGT {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 31
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAcond x y (FlagGT_ULT))
+ // cond:
+ // result: (SRA x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMFlagGT_ULT {
+ break
+ }
+ v.reset(OpARMSRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SRAcond x _ (FlagGT_UGT))
+ // cond:
+ // result: (SRAconst x [31])
+ for {
+ x := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMFlagGT_UGT {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 31
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
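+// SRAcond decides on the unsigned half of the flags alone: ULT keeps the
+// variable SRA (ARM's ASR-by-register already sign-fills for counts of
+// 32..255), while EQ and UGT clamp the count to 31 so the result is pure
+// sign fill, preserving Go's arithmetic-shift semantics for oversized
+// counts.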
+func rewriteValueARM_OpARMSRAconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SRAconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [int64(int32(d)>>uint64(c))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(int32(d) >> uint64(c))
+ return true
+ }
+ return false
+}
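+// Example: with d=-8 and c=1 this folds to int64(int32(-8)>>1) = -4; the
+// int32 conversion makes the shift arithmetic (sign-propagating), unlike
+// the uint32 logical shift used by SRLconst below.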
+func rewriteValueARM_OpARMSRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SRL x (MOVWconst [c]))
+ // cond:
+ // result: (SRLconst x [c&31])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSRLconst)
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRLconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SRLconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [int64(uint32(d)>>uint64(c))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(uint32(d) >> uint64(c))
+ return true
+ }
+ return false
+}
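+// Logical counterpart of the SRAconst fold: for d=-8 the low 32 bits are
+// 0xFFFFFFF8, so with c=1 the rule yields int64(uint32(0xFFFFFFF8)>>1) =
+// 0x7FFFFFFC = 2147483644, shifting a zero into the top bit.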
+func rewriteValueARM_OpARMSUB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUB (MOVWconst [c]) x)
+ // cond:
+ // result: (RSBconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB x (MOVWconst [c]))
+ // cond:
+ // result: (SUBconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSUBconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB x (SLLconst [c] y))
+ // cond:
+ // result: (SUBshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUB (SLLconst [c] y) x)
+ // cond:
+ // result: (RSBshiftLL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMRSBshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUB x (SRLconst [c] y))
+ // cond:
+ // result: (SUBshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUB (SRLconst [c] y) x)
+ // cond:
+ // result: (RSBshiftRL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMRSBshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUB x (SRAconst [c] y))
+ // cond:
+ // result: (SUBshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUB (SRAconst [c] y) x)
+ // cond:
+ // result: (RSBshiftRA x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMRSBshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUB x (SLL y z))
+ // cond:
+ // result: (SUBshiftLLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMSUBshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUB (SLL y z) x)
+ // cond:
+ // result: (RSBshiftLLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMRSBshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUB x (SRL y z))
+ // cond:
+ // result: (SUBshiftRLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMSUBshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUB (SRL y z) x)
+ // cond:
+ // result: (RSBshiftRLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMRSBshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUB x (SRA y z))
+ // cond:
+ // result: (SUBshiftRAreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMSUBshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUB (SRA y z) x)
+ // cond:
+ // result: (RSBshiftRAreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMRSBshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUB x x)
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
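+// The long run of SUB rules above exploits ARM's shifted second operand:
+// a data-processing instruction can shift one source register for free,
+// so x - (y<<c) becomes a single SUBshiftLL. When the shifted value is on
+// the left, (y<<c) - x, the operands cannot be swapped under subtraction,
+// so those cases switch to the reverse-subtract forms RSBshift* instead.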
+func rewriteValueARM_OpARMSUBS(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBS (MOVWconst [c]) x)
+ // cond:
+ // result: (RSBSconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBS x (MOVWconst [c]))
+ // cond:
+ // result: (SUBSconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBS x (SLLconst [c] y))
+ // cond:
+ // result: (SUBSshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUBS (SLLconst [c] y) x)
+ // cond:
+ // result: (RSBSshiftLL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMRSBSshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUBS x (SRLconst [c] y))
+ // cond:
+ // result: (SUBSshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUBS (SRLconst [c] y) x)
+ // cond:
+ // result: (RSBSshiftRL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMRSBSshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUBS x (SRAconst [c] y))
+ // cond:
+ // result: (SUBSshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUBS (SRAconst [c] y) x)
+ // cond:
+ // result: (RSBSshiftRA x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMRSBSshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUBS x (SLL y z))
+ // cond:
+ // result: (SUBSshiftLLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMSUBSshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUBS (SLL y z) x)
+ // cond:
+ // result: (RSBSshiftLLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMRSBSshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUBS x (SRL y z))
+ // cond:
+ // result: (SUBSshiftRLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMSUBSshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUBS (SRL y z) x)
+ // cond:
+ // result: (RSBSshiftRLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMRSBSshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUBS x (SRA y z))
+ // cond:
+ // result: (SUBSshiftRAreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMSUBSshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUBS (SRA y z) x)
+ // cond:
+ // result: (RSBSshiftRAreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMRSBSshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ return false
+}
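+// SUBS mirrors SUB but also sets the condition flags (it is the
+// borrow-producing half of the 64-bit subtraction lowering), so the same
+// shifted-operand folds apply, landing on the S-suffixed SUBS/RSBS ops.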
+func rewriteValueARM_OpARMSUBSshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBSshiftLL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (RSBSconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (SUBSconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBSshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (RSBSconst [c] (SLL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (SUBSshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMSUBSshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBSshiftRA (MOVWconst [c]) x [d])
+ // cond:
+ // result: (RSBSconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (SUBSconst x [int64(int32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBSshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (RSBSconst [c] (SRA <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (SUBSshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMSUBSshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBSshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (RSBSconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (SUBSconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBSshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (RSBSconst [c] (SRL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (SUBSshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMSUBSshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [int64(int32(d-c))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(int32(d - c))
+ return true
+ }
+ // match: (SUBconst [c] (SUBconst [d] x))
+ // cond:
+ // result: (ADDconst [int64(int32(-c-d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int64(int32(-c - d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (ADDconst [d] x))
+ // cond:
+ // result: (ADDconst [int64(int32(-c+d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int64(int32(-c + d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (RSBconst [d] x))
+ // cond:
+ // result: (RSBconst [int64(int32(-c+d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMRSBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int64(int32(-c + d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
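+// Worked example: (SUBconst [c] (SUBconst [d] x)) is (x-d)-c = x-(c+d),
+// re-expressed as ADDconst [-c-d]; with c=2, d=3 that is ADDconst [-5].
+// The int32 conversions keep the arithmetic in 32-bit two's complement
+// before widening back into the int64 AuxInt field.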
+func rewriteValueARM_OpARMSUBshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBshiftLL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (RSBconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (SUBconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+	// match: (SUBshiftLL (SLLconst x [c]) x [c])
+	// cond:
+	// result: (MOVWconst [0])
+	for {
+		c := v.AuxInt
+		v_0 := v.Args[0]
+		if v_0.Op != OpARMSLLconst {
+			break
+		}
+		if v_0.AuxInt != c {
+			break
+		}
+		x := v_0.Args[0]
+		if x != v.Args[1] {
+			break
+		}
+		v.reset(OpARMMOVWconst)
+		v.AuxInt = 0
+		return true
+	}
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (RSBconst [c] (SLL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (SUBshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMSUBshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBshiftRA (MOVWconst [c]) x [d])
+ // cond:
+ // result: (RSBconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (SUBconst x [int64(int32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+	// match: (SUBshiftRA (SRAconst x [c]) x [c])
+	// cond:
+	// result: (MOVWconst [0])
+	for {
+		c := v.AuxInt
+		v_0 := v.Args[0]
+		if v_0.Op != OpARMSRAconst {
+			break
+		}
+		if v_0.AuxInt != c {
+			break
+		}
+		x := v_0.Args[0]
+		if x != v.Args[1] {
+			break
+		}
+		v.reset(OpARMMOVWconst)
+		v.AuxInt = 0
+		return true
+	}
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (RSBconst [c] (SRA <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (SUBshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMSUBshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (RSBconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (SUBconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+	// match: (SUBshiftRL (SRLconst x [c]) x [c])
+	// cond:
+	// result: (MOVWconst [0])
+	for {
+		c := v.AuxInt
+		v_0 := v.Args[0]
+		if v_0.Op != OpARMSRLconst {
+			break
+		}
+		if v_0.AuxInt != c {
+			break
+		}
+		x := v_0.Args[0]
+		if x != v.Args[1] {
+			break
+		}
+		v.reset(OpARMMOVWconst)
+		v.AuxInt = 0
+		return true
+	}
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (RSBconst [c] (SRL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (SUBshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMSUBshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XOR (MOVWconst [c]) x)
+ // cond:
+ // result: (XORconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMXORconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XOR x (MOVWconst [c]))
+ // cond:
+ // result: (XORconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMXORconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XOR x (SLLconst [c] y))
+ // cond:
+ // result: (XORshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (XOR (SLLconst [c] y) x)
+ // cond:
+ // result: (XORshiftLL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMXORshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (XOR x (SRLconst [c] y))
+ // cond:
+ // result: (XORshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (XOR (SRLconst [c] y) x)
+ // cond:
+ // result: (XORshiftRL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMXORshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (XOR x (SRAconst [c] y))
+ // cond:
+ // result: (XORshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (XOR (SRAconst [c] y) x)
+ // cond:
+ // result: (XORshiftRA x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMXORshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (XOR x (SLL y z))
+ // cond:
+ // result: (XORshiftLLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMXORshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (XOR (SLL y z) x)
+ // cond:
+ // result: (XORshiftLLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMXORshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (XOR x (SRL y z))
+ // cond:
+ // result: (XORshiftRLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMXORshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (XOR (SRL y z) x)
+ // cond:
+ // result: (XORshiftRLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMXORshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (XOR x (SRA y z))
+ // cond:
+ // result: (XORshiftRAreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMXORshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (XOR (SRA y z) x)
+ // cond:
+ // result: (XORshiftRAreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMXORshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (XOR x x)
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
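+// XOR is commutative, so unlike SUB both operand orders of a shifted
+// argument fold to the same XORshift* form, and (XOR x x) cancels to
+// zero directly; no reverse variant is needed.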
+func rewriteValueARM_OpARMXORconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [c^d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = c ^ d
+ return true
+ }
+ // match: (XORconst [c] (XORconst [d] x))
+ // cond:
+ // result: (XORconst [c^d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMXORconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMXORconst)
+ v.AuxInt = c ^ d
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
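+// The last rule is associativity: c ^ (d ^ x) = (c^d) ^ x, so stacked
+// XORconst ops collapse into one; e.g. c=12 (1100b), d=10 (1010b) gives
+// XORconst [6] (0110b).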
+func rewriteValueARM_OpARMXORshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORshiftLL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (XORconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMXORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (XORconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMXORconst)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+	// match: (XORshiftLL (SLLconst x [c]) x [d])
+	// cond: c==d
+	// result: (MOVWconst [0])
+	for {
+		d := v.AuxInt
+		v_0 := v.Args[0]
+		if v_0.Op != OpARMSLLconst {
+			break
+		}
+		c := v_0.AuxInt
+		x := v_0.Args[0]
+		if x != v.Args[1] {
+			break
+		}
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
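+
+// Note: the last rule uses the identity v^v == 0: when both XOR operands are
+// the same value shifted left by the same constant, e.g. (x<<3) ^ (x<<3), the
+// whole expression folds to the constant 0.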
+func rewriteValueARM_OpARMXORshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (XORconst [c] (SLL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMXORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (XORshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMXORshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORshiftRA (MOVWconst [c]) x [d])
+ // cond:
+ // result: (XORconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMXORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (XORconst x [int64(int32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMXORconst)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+	// match: (XORshiftRA (SRAconst x [c]) x [d])
+	// cond: c==d
+	// result: (MOVWconst [0])
+	for {
+		d := v.AuxInt
+		v_0 := v.Args[0]
+		if v_0.Op != OpARMSRAconst {
+			break
+		}
+		c := v_0.AuxInt
+		x := v_0.Args[0]
+		if x != v.Args[1] {
+			break
+		}
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (XORconst [c] (SRA <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMXORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (XORshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMXORshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (XORconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMXORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (XORconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMXORconst)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+	// match: (XORshiftRL (SRLconst x [c]) x [d])
+	// cond: c==d
+	// result: (MOVWconst [0])
+	for {
+		d := v.AuxInt
+		v_0 := v.Args[0]
+		if v_0.Op != OpARMSRLconst {
+			break
+		}
+		c := v_0.AuxInt
+		x := v_0.Args[0]
+		if x != v.Args[1] {
+			break
+		}
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (XORconst [c] (SRL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMXORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (XORshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMXORshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
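+
+// Note: the *shiftreg ops keep the shift amount in a register; when that amount
+// turns out to be a constant (MOVWconst [c]), the rules above demote them to
+// the immediate-shift forms, e.g. (XORshiftRLreg x y (MOVWconst [8])) becomes
+// (XORshiftRL x y [8]).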
+func rewriteValueARM_OpAdd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add16 x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAdd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32 x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAdd32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32F x y)
+ // cond:
+ // result: (ADDF x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADDF)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAdd32carry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32carry x y)
+ // cond:
+ // result: (ADDS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADDS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAdd32withcarry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32withcarry x y c)
+ // cond:
+ // result: (ADC x y c)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ c := v.Args[2]
+ v.reset(OpARMADC)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(c)
+ return true
+ }
+}
+func rewriteValueARM_OpAdd64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add64F x y)
+ // cond:
+ // result: (ADDD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADDD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAdd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add8 x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAddPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AddPtr x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAddr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Addr {sym} base)
+ // cond:
+ // result: (MOVWaddr {sym} base)
+ for {
+ sym := v.Aux
+ base := v.Args[0]
+ v.reset(OpARMMOVWaddr)
+ v.Aux = sym
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueARM_OpAnd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And16 x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMAND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAnd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And32 x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMAND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAnd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And8 x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMAND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAndB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AndB x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMAND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpClosureCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ClosureCall [argwid] entry closure mem)
+ // cond:
+ // result: (CALLclosure [argwid] entry closure mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ closure := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMCALLclosure)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(closure)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpCom16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com16 x)
+ // cond:
+ // result: (MVN x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMVN)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCom32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com32 x)
+ // cond:
+ // result: (MVN x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMVN)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCom8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com8 x)
+ // cond:
+ // result: (MVN x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMVN)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpConst16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const16 [val])
+ // cond:
+ // result: (MOVWconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM_OpConst32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32 [val])
+ // cond:
+ // result: (MOVWconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM_OpConst32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32F [val])
+ // cond:
+ // result: (MOVFconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARMMOVFconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM_OpConst64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const64F [val])
+ // cond:
+ // result: (MOVDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARMMOVDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM_OpConst8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const8 [val])
+ // cond:
+ // result: (MOVWconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM_OpConstBool(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstBool [b])
+ // cond:
+ // result: (MOVWconst [b])
+ for {
+ b := v.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = b
+ return true
+ }
+}
+func rewriteValueARM_OpConstNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstNil)
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+}
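+
+// Note: every integer-like constant (Const8/Const16/Const32, ConstBool,
+// ConstNil) ends up as a MOVWconst in a 32-bit register; only the float
+// constants get their own MOVFconst/MOVDconst forms.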
+func rewriteValueARM_OpConvert(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Convert x mem)
+ // cond:
+ // result: (MOVWconvert x mem)
+ for {
+ x := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMMOVWconvert)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto32 x)
+ // cond:
+ // result: (MOVFW x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVFW)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32Fto32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto32U x)
+ // cond:
+ // result: (MOVFWU x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVFWU)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32Fto64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto64F x)
+ // cond:
+ // result: (MOVFD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVFD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32Uto32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Uto32F x)
+ // cond:
+ // result: (MOVWUF x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVWUF)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32Uto64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Uto64F x)
+ // cond:
+ // result: (MOVWUD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVWUD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32to32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to32F x)
+ // cond:
+ // result: (MOVWF x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVWF)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32to64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to64F x)
+ // cond:
+ // result: (MOVWD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVWD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt64Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32 x)
+ // cond:
+ // result: (MOVDW x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVDW)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt64Fto32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32F x)
+ // cond:
+ // result: (MOVDF x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVDF)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt64Fto32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32U x)
+ // cond:
+ // result: (MOVDWU x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVDWU)
+ v.AddArg(x)
+ return true
+ }
+}
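+
+// Note: the Cvt* ops appear to map one-to-one onto ARM's floating-point
+// convert operations; the mnemonic encodes source and destination, e.g. MOVFW
+// is float32-to-int32 and MOVWUF is uint32-to-float32, with U marking the
+// unsigned variant.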
+func rewriteValueARM_OpDeferCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (DeferCall [argwid] mem)
+ // cond:
+ // result: (CALLdefer [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(OpARMCALLdefer)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16 x y)
+ // cond:
+ // result: (DIV (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIV)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16u x y)
+ // cond:
+ // result: (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIVU)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32 x y)
+ // cond:
+ // result: (DIV x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIV)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32F x y)
+ // cond:
+ // result: (DIVF x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIVF)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32u x y)
+ // cond:
+ // result: (DIVU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIVU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64F x y)
+ // cond:
+ // result: (DIVD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIVD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8 x y)
+ // cond:
+ // result: (DIV (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIV)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8u x y)
+ // cond:
+ // result: (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIVU)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
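+
+// Note: division here is only defined at 32 bits, so the 8- and 16-bit cases
+// are widened first: the signed variants sign-extend both operands (e.g.
+// int16(-7)/int16(2) is computed as int32(-7)/int32(2) = -3), the unsigned
+// variants zero-extend.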
+func rewriteValueARM_OpEq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq16 x y)
+ // cond:
+ // result: (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32 x y)
+ // cond:
+ // result: (Equal (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32F x y)
+ // cond:
+ // result: (Equal (CMPF x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq64F x y)
+ // cond:
+ // result: (Equal (CMPD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq8 x y)
+ // cond:
+ // result: (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqB x y)
+ // cond:
+ // result: (XORconst [1] (XOR <config.fe.TypeBool()> x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMXORconst)
+ v.AuxInt = 1
+ v0 := b.NewValue0(v.Line, OpARMXOR, config.fe.TypeBool())
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
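+
+// Note: for booleans (0 or 1), x == y is computed as 1 ^ (x^y): the inner XOR
+// is 0 exactly when the operands match, and XORing with 1 flips that into the
+// boolean result. E.g. x=1, y=1: 1^(1^1) = 1 (true).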
+func rewriteValueARM_OpEqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqPtr x y)
+ // cond:
+ // result: (Equal (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16 x y)
+ // cond:
+ // result: (GreaterEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16U x y)
+ // cond:
+ // result: (GreaterEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32 x y)
+ // cond:
+ // result: (GreaterEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32F x y)
+ // cond:
+ // result: (GreaterEqual (CMPF x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32U x y)
+ // cond:
+ // result: (GreaterEqualU (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64F x y)
+ // cond:
+ // result: (GreaterEqual (CMPD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8 x y)
+ // cond:
+ // result: (GreaterEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8U x y)
+ // cond:
+ // result: (GreaterEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGetClosurePtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GetClosurePtr)
+ // cond:
+ // result: (LoweredGetClosurePtr)
+ for {
+ v.reset(OpARMLoweredGetClosurePtr)
+ return true
+ }
+}
+func rewriteValueARM_OpGoCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GoCall [argwid] mem)
+ // cond:
+ // result: (CALLgo [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(OpARMCALLgo)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16 x y)
+ // cond:
+ // result: (GreaterThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16U x y)
+ // cond:
+ // result: (GreaterThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32 x y)
+ // cond:
+ // result: (GreaterThan (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32F x y)
+ // cond:
+ // result: (GreaterThan (CMPF x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32U x y)
+ // cond:
+ // result: (GreaterThanU (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64F x y)
+ // cond:
+ // result: (GreaterThan (CMPD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8 x y)
+ // cond:
+ // result: (GreaterThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8U x y)
+ // cond:
+ // result: (GreaterThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16 x y)
+ // cond:
+ // result: (SRAconst (MUL <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 16
+ v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16u x y)
+ // cond:
+ // result: (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRLconst)
+ v.AuxInt = 16
+ v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32 x y)
+ // cond:
+ // result: (HMUL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMHMUL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32u x y)
+ // cond:
+ // result: (HMULU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMHMULU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8 x y)
+ // cond:
+ // result: (SRAconst (MUL <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 8
+ v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt16())
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8u x y)
+ // cond:
+ // result: (SRLconst (MUL <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRLconst)
+ v.AuxInt = 8
+ v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt16())
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpInterCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (InterCall [argwid] entry mem)
+ // cond:
+ // result: (CALLinter [argwid] entry mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMCALLinter)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpIsInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsInBounds idx len)
+ // cond:
+ // result: (LessThanU (CMP idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpIsNonNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsNonNil ptr)
+ // cond:
+ // result: (NotEqual (CMPconst [0] ptr))
+ for {
+ ptr := v.Args[0]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(ptr)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpIsSliceInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsSliceInBounds idx len)
+ // cond:
+ // result: (LessEqualU (CMP idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16 x y)
+ // cond:
+ // result: (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16U x y)
+ // cond:
+ // result: (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32 x y)
+ // cond:
+ // result: (LessEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32F x y)
+ // cond:
+ // result: (GreaterEqual (CMPF y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32U x y)
+ // cond:
+ // result: (LessEqualU (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64F x y)
+ // cond:
+ // result: (GreaterEqual (CMPD y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
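+
+// Note: the floating-point x <= y is rewritten as y >= x with the CMPF/CMPD
+// operands swapped; presumably this is so the GE condition, which is false for
+// unordered (NaN) operands, gives the required result when either operand is
+// NaN.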
+func rewriteValueARM_OpLeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8 x y)
+ // cond:
+ // result: (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8U x y)
+ // cond:
+ // result: (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16 x y)
+ // cond:
+ // result: (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessThan)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16U x y)
+ // cond:
+ // result: (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32 x y)
+ // cond:
+ // result: (LessThan (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessThan)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32F x y)
+ // cond:
+ // result: (GreaterThan (CMPF y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32U x y)
+ // cond:
+ // result: (LessThanU (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64F x y)
+ // cond:
+ // result: (GreaterThan (CMPD y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8 x y)
+ // cond:
+ // result: (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessThan)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8U x y)
+ // cond:
+ // result: (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLoad(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpARMMOVBUload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && isSigned(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is8BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpARMMOVBload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && !isSigned(t))
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is8BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpARMMOVBUload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && isSigned(t))
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is16BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpARMMOVHload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && !isSigned(t))
+ // result: (MOVHUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is16BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpARMMOVHUload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) || isPtr(t))
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpARMMOVWload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVFload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpARMMOVFload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpARMMOVDload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
return false
}
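+
+// Note: Load dispatches purely on the loaded type: booleans and unsigned
+// 8/16-bit integers use the zero-extending MOVBUload/MOVHUload, signed ones the
+// sign-extending MOVBload/MOVHload, 32-bit integers and pointers MOVWload, and
+// floats MOVFload/MOVDload.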
+func rewriteValueARM_OpLrot16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot16 <t> x [c])
+ // cond:
+ // result: (OR (SLLconst <t> x [c&15]) (SRLconst <t> x [16-c&15]))
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpARMOR)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
+ v0.AuxInt = c & 15
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
+ v1.AuxInt = 16 - c&15
+ v1.AddArg(x)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpLrot32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot32 x [c])
+ // cond:
+ // result: (SRRconst x [32-c&31])
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpARMSRRconst)
+ v.AuxInt = 32 - c&31
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpLrot8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot8 <t> x [c])
+ // cond:
+ // result: (OR (SLLconst <t> x [c&7]) (SRLconst <t> x [8-c&7]))
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpARMOR)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
+ v0.AuxInt = c & 7
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
+ v1.AuxInt = 8 - c&7
+ v1.AddArg(x)
+ v.AddArg(v1)
+ return true
+ }
+}
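+
+// Note: a left-rotation by c is the same bits as a right-rotation by width-c.
+// Lrot32 maps to the native rotate-right, e.g. (Lrot32 x [8]) becomes
+// (SRRconst x [24]); the 8- and 16-bit variants, which have no native rotate,
+// are built from a shift-left/shift-right pair ORed together.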
+func rewriteValueARM_OpLsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x16 x y)
+ // cond:
+ // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
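+
+// Note: for variable shift counts the result is built as a conditional move:
+// (CMOVWHSconst shifted (CMPconst [256] count) [0]) yields 0 when the count is
+// unsigned-higher-or-same than 256. ARM register-specified shifts consume only
+// the bottom byte of the count register (counts 32..255 already produce 0 for
+// SLL), so the explicit guard is only needed once the count can reach 256.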
+func rewriteValueARM_OpLsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x32 x y)
+ // cond:
+ // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh16x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
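+
+// Note: only constant 64-bit shift counts are handled here: in-range counts
+// become an immediate shift, e.g. (Lsh16x64 x (Const64 [3])) becomes
+// (SLLconst x [3]), and counts >= the operand width fold to the zero constant
+// of that width.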
+func rewriteValueARM_OpLsh16x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x8 x y)
+ // cond:
+ // result: (SLL x (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x16 x y)
+ // cond:
+ // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh32x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x32 x y)
+ // cond:
+ // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh32x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x8 x y)
+ // cond:
+ // result: (SLL x (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x16 x y)
+ // cond:
+ // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x32 x y)
+ // cond:
+ // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh8x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x8 x y)
+ // cond:
+ // result: (SLL x (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
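+
+// Note: when the shift count is itself only 8 bits wide it can never reach
+// 256, so the x8 variants skip the CMOVWHSconst guard and emit a bare SLL on
+// the zero-extended count.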
+func rewriteValueARM_OpMod16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16 x y)
+ // cond:
+ // result: (MOD (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMOD)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpMod16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16u x y)
+ // cond:
+ // result: (MODU (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMODU)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpMod32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32 x y)
+ // cond:
+ // result: (MOD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMOD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpMod32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32u x y)
+ // cond:
+ // result: (MODU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMODU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpMod8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8 x y)
+ // cond:
+ // result: (MOD (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMOD)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpMod8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8u x y)
+ // cond:
+ // result: (MODU (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMODU)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpMove(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Move [s] _ _ mem)
+ // cond: SizeAndAlign(s).Size() == 0
+ // result: mem
+ for {
+ s := v.AuxInt
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = mem.Type
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 1
+ // result: (MOVBstore dst (MOVBUload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 1) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
+ // result: (MOVHstore dst (MOVHUload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 2
+ // result: (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 2) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = 1
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v0.AuxInt = 1
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
+ break
+ }
+ v.reset(OpARMMOVWstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARMMOVWload, config.fe.TypeUInt32())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
+ // result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = 2
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
+ v0.AuxInt = 2
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMMOVHstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 4
+ // result: (MOVBstore [3] dst (MOVBUload [3] src mem) (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 4) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = 3
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v0.AuxInt = 3
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v1.AuxInt = 2
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v2.AuxInt = 2
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v3.AuxInt = 1
+ v3.AddArg(dst)
+ v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v4.AuxInt = 1
+ v4.AddArg(src)
+ v4.AddArg(mem)
+ v3.AddArg(v4)
+ v5 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v5.AddArg(dst)
+ v6 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v6.AddArg(src)
+ v6.AddArg(mem)
+ v5.AddArg(v6)
+ v5.AddArg(mem)
+ v3.AddArg(v5)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 3
+ // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 3) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = 2
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v0.AuxInt = 2
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v1.AuxInt = 1
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v2.AuxInt = 1
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v3.AddArg(dst)
+ v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+ v4.AddArg(src)
+ v4.AddArg(mem)
+ v3.AddArg(v4)
+ v3.AddArg(mem)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice
+ // result: (DUFFCOPY [8 * (128 - int64(SizeAndAlign(s).Size()/4))] dst src mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(OpARMDUFFCOPY)
+ v.AuxInt = 8 * (128 - int64(SizeAndAlign(s).Size()/4))
+ v.AddArg(dst)
+ v.AddArg(src)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%4 != 0
+ // result: (LoweredMove [SizeAndAlign(s).Align()] dst src (ADDconst <src.Type> src [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)]) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !((SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%4 != 0) {
+ break
+ }
+ v.reset(OpARMLoweredMove)
+ v.AuxInt = SizeAndAlign(s).Align()
+ v.AddArg(dst)
+ v.AddArg(src)
+ v0 := b.NewValue0(v.Line, OpARMADDconst, src.Type)
+ v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+ v0.AddArg(src)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
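+// A note on the DUFFCOPY AuxInt above, assuming the ARM duffcopy routine
+// is a chain of 128 copy units of 8 bytes of code each (one load plus one
+// store, moving 4 bytes per unit): 8 * (128 - Size()/4) is the byte
+// offset to jump to so that exactly Size()/4 units run. For a 16-byte
+// move that is 8 * (128 - 4) = 992, four units before the end of the
+// device. Moves that are too large, not 4-aligned, or compiled with
+// noDuffDevice fall through to the generic LoweredMove loop instead.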
+func rewriteValueARM_OpMul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul16 x y)
+ // cond:
+ // result: (MUL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMUL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpMul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32 x y)
+ // cond:
+ // result: (MUL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMUL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpMul32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32F x y)
+ // cond:
+ // result: (MULF x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMULF)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpMul32uhilo(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32uhilo x y)
+ // cond:
+ // result: (MULLU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMULLU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpMul64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul64F x y)
+ // cond:
+ // result: (MULD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMULD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpMul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul8 x y)
+ // cond:
+ // result: (MUL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMMUL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpNeg16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg16 x)
+ // cond:
+ // result: (RSBconst [0] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = 0
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeg32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg32 x)
+ // cond:
+ // result: (RSBconst [0] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = 0
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeg32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg32F x)
+ // cond:
+ // result: (NEGF x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMNEGF)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeg64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg64F x)
+ // cond:
+ // result: (NEGD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMNEGD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeg8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg8 x)
+ // cond:
+ // result: (RSBconst [0] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = 0
+ v.AddArg(x)
+ return true
+ }
+}
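+// The Neg rules lower to reverse-subtract from zero: RSBconst [0] x
+// computes 0 - x, which is how negation is usually spelled on ARM, where
+// integer negate is not a distinct instruction.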
+func rewriteValueARM_OpNeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq16 x y)
+ // cond:
+ // result: (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32 x y)
+ // cond:
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32F x y)
+ // cond:
+ // result: (NotEqual (CMPF x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq64F x y)
+ // cond:
+ // result: (NotEqual (CMPD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq8 x y)
+ // cond:
+ // result: (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqB x y)
+ // cond:
+ // result: (XOR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMXOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
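+// NeqB can be a plain XOR because SSA booleans are materialized as 0 or
+// 1, so x ^ y is exactly the inequality bit.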
+func rewriteValueARM_OpNeqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqPtr x y)
+ // cond:
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNilCheck(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NilCheck ptr mem)
+ // cond:
+ // result: (LoweredNilCheck ptr mem)
+ for {
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMLoweredNilCheck)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpNot(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Not x)
+ // cond:
+ // result: (XORconst [1] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMXORconst)
+ v.AuxInt = 1
+ v.AddArg(x)
+ return true
+ }
+}
func rewriteValueARM_OpOffPtr(v *Value, config *Config) bool {
b := v.Block
_ = b
+ // match: (OffPtr [off] ptr:(SP))
+ // cond:
+ // result: (MOVWaddr [off] ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ if ptr.Op != OpSP {
+ break
+ }
+ v.reset(OpARMMOVWaddr)
+ v.AuxInt = off
+ v.AddArg(ptr)
+ return true
+ }
// match: (OffPtr [off] ptr)
// cond:
- // result: (ADD (MOVWconst <config.Frontend().TypeInt32()> [off]) ptr)
+ // result: (ADDconst [off] ptr)
for {
off := v.AuxInt
ptr := v.Args[0]
- v.reset(OpARMADD)
- v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.Frontend().TypeInt32())
- v0.AuxInt = off
- v.AddArg(v0)
+ v.reset(OpARMADDconst)
+ v.AuxInt = off
v.AddArg(ptr)
return true
}
}
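+// The OffPtr diff above replaces the old lowering, which materialized the
+// offset in a register via ADD (MOVWconst [off]) ptr, with a single
+// ADDconst immediate; an OffPtr based directly on SP instead becomes a
+// MOVWaddr, presumably so stack addresses keep a form that later passes
+// can fold into load/store addressing.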
+func rewriteValueARM_OpOr16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or16 x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpOr32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or32 x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpOr8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or8 x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpOrB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OrB x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux16 x y)
+ // cond:
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v3.AuxInt = 256
+ v4 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg(v3)
+ return true
+ }
+}
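+// A sketch of the guard used by the register-count shift rules, assuming
+// ARM's shift-by-register reads only the low byte of the count: counts
+// 0..255 behave as expected (anything >= 32 already produces 0 for a
+// logical shift), but a count of 256 or more would be truncated to its
+// low byte, so CMPconst [256] plus CMOVWHSconst [0] forces the result to
+// 0 whenever the count is out of byte range.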
+func rewriteValueARM_OpRsh16Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux32 x y)
+ // cond:
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) y) (CMPconst [256] y) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v2.AddArg(y)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v.AuxInt = c + 16
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AuxInt = 16
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
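+// The constant-shift rules above fold the zero-extension into the shift:
+// SLLconst [16] parks the 16-bit value in the top half of the register,
+// and SRLconst [c+16] then clears the high bits and shifts in one go.
+// For example, with c = 3 and x = 0xFFFF: 0xFFFF << 16 = 0xFFFF0000, and
+// 0xFFFF0000 >> 19 = 0x1FFF, which is 0xFFFF >> 3.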
+func rewriteValueARM_OpRsh16Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux8 x y)
+ // cond:
+ // result: (SRL (ZeroExt16to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x16 x y)
+ // cond:
+ // result: (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x32 x y)
+ // cond:
+ // result: (SRAcond (SignExt16to32 x) y (CMPconst [256] y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = c + 16
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AuxInt = 16
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [31])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 31
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AuxInt = 16
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
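+// The signed variant mirrors this: SLLconst [16] puts the sign bit of the
+// 16-bit value at bit 31, so SRAconst [c+16] sign-extends as it shifts,
+// and any count >= 16 saturates to SRAconst [31], leaving 0 or -1, the
+// correct arithmetic-shift result for an oversized count.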
+func rewriteValueARM_OpRsh16x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x8 x y)
+ // cond:
+ // result: (SRA (SignExt16to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux16 x y)
+ // cond:
+ // result: (CMOVWHSconst (SRL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux32 x y)
+ // cond:
+ // result: (CMOVWHSconst (SRL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SRLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh32Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux8 x y)
+ // cond:
+ // result: (SRL x (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRL)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x16 x y)
+ // cond:
+ // result: (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAcond)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x32 x y)
+ // cond:
+ // result: (SRAcond x y (CMPconst [256] y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAcond)
+ v.AddArg(x)
+ v.AddArg(y)
+ v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v0.AuxInt = 256
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SRAconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (SRAconst x [31])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 31
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x8 x y)
+ // cond:
+ // result: (SRA x (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRA)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux16 x y)
+ // cond:
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v3.AuxInt = 256
+ v4 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux32 x y)
+ // cond:
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) y) (CMPconst [256] y) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v2.AddArg(y)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v.AuxInt = c + 24
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AuxInt = 24
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh8Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux8 x y)
+ // cond:
+ // result: (SRL (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x16 x y)
+ // cond:
+ // result: (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x32 x y)
+ // cond:
+ // result: (SRAcond (SignExt8to32 x) y (CMPconst [256] y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = c + 24
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AuxInt = 24
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [31])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 31
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AuxInt = 24
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x8 x y)
+ // cond:
+ // result: (SRA (SignExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpSignExt16to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt16to32 x)
+ // cond:
+ // result: (MOVHreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpSignExt8to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt8to16 x)
+ // cond:
+ // result: (MOVBreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpSignExt8to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt8to32 x)
+ // cond:
+ // result: (MOVBreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpSignmask(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Signmask x)
+ // cond:
+ // result: (SRAconst x [31])
+ for {
+ x := v.Args[0]
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 31
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpSqrt(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sqrt x)
+ // cond:
+ // result: (SQRTD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMSQRTD)
+ v.AddArg(x)
+ return true
+ }
+}
func rewriteValueARM_OpStaticCall(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -245,8 +16227,40 @@ func rewriteValueARM_OpStaticCall(v *Value, config *Config) bool {
func rewriteValueARM_OpStore(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Store [4] ptr val mem)
+ // match: (Store [1] ptr val mem)
+ // cond:
+ // result: (MOVBstore ptr val mem)
+ for {
+ if v.AuxInt != 1 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVBstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [2] ptr val mem)
// cond:
+ // result: (MOVHstore ptr val mem)
+ for {
+ if v.AuxInt != 2 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMMOVHstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [4] ptr val mem)
+ // cond: !is32BitFloat(val.Type)
// result: (MOVWstore ptr val mem)
for {
if v.AuxInt != 4 {
@@ -255,17 +16269,901 @@ func rewriteValueARM_OpStore(v *Value, config *Config) bool {
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
+ if !(!is32BitFloat(val.Type)) {
+ break
+ }
v.reset(OpARMMOVWstore)
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
+ // match: (Store [4] ptr val mem)
+ // cond: is32BitFloat(val.Type)
+ // result: (MOVFstore ptr val mem)
+ for {
+ if v.AuxInt != 4 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARMMOVFstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [8] ptr val mem)
+ // cond: is64BitFloat(val.Type)
+ // result: (MOVDstore ptr val mem)
+ for {
+ if v.AuxInt != 8 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARMMOVDstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
return false
}
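+// Store dispatch keys on the width in AuxInt, with the float conditions
+// disambiguating same-width stores: a 4-byte value becomes MOVWstore
+// unless it is a float32, which takes MOVFstore, and 8-byte stores are
+// matched only for float64 (MOVDstore), presumably because 64-bit
+// integers are split into 32-bit halves before lowering reaches here.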
+func rewriteValueARM_OpSub16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub16 x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpSub32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32 x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpSub32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32F x y)
+ // cond:
+ // result: (SUBF x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUBF)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpSub32carry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32carry x y)
+ // cond:
+ // result: (SUBS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUBS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpSub32withcarry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32withcarry x y c)
+ // cond:
+ // result: (SBC x y c)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ c := v.Args[2]
+ v.reset(OpARMSBC)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(c)
+ return true
+ }
+}
+func rewriteValueARM_OpSub64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub64F x y)
+ // cond:
+ // result: (SUBD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUBD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpSub8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub8 x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpSubPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SubPtr x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpTrunc16to8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc16to8 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpTrunc32to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc32to16 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpTrunc32to8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc32to8 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpXor16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Xor16 x y)
+ // cond:
+ // result: (XOR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMXOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpXor32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Xor32 x y)
+ // cond:
+ // result: (XOR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMXOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpXor8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Xor8 x y)
+ // cond:
+ // result: (XOR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMXOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpZero(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Zero [s] _ mem)
+ // cond: SizeAndAlign(s).Size() == 0
+ // result: mem
+ for {
+ s := v.AuxInt
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = mem.Type
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 1
+ // result: (MOVBstore ptr (MOVWconst [0]) mem)
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 1) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
+ // result: (MOVHstore ptr (MOVWconst [0]) mem)
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 2
+ // result: (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 2) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = 1
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v1.AuxInt = 0
+ v1.AddArg(ptr)
+ v2 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v2.AuxInt = 0
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
+ // result: (MOVWstore ptr (MOVWconst [0]) mem)
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
+ break
+ }
+ v.reset(OpARMMOVWstore)
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
+ // result: (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem))
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = 2
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMMOVHstore, TypeMem)
+ v1.AuxInt = 0
+ v1.AddArg(ptr)
+ v2 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v2.AuxInt = 0
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 4
+ // result: (MOVBstore [3] ptr (MOVWconst [0]) (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))))
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 4) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = 3
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v1.AuxInt = 2
+ v1.AddArg(ptr)
+ v2 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v2.AuxInt = 0
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v3.AuxInt = 1
+ v3.AddArg(ptr)
+ v4 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v4.AuxInt = 0
+ v3.AddArg(v4)
+ v5 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v5.AuxInt = 0
+ v5.AddArg(ptr)
+ v6 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v6.AuxInt = 0
+ v5.AddArg(v6)
+ v5.AddArg(mem)
+ v3.AddArg(v5)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 3
+ // result: (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem)))
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 3) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = 2
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v1.AuxInt = 1
+ v1.AddArg(ptr)
+ v2 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v2.AuxInt = 0
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+ v3.AuxInt = 0
+ v3.AddArg(ptr)
+ v4 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v4.AuxInt = 0
+ v3.AddArg(v4)
+ v3.AddArg(mem)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice
+ // result: (DUFFZERO [4 * (128 - int64(SizeAndAlign(s).Size()/4))] ptr (MOVWconst [0]) mem)
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(OpARMDUFFZERO)
+ v.AuxInt = 4 * (128 - int64(SizeAndAlign(s).Size()/4))
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%4 != 0
+ // result: (LoweredZero [SizeAndAlign(s).Align()] ptr (ADDconst <ptr.Type> ptr [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)]) (MOVWconst [0]) mem)
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !((SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%4 != 0) {
+ break
+ }
+ v.reset(OpARMLoweredZero)
+ v.AuxInt = SizeAndAlign(s).Align()
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARMADDconst, ptr.Type)
+ v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+ v0.AddArg(ptr)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
+ v1.AuxInt = 0
+ v.AddArg(v1)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
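+// DUFFZERO uses a factor of 4 where DUFFCOPY used 8, consistent with each
+// zeroing unit being a single 4-byte store instruction rather than a
+// load/store pair. At the 512-byte ceiling the jump offset is
+// 4 * (128 - 512/4) = 0, running the entire device.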
+func rewriteValueARM_OpZeroExt16to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt16to32 x)
+ // cond:
+ // result: (MOVHUreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVHUreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpZeroExt8to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt8to16 x)
+ // cond:
+ // result: (MOVBUreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVBUreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpZeroExt8to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt8to32 x)
+ // cond:
+ // result: (MOVBUreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVBUreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpZeromask(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Zeromask x)
+ // cond:
+ // result: (SRAconst (RSBshiftRL <config.fe.TypeInt32()> x x [1]) [31])
+ for {
+ x := v.Args[0]
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 31
+ v0 := b.NewValue0(v.Line, OpARMRSBshiftRL, config.fe.TypeInt32())
+ v0.AuxInt = 1
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
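+// Zeromask wants 0 for x == 0 and all ones otherwise. Reading RSBshiftRL
+// x x [1] as (x >> 1) - x (a hedged reading of the reverse-subtract
+// shifted-operand form): that is 0 when x is 0 and wraps to a value with
+// the high bit set for any nonzero x, since x >> 1 < x unsigned, so
+// SRAconst [31] smears the sign bit into 0x00000000 or 0xFFFFFFFF.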
func rewriteBlockARM(b *Block) bool {
switch b.Kind {
+ case BlockARMEQ:
+ // match: (EQ (FlagEQ) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (EQ (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (EQ (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (EQ (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (EQ (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (EQ (InvertFlags cmp) yes no)
+ // cond:
+ // result: (EQ cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMInvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMEQ
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
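+		// The Flag* rules in each branch kind resolve comparisons whose
+		// outcome is known at rewrite time: FlagEQ, FlagLT_ULT, and friends
+		// encode constant flag results, so the block degenerates to
+		// BlockFirst (always take the first successor), with
+		// swapSuccessors() when the condition is known false. InvertFlags
+		// swaps the operands of the comparison, so EQ stays EQ while
+		// ordered kinds flip, as GE does to LE just above.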
+ case BlockARMGE:
+ // match: (GE (FlagEQ) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GE (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GE (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GE (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GE (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GE (InvertFlags cmp) yes no)
+ // cond:
+ // result: (LE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMInvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMLE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockARMGT:
+ // match: (GT (FlagEQ) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GT (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GT (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GT (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GT (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GT (InvertFlags cmp) yes no)
+ // cond:
+ // result: (LT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMInvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMLT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
case BlockIf:
+ // match: (If (Equal cc) yes no)
+ // cond:
+ // result: (EQ cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMEqual {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMEQ
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (NotEqual cc) yes no)
+ // cond:
+ // result: (NE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMNotEqual {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMNE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
// match: (If (LessThan cc) yes no)
// cond:
// result: (LT cc yes no)
@@ -283,6 +17181,1084 @@ func rewriteBlockARM(b *Block) bool {
_ = no
return true
}
+ // match: (If (LessThanU cc) yes no)
+ // cond:
+ // result: (ULT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMLessThanU {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMULT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (LessEqual cc) yes no)
+ // cond:
+ // result: (LE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMLessEqual {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMLE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (LessEqualU cc) yes no)
+ // cond:
+ // result: (ULE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMLessEqualU {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMULE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (GreaterThan cc) yes no)
+ // cond:
+ // result: (GT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMGreaterThan {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMGT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (GreaterThanU cc) yes no)
+ // cond:
+ // result: (UGT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMGreaterThanU {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMUGT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (GreaterEqual cc) yes no)
+ // cond:
+ // result: (GE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMGreaterEqual {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMGE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (GreaterEqualU cc) yes no)
+ // cond:
+ // result: (UGE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMGreaterEqualU {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMUGE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If cond yes no)
+ // cond:
+ // result: (NE (CMPconst [0] cond) yes no)
+ for {
+ v := b.Control
+ cond := b.Control
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMNE
+ v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(cond)
+ b.SetControl(v0)
+ _ = yes
+ _ = no
+ return true
+ }
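+		// The final If rule is the catch-all: an arbitrary boolean control
+		// value is tested with CMPconst [0] and the block becomes NE. The
+		// NE rules below then peel that CMPconst back off when the boolean
+		// was itself a flag-materializing op such as LessThan, restoring a
+		// direct branch on the original comparison.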
+ case BlockARMLE:
+ // match: (LE (FlagEQ) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LE (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LE (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LE (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (LE (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (LE (InvertFlags cmp) yes no)
+ // cond:
+ // result: (GE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMInvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMGE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockARMLT:
+ // match: (LT (FlagEQ) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (LT (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LT (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LT (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (LT (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (LT (InvertFlags cmp) yes no)
+ // cond:
+ // result: (GT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMInvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMGT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockARMNE:
+ // match: (NE (CMPconst [0] (Equal cc)) yes no)
+ // cond:
+ // result: (EQ cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMEqual {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMEQ
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (NotEqual cc)) yes no)
+ // cond:
+ // result: (NE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMNotEqual {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMNE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessThan cc)) yes no)
+ // cond:
+ // result: (LT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMLessThan {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMLT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessThanU cc)) yes no)
+ // cond:
+ // result: (ULT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMLessThanU {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMULT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessEqual cc)) yes no)
+ // cond:
+ // result: (LE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMLessEqual {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMLE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessEqualU cc)) yes no)
+ // cond:
+ // result: (ULE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMLessEqualU {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMULE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterThan cc)) yes no)
+ // cond:
+ // result: (GT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMGreaterThan {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMGT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterThanU cc)) yes no)
+ // cond:
+ // result: (UGT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMGreaterThanU {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMUGT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterEqual cc)) yes no)
+ // cond:
+ // result: (GE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMGreaterEqual {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMGE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterEqualU cc)) yes no)
+ // cond:
+ // result: (UGE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMGreaterEqualU {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMUGE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (FlagEQ) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (NE (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (InvertFlags cmp) yes no)
+ // cond:
+ // result: (NE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMInvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMNE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockARMUGE:
+ // match: (UGE (FlagEQ) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (UGE (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (UGE (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (UGE (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (UGE (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (UGE (InvertFlags cmp) yes no)
+ // cond:
+ // result: (ULE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMInvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMULE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockARMUGT:
+ // match: (UGT (FlagEQ) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (UGT (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (UGT (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (UGT (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (UGT (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (UGT (InvertFlags cmp) yes no)
+ // cond:
+ // result: (ULT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMInvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMULT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockARMULE:
+ // match: (ULE (FlagEQ) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (ULE (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (ULE (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (ULE (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (ULE (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (ULE (InvertFlags cmp) yes no)
+ // cond:
+ // result: (UGE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMInvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMUGE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockARMULT:
+ // match: (ULT (FlagEQ) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (ULT (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (ULT (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (ULT (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (ULT (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (ULT (InvertFlags cmp) yes no)
+ // cond:
+ // result: (UGT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMInvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMUGT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
new file mode 100644
index 0000000000..318718d652
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -0,0 +1,15213 @@
+// autogenerated from gen/ARM64.rules: do not edit!
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "math"
+
+var _ = math.MinInt8 // in case not otherwise used
+func rewriteValueARM64(v *Value, config *Config) bool {
+ switch v.Op {
+ case OpARM64ADD:
+ return rewriteValueARM64_OpARM64ADD(v, config)
+ case OpARM64ADDconst:
+ return rewriteValueARM64_OpARM64ADDconst(v, config)
+ case OpARM64ADDshiftLL:
+ return rewriteValueARM64_OpARM64ADDshiftLL(v, config)
+ case OpARM64ADDshiftRA:
+ return rewriteValueARM64_OpARM64ADDshiftRA(v, config)
+ case OpARM64ADDshiftRL:
+ return rewriteValueARM64_OpARM64ADDshiftRL(v, config)
+ case OpARM64AND:
+ return rewriteValueARM64_OpARM64AND(v, config)
+ case OpARM64ANDconst:
+ return rewriteValueARM64_OpARM64ANDconst(v, config)
+ case OpARM64ANDshiftLL:
+ return rewriteValueARM64_OpARM64ANDshiftLL(v, config)
+ case OpARM64ANDshiftRA:
+ return rewriteValueARM64_OpARM64ANDshiftRA(v, config)
+ case OpARM64ANDshiftRL:
+ return rewriteValueARM64_OpARM64ANDshiftRL(v, config)
+ case OpARM64BIC:
+ return rewriteValueARM64_OpARM64BIC(v, config)
+ case OpARM64BICconst:
+ return rewriteValueARM64_OpARM64BICconst(v, config)
+ case OpARM64BICshiftLL:
+ return rewriteValueARM64_OpARM64BICshiftLL(v, config)
+ case OpARM64BICshiftRA:
+ return rewriteValueARM64_OpARM64BICshiftRA(v, config)
+ case OpARM64BICshiftRL:
+ return rewriteValueARM64_OpARM64BICshiftRL(v, config)
+ case OpARM64CMP:
+ return rewriteValueARM64_OpARM64CMP(v, config)
+ case OpARM64CMPW:
+ return rewriteValueARM64_OpARM64CMPW(v, config)
+ case OpARM64CMPWconst:
+ return rewriteValueARM64_OpARM64CMPWconst(v, config)
+ case OpARM64CMPconst:
+ return rewriteValueARM64_OpARM64CMPconst(v, config)
+ case OpARM64CMPshiftLL:
+ return rewriteValueARM64_OpARM64CMPshiftLL(v, config)
+ case OpARM64CMPshiftRA:
+ return rewriteValueARM64_OpARM64CMPshiftRA(v, config)
+ case OpARM64CMPshiftRL:
+ return rewriteValueARM64_OpARM64CMPshiftRL(v, config)
+ case OpARM64CSELULT:
+ return rewriteValueARM64_OpARM64CSELULT(v, config)
+ case OpARM64CSELULT0:
+ return rewriteValueARM64_OpARM64CSELULT0(v, config)
+ case OpARM64DIV:
+ return rewriteValueARM64_OpARM64DIV(v, config)
+ case OpARM64DIVW:
+ return rewriteValueARM64_OpARM64DIVW(v, config)
+ case OpARM64Equal:
+ return rewriteValueARM64_OpARM64Equal(v, config)
+ case OpARM64FMOVDload:
+ return rewriteValueARM64_OpARM64FMOVDload(v, config)
+ case OpARM64FMOVDstore:
+ return rewriteValueARM64_OpARM64FMOVDstore(v, config)
+ case OpARM64FMOVSload:
+ return rewriteValueARM64_OpARM64FMOVSload(v, config)
+ case OpARM64FMOVSstore:
+ return rewriteValueARM64_OpARM64FMOVSstore(v, config)
+ case OpARM64GreaterEqual:
+ return rewriteValueARM64_OpARM64GreaterEqual(v, config)
+ case OpARM64GreaterEqualU:
+ return rewriteValueARM64_OpARM64GreaterEqualU(v, config)
+ case OpARM64GreaterThan:
+ return rewriteValueARM64_OpARM64GreaterThan(v, config)
+ case OpARM64GreaterThanU:
+ return rewriteValueARM64_OpARM64GreaterThanU(v, config)
+ case OpARM64LessEqual:
+ return rewriteValueARM64_OpARM64LessEqual(v, config)
+ case OpARM64LessEqualU:
+ return rewriteValueARM64_OpARM64LessEqualU(v, config)
+ case OpARM64LessThan:
+ return rewriteValueARM64_OpARM64LessThan(v, config)
+ case OpARM64LessThanU:
+ return rewriteValueARM64_OpARM64LessThanU(v, config)
+ case OpARM64MOD:
+ return rewriteValueARM64_OpARM64MOD(v, config)
+ case OpARM64MODW:
+ return rewriteValueARM64_OpARM64MODW(v, config)
+ case OpARM64MOVBUload:
+ return rewriteValueARM64_OpARM64MOVBUload(v, config)
+ case OpARM64MOVBUreg:
+ return rewriteValueARM64_OpARM64MOVBUreg(v, config)
+ case OpARM64MOVBload:
+ return rewriteValueARM64_OpARM64MOVBload(v, config)
+ case OpARM64MOVBreg:
+ return rewriteValueARM64_OpARM64MOVBreg(v, config)
+ case OpARM64MOVBstore:
+ return rewriteValueARM64_OpARM64MOVBstore(v, config)
+ case OpARM64MOVBstorezero:
+ return rewriteValueARM64_OpARM64MOVBstorezero(v, config)
+ case OpARM64MOVDload:
+ return rewriteValueARM64_OpARM64MOVDload(v, config)
+ case OpARM64MOVDreg:
+ return rewriteValueARM64_OpARM64MOVDreg(v, config)
+ case OpARM64MOVDstore:
+ return rewriteValueARM64_OpARM64MOVDstore(v, config)
+ case OpARM64MOVDstorezero:
+ return rewriteValueARM64_OpARM64MOVDstorezero(v, config)
+ case OpARM64MOVHUload:
+ return rewriteValueARM64_OpARM64MOVHUload(v, config)
+ case OpARM64MOVHUreg:
+ return rewriteValueARM64_OpARM64MOVHUreg(v, config)
+ case OpARM64MOVHload:
+ return rewriteValueARM64_OpARM64MOVHload(v, config)
+ case OpARM64MOVHreg:
+ return rewriteValueARM64_OpARM64MOVHreg(v, config)
+ case OpARM64MOVHstore:
+ return rewriteValueARM64_OpARM64MOVHstore(v, config)
+ case OpARM64MOVHstorezero:
+ return rewriteValueARM64_OpARM64MOVHstorezero(v, config)
+ case OpARM64MOVWUload:
+ return rewriteValueARM64_OpARM64MOVWUload(v, config)
+ case OpARM64MOVWUreg:
+ return rewriteValueARM64_OpARM64MOVWUreg(v, config)
+ case OpARM64MOVWload:
+ return rewriteValueARM64_OpARM64MOVWload(v, config)
+ case OpARM64MOVWreg:
+ return rewriteValueARM64_OpARM64MOVWreg(v, config)
+ case OpARM64MOVWstore:
+ return rewriteValueARM64_OpARM64MOVWstore(v, config)
+ case OpARM64MOVWstorezero:
+ return rewriteValueARM64_OpARM64MOVWstorezero(v, config)
+ case OpARM64MUL:
+ return rewriteValueARM64_OpARM64MUL(v, config)
+ case OpARM64MULW:
+ return rewriteValueARM64_OpARM64MULW(v, config)
+ case OpARM64MVN:
+ return rewriteValueARM64_OpARM64MVN(v, config)
+ case OpARM64NEG:
+ return rewriteValueARM64_OpARM64NEG(v, config)
+ case OpARM64NotEqual:
+ return rewriteValueARM64_OpARM64NotEqual(v, config)
+ case OpARM64OR:
+ return rewriteValueARM64_OpARM64OR(v, config)
+ case OpARM64ORconst:
+ return rewriteValueARM64_OpARM64ORconst(v, config)
+ case OpARM64ORshiftLL:
+ return rewriteValueARM64_OpARM64ORshiftLL(v, config)
+ case OpARM64ORshiftRA:
+ return rewriteValueARM64_OpARM64ORshiftRA(v, config)
+ case OpARM64ORshiftRL:
+ return rewriteValueARM64_OpARM64ORshiftRL(v, config)
+ case OpARM64SLL:
+ return rewriteValueARM64_OpARM64SLL(v, config)
+ case OpARM64SLLconst:
+ return rewriteValueARM64_OpARM64SLLconst(v, config)
+ case OpARM64SRA:
+ return rewriteValueARM64_OpARM64SRA(v, config)
+ case OpARM64SRAconst:
+ return rewriteValueARM64_OpARM64SRAconst(v, config)
+ case OpARM64SRL:
+ return rewriteValueARM64_OpARM64SRL(v, config)
+ case OpARM64SRLconst:
+ return rewriteValueARM64_OpARM64SRLconst(v, config)
+ case OpARM64SUB:
+ return rewriteValueARM64_OpARM64SUB(v, config)
+ case OpARM64SUBconst:
+ return rewriteValueARM64_OpARM64SUBconst(v, config)
+ case OpARM64SUBshiftLL:
+ return rewriteValueARM64_OpARM64SUBshiftLL(v, config)
+ case OpARM64SUBshiftRA:
+ return rewriteValueARM64_OpARM64SUBshiftRA(v, config)
+ case OpARM64SUBshiftRL:
+ return rewriteValueARM64_OpARM64SUBshiftRL(v, config)
+ case OpARM64UDIV:
+ return rewriteValueARM64_OpARM64UDIV(v, config)
+ case OpARM64UDIVW:
+ return rewriteValueARM64_OpARM64UDIVW(v, config)
+ case OpARM64UMOD:
+ return rewriteValueARM64_OpARM64UMOD(v, config)
+ case OpARM64UMODW:
+ return rewriteValueARM64_OpARM64UMODW(v, config)
+ case OpARM64XOR:
+ return rewriteValueARM64_OpARM64XOR(v, config)
+ case OpARM64XORconst:
+ return rewriteValueARM64_OpARM64XORconst(v, config)
+ case OpARM64XORshiftLL:
+ return rewriteValueARM64_OpARM64XORshiftLL(v, config)
+ case OpARM64XORshiftRA:
+ return rewriteValueARM64_OpARM64XORshiftRA(v, config)
+ case OpARM64XORshiftRL:
+ return rewriteValueARM64_OpARM64XORshiftRL(v, config)
+ case OpAdd16:
+ return rewriteValueARM64_OpAdd16(v, config)
+ case OpAdd32:
+ return rewriteValueARM64_OpAdd32(v, config)
+ case OpAdd32F:
+ return rewriteValueARM64_OpAdd32F(v, config)
+ case OpAdd64:
+ return rewriteValueARM64_OpAdd64(v, config)
+ case OpAdd64F:
+ return rewriteValueARM64_OpAdd64F(v, config)
+ case OpAdd8:
+ return rewriteValueARM64_OpAdd8(v, config)
+ case OpAddPtr:
+ return rewriteValueARM64_OpAddPtr(v, config)
+ case OpAddr:
+ return rewriteValueARM64_OpAddr(v, config)
+ case OpAnd16:
+ return rewriteValueARM64_OpAnd16(v, config)
+ case OpAnd32:
+ return rewriteValueARM64_OpAnd32(v, config)
+ case OpAnd64:
+ return rewriteValueARM64_OpAnd64(v, config)
+ case OpAnd8:
+ return rewriteValueARM64_OpAnd8(v, config)
+ case OpAndB:
+ return rewriteValueARM64_OpAndB(v, config)
+ case OpAvg64u:
+ return rewriteValueARM64_OpAvg64u(v, config)
+ case OpClosureCall:
+ return rewriteValueARM64_OpClosureCall(v, config)
+ case OpCom16:
+ return rewriteValueARM64_OpCom16(v, config)
+ case OpCom32:
+ return rewriteValueARM64_OpCom32(v, config)
+ case OpCom64:
+ return rewriteValueARM64_OpCom64(v, config)
+ case OpCom8:
+ return rewriteValueARM64_OpCom8(v, config)
+ case OpConst16:
+ return rewriteValueARM64_OpConst16(v, config)
+ case OpConst32:
+ return rewriteValueARM64_OpConst32(v, config)
+ case OpConst32F:
+ return rewriteValueARM64_OpConst32F(v, config)
+ case OpConst64:
+ return rewriteValueARM64_OpConst64(v, config)
+ case OpConst64F:
+ return rewriteValueARM64_OpConst64F(v, config)
+ case OpConst8:
+ return rewriteValueARM64_OpConst8(v, config)
+ case OpConstBool:
+ return rewriteValueARM64_OpConstBool(v, config)
+ case OpConstNil:
+ return rewriteValueARM64_OpConstNil(v, config)
+ case OpConvert:
+ return rewriteValueARM64_OpConvert(v, config)
+ case OpCvt32Fto32:
+ return rewriteValueARM64_OpCvt32Fto32(v, config)
+ case OpCvt32Fto32U:
+ return rewriteValueARM64_OpCvt32Fto32U(v, config)
+ case OpCvt32Fto64:
+ return rewriteValueARM64_OpCvt32Fto64(v, config)
+ case OpCvt32Fto64F:
+ return rewriteValueARM64_OpCvt32Fto64F(v, config)
+ case OpCvt32Uto32F:
+ return rewriteValueARM64_OpCvt32Uto32F(v, config)
+ case OpCvt32Uto64F:
+ return rewriteValueARM64_OpCvt32Uto64F(v, config)
+ case OpCvt32to32F:
+ return rewriteValueARM64_OpCvt32to32F(v, config)
+ case OpCvt32to64F:
+ return rewriteValueARM64_OpCvt32to64F(v, config)
+ case OpCvt64Fto32:
+ return rewriteValueARM64_OpCvt64Fto32(v, config)
+ case OpCvt64Fto32F:
+ return rewriteValueARM64_OpCvt64Fto32F(v, config)
+ case OpCvt64Fto32U:
+ return rewriteValueARM64_OpCvt64Fto32U(v, config)
+ case OpCvt64Fto64:
+ return rewriteValueARM64_OpCvt64Fto64(v, config)
+ case OpCvt64to32F:
+ return rewriteValueARM64_OpCvt64to32F(v, config)
+ case OpCvt64to64F:
+ return rewriteValueARM64_OpCvt64to64F(v, config)
+ case OpDeferCall:
+ return rewriteValueARM64_OpDeferCall(v, config)
+ case OpDiv16:
+ return rewriteValueARM64_OpDiv16(v, config)
+ case OpDiv16u:
+ return rewriteValueARM64_OpDiv16u(v, config)
+ case OpDiv32:
+ return rewriteValueARM64_OpDiv32(v, config)
+ case OpDiv32F:
+ return rewriteValueARM64_OpDiv32F(v, config)
+ case OpDiv32u:
+ return rewriteValueARM64_OpDiv32u(v, config)
+ case OpDiv64:
+ return rewriteValueARM64_OpDiv64(v, config)
+ case OpDiv64F:
+ return rewriteValueARM64_OpDiv64F(v, config)
+ case OpDiv64u:
+ return rewriteValueARM64_OpDiv64u(v, config)
+ case OpDiv8:
+ return rewriteValueARM64_OpDiv8(v, config)
+ case OpDiv8u:
+ return rewriteValueARM64_OpDiv8u(v, config)
+ case OpEq16:
+ return rewriteValueARM64_OpEq16(v, config)
+ case OpEq32:
+ return rewriteValueARM64_OpEq32(v, config)
+ case OpEq32F:
+ return rewriteValueARM64_OpEq32F(v, config)
+ case OpEq64:
+ return rewriteValueARM64_OpEq64(v, config)
+ case OpEq64F:
+ return rewriteValueARM64_OpEq64F(v, config)
+ case OpEq8:
+ return rewriteValueARM64_OpEq8(v, config)
+ case OpEqB:
+ return rewriteValueARM64_OpEqB(v, config)
+ case OpEqPtr:
+ return rewriteValueARM64_OpEqPtr(v, config)
+ case OpGeq16:
+ return rewriteValueARM64_OpGeq16(v, config)
+ case OpGeq16U:
+ return rewriteValueARM64_OpGeq16U(v, config)
+ case OpGeq32:
+ return rewriteValueARM64_OpGeq32(v, config)
+ case OpGeq32F:
+ return rewriteValueARM64_OpGeq32F(v, config)
+ case OpGeq32U:
+ return rewriteValueARM64_OpGeq32U(v, config)
+ case OpGeq64:
+ return rewriteValueARM64_OpGeq64(v, config)
+ case OpGeq64F:
+ return rewriteValueARM64_OpGeq64F(v, config)
+ case OpGeq64U:
+ return rewriteValueARM64_OpGeq64U(v, config)
+ case OpGeq8:
+ return rewriteValueARM64_OpGeq8(v, config)
+ case OpGeq8U:
+ return rewriteValueARM64_OpGeq8U(v, config)
+ case OpGetClosurePtr:
+ return rewriteValueARM64_OpGetClosurePtr(v, config)
+ case OpGoCall:
+ return rewriteValueARM64_OpGoCall(v, config)
+ case OpGreater16:
+ return rewriteValueARM64_OpGreater16(v, config)
+ case OpGreater16U:
+ return rewriteValueARM64_OpGreater16U(v, config)
+ case OpGreater32:
+ return rewriteValueARM64_OpGreater32(v, config)
+ case OpGreater32F:
+ return rewriteValueARM64_OpGreater32F(v, config)
+ case OpGreater32U:
+ return rewriteValueARM64_OpGreater32U(v, config)
+ case OpGreater64:
+ return rewriteValueARM64_OpGreater64(v, config)
+ case OpGreater64F:
+ return rewriteValueARM64_OpGreater64F(v, config)
+ case OpGreater64U:
+ return rewriteValueARM64_OpGreater64U(v, config)
+ case OpGreater8:
+ return rewriteValueARM64_OpGreater8(v, config)
+ case OpGreater8U:
+ return rewriteValueARM64_OpGreater8U(v, config)
+ case OpHmul16:
+ return rewriteValueARM64_OpHmul16(v, config)
+ case OpHmul16u:
+ return rewriteValueARM64_OpHmul16u(v, config)
+ case OpHmul32:
+ return rewriteValueARM64_OpHmul32(v, config)
+ case OpHmul32u:
+ return rewriteValueARM64_OpHmul32u(v, config)
+ case OpHmul64:
+ return rewriteValueARM64_OpHmul64(v, config)
+ case OpHmul64u:
+ return rewriteValueARM64_OpHmul64u(v, config)
+ case OpHmul8:
+ return rewriteValueARM64_OpHmul8(v, config)
+ case OpHmul8u:
+ return rewriteValueARM64_OpHmul8u(v, config)
+ case OpInterCall:
+ return rewriteValueARM64_OpInterCall(v, config)
+ case OpIsInBounds:
+ return rewriteValueARM64_OpIsInBounds(v, config)
+ case OpIsNonNil:
+ return rewriteValueARM64_OpIsNonNil(v, config)
+ case OpIsSliceInBounds:
+ return rewriteValueARM64_OpIsSliceInBounds(v, config)
+ case OpLeq16:
+ return rewriteValueARM64_OpLeq16(v, config)
+ case OpLeq16U:
+ return rewriteValueARM64_OpLeq16U(v, config)
+ case OpLeq32:
+ return rewriteValueARM64_OpLeq32(v, config)
+ case OpLeq32F:
+ return rewriteValueARM64_OpLeq32F(v, config)
+ case OpLeq32U:
+ return rewriteValueARM64_OpLeq32U(v, config)
+ case OpLeq64:
+ return rewriteValueARM64_OpLeq64(v, config)
+ case OpLeq64F:
+ return rewriteValueARM64_OpLeq64F(v, config)
+ case OpLeq64U:
+ return rewriteValueARM64_OpLeq64U(v, config)
+ case OpLeq8:
+ return rewriteValueARM64_OpLeq8(v, config)
+ case OpLeq8U:
+ return rewriteValueARM64_OpLeq8U(v, config)
+ case OpLess16:
+ return rewriteValueARM64_OpLess16(v, config)
+ case OpLess16U:
+ return rewriteValueARM64_OpLess16U(v, config)
+ case OpLess32:
+ return rewriteValueARM64_OpLess32(v, config)
+ case OpLess32F:
+ return rewriteValueARM64_OpLess32F(v, config)
+ case OpLess32U:
+ return rewriteValueARM64_OpLess32U(v, config)
+ case OpLess64:
+ return rewriteValueARM64_OpLess64(v, config)
+ case OpLess64F:
+ return rewriteValueARM64_OpLess64F(v, config)
+ case OpLess64U:
+ return rewriteValueARM64_OpLess64U(v, config)
+ case OpLess8:
+ return rewriteValueARM64_OpLess8(v, config)
+ case OpLess8U:
+ return rewriteValueARM64_OpLess8U(v, config)
+ case OpLoad:
+ return rewriteValueARM64_OpLoad(v, config)
+ case OpLrot16:
+ return rewriteValueARM64_OpLrot16(v, config)
+ case OpLrot32:
+ return rewriteValueARM64_OpLrot32(v, config)
+ case OpLrot64:
+ return rewriteValueARM64_OpLrot64(v, config)
+ case OpLrot8:
+ return rewriteValueARM64_OpLrot8(v, config)
+ case OpLsh16x16:
+ return rewriteValueARM64_OpLsh16x16(v, config)
+ case OpLsh16x32:
+ return rewriteValueARM64_OpLsh16x32(v, config)
+ case OpLsh16x64:
+ return rewriteValueARM64_OpLsh16x64(v, config)
+ case OpLsh16x8:
+ return rewriteValueARM64_OpLsh16x8(v, config)
+ case OpLsh32x16:
+ return rewriteValueARM64_OpLsh32x16(v, config)
+ case OpLsh32x32:
+ return rewriteValueARM64_OpLsh32x32(v, config)
+ case OpLsh32x64:
+ return rewriteValueARM64_OpLsh32x64(v, config)
+ case OpLsh32x8:
+ return rewriteValueARM64_OpLsh32x8(v, config)
+ case OpLsh64x16:
+ return rewriteValueARM64_OpLsh64x16(v, config)
+ case OpLsh64x32:
+ return rewriteValueARM64_OpLsh64x32(v, config)
+ case OpLsh64x64:
+ return rewriteValueARM64_OpLsh64x64(v, config)
+ case OpLsh64x8:
+ return rewriteValueARM64_OpLsh64x8(v, config)
+ case OpLsh8x16:
+ return rewriteValueARM64_OpLsh8x16(v, config)
+ case OpLsh8x32:
+ return rewriteValueARM64_OpLsh8x32(v, config)
+ case OpLsh8x64:
+ return rewriteValueARM64_OpLsh8x64(v, config)
+ case OpLsh8x8:
+ return rewriteValueARM64_OpLsh8x8(v, config)
+ case OpMod16:
+ return rewriteValueARM64_OpMod16(v, config)
+ case OpMod16u:
+ return rewriteValueARM64_OpMod16u(v, config)
+ case OpMod32:
+ return rewriteValueARM64_OpMod32(v, config)
+ case OpMod32u:
+ return rewriteValueARM64_OpMod32u(v, config)
+ case OpMod64:
+ return rewriteValueARM64_OpMod64(v, config)
+ case OpMod64u:
+ return rewriteValueARM64_OpMod64u(v, config)
+ case OpMod8:
+ return rewriteValueARM64_OpMod8(v, config)
+ case OpMod8u:
+ return rewriteValueARM64_OpMod8u(v, config)
+ case OpMove:
+ return rewriteValueARM64_OpMove(v, config)
+ case OpMul16:
+ return rewriteValueARM64_OpMul16(v, config)
+ case OpMul32:
+ return rewriteValueARM64_OpMul32(v, config)
+ case OpMul32F:
+ return rewriteValueARM64_OpMul32F(v, config)
+ case OpMul64:
+ return rewriteValueARM64_OpMul64(v, config)
+ case OpMul64F:
+ return rewriteValueARM64_OpMul64F(v, config)
+ case OpMul8:
+ return rewriteValueARM64_OpMul8(v, config)
+ case OpNeg16:
+ return rewriteValueARM64_OpNeg16(v, config)
+ case OpNeg32:
+ return rewriteValueARM64_OpNeg32(v, config)
+ case OpNeg32F:
+ return rewriteValueARM64_OpNeg32F(v, config)
+ case OpNeg64:
+ return rewriteValueARM64_OpNeg64(v, config)
+ case OpNeg64F:
+ return rewriteValueARM64_OpNeg64F(v, config)
+ case OpNeg8:
+ return rewriteValueARM64_OpNeg8(v, config)
+ case OpNeq16:
+ return rewriteValueARM64_OpNeq16(v, config)
+ case OpNeq32:
+ return rewriteValueARM64_OpNeq32(v, config)
+ case OpNeq32F:
+ return rewriteValueARM64_OpNeq32F(v, config)
+ case OpNeq64:
+ return rewriteValueARM64_OpNeq64(v, config)
+ case OpNeq64F:
+ return rewriteValueARM64_OpNeq64F(v, config)
+ case OpNeq8:
+ return rewriteValueARM64_OpNeq8(v, config)
+ case OpNeqB:
+ return rewriteValueARM64_OpNeqB(v, config)
+ case OpNeqPtr:
+ return rewriteValueARM64_OpNeqPtr(v, config)
+ case OpNilCheck:
+ return rewriteValueARM64_OpNilCheck(v, config)
+ case OpNot:
+ return rewriteValueARM64_OpNot(v, config)
+ case OpOffPtr:
+ return rewriteValueARM64_OpOffPtr(v, config)
+ case OpOr16:
+ return rewriteValueARM64_OpOr16(v, config)
+ case OpOr32:
+ return rewriteValueARM64_OpOr32(v, config)
+ case OpOr64:
+ return rewriteValueARM64_OpOr64(v, config)
+ case OpOr8:
+ return rewriteValueARM64_OpOr8(v, config)
+ case OpOrB:
+ return rewriteValueARM64_OpOrB(v, config)
+ case OpRsh16Ux16:
+ return rewriteValueARM64_OpRsh16Ux16(v, config)
+ case OpRsh16Ux32:
+ return rewriteValueARM64_OpRsh16Ux32(v, config)
+ case OpRsh16Ux64:
+ return rewriteValueARM64_OpRsh16Ux64(v, config)
+ case OpRsh16Ux8:
+ return rewriteValueARM64_OpRsh16Ux8(v, config)
+ case OpRsh16x16:
+ return rewriteValueARM64_OpRsh16x16(v, config)
+ case OpRsh16x32:
+ return rewriteValueARM64_OpRsh16x32(v, config)
+ case OpRsh16x64:
+ return rewriteValueARM64_OpRsh16x64(v, config)
+ case OpRsh16x8:
+ return rewriteValueARM64_OpRsh16x8(v, config)
+ case OpRsh32Ux16:
+ return rewriteValueARM64_OpRsh32Ux16(v, config)
+ case OpRsh32Ux32:
+ return rewriteValueARM64_OpRsh32Ux32(v, config)
+ case OpRsh32Ux64:
+ return rewriteValueARM64_OpRsh32Ux64(v, config)
+ case OpRsh32Ux8:
+ return rewriteValueARM64_OpRsh32Ux8(v, config)
+ case OpRsh32x16:
+ return rewriteValueARM64_OpRsh32x16(v, config)
+ case OpRsh32x32:
+ return rewriteValueARM64_OpRsh32x32(v, config)
+ case OpRsh32x64:
+ return rewriteValueARM64_OpRsh32x64(v, config)
+ case OpRsh32x8:
+ return rewriteValueARM64_OpRsh32x8(v, config)
+ case OpRsh64Ux16:
+ return rewriteValueARM64_OpRsh64Ux16(v, config)
+ case OpRsh64Ux32:
+ return rewriteValueARM64_OpRsh64Ux32(v, config)
+ case OpRsh64Ux64:
+ return rewriteValueARM64_OpRsh64Ux64(v, config)
+ case OpRsh64Ux8:
+ return rewriteValueARM64_OpRsh64Ux8(v, config)
+ case OpRsh64x16:
+ return rewriteValueARM64_OpRsh64x16(v, config)
+ case OpRsh64x32:
+ return rewriteValueARM64_OpRsh64x32(v, config)
+ case OpRsh64x64:
+ return rewriteValueARM64_OpRsh64x64(v, config)
+ case OpRsh64x8:
+ return rewriteValueARM64_OpRsh64x8(v, config)
+ case OpRsh8Ux16:
+ return rewriteValueARM64_OpRsh8Ux16(v, config)
+ case OpRsh8Ux32:
+ return rewriteValueARM64_OpRsh8Ux32(v, config)
+ case OpRsh8Ux64:
+ return rewriteValueARM64_OpRsh8Ux64(v, config)
+ case OpRsh8Ux8:
+ return rewriteValueARM64_OpRsh8Ux8(v, config)
+ case OpRsh8x16:
+ return rewriteValueARM64_OpRsh8x16(v, config)
+ case OpRsh8x32:
+ return rewriteValueARM64_OpRsh8x32(v, config)
+ case OpRsh8x64:
+ return rewriteValueARM64_OpRsh8x64(v, config)
+ case OpRsh8x8:
+ return rewriteValueARM64_OpRsh8x8(v, config)
+ case OpSignExt16to32:
+ return rewriteValueARM64_OpSignExt16to32(v, config)
+ case OpSignExt16to64:
+ return rewriteValueARM64_OpSignExt16to64(v, config)
+ case OpSignExt32to64:
+ return rewriteValueARM64_OpSignExt32to64(v, config)
+ case OpSignExt8to16:
+ return rewriteValueARM64_OpSignExt8to16(v, config)
+ case OpSignExt8to32:
+ return rewriteValueARM64_OpSignExt8to32(v, config)
+ case OpSignExt8to64:
+ return rewriteValueARM64_OpSignExt8to64(v, config)
+ case OpSqrt:
+ return rewriteValueARM64_OpSqrt(v, config)
+ case OpStaticCall:
+ return rewriteValueARM64_OpStaticCall(v, config)
+ case OpStore:
+ return rewriteValueARM64_OpStore(v, config)
+ case OpSub16:
+ return rewriteValueARM64_OpSub16(v, config)
+ case OpSub32:
+ return rewriteValueARM64_OpSub32(v, config)
+ case OpSub32F:
+ return rewriteValueARM64_OpSub32F(v, config)
+ case OpSub64:
+ return rewriteValueARM64_OpSub64(v, config)
+ case OpSub64F:
+ return rewriteValueARM64_OpSub64F(v, config)
+ case OpSub8:
+ return rewriteValueARM64_OpSub8(v, config)
+ case OpSubPtr:
+ return rewriteValueARM64_OpSubPtr(v, config)
+ case OpTrunc16to8:
+ return rewriteValueARM64_OpTrunc16to8(v, config)
+ case OpTrunc32to16:
+ return rewriteValueARM64_OpTrunc32to16(v, config)
+ case OpTrunc32to8:
+ return rewriteValueARM64_OpTrunc32to8(v, config)
+ case OpTrunc64to16:
+ return rewriteValueARM64_OpTrunc64to16(v, config)
+ case OpTrunc64to32:
+ return rewriteValueARM64_OpTrunc64to32(v, config)
+ case OpTrunc64to8:
+ return rewriteValueARM64_OpTrunc64to8(v, config)
+ case OpXor16:
+ return rewriteValueARM64_OpXor16(v, config)
+ case OpXor32:
+ return rewriteValueARM64_OpXor32(v, config)
+ case OpXor64:
+ return rewriteValueARM64_OpXor64(v, config)
+ case OpXor8:
+ return rewriteValueARM64_OpXor8(v, config)
+ case OpZero:
+ return rewriteValueARM64_OpZero(v, config)
+ case OpZeroExt16to32:
+ return rewriteValueARM64_OpZeroExt16to32(v, config)
+ case OpZeroExt16to64:
+ return rewriteValueARM64_OpZeroExt16to64(v, config)
+ case OpZeroExt32to64:
+ return rewriteValueARM64_OpZeroExt32to64(v, config)
+ case OpZeroExt8to16:
+ return rewriteValueARM64_OpZeroExt8to16(v, config)
+ case OpZeroExt8to32:
+ return rewriteValueARM64_OpZeroExt8to32(v, config)
+ case OpZeroExt8to64:
+ return rewriteValueARM64_OpZeroExt8to64(v, config)
+ }
+ return false
+}
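+ // NOTE: rewriteValueARM64 above is the generated dispatcher: each case
+ // forwards to a per-opcode function that tries its rules in source order
+ // and reports whether it changed v, so the generic rewrite driver (see
+ // rewrite.go) can keep applying rules until nothing fires.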
+func rewriteValueARM64_OpARM64ADD(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADD (MOVDconst [c]) x)
+ // cond:
+ // result: (ADDconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADD x (MOVDconst [c]))
+ // cond:
+ // result: (ADDconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADD x (NEG y))
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64NEG {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpARM64SUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADD (NEG y) x)
+ // cond:
+ // result: (SUB x y)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64NEG {
+ break
+ }
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARM64SUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADD x (SLLconst [c] y))
+ // cond:
+ // result: (ADDshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADD (SLLconst [c] y) x)
+ // cond:
+ // result: (ADDshiftLL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADD x (SRLconst [c] y))
+ // cond:
+ // result: (ADDshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARM64ADDshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADD (SRLconst [c] y) x)
+ // cond:
+ // result: (ADDshiftRL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARM64ADDshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADD x (SRAconst [c] y))
+ // cond:
+ // result: (ADDshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARM64ADDshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADD (SRAconst [c] y) x)
+ // cond:
+ // result: (ADDshiftRA x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARM64ADDshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
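+ // NOTE: the ADD rules canonicalize a constant operand into ADDconst, turn
+ // x + (-y) into SUB, and fold a shifted operand into the ADDshiftLL/RL/RA
+ // forms, which correspond to ARM64's shifted-register add (roughly
+ // "ADD Xd, Xn, Xm, LSL #c" and friends), saving a separate shift.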
+func rewriteValueARM64_OpARM64ADDconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDconst [off1] (MOVDaddr [off2] {sym} ptr))
+ // cond:
+ // result: (MOVDaddr [off1+off2] {sym} ptr)
+ for {
+ off1 := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym := v_0.Aux
+ ptr := v_0.Args[0]
+ v.reset(OpARM64MOVDaddr)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (ADDconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVDconst [d]))
+ // cond:
+ // result: (MOVDconst [c+d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = c + d
+ return true
+ }
+ // match: (ADDconst [c] (ADDconst [d] x))
+ // cond:
+ // result: (ADDconst [c+d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = c + d
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (SUBconst [d] x))
+ // cond:
+ // result: (ADDconst [c-d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SUBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = c - d
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
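+ // NOTE: the last two rules are plain arithmetic on the aux constant:
+ // c + (x + d) = (c+d) + x and c + (x - d) = (c-d) + x, so chains of
+ // ADDconst and SUBconst collapse into a single ADDconst.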
+func rewriteValueARM64_OpARM64ADDshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDshiftLL (MOVDconst [c]) x [d])
+ // cond:
+ // result: (ADDconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARM64SLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftLL x (MOVDconst [c]) [d])
+ // cond:
+ // result: (ADDconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
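+ // NOTE: when the shifted operand is itself a constant, the shift happens
+ // at compile time; e.g. (ADDshiftLL x (MOVDconst [1]) [3]) becomes
+ // (ADDconst x [8]), since int64(uint64(1)<<uint64(3)) == 8.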
+func rewriteValueARM64_OpARM64ADDshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDshiftRA (MOVDconst [c]) x [d])
+ // cond:
+ // result: (ADDconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARM64SRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRA x (MOVDconst [c]) [d])
+ // cond:
+ // result: (ADDconst x [int64(int64(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64(int64(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
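+ // NOTE: the RA (arithmetic right shift) fold converts through int64 so
+ // the sign of c is preserved, unlike the RL variants, which shift through
+ // uint64 to get a logical (zero-filling) shift.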
+func rewriteValueARM64_OpARM64ADDshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDshiftRL (MOVDconst [c]) x [d])
+ // cond:
+ // result: (ADDconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARM64SRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRL x (MOVDconst [c]) [d])
+ // cond:
+ // result: (ADDconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
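+ // NOTE: e.g. with c = -8 and d = 1, the fold produces
+ // int64(uint64(-8)>>1) == 0x7ffffffffffffffc rather than -4, matching a
+ // logical (zero-filling) shift of the bit pattern.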
+func rewriteValueARM64_OpARM64AND(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AND (MOVDconst [c]) x)
+ // cond:
+ // result: (ANDconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (AND x (MOVDconst [c]))
+ // cond:
+ // result: (ANDconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (AND x x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (AND x (MVN y))
+ // cond:
+ // result: (BIC x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MVN {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpARM64BIC)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (AND x (SLLconst [c] y))
+ // cond:
+ // result: (ANDshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARM64ANDshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (AND (SLLconst [c] y) x)
+ // cond:
+ // result: (ANDshiftLL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARM64ANDshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (AND x (SRLconst [c] y))
+ // cond:
+ // result: (ANDshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARM64ANDshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (AND (SRLconst [c] y) x)
+ // cond:
+ // result: (ANDshiftRL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARM64ANDshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (AND x (SRAconst [c] y))
+ // cond:
+ // result: (ANDshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARM64ANDshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (AND (SRAconst [c] y) x)
+ // cond:
+ // result: (ANDshiftRA x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARM64ANDshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
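+ // NOTE: besides the constant and shift folds shared with ADD, AND has two
+ // bitwise identities: x & x = x, and x & ^y = BIC x y, which maps to the
+ // single ARM64 bit-clear instruction instead of an MVN plus an AND.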
+func rewriteValueARM64_OpARM64ANDconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ANDconst [0] _)
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (ANDconst [-1] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != -1 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVDconst [d]))
+ // cond:
+ // result: (MOVDconst [c&d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = c & d
+ return true
+ }
+ // match: (ANDconst [c] (ANDconst [d] x))
+ // cond:
+ // result: (ANDconst [c&d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = c & d
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
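+ // NOTE: masks compose by intersection, e.g.
+ //   (ANDconst [0xff] (ANDconst [0xf0f] x)) -> (ANDconst [0xf] x)
+ // since 0xff & 0xf0f == 0xf; the 0 and -1 cases are the two trivial masks.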
+func rewriteValueARM64_OpARM64ANDshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ANDshiftLL (MOVDconst [c]) x [d])
+ // cond:
+ // result: (ANDconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARM64SLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftLL x (MOVDconst [c]) [d])
+ // cond:
+ // result: (ANDconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDshiftLL y:(SLLconst x [c]) x [d])
+ // cond: c==d
+ // result: y
+ for {
+ d := v.AuxInt
+ y := v.Args[0]
+ if y.Op != OpARM64SLLconst {
+ break
+ }
+ c := y.AuxInt
+ x := y.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
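+ // NOTE: the last rule recognizes y & (x<<c) when y is itself x<<c, i.e.
+ // both AND operands are the same shifted value, so the AND is redundant;
+ // the BICshift variants below use the same shape to produce 0 instead.
+ // (ANDshiftLL x y [d] computes x & (y<<d), per the constant-fold rule
+ // above, so the shifted copy must sit in the first operand slot.)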
+func rewriteValueARM64_OpARM64ANDshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ANDshiftRA (MOVDconst [c]) x [d])
+ // cond:
+ // result: (ANDconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARM64SRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRA x (MOVDconst [c]) [d])
+ // cond:
+ // result: (ANDconst x [int64(int64(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64(int64(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDshiftRA y:(SRAconst x [c]) x [d])
+ // cond: c==d
+ // result: y
+ for {
+ d := v.AuxInt
+ y := v.Args[0]
+ if y.Op != OpARM64SRAconst {
+ break
+ }
+ c := y.AuxInt
+ x := y.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ANDshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ANDshiftRL (MOVDconst [c]) x [d])
+ // cond:
+ // result: (ANDconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARM64SRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRL x (MOVDconst [c]) [d])
+ // cond:
+ // result: (ANDconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDshiftRL y:(SRLconst x [c]) x [d])
+ // cond: c==d
+ // result: y
+ for {
+ d := v.AuxInt
+ y := v.Args[0]
+ if y.Op != OpARM64SRLconst {
+ break
+ }
+ c := y.AuxInt
+ x := y.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64BIC(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (BIC x (MOVDconst [c]))
+ // cond:
+ // result: (BICconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64BICconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (BIC x x)
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (BIC x (SLLconst [c] y))
+ // cond:
+ // result: (BICshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARM64BICshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (BIC x (SRLconst [c] y))
+ // cond:
+ // result: (BICshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARM64BICshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (BIC x (SRAconst [c] y))
+ // cond:
+ // result: (BICshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARM64BICshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
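+ // NOTE: BIC computes x &^ y (and-not). x &^ x is always 0, and a shifted
+ // second operand folds into BICshiftLL/RL/RA just as with AND; BIC is not
+ // commutative, so only the second operand carries the shift and no
+ // commuted variants appear here.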
+func rewriteValueARM64_OpARM64BICconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (BICconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICconst [-1] _)
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ if v.AuxInt != -1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (BICconst [c] (MOVDconst [d]))
+ // cond:
+ // result: (MOVDconst [d&^c])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = d &^ c
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64BICshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (BICshiftLL x (MOVDconst [c]) [d])
+ // cond:
+ // result: (BICconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64BICconst)
+ v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICshiftLL (SLLconst x [c]) x [d])
+ // cond: c==d
+ // result: (MOVDconst [0])
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64BICshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (BICshiftRA x (MOVDconst [c]) [d])
+ // cond:
+ // result: (BICconst x [int64(int64(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64BICconst)
+ v.AuxInt = int64(int64(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICshiftRA (SRAconst x [c]) x [d])
+ // cond: c==d
+ // result: (MOVDconst [0])
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64BICshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (BICshiftRL x (MOVDconst [c]) [d])
+ // cond:
+ // result: (BICconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64BICconst)
+ v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICshiftRL (SRLconst x [c]) x [d])
+ // cond: c==d
+ // result: (MOVDconst [0])
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMP(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMP x (MOVDconst [c]))
+ // cond:
+ // result: (CMPconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64CMPconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMP (MOVDconst [c]) x)
+ // cond:
+ // result: (InvertFlags (CMPconst [c] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v0.AuxInt = c
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SLLconst [c] y))
+ // cond:
+ // result: (CMPshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARM64CMPshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMP (SLLconst [c] y) x)
+ // cond:
+ // result: (InvertFlags (CMPshiftLL x y [c]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Line, OpARM64CMPshiftLL, TypeFlags)
+ v0.AuxInt = c
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SRLconst [c] y))
+ // cond:
+ // result: (CMPshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARM64CMPshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMP (SRLconst [c] y) x)
+ // cond:
+ // result: (InvertFlags (CMPshiftRL x y [c]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Line, OpARM64CMPshiftRL, TypeFlags)
+ v0.AuxInt = c
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SRAconst [c] y))
+ // cond:
+ // result: (CMPshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARM64CMPshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMP (SRAconst [c] y) x)
+ // cond:
+ // result: (InvertFlags (CMPshiftRA x y [c]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Line, OpARM64CMPshiftRA, TypeFlags)
+ v0.AuxInt = c
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
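+ // NOTE: CMP is not symmetric, so when only the swapped form matches, the
+ // rewrite wraps the result in InvertFlags; block rules compensate later by
+ // flipping the condition, as in (UGT (InvertFlags cmp)) -> (ULT cmp).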
+func rewriteValueARM64_OpARM64CMPW(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPW x (MOVDconst [c]))
+ // cond:
+ // result: (CMPWconst [int64(int32(c))] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64CMPWconst)
+ v.AuxInt = int64(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPW (MOVDconst [c]) x)
+ // cond:
+ // result: (InvertFlags (CMPWconst [int64(int32(c))] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Line, OpARM64CMPWconst, TypeFlags)
+ v0.AuxInt = int64(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMPWconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)==int32(y)
+ // result: (FlagEQ)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int32(x) == int32(y)) {
+ break
+ }
+ v.reset(OpARM64FlagEQ)
+ return true
+ }
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)<int32(y) && uint32(x)<uint32(y)
+ // result: (FlagLT_ULT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(OpARM64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)<int32(y) && uint32(x)>uint32(y)
+ // result: (FlagLT_UGT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(OpARM64FlagLT_UGT)
+ return true
+ }
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)>int32(y) && uint32(x)<uint32(y)
+ // result: (FlagGT_ULT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(OpARM64FlagGT_ULT)
+ return true
+ }
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)>int32(y) && uint32(x)>uint32(y)
+ // result: (FlagGT_UGT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(OpARM64FlagGT_UGT)
+ return true
+ }
+ // match: (CMPWconst (MOVBUreg _) [c])
+ // cond: 0xff < int32(c)
+ // result: (FlagLT_ULT)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVBUreg {
+ break
+ }
+ if !(0xff < int32(c)) {
+ break
+ }
+ v.reset(OpARM64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPWconst (MOVHUreg _) [c])
+ // cond: 0xffff < int32(c)
+ // result: (FlagLT_ULT)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVHUreg {
+ break
+ }
+ if !(0xffff < int32(c)) {
+ break
+ }
+ v.reset(OpARM64FlagLT_ULT)
+ return true
+ }
+ return false
+}
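+ // NOTE: the five Flag* results encode the signed and unsigned outcomes
+ // independently, and they can differ: comparing x = -1 with y = 1 is
+ // less-than as int32 but greater-than as uint32, hence FlagLT_UGT. The
+ // MOVBUreg and MOVHUreg rules use the value's known zero-extended range
+ // (<= 0xff or <= 0xffff) to decide the comparison statically.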
+func rewriteValueARM64_OpARM64CMPconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: x==y
+ // result: (FlagEQ)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(x == y) {
+ break
+ }
+ v.reset(OpARM64FlagEQ)
+ return true
+ }
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: int64(x)<int64(y) && uint64(x)<uint64(y)
+ // result: (FlagLT_ULT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int64(x) < int64(y) && uint64(x) < uint64(y)) {
+ break
+ }
+ v.reset(OpARM64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: int64(x)<int64(y) && uint64(x)>uint64(y)
+ // result: (FlagLT_UGT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int64(x) < int64(y) && uint64(x) > uint64(y)) {
+ break
+ }
+ v.reset(OpARM64FlagLT_UGT)
+ return true
+ }
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: int64(x)>int64(y) && uint64(x)<uint64(y)
+ // result: (FlagGT_ULT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int64(x) > int64(y) && uint64(x) < uint64(y)) {
+ break
+ }
+ v.reset(OpARM64FlagGT_ULT)
+ return true
+ }
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: int64(x)>int64(y) && uint64(x)>uint64(y)
+ // result: (FlagGT_UGT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int64(x) > int64(y) && uint64(x) > uint64(y)) {
+ break
+ }
+ v.reset(OpARM64FlagGT_UGT)
+ return true
+ }
+ // match: (CMPconst (MOVBUreg _) [c])
+ // cond: 0xff < c
+ // result: (FlagLT_ULT)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVBUreg {
+ break
+ }
+ if !(0xff < c) {
+ break
+ }
+ v.reset(OpARM64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPconst (MOVHUreg _) [c])
+ // cond: 0xffff < c
+ // result: (FlagLT_ULT)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVHUreg {
+ break
+ }
+ if !(0xffff < c) {
+ break
+ }
+ v.reset(OpARM64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPconst (MOVWUreg _) [c])
+ // cond: 0xffffffff < c
+ // result: (FlagLT_ULT)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVWUreg {
+ break
+ }
+ if !(0xffffffff < c) {
+ break
+ }
+ v.reset(OpARM64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPconst (ANDconst _ [m]) [n])
+ // cond: 0 <= m && m < n
+ // result: (FlagLT_ULT)
+ for {
+ n := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ m := v_0.AuxInt
+ if !(0 <= m && m < n) {
+ break
+ }
+ v.reset(OpARM64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPconst (SRLconst _ [c]) [n])
+ // cond: 0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n)
+ // result: (FlagLT_ULT)
+ for {
+ n := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ if !(0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n)) {
+ break
+ }
+ v.reset(OpARM64FlagLT_ULT)
+ return true
+ }
+ return false
+}
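+// Example of the constant-folding rules above: (CMPconst [10] (MOVDconst [3]))
+// has int64(3) < int64(10) and uint64(3) < uint64(10), so it folds to
+// (FlagLT_ULT). The SRLconst rule reasons the same way without a constant
+// operand: x>>c is always below 1<<(64-c), so whenever 1<<(64-c) <= n the
+// shifted value must compare below n.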
+func rewriteValueARM64_OpARM64CMPshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPshiftLL (MOVDconst [c]) x [d])
+ // cond:
+ // result: (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v0.AuxInt = c
+ v1 := b.NewValue0(v.Line, OpARM64SLLconst, x.Type)
+ v1.AuxInt = d
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftLL x (MOVDconst [c]) [d])
+ // cond:
+ // result: (CMPconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64CMPconst)
+ v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMPshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPshiftRA (MOVDconst [c]) x [d])
+ // cond:
+ // result: (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v0.AuxInt = c
+ v1 := b.NewValue0(v.Line, OpARM64SRAconst, x.Type)
+ v1.AuxInt = d
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRA x (MOVDconst [c]) [d])
+ // cond:
+ // result: (CMPconst x [int64(int64(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64CMPconst)
+ v.AuxInt = int64(int64(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMPshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPshiftRL (MOVDconst [c]) x [d])
+ // cond:
+ // result: (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v0.AuxInt = c
+ v1 := b.NewValue0(v.Line, OpARM64SRLconst, x.Type)
+ v1.AuxInt = d
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRL x (MOVDconst [c]) [d])
+ // cond:
+ // result: (CMPconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64CMPconst)
+ v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
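+// The three CMPshift rewrites above canonicalize compares against shifted
+// constants. With the constant on the right, the shift folds into the
+// immediate: (CMPshiftLL x (MOVDconst [2]) [3]) becomes (CMPconst x [16]).
+// With the constant on the left, the materialized CMPconst has its operands
+// swapped relative to the original compare, hence the InvertFlags wrapper.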
+func rewriteValueARM64_OpARM64CSELULT(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CSELULT x (MOVDconst [0]) flag)
+ // cond:
+ // result: (CSELULT0 x flag)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1.AuxInt != 0 {
+ break
+ }
+ flag := v.Args[2]
+ v.reset(OpARM64CSELULT0)
+ v.AddArg(x)
+ v.AddArg(flag)
+ return true
+ }
+ // match: (CSELULT _ y (FlagEQ))
+ // cond:
+ // result: y
+ for {
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64FlagEQ {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (CSELULT x _ (FlagLT_ULT))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64FlagLT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CSELULT _ y (FlagLT_UGT))
+ // cond:
+ // result: y
+ for {
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64FlagLT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (CSELULT x _ (FlagGT_ULT))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64FlagGT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CSELULT _ y (FlagGT_UGT))
+ // cond:
+ // result: y
+ for {
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64FlagGT_UGT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CSELULT0(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CSELULT0 _ (FlagEQ))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64FlagEQ {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (CSELULT0 x (FlagLT_ULT))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64FlagLT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CSELULT0 _ (FlagLT_UGT))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64FlagLT_UGT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (CSELULT0 x (FlagGT_ULT))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64FlagGT_ULT {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (CSELULT0 _ (FlagGT_UGT))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64FlagGT_UGT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
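+// CSELULT x y flags picks x when the flags encode unsigned-less-than and y
+// otherwise; CSELULT0 is the variant whose second operand is a fixed zero,
+// introduced by the first CSELULT rule when y is (MOVDconst [0]). Once the
+// flags are a known Flag* value the select folds to one side, e.g.
+// (CSELULT x y (FlagLT_ULT)) is just x.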
+func rewriteValueARM64_OpARM64DIV(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (DIV (MOVDconst [c]) (MOVDconst [d]))
+ // cond:
+ // result: (MOVDconst [int64(c)/int64(d)])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := v_1.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64(c) / int64(d)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64DIVW(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (DIVW (MOVDconst [c]) (MOVDconst [d]))
+ // cond:
+ // result: (MOVDconst [int64(int32(c)/int32(d))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := v_1.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64(int32(c) / int32(d))
+ return true
+ }
+ return false
+}
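+// The DIV/DIVW folds above evaluate a division of two known constants at
+// compile time. DIVW truncates to 32 bits first and follows Go's
+// truncated-toward-zero semantics: for c=7, d=-2 it produces
+// int64(int32(7)/int32(-2)) = -3.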
+func rewriteValueARM64_OpARM64Equal(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Equal (FlagEQ))
+ // cond:
+ // result: (MOVDconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagEQ {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (Equal (FlagLT_ULT))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagLT_ULT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Equal (FlagLT_UGT))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagLT_UGT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Equal (FlagGT_ULT))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagGT_ULT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Equal (FlagGT_UGT))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagGT_UGT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Equal (InvertFlags x))
+ // cond:
+ // result: (Equal x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64Equal)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (FMOVDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64FMOVDload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64FMOVDload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVDload [off] {sym} ptr (FMOVDstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64FMOVDstore {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (FMOVDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64FMOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64FMOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVSload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (FMOVSload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64FMOVSload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64FMOVSload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVSload [off] {sym} ptr (FMOVSstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64FMOVSstore {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVSstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (FMOVSstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
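+// The four FMOV load/store rewrites above fold address arithmetic into the
+// instruction's offset, e.g. (FMOVDload [8] (ADDconst [16] ptr) mem) becomes
+// (FMOVDload [24] ptr mem), and merge symbol offsets when canMergeSym allows
+// it. The FMOV*load rules also forward a just-stored value: a load at the
+// same sym/offset/pointer as the store it reads from is replaced by the
+// stored value x.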
+func rewriteValueARM64_OpARM64GreaterEqual(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GreaterEqual (FlagEQ))
+ // cond:
+ // result: (MOVDconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagEQ {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterEqual (FlagLT_ULT))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagLT_ULT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterEqual (FlagLT_UGT))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagLT_UGT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterEqual (FlagGT_ULT))
+ // cond:
+ // result: (MOVDconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagGT_ULT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterEqual (FlagGT_UGT))
+ // cond:
+ // result: (MOVDconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagGT_UGT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterEqual (InvertFlags x))
+ // cond:
+ // result: (LessEqual x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64LessEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64GreaterEqualU(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GreaterEqualU (FlagEQ))
+ // cond:
+ // result: (MOVDconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagEQ {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterEqualU (FlagLT_ULT))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagLT_ULT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterEqualU (FlagLT_UGT))
+ // cond:
+ // result: (MOVDconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagLT_UGT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterEqualU (FlagGT_ULT))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagGT_ULT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterEqualU (FlagGT_UGT))
+ // cond:
+ // result: (MOVDconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagGT_UGT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterEqualU (InvertFlags x))
+ // cond:
+ // result: (LessEqualU x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64LessEqualU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64GreaterThan(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GreaterThan (FlagEQ))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagEQ {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterThan (FlagLT_ULT))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagLT_ULT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterThan (FlagLT_UGT))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagLT_UGT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterThan (FlagGT_ULT))
+ // cond:
+ // result: (MOVDconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagGT_ULT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterThan (FlagGT_UGT))
+ // cond:
+ // result: (MOVDconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagGT_UGT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterThan (InvertFlags x))
+ // cond:
+ // result: (LessThan x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64LessThan)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64GreaterThanU(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GreaterThanU (FlagEQ))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagEQ {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterThanU (FlagLT_ULT))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagLT_ULT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterThanU (FlagLT_UGT))
+ // cond:
+ // result: (MOVDconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagLT_UGT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterThanU (FlagGT_ULT))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagGT_ULT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterThanU (FlagGT_UGT))
+ // cond:
+ // result: (MOVDconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagGT_UGT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterThanU (InvertFlags x))
+ // cond:
+ // result: (LessThanU x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64LessThanU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LessEqual(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (LessEqual (FlagEQ))
+ // cond:
+ // result: (MOVDconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagEQ {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (LessEqual (FlagLT_ULT))
+ // cond:
+ // result: (MOVDconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagLT_ULT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (LessEqual (FlagLT_UGT))
+ // cond:
+ // result: (MOVDconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagLT_UGT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (LessEqual (FlagGT_ULT))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagGT_ULT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (LessEqual (FlagGT_UGT))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagGT_UGT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (LessEqual (InvertFlags x))
+ // cond:
+ // result: (GreaterEqual x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64GreaterEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LessEqualU(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (LessEqualU (FlagEQ))
+ // cond:
+ // result: (MOVDconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagEQ {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (LessEqualU (FlagLT_ULT))
+ // cond:
+ // result: (MOVDconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagLT_ULT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (LessEqualU (FlagLT_UGT))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagLT_UGT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (LessEqualU (FlagGT_ULT))
+ // cond:
+ // result: (MOVDconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagGT_ULT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (LessEqualU (FlagGT_UGT))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagGT_UGT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (LessEqualU (InvertFlags x))
+ // cond:
+ // result: (GreaterEqualU x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64GreaterEqualU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LessThan(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (LessThan (FlagEQ))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagEQ {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (LessThan (FlagLT_ULT))
+ // cond:
+ // result: (MOVDconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagLT_ULT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (LessThan (FlagLT_UGT))
+ // cond:
+ // result: (MOVDconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagLT_UGT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (LessThan (FlagGT_ULT))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagGT_ULT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (LessThan (FlagGT_UGT))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagGT_UGT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (LessThan (InvertFlags x))
+ // cond:
+ // result: (GreaterThan x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64GreaterThan)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LessThanU(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (LessThanU (FlagEQ))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagEQ {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (LessThanU (FlagLT_ULT))
+ // cond:
+ // result: (MOVDconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagLT_ULT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (LessThanU (FlagLT_UGT))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagLT_UGT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (LessThanU (FlagGT_ULT))
+ // cond:
+ // result: (MOVDconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagGT_ULT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (LessThanU (FlagGT_UGT))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagGT_UGT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (LessThanU (InvertFlags x))
+ // cond:
+ // result: (GreaterThanU x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64GreaterThanU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
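+// Equal through LessThanU above materialize booleans from statically known
+// flags: each Flag* constant fixes both the signed and unsigned outcome, so
+// e.g. (LessThan (FlagLT_ULT)) is 1 while (LessThanU (FlagLT_UGT)) is 0
+// (signed less-than but unsigned greater-than). The InvertFlags rules swap a
+// condition for its mirror, since inverted flags describe the same compare
+// with its operands exchanged.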
+func rewriteValueARM64_OpARM64MOD(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOD (MOVDconst [c]) (MOVDconst [d]))
+ // cond:
+ // result: (MOVDconst [int64(c)%int64(d)])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := v_1.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64(c) % int64(d)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MODW(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MODW (MOVDconst [c]) (MOVDconst [d]))
+ // cond:
+ // result: (MOVDconst [int64(int32(c)%int32(d))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := v_1.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64(int32(c) % int32(d))
+ return true
+ }
+ return false
+}
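+// MOD/MODW fold constant remainders using Go's truncated division, so the
+// result takes the sign of the dividend: for c=-7, d=2, MODW yields
+// int64(int32(-7)%int32(2)) = -1.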
+func rewriteValueARM64_OpARM64MOVBUload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVBUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVBUload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVBstore {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVBstorezero {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBUreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBUreg x:(MOVBUload _ _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVBUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUreg _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVBUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (MOVDconst [c]))
+ // cond:
+ // result: (MOVDconst [int64(uint8(c))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64(uint8(c))
+ return true
+ }
+ return false
+}
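+// MOVBUreg is a zero-extension of the low byte, so it is redundant after an
+// operation that already produced a zero-extended byte (MOVBUload, MOVBUreg)
+// and collapses to a plain MOVDreg. On constants it is evaluated directly:
+// (MOVBUreg (MOVDconst [0x1ff])) becomes (MOVDconst [0xff]).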
+func rewriteValueARM64_OpARM64MOVBload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVBload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVBload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVBstore {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVBstorezero {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBreg x:(MOVBload _ _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVBload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBreg _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVBreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (MOVDconst [c]))
+ // cond:
+ // result: (MOVDconst [int64(int8(c))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64(int8(c))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // cond:
+ // result: (MOVBstorezero [off] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1.AuxInt != 0 {
+ break
+ }
+ mem := v.Args[2]
+ v.reset(OpARM64MOVBstorezero)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVBUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
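+// Beyond the usual offset and symbol folding, MOVBstore writes only the low
+// byte of its value, so any sign- or zero-extension feeding it is dropped:
+// (MOVBstore [off] {sym} ptr (MOVWreg x) mem) stores x directly. Storing a
+// constant zero is also rewritten to MOVBstorezero, which avoids
+// materializing the constant.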
+func rewriteValueARM64_OpARM64MOVBstorezero(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVBstorezero [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVBstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVBstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVDload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVDload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDstore {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVDload [off] {sym} ptr (MOVDstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDstorezero {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVDreg x)
+ // cond: x.Uses == 1
+ // result: (MOVDnop x)
+ for {
+ x := v.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64MOVDnop)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVDreg (MOVDconst [c]))
+ // cond:
+ // result: (MOVDconst [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = c
+ return true
+ }
+ return false
+}
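+// The Uses==1 rule above turns a register-to-register MOVDreg whose input
+// has no other consumers into MOVDnop, presumably so the copy can be elided
+// entirely rather than occupying an instruction; constants propagate through
+// the move unchanged.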
+func rewriteValueARM64_OpARM64MOVDstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (MOVDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // cond:
+ // result: (MOVDstorezero [off] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1.AuxInt != 0 {
+ break
+ }
+ mem := v.Args[2]
+ v.reset(OpARM64MOVDstorezero)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDstorezero(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVDstorezero [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVDstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHUload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVHUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVHUload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVHUload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVHstore {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVHstorezero {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHUreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHUreg x:(MOVBUload _ _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVBUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUload _ _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVHUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVBUreg _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVBUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUreg _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVHUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (MOVDconst [c]))
+ // cond:
+ // result: (MOVDconst [int64(uint16(c))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64(uint16(c))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVHload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVHload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVHload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVHstore {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVHstorezero {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHreg x:(MOVBload _ _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVBload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUload _ _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVBUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHload _ _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVHload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBreg _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVBreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUreg _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVBUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHreg _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVHreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (MOVDconst [c]))
+ // cond:
+ // result: (MOVDconst [int64(int16(c))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64(int16(c))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (MOVHstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // cond:
+ // result: (MOVHstorezero [off] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1.AuxInt != 0 {
+ break
+ }
+ mem := v.Args[2]
+ v.reset(OpARM64MOVHstorezero)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // cond:
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
+ // cond:
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
+ // cond:
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem)
+ // cond:
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHstorezero(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVHstorezero [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVHstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWUload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVWUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVWUload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVWUload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWUload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVWstore {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVWstorezero {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWUreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWUreg x:(MOVBUload _ _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVBUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUload _ _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVHUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUload _ _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVWUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVBUreg _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVBUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUreg _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVHUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUreg _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVWUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg (MOVDconst [c]))
+ // cond:
+ // result: (MOVDconst [int64(uint32(c))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64(uint32(c))
+ return true
+ }
+ return false
+}
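+// MOVWUreg zero-extends the low 32 bits, so it disappears after any op whose
+// result is already an unsigned value of 32 bits or fewer, and folds on
+// constants: (MOVWUreg (MOVDconst [-1])) becomes (MOVDconst [4294967295]),
+// i.e. int64(uint32(-1)).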
+func rewriteValueARM64_OpARM64MOVWload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVWload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVWload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVWstore {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVWstorezero {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
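+ // MOVWreg sign-extends the low 32 bits of its argument. It is a no-op
+ // when the argument is already sign-extended to 64 bits, or is
+ // zero-extended from fewer than 32 bits (leaving bit 31 clear).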
+ // match: (MOVWreg x:(MOVBload _ _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVBload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUload _ _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVBUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHload _ _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVHload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHUload _ _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVHUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWload _ _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVWload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBreg _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVBreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUreg _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVBUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHreg _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVHreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHUreg _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVHUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWreg _))
+ // cond:
+ // result: (MOVDreg x)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64MOVWreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (MOVDconst [c]))
+ // cond:
+ // result: (MOVDconst [int64(int32(c))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64(int32(c))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
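+ // Stores fold constant offsets and merged symbols into the addressing
+ // mode, turn stores of constant zero into MOVWstorezero, and drop a
+ // MOVWreg/MOVWUreg on the stored value, since only its low 32 bits
+ // reach memory anyway.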
+ // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // cond:
+ // result: (MOVWstorezero [off] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1.AuxInt != 0 {
+ break
+ }
+ mem := v.Args[2]
+ v.reset(OpARM64MOVWstorezero)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
+ // cond:
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem)
+ // cond:
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWstorezero(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVWstorezero [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVWstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MUL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
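+ // Strength-reduce multiplication by a constant c into shifts and adds:
+ //   c == 2^n               -> x<<n
+ //   c == 2^n + 1 (c >= 3)  -> x + x<<n   (ADDshiftLL x x [n])
+ //   c == 2^n - 1 (c >= 7)  -> x<<n - x   (ADDshiftLL (NEG x) x [n])
+ //   c == 3*2^m             -> (x + x<<1) << m
+ //   c == 5*2^m             -> (x + x<<2) << m
+ //   c == 7*2^m             -> (x<<3 - x) << m
+ //   c == 9*2^m             -> (x + x<<3) << m
+ // Each pattern appears twice, once per operand order; a product of
+ // two constants folds completely.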
+ // match: (MUL x (MOVDconst [-1]))
+ // cond:
+ // result: (NEG x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1.AuxInt != -1 {
+ break
+ }
+ v.reset(OpARM64NEG)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MUL _ (MOVDconst [0]))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1.AuxInt != 0 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (MUL x (MOVDconst [1]))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1.AuxInt != 1 {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: isPowerOfTwo(c)
+ // result: (SLLconst [log2(c)] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = log2(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: isPowerOfTwo(c-1) && c >= 3
+ // result: (ADDshiftLL x x [log2(c-1)])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(isPowerOfTwo(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c - 1)
+ v.AddArg(x)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: isPowerOfTwo(c+1) && c >= 7
+ // result: (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(isPowerOfTwo(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c + 1)
+ v0 := b.NewValue0(v.Line, OpARM64NEG, x.Type)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo(c/3)
+ // result: (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(c%3 == 0 && isPowerOfTwo(c/3)) {
+ break
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = log2(c / 3)
+ v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 1
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo(c/5)
+ // result: (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(c%5 == 0 && isPowerOfTwo(c/5)) {
+ break
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = log2(c / 5)
+ v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 2
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo(c/7)
+ // result: (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(c%7 == 0 && isPowerOfTwo(c/7)) {
+ break
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = log2(c / 7)
+ v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 3
+ v1 := b.NewValue0(v.Line, OpARM64NEG, x.Type)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo(c/9)
+ // result: (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(c%9 == 0 && isPowerOfTwo(c/9)) {
+ break
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = log2(c / 9)
+ v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MUL (MOVDconst [-1]) x)
+ // cond:
+ // result: (NEG x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0.AuxInt != -1 {
+ break
+ }
+ x := v.Args[1]
+ v.reset(OpARM64NEG)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MUL (MOVDconst [0]) _)
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0.AuxInt != 0 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (MUL (MOVDconst [1]) x)
+ // cond:
+ // result: x
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0.AuxInt != 1 {
+ break
+ }
+ x := v.Args[1]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MUL (MOVDconst [c]) x)
+ // cond: isPowerOfTwo(c)
+ // result: (SLLconst [log2(c)] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = log2(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MUL (MOVDconst [c]) x)
+ // cond: isPowerOfTwo(c-1) && c >= 3
+ // result: (ADDshiftLL x x [log2(c-1)])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(isPowerOfTwo(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c - 1)
+ v.AddArg(x)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MUL (MOVDconst [c]) x)
+ // cond: isPowerOfTwo(c+1) && c >= 7
+ // result: (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(isPowerOfTwo(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c + 1)
+ v0 := b.NewValue0(v.Line, OpARM64NEG, x.Type)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MUL (MOVDconst [c]) x)
+ // cond: c%3 == 0 && isPowerOfTwo(c/3)
+ // result: (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(c%3 == 0 && isPowerOfTwo(c/3)) {
+ break
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = log2(c / 3)
+ v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 1
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MUL (MOVDconst [c]) x)
+ // cond: c%5 == 0 && isPowerOfTwo(c/5)
+ // result: (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(c%5 == 0 && isPowerOfTwo(c/5)) {
+ break
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = log2(c / 5)
+ v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 2
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MUL (MOVDconst [c]) x)
+ // cond: c%7 == 0 && isPowerOfTwo(c/7)
+ // result: (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(c%7 == 0 && isPowerOfTwo(c/7)) {
+ break
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = log2(c / 7)
+ v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 3
+ v1 := b.NewValue0(v.Line, OpARM64NEG, x.Type)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MUL (MOVDconst [c]) x)
+ // cond: c%9 == 0 && isPowerOfTwo(c/9)
+ // result: (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(c%9 == 0 && isPowerOfTwo(c/9)) {
+ break
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = log2(c / 9)
+ v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MUL (MOVDconst [c]) (MOVDconst [d]))
+ // cond:
+ // result: (MOVDconst [c*d])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := v_1.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = c * d
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MULW(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
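+ // MULW is a 32-bit multiply; only the low 32 bits of the result are
+ // significant, so the conditions below test int32(c) rather than c.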
+ // match: (MULW x (MOVDconst [c]))
+ // cond: int32(c)==-1
+ // result: (NEG x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARM64NEG)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULW _ (MOVDconst [c]))
+ // cond: int32(c)==0
+ // result: (MOVDconst [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: int32(c)==1
+ // result: x
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(int32(c) == 1) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: isPowerOfTwo(c)
+ // result: (SLLconst [log2(c)] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = log2(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: isPowerOfTwo(c-1) && int32(c) >= 3
+ // result: (ADDshiftLL x x [log2(c-1)])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c - 1)
+ v.AddArg(x)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: isPowerOfTwo(c+1) && int32(c) >= 7
+ // result: (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c + 1)
+ v0 := b.NewValue0(v.Line, OpARM64NEG, x.Type)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+ // result: (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = log2(c / 3)
+ v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 1
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+ // result: (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = log2(c / 5)
+ v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 2
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+ // result: (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = log2(c / 7)
+ v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 3
+ v1 := b.NewValue0(v.Line, OpARM64NEG, x.Type)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+ // result: (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = log2(c / 9)
+ v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULW (MOVDconst [c]) x)
+ // cond: int32(c)==-1
+ // result: (NEG x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARM64NEG)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULW (MOVDconst [c]) _)
+ // cond: int32(c)==0
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (MULW (MOVDconst [c]) x)
+ // cond: int32(c)==1
+ // result: x
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(int32(c) == 1) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULW (MOVDconst [c]) x)
+ // cond: isPowerOfTwo(c)
+ // result: (SLLconst [log2(c)] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = log2(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULW (MOVDconst [c]) x)
+ // cond: isPowerOfTwo(c-1) && int32(c) >= 3
+ // result: (ADDshiftLL x x [log2(c-1)])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c - 1)
+ v.AddArg(x)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULW (MOVDconst [c]) x)
+ // cond: isPowerOfTwo(c+1) && int32(c) >= 7
+ // result: (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c + 1)
+ v0 := b.NewValue0(v.Line, OpARM64NEG, x.Type)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULW (MOVDconst [c]) x)
+ // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+ // result: (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = log2(c / 3)
+ v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 1
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULW (MOVDconst [c]) x)
+ // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+ // result: (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = log2(c / 5)
+ v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 2
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULW (MOVDconst [c]) x)
+ // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+ // result: (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = log2(c / 7)
+ v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 3
+ v1 := b.NewValue0(v.Line, OpARM64NEG, x.Type)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULW (MOVDconst [c]) x)
+ // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+ // result: (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = log2(c / 9)
+ v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULW (MOVDconst [c]) (MOVDconst [d]))
+ // cond:
+ // result: (MOVDconst [int64(int32(c)*int32(d))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := v_1.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64(int32(c) * int32(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MVN(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MVN (MOVDconst [c]))
+ // cond:
+ // result: (MOVDconst [^c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = ^c
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64NEG(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NEG (MOVDconst [c]))
+ // cond:
+ // result: (MOVDconst [-c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = -c
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64NotEqual(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
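+ // Flag-constant operands (FlagEQ, FlagLT_*, FlagGT_*) record a
+ // statically known comparison, so NotEqual folds to 0 or 1;
+ // InvertFlags swaps the comparison's operands, which leaves
+ // (in)equality unchanged.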
+ // match: (NotEqual (FlagEQ))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagEQ {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (NotEqual (FlagLT_ULT))
+ // cond:
+ // result: (MOVDconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagLT_ULT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (NotEqual (FlagLT_UGT))
+ // cond:
+ // result: (MOVDconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagLT_UGT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (NotEqual (FlagGT_ULT))
+ // cond:
+ // result: (MOVDconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagGT_ULT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (NotEqual (FlagGT_UGT))
+ // cond:
+ // result: (MOVDconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64FlagGT_UGT {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (NotEqual (InvertFlags x))
+ // cond:
+ // result: (NotEqual x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64NotEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64OR(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
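+ // A constant operand folds into ORconst, and an operand that is a
+ // constant shift fuses into ORshiftLL/ORshiftRL/ORshiftRA, matching
+ // ARM64's shifted-register operand form; (OR x x) is just x.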
+ // match: (OR (MOVDconst [c]) x)
+ // cond:
+ // result: (ORconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64ORconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (OR x (MOVDconst [c]))
+ // cond:
+ // result: (ORconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64ORconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (OR x x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (OR x (SLLconst [c] y))
+ // cond:
+ // result: (ORshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARM64ORshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (OR (SLLconst [c] y) x)
+ // cond:
+ // result: (ORshiftLL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARM64ORshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (OR x (SRLconst [c] y))
+ // cond:
+ // result: (ORshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARM64ORshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (OR (SRLconst [c] y) x)
+ // cond:
+ // result: (ORshiftRL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARM64ORshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (OR x (SRAconst [c] y))
+ // cond:
+ // result: (ORshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARM64ORshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (OR (SRAconst [c] y) x)
+ // cond:
+ // result: (ORshiftRA x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARM64ORshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ORconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORconst [-1] _)
+ // cond:
+ // result: (MOVDconst [-1])
+ for {
+ if v.AuxInt != -1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = -1
+ return true
+ }
+ // match: (ORconst [c] (MOVDconst [d]))
+ // cond:
+ // result: (MOVDconst [c|d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = c | d
+ return true
+ }
+ // match: (ORconst [c] (ORconst [d] x))
+ // cond:
+ // result: (ORconst [c|d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ORconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARM64ORconst)
+ v.AuxInt = c | d
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
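+ // ORshiftLL computes arg0 | arg1<<auxInt (ORR with a shifted-register
+ // operand); the rules below fold a constant on either side, and drop
+ // the OR entirely when both sides denote the same shifted value.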
+ // match: (ORshiftLL (MOVDconst [c]) x [d])
+ // cond:
+ // result: (ORconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64ORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARM64SLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftLL x (MOVDconst [c]) [d])
+ // cond:
+ // result: (ORconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftLL y:(SLLconst x [c]) x [d])
+ // cond: c==d
+ // result: y
+ for {
+ d := v.AuxInt
+ y := v.Args[0]
+ if y.Op != OpARM64SLLconst {
+ break
+ }
+ c := y.AuxInt
+ x := y.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ORshiftRA (MOVDconst [c]) x [d])
+ // cond:
+ // result: (ORconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64ORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARM64SRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRA x (MOVDconst [c]) [d])
+ // cond:
+ // result: (ORconst x [int64(int64(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64(int64(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftRA y:(SRAconst x [c]) x [d])
+ // cond: c==d
+ // result: y
+ for {
+ d := v.AuxInt
+ y := v.Args[0]
+ if y.Op != OpARM64SRAconst {
+ break
+ }
+ c := y.AuxInt
+ x := y.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ORshiftRL (MOVDconst [c]) x [d])
+ // cond:
+ // result: (ORconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64ORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARM64SRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRL x (MOVDconst [c]) [d])
+ // cond:
+ // result: (ORconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftRL y:(SRLconst x [c]) x [d])
+ // cond: c==d
+ // result: y
+ for {
+ d := v.AuxInt
+ y := v.Args[0]
+ if y.Op != OpARM64SRLconst {
+ break
+ }
+ c := y.AuxInt
+ x := y.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
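+ // A variable shift by a constant amount becomes SLLconst. The "& 63"
+ // mirrors the hardware: ARM64 variable shifts use the shift count
+ // modulo the 64-bit register width.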
+ // match: (SLL x (MOVDconst [c]))
+ // cond:
+ // result: (SLLconst x [c&63])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = c & 63
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SLLconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SLLconst [c] (MOVDconst [d]))
+ // cond:
+ // result: (MOVDconst [int64(d)<<uint64(c)])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64(d) << uint64(c)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SRA x (MOVDconst [c]))
+ // cond:
+ // result: (SRAconst x [c&63])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = c & 63
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SRAconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SRAconst [c] (MOVDconst [d]))
+ // cond:
+ // result: (MOVDconst [int64(d)>>uint64(c)])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64(d) >> uint64(c)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SRL x (MOVDconst [c]))
+ // cond:
+ // result: (SRLconst x [c&63])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64SRLconst)
+ v.AuxInt = c & 63
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SRLconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SRLconst [c] (MOVDconst [d]))
+ // cond:
+ // result: (MOVDconst [int64(uint64(d)>>uint64(c))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64(uint64(d) >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SUB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
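+ // Constant and shifted-register operands fold into SUBconst and
+ // SUBshiftLL/RL/RA; (SUB x x) is constant zero.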
+ // match: (SUB x (MOVDconst [c]))
+ // cond:
+ // result: (SUBconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64SUBconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB x x)
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (SUB x (SLLconst [c] y))
+ // cond:
+ // result: (SUBshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUB x (SRLconst [c] y))
+ // cond:
+ // result: (SUBshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARM64SUBshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUB x (SRAconst [c] y))
+ // cond:
+ // result: (SUBshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARM64SUBshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SUBconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (MOVDconst [d]))
+ // cond:
+ // result: (MOVDconst [d-c])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = d - c
+ return true
+ }
+ // match: (SUBconst [c] (SUBconst [d] x))
+ // cond:
+ // result: (ADDconst [-c-d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SUBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = -c - d
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (ADDconst [d] x))
+ // cond:
+ // result: (ADDconst [-c+d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = -c + d
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SUBshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBshiftLL x (MOVDconst [c]) [d])
+ // cond:
+ // result: (SUBconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64SUBconst)
+ v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBshiftLL (SLLconst x [c]) x [d])
+ // cond: c==d
+ // result: (MOVDconst [0])
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SUBshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBshiftRA x (MOVDconst [c]) [d])
+ // cond:
+ // result: (SUBconst x [int64(int64(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64SUBconst)
+ v.AuxInt = int64(int64(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBshiftRA (SRAconst x [c]) x [d])
+ // cond: c==d
+ // result: (MOVDconst [0])
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SUBshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBshiftRL x (MOVDconst [c]) [d])
+ // cond:
+ // result: (SUBconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64SUBconst)
+ v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBshiftRL (SRLconst x [c]) x [d])
+ // cond: c==d
+ // result: (MOVDconst [0])
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64UDIV(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
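+ // Unsigned division: x/1 is x, and division by a power of two is a
+ // logical right shift: x / 2^n = x >> n. Two constants fold outright.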
+ // match: (UDIV x (MOVDconst [1]))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1.AuxInt != 1 {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (UDIV x (MOVDconst [c]))
+ // cond: isPowerOfTwo(c)
+ // result: (SRLconst [log2(c)] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpARM64SRLconst)
+ v.AuxInt = log2(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (UDIV (MOVDconst [c]) (MOVDconst [d]))
+ // cond:
+ // result: (MOVDconst [int64(uint64(c)/uint64(d))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := v_1.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64(uint64(c) / uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64UDIVW(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (UDIVW x (MOVDconst [c]))
+ // cond: uint32(c)==1
+ // result: x
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint32(c) == 1) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (UDIVW x (MOVDconst [c]))
+ // cond: isPowerOfTwo(c) && is32Bit(c)
+ // result: (SRLconst [log2(c)] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(isPowerOfTwo(c) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SRLconst)
+ v.AuxInt = log2(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (UDIVW (MOVDconst [c]) (MOVDconst [d]))
+ // cond:
+ // result: (MOVDconst [int64(uint32(c)/uint32(d))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := v_1.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64(uint32(c) / uint32(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64UMOD(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
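+ // Unsigned modulus: x%1 is 0, and a power-of-two modulus is a mask:
+ // x % 2^n = x & (2^n - 1).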
+ // match: (UMOD _ (MOVDconst [1]))
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1.AuxInt != 1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (UMOD x (MOVDconst [c]))
+ // cond: isPowerOfTwo(c)
+ // result: (ANDconst [c-1] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = c - 1
+ v.AddArg(x)
+ return true
+ }
+ // match: (UMOD (MOVDconst [c]) (MOVDconst [d]))
+ // cond:
+ // result: (MOVDconst [int64(uint64(c)%uint64(d))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := v_1.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64(uint64(c) % uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64UMODW(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (UMODW _ (MOVDconst [c]))
+ // cond: uint32(c)==1
+ // result: (MOVDconst [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint32(c) == 1) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (UMODW x (MOVDconst [c]))
+ // cond: isPowerOfTwo(c) && is32Bit(c)
+ // result: (ANDconst [c-1] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(isPowerOfTwo(c) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = c - 1
+ v.AddArg(x)
+ return true
+ }
+ // match: (UMODW (MOVDconst [c]) (MOVDconst [d]))
+ // cond:
+ // result: (MOVDconst [int64(uint32(c)%uint32(d))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := v_1.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64(uint32(c) % uint32(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64XOR(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
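+ // Mirrors the OR rules: constants fold into XORconst, constant shifts
+ // fuse into XORshiftLL/RL/RA, and (XOR x x) is constant zero.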
+ // match: (XOR (MOVDconst [c]) x)
+ // cond:
+ // result: (XORconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64XORconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XOR x (MOVDconst [c]))
+ // cond:
+ // result: (XORconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64XORconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XOR x x)
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (XOR x (SLLconst [c] y))
+ // cond:
+ // result: (XORshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARM64XORshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (XOR (SLLconst [c] y) x)
+ // cond:
+ // result: (XORshiftLL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARM64XORshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (XOR x (SRLconst [c] y))
+ // cond:
+ // result: (XORshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARM64XORshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (XOR (SRLconst [c] y) x)
+ // cond:
+ // result: (XORshiftRL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARM64XORshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (XOR x (SRAconst [c] y))
+ // cond:
+ // result: (XORshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARM64XORshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (XOR (SRAconst [c] y) x)
+ // cond:
+ // result: (XORshiftRA x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARM64XORshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64XORconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORconst [-1] x)
+ // cond:
+ // result: (MVN x)
+ for {
+ if v.AuxInt != -1 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpARM64MVN)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORconst [c] (MOVDconst [d]))
+ // cond:
+ // result: (MOVDconst [c^d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = c ^ d
+ return true
+ }
+ // match: (XORconst [c] (XORconst [d] x))
+ // cond:
+ // result: (XORconst [c^d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64XORconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARM64XORconst)
+ v.AuxInt = c ^ d
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64XORshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORshiftLL (MOVDconst [c]) x [d])
+ // cond:
+ // result: (XORconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64XORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARM64SLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftLL x (MOVDconst [c]) [d])
+ // cond:
+ // result: (XORconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftLL (SLLconst x [c]) x [d])
+ // cond: c==d
+ // result: (MOVDconst [0])
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64XORshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORshiftRA (MOVDconst [c]) x [d])
+ // cond:
+ // result: (XORconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64XORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARM64SRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRA x (MOVDconst [c]) [d])
+ // cond:
+ // result: (XORconst x [int64(int64(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64(int64(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftRA (SRAconst x [c]) x [d])
+ // cond: c==d
+ // result: (MOVDconst [0])
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64XORshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORshiftRL (MOVDconst [c]) x [d])
+ // cond:
+ // result: (XORconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64XORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARM64SRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRL x (MOVDconst [c]) [d])
+ // cond:
+ // result: (XORconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftRL (SRLconst x [c]) x [d])
+ // cond: c==d
+ // result: (MOVDconst [0])
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpAdd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add16 x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64ADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpAdd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32 x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64ADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpAdd32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32F x y)
+ // cond:
+ // result: (FADDS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64FADDS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpAdd64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add64 x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64ADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpAdd64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add64F x y)
+ // cond:
+ // result: (FADDD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64FADDD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpAdd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add8 x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64ADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpAddPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AddPtr x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64ADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpAddr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Addr {sym} base)
+ // cond:
+ // result: (MOVDaddr {sym} base)
+ for {
+ sym := v.Aux
+ base := v.Args[0]
+ v.reset(OpARM64MOVDaddr)
+ v.Aux = sym
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueARM64_OpAnd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And16 x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64AND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpAnd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And32 x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64AND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpAnd64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And64 x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64AND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpAnd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And8 x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64AND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpAndB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AndB x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64AND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpAvg64u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
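+ // Unsigned average without overflow: since x+y may wrap, compute
+ // x/2 + y/2 + (x&y&1) instead; the final term restores the carry
+ // lost when both low bits are set, giving (x+y)/2 exactly.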
+ // match: (Avg64u <t> x y)
+ // cond:
+ // result: (ADD (ADD <t> (SRLconst <t> x [1]) (SRLconst <t> y [1])) (AND <t> (AND <t> x y) (MOVDconst [1])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64ADD)
+ v0 := b.NewValue0(v.Line, OpARM64ADD, t)
+ v1 := b.NewValue0(v.Line, OpARM64SRLconst, t)
+ v1.AuxInt = 1
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpARM64SRLconst, t)
+ v2.AuxInt = 1
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpARM64AND, t)
+ v4 := b.NewValue0(v.Line, OpARM64AND, t)
+ v4.AddArg(x)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v5 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v5.AuxInt = 1
+ v3.AddArg(v5)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpClosureCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ClosureCall [argwid] entry closure mem)
+ // cond:
+ // result: (CALLclosure [argwid] entry closure mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ closure := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64CALLclosure)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(closure)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM64_OpCom16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com16 x)
+ // cond:
+ // result: (MVN x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MVN)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCom32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com32 x)
+ // cond:
+ // result: (MVN x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MVN)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCom64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com64 x)
+ // cond:
+ // result: (MVN x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MVN)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCom8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com8 x)
+ // cond:
+ // result: (MVN x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MVN)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpConst16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const16 [val])
+ // cond:
+ // result: (MOVDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM64_OpConst32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32 [val])
+ // cond:
+ // result: (MOVDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM64_OpConst32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32F [val])
+ // cond:
+ // result: (FMOVSconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARM64FMOVSconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM64_OpConst64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const64 [val])
+ // cond:
+ // result: (MOVDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM64_OpConst64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const64F [val])
+ // cond:
+ // result: (FMOVDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARM64FMOVDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM64_OpConst8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const8 [val])
+ // cond:
+ // result: (MOVDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM64_OpConstBool(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstBool [b])
+ // cond:
+ // result: (MOVDconst [b])
+ for {
+ b := v.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = b
+ return true
+ }
+}
+func rewriteValueARM64_OpConstNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstNil)
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+}
+func rewriteValueARM64_OpConvert(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Convert x mem)
+ // cond:
+ // result: (MOVDconvert x mem)
+ for {
+ x := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVDconvert)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt32Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto32 x)
+ // cond:
+ // result: (FCVTZSSW x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64FCVTZSSW)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt32Fto32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto32U x)
+ // cond:
+ // result: (FCVTZUSW x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64FCVTZUSW)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt32Fto64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto64 x)
+ // cond:
+ // result: (FCVTZSS x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64FCVTZSS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt32Fto64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto64F x)
+ // cond:
+ // result: (FCVTSD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64FCVTSD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt32Uto32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Uto32F x)
+ // cond:
+ // result: (UCVTFWS x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64UCVTFWS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt32Uto64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Uto64F x)
+ // cond:
+ // result: (UCVTFWD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64UCVTFWD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt32to32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to32F x)
+ // cond:
+ // result: (SCVTFWS x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64SCVTFWS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt32to64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to64F x)
+ // cond:
+ // result: (SCVTFWD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64SCVTFWD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt64Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32 x)
+ // cond:
+ // result: (FCVTZSDW x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64FCVTZSDW)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt64Fto32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32F x)
+ // cond:
+ // result: (FCVTDS x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64FCVTDS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt64Fto32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32U x)
+ // cond:
+ // result: (FCVTZUDW x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64FCVTZUDW)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt64Fto64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto64 x)
+ // cond:
+ // result: (FCVTZSD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64FCVTZSD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt64to32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64to32F x)
+ // cond:
+ // result: (SCVTFS x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64SCVTFS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt64to64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64to64F x)
+ // cond:
+ // result: (SCVTFD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64SCVTFD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpDeferCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (DeferCall [argwid] mem)
+ // cond:
+ // result: (CALLdefer [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(OpARM64CALLdefer)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16 x y)
+ // cond:
+ // result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64DIVW)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16u x y)
+ // cond:
+ // result: (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64UDIVW)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32 x y)
+ // cond:
+ // result: (DIVW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64DIVW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32F x y)
+ // cond:
+ // result: (FDIVS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64FDIVS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32u x y)
+ // cond:
+ // result: (UDIVW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64UDIVW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64 x y)
+ // cond:
+ // result: (DIV x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64DIV)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64F x y)
+ // cond:
+ // result: (FDIVD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64FDIVD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv64u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64u x y)
+ // cond:
+ // result: (UDIV x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64UDIV)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8 x y)
+ // cond:
+ // result: (DIVW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64DIVW)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8u x y)
+ // cond:
+ // result: (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64UDIVW)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq16 x y)
+ // cond:
+ // result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32 x y)
+ // cond:
+ // result: (Equal (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32F x y)
+ // cond:
+ // result: (Equal (FCMPS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq64 x y)
+ // cond:
+ // result: (Equal (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq64F x y)
+ // cond:
+ // result: (Equal (FCMPD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq8 x y)
+ // cond:
+ // result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqB x y)
+ // cond:
+ // result: (XOR (MOVDconst [1]) (XOR <config.fe.TypeBool()> x y))
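+ // Booleans are materialized as 0 or 1, so x == y is computed as 1 XOR (x XOR y).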
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64XOR)
+ v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 1
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64XOR, config.fe.TypeBool())
+ v1.AddArg(x)
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpEqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqPtr x y)
+ // cond:
+ // result: (Equal (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16 x y)
+ // cond:
+ // result: (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16U x y)
+ // cond:
+ // result: (GreaterEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterEqualU)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32 x y)
+ // cond:
+ // result: (GreaterEqual (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32F x y)
+ // cond:
+ // result: (GreaterEqual (FCMPS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32U x y)
+ // cond:
+ // result: (GreaterEqualU (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterEqualU)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGeq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64 x y)
+ // cond:
+ // result: (GreaterEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64F x y)
+ // cond:
+ // result: (GreaterEqual (FCMPD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGeq64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64U x y)
+ // cond:
+ // result: (GreaterEqualU (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterEqualU)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8 x y)
+ // cond:
+ // result: (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8U x y)
+ // cond:
+ // result: (GreaterEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterEqualU)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGetClosurePtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GetClosurePtr)
+ // cond:
+ // result: (LoweredGetClosurePtr)
+ for {
+ v.reset(OpARM64LoweredGetClosurePtr)
+ return true
+ }
+}
+func rewriteValueARM64_OpGoCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GoCall [argwid] mem)
+ // cond:
+ // result: (CALLgo [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(OpARM64CALLgo)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM64_OpGreater16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16 x y)
+ // cond:
+ // result: (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterThan)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGreater16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16U x y)
+ // cond:
+ // result: (GreaterThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterThanU)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGreater32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32 x y)
+ // cond:
+ // result: (GreaterThan (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterThan)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGreater32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32F x y)
+ // cond:
+ // result: (GreaterThan (FCMPS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterThan)
+ v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGreater32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32U x y)
+ // cond:
+ // result: (GreaterThanU (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterThanU)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGreater64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64 x y)
+ // cond:
+ // result: (GreaterThan (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterThan)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGreater64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64F x y)
+ // cond:
+ // result: (GreaterThan (FCMPD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterThan)
+ v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGreater64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64U x y)
+ // cond:
+ // result: (GreaterThanU (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterThanU)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGreater8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8 x y)
+ // cond:
+ // result: (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterThan)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGreater8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8U x y)
+ // cond:
+ // result: (GreaterThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterThanU)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpHmul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16 x y)
+ // cond:
+ // result: (SRAconst (MULW <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
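+ // High half of a 16-bit multiply: sign-extend both operands, take the full
+ // 32-bit product, then arithmetic-shift right by 16.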
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = 16
+ v0 := b.NewValue0(v.Line, OpARM64MULW, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpHmul16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16u x y)
+ // cond:
+ // result: (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SRLconst)
+ v.AuxInt = 16
+ v0 := b.NewValue0(v.Line, OpARM64MUL, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpHmul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32 x y)
+ // cond:
+ // result: (SRAconst (MULL <config.fe.TypeInt64()> x y) [32])
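+ // MULL is a signed 32x32->64 multiply; shifting the 64-bit product right by 32
+ // leaves the high word.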
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = 32
+ v0 := b.NewValue0(v.Line, OpARM64MULL, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpHmul32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32u x y)
+ // cond:
+ // result: (SRAconst (UMULL <config.fe.TypeUInt64()> x y) [32])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = 32
+ v0 := b.NewValue0(v.Line, OpARM64UMULL, config.fe.TypeUInt64())
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpHmul64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul64 x y)
+ // cond:
+ // result: (MULH x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64MULH)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpHmul64u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul64u x y)
+ // cond:
+ // result: (UMULH x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64UMULH)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpHmul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8 x y)
+ // cond:
+ // result: (SRAconst (MULW <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = 8
+ v0 := b.NewValue0(v.Line, OpARM64MULW, config.fe.TypeInt16())
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpHmul8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8u x y)
+ // cond:
+ // result: (SRLconst (MUL <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SRLconst)
+ v.AuxInt = 8
+ v0 := b.NewValue0(v.Line, OpARM64MUL, config.fe.TypeUInt16())
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpInterCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (InterCall [argwid] entry mem)
+ // cond:
+ // result: (CALLinter [argwid] entry mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64CALLinter)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM64_OpIsInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsInBounds idx len)
+ // cond:
+ // result: (LessThanU (CMP idx len))
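+ // A single unsigned compare checks 0 <= idx < len: a negative idx wraps to a
+ // huge unsigned value, which is never below len.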
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpIsNonNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsNonNil ptr)
+ // cond:
+ // result: (NotEqual (CMPconst [0] ptr))
+ for {
+ ptr := v.Args[0]
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(ptr)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpIsSliceInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsSliceInBounds idx len)
+ // cond:
+ // result: (LessEqualU (CMP idx len))
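+ // Same unsigned trick as IsInBounds, but with <= since a slice bound may equal len.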
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(OpARM64LessEqualU)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16 x y)
+ // cond:
+ // result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16U x y)
+ // cond:
+ // result: (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessEqualU)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32 x y)
+ // cond:
+ // result: (LessEqual (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32F x y)
+ // cond:
+ // result: (GreaterEqual (FCMPS y x))
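+ // The operands are swapped so the unordered (NaN) case compares false: after an
+ // unordered FCMP the ARM64 GE/GT conditions are false, while LE/LT would be true.
+ // Leq64F, Less32F, and Less64F below use the same swap.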
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32U x y)
+ // cond:
+ // result: (LessEqualU (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessEqualU)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64 x y)
+ // cond:
+ // result: (LessEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64F x y)
+ // cond:
+ // result: (GreaterEqual (FCMPD y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64U x y)
+ // cond:
+ // result: (LessEqualU (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessEqualU)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8 x y)
+ // cond:
+ // result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8U x y)
+ // cond:
+ // result: (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessEqualU)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16 x y)
+ // cond:
+ // result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessThan)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16U x y)
+ // cond:
+ // result: (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32 x y)
+ // cond:
+ // result: (LessThan (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessThan)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32F x y)
+ // cond:
+ // result: (GreaterThan (FCMPS y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterThan)
+ v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32U x y)
+ // cond:
+ // result: (LessThanU (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64 x y)
+ // cond:
+ // result: (LessThan (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessThan)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64F x y)
+ // cond:
+ // result: (GreaterThan (FCMPD y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterThan)
+ v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64U x y)
+ // cond:
+ // result: (LessThanU (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8 x y)
+ // cond:
+ // result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessThan)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8U x y)
+ // cond:
+ // result: (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLoad(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
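+ // Loads are lowered by the type's width, signedness, and class: MOVB/MOVH/MOVW/MOVD
+ // (with U variants for unsigned) for integers and pointers, FMOVS/FMOVD for floats.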
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && isSigned(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is8BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpARM64MOVBload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && !isSigned(t))
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is8BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && isSigned(t))
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is16BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpARM64MOVHload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && !isSigned(t))
+ // result: (MOVHUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is16BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpARM64MOVHUload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && isSigned(t))
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpARM64MOVWload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && !isSigned(t))
+ // result: (MOVWUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpARM64MOVWUload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpARM64MOVDload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (FMOVSload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpARM64FMOVSload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (FMOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpARM64FMOVDload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpLrot16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot16 <t> x [c])
+ // cond:
+ // result: (OR (SLLconst <t> x [c&15]) (SRLconst <t> (ZeroExt16to64 x) [16-c&15]))
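+ // ARM64 has no 16-bit rotate, so the rotate is synthesized: OR the left-shifted
+ // value with the zero-extended value shifted right by 16-c, which brings the
+ // wrapped-around high bits into the low positions.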
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpARM64OR)
+ v0 := b.NewValue0(v.Line, OpARM64SLLconst, t)
+ v0.AuxInt = c & 15
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64SRLconst, t)
+ v1.AuxInt = 16 - c&15
+ v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v2.AddArg(x)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpLrot32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot32 x [c])
+ // cond:
+ // result: (RORWconst x [32-c&31])
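+ // A left rotate by c is a right rotate by 32-c; RORWconst rotates right.
+ // Lrot64 below uses the same identity with 64.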
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpARM64RORWconst)
+ v.AuxInt = 32 - c&31
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpLrot64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot64 x [c])
+ // cond:
+ // result: (RORconst x [64-c&63])
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpARM64RORconst)
+ v.AuxInt = 64 - c&63
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpLrot8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot8 <t> x [c])
+ // cond:
+ // result: (OR (SLLconst <t> x [c&7]) (SRLconst <t> (ZeroExt8to64 x) [8-c&7]))
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpARM64OR)
+ v0 := b.NewValue0(v.Line, OpARM64SLLconst, t)
+ v0.AuxInt = c & 7
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64SRLconst, t)
+ v1.AuxInt = 8 - c&7
+ v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v2.AddArg(x)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x16 <t> x y)
+ // cond:
+ // result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
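+ // Go defines x<<c as 0 once c >= the operand width, but the hardware SLL takes
+ // the count mod 64. CSELULT selects the shift result while the count is
+ // unsigned-less-than 64 and 0 otherwise; comparing against 64 (not 16) suffices
+ // because for counts 16..63 the low 16 bits are already zero. All the
+ // variable-count shift rules below use this same guard.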
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpConst64, t)
+ v2.AuxInt = 0
+ v.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x32 <t> x y)
+ // cond:
+ // result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpConst64, t)
+ v2.AuxInt = 0
+ v.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 16
+ // result: (SLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh16x64 _ (MOVDconst [c]))
+ // cond: uint64(c) >= 16
+ // result: (MOVDconst [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Lsh16x64 <t> x y)
+ // cond:
+ // result: (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpConst64, t)
+ v1.AuxInt = 0
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh16x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x8 <t> x y)
+ // cond:
+ // result: (CSELULT (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpConst64, t)
+ v2.AuxInt = 0
+ v.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x16 <t> x y)
+ // cond:
+ // result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpConst64, t)
+ v2.AuxInt = 0
+ v.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh32x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x32 <t> x y)
+ // cond:
+ // result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpConst64, t)
+ v2.AuxInt = 0
+ v.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 32
+ // result: (SLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh32x64 _ (MOVDconst [c]))
+ // cond: uint64(c) >= 32
+ // result: (MOVDconst [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Lsh32x64 <t> x y)
+ // cond:
+ // result: (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpConst64, t)
+ v1.AuxInt = 0
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x8 <t> x y)
+ // cond:
+ // result: (CSELULT (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpConst64, t)
+ v2.AuxInt = 0
+ v.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh64x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x16 <t> x y)
+ // cond:
+ // result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpConst64, t)
+ v2.AuxInt = 0
+ v.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh64x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x32 <t> x y)
+ // cond:
+ // result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpConst64, t)
+ v2.AuxInt = 0
+ v.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh64x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 64
+ // result: (SLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 64) {
+ break
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh64x64 _ (MOVDconst [c]))
+ // cond: uint64(c) >= 64
+ // result: (MOVDconst [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Lsh64x64 <t> x y)
+ // cond:
+ // result: (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpConst64, t)
+ v1.AuxInt = 0
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh64x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x8 <t> x y)
+ // cond:
+ // result: (CSELULT (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpConst64, t)
+ v2.AuxInt = 0
+ v.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x16 <t> x y)
+ // cond:
+ // result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpConst64, t)
+ v2.AuxInt = 0
+ v.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x32 <t> x y)
+ // cond:
+ // result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpConst64, t)
+ v2.AuxInt = 0
+ v.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 8
+ // result: (SLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh8x64 _ (MOVDconst [c]))
+ // cond: uint64(c) >= 8
+ // result: (MOVDconst [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Lsh8x64 <t> x y)
+ // cond:
+ // result: (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpConst64, t)
+ v1.AuxInt = 0
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x8 <t> x y)
+ // cond:
+ // result: (CSELULT (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpConst64, t)
+ v2.AuxInt = 0
+ v.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16 x y)
+ // cond:
+ // result: (MODW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64MODW)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16u x y)
+ // cond:
+ // result: (UMODW (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64UMODW)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32 x y)
+ // cond:
+ // result: (MODW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64MODW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32u x y)
+ // cond:
+ // result: (UMODW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64UMODW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod64 x y)
+ // cond:
+ // result: (MOD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64MOD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod64u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod64u x y)
+ // cond:
+ // result: (UMOD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64UMOD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8 x y)
+ // cond:
+ // result: (MODW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64MODW)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8u x y)
+ // cond:
+ // result: (UMODW (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64UMODW)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
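+// The Mod* lowerings above share one shape: ARM64 has only 32- and 64-bit
+// remainder sequences (MODW/UMODW and MOD/UMOD), so 8- and 16-bit operands
+// are first widened to 32 bits, with SignExt* for the signed forms and
+// ZeroExt* for the unsigned ones. The extension choice is what preserves the
+// sign of the Go-level remainder: int8(-7) % int8(2) == -1 only if both
+// operands are sign-extended before MODW.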
+func rewriteValueARM64_OpMove(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Move [s] _ _ mem)
+ // cond: SizeAndAlign(s).Size() == 0
+ // result: mem
+ for {
+ s := v.AuxInt
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = mem.Type
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 1
+ // result: (MOVBstore dst (MOVBUload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 1) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
+ // result: (MOVHstore dst (MOVHUload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 2
+ // result: (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 2) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = 1
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
+ v0.AuxInt = 1
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
+ // result: (MOVWstore dst (MOVWUload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARM64MOVWUload, config.fe.TypeUInt32())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
+ // result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = 2
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
+ v0.AuxInt = 2
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 4
+ // result: (MOVBstore [3] dst (MOVBUload [3] src mem) (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 4) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = 3
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
+ v0.AuxInt = 3
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
+ v1.AuxInt = 2
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
+ v2.AuxInt = 2
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
+ v3.AuxInt = 1
+ v3.AddArg(dst)
+ v4 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
+ v4.AuxInt = 1
+ v4.AddArg(src)
+ v4.AddArg(mem)
+ v3.AddArg(v4)
+ v5 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
+ v5.AddArg(dst)
+ v6 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
+ v6.AddArg(src)
+ v6.AddArg(mem)
+ v5.AddArg(v6)
+ v5.AddArg(mem)
+ v3.AddArg(v5)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0
+ // result: (MOVDstore dst (MOVDload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARM64MOVDload, config.fe.TypeUInt64())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0
+ // result: (MOVWstore [4] dst (MOVWUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = 4
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARM64MOVWUload, config.fe.TypeUInt32())
+ v0.AuxInt = 4
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64MOVWstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpARM64MOVWUload, config.fe.TypeUInt32())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0
+ // result: (MOVHstore [6] dst (MOVHUload [6] src mem) (MOVHstore [4] dst (MOVHUload [4] src mem) (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = 6
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
+ v0.AuxInt = 6
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
+ v1.AuxInt = 4
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
+ v2.AuxInt = 4
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
+ v3.AuxInt = 2
+ v3.AddArg(dst)
+ v4 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
+ v4.AuxInt = 2
+ v4.AddArg(src)
+ v4.AddArg(mem)
+ v3.AddArg(v4)
+ v5 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
+ v5.AddArg(dst)
+ v6 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
+ v6.AddArg(src)
+ v6.AddArg(mem)
+ v5.AddArg(v6)
+ v5.AddArg(mem)
+ v3.AddArg(v5)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 3
+ // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 3) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = 2
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
+ v0.AuxInt = 2
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
+ v1.AuxInt = 1
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
+ v2.AuxInt = 1
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
+ v3.AddArg(dst)
+ v4 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
+ v4.AddArg(src)
+ v4.AddArg(mem)
+ v3.AddArg(v4)
+ v3.AddArg(mem)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0
+ // result: (MOVHstore [4] dst (MOVHUload [4] src mem) (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem)))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = 4
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
+ v0.AuxInt = 4
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
+ v1.AuxInt = 2
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
+ v2.AuxInt = 2
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
+ v3.AddArg(dst)
+ v4 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
+ v4.AddArg(src)
+ v4.AddArg(mem)
+ v3.AddArg(v4)
+ v3.AddArg(mem)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0
+ // result: (MOVWstore [8] dst (MOVWUload [8] src mem) (MOVWstore [4] dst (MOVWUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem)))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = 8
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARM64MOVWUload, config.fe.TypeUInt32())
+ v0.AuxInt = 8
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64MOVWstore, TypeMem)
+ v1.AuxInt = 4
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpARM64MOVWUload, config.fe.TypeUInt32())
+ v2.AuxInt = 4
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64MOVWstore, TypeMem)
+ v3.AddArg(dst)
+ v4 := b.NewValue0(v.Line, OpARM64MOVWUload, config.fe.TypeUInt32())
+ v4.AddArg(src)
+ v4.AddArg(mem)
+ v3.AddArg(v4)
+ v3.AddArg(mem)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0
+ // result: (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = 8
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARM64MOVDload, config.fe.TypeUInt64())
+ v0.AuxInt = 8
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpARM64MOVDload, config.fe.TypeUInt64())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0
+ // result: (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = 16
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpARM64MOVDload, config.fe.TypeUInt64())
+ v0.AuxInt = 16
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
+ v1.AuxInt = 8
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpARM64MOVDload, config.fe.TypeUInt64())
+ v2.AuxInt = 8
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
+ v3.AddArg(dst)
+ v4 := b.NewValue0(v.Line, OpARM64MOVDload, config.fe.TypeUInt64())
+ v4.AddArg(src)
+ v4.AddArg(mem)
+ v3.AddArg(v4)
+ v3.AddArg(mem)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() > 24 || SizeAndAlign(s).Align()%8 != 0
+ // result: (LoweredMove [SizeAndAlign(s).Align()] dst src (ADDconst <src.Type> src [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)]) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() > 24 || SizeAndAlign(s).Align()%8 != 0) {
+ break
+ }
+ v.reset(OpARM64LoweredMove)
+ v.AuxInt = SizeAndAlign(s).Align()
+ v.AddArg(dst)
+ v.AddArg(src)
+ v0 := b.NewValue0(v.Line, OpARM64ADDconst, src.Type)
+ v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+ v0.AddArg(src)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
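+// Move is lowered by total size and alignment: copies of up to 24 bytes with
+// a compatible alignment unroll into straight-line load/store pairs, widest
+// element first (a 16-byte 8-aligned copy, for instance, becomes MOVDload/
+// MOVDstore pairs at offsets 8 and 0), misaligned small sizes fall back to
+// narrower element widths, and anything larger or not 8-aligned becomes
+// LoweredMove. Note the third argument of LoweredMove:
+// (ADDconst src [size - moveSize(align, config)]) is the address of the last
+// element to copy, which the back end uses as the stopping address for its
+// copy loop; moveSize here is the per-iteration copy width implied by the
+// alignment.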
+func rewriteValueARM64_OpMul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul16 x y)
+ // cond:
+ // result: (MULW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64MULW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpMul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32 x y)
+ // cond:
+ // result: (MULW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64MULW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpMul32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32F x y)
+ // cond:
+ // result: (FMULS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64FMULS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpMul64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul64 x y)
+ // cond:
+ // result: (MUL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64MUL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpMul64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul64F x y)
+ // cond:
+ // result: (FMULD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64FMULD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpMul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul8 x y)
+ // cond:
+ // result: (MULW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64MULW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeg16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg16 x)
+ // cond:
+ // result: (NEG x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64NEG)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeg32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg32 x)
+ // cond:
+ // result: (NEG x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64NEG)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeg32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg32F x)
+ // cond:
+ // result: (FNEGS x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64FNEGS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeg64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg64 x)
+ // cond:
+ // result: (NEG x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64NEG)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeg64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg64F x)
+ // cond:
+ // result: (FNEGD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64FNEGD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeg8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg8 x)
+ // cond:
+ // result: (NEG x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64NEG)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq16 x y)
+ // cond:
+ // result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
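+// Sub-word comparisons such as Neq16 normalize both operands before the
+// compare: values narrower than 32 bits may be held in registers with
+// unspecified upper bits, so each side is zero-extended to 32 bits and
+// compared with a 32-bit CMPW. For pure (in)equality the choice between
+// zero- and sign-extension is immaterial, as long as both sides get the
+// same treatment.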
+func rewriteValueARM64_OpNeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32 x y)
+ // cond:
+ // result: (NotEqual (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32F x y)
+ // cond:
+ // result: (NotEqual (FCMPS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq64 x y)
+ // cond:
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq64F x y)
+ // cond:
+ // result: (NotEqual (FCMPD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq8 x y)
+ // cond:
+ // result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqB x y)
+ // cond:
+ // result: (XOR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64XOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqPtr x y)
+ // cond:
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNilCheck(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NilCheck ptr mem)
+ // cond:
+ // result: (LoweredNilCheck ptr mem)
+ for {
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64LoweredNilCheck)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM64_OpNot(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Not x)
+ // cond:
+ // result: (XOR (MOVDconst [1]) x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64XOR)
+ v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 1
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+}
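+// NeqB above and Not here both lean on the backend's invariant that booleans
+// are materialized as the full-width values 0 and 1: with that
+// representation, x != y is exactly XOR x y, and !x is XOR with the constant
+// 1 (1^1 = 0, 0^1 = 1), so neither needs a compare or a branch.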
+func rewriteValueARM64_OpOffPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OffPtr [off] ptr:(SP))
+ // cond:
+ // result: (MOVDaddr [off] ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ if ptr.Op != OpSP {
+ break
+ }
+ v.reset(OpARM64MOVDaddr)
+ v.AuxInt = off
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // cond:
+ // result: (ADDconst [off] ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = off
+ v.AddArg(ptr)
+ return true
+ }
+}
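+// OffPtr has two forms: offsets from the stack pointer become MOVDaddr,
+// roughly an address-computation pseudo-op resolved once the frame layout is
+// known, while offsets from arbitrary pointers are plain ADDconst
+// arithmetic. The SP rule is listed first, and since the rules in each
+// function are tried in order, it wins whenever it applies.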
+func rewriteValueARM64_OpOr16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or16 x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64OR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpOr32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or32 x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64OR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpOr64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or64 x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64OR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpOr8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or8 x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64OR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpOrB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OrB x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64OR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh16Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux16 <t> x y)
+ // cond:
+ // result: (CSELULT (SRL <t> (ZeroExt16to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpConst64, t)
+ v3.AuxInt = 0
+ v.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v4.AuxInt = 64
+ v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v5.AddArg(y)
+ v4.AddArg(v5)
+ v.AddArg(v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh16Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux32 <t> x y)
+ // cond:
+ // result: (CSELULT (SRL <t> (ZeroExt16to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpConst64, t)
+ v3.AuxInt = 0
+ v.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v4.AuxInt = 64
+ v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v5.AddArg(y)
+ v4.AddArg(v5)
+ v.AddArg(v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh16Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux64 x (MOVDconst [c]))
+ // cond: uint64(c) < 16
+ // result: (SRLconst (ZeroExt16to64 x) [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARM64SRLconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16Ux64 _ (MOVDconst [c]))
+ // cond: uint64(c) >= 16
+ // result: (MOVDconst [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Rsh16Ux64 <t> x y)
+ // cond:
+ // result: (CSELULT (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpConst64, t)
+ v2.AuxInt = 0
+ v.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh16Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux8 <t> x y)
+ // cond:
+ // result: (CSELULT (SRL <t> (ZeroExt16to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpConst64, t)
+ v3.AuxInt = 0
+ v.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v4.AuxInt = 64
+ v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v5.AddArg(y)
+ v4.AddArg(v5)
+ v.AddArg(v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x16 x y)
+ // cond:
+ // result: (SRA (SignExt16to64 x) (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpConst64, y.Type)
+ v3.AuxInt = 63
+ v1.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v4.AuxInt = 64
+ v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v5.AddArg(y)
+ v4.AddArg(v5)
+ v1.AddArg(v4)
+ v.AddArg(v1)
+ return true
+ }
+}
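+// Signed right shifts cannot reuse the select-zero trick used for the
+// unsigned and left shifts: Go defines x >> y for oversized y as all sign
+// bits, not zero. So the Rsh*x* lowerings instead clamp the count, using
+// CSELULT to pick y when uint64(y) < 64 and the constant 63 otherwise, then
+// issue a single SRA on the sign-extended operand. For example,
+// int16(-1) >> 100 must stay -1, which an SRA by the clamped count 63
+// delivers.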
+func rewriteValueARM64_OpRsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x32 x y)
+ // cond:
+ // result: (SRA (SignExt16to64 x) (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+ v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpConst64, y.Type)
+ v3.AuxInt = 63
+ v1.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v4.AuxInt = 64
+ v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v5.AddArg(y)
+ v4.AddArg(v5)
+ v1.AddArg(v4)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 16
+ // result: (SRAconst (SignExt16to64 x) [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16x64 x (MOVDconst [c]))
+ // cond: uint64(c) >= 16
+ // result: (SRAconst (SignExt16to64 x) [63])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = 63
+ v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16x64 x y)
+ // cond:
+ // result: (SRA (SignExt16to64 x) (CSELULT <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Line, OpConst64, y.Type)
+ v2.AuxInt = 63
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh16x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x8 x y)
+ // cond:
+ // result: (SRA (SignExt16to64 x) (CSELULT <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpConst64, y.Type)
+ v3.AuxInt = 63
+ v1.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v4.AuxInt = 64
+ v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v5.AddArg(y)
+ v4.AddArg(v5)
+ v1.AddArg(v4)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh32Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux16 <t> x y)
+ // cond:
+ // result: (CSELULT (SRL <t> (ZeroExt32to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpConst64, t)
+ v3.AuxInt = 0
+ v.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v4.AuxInt = 64
+ v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v5.AddArg(y)
+ v4.AddArg(v5)
+ v.AddArg(v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh32Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux32 <t> x y)
+ // cond:
+ // result: (CSELULT (SRL <t> (ZeroExt32to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpConst64, t)
+ v3.AuxInt = 0
+ v.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v4.AuxInt = 64
+ v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v5.AddArg(y)
+ v4.AddArg(v5)
+ v.AddArg(v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh32Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux64 x (MOVDconst [c]))
+ // cond: uint64(c) < 32
+ // result: (SRLconst (ZeroExt32to64 x) [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARM64SRLconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh32Ux64 _ (MOVDconst [c]))
+ // cond: uint64(c) >= 32
+ // result: (MOVDconst [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Rsh32Ux64 <t> x y)
+ // cond:
+ // result: (CSELULT (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpConst64, t)
+ v2.AuxInt = 0
+ v.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh32Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux8 <t> x y)
+ // cond:
+ // result: (CSELULT (SRL <t> (ZeroExt32to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpConst64, t)
+ v3.AuxInt = 0
+ v.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v4.AuxInt = 64
+ v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v5.AddArg(y)
+ v4.AddArg(v5)
+ v.AddArg(v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x16 x y)
+ // cond:
+ // result: (SRA (SignExt32to64 x) (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpConst64, y.Type)
+ v3.AuxInt = 63
+ v1.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v4.AuxInt = 64
+ v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v5.AddArg(y)
+ v4.AddArg(v5)
+ v1.AddArg(v4)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh32x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x32 x y)
+ // cond:
+ // result: (SRA (SignExt32to64 x) (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+ v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpConst64, y.Type)
+ v3.AuxInt = 63
+ v1.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v4.AuxInt = 64
+ v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v5.AddArg(y)
+ v4.AddArg(v5)
+ v1.AddArg(v4)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 32
+ // result: (SRAconst (SignExt32to64 x) [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh32x64 x (MOVDconst [c]))
+ // cond: uint64(c) >= 32
+ // result: (SRAconst (SignExt32to64 x) [63])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = 63
+ v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh32x64 x y)
+ // cond:
+ // result: (SRA (SignExt32to64 x) (CSELULT <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Line, OpConst64, y.Type)
+ v2.AuxInt = 63
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x8 x y)
+ // cond:
+ // result: (SRA (SignExt32to64 x) (CSELULT <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpConst64, y.Type)
+ v3.AuxInt = 63
+ v1.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v4.AuxInt = 64
+ v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v5.AddArg(y)
+ v4.AddArg(v5)
+ v1.AddArg(v4)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh64Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64Ux16 <t> x y)
+ // cond:
+ // result: (CSELULT (SRL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpConst64, t)
+ v2.AuxInt = 0
+ v.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh64Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64Ux32 <t> x y)
+ // cond:
+ // result: (CSELULT (SRL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpConst64, t)
+ v2.AuxInt = 0
+ v.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh64Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64Ux64 x (MOVDconst [c]))
+ // cond: uint64(c) < 64
+ // result: (SRLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 64) {
+ break
+ }
+ v.reset(OpARM64SRLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh64Ux64 _ (MOVDconst [c]))
+ // cond: uint64(c) >= 64
+ // result: (MOVDconst [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Rsh64Ux64 <t> x y)
+ // cond:
+ // result: (CSELULT (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpConst64, t)
+ v1.AuxInt = 0
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh64Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64Ux8 <t> x y)
+ // cond:
+ // result: (CSELULT (SRL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpConst64, t)
+ v2.AuxInt = 0
+ v.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh64x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x16 x y)
+ // cond:
+ // result: (SRA x (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SRA)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpConst64, y.Type)
+ v2.AuxInt = 63
+ v0.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v0.AddArg(v3)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh64x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x32 x y)
+ // cond:
+ // result: (SRA x (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SRA)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+ v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpConst64, y.Type)
+ v2.AuxInt = 63
+ v0.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v0.AddArg(v3)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh64x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 64
+ // result: (SRAconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 64) {
+ break
+ }
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh64x64 x (MOVDconst [c]))
+ // cond: uint64(c) >= 64
+ // result: (SRAconst x [63])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = 63
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh64x64 x y)
+ // cond:
+ // result: (SRA x (CSELULT <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SRA)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpConst64, y.Type)
+ v1.AuxInt = 63
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh64x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x8 x y)
+ // cond:
+ // result: (SRA x (CSELULT <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SRA)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpConst64, y.Type)
+ v2.AuxInt = 63
+ v0.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v3.AuxInt = 64
+ v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v0.AddArg(v3)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh8Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux16 <t> x y)
+ // cond:
+ // result: (CSELULT (SRL <t> (ZeroExt8to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpConst64, t)
+ v3.AuxInt = 0
+ v.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v4.AuxInt = 64
+ v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v5.AddArg(y)
+ v4.AddArg(v5)
+ v.AddArg(v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh8Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux32 <t> x y)
+ // cond:
+ // result: (CSELULT (SRL <t> (ZeroExt8to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpConst64, t)
+ v3.AuxInt = 0
+ v.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v4.AuxInt = 64
+ v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v5.AddArg(y)
+ v4.AddArg(v5)
+ v.AddArg(v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh8Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux64 x (MOVDconst [c]))
+ // cond: uint64(c) < 8
+ // result: (SRLconst (ZeroExt8to64 x) [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARM64SRLconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8Ux64 _ (MOVDconst [c]))
+ // cond: uint64(c) >= 8
+ // result: (MOVDconst [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Rsh8Ux64 <t> x y)
+ // cond:
+ // result: (CSELULT (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpConst64, t)
+ v2.AuxInt = 0
+ v.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh8Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux8 <t> x y)
+ // cond:
+ // result: (CSELULT (SRL <t> (ZeroExt8to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64CSELULT)
+ v0 := b.NewValue0(v.Line, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpConst64, t)
+ v3.AuxInt = 0
+ v.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v4.AuxInt = 64
+ v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v5.AddArg(y)
+ v4.AddArg(v5)
+ v.AddArg(v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x16 x y)
+ // cond:
+ // result: (SRA (SignExt8to64 x) (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpConst64, y.Type)
+ v3.AuxInt = 63
+ v1.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v4.AuxInt = 64
+ v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v5.AddArg(y)
+ v4.AddArg(v5)
+ v1.AddArg(v4)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x32 x y)
+ // cond:
+ // result: (SRA (SignExt8to64 x) (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+ v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpConst64, y.Type)
+ v3.AuxInt = 63
+ v1.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v4.AuxInt = 64
+ v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v5.AddArg(y)
+ v4.AddArg(v5)
+ v1.AddArg(v4)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 8
+ // result: (SRAconst (SignExt8to64 x) [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8x64 x (MOVDconst [c]))
+ // cond: uint64(c) >= 8
+ // result: (SRAconst (SignExt8to64 x) [63])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = 63
+ v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8x64 x y)
+ // cond:
+ // result: (SRA (SignExt8to64 x) (CSELULT <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Line, OpConst64, y.Type)
+ v2.AuxInt = 63
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x8 x y)
+ // cond:
+ // result: (SRA (SignExt8to64 x) (CSELULT <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpConst64, y.Type)
+ v3.AuxInt = 63
+ v1.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v4.AuxInt = 64
+ v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v5.AddArg(y)
+ v4.AddArg(v5)
+ v1.AddArg(v4)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpSignExt16to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt16to32 x)
+ // cond:
+ // result: (MOVHreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MOVHreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpSignExt16to64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt16to64 x)
+ // cond:
+ // result: (MOVHreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MOVHreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpSignExt32to64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt32to64 x)
+ // cond:
+ // result: (MOVWreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MOVWreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpSignExt8to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt8to16 x)
+ // cond:
+ // result: (MOVBreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MOVBreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpSignExt8to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt8to32 x)
+ // cond:
+ // result: (MOVBreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MOVBreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpSignExt8to64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt8to64 x)
+ // cond:
+ // result: (MOVBreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MOVBreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpSqrt(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sqrt x)
+ // cond:
+ // result: (FSQRTD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64FSQRTD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpStaticCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (StaticCall [argwid] {target} mem)
+ // cond:
+ // result: (CALLstatic [argwid] {target} mem)
+ for {
+ argwid := v.AuxInt
+ target := v.Aux
+ mem := v.Args[0]
+ v.reset(OpARM64CALLstatic)
+ v.AuxInt = argwid
+ v.Aux = target
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM64_OpStore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Store [1] ptr val mem)
+ // cond:
+ // result: (MOVBstore ptr val mem)
+ for {
+ if v.AuxInt != 1 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVBstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [2] ptr val mem)
+ // cond:
+ // result: (MOVHstore ptr val mem)
+ for {
+ if v.AuxInt != 2 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVHstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [4] ptr val mem)
+ // cond: !is32BitFloat(val.Type)
+ // result: (MOVWstore ptr val mem)
+ for {
+ if v.AuxInt != 4 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(!is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [8] ptr val mem)
+ // cond: !is64BitFloat(val.Type)
+ // result: (MOVDstore ptr val mem)
+ for {
+ if v.AuxInt != 8 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(!is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [4] ptr val mem)
+ // cond: is32BitFloat(val.Type)
+ // result: (FMOVSstore ptr val mem)
+ for {
+ if v.AuxInt != 4 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARM64FMOVSstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [8] ptr val mem)
+ // cond: is64BitFloat(val.Type)
+ // result: (FMOVDstore ptr val mem)
+ for {
+ if v.AuxInt != 8 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARM64FMOVDstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
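+
+// The Store rules above dispatch on the store width in AuxInt: widths 1 and
+// 2 map unconditionally to MOVBstore/MOVHstore, while widths 4 and 8 are
+// split by the value's type, integer stores going to MOVWstore/MOVDstore
+// and floating-point stores to FMOVSstore/FMOVDstore.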
+func rewriteValueARM64_OpSub16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub16 x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpSub32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32 x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpSub32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32F x y)
+ // cond:
+ // result: (FSUBS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64FSUBS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpSub64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub64 x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpSub64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub64F x y)
+ // cond:
+ // result: (FSUBD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64FSUBD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpSub8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub8 x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpSubPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SubPtr x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpTrunc16to8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc16to8 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpTrunc32to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc32to16 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpTrunc32to8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc32to8 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpTrunc64to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc64to16 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpTrunc64to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc64to32 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpTrunc64to8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc64to8 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
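+
+// All integer truncations above are no-ops (OpCopy): ARM64 keeps sub-word
+// values in full 64-bit registers, and it is the explicit SignExt*/ZeroExt*
+// rewrites that materialize MOVBreg/MOVHreg/MOVWreg (and their unsigned
+// counterparts) when the upper bits actually matter.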
+func rewriteValueARM64_OpXor16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Xor16 x y)
+ // cond:
+ // result: (XOR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64XOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpXor32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Xor32 x y)
+ // cond:
+ // result: (XOR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64XOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpXor64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Xor64 x y)
+ // cond:
+ // result: (XOR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64XOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpXor8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Xor8 x y)
+ // cond:
+ // result: (XOR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64XOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpZero(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Zero [s] _ mem)
+ // cond: SizeAndAlign(s).Size() == 0
+ // result: mem
+ for {
+ s := v.AuxInt
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = mem.Type
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 1
+ // result: (MOVBstore ptr (MOVDconst [0]) mem)
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 1) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
+ // result: (MOVHstore ptr (MOVDconst [0]) mem)
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 2
+ // result: (MOVBstore [1] ptr (MOVDconst [0]) (MOVBstore ptr (MOVDconst [0]) mem))
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 2) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = 1
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
+ v1.AddArg(ptr)
+ v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v2.AuxInt = 0
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
+ // result: (MOVWstore ptr (MOVDconst [0]) mem)
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
+ // result: (MOVHstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem))
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = 2
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
+ v1.AddArg(ptr)
+ v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v2.AuxInt = 0
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 4
+ // result: (MOVBstore [3] ptr (MOVDconst [0]) (MOVBstore [2] ptr (MOVDconst [0]) (MOVBstore [1] ptr (MOVDconst [0]) (MOVBstore ptr (MOVDconst [0]) mem))))
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 4) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = 3
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
+ v1.AuxInt = 2
+ v1.AddArg(ptr)
+ v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v2.AuxInt = 0
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
+ v3.AuxInt = 1
+ v3.AddArg(ptr)
+ v4 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v4.AuxInt = 0
+ v3.AddArg(v4)
+ v5 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
+ v5.AddArg(ptr)
+ v6 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v6.AuxInt = 0
+ v5.AddArg(v6)
+ v5.AddArg(mem)
+ v3.AddArg(v5)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0
+ // result: (MOVDstore ptr (MOVDconst [0]) mem)
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0
+ // result: (MOVWstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem))
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = 4
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64MOVWstore, TypeMem)
+ v1.AddArg(ptr)
+ v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v2.AuxInt = 0
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0
+ // result: (MOVHstore [6] ptr (MOVDconst [0]) (MOVHstore [4] ptr (MOVDconst [0]) (MOVHstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem))))
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = 6
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
+ v1.AuxInt = 4
+ v1.AddArg(ptr)
+ v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v2.AuxInt = 0
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
+ v3.AuxInt = 2
+ v3.AddArg(ptr)
+ v4 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v4.AuxInt = 0
+ v3.AddArg(v4)
+ v5 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
+ v5.AddArg(ptr)
+ v6 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v6.AuxInt = 0
+ v5.AddArg(v6)
+ v5.AddArg(mem)
+ v3.AddArg(v5)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 3
+ // result: (MOVBstore [2] ptr (MOVDconst [0]) (MOVBstore [1] ptr (MOVDconst [0]) (MOVBstore ptr (MOVDconst [0]) mem)))
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 3) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = 2
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
+ v1.AuxInt = 1
+ v1.AddArg(ptr)
+ v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v2.AuxInt = 0
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64MOVBstore, TypeMem)
+ v3.AddArg(ptr)
+ v4 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v4.AuxInt = 0
+ v3.AddArg(v4)
+ v3.AddArg(mem)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0
+ // result: (MOVHstore [4] ptr (MOVDconst [0]) (MOVHstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem)))
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = 4
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
+ v1.AuxInt = 2
+ v1.AddArg(ptr)
+ v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v2.AuxInt = 0
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
+ v3.AddArg(ptr)
+ v4 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v4.AuxInt = 0
+ v3.AddArg(v4)
+ v3.AddArg(mem)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0
+ // result: (MOVWstore [8] ptr (MOVDconst [0]) (MOVWstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem)))
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = 8
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64MOVWstore, TypeMem)
+ v1.AuxInt = 4
+ v1.AddArg(ptr)
+ v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v2.AuxInt = 0
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64MOVWstore, TypeMem)
+ v3.AddArg(ptr)
+ v4 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v4.AuxInt = 0
+ v3.AddArg(v4)
+ v3.AddArg(mem)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0
+ // result: (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = 8
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
+ v1.AddArg(ptr)
+ v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v2.AuxInt = 0
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0
+ // result: (MOVDstore [16] ptr (MOVDconst [0]) (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)))
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = 16
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
+ v1.AuxInt = 8
+ v1.AddArg(ptr)
+ v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v2.AuxInt = 0
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
+ v3.AddArg(ptr)
+ v4 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v4.AuxInt = 0
+ v3.AddArg(v4)
+ v3.AddArg(mem)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size() > 24 && SizeAndAlign(s).Size() <= 8*128 && SizeAndAlign(s).Align()%8 == 0 && !config.noDuffDevice
+ // result: (DUFFZERO [4 * (128 - int64(SizeAndAlign(s).Size()/8))] ptr mem)
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size() > 24 && SizeAndAlign(s).Size() <= 8*128 && SizeAndAlign(s).Align()%8 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(OpARM64DUFFZERO)
+ v.AuxInt = 4 * (128 - int64(SizeAndAlign(s).Size()/8))
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: (SizeAndAlign(s).Size() > 8*128 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0
+ // result: (LoweredZero [SizeAndAlign(s).Align()] ptr (ADDconst <ptr.Type> [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)] ptr) mem)
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !((SizeAndAlign(s).Size() > 8*128 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0) {
+ break
+ }
+ v.reset(OpARM64LoweredZero)
+ v.AuxInt = SizeAndAlign(s).Align()
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpARM64ADDconst, ptr.Type)
+ v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+ v0.AddArg(ptr)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
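+
+// The Zero rules above pick the widest store the known alignment allows and
+// fully unroll fixed small sizes (up to 24 bytes). Sizes that are multiples
+// of 8, at most 8*128 bytes, and 8-aligned use DUFFZERO; its AuxInt,
+// 4 * (128 - size/8), is the entry offset, assuming the runtime's duffzero
+// encodes each 8-byte zeroing step as one 4-byte instruction in a 128-step
+// sequence — zeroing 256 bytes needs 32 steps, so it enters at
+// 4*(128-32) = 384. Everything else falls back to the LoweredZero loop,
+// whose second argument points at the last unit to be zeroed.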
+func rewriteValueARM64_OpZeroExt16to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt16to32 x)
+ // cond:
+ // result: (MOVHUreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MOVHUreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpZeroExt16to64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt16to64 x)
+ // cond:
+ // result: (MOVHUreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MOVHUreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpZeroExt32to64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt32to64 x)
+ // cond:
+ // result: (MOVWUreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MOVWUreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpZeroExt8to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt8to16 x)
+ // cond:
+ // result: (MOVBUreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MOVBUreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpZeroExt8to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt8to32 x)
+ // cond:
+ // result: (MOVBUreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MOVBUreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpZeroExt8to64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt8to64 x)
+ // cond:
+ // result: (MOVBUreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MOVBUreg)
+ v.AddArg(x)
+ return true
+ }
+}
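+
+// rewriteBlockARM64 rewrites control-flow blocks. A conditional whose flags
+// are statically known (FlagEQ, FlagLT_ULT, ...) is folded into a First
+// block, swapping successors when the branch is known not taken; an
+// InvertFlags control flips the condition to its mirror (GE<->LE, GT<->LT,
+// UGE<->ULE, UGT<->ULT, with EQ and NE unchanged); and generic If blocks
+// become the matching ARM64 conditional block, a plain boolean control
+// being lowered to NE (CMPconst [0] cond).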
+func rewriteBlockARM64(b *Block) bool {
+ switch b.Kind {
+ case BlockARM64EQ:
+ // match: (EQ (FlagEQ) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (EQ (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (EQ (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (EQ (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (EQ (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (EQ (InvertFlags cmp) yes no)
+ // cond:
+ // result: (EQ cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64EQ
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockARM64GE:
+ // match: (GE (FlagEQ) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GE (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GE (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GE (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GE (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GE (InvertFlags cmp) yes no)
+ // cond:
+ // result: (LE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64LE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockARM64GT:
+ // match: (GT (FlagEQ) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GT (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GT (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GT (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GT (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GT (InvertFlags cmp) yes no)
+ // cond:
+ // result: (LT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64LT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockIf:
+ // match: (If (Equal cc) yes no)
+ // cond:
+ // result: (EQ cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64Equal {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64EQ
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (NotEqual cc) yes no)
+ // cond:
+ // result: (NE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64NotEqual {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64NE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (LessThan cc) yes no)
+ // cond:
+ // result: (LT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64LessThan {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64LT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (LessThanU cc) yes no)
+ // cond:
+ // result: (ULT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64LessThanU {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64ULT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (LessEqual cc) yes no)
+ // cond:
+ // result: (LE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64LessEqual {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64LE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (LessEqualU cc) yes no)
+ // cond:
+ // result: (ULE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64LessEqualU {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64ULE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (GreaterThan cc) yes no)
+ // cond:
+ // result: (GT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64GreaterThan {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64GT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (GreaterThanU cc) yes no)
+ // cond:
+ // result: (UGT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64GreaterThanU {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64UGT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (GreaterEqual cc) yes no)
+ // cond:
+ // result: (GE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64GreaterEqual {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64GE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (GreaterEqualU cc) yes no)
+ // cond:
+ // result: (UGE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64GreaterEqualU {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64UGE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If cond yes no)
+ // cond:
+ // result: (NE (CMPconst [0] cond) yes no)
+ for {
+ v := b.Control
+ cond := b.Control
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64NE
+ v0 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(cond)
+ b.SetControl(v0)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockARM64LE:
+ // match: (LE (FlagEQ) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LE (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LE (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LE (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (LE (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (LE (InvertFlags cmp) yes no)
+ // cond:
+ // result: (GE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64GE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockARM64LT:
+ // match: (LT (FlagEQ) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (LT (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LT (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LT (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (LT (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (LT (InvertFlags cmp) yes no)
+ // cond:
+ // result: (GT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64GT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockARM64NE:
+ // match: (NE (CMPconst [0] (Equal cc)) yes no)
+ // cond:
+ // result: (EQ cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64CMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64Equal {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64EQ
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (NotEqual cc)) yes no)
+ // cond:
+ // result: (NE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64CMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64NotEqual {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64NE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessThan cc)) yes no)
+ // cond:
+ // result: (LT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64CMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64LessThan {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64LT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessThanU cc)) yes no)
+ // cond:
+ // result: (ULT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64CMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64LessThanU {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64ULT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessEqual cc)) yes no)
+ // cond:
+ // result: (LE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64CMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64LessEqual {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64LE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessEqualU cc)) yes no)
+ // cond:
+ // result: (ULE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64CMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64LessEqualU {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64ULE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterThan cc)) yes no)
+ // cond:
+ // result: (GT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64CMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64GreaterThan {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64GT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterThanU cc)) yes no)
+ // cond:
+ // result: (UGT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64CMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64GreaterThanU {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64UGT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterEqual cc)) yes no)
+ // cond:
+ // result: (GE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64CMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64GreaterEqual {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64GE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterEqualU cc)) yes no)
+ // cond:
+ // result: (UGE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64CMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64GreaterEqualU {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64UGE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (FlagEQ) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (NE (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (InvertFlags cmp) yes no)
+ // cond:
+ // result: (NE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64NE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockARM64UGE:
+ // match: (UGE (FlagEQ) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (UGE (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (UGE (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (UGE (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (UGE (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (UGE (InvertFlags cmp) yes no)
+ // cond:
+ // result: (ULE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64ULE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockARM64UGT:
+ // match: (UGT (FlagEQ) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (UGT (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (UGT (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (UGT (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (UGT (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (UGT (InvertFlags cmp) yes no)
+ // cond:
+ // result: (ULT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64ULT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockARM64ULE:
+ // match: (ULE (FlagEQ) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (ULE (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (ULE (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (ULE (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (ULE (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (ULE (InvertFlags cmp) yes no)
+ // cond:
+ // result: (UGE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64UGE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockARM64ULT:
+ // match: (ULT (FlagEQ) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (ULT (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (ULT (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (ULT (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagGT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (ULT (FlagGT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARM64FlagGT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (ULT (InvertFlags cmp) yes no)
+ // cond:
+ // result: (UGT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64UGT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
new file mode 100644
index 0000000000..9675d2f731
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -0,0 +1,7277 @@
+// autogenerated from gen/PPC64.rules: do not edit!
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "math"
+
+var _ = math.MinInt8 // in case not otherwise used
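+
+// rewriteValuePPC64 is the top-level dispatcher: it maps each generic or
+// PPC64-specific opcode to its per-op rewrite function, each generated from
+// the corresponding rules in gen/PPC64.rules.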
+func rewriteValuePPC64(v *Value, config *Config) bool {
+ switch v.Op {
+ case OpAdd16:
+ return rewriteValuePPC64_OpAdd16(v, config)
+ case OpAdd32:
+ return rewriteValuePPC64_OpAdd32(v, config)
+ case OpAdd32F:
+ return rewriteValuePPC64_OpAdd32F(v, config)
+ case OpAdd64:
+ return rewriteValuePPC64_OpAdd64(v, config)
+ case OpAdd64F:
+ return rewriteValuePPC64_OpAdd64F(v, config)
+ case OpAdd8:
+ return rewriteValuePPC64_OpAdd8(v, config)
+ case OpAddPtr:
+ return rewriteValuePPC64_OpAddPtr(v, config)
+ case OpAddr:
+ return rewriteValuePPC64_OpAddr(v, config)
+ case OpAnd16:
+ return rewriteValuePPC64_OpAnd16(v, config)
+ case OpAnd32:
+ return rewriteValuePPC64_OpAnd32(v, config)
+ case OpAnd64:
+ return rewriteValuePPC64_OpAnd64(v, config)
+ case OpAnd8:
+ return rewriteValuePPC64_OpAnd8(v, config)
+ case OpAndB:
+ return rewriteValuePPC64_OpAndB(v, config)
+ case OpAvg64u:
+ return rewriteValuePPC64_OpAvg64u(v, config)
+ case OpClosureCall:
+ return rewriteValuePPC64_OpClosureCall(v, config)
+ case OpCom16:
+ return rewriteValuePPC64_OpCom16(v, config)
+ case OpCom32:
+ return rewriteValuePPC64_OpCom32(v, config)
+ case OpCom64:
+ return rewriteValuePPC64_OpCom64(v, config)
+ case OpCom8:
+ return rewriteValuePPC64_OpCom8(v, config)
+ case OpConst16:
+ return rewriteValuePPC64_OpConst16(v, config)
+ case OpConst32:
+ return rewriteValuePPC64_OpConst32(v, config)
+ case OpConst32F:
+ return rewriteValuePPC64_OpConst32F(v, config)
+ case OpConst64:
+ return rewriteValuePPC64_OpConst64(v, config)
+ case OpConst64F:
+ return rewriteValuePPC64_OpConst64F(v, config)
+ case OpConst8:
+ return rewriteValuePPC64_OpConst8(v, config)
+ case OpConstBool:
+ return rewriteValuePPC64_OpConstBool(v, config)
+ case OpConstNil:
+ return rewriteValuePPC64_OpConstNil(v, config)
+ case OpConvert:
+ return rewriteValuePPC64_OpConvert(v, config)
+ case OpCvt32Fto32:
+ return rewriteValuePPC64_OpCvt32Fto32(v, config)
+ case OpCvt32Fto64:
+ return rewriteValuePPC64_OpCvt32Fto64(v, config)
+ case OpCvt32Fto64F:
+ return rewriteValuePPC64_OpCvt32Fto64F(v, config)
+ case OpCvt32to32F:
+ return rewriteValuePPC64_OpCvt32to32F(v, config)
+ case OpCvt32to64F:
+ return rewriteValuePPC64_OpCvt32to64F(v, config)
+ case OpCvt64Fto32:
+ return rewriteValuePPC64_OpCvt64Fto32(v, config)
+ case OpCvt64Fto32F:
+ return rewriteValuePPC64_OpCvt64Fto32F(v, config)
+ case OpCvt64Fto64:
+ return rewriteValuePPC64_OpCvt64Fto64(v, config)
+ case OpCvt64to32F:
+ return rewriteValuePPC64_OpCvt64to32F(v, config)
+ case OpCvt64to64F:
+ return rewriteValuePPC64_OpCvt64to64F(v, config)
+ case OpDeferCall:
+ return rewriteValuePPC64_OpDeferCall(v, config)
+ case OpDiv16:
+ return rewriteValuePPC64_OpDiv16(v, config)
+ case OpDiv16u:
+ return rewriteValuePPC64_OpDiv16u(v, config)
+ case OpDiv32:
+ return rewriteValuePPC64_OpDiv32(v, config)
+ case OpDiv32F:
+ return rewriteValuePPC64_OpDiv32F(v, config)
+ case OpDiv32u:
+ return rewriteValuePPC64_OpDiv32u(v, config)
+ case OpDiv64:
+ return rewriteValuePPC64_OpDiv64(v, config)
+ case OpDiv64F:
+ return rewriteValuePPC64_OpDiv64F(v, config)
+ case OpDiv64u:
+ return rewriteValuePPC64_OpDiv64u(v, config)
+ case OpDiv8:
+ return rewriteValuePPC64_OpDiv8(v, config)
+ case OpDiv8u:
+ return rewriteValuePPC64_OpDiv8u(v, config)
+ case OpEq16:
+ return rewriteValuePPC64_OpEq16(v, config)
+ case OpEq32:
+ return rewriteValuePPC64_OpEq32(v, config)
+ case OpEq32F:
+ return rewriteValuePPC64_OpEq32F(v, config)
+ case OpEq64:
+ return rewriteValuePPC64_OpEq64(v, config)
+ case OpEq64F:
+ return rewriteValuePPC64_OpEq64F(v, config)
+ case OpEq8:
+ return rewriteValuePPC64_OpEq8(v, config)
+ case OpEqB:
+ return rewriteValuePPC64_OpEqB(v, config)
+ case OpEqPtr:
+ return rewriteValuePPC64_OpEqPtr(v, config)
+ case OpGeq16:
+ return rewriteValuePPC64_OpGeq16(v, config)
+ case OpGeq16U:
+ return rewriteValuePPC64_OpGeq16U(v, config)
+ case OpGeq32:
+ return rewriteValuePPC64_OpGeq32(v, config)
+ case OpGeq32F:
+ return rewriteValuePPC64_OpGeq32F(v, config)
+ case OpGeq32U:
+ return rewriteValuePPC64_OpGeq32U(v, config)
+ case OpGeq64:
+ return rewriteValuePPC64_OpGeq64(v, config)
+ case OpGeq64F:
+ return rewriteValuePPC64_OpGeq64F(v, config)
+ case OpGeq64U:
+ return rewriteValuePPC64_OpGeq64U(v, config)
+ case OpGeq8:
+ return rewriteValuePPC64_OpGeq8(v, config)
+ case OpGeq8U:
+ return rewriteValuePPC64_OpGeq8U(v, config)
+ case OpGetClosurePtr:
+ return rewriteValuePPC64_OpGetClosurePtr(v, config)
+ case OpGoCall:
+ return rewriteValuePPC64_OpGoCall(v, config)
+ case OpGreater16:
+ return rewriteValuePPC64_OpGreater16(v, config)
+ case OpGreater16U:
+ return rewriteValuePPC64_OpGreater16U(v, config)
+ case OpGreater32:
+ return rewriteValuePPC64_OpGreater32(v, config)
+ case OpGreater32F:
+ return rewriteValuePPC64_OpGreater32F(v, config)
+ case OpGreater32U:
+ return rewriteValuePPC64_OpGreater32U(v, config)
+ case OpGreater64:
+ return rewriteValuePPC64_OpGreater64(v, config)
+ case OpGreater64F:
+ return rewriteValuePPC64_OpGreater64F(v, config)
+ case OpGreater64U:
+ return rewriteValuePPC64_OpGreater64U(v, config)
+ case OpGreater8:
+ return rewriteValuePPC64_OpGreater8(v, config)
+ case OpGreater8U:
+ return rewriteValuePPC64_OpGreater8U(v, config)
+ case OpHmul16:
+ return rewriteValuePPC64_OpHmul16(v, config)
+ case OpHmul16u:
+ return rewriteValuePPC64_OpHmul16u(v, config)
+ case OpHmul32:
+ return rewriteValuePPC64_OpHmul32(v, config)
+ case OpHmul32u:
+ return rewriteValuePPC64_OpHmul32u(v, config)
+ case OpHmul64:
+ return rewriteValuePPC64_OpHmul64(v, config)
+ case OpHmul64u:
+ return rewriteValuePPC64_OpHmul64u(v, config)
+ case OpHmul8:
+ return rewriteValuePPC64_OpHmul8(v, config)
+ case OpHmul8u:
+ return rewriteValuePPC64_OpHmul8u(v, config)
+ case OpInterCall:
+ return rewriteValuePPC64_OpInterCall(v, config)
+ case OpIsInBounds:
+ return rewriteValuePPC64_OpIsInBounds(v, config)
+ case OpIsNonNil:
+ return rewriteValuePPC64_OpIsNonNil(v, config)
+ case OpIsSliceInBounds:
+ return rewriteValuePPC64_OpIsSliceInBounds(v, config)
+ case OpLeq16:
+ return rewriteValuePPC64_OpLeq16(v, config)
+ case OpLeq16U:
+ return rewriteValuePPC64_OpLeq16U(v, config)
+ case OpLeq32:
+ return rewriteValuePPC64_OpLeq32(v, config)
+ case OpLeq32F:
+ return rewriteValuePPC64_OpLeq32F(v, config)
+ case OpLeq32U:
+ return rewriteValuePPC64_OpLeq32U(v, config)
+ case OpLeq64:
+ return rewriteValuePPC64_OpLeq64(v, config)
+ case OpLeq64F:
+ return rewriteValuePPC64_OpLeq64F(v, config)
+ case OpLeq64U:
+ return rewriteValuePPC64_OpLeq64U(v, config)
+ case OpLeq8:
+ return rewriteValuePPC64_OpLeq8(v, config)
+ case OpLeq8U:
+ return rewriteValuePPC64_OpLeq8U(v, config)
+ case OpLess16:
+ return rewriteValuePPC64_OpLess16(v, config)
+ case OpLess16U:
+ return rewriteValuePPC64_OpLess16U(v, config)
+ case OpLess32:
+ return rewriteValuePPC64_OpLess32(v, config)
+ case OpLess32F:
+ return rewriteValuePPC64_OpLess32F(v, config)
+ case OpLess32U:
+ return rewriteValuePPC64_OpLess32U(v, config)
+ case OpLess64:
+ return rewriteValuePPC64_OpLess64(v, config)
+ case OpLess64F:
+ return rewriteValuePPC64_OpLess64F(v, config)
+ case OpLess64U:
+ return rewriteValuePPC64_OpLess64U(v, config)
+ case OpLess8:
+ return rewriteValuePPC64_OpLess8(v, config)
+ case OpLess8U:
+ return rewriteValuePPC64_OpLess8U(v, config)
+ case OpLoad:
+ return rewriteValuePPC64_OpLoad(v, config)
+ case OpLsh16x16:
+ return rewriteValuePPC64_OpLsh16x16(v, config)
+ case OpLsh16x32:
+ return rewriteValuePPC64_OpLsh16x32(v, config)
+ case OpLsh16x64:
+ return rewriteValuePPC64_OpLsh16x64(v, config)
+ case OpLsh16x8:
+ return rewriteValuePPC64_OpLsh16x8(v, config)
+ case OpLsh32x16:
+ return rewriteValuePPC64_OpLsh32x16(v, config)
+ case OpLsh32x32:
+ return rewriteValuePPC64_OpLsh32x32(v, config)
+ case OpLsh32x64:
+ return rewriteValuePPC64_OpLsh32x64(v, config)
+ case OpLsh32x8:
+ return rewriteValuePPC64_OpLsh32x8(v, config)
+ case OpLsh64x16:
+ return rewriteValuePPC64_OpLsh64x16(v, config)
+ case OpLsh64x32:
+ return rewriteValuePPC64_OpLsh64x32(v, config)
+ case OpLsh64x64:
+ return rewriteValuePPC64_OpLsh64x64(v, config)
+ case OpLsh64x8:
+ return rewriteValuePPC64_OpLsh64x8(v, config)
+ case OpLsh8x16:
+ return rewriteValuePPC64_OpLsh8x16(v, config)
+ case OpLsh8x32:
+ return rewriteValuePPC64_OpLsh8x32(v, config)
+ case OpLsh8x64:
+ return rewriteValuePPC64_OpLsh8x64(v, config)
+ case OpLsh8x8:
+ return rewriteValuePPC64_OpLsh8x8(v, config)
+ case OpMod16:
+ return rewriteValuePPC64_OpMod16(v, config)
+ case OpMod16u:
+ return rewriteValuePPC64_OpMod16u(v, config)
+ case OpMod32:
+ return rewriteValuePPC64_OpMod32(v, config)
+ case OpMod32u:
+ return rewriteValuePPC64_OpMod32u(v, config)
+ case OpMod64:
+ return rewriteValuePPC64_OpMod64(v, config)
+ case OpMod64u:
+ return rewriteValuePPC64_OpMod64u(v, config)
+ case OpMod8:
+ return rewriteValuePPC64_OpMod8(v, config)
+ case OpMod8u:
+ return rewriteValuePPC64_OpMod8u(v, config)
+ case OpMove:
+ return rewriteValuePPC64_OpMove(v, config)
+ case OpMul16:
+ return rewriteValuePPC64_OpMul16(v, config)
+ case OpMul32:
+ return rewriteValuePPC64_OpMul32(v, config)
+ case OpMul32F:
+ return rewriteValuePPC64_OpMul32F(v, config)
+ case OpMul64:
+ return rewriteValuePPC64_OpMul64(v, config)
+ case OpMul64F:
+ return rewriteValuePPC64_OpMul64F(v, config)
+ case OpMul8:
+ return rewriteValuePPC64_OpMul8(v, config)
+ case OpNeg16:
+ return rewriteValuePPC64_OpNeg16(v, config)
+ case OpNeg32:
+ return rewriteValuePPC64_OpNeg32(v, config)
+ case OpNeg32F:
+ return rewriteValuePPC64_OpNeg32F(v, config)
+ case OpNeg64:
+ return rewriteValuePPC64_OpNeg64(v, config)
+ case OpNeg64F:
+ return rewriteValuePPC64_OpNeg64F(v, config)
+ case OpNeg8:
+ return rewriteValuePPC64_OpNeg8(v, config)
+ case OpNeq16:
+ return rewriteValuePPC64_OpNeq16(v, config)
+ case OpNeq32:
+ return rewriteValuePPC64_OpNeq32(v, config)
+ case OpNeq32F:
+ return rewriteValuePPC64_OpNeq32F(v, config)
+ case OpNeq64:
+ return rewriteValuePPC64_OpNeq64(v, config)
+ case OpNeq64F:
+ return rewriteValuePPC64_OpNeq64F(v, config)
+ case OpNeq8:
+ return rewriteValuePPC64_OpNeq8(v, config)
+ case OpNeqB:
+ return rewriteValuePPC64_OpNeqB(v, config)
+ case OpNeqPtr:
+ return rewriteValuePPC64_OpNeqPtr(v, config)
+ case OpNilCheck:
+ return rewriteValuePPC64_OpNilCheck(v, config)
+ case OpNot:
+ return rewriteValuePPC64_OpNot(v, config)
+ case OpOffPtr:
+ return rewriteValuePPC64_OpOffPtr(v, config)
+ case OpOr16:
+ return rewriteValuePPC64_OpOr16(v, config)
+ case OpOr32:
+ return rewriteValuePPC64_OpOr32(v, config)
+ case OpOr64:
+ return rewriteValuePPC64_OpOr64(v, config)
+ case OpOr8:
+ return rewriteValuePPC64_OpOr8(v, config)
+ case OpOrB:
+ return rewriteValuePPC64_OpOrB(v, config)
+ case OpPPC64ADD:
+ return rewriteValuePPC64_OpPPC64ADD(v, config)
+ case OpPPC64CMPUconst:
+ return rewriteValuePPC64_OpPPC64CMPUconst(v, config)
+ case OpPPC64CMPWUconst:
+ return rewriteValuePPC64_OpPPC64CMPWUconst(v, config)
+ case OpPPC64CMPWconst:
+ return rewriteValuePPC64_OpPPC64CMPWconst(v, config)
+ case OpPPC64CMPconst:
+ return rewriteValuePPC64_OpPPC64CMPconst(v, config)
+ case OpPPC64Equal:
+ return rewriteValuePPC64_OpPPC64Equal(v, config)
+ case OpPPC64FGreaterEqual:
+ return rewriteValuePPC64_OpPPC64FGreaterEqual(v, config)
+ case OpPPC64FGreaterThan:
+ return rewriteValuePPC64_OpPPC64FGreaterThan(v, config)
+ case OpPPC64FLessEqual:
+ return rewriteValuePPC64_OpPPC64FLessEqual(v, config)
+ case OpPPC64FLessThan:
+ return rewriteValuePPC64_OpPPC64FLessThan(v, config)
+ case OpPPC64GreaterEqual:
+ return rewriteValuePPC64_OpPPC64GreaterEqual(v, config)
+ case OpPPC64GreaterThan:
+ return rewriteValuePPC64_OpPPC64GreaterThan(v, config)
+ case OpPPC64LessEqual:
+ return rewriteValuePPC64_OpPPC64LessEqual(v, config)
+ case OpPPC64LessThan:
+ return rewriteValuePPC64_OpPPC64LessThan(v, config)
+ case OpPPC64MOVBstore:
+ return rewriteValuePPC64_OpPPC64MOVBstore(v, config)
+ case OpPPC64MOVBstorezero:
+ return rewriteValuePPC64_OpPPC64MOVBstorezero(v, config)
+ case OpPPC64MOVDstore:
+ return rewriteValuePPC64_OpPPC64MOVDstore(v, config)
+ case OpPPC64MOVDstorezero:
+ return rewriteValuePPC64_OpPPC64MOVDstorezero(v, config)
+ case OpPPC64MOVHstore:
+ return rewriteValuePPC64_OpPPC64MOVHstore(v, config)
+ case OpPPC64MOVHstorezero:
+ return rewriteValuePPC64_OpPPC64MOVHstorezero(v, config)
+ case OpPPC64MOVWstore:
+ return rewriteValuePPC64_OpPPC64MOVWstore(v, config)
+ case OpPPC64MOVWstorezero:
+ return rewriteValuePPC64_OpPPC64MOVWstorezero(v, config)
+ case OpPPC64NotEqual:
+ return rewriteValuePPC64_OpPPC64NotEqual(v, config)
+ case OpRsh16Ux16:
+ return rewriteValuePPC64_OpRsh16Ux16(v, config)
+ case OpRsh16Ux32:
+ return rewriteValuePPC64_OpRsh16Ux32(v, config)
+ case OpRsh16Ux64:
+ return rewriteValuePPC64_OpRsh16Ux64(v, config)
+ case OpRsh16Ux8:
+ return rewriteValuePPC64_OpRsh16Ux8(v, config)
+ case OpRsh16x16:
+ return rewriteValuePPC64_OpRsh16x16(v, config)
+ case OpRsh16x32:
+ return rewriteValuePPC64_OpRsh16x32(v, config)
+ case OpRsh16x64:
+ return rewriteValuePPC64_OpRsh16x64(v, config)
+ case OpRsh16x8:
+ return rewriteValuePPC64_OpRsh16x8(v, config)
+ case OpRsh32Ux16:
+ return rewriteValuePPC64_OpRsh32Ux16(v, config)
+ case OpRsh32Ux32:
+ return rewriteValuePPC64_OpRsh32Ux32(v, config)
+ case OpRsh32Ux64:
+ return rewriteValuePPC64_OpRsh32Ux64(v, config)
+ case OpRsh32Ux8:
+ return rewriteValuePPC64_OpRsh32Ux8(v, config)
+ case OpRsh32x16:
+ return rewriteValuePPC64_OpRsh32x16(v, config)
+ case OpRsh32x32:
+ return rewriteValuePPC64_OpRsh32x32(v, config)
+ case OpRsh32x64:
+ return rewriteValuePPC64_OpRsh32x64(v, config)
+ case OpRsh32x8:
+ return rewriteValuePPC64_OpRsh32x8(v, config)
+ case OpRsh64Ux16:
+ return rewriteValuePPC64_OpRsh64Ux16(v, config)
+ case OpRsh64Ux32:
+ return rewriteValuePPC64_OpRsh64Ux32(v, config)
+ case OpRsh64Ux64:
+ return rewriteValuePPC64_OpRsh64Ux64(v, config)
+ case OpRsh64Ux8:
+ return rewriteValuePPC64_OpRsh64Ux8(v, config)
+ case OpRsh64x16:
+ return rewriteValuePPC64_OpRsh64x16(v, config)
+ case OpRsh64x32:
+ return rewriteValuePPC64_OpRsh64x32(v, config)
+ case OpRsh64x64:
+ return rewriteValuePPC64_OpRsh64x64(v, config)
+ case OpRsh64x8:
+ return rewriteValuePPC64_OpRsh64x8(v, config)
+ case OpRsh8Ux16:
+ return rewriteValuePPC64_OpRsh8Ux16(v, config)
+ case OpRsh8Ux32:
+ return rewriteValuePPC64_OpRsh8Ux32(v, config)
+ case OpRsh8Ux64:
+ return rewriteValuePPC64_OpRsh8Ux64(v, config)
+ case OpRsh8Ux8:
+ return rewriteValuePPC64_OpRsh8Ux8(v, config)
+ case OpRsh8x16:
+ return rewriteValuePPC64_OpRsh8x16(v, config)
+ case OpRsh8x32:
+ return rewriteValuePPC64_OpRsh8x32(v, config)
+ case OpRsh8x64:
+ return rewriteValuePPC64_OpRsh8x64(v, config)
+ case OpRsh8x8:
+ return rewriteValuePPC64_OpRsh8x8(v, config)
+ case OpSignExt16to32:
+ return rewriteValuePPC64_OpSignExt16to32(v, config)
+ case OpSignExt16to64:
+ return rewriteValuePPC64_OpSignExt16to64(v, config)
+ case OpSignExt32to64:
+ return rewriteValuePPC64_OpSignExt32to64(v, config)
+ case OpSignExt8to16:
+ return rewriteValuePPC64_OpSignExt8to16(v, config)
+ case OpSignExt8to32:
+ return rewriteValuePPC64_OpSignExt8to32(v, config)
+ case OpSignExt8to64:
+ return rewriteValuePPC64_OpSignExt8to64(v, config)
+ case OpSqrt:
+ return rewriteValuePPC64_OpSqrt(v, config)
+ case OpStaticCall:
+ return rewriteValuePPC64_OpStaticCall(v, config)
+ case OpStore:
+ return rewriteValuePPC64_OpStore(v, config)
+ case OpSub16:
+ return rewriteValuePPC64_OpSub16(v, config)
+ case OpSub32:
+ return rewriteValuePPC64_OpSub32(v, config)
+ case OpSub32F:
+ return rewriteValuePPC64_OpSub32F(v, config)
+ case OpSub64:
+ return rewriteValuePPC64_OpSub64(v, config)
+ case OpSub64F:
+ return rewriteValuePPC64_OpSub64F(v, config)
+ case OpSub8:
+ return rewriteValuePPC64_OpSub8(v, config)
+ case OpSubPtr:
+ return rewriteValuePPC64_OpSubPtr(v, config)
+ case OpTrunc16to8:
+ return rewriteValuePPC64_OpTrunc16to8(v, config)
+ case OpTrunc32to16:
+ return rewriteValuePPC64_OpTrunc32to16(v, config)
+ case OpTrunc32to8:
+ return rewriteValuePPC64_OpTrunc32to8(v, config)
+ case OpTrunc64to16:
+ return rewriteValuePPC64_OpTrunc64to16(v, config)
+ case OpTrunc64to32:
+ return rewriteValuePPC64_OpTrunc64to32(v, config)
+ case OpTrunc64to8:
+ return rewriteValuePPC64_OpTrunc64to8(v, config)
+ case OpXor16:
+ return rewriteValuePPC64_OpXor16(v, config)
+ case OpXor32:
+ return rewriteValuePPC64_OpXor32(v, config)
+ case OpXor64:
+ return rewriteValuePPC64_OpXor64(v, config)
+ case OpXor8:
+ return rewriteValuePPC64_OpXor8(v, config)
+ case OpZero:
+ return rewriteValuePPC64_OpZero(v, config)
+ case OpZeroExt16to32:
+ return rewriteValuePPC64_OpZeroExt16to32(v, config)
+ case OpZeroExt16to64:
+ return rewriteValuePPC64_OpZeroExt16to64(v, config)
+ case OpZeroExt32to64:
+ return rewriteValuePPC64_OpZeroExt32to64(v, config)
+ case OpZeroExt8to16:
+ return rewriteValuePPC64_OpZeroExt8to16(v, config)
+ case OpZeroExt8to32:
+ return rewriteValuePPC64_OpZeroExt8to32(v, config)
+ case OpZeroExt8to64:
+ return rewriteValuePPC64_OpZeroExt8to64(v, config)
+ }
+ return false
+}
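+// The switch above is the dispatch table for the PPC64 lowering pass:
+// each generic SSA op is routed to its per-op rewrite function. Like the
+// rest of this file, it is machine-generated by gen/rulegen.go from the
+// patterns in gen/PPC64.rules, so changes belong in the rules file.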
+func rewriteValuePPC64_OpAdd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add16 x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64ADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAdd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32 x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64ADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAdd32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32F x y)
+ // cond:
+ // result: (FADDS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64FADDS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAdd64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add64 x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64ADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAdd64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add64F x y)
+ // cond:
+ // result: (FADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64FADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAdd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add8 x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64ADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAddPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AddPtr x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64ADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAddr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Addr {sym} base)
+ // cond:
+ // result: (MOVDaddr {sym} base)
+ for {
+ sym := v.Aux
+ base := v.Args[0]
+ v.reset(OpPPC64MOVDaddr)
+ v.Aux = sym
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAnd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And16 x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64AND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAnd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And32 x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64AND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAnd64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And64 x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64AND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAnd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And8 x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64AND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAndB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AndB x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64AND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAvg64u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Avg64u <t> x y)
+ // cond:
+ // result: (ADD (ADD <t> (SRD <t> x (MOVDconst <t> [1])) (SRD <t> y (MOVDconst <t> [1]))) (ANDconst <t> (AND <t> x y) [1]))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64ADD)
+ v0 := b.NewValue0(v.Line, OpPPC64ADD, t)
+ v1 := b.NewValue0(v.Line, OpPPC64SRD, t)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Line, OpPPC64MOVDconst, t)
+ v2.AuxInt = 1
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Line, OpPPC64SRD, t)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Line, OpPPC64MOVDconst, t)
+ v4.AuxInt = 1
+ v3.AddArg(v4)
+ v0.AddArg(v3)
+ v.AddArg(v0)
+ v5 := b.NewValue0(v.Line, OpPPC64ANDconst, t)
+ v5.AuxInt = 1
+ v6 := b.NewValue0(v.Line, OpPPC64AND, t)
+ v6.AddArg(x)
+ v6.AddArg(y)
+ v5.AddArg(v6)
+ v.AddArg(v5)
+ return true
+ }
+}
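+// The Avg64u expansion above uses the overflow-free identity
+// avg(x, y) = (x >> 1) + (y >> 1) + (x & y & 1): writing x = 2a+p and
+// y = 2b+q with p, q in {0, 1} gives floor((x+y)/2) = a + b + (p AND q),
+// so the full sum x+y never has to be formed in a 64-bit register.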
+func rewriteValuePPC64_OpClosureCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ClosureCall [argwid] entry closure mem)
+ // cond:
+ // result: (CALLclosure [argwid] entry closure mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ closure := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpPPC64CALLclosure)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(closure)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCom16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com16 x)
+ // cond:
+ // result: (XORconst [-1] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64XORconst)
+ v.AuxInt = -1
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCom32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com32 x)
+ // cond:
+ // result: (XORconst [-1] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64XORconst)
+ v.AuxInt = -1
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCom64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com64 x)
+ // cond:
+ // result: (XORconst [-1] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64XORconst)
+ v.AuxInt = -1
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCom8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com8 x)
+ // cond:
+ // result: (XORconst [-1] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64XORconst)
+ v.AuxInt = -1
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpConst16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const16 [val])
+ // cond:
+ // result: (MOVWconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpPPC64MOVWconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValuePPC64_OpConst32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32 [val])
+ // cond:
+ // result: (MOVWconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpPPC64MOVWconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValuePPC64_OpConst32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32F [val])
+ // cond:
+ // result: (FMOVSconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpPPC64FMOVSconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValuePPC64_OpConst64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const64 [val])
+ // cond:
+ // result: (MOVDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValuePPC64_OpConst64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const64F [val])
+ // cond:
+ // result: (FMOVDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpPPC64FMOVDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValuePPC64_OpConst8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const8 [val])
+ // cond:
+ // result: (MOVWconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpPPC64MOVWconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValuePPC64_OpConstBool(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstBool [b])
+ // cond:
+ // result: (MOVWconst [b])
+ for {
+ b := v.AuxInt
+ v.reset(OpPPC64MOVWconst)
+ v.AuxInt = b
+ return true
+ }
+}
+func rewriteValuePPC64_OpConstNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstNil)
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+}
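+// Constant lowering above: 8-, 16- and 32-bit integer constants (and
+// ConstBool) become MOVWconst, 64-bit constants and the nil pointer
+// become MOVDconst, and float constants use FMOVSconst/FMOVDconst.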
+func rewriteValuePPC64_OpConvert(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Convert <t> x mem)
+ // cond:
+ // result: (MOVDconvert <t> x mem)
+ for {
+ t := v.Type
+ x := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpPPC64MOVDconvert)
+ v.Type = t
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt32Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto32 x)
+ // cond:
+ // result: (Xf2i64 (FCTIWZ x))
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64Xf2i64)
+ v0 := b.NewValue0(v.Line, OpPPC64FCTIWZ, config.fe.TypeFloat64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt32Fto64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto64 x)
+ // cond:
+ // result: (Xf2i64 (FCTIDZ x))
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64Xf2i64)
+ v0 := b.NewValue0(v.Line, OpPPC64FCTIDZ, config.fe.TypeFloat64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt32Fto64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto64F x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt32to32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to32F x)
+ // cond:
+ // result: (FRSP (FCFID (Xi2f64 (SignExt32to64 x))))
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64FRSP)
+ v0 := b.NewValue0(v.Line, OpPPC64FCFID, config.fe.TypeFloat64())
+ v1 := b.NewValue0(v.Line, OpPPC64Xi2f64, config.fe.TypeFloat64())
+ v2 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
+ v2.AddArg(x)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt32to64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to64F x)
+ // cond:
+ // result: (FCFID (Xi2f64 (SignExt32to64 x)))
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64FCFID)
+ v0 := b.NewValue0(v.Line, OpPPC64Xi2f64, config.fe.TypeFloat64())
+ v1 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt64Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32 x)
+ // cond:
+ // result: (Xf2i64 (FCTIWZ x))
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64Xf2i64)
+ v0 := b.NewValue0(v.Line, OpPPC64FCTIWZ, config.fe.TypeFloat64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt64Fto32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32F x)
+ // cond:
+ // result: (FRSP x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64FRSP)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt64Fto64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto64 x)
+ // cond:
+ // result: (Xf2i64 (FCTIDZ x))
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64Xf2i64)
+ v0 := b.NewValue0(v.Line, OpPPC64FCTIDZ, config.fe.TypeFloat64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt64to32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64to32F x)
+ // cond:
+ // result: (FRSP (FCFID (Xi2f64 x)))
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64FRSP)
+ v0 := b.NewValue0(v.Line, OpPPC64FCFID, config.fe.TypeFloat64())
+ v1 := b.NewValue0(v.Line, OpPPC64Xi2f64, config.fe.TypeFloat64())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt64to64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64to64F x)
+ // cond:
+ // result: (FCFID (Xi2f64 x))
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64FCFID)
+ v0 := b.NewValue0(v.Line, OpPPC64Xi2f64, config.fe.TypeFloat64())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
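+// The Cvt* rules above lower integer<->float conversions to the POWER
+// convert instructions FCFID, FCTIDZ and FCTIWZ, with FRSP rounding to
+// single precision; Xi2f64 and Xf2i64 (see gen/PPC64Ops.go) model the raw
+// 64-bit moves between the integer and floating-point register files.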
+func rewriteValuePPC64_OpDeferCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (DeferCall [argwid] mem)
+ // cond:
+ // result: (CALLdefer [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(OpPPC64CALLdefer)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpDiv16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16 x y)
+ // cond:
+ // result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64DIVW)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpDiv16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16u x y)
+ // cond:
+ // result: (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64DIVWU)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpDiv32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32 x y)
+ // cond:
+ // result: (DIVW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64DIVW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpDiv32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32F x y)
+ // cond:
+ // result: (FDIVS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64FDIVS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpDiv32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32u x y)
+ // cond:
+ // result: (DIVWU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64DIVWU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpDiv64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64 x y)
+ // cond:
+ // result: (DIVD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64DIVD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpDiv64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64F x y)
+ // cond:
+ // result: (FDIV x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64FDIV)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpDiv64u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64u x y)
+ // cond:
+ // result: (DIVDU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64DIVDU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpDiv8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8 x y)
+ // cond:
+ // result: (DIVW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64DIVW)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpDiv8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8u x y)
+ // cond:
+ // result: (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64DIVWU)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
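+// PPC64 has no 8- or 16-bit divide, so the Div8/Div16 rules above widen:
+// operands are sign-extended (or zero-extended for the unsigned variants)
+// to 32 bits and the 32-bit DIVW/DIVWU is used instead.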
+func rewriteValuePPC64_OpEq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq16 x y)
+ // cond:
+ // result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
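+// Note that sub-word equality (Eq16 above, Eq8 below) zero-extends both
+// operands even though the types may be signed: equality is unaffected by
+// how the upper bits are filled, provided both sides are extended the
+// same way.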
+func rewriteValuePPC64_OpEq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32 x y)
+ // cond:
+ // result: (Equal (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32F x y)
+ // cond:
+ // result: (Equal (FCMPU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq64 x y)
+ // cond:
+ // result: (Equal (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq64F x y)
+ // cond:
+ // result: (Equal (FCMPU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq8 x y)
+ // cond:
+ // result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqB x y)
+ // cond:
+ // result: (ANDconst [1] (EQV x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = 1
+ v0 := b.NewValue0(v.Line, OpPPC64EQV, config.fe.TypeInt64())
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
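+// EqB above relies on EQV, PPC's xor-then-complement: for boolean
+// operands, which are 0 or 1, ^(x^y) has its low bit set exactly when
+// x == y, and ANDconst [1] extracts that bit.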
+func rewriteValuePPC64_OpEqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqPtr x y)
+ // cond:
+ // result: (Equal (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpGeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16 x y)
+ // cond:
+ // result: (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64GreaterEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpGeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16U x y)
+ // cond:
+ // result: (GreaterEqual (CMPU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64GreaterEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpGeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32 x y)
+ // cond:
+ // result: (GreaterEqual (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64GreaterEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpGeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32F x y)
+ // cond:
+ // result: (FGreaterEqual (FCMPU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64FGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpGeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32U x y)
+ // cond:
+ // result: (GreaterEqual (CMPU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64GreaterEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpGeq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64 x y)
+ // cond:
+ // result: (GreaterEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64GreaterEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpGeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64F x y)
+ // cond:
+ // result: (FGreaterEqual (FCMPU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64FGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpGeq64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64U x y)
+ // cond:
+ // result: (GreaterEqual (CMPU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64GreaterEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpGeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8 x y)
+ // cond:
+ // result: (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64GreaterEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpGeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8U x y)
+ // cond:
+ // result: (GreaterEqual (CMPU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64GreaterEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpGetClosurePtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GetClosurePtr)
+ // cond:
+ // result: (LoweredGetClosurePtr)
+ for {
+ v.reset(OpPPC64LoweredGetClosurePtr)
+ return true
+ }
+}
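+// LoweredGetClosurePtr is a pseudo-op that names the closure context
+// register (R11 in the Go PPC64 port, per the op's register constraints);
+// the register allocator pins its result there and no instruction should
+// be emitted for it.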
+func rewriteValuePPC64_OpGoCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GoCall [argwid] mem)
+ // cond:
+ // result: (CALLgo [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(OpPPC64CALLgo)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpGreater16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16 x y)
+ // cond:
+ // result: (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64GreaterThan)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpGreater16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16U x y)
+ // cond:
+ // result: (GreaterThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64GreaterThan)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpGreater32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32 x y)
+ // cond:
+ // result: (GreaterThan (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64GreaterThan)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpGreater32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32F x y)
+ // cond:
+ // result: (FGreaterThan (FCMPU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64FGreaterThan)
+ v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpGreater32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32U x y)
+ // cond:
+ // result: (GreaterThan (CMPWU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64GreaterThan)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpGreater64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64 x y)
+ // cond:
+ // result: (GreaterThan (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64GreaterThan)
+ v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpGreater64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64F x y)
+ // cond:
+ // result: (FGreaterThan (FCMPU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64FGreaterThan)
+ v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpGreater64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64U x y)
+ // cond:
+ // result: (GreaterThan (CMPU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64GreaterThan)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpGreater8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8 x y)
+ // cond:
+ // result: (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64GreaterThan)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpGreater8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8U x y)
+ // cond:
+ // result: (GreaterThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64GreaterThan)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpHmul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16 x y)
+ // cond:
+ // result: (SRAWconst (MULLW <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = 16
+ v0 := b.NewValue0(v.Line, OpPPC64MULLW, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpHmul16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16u x y)
+ // cond:
+ // result: (SRWconst (MULLW <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = 16
+ v0 := b.NewValue0(v.Line, OpPPC64MULLW, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpHmul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32 x y)
+ // cond:
+ // result: (MULHW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64MULHW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpHmul32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32u x y)
+ // cond:
+ // result: (MULHWU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64MULHWU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpHmul64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul64 x y)
+ // cond:
+ // result: (MULHD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64MULHD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpHmul64u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul64u x y)
+ // cond:
+ // result: (MULHDU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64MULHDU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpHmul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8 x y)
+ // cond:
+ // result: (SRAWconst (MULLW <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = 8
+ v0 := b.NewValue0(v.Line, OpPPC64MULLW, config.fe.TypeInt16())
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpHmul8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8u x y)
+ // cond:
+ // result: (SRWconst (MULLW <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = 8
+ v0 := b.NewValue0(v.Line, OpPPC64MULLW, config.fe.TypeUInt16())
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
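+// High-multiply rules above: the 32- and 64-bit cases map directly to
+// MULHW/MULHWU and MULHD/MULHDU, while the 8- and 16-bit cases widen to a
+// full 32-bit MULLW and shift the product right by the operand width to
+// recover the high half.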
+func rewriteValuePPC64_OpInterCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (InterCall [argwid] entry mem)
+ // cond:
+ // result: (CALLinter [argwid] entry mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpPPC64CALLinter)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpIsInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsInBounds idx len)
+ // cond:
+ // result: (LessThan (CMPU idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpIsNonNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsNonNil ptr)
+ // cond:
+ // result: (NotEqual (CMPconst [0] ptr))
+ for {
+ ptr := v.Args[0]
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPconst, TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(ptr)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpIsSliceInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsSliceInBounds idx len)
+ // cond:
+ // result: (LessEqual (CMPU idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
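+// The bounds checks above are unsigned comparisons against the length:
+// IsInBounds is idx <u len and IsSliceInBounds is idx <=u len, so a
+// negative index, viewed as a huge unsigned value, fails the check.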
+func rewriteValuePPC64_OpLeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16 x y)
+ // cond:
+ // result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16U x y)
+ // cond:
+ // result: (LessEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32 x y)
+ // cond:
+ // result: (LessEqual (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32F x y)
+ // cond:
+ // result: (FLessEqual (FCMPU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64FLessEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32U x y)
+ // cond:
+ // result: (LessEqual (CMPWU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64 x y)
+ // cond:
+ // result: (LessEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64F x y)
+ // cond:
+ // result: (FLessEqual (FCMPU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64FLessEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64U x y)
+ // cond:
+ // result: (LessEqual (CMPU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8 x y)
+ // cond:
+ // result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8U x y)
+ // cond:
+ // result: (LessEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16 x y)
+ // cond:
+ // result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16U x y)
+ // cond:
+ // result: (LessThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32 x y)
+ // cond:
+ // result: (LessThan (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32F x y)
+ // cond:
+ // result: (FLessThan (FCMPU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64FLessThan)
+ v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32U x y)
+ // cond:
+ // result: (LessThan (CMPWU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64 x y)
+ // cond:
+ // result: (LessThan (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64F x y)
+ // cond:
+ // result: (FLessThan (FCMPU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64FLessThan)
+ v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64U x y)
+ // cond:
+ // result: (LessThan (CMPU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8 x y)
+ // cond:
+ // result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8U x y)
+ // cond:
+ // result: (LessThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLoad(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVDload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitInt(t) && isSigned(t)
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVWload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitInt(t) && !isSigned(t)
+ // result: (MOVWZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVWZload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t) && isSigned(t)
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is16BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVHload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t) && !isSigned(t)
+ // result: (MOVHZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is16BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVHZload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (t.IsBoolean() || (is8BitInt(t) && isSigned(t)))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(t.IsBoolean() || (is8BitInt(t) && isSigned(t))) {
+ break
+ }
+ v.reset(OpPPC64MOVBload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is8BitInt(t) && !isSigned(t)
+ // result: (MOVBZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is8BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVBZload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (FMOVSload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpPPC64FMOVSload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (FMOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpPPC64FMOVDload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
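+// The Load rules above dispatch purely on the value's type: the width
+// picks the instruction (MOVB/MOVH/MOVW/MOVD or FMOVS/FMOVD) and the
+// signedness picks the sign-extending form versus the zero-extending Z
+// form; booleans load as signed bytes.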
+func rewriteValuePPC64_OpLsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x16 x y)
+ // cond:
+ // result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SLW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -16
+ v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
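+// The Lsh/Rsh rules from here on all use the clamping trick seen in
+// Lsh16x16 above. Per gen/PPC64Ops.go, ADDconstForCarry [-16] sets the
+// carry flag exactly when the (extended) shift count is >= 16, and
+// MaskIfNotCarry yields all ones when the carry is clear (count in range)
+// and zero otherwise. ORN (arg0|^arg1) therefore passes y through for
+// in-range counts but forces the count to -1 for out-of-range ones, and
+// SLW produces 0 for counts of 32 or more, matching Go's shift semantics.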
+func rewriteValuePPC64_OpLsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x32 x y)
+ // cond:
+ // result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SLW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -16
+ v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x64 x y)
+ // cond:
+ // result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SLW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh16x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x8 x y)
+ // cond:
+ // result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SLW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -16
+ v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x16 x y)
+ // cond:
+ // result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SLW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -32
+ v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh32x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x32 x y)
+ // cond:
+ // result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SLW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -32
+ v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x64 x y)
+ // cond:
+ // result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SLW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x8 x y)
+ // cond:
+ // result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SLW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -32
+ v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh64x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x16 x y)
+ // cond:
+ // result: (SLD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SLD)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -64
+ v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh64x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x32 x y)
+ // cond:
+ // result: (SLD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SLD)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -64
+ v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh64x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x64 x y)
+ // cond:
+ // result: (SLD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SLD)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh64x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x8 x y)
+ // cond:
+ // result: (SLD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SLD)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -64
+ v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x16 x y)
+ // cond:
+ // result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SLW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -8
+ v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x32 x y)
+ // cond:
+ // result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SLW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -8
+ v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x64 x y)
+ // cond:
+ // result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SLW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x8 x y)
+ // cond:
+ // result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SLW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -8
+ v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
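+// Mod lowerings: 8- and 16-bit operands are widened and reduced to the
+// 32-bit case; Mod32/Mod64 and their unsigned forms are then computed
+// as x - y*(x/y) with the divide instructions, since the PPC64 ISA
+// targeted here has no integer remainder instruction.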
+func rewriteValuePPC64_OpMod16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16 x y)
+ // cond:
+ // result: (Mod32 (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpMod32)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMod16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16u x y)
+ // cond:
+ // result: (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpMod32u)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMod32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32 x y)
+ // cond:
+ // result: (SUB x (MULLW y (DIVW x y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SUB)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64MULLW, config.fe.TypeInt32())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64DIVW, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMod32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32u x y)
+ // cond:
+ // result: (SUB x (MULLW y (DIVWU x y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SUB)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64MULLW, config.fe.TypeInt32())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64DIVWU, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMod64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod64 x y)
+ // cond:
+ // result: (SUB x (MULLD y (DIVD x y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SUB)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64MULLD, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64DIVD, config.fe.TypeInt64())
+ v1.AddArg(x)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMod64u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod64u x y)
+ // cond:
+ // result: (SUB x (MULLD y (DIVDU x y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SUB)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64MULLD, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64DIVDU, config.fe.TypeInt64())
+ v1.AddArg(x)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMod8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8 x y)
+ // cond:
+ // result: (Mod32 (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpMod32)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMod8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8u x y)
+ // cond:
+ // result: (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpMod32u)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
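+// Move lowerings: small copies are expanded inline into the widest
+// load/store pairs the size and alignment allow (8, 4, 2, then 1
+// byte(s) at a time).  Copies larger than 512 bytes, copies whose
+// alignment is not a multiple of 8, and builds with noDuffDevice fall
+// through to the LoweredMove loop; its third operand (src plus the
+// size minus one copy step) appears to serve as the loop's end-address
+// bound.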
+func rewriteValuePPC64_OpMove(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Move [s] _ _ mem)
+ // cond: SizeAndAlign(s).Size() == 0
+ // result: mem
+ for {
+ s := v.AuxInt
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = mem.Type
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 1
+ // result: (MOVBstore dst (MOVBZload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
+ // result: (MOVHstore dst (MOVHZload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpPPC64MOVHZload, config.fe.TypeUInt16())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 2
+ // result: (MOVBstore [1] dst (MOVBZload [1] src mem) (MOVBstore dst (MOVBZload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 2) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = 1
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
+ v0.AuxInt = 1
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpPPC64MOVBstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpPPC64MOVWload, config.fe.TypeInt32())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
+ // result: (MOVHstore [2] dst (MOVHZload [2] src mem) (MOVHstore dst (MOVHZload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = 2
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpPPC64MOVHZload, config.fe.TypeUInt16())
+ v0.AuxInt = 2
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpPPC64MOVHstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpPPC64MOVHZload, config.fe.TypeUInt16())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 4
+ // result: (MOVBstore [3] dst (MOVBZload [3] src mem) (MOVBstore [2] dst (MOVBZload [2] src mem) (MOVBstore [1] dst (MOVBZload [1] src mem) (MOVBstore dst (MOVBZload src mem) mem))))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 4) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = 3
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
+ v0.AuxInt = 3
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpPPC64MOVBstore, TypeMem)
+ v1.AuxInt = 2
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
+ v2.AuxInt = 2
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpPPC64MOVBstore, TypeMem)
+ v3.AuxInt = 1
+ v3.AddArg(dst)
+ v4 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
+ v4.AuxInt = 1
+ v4.AddArg(src)
+ v4.AddArg(mem)
+ v3.AddArg(v4)
+ v5 := b.NewValue0(v.Line, OpPPC64MOVBstore, TypeMem)
+ v5.AddArg(dst)
+ v6 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
+ v6.AddArg(src)
+ v6.AddArg(mem)
+ v5.AddArg(v6)
+ v5.AddArg(mem)
+ v3.AddArg(v5)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0
+ // result: (MOVDstore dst (MOVDload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpPPC64MOVDload, config.fe.TypeInt64())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0
+ // result: (MOVWstore [4] dst (MOVWZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = 4
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpPPC64MOVWZload, config.fe.TypeUInt32())
+ v0.AuxInt = 4
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpPPC64MOVWstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpPPC64MOVWZload, config.fe.TypeUInt32())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0
+ // result: (MOVHstore [6] dst (MOVHZload [6] src mem) (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVHstore [2] dst (MOVHZload [2] src mem) (MOVHstore dst (MOVHZload src mem) mem))))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = 6
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpPPC64MOVHZload, config.fe.TypeUInt16())
+ v0.AuxInt = 6
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpPPC64MOVHstore, TypeMem)
+ v1.AuxInt = 4
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpPPC64MOVHZload, config.fe.TypeUInt16())
+ v2.AuxInt = 4
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpPPC64MOVHstore, TypeMem)
+ v3.AuxInt = 2
+ v3.AddArg(dst)
+ v4 := b.NewValue0(v.Line, OpPPC64MOVHZload, config.fe.TypeUInt16())
+ v4.AuxInt = 2
+ v4.AddArg(src)
+ v4.AddArg(mem)
+ v3.AddArg(v4)
+ v5 := b.NewValue0(v.Line, OpPPC64MOVHstore, TypeMem)
+ v5.AddArg(dst)
+ v6 := b.NewValue0(v.Line, OpPPC64MOVHZload, config.fe.TypeUInt16())
+ v6.AddArg(src)
+ v6.AddArg(mem)
+ v5.AddArg(v6)
+ v5.AddArg(mem)
+ v3.AddArg(v5)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 3
+ // result: (MOVBstore [2] dst (MOVBZload [2] src mem) (MOVBstore [1] dst (MOVBZload [1] src mem) (MOVBstore dst (MOVBZload src mem) mem)))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 3) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = 2
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
+ v0.AuxInt = 2
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpPPC64MOVBstore, TypeMem)
+ v1.AuxInt = 1
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
+ v2.AuxInt = 1
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpPPC64MOVBstore, TypeMem)
+ v3.AddArg(dst)
+ v4 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
+ v4.AddArg(src)
+ v4.AddArg(mem)
+ v3.AddArg(v4)
+ v3.AddArg(mem)
+ v1.AddArg(v3)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0
+ // result: (LoweredMove [SizeAndAlign(s).Align()] dst src (ADDconst <src.Type> src [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)]) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !((SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0) {
+ break
+ }
+ v.reset(OpPPC64LoweredMove)
+ v.AuxInt = SizeAndAlign(s).Align()
+ v.AddArg(dst)
+ v.AddArg(src)
+ v0 := b.NewValue0(v.Line, OpPPC64ADDconst, src.Type)
+ v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+ v0.AddArg(src)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
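+// Mul lowerings: the 8-, 16- and 32-bit multiplies all use MULLW
+// (multiply low word), since Go multiplication keeps only the
+// low-order bits of the product; Mul64 uses MULLD, and the float cases
+// map to FMULS/FMUL.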
+func rewriteValuePPC64_OpMul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul16 x y)
+ // cond:
+ // result: (MULLW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64MULLW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32 x y)
+ // cond:
+ // result: (MULLW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64MULLW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMul32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32F x y)
+ // cond:
+ // result: (FMULS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64FMULS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMul64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul64 x y)
+ // cond:
+ // result: (MULLD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64MULLD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMul64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul64F x y)
+ // cond:
+ // result: (FMUL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64FMUL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul8 x y)
+ // cond:
+ // result: (MULLW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64MULLW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
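+// Neg lowerings: NEG negates the full 64-bit register, which is
+// correct for every integer width because consumers of a narrow value
+// ignore its high bits; the float cases use FNEG.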
+func rewriteValuePPC64_OpNeg16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg16 x)
+ // cond:
+ // result: (NEG x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64NEG)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeg32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg32 x)
+ // cond:
+ // result: (NEG x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64NEG)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeg32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg32F x)
+ // cond:
+ // result: (FNEG x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64FNEG)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeg64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg64 x)
+ // cond:
+ // result: (NEG x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64NEG)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeg64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg64F x)
+ // cond:
+ // result: (FNEG x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64FNEG)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeg8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg8 x)
+ // cond:
+ // result: (NEG x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64NEG)
+ v.AddArg(x)
+ return true
+ }
+}
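+// Neq lowerings: sub-word operands are zero-extended to 32 bits before
+// the compare so stale high bits cannot affect the outcome.  For
+// equality tests the choice between zero- and sign-extension does not
+// matter, as long as both operands are extended the same way.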
+func rewriteValuePPC64_OpNeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq16 x y)
+ // cond:
+ // result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32 x y)
+ // cond:
+ // result: (NotEqual (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32F x y)
+ // cond:
+ // result: (NotEqual (FCMPU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq64 x y)
+ // cond:
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq64F x y)
+ // cond:
+ // result: (NotEqual (FCMPU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq8 x y)
+ // cond:
+ // result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
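+// Booleans are materialized as 0 or 1, so XOR yields 1 exactly when
+// the two operands differ.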
+func rewriteValuePPC64_OpNeqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqB x y)
+ // cond:
+ // result: (XOR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64XOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqPtr x y)
+ // cond:
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNilCheck(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NilCheck ptr mem)
+ // cond:
+ // result: (LoweredNilCheck ptr mem)
+ for {
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpPPC64LoweredNilCheck)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNot(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Not x)
+ // cond:
+ // result: (XORconst [1] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64XORconst)
+ v.AuxInt = 1
+ v.AddArg(x)
+ return true
+ }
+}
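+// OffPtr materializes the offset as a 64-bit constant and adds it to
+// the pointer; the ADD and MOV*store rules below can later fold such a
+// constant back into a load/store displacement.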
+func rewriteValuePPC64_OpOffPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OffPtr [off] ptr)
+ // cond:
+ // result: (ADD (MOVDconst <config.Frontend().TypeInt64()> [off]) ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ v.reset(OpPPC64ADD)
+ v0 := b.NewValue0(v.Line, OpPPC64MOVDconst, config.Frontend().TypeInt64())
+ v0.AuxInt = off
+ v.AddArg(v0)
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValuePPC64_OpOr16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or16 x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64OR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpOr32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or32 x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64OR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpOr64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or64 x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64OR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpOr8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or8 x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64OR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpOrB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OrB x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64OR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
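+// From here on the rules are peephole rewrites of PPC64-specific ops.
+// ADD folds a MOVDconst operand, in either position, into ADDconst
+// whenever the constant is representable in 32 bits.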
+func rewriteValuePPC64_OpPPC64ADD(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADD (MOVDconst [c]) x)
+ // cond: int64(int32(c)) == c
+ // result: (ADDconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(int64(int32(c)) == c) {
+ break
+ }
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADD x (MOVDconst [c]))
+ // cond: int64(int32(c)) == c
+ // result: (ADDconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(int64(int32(c)) == c) {
+ break
+ }
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
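+// The CMP*const rules fold comparisons of known constants into the
+// symbolic flag values FlagEQ, FlagLT and FlagGT; the unsigned
+// variants order by uint64/uint32, the signed ones by int64/int32.
+// The predicate rules further below collapse those flags to 0 or 1.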
+func rewriteValuePPC64_OpPPC64CMPUconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPUconst (MOVDconst [x]) [y])
+ // cond: int64(x)==int64(y)
+ // result: (FlagEQ)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int64(x) == int64(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagEQ)
+ return true
+ }
+ // match: (CMPUconst (MOVDconst [x]) [y])
+ // cond: uint64(x)<uint64(y)
+ // result: (FlagLT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(uint64(x) < uint64(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagLT)
+ return true
+ }
+ // match: (CMPUconst (MOVDconst [x]) [y])
+ // cond: uint64(x)>uint64(y)
+ // result: (FlagGT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(uint64(x) > uint64(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagGT)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMPWUconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPWUconst (MOVWconst [x]) [y])
+ // cond: int32(x)==int32(y)
+ // result: (FlagEQ)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVWconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int32(x) == int32(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagEQ)
+ return true
+ }
+ // match: (CMPWUconst (MOVWconst [x]) [y])
+ // cond: uint32(x)<uint32(y)
+ // result: (FlagLT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVWconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagLT)
+ return true
+ }
+ // match: (CMPWUconst (MOVWconst [x]) [y])
+ // cond: uint32(x)>uint32(y)
+ // result: (FlagGT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVWconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagGT)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMPWconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPWconst (MOVWconst [x]) [y])
+ // cond: int32(x)==int32(y)
+ // result: (FlagEQ)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVWconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int32(x) == int32(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagEQ)
+ return true
+ }
+ // match: (CMPWconst (MOVWconst [x]) [y])
+ // cond: int32(x)<int32(y)
+ // result: (FlagLT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVWconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int32(x) < int32(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagLT)
+ return true
+ }
+ // match: (CMPWconst (MOVWconst [x]) [y])
+ // cond: int32(x)>int32(y)
+ // result: (FlagGT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVWconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int32(x) > int32(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagGT)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMPconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: int64(x)==int64(y)
+ // result: (FlagEQ)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int64(x) == int64(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagEQ)
+ return true
+ }
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: int64(x)<int64(y)
+ // result: (FlagLT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int64(x) < int64(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagLT)
+ return true
+ }
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: int64(x)>int64(y)
+ // result: (FlagGT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := v_0.AuxInt
+ if !(int64(x) > int64(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagGT)
+ return true
+ }
+ return false
+}
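+// Each condition op maps the constant flag values to a MOVWconst 0 or
+// 1, and rewrites a comparison whose operands were swapped
+// (InvertFlags) to its mirrored predicate: Equal and NotEqual are
+// symmetric, while GreaterEqual/LessEqual and GreaterThan/LessThan
+// (including their F-prefixed float forms) trade places.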
+func rewriteValuePPC64_OpPPC64Equal(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Equal (FlagEQ))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (Equal (FlagLT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Equal (FlagGT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Equal (InvertFlags x))
+ // cond:
+ // result: (Equal x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64Equal)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FGreaterEqual(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FGreaterEqual (InvertFlags x))
+ // cond:
+ // result: (FLessEqual x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64FLessEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FGreaterThan(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FGreaterThan (InvertFlags x))
+ // cond:
+ // result: (FLessThan x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64FLessThan)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FLessEqual(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FLessEqual (InvertFlags x))
+ // cond:
+ // result: (FGreaterEqual x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64FGreaterEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FLessThan(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FLessThan (InvertFlags x))
+ // cond:
+ // result: (FGreaterThan x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64FGreaterThan)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64GreaterEqual(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GreaterEqual (FlagEQ))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterEqual (FlagLT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterEqual (FlagGT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterEqual (InvertFlags x))
+ // cond:
+ // result: (LessEqual x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64LessEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64GreaterThan(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GreaterThan (FlagEQ))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterThan (FlagLT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (GreaterThan (FlagGT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (GreaterThan (InvertFlags x))
+ // cond:
+ // result: (LessThan x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64LessThan)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64LessEqual(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (LessEqual (FlagEQ))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (LessEqual (FlagLT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (LessEqual (FlagGT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (LessEqual (InvertFlags x))
+ // cond:
+ // result: (GreaterEqual x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64GreaterEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64LessThan(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (LessThan (FlagEQ))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (LessThan (FlagLT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (LessThan (FlagGT))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (LessThan (InvertFlags x))
+ // cond:
+ // result: (GreaterThan x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64GreaterThan)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
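+// The store rules come in two flavors: fold an ADDconst base into the
+// store's displacement while the combined offset still fits in the
+// 16-bit signed field of a D-form memory instruction (the is16Bit
+// check), and turn stores of a constant zero into the dedicated
+// MOV*storezero ops.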
+func rewriteValuePPC64_OpPPC64MOVBstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem)
+ // cond: is16Bit(off1+off2)
+ // result: (MOVBstore [off1+off2] {sym} x val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ x := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is16Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: c == 0
+ // result: (MOVBstorezero [off] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(c == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(off1+off2)
+ // result: (MOVBstorezero [off1+off2] {sym} x mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ x := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is16Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVDstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem)
+ // cond: is16Bit(off1+off2)
+ // result: (MOVDstore [off1+off2] {sym} x val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ x := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is16Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: c == 0
+ // result: (MOVDstorezero [off] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(c == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(off1+off2)
+ // result: (MOVDstorezero [off1+off2] {sym} x mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ x := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is16Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem)
+ // cond: is16Bit(off1+off2)
+ // result: (MOVHstore [off1+off2] {sym} x val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ x := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is16Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: c == 0
+ // result: (MOVHstorezero [off] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(c == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVHstorezero)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(off1+off2)
+ // result: (MOVHstorezero [off1+off2] {sym} x mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ x := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is16Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWstore [off1] {sym} (ADDconst [off2] x) val mem)
+ // cond: is16Bit(off1+off2)
+ // result: (MOVWstore [off1+off2] {sym} x val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ x := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is16Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: c == 0
+ // result: (MOVWstorezero [off] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(c == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVWstorezero)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(off1+off2)
+ // result: (MOVWstorezero [off1+off2] {sym} x mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ x := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is16Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64NotEqual(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NotEqual (FlagEQ))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (NotEqual (FlagLT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (NotEqual (FlagGT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (NotEqual (InvertFlags x))
+ // cond:
+ // result: (NotEqual x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64NotEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
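+// The right-shift lowerings mirror the Lsh rules above: unsigned
+// shifts zero-extend a sub-word operand and use SRW/SRD, signed shifts
+// sign-extend and use SRAW/SRAD, and the same ORN/MaskIfNotCarry/
+// ADDconstForCarry trick saturates an oversized count to all ones,
+// which yields 0 for an unsigned shift and the sign fill for a signed
+// one, matching Go's shift semantics.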
+func rewriteValuePPC64_OpRsh16Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux16 x y)
+ // cond:
+ // result: (SRW (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v3.AuxInt = -16
+ v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux32 x y)
+ // cond:
+ // result: (SRW (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v3.AuxInt = -16
+ v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux64 x y)
+ // cond:
+ // result: (SRW (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v3.AuxInt = -16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux8 x y)
+ // cond:
+ // result: (SRW (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v3.AuxInt = -16
+ v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x16 x y)
+ // cond:
+ // result: (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v3.AuxInt = -16
+ v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x32 x y)
+ // cond:
+ // result: (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v3.AuxInt = -16
+ v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x64 x y)
+ // cond:
+ // result: (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v3.AuxInt = -16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x8 x y)
+ // cond:
+ // result: (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v3.AuxInt = -16
+ v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux16 x y)
+ // cond:
+ // result: (SRW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -32
+ v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux32 x y)
+ // cond:
+ // result: (SRW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -32
+ v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux64 x y)
+ // cond:
+ // result: (SRW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux8 x y)
+ // cond:
+ // result: (SRW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -32
+ v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x16 x y)
+ // cond:
+ // result: (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRAW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -32
+ v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x32 x y)
+ // cond:
+ // result: (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRAW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -32
+ v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x64 x y)
+ // cond:
+ // result: (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRAW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x8 x y)
+ // cond:
+ // result: (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRAW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -32
+ v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64Ux16 x y)
+ // cond:
+ // result: (SRD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRD)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -64
+ v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64Ux32 x y)
+ // cond:
+ // result: (SRD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRD)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -64
+ v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64Ux64 x y)
+ // cond:
+ // result: (SRD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRD)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64Ux8 x y)
+ // cond:
+ // result: (SRD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRD)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -64
+ v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x16 x y)
+ // cond:
+ // result: (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRAD)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -64
+ v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x32 x y)
+ // cond:
+ // result: (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRAD)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -64
+ v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x64 x y)
+ // cond:
+ // result: (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRAD)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x8 x y)
+ // cond:
+ // result: (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRAD)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v2.AuxInt = -64
+ v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
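+// Annotation: the Rsh64 lowerings above repeat the same clamp pattern with
+// ADDconstForCarry [-64] and the doubleword shifts SRD/SRAD; no widening of
+// x is needed since the operand is already 64 bits.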
+func rewriteValuePPC64_OpRsh8Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux16 x y)
+ // cond:
+ // result: (SRW (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v3.AuxInt = -8
+ v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux32 x y)
+ // cond:
+ // result: (SRW (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v3.AuxInt = -8
+ v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux64 x y)
+ // cond:
+ // result: (SRW (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v3.AuxInt = -8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux8 x y)
+ // cond:
+ // result: (SRW (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v3.AuxInt = -8
+ v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x16 x y)
+ // cond:
+ // result: (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v3.AuxInt = -8
+ v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x32 x y)
+ // cond:
+ // result: (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v3.AuxInt = -8
+ v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x64 x y)
+ // cond:
+ // result: (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v3.AuxInt = -8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x8 x y)
+ // cond:
+ // result: (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
+ v3.AuxInt = -8
+ v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
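+// Annotation (sketch, not generator output): the 8-bit shifts widen x first,
+// ZeroExt8to32 for the unsigned forms and SignExt8to32 for the signed ones,
+// so SRW/SRAW operate on a well-defined 32-bit value, and the clamp constant
+// becomes -8. Equivalent plain-Go semantics (hypothetical helper):
+func rsh8x8Semantics(x int8, y uint8) int8 {
+	if y >= 8 {
+		return int8(int32(x) >> 31) // out of range: pure sign fill
+	}
+	return int8(int32(x) >> y) // widen, shift, narrow
+}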
+func rewriteValuePPC64_OpSignExt16to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt16to32 x)
+ // cond:
+ // result: (MOVHreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64MOVHreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpSignExt16to64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt16to64 x)
+ // cond:
+ // result: (MOVHreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64MOVHreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpSignExt32to64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt32to64 x)
+ // cond:
+ // result: (MOVWreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64MOVWreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpSignExt8to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt8to16 x)
+ // cond:
+ // result: (MOVBreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64MOVBreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpSignExt8to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt8to32 x)
+ // cond:
+ // result: (MOVBreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64MOVBreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpSignExt8to64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt8to64 x)
+ // cond:
+ // result: (MOVBreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64MOVBreg)
+ v.AddArg(x)
+ return true
+ }
+}
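+// Annotation: each SignExt* form above lowers to a single MOV{B,H,W}reg, a
+// sign-extending register move. In plain Go terms (hypothetical helper):
+func signExt8to64(x uint8) int64 { return int64(int8(x)) }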
+func rewriteValuePPC64_OpSqrt(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sqrt x)
+ // cond:
+ // result: (FSQRT x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64FSQRT)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpStaticCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (StaticCall [argwid] {target} mem)
+ // cond:
+ // result: (CALLstatic [argwid] {target} mem)
+ for {
+ argwid := v.AuxInt
+ target := v.Aux
+ mem := v.Args[0]
+ v.reset(OpPPC64CALLstatic)
+ v.AuxInt = argwid
+ v.Aux = target
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpStore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Store [8] ptr val mem)
+ // cond: is64BitFloat(val.Type)
+ // result: (FMOVDstore ptr val mem)
+ for {
+ if v.AuxInt != 8 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpPPC64FMOVDstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [8] ptr val mem)
+ // cond: is32BitFloat(val.Type)
+ // result: (FMOVDstore ptr val mem)
+ for {
+ if v.AuxInt != 8 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpPPC64FMOVDstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [4] ptr val mem)
+ // cond: is32BitFloat(val.Type)
+ // result: (FMOVSstore ptr val mem)
+ for {
+ if v.AuxInt != 4 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpPPC64FMOVSstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [8] ptr val mem)
+ // cond: (is64BitInt(val.Type) || isPtr(val.Type))
+ // result: (MOVDstore ptr val mem)
+ for {
+ if v.AuxInt != 8 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is64BitInt(val.Type) || isPtr(val.Type)) {
+ break
+ }
+ v.reset(OpPPC64MOVDstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [4] ptr val mem)
+ // cond: is32BitInt(val.Type)
+ // result: (MOVWstore ptr val mem)
+ for {
+ if v.AuxInt != 4 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32BitInt(val.Type)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [2] ptr val mem)
+ // cond:
+ // result: (MOVHstore ptr val mem)
+ for {
+ if v.AuxInt != 2 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpPPC64MOVHstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [1] ptr val mem)
+ // cond:
+ // result: (MOVBstore ptr val mem)
+ for {
+ if v.AuxInt != 1 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpPPC64MOVBstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
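+// Annotation (sketch, not generator output): the Store cases above dispatch
+// on the store width (AuxInt) and the value's type; a hypothetical summary:
+func ppc64StoreOp(width int64, isFloat bool) string {
+	switch {
+	case width == 8 && isFloat:
+		return "FMOVDstore" // 8-byte float slots, even for float32 values
+	case width == 4 && isFloat:
+		return "FMOVSstore"
+	case width == 8:
+		return "MOVDstore" // 64-bit ints and pointers
+	case width == 4:
+		return "MOVWstore"
+	case width == 2:
+		return "MOVHstore"
+	default:
+		return "MOVBstore" // width == 1
+	}
+}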
+func rewriteValuePPC64_OpSub16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub16 x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpSub32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32 x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpSub32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32F x y)
+ // cond:
+ // result: (FSUBS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64FSUBS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpSub64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub64 x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpSub64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub64F x y)
+ // cond:
+ // result: (FSUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64FSUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpSub8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub8 x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpSubPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SubPtr x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64SUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpTrunc16to8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc16to8 x)
+ // cond:
+ // result: (MOVBreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64MOVBreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpTrunc32to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc32to16 x)
+ // cond:
+ // result: (MOVHreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64MOVHreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpTrunc32to8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc32to8 x)
+ // cond:
+ // result: (MOVBreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64MOVBreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpTrunc64to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc64to16 x)
+ // cond:
+ // result: (MOVHreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64MOVHreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpTrunc64to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc64to32 x)
+ // cond:
+ // result: (MOVWreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64MOVWreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpTrunc64to8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc64to8 x)
+ // cond:
+ // result: (MOVBreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64MOVBreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpXor16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Xor16 x y)
+ // cond:
+ // result: (XOR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64XOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpXor32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Xor32 x y)
+ // cond:
+ // result: (XOR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64XOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpXor64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Xor64 x y)
+ // cond:
+ // result: (XOR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64XOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpXor8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Xor8 x y)
+ // cond:
+ // result: (XOR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64XOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpZero(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Zero [s] _ mem)
+ // cond: SizeAndAlign(s).Size() == 0
+ // result: mem
+ for {
+ s := v.AuxInt
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = mem.Type
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 1
+ // result: (MOVBstorezero destptr mem)
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVBstorezero)
+ v.AddArg(destptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
+ // result: (MOVHstorezero destptr mem)
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVHstorezero)
+ v.AddArg(destptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 2
+ // result: (MOVBstorezero [1] destptr (MOVBstorezero [0] destptr mem))
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 2) {
+ break
+ }
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = 1
+ v.AddArg(destptr)
+ v0 := b.NewValue0(v.Line, OpPPC64MOVBstorezero, TypeMem)
+ v0.AuxInt = 0
+ v0.AddArg(destptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
+ // result: (MOVWstorezero destptr mem)
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVWstorezero)
+ v.AddArg(destptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
+ // result: (MOVHstorezero [2] destptr (MOVHstorezero [0] destptr mem))
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVHstorezero)
+ v.AuxInt = 2
+ v.AddArg(destptr)
+ v0 := b.NewValue0(v.Line, OpPPC64MOVHstorezero, TypeMem)
+ v0.AuxInt = 0
+ v0.AddArg(destptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 4
+ // result: (MOVBstorezero [3] destptr (MOVBstorezero [2] destptr (MOVBstorezero [1] destptr (MOVBstorezero [0] destptr mem))))
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 4) {
+ break
+ }
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = 3
+ v.AddArg(destptr)
+ v0 := b.NewValue0(v.Line, OpPPC64MOVBstorezero, TypeMem)
+ v0.AuxInt = 2
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Line, OpPPC64MOVBstorezero, TypeMem)
+ v1.AuxInt = 1
+ v1.AddArg(destptr)
+ v2 := b.NewValue0(v.Line, OpPPC64MOVBstorezero, TypeMem)
+ v2.AuxInt = 0
+ v2.AddArg(destptr)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0
+ // result: (MOVDstorezero [0] destptr mem)
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = 0
+ v.AddArg(destptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0
+ // result: (MOVWstorezero [4] destptr (MOVWstorezero [0] destptr mem))
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVWstorezero)
+ v.AuxInt = 4
+ v.AddArg(destptr)
+ v0 := b.NewValue0(v.Line, OpPPC64MOVWstorezero, TypeMem)
+ v0.AuxInt = 0
+ v0.AddArg(destptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0
+ // result: (MOVHstorezero [6] destptr (MOVHstorezero [4] destptr (MOVHstorezero [2] destptr (MOVHstorezero [0] destptr mem))))
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVHstorezero)
+ v.AuxInt = 6
+ v.AddArg(destptr)
+ v0 := b.NewValue0(v.Line, OpPPC64MOVHstorezero, TypeMem)
+ v0.AuxInt = 4
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Line, OpPPC64MOVHstorezero, TypeMem)
+ v1.AuxInt = 2
+ v1.AddArg(destptr)
+ v2 := b.NewValue0(v.Line, OpPPC64MOVHstorezero, TypeMem)
+ v2.AuxInt = 0
+ v2.AddArg(destptr)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 3
+ // result: (MOVBstorezero [2] destptr (MOVBstorezero [1] destptr (MOVBstorezero [0] destptr mem)))
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 3) {
+ break
+ }
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = 2
+ v.AddArg(destptr)
+ v0 := b.NewValue0(v.Line, OpPPC64MOVBstorezero, TypeMem)
+ v0.AuxInt = 1
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Line, OpPPC64MOVBstorezero, TypeMem)
+ v1.AuxInt = 0
+ v1.AddArg(destptr)
+ v1.AddArg(mem)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0
+ // result: (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem))
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = 8
+ v.AddArg(destptr)
+ v0 := b.NewValue0(v.Line, OpPPC64MOVDstorezero, TypeMem)
+ v0.AuxInt = 0
+ v0.AddArg(destptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0
+ // result: (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem)))
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = 16
+ v.AddArg(destptr)
+ v0 := b.NewValue0(v.Line, OpPPC64MOVDstorezero, TypeMem)
+ v0.AuxInt = 8
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Line, OpPPC64MOVDstorezero, TypeMem)
+ v1.AuxInt = 0
+ v1.AddArg(destptr)
+ v1.AddArg(mem)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: SizeAndAlign(s).Size() == 32 && SizeAndAlign(s).Align()%8 == 0
+ // result: (MOVDstorezero [24] destptr (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem))))
+ for {
+ s := v.AuxInt
+ destptr := v.Args[0]
+ mem := v.Args[1]
+ if !(SizeAndAlign(s).Size() == 32 && SizeAndAlign(s).Align()%8 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = 24
+ v.AddArg(destptr)
+ v0 := b.NewValue0(v.Line, OpPPC64MOVDstorezero, TypeMem)
+ v0.AuxInt = 16
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Line, OpPPC64MOVDstorezero, TypeMem)
+ v1.AuxInt = 8
+ v1.AddArg(destptr)
+ v2 := b.NewValue0(v.Line, OpPPC64MOVDstorezero, TypeMem)
+ v2.AuxInt = 0
+ v2.AddArg(destptr)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0
+ // result: (LoweredZero [SizeAndAlign(s).Align()] ptr (ADDconst <ptr.Type> ptr [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)]) mem)
+ for {
+ s := v.AuxInt
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !((SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0) {
+ break
+ }
+ v.reset(OpPPC64LoweredZero)
+ v.AuxInt = SizeAndAlign(s).Align()
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Line, OpPPC64ADDconst, ptr.Type)
+ v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+ v0.AddArg(ptr)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
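+// Annotation (sketch, not generator output): the Zero rules above unroll
+// small sizes with the widest store-zero op the alignment allows and fall
+// back to the LoweredZero loop for large or 8-byte-misaligned blocks; the
+// sizes the guard excludes presumably hit a Duff-style rule outside this
+// hunk. Hypothetical summary of the strategy order:
+func zeroStrategy(size, align int64, noDuffDevice bool) string {
+	switch size {
+	case 0, 1, 2, 3, 4, 8, 16, 24, 32:
+		// unrolled when alignment permits; otherwise these sizes also
+		// fall through to the LoweredZero rule below
+		return "unrolled MOV{B,H,W,D}storezero"
+	}
+	if size > 512 || noDuffDevice || align%8 != 0 {
+		return "LoweredZero loop"
+	}
+	return "duffzero-style path (rule outside this hunk)"
+}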
+func rewriteValuePPC64_OpZeroExt16to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt16to32 x)
+ // cond:
+ // result: (MOVHZreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpZeroExt16to64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt16to64 x)
+ // cond:
+ // result: (MOVHZreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpZeroExt32to64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt32to64 x)
+ // cond:
+ // result: (MOVWZreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64MOVWZreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpZeroExt8to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt8to16 x)
+ // cond:
+ // result: (MOVBZreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpZeroExt8to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt8to32 x)
+ // cond:
+ // result: (MOVBZreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpZeroExt8to64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt8to64 x)
+ // cond:
+ // result: (MOVBZreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+}
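+// Annotation: the ZeroExt* forms mirror the SignExt* lowerings earlier, using
+// MOV{B,H,W}Zreg, which clear the upper bits instead of sign-filling them.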
+func rewriteBlockPPC64(b *Block) bool {
+ switch b.Kind {
+ case BlockPPC64EQ:
+ // match: (EQ (FlagEQ) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (EQ (FlagLT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64FlagLT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (EQ (FlagGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64FlagGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (EQ (InvertFlags cmp) yes no)
+ // cond:
+ // result: (EQ cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockPPC64EQ
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockPPC64GE:
+ // match: (GE (FlagEQ) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GE (FlagLT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64FlagLT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GE (FlagGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64FlagGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GE (InvertFlags cmp) yes no)
+ // cond:
+ // result: (LE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockPPC64LE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockPPC64GT:
+ // match: (GT (FlagEQ) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GT (FlagLT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64FlagLT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (GT (FlagGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64FlagGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (GT (InvertFlags cmp) yes no)
+ // cond:
+ // result: (LT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockPPC64LT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockIf:
+ // match: (If (Equal cc) yes no)
+ // cond:
+ // result: (EQ cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64Equal {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockPPC64EQ
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (NotEqual cc) yes no)
+ // cond:
+ // result: (NE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64NotEqual {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockPPC64NE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (LessThan cc) yes no)
+ // cond:
+ // result: (LT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64LessThan {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockPPC64LT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (LessEqual cc) yes no)
+ // cond:
+ // result: (LE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64LessEqual {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockPPC64LE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (GreaterThan cc) yes no)
+ // cond:
+ // result: (GT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64GreaterThan {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockPPC64GT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (GreaterEqual cc) yes no)
+ // cond:
+ // result: (GE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64GreaterEqual {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockPPC64GE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (FLessThan cc) yes no)
+ // cond:
+ // result: (FLT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64FLessThan {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockPPC64FLT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (FLessEqual cc) yes no)
+ // cond:
+ // result: (FLE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64FLessEqual {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockPPC64FLE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (FGreaterThan cc) yes no)
+ // cond:
+ // result: (FGT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64FGreaterThan {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockPPC64FGT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (FGreaterEqual cc) yes no)
+ // cond:
+ // result: (FGE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64FGreaterEqual {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockPPC64FGE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If cond yes no)
+ // cond:
+ // result: (NE (CMPWconst [0] cond) yes no)
+ for {
+ v := b.Control
+ cond := b.Control
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockPPC64NE
+ v0 := b.NewValue0(v.Line, OpPPC64CMPWconst, TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(cond)
+ b.SetControl(v0)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockPPC64LE:
+ // match: (LE (FlagEQ) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LE (FlagLT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64FlagLT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LE (FlagGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64FlagGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (LE (InvertFlags cmp) yes no)
+ // cond:
+ // result: (GE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockPPC64GE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockPPC64LT:
+ // match: (LT (FlagEQ) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (LT (FlagLT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64FlagLT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (LT (FlagGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64FlagGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (LT (InvertFlags cmp) yes no)
+ // cond:
+ // result: (GT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockPPC64GT
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockPPC64NE:
+ // match: (NE (CMPWconst [0] (Equal cc)) yes no)
+ // cond:
+ // result: (EQ cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64CMPWconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64Equal {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockPPC64EQ
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPWconst [0] (NotEqual cc)) yes no)
+ // cond:
+ // result: (NE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64CMPWconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64NotEqual {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockPPC64NE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPWconst [0] (LessThan cc)) yes no)
+ // cond:
+ // result: (LT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64CMPWconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64LessThan {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockPPC64LT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPWconst [0] (LessEqual cc)) yes no)
+ // cond:
+ // result: (LE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64CMPWconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64LessEqual {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockPPC64LE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPWconst [0] (GreaterThan cc)) yes no)
+ // cond:
+ // result: (GT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64CMPWconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64GreaterThan {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockPPC64GT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPWconst [0] (GreaterEqual cc)) yes no)
+ // cond:
+ // result: (GE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64CMPWconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64GreaterEqual {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockPPC64GE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (FlagEQ) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64FlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (NE (FlagLT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64FlagLT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (FlagGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64FlagGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (InvertFlags cmp) yes no)
+ // cond:
+ // result: (NE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpPPC64InvertFlags {
+ break
+ }
+ cmp := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockPPC64NE
+ b.SetControl(cmp)
+ _ = yes
+ _ = no
+ return true
+ }
+ }
+ return false
+}
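+// Annotation (sketch, not generator output): rewriteBlockPPC64 folds branches
+// whose flags are compile-time constants into BlockFirst, swapping successors
+// when the branch is statically false, turns (If (pred cc)) into the matching
+// conditional block, cancels the materialize-then-test chains matched under
+// BlockPPC64NE, and flips the comparison sense for InvertFlags. The constant-
+// flag truth table encoded by the rules above (hypothetical helper):
+func constFlagBranchTaken(kind, flag string) bool {
+	switch kind {
+	case "EQ":
+		return flag == "FlagEQ"
+	case "NE":
+		return flag != "FlagEQ"
+	case "LT":
+		return flag == "FlagLT"
+	case "LE":
+		return flag == "FlagEQ" || flag == "FlagLT"
+	case "GT":
+		return flag == "FlagGT"
+	case "GE":
+		return flag == "FlagEQ" || flag == "FlagGT"
+	}
+	return false
+}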
diff --git a/src/cmd/compile/internal/ssa/rewritedec64.go b/src/cmd/compile/internal/ssa/rewritedec64.go
new file mode 100644
index 0000000000..33d90f5341
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewritedec64.go
@@ -0,0 +1,2534 @@
+// autogenerated from gen/dec64.rules: do not edit!
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "math"
+
+var _ = math.MinInt8 // in case not otherwise used
+func rewriteValuedec64(v *Value, config *Config) bool {
+ switch v.Op {
+ case OpAdd64:
+ return rewriteValuedec64_OpAdd64(v, config)
+ case OpAnd64:
+ return rewriteValuedec64_OpAnd64(v, config)
+ case OpArg:
+ return rewriteValuedec64_OpArg(v, config)
+ case OpCom64:
+ return rewriteValuedec64_OpCom64(v, config)
+ case OpConst64:
+ return rewriteValuedec64_OpConst64(v, config)
+ case OpEq64:
+ return rewriteValuedec64_OpEq64(v, config)
+ case OpGeq64:
+ return rewriteValuedec64_OpGeq64(v, config)
+ case OpGeq64U:
+ return rewriteValuedec64_OpGeq64U(v, config)
+ case OpGreater64:
+ return rewriteValuedec64_OpGreater64(v, config)
+ case OpGreater64U:
+ return rewriteValuedec64_OpGreater64U(v, config)
+ case OpInt64Hi:
+ return rewriteValuedec64_OpInt64Hi(v, config)
+ case OpInt64Lo:
+ return rewriteValuedec64_OpInt64Lo(v, config)
+ case OpLeq64:
+ return rewriteValuedec64_OpLeq64(v, config)
+ case OpLeq64U:
+ return rewriteValuedec64_OpLeq64U(v, config)
+ case OpLess64:
+ return rewriteValuedec64_OpLess64(v, config)
+ case OpLess64U:
+ return rewriteValuedec64_OpLess64U(v, config)
+ case OpLoad:
+ return rewriteValuedec64_OpLoad(v, config)
+ case OpLrot64:
+ return rewriteValuedec64_OpLrot64(v, config)
+ case OpLsh16x64:
+ return rewriteValuedec64_OpLsh16x64(v, config)
+ case OpLsh32x64:
+ return rewriteValuedec64_OpLsh32x64(v, config)
+ case OpLsh64x16:
+ return rewriteValuedec64_OpLsh64x16(v, config)
+ case OpLsh64x32:
+ return rewriteValuedec64_OpLsh64x32(v, config)
+ case OpLsh64x64:
+ return rewriteValuedec64_OpLsh64x64(v, config)
+ case OpLsh64x8:
+ return rewriteValuedec64_OpLsh64x8(v, config)
+ case OpLsh8x64:
+ return rewriteValuedec64_OpLsh8x64(v, config)
+ case OpMul64:
+ return rewriteValuedec64_OpMul64(v, config)
+ case OpNeg64:
+ return rewriteValuedec64_OpNeg64(v, config)
+ case OpNeq64:
+ return rewriteValuedec64_OpNeq64(v, config)
+ case OpOr64:
+ return rewriteValuedec64_OpOr64(v, config)
+ case OpRsh16Ux64:
+ return rewriteValuedec64_OpRsh16Ux64(v, config)
+ case OpRsh16x64:
+ return rewriteValuedec64_OpRsh16x64(v, config)
+ case OpRsh32Ux64:
+ return rewriteValuedec64_OpRsh32Ux64(v, config)
+ case OpRsh32x64:
+ return rewriteValuedec64_OpRsh32x64(v, config)
+ case OpRsh64Ux16:
+ return rewriteValuedec64_OpRsh64Ux16(v, config)
+ case OpRsh64Ux32:
+ return rewriteValuedec64_OpRsh64Ux32(v, config)
+ case OpRsh64Ux64:
+ return rewriteValuedec64_OpRsh64Ux64(v, config)
+ case OpRsh64Ux8:
+ return rewriteValuedec64_OpRsh64Ux8(v, config)
+ case OpRsh64x16:
+ return rewriteValuedec64_OpRsh64x16(v, config)
+ case OpRsh64x32:
+ return rewriteValuedec64_OpRsh64x32(v, config)
+ case OpRsh64x64:
+ return rewriteValuedec64_OpRsh64x64(v, config)
+ case OpRsh64x8:
+ return rewriteValuedec64_OpRsh64x8(v, config)
+ case OpRsh8Ux64:
+ return rewriteValuedec64_OpRsh8Ux64(v, config)
+ case OpRsh8x64:
+ return rewriteValuedec64_OpRsh8x64(v, config)
+ case OpSignExt16to64:
+ return rewriteValuedec64_OpSignExt16to64(v, config)
+ case OpSignExt32to64:
+ return rewriteValuedec64_OpSignExt32to64(v, config)
+ case OpSignExt8to64:
+ return rewriteValuedec64_OpSignExt8to64(v, config)
+ case OpStore:
+ return rewriteValuedec64_OpStore(v, config)
+ case OpSub64:
+ return rewriteValuedec64_OpSub64(v, config)
+ case OpTrunc64to16:
+ return rewriteValuedec64_OpTrunc64to16(v, config)
+ case OpTrunc64to32:
+ return rewriteValuedec64_OpTrunc64to32(v, config)
+ case OpTrunc64to8:
+ return rewriteValuedec64_OpTrunc64to8(v, config)
+ case OpXor64:
+ return rewriteValuedec64_OpXor64(v, config)
+ case OpZeroExt16to64:
+ return rewriteValuedec64_OpZeroExt16to64(v, config)
+ case OpZeroExt32to64:
+ return rewriteValuedec64_OpZeroExt32to64(v, config)
+ case OpZeroExt8to64:
+ return rewriteValuedec64_OpZeroExt8to64(v, config)
+ }
+ return false
+}
+func rewriteValuedec64_OpAdd64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add64 x y)
+ // cond:
+ // result: (Int64Make (Add32withcarry <config.fe.TypeInt32()> (Int64Hi x) (Int64Hi y) (Select0 <TypeFlags> (Add32carry (Int64Lo x) (Int64Lo y)))) (Select1 <config.fe.TypeUInt32()> (Add32carry (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpAdd32withcarry, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpSelect0, TypeFlags)
+ v4 := b.NewValue0(v.Line, OpAdd32carry, MakeTuple(TypeFlags, config.fe.TypeUInt32()))
+ v5 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v5.AddArg(x)
+ v4.AddArg(v5)
+ v6 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v6.AddArg(y)
+ v4.AddArg(v6)
+ v3.AddArg(v4)
+ v0.AddArg(v3)
+ v.AddArg(v0)
+ v7 := b.NewValue0(v.Line, OpSelect1, config.fe.TypeUInt32())
+ v8 := b.NewValue0(v.Line, OpAdd32carry, MakeTuple(TypeFlags, config.fe.TypeUInt32()))
+ v9 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v9.AddArg(x)
+ v8.AddArg(v9)
+ v10 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v10.AddArg(y)
+ v8.AddArg(v10)
+ v7.AddArg(v8)
+ v.AddArg(v7)
+ return true
+ }
+}
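+// Annotation (sketch, not generator output): Add64 splits into a low-word add
+// that produces a carry (Add32carry, consumed via Select0/Select1) and a
+// high-word Add32withcarry; the duplicated Add32carry trees built above are
+// expected to be merged by the cse pass. Plain-Go equivalent (hypothetical):
+func add64via32(xHi, xLo, yHi, yLo uint32) (hi, lo uint32) {
+	lo = xLo + yLo
+	var carry uint32
+	if lo < xLo { // the low-word add wrapped
+		carry = 1
+	}
+	hi = xHi + yHi + carry
+	return
+}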
+func rewriteValuedec64_OpAnd64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And64 x y)
+ // cond:
+ // result: (Int64Make (And32 <config.fe.TypeUInt32()> (Int64Hi x) (Int64Hi y)) (And32 <config.fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpAnd32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpAnd32, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v4.AddArg(x)
+ v3.AddArg(v4)
+ v5 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v5.AddArg(y)
+ v3.AddArg(v5)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpArg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Arg {n} [off])
+ // cond: is64BitInt(v.Type) && v.Type.IsSigned()
+ // result: (Int64Make (Arg <config.fe.TypeInt32()> {n} [off+4]) (Arg <config.fe.TypeUInt32()> {n} [off]))
+ for {
+ off := v.AuxInt
+ n := v.Aux
+ if !(is64BitInt(v.Type) && v.Type.IsSigned()) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt32())
+ v0.AuxInt = off + 4
+ v0.Aux = n
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32())
+ v1.AuxInt = off
+ v1.Aux = n
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Arg {n} [off])
+ // cond: is64BitInt(v.Type) && !v.Type.IsSigned()
+ // result: (Int64Make (Arg <config.fe.TypeUInt32()> {n} [off+4]) (Arg <config.fe.TypeUInt32()> {n} [off]))
+ for {
+ off := v.AuxInt
+ n := v.Aux
+ if !(is64BitInt(v.Type) && !v.Type.IsSigned()) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32())
+ v0.AuxInt = off + 4
+ v0.Aux = n
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32())
+ v1.AuxInt = off
+ v1.Aux = n
+ v.AddArg(v1)
+ return true
+ }
+ return false
+}
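+// Annotation: both Arg rules place the high word at off+4 and the low word at
+// off, which presumes a little-endian 32-bit target (an assumption; the
+// endianness is not spelled out in this hunk).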
+func rewriteValuedec64_OpCom64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com64 x)
+ // cond:
+ // result: (Int64Make (Com32 <config.fe.TypeUInt32()> (Int64Hi x)) (Com32 <config.fe.TypeUInt32()> (Int64Lo x)))
+ for {
+ x := v.Args[0]
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpCom32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpCom32, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v3.AddArg(x)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpConst64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const64 <t> [c])
+ // cond: t.IsSigned()
+ // result: (Int64Make (Const32 <config.fe.TypeInt32()> [c>>32]) (Const32 <config.fe.TypeUInt32()> [int64(int32(c))]))
+ for {
+ t := v.Type
+ c := v.AuxInt
+ if !(t.IsSigned()) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpConst32, config.fe.TypeInt32())
+ v0.AuxInt = c >> 32
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
+ v1.AuxInt = int64(int32(c))
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Const64 <t> [c])
+ // cond: !t.IsSigned()
+ // result: (Int64Make (Const32 <config.fe.TypeUInt32()> [c>>32]) (Const32 <config.fe.TypeUInt32()> [int64(int32(c))]))
+ for {
+ t := v.Type
+ c := v.AuxInt
+ if !(!t.IsSigned()) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
+ v0.AuxInt = c >> 32
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
+ v1.AuxInt = int64(int32(c))
+ v.AddArg(v1)
+ return true
+ }
+ return false
+}
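+// Annotation (sketch, not generator output): both Const64 rules split the
+// constant the same way; only the declared type of the high half differs.
+// The AuxInt arithmetic, in plain Go (hypothetical helper):
+func splitConst64(c int64) (hi, lo int64) {
+	return c >> 32, int64(int32(c)) // lo is stored sign-extended in the AuxInt
+}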
+func rewriteValuedec64_OpEq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq64 x y)
+ // cond:
+ // result: (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Eq32 (Int64Lo x) (Int64Lo y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAndB)
+ v0 := b.NewValue0(v.Line, OpEq32, config.fe.TypeBool())
+ v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpEq32, config.fe.TypeBool())
+ v4 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v4.AddArg(x)
+ v3.AddArg(v4)
+ v5 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v5.AddArg(y)
+ v3.AddArg(v5)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpGeq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64 x y)
+ // cond:
+ // result: (OrB (Greater32 (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Geq32U (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpOrB)
+ v0 := b.NewValue0(v.Line, OpGreater32, config.fe.TypeBool())
+ v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpAndB, config.fe.TypeBool())
+ v4 := b.NewValue0(v.Line, OpEq32, config.fe.TypeBool())
+ v5 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v5.AddArg(x)
+ v4.AddArg(v5)
+ v6 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v6.AddArg(y)
+ v4.AddArg(v6)
+ v3.AddArg(v4)
+ v7 := b.NewValue0(v.Line, OpGeq32U, config.fe.TypeBool())
+ v8 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v8.AddArg(x)
+ v7.AddArg(v8)
+ v9 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v9.AddArg(y)
+ v7.AddArg(v9)
+ v3.AddArg(v7)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpGeq64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64U x y)
+ // cond:
+ // result: (OrB (Greater32U (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Geq32U (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpOrB)
+ v0 := b.NewValue0(v.Line, OpGreater32U, config.fe.TypeBool())
+ v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpAndB, config.fe.TypeBool())
+ v4 := b.NewValue0(v.Line, OpEq32, config.fe.TypeBool())
+ v5 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v5.AddArg(x)
+ v4.AddArg(v5)
+ v6 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v6.AddArg(y)
+ v4.AddArg(v6)
+ v3.AddArg(v4)
+ v7 := b.NewValue0(v.Line, OpGeq32U, config.fe.TypeBool())
+ v8 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v8.AddArg(x)
+ v7.AddArg(v8)
+ v9 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v9.AddArg(y)
+ v7.AddArg(v9)
+ v3.AddArg(v7)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpGreater64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64 x y)
+ // cond:
+ // result: (OrB (Greater32 (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Greater32U (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpOrB)
+ v0 := b.NewValue0(v.Line, OpGreater32, config.fe.TypeBool())
+ v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpAndB, config.fe.TypeBool())
+ v4 := b.NewValue0(v.Line, OpEq32, config.fe.TypeBool())
+ v5 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v5.AddArg(x)
+ v4.AddArg(v5)
+ v6 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v6.AddArg(y)
+ v4.AddArg(v6)
+ v3.AddArg(v4)
+ v7 := b.NewValue0(v.Line, OpGreater32U, config.fe.TypeBool())
+ v8 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v8.AddArg(x)
+ v7.AddArg(v8)
+ v9 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v9.AddArg(y)
+ v7.AddArg(v9)
+ v3.AddArg(v7)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpGreater64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64U x y)
+ // cond:
+ // result: (OrB (Greater32U (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Greater32U (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpOrB)
+ v0 := b.NewValue0(v.Line, OpGreater32U, config.fe.TypeBool())
+ v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpAndB, config.fe.TypeBool())
+ v4 := b.NewValue0(v.Line, OpEq32, config.fe.TypeBool())
+ v5 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v5.AddArg(x)
+ v4.AddArg(v5)
+ v6 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v6.AddArg(y)
+ v4.AddArg(v6)
+ v3.AddArg(v4)
+ v7 := b.NewValue0(v.Line, OpGreater32U, config.fe.TypeBool())
+ v8 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v8.AddArg(x)
+ v7.AddArg(v8)
+ v9 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v9.AddArg(y)
+ v7.AddArg(v9)
+ v3.AddArg(v7)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpInt64Hi(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Int64Hi (Int64Make hi _))
+ // cond:
+ // result: hi
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ hi := v_0.Args[0]
+ v.reset(OpCopy)
+ v.Type = hi.Type
+ v.AddArg(hi)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpInt64Lo(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Int64Lo (Int64Make _ lo))
+ // cond:
+ // result: lo
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ lo := v_0.Args[1]
+ v.reset(OpCopy)
+ v.Type = lo.Type
+ v.AddArg(lo)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpLeq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64 x y)
+ // cond:
+ // result: (OrB (Less32 (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Leq32U (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpOrB)
+ v0 := b.NewValue0(v.Line, OpLess32, config.fe.TypeBool())
+ v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpAndB, config.fe.TypeBool())
+ v4 := b.NewValue0(v.Line, OpEq32, config.fe.TypeBool())
+ v5 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v5.AddArg(x)
+ v4.AddArg(v5)
+ v6 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v6.AddArg(y)
+ v4.AddArg(v6)
+ v3.AddArg(v4)
+ v7 := b.NewValue0(v.Line, OpLeq32U, config.fe.TypeBool())
+ v8 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v8.AddArg(x)
+ v7.AddArg(v8)
+ v9 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v9.AddArg(y)
+ v7.AddArg(v9)
+ v3.AddArg(v7)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpLeq64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64U x y)
+ // cond:
+ // result: (OrB (Less32U (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Leq32U (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpOrB)
+ v0 := b.NewValue0(v.Line, OpLess32U, config.fe.TypeBool())
+ v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpAndB, config.fe.TypeBool())
+ v4 := b.NewValue0(v.Line, OpEq32, config.fe.TypeBool())
+ v5 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v5.AddArg(x)
+ v4.AddArg(v5)
+ v6 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v6.AddArg(y)
+ v4.AddArg(v6)
+ v3.AddArg(v4)
+ v7 := b.NewValue0(v.Line, OpLeq32U, config.fe.TypeBool())
+ v8 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v8.AddArg(x)
+ v7.AddArg(v8)
+ v9 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v9.AddArg(y)
+ v7.AddArg(v9)
+ v3.AddArg(v7)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpLess64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64 x y)
+ // cond:
+ // result: (OrB (Less32 (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Less32U (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpOrB)
+ v0 := b.NewValue0(v.Line, OpLess32, config.fe.TypeBool())
+ v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpAndB, config.fe.TypeBool())
+ v4 := b.NewValue0(v.Line, OpEq32, config.fe.TypeBool())
+ v5 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v5.AddArg(x)
+ v4.AddArg(v5)
+ v6 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v6.AddArg(y)
+ v4.AddArg(v6)
+ v3.AddArg(v4)
+ v7 := b.NewValue0(v.Line, OpLess32U, config.fe.TypeBool())
+ v8 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v8.AddArg(x)
+ v7.AddArg(v8)
+ v9 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v9.AddArg(y)
+ v7.AddArg(v9)
+ v3.AddArg(v7)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpLess64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64U x y)
+ // cond:
+ // result: (OrB (Less32U (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Less32U (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpOrB)
+ v0 := b.NewValue0(v.Line, OpLess32U, config.fe.TypeBool())
+ v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpAndB, config.fe.TypeBool())
+ v4 := b.NewValue0(v.Line, OpEq32, config.fe.TypeBool())
+ v5 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v5.AddArg(x)
+ v4.AddArg(v5)
+ v6 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v6.AddArg(y)
+ v4.AddArg(v6)
+ v3.AddArg(v4)
+ v7 := b.NewValue0(v.Line, OpLess32U, config.fe.TypeBool())
+ v8 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v8.AddArg(x)
+ v7.AddArg(v8)
+ v9 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v9.AddArg(y)
+ v7.AddArg(v9)
+ v3.AddArg(v7)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpLoad(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Load <t> ptr mem)
+ // cond: is64BitInt(t) && t.IsSigned()
+ // result: (Int64Make (Load <config.fe.TypeInt32()> (OffPtr <config.fe.TypeInt32().PtrTo()> [4] ptr) mem) (Load <config.fe.TypeUInt32()> ptr mem))
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is64BitInt(t) && t.IsSigned()) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpLoad, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeInt32().PtrTo())
+ v1.AuxInt = 4
+ v1.AddArg(ptr)
+ v0.AddArg(v1)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpLoad, config.fe.TypeUInt32())
+ v2.AddArg(ptr)
+ v2.AddArg(mem)
+ v.AddArg(v2)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitInt(t) && !t.IsSigned()
+ // result: (Int64Make (Load <config.fe.TypeUInt32()> (OffPtr <config.fe.TypeUInt32().PtrTo()> [4] ptr) mem) (Load <config.fe.TypeUInt32()> ptr mem))
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is64BitInt(t) && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpLoad, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeUInt32().PtrTo())
+ v1.AuxInt = 4
+ v1.AddArg(ptr)
+ v0.AddArg(v1)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpLoad, config.fe.TypeUInt32())
+ v2.AddArg(ptr)
+ v2.AddArg(mem)
+ v.AddArg(v2)
+ return true
+ }
+ return false
+}
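
Note: both Load rules split a 64-bit load into two 32-bit loads, reading the high word at byte offset 4 and the low word at offset 0; this assumes the little-endian half layout of the 32-bit targets this pass serves at this point (386 and ARM), and only the type of the high-word load differs between the signed and unsigned cases. A sketch of the resulting memory view (illustrative helper, little-endian assumed):

    // load64 mirrors the Load rewrite: low word at ptr+0,
    // high word at ptr+4.
    func load64(p *[2]uint32) uint64 {
            lo := p[0] // Load ptr
            hi := p[1] // Load (OffPtr [4] ptr)
            return uint64(hi)<<32 | uint64(lo)
    }
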
+func rewriteValuedec64_OpLrot64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot64 (Int64Make hi lo) [c])
+ // cond: c <= 32
+ // result: (Int64Make (Or32 <config.fe.TypeUInt32()> (Lsh32x32 <config.fe.TypeUInt32()> hi (Const32 <config.fe.TypeUInt32()> [c])) (Rsh32Ux32 <config.fe.TypeUInt32()> lo (Const32 <config.fe.TypeUInt32()> [32-c]))) (Or32 <config.fe.TypeUInt32()> (Lsh32x32 <config.fe.TypeUInt32()> lo (Const32 <config.fe.TypeUInt32()> [c])) (Rsh32Ux32 <config.fe.TypeUInt32()> hi (Const32 <config.fe.TypeUInt32()> [32-c]))))
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ hi := v_0.Args[0]
+ lo := v_0.Args[1]
+ if !(c <= 32) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpLsh32x32, config.fe.TypeUInt32())
+ v1.AddArg(hi)
+ v2 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
+ v2.AuxInt = c
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Line, OpRsh32Ux32, config.fe.TypeUInt32())
+ v3.AddArg(lo)
+ v4 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
+ v4.AuxInt = 32 - c
+ v3.AddArg(v4)
+ v0.AddArg(v3)
+ v.AddArg(v0)
+ v5 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v6 := b.NewValue0(v.Line, OpLsh32x32, config.fe.TypeUInt32())
+ v6.AddArg(lo)
+ v7 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
+ v7.AuxInt = c
+ v6.AddArg(v7)
+ v5.AddArg(v6)
+ v8 := b.NewValue0(v.Line, OpRsh32Ux32, config.fe.TypeUInt32())
+ v8.AddArg(hi)
+ v9 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
+ v9.AuxInt = 32 - c
+ v8.AddArg(v9)
+ v5.AddArg(v8)
+ v.AddArg(v5)
+ return true
+ }
+ // match: (Lrot64 (Int64Make hi lo) [c])
+ // cond: c > 32
+ // result: (Lrot64 (Int64Make lo hi) [c-32])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ hi := v_0.Args[0]
+ lo := v_0.Args[1]
+ if !(c > 32) {
+ break
+ }
+ v.reset(OpLrot64)
+ v.AuxInt = c - 32
+ v0 := b.NewValue0(v.Line, OpInt64Make, config.fe.TypeUInt64())
+ v0.AddArg(lo)
+ v0.AddArg(hi)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
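
Note: the two Lrot64 rules cooperate: a rotate by more than 32 first swaps the halves and retries with c-32, so the c <= 32 case always does the real work. Even the c == 32 boundary composes correctly, since a 32-bit shift by 32 contributes zero and a shift by 0 passes its operand through. The same composition in plain Go (illustrative helper; Go shifts by counts >= 32 also yield 0, matching the generic ops):

    // rotl64 mirrors the Lrot64 rules on (hi, lo) halves.
    func rotl64(hi, lo uint32, c uint) (uint32, uint32) {
            if c > 32 {
                    hi, lo = lo, hi // second rule: swap halves, reduce count
                    c -= 32
            }
            return hi<<c | lo>>(32-c), lo<<c | hi>>(32-c)
    }
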
+func rewriteValuedec64_OpLsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const32 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := v_1_0.AuxInt
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Lsh16x64 x (Int64Make (Const32 [0]) lo))
+ // cond:
+ // result: (Lsh16x32 x lo)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ if v_1_0.AuxInt != 0 {
+ break
+ }
+ lo := v_1.Args[1]
+ v.reset(OpLsh16x32)
+ v.AddArg(x)
+ v.AddArg(lo)
+ return true
+ }
+ // match: (Lsh16x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Lsh16x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ hi := v_1.Args[0]
+ lo := v_1.Args[1]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpLsh16x32)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
+ v1.AddArg(hi)
+ v0.AddArg(v1)
+ v0.AddArg(lo)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
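
Note: this three-rule pattern repeats for every shift whose count is a decomposed 64-bit value. A constant nonzero high word means the count is at least 2^32, so the result is zero outright; a constant zero high word reduces to the plain 32-bit-count form; otherwise Zeromask folds the high word into the count. Zeromask x is 0 when x == 0 and 0xffffffff otherwise, so ORing it into the low word forces an out-of-range count whenever the high word is set, and the 32-bit shift then produces 0 anyway. A sketch of that fold (illustrative helper):

    // shiftCount32 mirrors (Or32 (Zeromask hi) lo): collapse a
    // 64-bit shift count into a 32-bit one that stays out of
    // range whenever the original count was >= 2^32.
    func shiftCount32(hi, lo uint32) uint32 {
            var mask uint32
            if hi != 0 {
                    mask = 0xffffffff // Zeromask hi
            }
            return mask | lo
    }
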
+func rewriteValuedec64_OpLsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const32 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := v_1_0.AuxInt
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Lsh32x64 x (Int64Make (Const32 [0]) lo))
+ // cond:
+ // result: (Lsh32x32 x lo)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ if v_1_0.AuxInt != 0 {
+ break
+ }
+ lo := v_1.Args[1]
+ v.reset(OpLsh32x32)
+ v.AddArg(x)
+ v.AddArg(lo)
+ return true
+ }
+ // match: (Lsh32x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Lsh32x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ hi := v_1.Args[0]
+ lo := v_1.Args[1]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpLsh32x32)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
+ v1.AddArg(hi)
+ v0.AddArg(v1)
+ v0.AddArg(lo)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpLsh64x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x16 (Int64Make hi lo) s)
+ // cond:
+ // result: (Int64Make (Or32 <config.fe.TypeUInt32()> (Or32 <config.fe.TypeUInt32()> (Lsh32x16 <config.fe.TypeUInt32()> hi s) (Rsh32Ux16 <config.fe.TypeUInt32()> lo (Sub16 <config.fe.TypeUInt16()> (Const16 <config.fe.TypeUInt16()> [32]) s))) (Lsh32x16 <config.fe.TypeUInt32()> lo (Sub16 <config.fe.TypeUInt16()> s (Const16 <config.fe.TypeUInt16()> [32])))) (Lsh32x16 <config.fe.TypeUInt32()> lo s))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ hi := v_0.Args[0]
+ lo := v_0.Args[1]
+ s := v.Args[1]
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Line, OpLsh32x16, config.fe.TypeUInt32())
+ v2.AddArg(hi)
+ v2.AddArg(s)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpRsh32Ux16, config.fe.TypeUInt32())
+ v3.AddArg(lo)
+ v4 := b.NewValue0(v.Line, OpSub16, config.fe.TypeUInt16())
+ v5 := b.NewValue0(v.Line, OpConst16, config.fe.TypeUInt16())
+ v5.AuxInt = 32
+ v4.AddArg(v5)
+ v4.AddArg(s)
+ v3.AddArg(v4)
+ v1.AddArg(v3)
+ v0.AddArg(v1)
+ v6 := b.NewValue0(v.Line, OpLsh32x16, config.fe.TypeUInt32())
+ v6.AddArg(lo)
+ v7 := b.NewValue0(v.Line, OpSub16, config.fe.TypeUInt16())
+ v7.AddArg(s)
+ v8 := b.NewValue0(v.Line, OpConst16, config.fe.TypeUInt16())
+ v8.AuxInt = 32
+ v7.AddArg(v8)
+ v6.AddArg(v7)
+ v0.AddArg(v6)
+ v.AddArg(v0)
+ v9 := b.NewValue0(v.Line, OpLsh32x16, config.fe.TypeUInt32())
+ v9.AddArg(lo)
+ v9.AddArg(s)
+ v.AddArg(v9)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpLsh64x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x32 (Int64Make hi lo) s)
+ // cond:
+ // result: (Int64Make (Or32 <config.fe.TypeUInt32()> (Or32 <config.fe.TypeUInt32()> (Lsh32x32 <config.fe.TypeUInt32()> hi s) (Rsh32Ux32 <config.fe.TypeUInt32()> lo (Sub32 <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [32]) s))) (Lsh32x32 <config.fe.TypeUInt32()> lo (Sub32 <config.fe.TypeUInt32()> s (Const32 <config.fe.TypeUInt32()> [32])))) (Lsh32x32 <config.fe.TypeUInt32()> lo s))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ hi := v_0.Args[0]
+ lo := v_0.Args[1]
+ s := v.Args[1]
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Line, OpLsh32x32, config.fe.TypeUInt32())
+ v2.AddArg(hi)
+ v2.AddArg(s)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpRsh32Ux32, config.fe.TypeUInt32())
+ v3.AddArg(lo)
+ v4 := b.NewValue0(v.Line, OpSub32, config.fe.TypeUInt32())
+ v5 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
+ v5.AuxInt = 32
+ v4.AddArg(v5)
+ v4.AddArg(s)
+ v3.AddArg(v4)
+ v1.AddArg(v3)
+ v0.AddArg(v1)
+ v6 := b.NewValue0(v.Line, OpLsh32x32, config.fe.TypeUInt32())
+ v6.AddArg(lo)
+ v7 := b.NewValue0(v.Line, OpSub32, config.fe.TypeUInt32())
+ v7.AddArg(s)
+ v8 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
+ v8.AuxInt = 32
+ v7.AddArg(v8)
+ v6.AddArg(v7)
+ v0.AddArg(v6)
+ v.AddArg(v0)
+ v9 := b.NewValue0(v.Line, OpLsh32x32, config.fe.TypeUInt32())
+ v9.AddArg(lo)
+ v9.AddArg(s)
+ v.AddArg(v9)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpLsh64x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const64 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := v_1_0.AuxInt
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Lsh64x64 x (Int64Make (Const32 [0]) lo))
+ // cond:
+ // result: (Lsh64x32 x lo)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ if v_1_0.AuxInt != 0 {
+ break
+ }
+ lo := v_1.Args[1]
+ v.reset(OpLsh64x32)
+ v.AddArg(x)
+ v.AddArg(lo)
+ return true
+ }
+ // match: (Lsh64x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Lsh64x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ hi := v_1.Args[0]
+ lo := v_1.Args[1]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpLsh64x32)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
+ v1.AddArg(hi)
+ v0.AddArg(v1)
+ v0.AddArg(lo)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpLsh64x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x8 (Int64Make hi lo) s)
+ // cond:
+ // result: (Int64Make (Or32 <config.fe.TypeUInt32()> (Or32 <config.fe.TypeUInt32()> (Lsh32x8 <config.fe.TypeUInt32()> hi s) (Rsh32Ux8 <config.fe.TypeUInt32()> lo (Sub8 <config.fe.TypeUInt8()> (Const8 <config.fe.TypeUInt8()> [32]) s))) (Lsh32x8 <config.fe.TypeUInt32()> lo (Sub8 <config.fe.TypeUInt8()> s (Const8 <config.fe.TypeUInt8()> [32])))) (Lsh32x8 <config.fe.TypeUInt32()> lo s))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ hi := v_0.Args[0]
+ lo := v_0.Args[1]
+ s := v.Args[1]
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Line, OpLsh32x8, config.fe.TypeUInt32())
+ v2.AddArg(hi)
+ v2.AddArg(s)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpRsh32Ux8, config.fe.TypeUInt32())
+ v3.AddArg(lo)
+ v4 := b.NewValue0(v.Line, OpSub8, config.fe.TypeUInt8())
+ v5 := b.NewValue0(v.Line, OpConst8, config.fe.TypeUInt8())
+ v5.AuxInt = 32
+ v4.AddArg(v5)
+ v4.AddArg(s)
+ v3.AddArg(v4)
+ v1.AddArg(v3)
+ v0.AddArg(v1)
+ v6 := b.NewValue0(v.Line, OpLsh32x8, config.fe.TypeUInt32())
+ v6.AddArg(lo)
+ v7 := b.NewValue0(v.Line, OpSub8, config.fe.TypeUInt8())
+ v7.AddArg(s)
+ v8 := b.NewValue0(v.Line, OpConst8, config.fe.TypeUInt8())
+ v8.AuxInt = 32
+ v7.AddArg(v8)
+ v6.AddArg(v7)
+ v0.AddArg(v6)
+ v.AddArg(v0)
+ v9 := b.NewValue0(v.Line, OpLsh32x8, config.fe.TypeUInt32())
+ v9.AddArg(lo)
+ v9.AddArg(s)
+ v.AddArg(v9)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpLsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const32 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := v_1_0.AuxInt
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Lsh8x64 x (Int64Make (Const32 [0]) lo))
+ // cond:
+ // result: (Lsh8x32 x lo)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ if v_1_0.AuxInt != 0 {
+ break
+ }
+ lo := v_1.Args[1]
+ v.reset(OpLsh8x32)
+ v.AddArg(x)
+ v.AddArg(lo)
+ return true
+ }
+ // match: (Lsh8x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Lsh8x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ hi := v_1.Args[0]
+ lo := v_1.Args[1]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpLsh8x32)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
+ v1.AddArg(hi)
+ v0.AddArg(v1)
+ v0.AddArg(lo)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpMul64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul64 x y)
+ // cond:
+ // result: (Int64Make (Add32 <config.fe.TypeUInt32()> (Mul32 <config.fe.TypeUInt32()> (Int64Lo x) (Int64Hi y)) (Add32 <config.fe.TypeUInt32()> (Mul32 <config.fe.TypeUInt32()> (Int64Hi x) (Int64Lo y)) (Select0 <config.fe.TypeUInt32()> (Mul32uhilo (Int64Lo x) (Int64Lo y))))) (Select1 <config.fe.TypeUInt32()> (Mul32uhilo (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpAdd32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpMul32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v2.AddArg(x)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v1.AddArg(v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Line, OpAdd32, config.fe.TypeUInt32())
+ v5 := b.NewValue0(v.Line, OpMul32, config.fe.TypeUInt32())
+ v6 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v6.AddArg(x)
+ v5.AddArg(v6)
+ v7 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v7.AddArg(y)
+ v5.AddArg(v7)
+ v4.AddArg(v5)
+ v8 := b.NewValue0(v.Line, OpSelect0, config.fe.TypeUInt32())
+ v9 := b.NewValue0(v.Line, OpMul32uhilo, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
+ v10 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v10.AddArg(x)
+ v9.AddArg(v10)
+ v11 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v11.AddArg(y)
+ v9.AddArg(v11)
+ v8.AddArg(v9)
+ v4.AddArg(v8)
+ v0.AddArg(v4)
+ v.AddArg(v0)
+ v12 := b.NewValue0(v.Line, OpSelect1, config.fe.TypeUInt32())
+ v13 := b.NewValue0(v.Line, OpMul32uhilo, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
+ v14 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v14.AddArg(x)
+ v13.AddArg(v14)
+ v15 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v15.AddArg(y)
+ v13.AddArg(v15)
+ v12.AddArg(v13)
+ v.AddArg(v12)
+ return true
+ }
+}
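
Note: Mul64 is the schoolbook decomposition. Mul32uhilo returns both halves of the 32x32->64 product of the low words (Select0 the high half, Select1 the low half); the low result word is that product's low half, and the high word adds the two cross products, whose wrap-around is harmless because the dropped x.hi*y.hi term only affects bits 64 and up. The identity, sketched with an illustrative helper:

    // mul64 mirrors the Mul64 rewrite on 32-bit halves.
    func mul64(x, y uint64) uint64 {
            xHi, xLo := uint32(x>>32), uint32(x)
            yHi, yLo := uint32(y>>32), uint32(y)
            p := uint64(xLo) * uint64(yLo) // Mul32uhilo
            hi := xLo*yHi + xHi*yLo + uint32(p>>32)
            return uint64(hi)<<32 | uint64(uint32(p))
    }
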
+func rewriteValuedec64_OpNeg64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg64 <t> x)
+ // cond:
+ // result: (Sub64 (Const64 <t> [0]) x)
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Line, OpConst64, t)
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuedec64_OpNeq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq64 x y)
+ // cond:
+ // result: (OrB (Neq32 (Int64Hi x) (Int64Hi y)) (Neq32 (Int64Lo x) (Int64Lo y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpOrB)
+ v0 := b.NewValue0(v.Line, OpNeq32, config.fe.TypeBool())
+ v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpNeq32, config.fe.TypeBool())
+ v4 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v4.AddArg(x)
+ v3.AddArg(v4)
+ v5 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v5.AddArg(y)
+ v3.AddArg(v5)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpOr64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or64 x y)
+ // cond:
+ // result: (Int64Make (Or32 <config.fe.TypeUInt32()> (Int64Hi x) (Int64Hi y)) (Or32 <config.fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v4.AddArg(x)
+ v3.AddArg(v4)
+ v5 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v5.AddArg(y)
+ v3.AddArg(v5)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh16Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const32 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := v_1_0.AuxInt
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Rsh16Ux64 x (Int64Make (Const32 [0]) lo))
+ // cond:
+ // result: (Rsh16Ux32 x lo)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ if v_1_0.AuxInt != 0 {
+ break
+ }
+ lo := v_1.Args[1]
+ v.reset(OpRsh16Ux32)
+ v.AddArg(x)
+ v.AddArg(lo)
+ return true
+ }
+ // match: (Rsh16Ux64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh16Ux32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ hi := v_1.Args[0]
+ lo := v_1.Args[1]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh16Ux32)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
+ v1.AddArg(hi)
+ v0.AddArg(v1)
+ v0.AddArg(lo)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpRsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x64 x (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Signmask (SignExt16to32 x))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := v_1_0.AuxInt
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpSignmask)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16x64 x (Int64Make (Const32 [0]) lo))
+ // cond:
+ // result: (Rsh16x32 x lo)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ if v_1_0.AuxInt != 0 {
+ break
+ }
+ lo := v_1.Args[1]
+ v.reset(OpRsh16x32)
+ v.AddArg(x)
+ v.AddArg(lo)
+ return true
+ }
+ // match: (Rsh16x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh16x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ hi := v_1.Args[0]
+ lo := v_1.Args[1]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh16x32)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
+ v1.AddArg(hi)
+ v0.AddArg(v1)
+ v0.AddArg(lo)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
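
Note: the signed Rsh*x64 functions differ from their unsigned counterparts only in the constant-count rule: a count of 2^32 or more does not produce zero but saturates to the sign, hence Signmask of the sign-extended operand. Signmask x is an arithmetic x >> 31, i.e. 0 for non-negative x and all ones for negative x. Sketch (illustrative helper):

    // signSaturate mirrors (Signmask (SignExt16to32 x)): an
    // arithmetic shift by >= 64 leaves only copies of the sign bit.
    func signSaturate(x int16) int32 {
            return int32(x) >> 31
    }
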
+func rewriteValuedec64_OpRsh32Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const32 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := v_1_0.AuxInt
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Rsh32Ux64 x (Int64Make (Const32 [0]) lo))
+ // cond:
+ // result: (Rsh32Ux32 x lo)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ if v_1_0.AuxInt != 0 {
+ break
+ }
+ lo := v_1.Args[1]
+ v.reset(OpRsh32Ux32)
+ v.AddArg(x)
+ v.AddArg(lo)
+ return true
+ }
+ // match: (Rsh32Ux64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh32Ux32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ hi := v_1.Args[0]
+ lo := v_1.Args[1]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh32Ux32)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
+ v1.AddArg(hi)
+ v0.AddArg(v1)
+ v0.AddArg(lo)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpRsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x64 x (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Signmask x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := v_1_0.AuxInt
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpSignmask)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x64 x (Int64Make (Const32 [0]) lo))
+ // cond:
+ // result: (Rsh32x32 x lo)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ if v_1_0.AuxInt != 0 {
+ break
+ }
+ lo := v_1.Args[1]
+ v.reset(OpRsh32x32)
+ v.AddArg(x)
+ v.AddArg(lo)
+ return true
+ }
+ // match: (Rsh32x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh32x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ hi := v_1.Args[0]
+ lo := v_1.Args[1]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh32x32)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
+ v1.AddArg(hi)
+ v0.AddArg(v1)
+ v0.AddArg(lo)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpRsh64Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64Ux16 (Int64Make hi lo) s)
+ // cond:
+ // result: (Int64Make (Rsh32Ux16 <config.fe.TypeUInt32()> hi s) (Or32 <config.fe.TypeUInt32()> (Or32 <config.fe.TypeUInt32()> (Rsh32Ux16 <config.fe.TypeUInt32()> lo s) (Lsh32x16 <config.fe.TypeUInt32()> hi (Sub16 <config.fe.TypeUInt16()> (Const16 <config.fe.TypeUInt16()> [32]) s))) (Rsh32Ux16 <config.fe.TypeUInt32()> hi (Sub16 <config.fe.TypeUInt16()> s (Const16 <config.fe.TypeUInt16()> [32])))))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ hi := v_0.Args[0]
+ lo := v_0.Args[1]
+ s := v.Args[1]
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpRsh32Ux16, config.fe.TypeUInt32())
+ v0.AddArg(hi)
+ v0.AddArg(s)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Line, OpRsh32Ux16, config.fe.TypeUInt32())
+ v3.AddArg(lo)
+ v3.AddArg(s)
+ v2.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpLsh32x16, config.fe.TypeUInt32())
+ v4.AddArg(hi)
+ v5 := b.NewValue0(v.Line, OpSub16, config.fe.TypeUInt16())
+ v6 := b.NewValue0(v.Line, OpConst16, config.fe.TypeUInt16())
+ v6.AuxInt = 32
+ v5.AddArg(v6)
+ v5.AddArg(s)
+ v4.AddArg(v5)
+ v2.AddArg(v4)
+ v1.AddArg(v2)
+ v7 := b.NewValue0(v.Line, OpRsh32Ux16, config.fe.TypeUInt32())
+ v7.AddArg(hi)
+ v8 := b.NewValue0(v.Line, OpSub16, config.fe.TypeUInt16())
+ v8.AddArg(s)
+ v9 := b.NewValue0(v.Line, OpConst16, config.fe.TypeUInt16())
+ v9.AuxInt = 32
+ v8.AddArg(v9)
+ v7.AddArg(v8)
+ v1.AddArg(v7)
+ v.AddArg(v1)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpRsh64Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64Ux32 (Int64Make hi lo) s)
+ // cond:
+ // result: (Int64Make (Rsh32Ux32 <config.fe.TypeUInt32()> hi s) (Or32 <config.fe.TypeUInt32()> (Or32 <config.fe.TypeUInt32()> (Rsh32Ux32 <config.fe.TypeUInt32()> lo s) (Lsh32x32 <config.fe.TypeUInt32()> hi (Sub32 <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [32]) s))) (Rsh32Ux32 <config.fe.TypeUInt32()> hi (Sub32 <config.fe.TypeUInt32()> s (Const32 <config.fe.TypeUInt32()> [32])))))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ hi := v_0.Args[0]
+ lo := v_0.Args[1]
+ s := v.Args[1]
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpRsh32Ux32, config.fe.TypeUInt32())
+ v0.AddArg(hi)
+ v0.AddArg(s)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Line, OpRsh32Ux32, config.fe.TypeUInt32())
+ v3.AddArg(lo)
+ v3.AddArg(s)
+ v2.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpLsh32x32, config.fe.TypeUInt32())
+ v4.AddArg(hi)
+ v5 := b.NewValue0(v.Line, OpSub32, config.fe.TypeUInt32())
+ v6 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
+ v6.AuxInt = 32
+ v5.AddArg(v6)
+ v5.AddArg(s)
+ v4.AddArg(v5)
+ v2.AddArg(v4)
+ v1.AddArg(v2)
+ v7 := b.NewValue0(v.Line, OpRsh32Ux32, config.fe.TypeUInt32())
+ v7.AddArg(hi)
+ v8 := b.NewValue0(v.Line, OpSub32, config.fe.TypeUInt32())
+ v8.AddArg(s)
+ v9 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
+ v9.AuxInt = 32
+ v8.AddArg(v9)
+ v7.AddArg(v8)
+ v1.AddArg(v7)
+ v.AddArg(v1)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpRsh64Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64Ux64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const64 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := v_1_0.AuxInt
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Rsh64Ux64 x (Int64Make (Const32 [0]) lo))
+ // cond:
+ // result: (Rsh64Ux32 x lo)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ if v_1_0.AuxInt != 0 {
+ break
+ }
+ lo := v_1.Args[1]
+ v.reset(OpRsh64Ux32)
+ v.AddArg(x)
+ v.AddArg(lo)
+ return true
+ }
+ // match: (Rsh64Ux64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh64Ux32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ hi := v_1.Args[0]
+ lo := v_1.Args[1]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh64Ux32)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
+ v1.AddArg(hi)
+ v0.AddArg(v1)
+ v0.AddArg(lo)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpRsh64Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64Ux8 (Int64Make hi lo) s)
+ // cond:
+ // result: (Int64Make (Rsh32Ux8 <config.fe.TypeUInt32()> hi s) (Or32 <config.fe.TypeUInt32()> (Or32 <config.fe.TypeUInt32()> (Rsh32Ux8 <config.fe.TypeUInt32()> lo s) (Lsh32x8 <config.fe.TypeUInt32()> hi (Sub8 <config.fe.TypeUInt8()> (Const8 <config.fe.TypeUInt8()> [32]) s))) (Rsh32Ux8 <config.fe.TypeUInt32()> hi (Sub8 <config.fe.TypeUInt8()> s (Const8 <config.fe.TypeUInt8()> [32])))))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ hi := v_0.Args[0]
+ lo := v_0.Args[1]
+ s := v.Args[1]
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpRsh32Ux8, config.fe.TypeUInt32())
+ v0.AddArg(hi)
+ v0.AddArg(s)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Line, OpRsh32Ux8, config.fe.TypeUInt32())
+ v3.AddArg(lo)
+ v3.AddArg(s)
+ v2.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpLsh32x8, config.fe.TypeUInt32())
+ v4.AddArg(hi)
+ v5 := b.NewValue0(v.Line, OpSub8, config.fe.TypeUInt8())
+ v6 := b.NewValue0(v.Line, OpConst8, config.fe.TypeUInt8())
+ v6.AuxInt = 32
+ v5.AddArg(v6)
+ v5.AddArg(s)
+ v4.AddArg(v5)
+ v2.AddArg(v4)
+ v1.AddArg(v2)
+ v7 := b.NewValue0(v.Line, OpRsh32Ux8, config.fe.TypeUInt32())
+ v7.AddArg(hi)
+ v8 := b.NewValue0(v.Line, OpSub8, config.fe.TypeUInt8())
+ v8.AddArg(s)
+ v9 := b.NewValue0(v.Line, OpConst8, config.fe.TypeUInt8())
+ v9.AuxInt = 32
+ v8.AddArg(v9)
+ v7.AddArg(v8)
+ v1.AddArg(v7)
+ v.AddArg(v1)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpRsh64x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x16 (Int64Make hi lo) s)
+ // cond:
+ // result: (Int64Make (Rsh32x16 <config.fe.TypeUInt32()> hi s) (Or32 <config.fe.TypeUInt32()> (Or32 <config.fe.TypeUInt32()> (Rsh32Ux16 <config.fe.TypeUInt32()> lo s) (Lsh32x16 <config.fe.TypeUInt32()> hi (Sub16 <config.fe.TypeUInt16()> (Const16 <config.fe.TypeUInt16()> [32]) s))) (And32 <config.fe.TypeUInt32()> (Rsh32x16 <config.fe.TypeUInt32()> hi (Sub16 <config.fe.TypeUInt16()> s (Const16 <config.fe.TypeUInt16()> [32]))) (Zeromask (ZeroExt16to32 (Rsh16Ux32 <config.fe.TypeUInt16()> s (Const32 <config.fe.TypeUInt32()> [5])))))))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ hi := v_0.Args[0]
+ lo := v_0.Args[1]
+ s := v.Args[1]
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpRsh32x16, config.fe.TypeUInt32())
+ v0.AddArg(hi)
+ v0.AddArg(s)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Line, OpRsh32Ux16, config.fe.TypeUInt32())
+ v3.AddArg(lo)
+ v3.AddArg(s)
+ v2.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpLsh32x16, config.fe.TypeUInt32())
+ v4.AddArg(hi)
+ v5 := b.NewValue0(v.Line, OpSub16, config.fe.TypeUInt16())
+ v6 := b.NewValue0(v.Line, OpConst16, config.fe.TypeUInt16())
+ v6.AuxInt = 32
+ v5.AddArg(v6)
+ v5.AddArg(s)
+ v4.AddArg(v5)
+ v2.AddArg(v4)
+ v1.AddArg(v2)
+ v7 := b.NewValue0(v.Line, OpAnd32, config.fe.TypeUInt32())
+ v8 := b.NewValue0(v.Line, OpRsh32x16, config.fe.TypeUInt32())
+ v8.AddArg(hi)
+ v9 := b.NewValue0(v.Line, OpSub16, config.fe.TypeUInt16())
+ v9.AddArg(s)
+ v10 := b.NewValue0(v.Line, OpConst16, config.fe.TypeUInt16())
+ v10.AuxInt = 32
+ v9.AddArg(v10)
+ v8.AddArg(v9)
+ v7.AddArg(v8)
+ v11 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
+ v12 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v13 := b.NewValue0(v.Line, OpRsh16Ux32, config.fe.TypeUInt16())
+ v13.AddArg(s)
+ v14 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
+ v14.AuxInt = 5
+ v13.AddArg(v14)
+ v12.AddArg(v13)
+ v11.AddArg(v12)
+ v7.AddArg(v11)
+ v1.AddArg(v7)
+ v.AddArg(v1)
+ return true
+ }
+ return false
+}
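
Note: the signed Rsh64x* low word needs one extra guard compared to the unsigned form above. The hi >> (s-32) term is only meaningful once s >= 32, but as written it would leak sign bits for small s, so it is ANDed with Zeromask(s >> 5), and s >> 5 is nonzero exactly when s >= 32. A sketch of the low-word computation (illustrative helper; Go's out-of-range shift semantics match the generic ops here):

    // rsh64xLow mirrors the masked low word of Rsh64x16.
    func rsh64xLow(hi int32, lo uint32, s uint) uint32 {
            var mask uint32
            if s>>5 != 0 { // Zeromask of s>>5: set iff s >= 32
                    mask = 0xffffffff
            }
            return lo>>s | uint32(hi)<<(32-s) | uint32(hi>>(s-32))&mask
    }
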
+func rewriteValuedec64_OpRsh64x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x32 (Int64Make hi lo) s)
+ // cond:
+ // result: (Int64Make (Rsh32x32 <config.fe.TypeUInt32()> hi s) (Or32 <config.fe.TypeUInt32()> (Or32 <config.fe.TypeUInt32()> (Rsh32Ux32 <config.fe.TypeUInt32()> lo s) (Lsh32x32 <config.fe.TypeUInt32()> hi (Sub32 <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [32]) s))) (And32 <config.fe.TypeUInt32()> (Rsh32x32 <config.fe.TypeUInt32()> hi (Sub32 <config.fe.TypeUInt32()> s (Const32 <config.fe.TypeUInt32()> [32]))) (Zeromask (Rsh32Ux32 <config.fe.TypeUInt32()> s (Const32 <config.fe.TypeUInt32()> [5]))))))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ hi := v_0.Args[0]
+ lo := v_0.Args[1]
+ s := v.Args[1]
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpRsh32x32, config.fe.TypeUInt32())
+ v0.AddArg(hi)
+ v0.AddArg(s)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Line, OpRsh32Ux32, config.fe.TypeUInt32())
+ v3.AddArg(lo)
+ v3.AddArg(s)
+ v2.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpLsh32x32, config.fe.TypeUInt32())
+ v4.AddArg(hi)
+ v5 := b.NewValue0(v.Line, OpSub32, config.fe.TypeUInt32())
+ v6 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
+ v6.AuxInt = 32
+ v5.AddArg(v6)
+ v5.AddArg(s)
+ v4.AddArg(v5)
+ v2.AddArg(v4)
+ v1.AddArg(v2)
+ v7 := b.NewValue0(v.Line, OpAnd32, config.fe.TypeUInt32())
+ v8 := b.NewValue0(v.Line, OpRsh32x32, config.fe.TypeUInt32())
+ v8.AddArg(hi)
+ v9 := b.NewValue0(v.Line, OpSub32, config.fe.TypeUInt32())
+ v9.AddArg(s)
+ v10 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
+ v10.AuxInt = 32
+ v9.AddArg(v10)
+ v8.AddArg(v9)
+ v7.AddArg(v8)
+ v11 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
+ v12 := b.NewValue0(v.Line, OpRsh32Ux32, config.fe.TypeUInt32())
+ v12.AddArg(s)
+ v13 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
+ v13.AuxInt = 5
+ v12.AddArg(v13)
+ v11.AddArg(v12)
+ v7.AddArg(v11)
+ v1.AddArg(v7)
+ v.AddArg(v1)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpRsh64x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x64 x (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Int64Make (Signmask (Int64Hi x)) (Signmask (Int64Hi x)))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := v_1_0.AuxInt
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpSignmask, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpSignmask, config.fe.TypeInt32())
+ v3 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v3.AddArg(x)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+ // match: (Rsh64x64 x (Int64Make (Const32 [0]) lo))
+ // cond:
+ // result: (Rsh64x32 x lo)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ if v_1_0.AuxInt != 0 {
+ break
+ }
+ lo := v_1.Args[1]
+ v.reset(OpRsh64x32)
+ v.AddArg(x)
+ v.AddArg(lo)
+ return true
+ }
+ // match: (Rsh64x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh64x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ hi := v_1.Args[0]
+ lo := v_1.Args[1]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh64x32)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
+ v1.AddArg(hi)
+ v0.AddArg(v1)
+ v0.AddArg(lo)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpRsh64x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x8 (Int64Make hi lo) s)
+ // cond:
+ // result: (Int64Make (Rsh32x8 <config.fe.TypeUInt32()> hi s) (Or32 <config.fe.TypeUInt32()> (Or32 <config.fe.TypeUInt32()> (Rsh32Ux8 <config.fe.TypeUInt32()> lo s) (Lsh32x8 <config.fe.TypeUInt32()> hi (Sub8 <config.fe.TypeUInt8()> (Const8 <config.fe.TypeUInt8()> [32]) s))) (And32 <config.fe.TypeUInt32()> (Rsh32x8 <config.fe.TypeUInt32()> hi (Sub8 <config.fe.TypeUInt8()> s (Const8 <config.fe.TypeUInt8()> [32]))) (Zeromask (ZeroExt8to32 (Rsh8Ux32 <config.fe.TypeUInt8()> s (Const32 <config.fe.TypeUInt32()> [5])))))))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ hi := v_0.Args[0]
+ lo := v_0.Args[1]
+ s := v.Args[1]
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpRsh32x8, config.fe.TypeUInt32())
+ v0.AddArg(hi)
+ v0.AddArg(s)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Line, OpRsh32Ux8, config.fe.TypeUInt32())
+ v3.AddArg(lo)
+ v3.AddArg(s)
+ v2.AddArg(v3)
+ v4 := b.NewValue0(v.Line, OpLsh32x8, config.fe.TypeUInt32())
+ v4.AddArg(hi)
+ v5 := b.NewValue0(v.Line, OpSub8, config.fe.TypeUInt8())
+ v6 := b.NewValue0(v.Line, OpConst8, config.fe.TypeUInt8())
+ v6.AuxInt = 32
+ v5.AddArg(v6)
+ v5.AddArg(s)
+ v4.AddArg(v5)
+ v2.AddArg(v4)
+ v1.AddArg(v2)
+ v7 := b.NewValue0(v.Line, OpAnd32, config.fe.TypeUInt32())
+ v8 := b.NewValue0(v.Line, OpRsh32x8, config.fe.TypeUInt32())
+ v8.AddArg(hi)
+ v9 := b.NewValue0(v.Line, OpSub8, config.fe.TypeUInt8())
+ v9.AddArg(s)
+ v10 := b.NewValue0(v.Line, OpConst8, config.fe.TypeUInt8())
+ v10.AuxInt = 32
+ v9.AddArg(v10)
+ v8.AddArg(v9)
+ v7.AddArg(v8)
+ v11 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
+ v12 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v13 := b.NewValue0(v.Line, OpRsh8Ux32, config.fe.TypeUInt8())
+ v13.AddArg(s)
+ v14 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
+ v14.AuxInt = 5
+ v13.AddArg(v14)
+ v12.AddArg(v13)
+ v11.AddArg(v12)
+ v7.AddArg(v11)
+ v1.AddArg(v7)
+ v.AddArg(v1)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpRsh8Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const32 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := v_1_0.AuxInt
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (Rsh8Ux64 x (Int64Make (Const32 [0]) lo))
+ // cond:
+ // result: (Rsh8Ux32 x lo)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ if v_1_0.AuxInt != 0 {
+ break
+ }
+ lo := v_1.Args[1]
+ v.reset(OpRsh8Ux32)
+ v.AddArg(x)
+ v.AddArg(lo)
+ return true
+ }
+ // match: (Rsh8Ux64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh8Ux32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ hi := v_1.Args[0]
+ lo := v_1.Args[1]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh8Ux32)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
+ v1.AddArg(hi)
+ v0.AddArg(v1)
+ v0.AddArg(lo)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpRsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x64 x (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Signmask (SignExt8to32 x))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := v_1_0.AuxInt
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpSignmask)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8x64 x (Int64Make (Const32 [0]) lo))
+ // cond:
+ // result: (Rsh8x32 x lo)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ if v_1_0.AuxInt != 0 {
+ break
+ }
+ lo := v_1.Args[1]
+ v.reset(OpRsh8x32)
+ v.AddArg(x)
+ v.AddArg(lo)
+ return true
+ }
+ // match: (Rsh8x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh8x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ hi := v_1.Args[0]
+ lo := v_1.Args[1]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh8x32)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
+ v1.AddArg(hi)
+ v0.AddArg(v1)
+ v0.AddArg(lo)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpSignExt16to64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt16to64 x)
+ // cond:
+ // result: (SignExt32to64 (SignExt16to32 x))
+ for {
+ x := v.Args[0]
+ v.reset(OpSignExt32to64)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpSignExt32to64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt32to64 x)
+ // cond:
+ // result: (Int64Make (Signmask x) x)
+ for {
+ x := v.Args[0]
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpSignmask, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuedec64_OpSignExt8to64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt8to64 x)
+ // cond:
+ // result: (SignExt32to64 (SignExt8to32 x))
+ for {
+ x := v.Args[0]
+ v.reset(OpSignExt32to64)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpStore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Store [8] dst (Int64Make hi lo) mem)
+ // cond:
+ // result: (Store [4] (OffPtr <hi.Type.PtrTo()> [4] dst) hi (Store [4] dst lo mem))
+ for {
+ if v.AuxInt != 8 {
+ break
+ }
+ dst := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ hi := v_1.Args[0]
+ lo := v_1.Args[1]
+ mem := v.Args[2]
+ v.reset(OpStore)
+ v.AuxInt = 4
+ v0 := b.NewValue0(v.Line, OpOffPtr, hi.Type.PtrTo())
+ v0.AuxInt = 4
+ v0.AddArg(dst)
+ v.AddArg(v0)
+ v.AddArg(hi)
+ v1 := b.NewValue0(v.Line, OpStore, TypeMem)
+ v1.AuxInt = 4
+ v1.AddArg(dst)
+ v1.AddArg(lo)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ return false
+}
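
Note: Store is the mirror image of the Load rules above: the 8-byte store becomes a store of the low word at offset 0, threaded through memory into a store of the high word at offset 4, again assuming the little-endian half layout. Sketch (illustrative helper):

    // store64 mirrors the Store rewrite: low word first at
    // dst+0, then the high word at dst+4.
    func store64(p *[2]uint32, v uint64) {
            p[0] = uint32(v)       // inner Store [4] dst lo mem
            p[1] = uint32(v >> 32) // outer Store [4] (OffPtr [4] dst) hi
    }
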
+func rewriteValuedec64_OpSub64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub64 x y)
+ // cond:
+ // result: (Int64Make (Sub32withcarry <config.fe.TypeInt32()> (Int64Hi x) (Int64Hi y) (Select0 <TypeFlags> (Sub32carry (Int64Lo x) (Int64Lo y)))) (Select1 <config.fe.TypeUInt32()> (Sub32carry (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpSub32withcarry, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v3 := b.NewValue0(v.Line, OpSelect0, TypeFlags)
+ v4 := b.NewValue0(v.Line, OpSub32carry, MakeTuple(TypeFlags, config.fe.TypeUInt32()))
+ v5 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v5.AddArg(x)
+ v4.AddArg(v5)
+ v6 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v6.AddArg(y)
+ v4.AddArg(v6)
+ v3.AddArg(v4)
+ v0.AddArg(v3)
+ v.AddArg(v0)
+ v7 := b.NewValue0(v.Line, OpSelect1, config.fe.TypeUInt32())
+ v8 := b.NewValue0(v.Line, OpSub32carry, MakeTuple(TypeFlags, config.fe.TypeUInt32()))
+ v9 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v9.AddArg(x)
+ v8.AddArg(v9)
+ v10 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v10.AddArg(y)
+ v8.AddArg(v10)
+ v7.AddArg(v8)
+ v.AddArg(v7)
+ return true
+ }
+}
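
Note: Sub64 threads the borrow between the halves: Sub32carry yields the low-word difference (Select1) plus a borrow flag (Select0), and Sub32withcarry folds that borrow into the high-word subtraction; the duplicated Sub32carry values are merged later by CSE. The identity, sketched with an illustrative helper:

    // sub64 mirrors the Sub64 rewrite: low difference and
    // borrow first, then the borrow-adjusted high difference.
    func sub64(x, y uint64) uint64 {
            xHi, xLo := uint32(x>>32), uint32(x)
            yHi, yLo := uint32(y>>32), uint32(y)
            lo := xLo - yLo
            var borrow uint32
            if xLo < yLo { // the flag Select0 extracts
                    borrow = 1
            }
            return uint64(xHi-yHi-borrow)<<32 | uint64(lo)
    }
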
+func rewriteValuedec64_OpTrunc64to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc64to16 (Int64Make _ lo))
+ // cond:
+ // result: (Trunc32to16 lo)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ lo := v_0.Args[1]
+ v.reset(OpTrunc32to16)
+ v.AddArg(lo)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpTrunc64to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc64to32 (Int64Make _ lo))
+ // cond:
+ // result: lo
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ lo := v_0.Args[1]
+ v.reset(OpCopy)
+ v.Type = lo.Type
+ v.AddArg(lo)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpTrunc64to8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc64to8 (Int64Make _ lo))
+ // cond:
+ // result: (Trunc32to8 lo)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ lo := v_0.Args[1]
+ v.reset(OpTrunc32to8)
+ v.AddArg(lo)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpXor64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Xor64 x y)
+ // cond:
+ // result: (Int64Make (Xor32 <config.fe.TypeUInt32()> (Int64Hi x) (Int64Hi y)) (Xor32 <config.fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpXor32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ v3 := b.NewValue0(v.Line, OpXor32, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v4.AddArg(x)
+ v3.AddArg(v4)
+ v5 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
+ v5.AddArg(y)
+ v3.AddArg(v5)
+ v.AddArg(v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpZeroExt16to64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt16to64 x)
+ // cond:
+ // result: (ZeroExt32to64 (ZeroExt16to32 x))
+ for {
+ x := v.Args[0]
+ v.reset(OpZeroExt32to64)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpZeroExt32to64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt32to64 x)
+ // cond:
+ // result: (Int64Make (Const32 <config.fe.TypeUInt32()> [0]) x)
+ for {
+ x := v.Args[0]
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuedec64_OpZeroExt8to64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt8to64 x)
+ // cond:
+ // result: (ZeroExt32to64 (ZeroExt8to32 x))
+ for {
+ x := v.Args[0]
+ v.reset(OpZeroExt32to64)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteBlockdec64(b *Block) bool {
+ switch b.Kind {
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
index 9f08b3c4eb..f4f2b50f62 100644
--- a/src/cmd/compile/internal/ssa/rewritegeneric.go
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -54,8 +54,12 @@ func rewriteValuegeneric(v *Value, config *Config) bool {
return rewriteValuegeneric_OpCvt32Fto64F(v, config)
case OpCvt64Fto32F:
return rewriteValuegeneric_OpCvt64Fto32F(v, config)
+ case OpDiv32F:
+ return rewriteValuegeneric_OpDiv32F(v, config)
case OpDiv64:
return rewriteValuegeneric_OpDiv64(v, config)
+ case OpDiv64F:
+ return rewriteValuegeneric_OpDiv64F(v, config)
case OpDiv64u:
return rewriteValuegeneric_OpDiv64u(v, config)
case OpEq16:
@@ -498,6 +502,40 @@ func rewriteValuegeneric_OpAdd32F(v *Value, config *Config) bool {
v.AuxInt = f2i(float64(i2f32(c) + i2f32(d)))
return true
}
+ // match: (Add32F x (Const32F [0]))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst32F {
+ break
+ }
+ if v_1.AuxInt != 0 {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (Add32F (Const32F [0]) x)
+ // cond:
+ // result: x
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst32F {
+ break
+ }
+ if v_0.AuxInt != 0 {
+ break
+ }
+ x := v.Args[1]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
return false
}
func rewriteValuegeneric_OpAdd64(v *Value, config *Config) bool {
@@ -582,6 +620,40 @@ func rewriteValuegeneric_OpAdd64F(v *Value, config *Config) bool {
v.AuxInt = f2i(i2f(c) + i2f(d))
return true
}
+ // match: (Add64F x (Const64F [0]))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64F {
+ break
+ }
+ if v_1.AuxInt != 0 {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (Add64F (Const64F [0]) x)
+ // cond:
+ // result: x
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst64F {
+ break
+ }
+ if v_0.AuxInt != 0 {
+ break
+ }
+ x := v.Args[1]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
return false
}
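
Note: the new Add32F/Add64F zero folds are only exact if the sign of zero may be ignored: IEEE 754 defines -0.0 + 0.0 as +0.0, so rewriting the sum to x preserves == (which treats the two zeros as equal) but can flip the sign bit of a zero result. A small standalone demonstration of the corner case, not part of this patch:

    package main

    import (
            "fmt"
            "math"
    )

    func main() {
            x := math.Copysign(0, -1) // -0.0
            fmt.Println(x+0 == x)     // true: == cannot see the difference
            fmt.Println(math.Signbit(x+0), math.Signbit(x)) // false true
    }
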
func rewriteValuegeneric_OpAdd8(v *Value, config *Config) bool {
@@ -661,8 +733,8 @@ func rewriteValuegeneric_OpAddPtr(v *Value, config *Config) bool {
c := v_1.AuxInt
v.reset(OpOffPtr)
v.Type = t
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
return false
@@ -1298,19 +1370,19 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// cond: v.Type.IsString()
// result: (StringMake (Arg <config.fe.TypeBytePtr()> {n} [off]) (Arg <config.fe.TypeInt()> {n} [off+config.PtrSize]))
for {
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(v.Type.IsString()) {
break
}
v.reset(OpStringMake)
v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeBytePtr())
- v0.Aux = n
v0.AuxInt = off
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt())
- v1.Aux = n
v1.AuxInt = off + config.PtrSize
+ v1.Aux = n
v.AddArg(v1)
return true
}
@@ -1318,23 +1390,23 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// cond: v.Type.IsSlice()
// result: (SliceMake (Arg <v.Type.ElemType().PtrTo()> {n} [off]) (Arg <config.fe.TypeInt()> {n} [off+config.PtrSize]) (Arg <config.fe.TypeInt()> {n} [off+2*config.PtrSize]))
for {
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(v.Type.IsSlice()) {
break
}
v.reset(OpSliceMake)
v0 := b.NewValue0(v.Line, OpArg, v.Type.ElemType().PtrTo())
- v0.Aux = n
v0.AuxInt = off
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt())
- v1.Aux = n
v1.AuxInt = off + config.PtrSize
+ v1.Aux = n
v.AddArg(v1)
v2 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt())
- v2.Aux = n
v2.AuxInt = off + 2*config.PtrSize
+ v2.Aux = n
v.AddArg(v2)
return true
}
@@ -1342,19 +1414,19 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// cond: v.Type.IsInterface()
// result: (IMake (Arg <config.fe.TypeBytePtr()> {n} [off]) (Arg <config.fe.TypeBytePtr()> {n} [off+config.PtrSize]))
for {
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(v.Type.IsInterface()) {
break
}
v.reset(OpIMake)
v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeBytePtr())
- v0.Aux = n
v0.AuxInt = off
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeBytePtr())
- v1.Aux = n
v1.AuxInt = off + config.PtrSize
+ v1.Aux = n
v.AddArg(v1)
return true
}
@@ -1362,19 +1434,19 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// cond: v.Type.IsComplex() && v.Type.Size() == 16
// result: (ComplexMake (Arg <config.fe.TypeFloat64()> {n} [off]) (Arg <config.fe.TypeFloat64()> {n} [off+8]))
for {
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(v.Type.IsComplex() && v.Type.Size() == 16) {
break
}
v.reset(OpComplexMake)
v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat64())
- v0.Aux = n
v0.AuxInt = off
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat64())
- v1.Aux = n
v1.AuxInt = off + 8
+ v1.Aux = n
v.AddArg(v1)
return true
}
@@ -1382,19 +1454,19 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// cond: v.Type.IsComplex() && v.Type.Size() == 8
// result: (ComplexMake (Arg <config.fe.TypeFloat32()> {n} [off]) (Arg <config.fe.TypeFloat32()> {n} [off+4]))
for {
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(v.Type.IsComplex() && v.Type.Size() == 8) {
break
}
v.reset(OpComplexMake)
v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat32())
- v0.Aux = n
v0.AuxInt = off
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat32())
- v1.Aux = n
v1.AuxInt = off + 4
+ v1.Aux = n
v.AddArg(v1)
return true
}
@@ -1414,15 +1486,15 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// result: (StructMake1 (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]))
for {
t := v.Type
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(t.IsStruct() && t.NumFields() == 1 && config.fe.CanSSA(t)) {
break
}
v.reset(OpStructMake1)
v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0))
- v0.Aux = n
v0.AuxInt = off + t.FieldOff(0)
+ v0.Aux = n
v.AddArg(v0)
return true
}
@@ -1431,19 +1503,19 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// result: (StructMake2 (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]) (Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)]))
for {
t := v.Type
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(t.IsStruct() && t.NumFields() == 2 && config.fe.CanSSA(t)) {
break
}
v.reset(OpStructMake2)
v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0))
- v0.Aux = n
v0.AuxInt = off + t.FieldOff(0)
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, t.FieldType(1))
- v1.Aux = n
v1.AuxInt = off + t.FieldOff(1)
+ v1.Aux = n
v.AddArg(v1)
return true
}
@@ -1452,23 +1524,23 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// result: (StructMake3 (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]) (Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)]) (Arg <t.FieldType(2)> {n} [off+t.FieldOff(2)]))
for {
t := v.Type
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(t.IsStruct() && t.NumFields() == 3 && config.fe.CanSSA(t)) {
break
}
v.reset(OpStructMake3)
v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0))
- v0.Aux = n
v0.AuxInt = off + t.FieldOff(0)
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, t.FieldType(1))
- v1.Aux = n
v1.AuxInt = off + t.FieldOff(1)
+ v1.Aux = n
v.AddArg(v1)
v2 := b.NewValue0(v.Line, OpArg, t.FieldType(2))
- v2.Aux = n
v2.AuxInt = off + t.FieldOff(2)
+ v2.Aux = n
v.AddArg(v2)
return true
}
@@ -1477,27 +1549,27 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// result: (StructMake4 (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]) (Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)]) (Arg <t.FieldType(2)> {n} [off+t.FieldOff(2)]) (Arg <t.FieldType(3)> {n} [off+t.FieldOff(3)]))
for {
t := v.Type
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(t.IsStruct() && t.NumFields() == 4 && config.fe.CanSSA(t)) {
break
}
v.reset(OpStructMake4)
v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0))
- v0.Aux = n
v0.AuxInt = off + t.FieldOff(0)
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, t.FieldType(1))
- v1.Aux = n
v1.AuxInt = off + t.FieldOff(1)
+ v1.Aux = n
v.AddArg(v1)
v2 := b.NewValue0(v.Line, OpArg, t.FieldType(2))
- v2.Aux = n
v2.AuxInt = off + t.FieldOff(2)
+ v2.Aux = n
v.AddArg(v2)
v3 := b.NewValue0(v.Line, OpArg, t.FieldType(3))
- v3.Aux = n
v3.AuxInt = off + t.FieldOff(3)
+ v3.Aux = n
v.AddArg(v3)
return true
}
@@ -1842,6 +1914,44 @@ func rewriteValuegeneric_OpCvt64Fto32F(v *Value, config *Config) bool {
}
return false
}
+func rewriteValuegeneric_OpDiv32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32F x (Const32F [f2i(1)]))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst32F {
+ break
+ }
+ if v_1.AuxInt != f2i(1) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (Div32F x (Const32F [f2i(-1)]))
+ // cond:
+ // result: (Neg32F x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst32F {
+ break
+ }
+ if v_1.AuxInt != f2i(-1) {
+ break
+ }
+ v.reset(OpNeg32F)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValuegeneric_OpDiv64(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -1997,6 +2107,44 @@ func rewriteValuegeneric_OpDiv64(v *Value, config *Config) bool {
}
return false
}
+func rewriteValuegeneric_OpDiv64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64F x (Const64F [f2i(1)]))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64F {
+ break
+ }
+ if v_1.AuxInt != f2i(1) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (Div64F x (Const64F [f2i(-1)]))
+ // cond:
+ // result: (Neg64F x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64F {
+ break
+ }
+ if v_1.AuxInt != f2i(-1) {
+ break
+ }
+ v.reset(OpNeg64F)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
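
The f2i helper these matchers compare against stores a float64's IEEE-754 bit pattern in the int64 AuxInt, which is why the constants above are written f2i(1) and f2i(-1) rather than 1 and -1. A standalone sketch mirroring the helper (the real one is defined with the rewrite support code):

    package main

    import (
    	"fmt"
    	"math"
    )

    // f2i mirrors the rewrite helper: a float64's bit pattern as int64.
    func f2i(f float64) int64 { return int64(math.Float64bits(f)) }

    func main() {
    	fmt.Printf("%#x\n", uint64(f2i(1)))  // 0x3ff0000000000000
    	fmt.Printf("%#x\n", uint64(f2i(-1))) // 0xbff0000000000000
    }
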
func rewriteValuegeneric_OpDiv64u(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -5122,6 +5270,22 @@ func rewriteValuegeneric_OpMul16(v *Value, config *Config) bool {
v.AuxInt = int64(int16(c * d))
return true
}
+ // match: (Mul16 (Const16 [-1]) x)
+ // cond:
+ // result: (Neg16 x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst16 {
+ break
+ }
+ if v_0.AuxInt != -1 {
+ break
+ }
+ x := v.Args[1]
+ v.reset(OpNeg16)
+ v.AddArg(x)
+ return true
+ }
// match: (Mul16 x (Const16 <t> [c]))
// cond: x.Op != OpConst16
// result: (Mul16 (Const16 <t> [c]) x)
@@ -5181,6 +5345,22 @@ func rewriteValuegeneric_OpMul32(v *Value, config *Config) bool {
v.AuxInt = int64(int32(c * d))
return true
}
+ // match: (Mul32 (Const32 [-1]) x)
+ // cond:
+ // result: (Neg32 x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst32 {
+ break
+ }
+ if v_0.AuxInt != -1 {
+ break
+ }
+ x := v.Args[1]
+ v.reset(OpNeg32)
+ v.AddArg(x)
+ return true
+ }
// match: (Mul32 x (Const32 <t> [c]))
// cond: x.Op != OpConst32
// result: (Mul32 (Const32 <t> [c]) x)
@@ -5278,6 +5458,72 @@ func rewriteValuegeneric_OpMul32F(v *Value, config *Config) bool {
v.AuxInt = f2i(float64(i2f32(c) * i2f32(d)))
return true
}
+ // match: (Mul32F x (Const32F [f2i(1)]))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst32F {
+ break
+ }
+ if v_1.AuxInt != f2i(1) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (Mul32F (Const32F [f2i(1)]) x)
+ // cond:
+ // result: x
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst32F {
+ break
+ }
+ if v_0.AuxInt != f2i(1) {
+ break
+ }
+ x := v.Args[1]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (Mul32F x (Const32F [f2i(-1)]))
+ // cond:
+ // result: (Neg32F x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst32F {
+ break
+ }
+ if v_1.AuxInt != f2i(-1) {
+ break
+ }
+ v.reset(OpNeg32F)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Mul32F (Const32F [f2i(-1)]) x)
+ // cond:
+ // result: (Neg32F x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst32F {
+ break
+ }
+ if v_0.AuxInt != f2i(-1) {
+ break
+ }
+ x := v.Args[1]
+ v.reset(OpNeg32F)
+ v.AddArg(x)
+ return true
+ }
return false
}
func rewriteValuegeneric_OpMul64(v *Value, config *Config) bool {
@@ -5301,6 +5547,22 @@ func rewriteValuegeneric_OpMul64(v *Value, config *Config) bool {
v.AuxInt = c * d
return true
}
+ // match: (Mul64 (Const64 [-1]) x)
+ // cond:
+ // result: (Neg64 x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst64 {
+ break
+ }
+ if v_0.AuxInt != -1 {
+ break
+ }
+ x := v.Args[1]
+ v.reset(OpNeg64)
+ v.AddArg(x)
+ return true
+ }
// match: (Mul64 x (Const64 <t> [c]))
// cond: x.Op != OpConst64
// result: (Mul64 (Const64 <t> [c]) x)
@@ -5398,6 +5660,72 @@ func rewriteValuegeneric_OpMul64F(v *Value, config *Config) bool {
v.AuxInt = f2i(i2f(c) * i2f(d))
return true
}
+ // match: (Mul64F x (Const64F [f2i(1)]))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64F {
+ break
+ }
+ if v_1.AuxInt != f2i(1) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (Mul64F (Const64F [f2i(1)]) x)
+ // cond:
+ // result: x
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst64F {
+ break
+ }
+ if v_0.AuxInt != f2i(1) {
+ break
+ }
+ x := v.Args[1]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (Mul64F x (Const64F [f2i(-1)]))
+ // cond:
+ // result: (Neg64F x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64F {
+ break
+ }
+ if v_1.AuxInt != f2i(-1) {
+ break
+ }
+ v.reset(OpNeg64F)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Mul64F (Const64F [f2i(-1)]) x)
+ // cond:
+ // result: (Neg64F x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst64F {
+ break
+ }
+ if v_0.AuxInt != f2i(-1) {
+ break
+ }
+ x := v.Args[1]
+ v.reset(OpNeg64F)
+ v.AddArg(x)
+ return true
+ }
return false
}
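
At the Go source level these -1 rules turn an explicit multiplication into a negation. A minimal illustration (a sketch of intent, not a codegen guarantee):

    package mulneg

    // x * -1 matches (Mul64 (Const64 [-1]) x) once the constant is
    // canonicalized to the left operand (see the rule that follows it
    // above), so it lowers to Neg64 x.
    func Negate(x int64) int64 { return x * -1 }
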
func rewriteValuegeneric_OpMul8(v *Value, config *Config) bool {
@@ -5421,6 +5749,22 @@ func rewriteValuegeneric_OpMul8(v *Value, config *Config) bool {
v.AuxInt = int64(int8(c * d))
return true
}
+ // match: (Mul8 (Const8 [-1]) x)
+ // cond:
+ // result: (Neg8 x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst8 {
+ break
+ }
+ if v_0.AuxInt != -1 {
+ break
+ }
+ x := v.Args[1]
+ v.reset(OpNeg8)
+ v.AddArg(x)
+ return true
+ }
// match: (Mul8 x (Const8 <t> [c]))
// cond: x.Op != OpConst8
// result: (Mul8 (Const8 <t> [c]) x)
@@ -6015,26 +6359,26 @@ func rewriteValuegeneric_OpOffPtr(v *Value, config *Config) bool {
// cond:
// result: (OffPtr p [a+b])
for {
+ a := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpOffPtr {
break
}
- p := v_0.Args[0]
b := v_0.AuxInt
- a := v.AuxInt
+ p := v_0.Args[0]
v.reset(OpOffPtr)
- v.AddArg(p)
v.AuxInt = a + b
+ v.AddArg(p)
return true
}
// match: (OffPtr p [0])
// cond: v.Type.Compare(p.Type) == CMPeq
// result: p
for {
- p := v.Args[0]
if v.AuxInt != 0 {
break
}
+ p := v.Args[0]
if !(v.Type.Compare(p.Type) == CMPeq) {
break
}
@@ -8837,7 +9181,7 @@ func rewriteValuegeneric_OpStore(v *Value, config *Config) bool {
}
// match: (Store [size] dst (Load <t> src mem) mem)
// cond: !config.fe.CanSSA(t)
- // result: (Move [size] dst src mem)
+ // result: (Move [MakeSizeAndAlign(size, t.Alignment()).Int64()] dst src mem)
for {
size := v.AuxInt
dst := v.Args[0]
@@ -8855,7 +9199,7 @@ func rewriteValuegeneric_OpStore(v *Value, config *Config) bool {
break
}
v.reset(OpMove)
- v.AuxInt = size
+ v.AuxInt = MakeSizeAndAlign(size, t.Alignment()).Int64()
v.AddArg(dst)
v.AddArg(src)
v.AddArg(mem)
@@ -8863,7 +9207,7 @@ func rewriteValuegeneric_OpStore(v *Value, config *Config) bool {
}
// match: (Store [size] dst (Load <t> src mem) (VarDef {x} mem))
// cond: !config.fe.CanSSA(t)
- // result: (Move [size] dst src (VarDef {x} mem))
+ // result: (Move [MakeSizeAndAlign(size, t.Alignment()).Int64()] dst src (VarDef {x} mem))
for {
size := v.AuxInt
dst := v.Args[0]
@@ -8886,7 +9230,7 @@ func rewriteValuegeneric_OpStore(v *Value, config *Config) bool {
break
}
v.reset(OpMove)
- v.AuxInt = size
+ v.AuxInt = MakeSizeAndAlign(size, t.Alignment()).Int64()
v.AddArg(dst)
v.AddArg(src)
v0 := b.NewValue0(v.Line, OpVarDef, TypeMem)
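
The Move op's AuxInt now carries the alignment alongside the size. A hypothetical standalone sketch of one plausible packing (the real MakeSizeAndAlign lives in the ssa package and its encoding may differ):

    package main

    import "fmt"

    type sizeAndAlign int64

    // makeSizeAndAlign packs the size into the low 56 bits and the
    // alignment into the top byte (hypothetical encoding).
    func makeSizeAndAlign(size, align int64) sizeAndAlign {
    	return sizeAndAlign(size | align<<56)
    }

    func (x sizeAndAlign) size() int64  { return int64(x) & (1<<56 - 1) }
    func (x sizeAndAlign) align() int64 { return int64(x) >> 56 }

    func main() {
    	sa := makeSizeAndAlign(24, 8)
    	fmt.Println(sa.size(), sa.align()) // 24 8
    }
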
@@ -9348,6 +9692,23 @@ func rewriteValuegeneric_OpSub32F(v *Value, config *Config) bool {
v.AuxInt = f2i(float64(i2f32(c) - i2f32(d)))
return true
}
+ // match: (Sub32F x (Const32F [0]))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst32F {
+ break
+ }
+ if v_1.AuxInt != 0 {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
return false
}
func rewriteValuegeneric_OpSub64(v *Value, config *Config) bool {
@@ -9463,6 +9824,23 @@ func rewriteValuegeneric_OpSub64F(v *Value, config *Config) bool {
v.AuxInt = f2i(i2f(c) - i2f(d))
return true
}
+ // match: (Sub64F x (Const64F [0]))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64F {
+ break
+ }
+ if v_1.AuxInt != 0 {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
return false
}
func rewriteValuegeneric_OpSub8(v *Value, config *Config) bool {
diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go
index 765f0c1277..86f1461c42 100644
--- a/src/cmd/compile/internal/ssa/schedule.go
+++ b/src/cmd/compile/internal/ssa/schedule.go
@@ -8,6 +8,7 @@ import "container/heap"
const (
ScorePhi = iota // towards top of block
+ ScoreReadTuple
ScoreVarDef
ScoreMemory
ScoreDefault
@@ -83,7 +84,7 @@ func schedule(f *Func) {
// Compute score. Larger numbers are scheduled closer to the end of the block.
for _, v := range b.Values {
switch {
- case v.Op == OpAMD64LoweredGetClosurePtr:
+ case v.Op == OpAMD64LoweredGetClosurePtr || v.Op == OpPPC64LoweredGetClosurePtr || v.Op == OpARMLoweredGetClosurePtr || v.Op == OpARM64LoweredGetClosurePtr || v.Op == Op386LoweredGetClosurePtr:
// We also score LoweredGetClosurePtr as early as possible to ensure that the
// context register is not stomped. LoweredGetClosurePtr should only appear
// in the entry block where there are no phi functions, so there is no
@@ -103,7 +104,14 @@ func schedule(f *Func) {
// reduce register pressure. It also helps make sure
// VARDEF ops are scheduled before the corresponding LEA.
score[v.ID] = ScoreMemory
- case v.Type.IsFlags():
+ case v.Op == OpSelect0 || v.Op == OpSelect1:
+ // Schedule the pseudo-op of reading part of a tuple
+ // immediately after the tuple-generating op, since
+ // this value is already live. This also removes its
+ // false dependency on the other part of the tuple.
+ // It also ensures the tuple is never spilled.
+ score[v.ID] = ScoreReadTuple
+ case v.Type.IsFlags() || v.Type.IsTuple():
// Schedule flag register generation as late as possible.
// This makes sure that we only have one live flags
// value at a time.
@@ -188,6 +196,7 @@ func schedule(f *Func) {
// Schedule highest priority value, update use counts, repeat.
order = order[:0]
+ tuples := make(map[ID][]*Value)
for {
// Find highest priority schedulable value.
// Note that schedule is assembled backwards.
@@ -199,7 +208,31 @@ func schedule(f *Func) {
v := heap.Pop(priq).(*Value)
// Add it to the schedule.
- order = append(order, v)
+ // Do not emit tuple-reading ops until we're ready to emit the tuple-generating op.
+ // TODO: maybe remove the ReadTuple score above if it does not help performance.
+ switch {
+ case v.Op == OpSelect0:
+ if tuples[v.Args[0].ID] == nil {
+ tuples[v.Args[0].ID] = make([]*Value, 2)
+ }
+ tuples[v.Args[0].ID][0] = v
+ case v.Op == OpSelect1:
+ if tuples[v.Args[0].ID] == nil {
+ tuples[v.Args[0].ID] = make([]*Value, 2)
+ }
+ tuples[v.Args[0].ID][1] = v
+ case v.Type.IsTuple() && tuples[v.ID] != nil:
+ if tuples[v.ID][1] != nil {
+ order = append(order, tuples[v.ID][1])
+ }
+ if tuples[v.ID][0] != nil {
+ order = append(order, tuples[v.ID][0])
+ }
+ delete(tuples, v.ID)
+ fallthrough
+ default:
+ order = append(order, v)
+ }
// Update use counts of arguments.
for _, w := range v.Args {
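
A self-contained toy model of the buffering above (hypothetical op and type names; the real scheduler works on *Value and IDs): selectors popped from the priority queue are held back until their tuple-generating op is emitted, so the final (reversed) order keeps them adjacent.

    package main

    import "fmt"

    type op struct {
    	name  string
    	tuple *op // for selectors: the tuple-generating op they read
    	sel   int // 0 or 1
    }

    // orderBackwards mimics the loop above: the schedule is assembled
    // backwards, selectors are buffered, and they are emitted just
    // before their generator so that, once reversed, they follow it
    // immediately.
    func orderBackwards(popped []*op) []string {
    	tuples := map[*op][2]*op{}
    	var order []string
    	for _, v := range popped {
    		if v.tuple != nil {
    			pair := tuples[v.tuple]
    			pair[v.sel] = v
    			tuples[v.tuple] = pair
    			continue
    		}
    		if pair, ok := tuples[v]; ok {
    			if pair[1] != nil {
    				order = append(order, pair[1].name)
    			}
    			if pair[0] != nil {
    				order = append(order, pair[0].name)
    			}
    			delete(tuples, v)
    		}
    		order = append(order, v.name)
    	}
    	// Reverse to obtain the final block order.
    	for i, j := 0, len(order)-1; i < j; i, j = i+1, j-1 {
    		order[i], order[j] = order[j], order[i]
    	}
    	return order
    }

    func main() {
    	add := &op{name: "ADDLcarry"} // a tuple-generating op
    	popped := []*op{
    		{name: "Select1", tuple: add, sel: 1},
    		{name: "Select0", tuple: add, sel: 0},
    		add,
    	}
    	fmt.Println(orderBackwards(popped)) // [ADDLcarry Select0 Select1]
    }
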
diff --git a/src/cmd/compile/internal/ssa/tighten.go b/src/cmd/compile/internal/ssa/tighten.go
index 2f7c30929d..5be55ac858 100644
--- a/src/cmd/compile/internal/ssa/tighten.go
+++ b/src/cmd/compile/internal/ssa/tighten.go
@@ -54,13 +54,19 @@ func tighten(f *Func) {
for _, b := range f.Blocks {
for i := 0; i < len(b.Values); i++ {
v := b.Values[i]
- if v.Op == OpPhi || v.Op == OpGetClosurePtr || v.Op == OpConvert || v.Op == OpArg {
+ switch v.Op {
+ case OpPhi, OpGetClosurePtr, OpConvert, OpArg:
// GetClosurePtr & Arg must stay in entry block.
// OpConvert must not float over call sites.
// TODO do we instead need a dependence edge of some sort for OpConvert?
// Would memory do the trick, or do we need something else that relates
// to safe point operations?
continue
+ case OpSelect0, OpSelect1:
+ // Tuple selectors must stay with the tuple generator.
+ continue
+ }
if len(v.Args) > 0 && v.Args[len(v.Args)-1].Type.IsMemory() {
// We can't move values which have a memory arg - it might
diff --git a/src/cmd/compile/internal/ssa/type.go b/src/cmd/compile/internal/ssa/type.go
index 91a4efe78f..4470dc1010 100644
--- a/src/cmd/compile/internal/ssa/type.go
+++ b/src/cmd/compile/internal/ssa/type.go
@@ -27,6 +27,7 @@ type Type interface {
IsMemory() bool // special ssa-package-only types
IsFlags() bool
IsVoid() bool
+ IsTuple() bool
ElemType() Type // given []T or *T or [n]T, return T
PtrTo() Type // given T, return *T
@@ -69,6 +70,7 @@ func (t *CompilerType) IsInterface() bool { return false }
func (t *CompilerType) IsMemory() bool { return t.Memory }
func (t *CompilerType) IsFlags() bool { return t.Flags }
func (t *CompilerType) IsVoid() bool { return t.Void }
+func (t *CompilerType) IsTuple() bool { return false }
func (t *CompilerType) String() string { return t.Name }
func (t *CompilerType) SimpleString() string { return t.Name }
func (t *CompilerType) ElemType() Type { panic("not implemented") }
@@ -79,6 +81,38 @@ func (t *CompilerType) FieldOff(i int) int64 { panic("not implemented") }
func (t *CompilerType) FieldName(i int) string { panic("not implemented") }
func (t *CompilerType) NumElem() int64 { panic("not implemented") }
+type TupleType struct {
+ first Type
+ second Type
+}
+
+func (t *TupleType) Size() int64 { panic("not implemented") }
+func (t *TupleType) Alignment() int64 { panic("not implemented") }
+func (t *TupleType) IsBoolean() bool { return false }
+func (t *TupleType) IsInteger() bool { return false }
+func (t *TupleType) IsSigned() bool { return false }
+func (t *TupleType) IsFloat() bool { return false }
+func (t *TupleType) IsComplex() bool { return false }
+func (t *TupleType) IsPtrShaped() bool { return false }
+func (t *TupleType) IsString() bool { return false }
+func (t *TupleType) IsSlice() bool { return false }
+func (t *TupleType) IsArray() bool { return false }
+func (t *TupleType) IsStruct() bool { return false }
+func (t *TupleType) IsInterface() bool { return false }
+func (t *TupleType) IsMemory() bool { return false }
+func (t *TupleType) IsFlags() bool { return false }
+func (t *TupleType) IsVoid() bool { return false }
+func (t *TupleType) IsTuple() bool { return true }
+func (t *TupleType) String() string { return t.first.String() + "," + t.second.String() }
+func (t *TupleType) SimpleString() string { return "Tuple" }
+func (t *TupleType) ElemType() Type { panic("not implemented") }
+func (t *TupleType) PtrTo() Type { panic("not implemented") }
+func (t *TupleType) NumFields() int { panic("not implemented") }
+func (t *TupleType) FieldType(i int) Type { panic("not implemented") }
+func (t *TupleType) FieldOff(i int) int64 { panic("not implemented") }
+func (t *TupleType) FieldName(i int) string { panic("not implemented") }
+func (t *TupleType) NumElem() int64 { panic("not implemented") }
+
// Cmp is a comparison between values a and b.
// -1 if a < b
// 0 if a == b
@@ -116,6 +150,25 @@ func (t *CompilerType) Compare(u Type) Cmp {
return CMPlt
}
+func (t *TupleType) Compare(u Type) Cmp {
+ // ssa.TupleType is greater than ssa.CompilerType
+ if _, ok := u.(*CompilerType); ok {
+ return CMPgt
+ }
+ // ssa.TupleType is smaller than any other type
+ x, ok := u.(*TupleType)
+ if !ok {
+ return CMPlt
+ }
+ if t == x {
+ return CMPeq
+ }
+ if c := t.first.Compare(x.first); c != CMPeq {
+ return c
+ }
+ return t.second.Compare(x.second)
+}
+
var (
TypeInvalid = &CompilerType{Name: "invalid"}
TypeMem = &CompilerType{Name: "mem", Memory: true}
@@ -123,3 +176,7 @@ var (
TypeVoid = &CompilerType{Name: "void", Void: true}
TypeInt128 = &CompilerType{Name: "int128", size: 16, Int128: true}
)
+
+func MakeTuple(t0, t1 Type) *TupleType {
+ return &TupleType{first: t0, second: t1}
+}
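
A minimal test-style sketch exercising the new tuple type and its ordering (an assumption: it would live in a _test.go file inside package ssa, where testing is imported):

    func TestTupleTypeSketch(t *testing.T) {
    	tt := MakeTuple(TypeInt128, TypeFlags)
    	if !tt.IsTuple() || tt.IsFlags() {
    		t.Fatalf("tuple misclassified: %s", tt)
    	}
    	if tt.Compare(tt) != CMPeq {
    		t.Fatal("tuple must compare equal to itself")
    	}
    	// TupleType sorts above CompilerType and below everything else.
    	if tt.Compare(TypeMem) != CMPgt {
    		t.Fatal("TupleType should sort above CompilerType")
    	}
    }
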
diff --git a/src/cmd/compile/internal/ssa/type_test.go b/src/cmd/compile/internal/ssa/type_test.go
index 3b1a892083..a76a0651bb 100644
--- a/src/cmd/compile/internal/ssa/type_test.go
+++ b/src/cmd/compile/internal/ssa/type_test.go
@@ -39,6 +39,7 @@ func (t *TypeImpl) IsStruct() bool { return t.struct_ }
func (t *TypeImpl) IsInterface() bool { return t.inter }
func (t *TypeImpl) IsMemory() bool { return false }
func (t *TypeImpl) IsFlags() bool { return false }
+func (t *TypeImpl) IsTuple() bool { return false }
func (t *TypeImpl) IsVoid() bool { return false }
func (t *TypeImpl) String() string { return t.Name }
func (t *TypeImpl) SimpleString() string { return t.Name }
diff --git a/src/cmd/compile/internal/x86/387.go b/src/cmd/compile/internal/x86/387.go
new file mode 100644
index 0000000000..d7c9c71c4a
--- /dev/null
+++ b/src/cmd/compile/internal/x86/387.go
@@ -0,0 +1,386 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x86
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/compile/internal/ssa"
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+ "math"
+)
+
+// ssaGenValue387 generates code for v using 387 instructions.
+// It reports whether the instruction was handled by this routine.
+func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) bool {
+ // The SSA compiler pretends that it has an SSE backend.
+ // If we don't have one of those, we need to translate
+ // all the SSE ops to equivalent 387 ops. That's what this
+ // function does.
+
+ switch v.Op {
+ case ssa.Op386MOVSSconst, ssa.Op386MOVSDconst:
+ p := gc.Prog(loadPush(v.Type))
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_F0
+ popAndSave(s, v)
+ return true
+ case ssa.Op386MOVSSconst2, ssa.Op386MOVSDconst2:
+ p := gc.Prog(loadPush(v.Type))
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_F0
+ popAndSave(s, v)
+ return true
+
+ case ssa.Op386MOVSSload, ssa.Op386MOVSDload, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1, ssa.Op386MOVSSloadidx4, ssa.Op386MOVSDloadidx8:
+ p := gc.Prog(loadPush(v.Type))
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.From, v)
+ switch v.Op {
+ case ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1:
+ p.From.Scale = 1
+ p.From.Index = gc.SSARegNum(v.Args[1])
+ case ssa.Op386MOVSSloadidx4:
+ p.From.Scale = 4
+ p.From.Index = gc.SSARegNum(v.Args[1])
+ case ssa.Op386MOVSDloadidx8:
+ p.From.Scale = 8
+ p.From.Index = gc.SSARegNum(v.Args[1])
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_F0
+ popAndSave(s, v)
+ return true
+
+ case ssa.Op386MOVSSstore, ssa.Op386MOVSDstore:
+ // Push to-be-stored value on top of stack.
+ push(s, v.Args[1])
+
+ // Pop and store value.
+ var op obj.As
+ switch v.Op {
+ case ssa.Op386MOVSSstore:
+ op = x86.AFMOVFP
+ case ssa.Op386MOVSDstore:
+ op = x86.AFMOVDP
+ }
+ p := gc.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_F0
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.To, v)
+ return true
+
+ case ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1, ssa.Op386MOVSSstoreidx4, ssa.Op386MOVSDstoreidx8:
+ push(s, v.Args[2])
+ var op obj.As
+ switch v.Op {
+ case ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSSstoreidx4:
+ op = x86.AFMOVFP
+ case ssa.Op386MOVSDstoreidx1, ssa.Op386MOVSDstoreidx8:
+ op = x86.AFMOVDP
+ }
+ p := gc.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_F0
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.To, v)
+ switch v.Op {
+ case ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1:
+ p.To.Scale = 1
+ p.To.Index = gc.SSARegNum(v.Args[1])
+ case ssa.Op386MOVSSstoreidx4:
+ p.To.Scale = 4
+ p.To.Index = gc.SSARegNum(v.Args[1])
+ case ssa.Op386MOVSDstoreidx8:
+ p.To.Scale = 8
+ p.To.Index = gc.SSARegNum(v.Args[1])
+ }
+ return true
+
+ case ssa.Op386ADDSS, ssa.Op386ADDSD, ssa.Op386SUBSS, ssa.Op386SUBSD,
+ ssa.Op386MULSS, ssa.Op386MULSD, ssa.Op386DIVSS, ssa.Op386DIVSD:
+ if gc.SSARegNum(v) != gc.SSARegNum(v.Args[0]) {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+
+ // Push arg1 on top of stack
+ push(s, v.Args[1])
+
+ // Set precision if needed. 64 bits is the default.
+ switch v.Op {
+ case ssa.Op386ADDSS, ssa.Op386SUBSS, ssa.Op386MULSS, ssa.Op386DIVSS:
+ p := gc.Prog(x86.AFSTCW)
+ scratch387(s, &p.To)
+ p = gc.Prog(x86.AFLDCW)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Sym = gc.Linksym(gc.Pkglookup("controlWord32", gc.Runtimepkg))
+ }
+
+ var op obj.As
+ switch v.Op {
+ case ssa.Op386ADDSS, ssa.Op386ADDSD:
+ op = x86.AFADDDP
+ case ssa.Op386SUBSS, ssa.Op386SUBSD:
+ op = x86.AFSUBDP
+ case ssa.Op386MULSS, ssa.Op386MULSD:
+ op = x86.AFMULDP
+ case ssa.Op386DIVSS, ssa.Op386DIVSD:
+ op = x86.AFDIVDP
+ }
+ p := gc.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_F0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = s.SSEto387[gc.SSARegNum(v)] + 1
+
+ // Restore precision if needed.
+ switch v.Op {
+ case ssa.Op386ADDSS, ssa.Op386SUBSS, ssa.Op386MULSS, ssa.Op386DIVSS:
+ p := gc.Prog(x86.AFLDCW)
+ scratch387(s, &p.From)
+ }
+
+ return true
+
+ case ssa.Op386UCOMISS, ssa.Op386UCOMISD:
+ push(s, v.Args[0])
+
+ // Compare.
+ p := gc.Prog(x86.AFUCOMP)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_F0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = s.SSEto387[gc.SSARegNum(v.Args[1])] + 1
+
+ // Save AX.
+ p = gc.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_AX
+ scratch387(s, &p.To)
+
+ // Move status word into AX.
+ p = gc.Prog(x86.AFSTSW)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX
+
+ // Then move the flags we need to the integer flags.
+ gc.Prog(x86.ASAHF)
+
+ // Restore AX.
+ p = gc.Prog(x86.AMOVL)
+ scratch387(s, &p.From)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX
+
+ return true
+
+ case ssa.Op386SQRTSD:
+ push(s, v.Args[0])
+ gc.Prog(x86.AFSQRT)
+ popAndSave(s, v)
+ return true
+
+ case ssa.Op386FCHS:
+ push(s, v.Args[0])
+ gc.Prog(x86.AFCHS)
+ popAndSave(s, v)
+ return true
+
+ case ssa.Op386CVTSL2SS, ssa.Op386CVTSL2SD:
+ p := gc.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ scratch387(s, &p.To)
+ p = gc.Prog(x86.AFMOVL)
+ scratch387(s, &p.From)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_F0
+ popAndSave(s, v)
+ return true
+
+ case ssa.Op386CVTTSD2SL, ssa.Op386CVTTSS2SL:
+ push(s, v.Args[0])
+
+ // Save control word.
+ p := gc.Prog(x86.AFSTCW)
+ scratch387(s, &p.To)
+ p.To.Offset += 4
+
+ // Load control word which truncates (rounds towards zero).
+ p = gc.Prog(x86.AFLDCW)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Sym = gc.Linksym(gc.Pkglookup("controlWord64trunc", gc.Runtimepkg))
+
+ // Now do the conversion.
+ p = gc.Prog(x86.AFMOVLP)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_F0
+ scratch387(s, &p.To)
+ p = gc.Prog(x86.AMOVL)
+ scratch387(s, &p.From)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+
+ // Restore control word.
+ p = gc.Prog(x86.AFLDCW)
+ scratch387(s, &p.From)
+ p.From.Offset += 4
+ return true
+
+ case ssa.Op386CVTSS2SD:
+ // float32 -> float64 is a no-op: the 387 stack already holds values in extended precision.
+ push(s, v.Args[0])
+ popAndSave(s, v)
+ return true
+
+ case ssa.Op386CVTSD2SS:
+ // Round to nearest float32.
+ push(s, v.Args[0])
+ p := gc.Prog(x86.AFMOVFP)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_F0
+ scratch387(s, &p.To)
+ p = gc.Prog(x86.AFMOVF)
+ scratch387(s, &p.From)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_F0
+ popAndSave(s, v)
+ return true
+
+ case ssa.OpLoadReg:
+ if !v.Type.IsFloat() {
+ return false
+ }
+ // Load+push the value we need.
+ p := gc.Prog(loadPush(v.Type))
+ n, off := gc.AutoVar(v.Args[0])
+ p.From.Type = obj.TYPE_MEM
+ p.From.Node = n
+ p.From.Sym = gc.Linksym(n.Sym)
+ p.From.Offset = off
+ if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
+ p.From.Name = obj.NAME_PARAM
+ p.From.Offset += n.Xoffset
+ } else {
+ p.From.Name = obj.NAME_AUTO
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_F0
+ // Move the value to its assigned register.
+ popAndSave(s, v)
+ return true
+
+ case ssa.OpStoreReg:
+ if !v.Type.IsFloat() {
+ return false
+ }
+ push(s, v.Args[0])
+ var op obj.As
+ switch v.Type.Size() {
+ case 4:
+ op = x86.AFMOVFP
+ case 8:
+ op = x86.AFMOVDP
+ }
+ p := gc.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_F0
+ n, off := gc.AutoVar(v)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Node = n
+ p.To.Sym = gc.Linksym(n.Sym)
+ p.To.Offset = off
+ if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
+ p.To.Name = obj.NAME_PARAM
+ p.To.Offset += n.Xoffset
+ } else {
+ p.To.Name = obj.NAME_AUTO
+ }
+ return true
+
+ case ssa.OpCopy:
+ if !v.Type.IsFloat() {
+ return false
+ }
+ push(s, v.Args[0])
+ popAndSave(s, v)
+ return true
+
+ case ssa.Op386CALLstatic, ssa.Op386CALLclosure, ssa.Op386CALLdefer, ssa.Op386CALLgo, ssa.Op386CALLinter:
+ flush387(s) // Calls must empty the FP stack.
+ return false // then issue the call as normal
+ }
+ return false
+}
+
+// push pushes v onto the floating-point stack. v must be in a register.
+func push(s *gc.SSAGenState, v *ssa.Value) {
+ p := gc.Prog(x86.AFMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = s.SSEto387[gc.SSARegNum(v)]
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_F0
+}
+
+// popAndSave pops a value off the floating-point stack and stores
+// it in the register assigned to v.
+func popAndSave(s *gc.SSAGenState, v *ssa.Value) {
+ r := gc.SSARegNum(v)
+ if _, ok := s.SSEto387[r]; ok {
+ // Pop value, write to correct register.
+ p := gc.Prog(x86.AFMOVDP)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_F0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = s.SSEto387[gc.SSARegNum(v)] + 1
+ } else {
+ // Don't actually pop the value. F0 is now the home of an
+ // SSE register that had no 387 home yet. The stack is one
+ // deeper, so increase every other register's mapping by one.
+ for rSSE, r387 := range s.SSEto387 {
+ s.SSEto387[rSSE] = r387 + 1
+ }
+ s.SSEto387[r] = x86.REG_F0
+ }
+}
+
+// loadPush returns the opcode for load+push of the given type.
+func loadPush(t ssa.Type) obj.As {
+ if t.Size() == 4 {
+ return x86.AFMOVF
+ }
+ return x86.AFMOVD
+}
+
+// flush387 removes all entries from the 387 floating-point stack.
+func flush387(s *gc.SSAGenState) {
+ for k := range s.SSEto387 {
+ p := gc.Prog(x86.AFMOVDP)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_F0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_F0
+ delete(s.SSEto387, k)
+ }
+}
+
+// scratch387 initializes a to the scratch location used by some 387 rewrites.
+func scratch387(s *gc.SSAGenState, a *obj.Addr) {
+ a.Type = obj.TYPE_MEM
+ a.Name = obj.NAME_AUTO
+ a.Node = s.ScratchFpMem
+ a.Sym = gc.Linksym(s.ScratchFpMem.Sym)
+ a.Reg = x86.REG_SP
+}
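
A toy trace of popAndSave's bookkeeping (stand-in names; the real map is s.SSEto387, keyed by SSE register numbers): when a value has no 387 home yet it stays at the top of the stack, and every existing mapping sinks one slot.

    package main

    import "fmt"

    func main() {
    	const F0 = 0 // stand-in for x86.REG_F0
    	sseTo387 := map[string]int{"X0": F0} // X0 currently lives at F0

    	// A value destined for X1 was just pushed onto the 387 stack.
    	// X1 has no home yet, so (mirroring popAndSave's else branch)
    	// every existing mapping sinks one slot and X1's home is F0.
    	for r := range sseTo387 {
    		sseTo387[r]++
    	}
    	sseTo387["X1"] = F0
    	fmt.Println(sseTo387) // map[X0:1 X1:0]
    }
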
diff --git a/src/cmd/compile/internal/x86/galign.go b/src/cmd/compile/internal/x86/galign.go
index 738d887742..a28b28742d 100644
--- a/src/cmd/compile/internal/x86/galign.go
+++ b/src/cmd/compile/internal/x86/galign.go
@@ -77,6 +77,11 @@ func Main() {
gc.Thearch.Doregbits = doregbits
gc.Thearch.Regnames = regnames
+ gc.Thearch.SSARegToReg = ssaRegToReg
+ gc.Thearch.SSAMarkMoves = ssaMarkMoves
+ gc.Thearch.SSAGenValue = ssaGenValue
+ gc.Thearch.SSAGenBlock = ssaGenBlock
+
gc.Main()
gc.Exit(0)
}
diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go
new file mode 100644
index 0000000000..3005a19bfd
--- /dev/null
+++ b/src/cmd/compile/internal/x86/ssa.go
@@ -0,0 +1,1020 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x86
+
+import (
+ "fmt"
+ "math"
+
+ "cmd/compile/internal/gc"
+ "cmd/compile/internal/ssa"
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+)
+
+// minZeroPage is the smallest possible faulting page at address zero.
+const minZeroPage = 4096
+
+// ssaRegToReg maps ssa register numbers to obj register numbers.
+var ssaRegToReg = []int16{
+ x86.REG_AX,
+ x86.REG_CX,
+ x86.REG_DX,
+ x86.REG_BX,
+ x86.REG_SP,
+ x86.REG_BP,
+ x86.REG_SI,
+ x86.REG_DI,
+ x86.REG_X0,
+ x86.REG_X1,
+ x86.REG_X2,
+ x86.REG_X3,
+ x86.REG_X4,
+ x86.REG_X5,
+ x86.REG_X6,
+ x86.REG_X7,
+ 0, // SB isn't a real register. We fill an Addr.Reg field with 0 in this case.
+}
+
+// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
+func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
+ flive := b.FlagsLiveAtEnd
+ if b.Control != nil && b.Control.Type.IsFlags() {
+ flive = true
+ }
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ if flive && v.Op == ssa.Op386MOVLconst {
+ // The "mark" is any non-nil Aux value.
+ v.Aux = v
+ }
+ if v.Type.IsFlags() {
+ flive = false
+ }
+ for _, a := range v.Args {
+ if a.Type.IsFlags() {
+ flive = true
+ }
+ }
+ }
+}
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t ssa.Type) obj.As {
+ // Avoid partial register write
+ if !t.IsFloat() && t.Size() <= 2 {
+ if t.Size() == 1 {
+ return x86.AMOVBLZX
+ } else {
+ return x86.AMOVWLZX
+ }
+ }
+ // Otherwise, there's no difference between load and store opcodes.
+ return storeByType(t)
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t ssa.Type) obj.As {
+ width := t.Size()
+ if t.IsFloat() {
+ switch width {
+ case 4:
+ return x86.AMOVSS
+ case 8:
+ return x86.AMOVSD
+ }
+ } else {
+ switch width {
+ case 1:
+ return x86.AMOVB
+ case 2:
+ return x86.AMOVW
+ case 4:
+ return x86.AMOVL
+ }
+ }
+ panic("bad store type")
+}
+
+// moveByType returns the reg->reg move instruction of the given type.
+func moveByType(t ssa.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return x86.AMOVSS
+ case 8:
+ return x86.AMOVSD
+ default:
+ panic(fmt.Sprintf("bad float register width %d:%s", t.Size(), t))
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ // Avoids partial register write
+ return x86.AMOVL
+ case 2:
+ return x86.AMOVL
+ case 4:
+ return x86.AMOVL
+ default:
+ panic(fmt.Sprintf("bad int register width %d:%s", t.Size(), t))
+ }
+ }
+}
+
+// opregreg emits instructions for
+// dest := dest(To) op src(From)
+// and also returns the created obj.Prog so it
+// may be further adjusted (offset, scale, etc).
+func opregreg(op obj.As, dest, src int16) *obj.Prog {
+ p := gc.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = dest
+ p.From.Reg = src
+ return p
+}
+
+func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+ s.SetLineno(v.Line)
+
+ if gc.Thearch.Use387 {
+ if ssaGenValue387(s, v) {
+ return // v was handled by 387 generation.
+ }
+ }
+
+ switch v.Op {
+ case ssa.Op386ADDL:
+ r := gc.SSARegNum(v)
+ r1 := gc.SSARegNum(v.Args[0])
+ r2 := gc.SSARegNum(v.Args[1])
+ switch {
+ case r == r1:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case r == r2:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ default:
+ p := gc.Prog(x86.ALEAL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r1
+ p.From.Scale = 1
+ p.From.Index = r2
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+
+ // 2-address opcode arithmetic
+ case ssa.Op386SUBL,
+ ssa.Op386MULL,
+ ssa.Op386ANDL,
+ ssa.Op386ORL,
+ ssa.Op386XORL,
+ ssa.Op386SHLL,
+ ssa.Op386SHRL, ssa.Op386SHRW, ssa.Op386SHRB,
+ ssa.Op386SARL, ssa.Op386SARW, ssa.Op386SARB,
+ ssa.Op386ADDSS, ssa.Op386ADDSD, ssa.Op386SUBSS, ssa.Op386SUBSD,
+ ssa.Op386MULSS, ssa.Op386MULSD, ssa.Op386DIVSS, ssa.Op386DIVSD,
+ ssa.Op386PXOR,
+ ssa.Op386ADCL,
+ ssa.Op386SBBL:
+ r := gc.SSARegNum(v)
+ if r != gc.SSARegNum(v.Args[0]) {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ opregreg(v.Op.Asm(), r, gc.SSARegNum(v.Args[1]))
+
+ case ssa.Op386ADDLcarry, ssa.Op386SUBLcarry:
+ // output 0 is carry/borrow, output 1 is the low 32 bits.
+ r := gc.SSARegNum1(v)
+ if r != gc.SSARegNum(v.Args[0]) {
+ v.Fatalf("input[0] and output[1] not in same register %s", v.LongString())
+ }
+ opregreg(v.Op.Asm(), r, gc.SSARegNum(v.Args[1]))
+
+ case ssa.Op386ADDLconstcarry, ssa.Op386SUBLconstcarry:
+ // output 0 is carry/borrow, output 1 is the low 32 bits.
+ r := gc.SSARegNum1(v)
+ if r != gc.SSARegNum(v.Args[0]) {
+ v.Fatalf("input[0] and output[1] not in same register %s", v.LongString())
+ }
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.Op386DIVL, ssa.Op386DIVW,
+ ssa.Op386DIVLU, ssa.Op386DIVWU,
+ ssa.Op386MODL, ssa.Op386MODW,
+ ssa.Op386MODLU, ssa.Op386MODWU:
+
+ // Arg[0] is already in AX as it's the only register we allow
+ // and AX is the only output
+ x := gc.SSARegNum(v.Args[1])
+
+ // The CPU faults upon signed overflow, which occurs when the
+ // most negative int is divided by -1.
+ var j *obj.Prog
+ if v.Op == ssa.Op386DIVL || v.Op == ssa.Op386DIVW ||
+ v.Op == ssa.Op386MODL || v.Op == ssa.Op386MODW {
+
+ var c *obj.Prog
+ switch v.Op {
+ case ssa.Op386DIVL, ssa.Op386MODL:
+ c = gc.Prog(x86.ACMPL)
+ j = gc.Prog(x86.AJEQ)
+ gc.Prog(x86.ACDQ) //TODO: fix
+
+ case ssa.Op386DIVW, ssa.Op386MODW:
+ c = gc.Prog(x86.ACMPW)
+ j = gc.Prog(x86.AJEQ)
+ gc.Prog(x86.ACWD)
+ }
+ c.From.Type = obj.TYPE_REG
+ c.From.Reg = x
+ c.To.Type = obj.TYPE_CONST
+ c.To.Offset = -1
+
+ j.To.Type = obj.TYPE_BRANCH
+ }
+
+ // For unsigned ints, we extend by zeroing DX;
+ // signed ints were sign-extended above.
+ if v.Op == ssa.Op386DIVLU || v.Op == ssa.Op386MODLU ||
+ v.Op == ssa.Op386DIVWU || v.Op == ssa.Op386MODWU {
+ c := gc.Prog(x86.AXORL)
+ c.From.Type = obj.TYPE_REG
+ c.From.Reg = x86.REG_DX
+ c.To.Type = obj.TYPE_REG
+ c.To.Reg = x86.REG_DX
+ }
+
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+
+ // signed division, rest of the check for -1 case
+ if j != nil {
+ j2 := gc.Prog(obj.AJMP)
+ j2.To.Type = obj.TYPE_BRANCH
+
+ var n *obj.Prog
+ if v.Op == ssa.Op386DIVL || v.Op == ssa.Op386DIVW {
+ // n * -1 = -n
+ n = gc.Prog(x86.ANEGL)
+ n.To.Type = obj.TYPE_REG
+ n.To.Reg = x86.REG_AX
+ } else {
+ // n % -1 == 0
+ n = gc.Prog(x86.AXORL)
+ n.From.Type = obj.TYPE_REG
+ n.From.Reg = x86.REG_DX
+ n.To.Type = obj.TYPE_REG
+ n.To.Reg = x86.REG_DX
+ }
+
+ j.To.Val = n
+ j2.To.Val = s.Pc()
+ }
+
+ case ssa.Op386HMULL, ssa.Op386HMULW, ssa.Op386HMULB,
+ ssa.Op386HMULLU, ssa.Op386HMULWU, ssa.Op386HMULBU:
+ // the frontend rewrites constant division by 8/16/32 bit integers into
+ // HMUL by a constant
+ // SSA rewrites generate the 64 bit versions
+
+ // Arg[0] is already in AX as it's the only register we allow
+ // and DX is the only output we care about (the high bits)
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[1])
+
+ // IMULB puts the high portion in AH instead of DL,
+ // so move it to DL for consistency
+ if v.Type.Size() == 1 {
+ m := gc.Prog(x86.AMOVB)
+ m.From.Type = obj.TYPE_REG
+ m.From.Reg = x86.REG_AH
+ m.To.Type = obj.TYPE_REG
+ m.To.Reg = x86.REG_DX
+ }
+
+ case ssa.Op386MULLQU:
+ // AX * args[1], high 32 bits in DX (result[0]), low 32 bits in AX (result[1]).
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[1])
+
+ case ssa.Op386ADDLconst:
+ r := gc.SSARegNum(v)
+ a := gc.SSARegNum(v.Args[0])
+ if r == a {
+ if v.AuxInt == 1 {
+ p := gc.Prog(x86.AINCL)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ return
+ }
+ if v.AuxInt == -1 {
+ p := gc.Prog(x86.ADECL)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ return
+ }
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ return
+ }
+ p := gc.Prog(x86.ALEAL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = a
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.Op386MULLconst:
+ r := gc.SSARegNum(v)
+ if r != gc.SSARegNum(v.Args[0]) {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ // TODO: Teach doasm to compile the three-address multiply imul $c, r1, r2
+ // then we don't need to use resultInArg0 for these ops.
+ //p.From3 = new(obj.Addr)
+ //p.From3.Type = obj.TYPE_REG
+ //p.From3.Reg = gc.SSARegNum(v.Args[0])
+
+ case ssa.Op386SUBLconst,
+ ssa.Op386ADCLconst,
+ ssa.Op386SBBLconst,
+ ssa.Op386ANDLconst,
+ ssa.Op386ORLconst,
+ ssa.Op386XORLconst,
+ ssa.Op386SHLLconst,
+ ssa.Op386SHRLconst, ssa.Op386SHRWconst, ssa.Op386SHRBconst,
+ ssa.Op386SARLconst, ssa.Op386SARWconst, ssa.Op386SARBconst,
+ ssa.Op386ROLLconst, ssa.Op386ROLWconst, ssa.Op386ROLBconst:
+ r := gc.SSARegNum(v)
+ if r != gc.SSARegNum(v.Args[0]) {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.Op386SBBLcarrymask:
+ r := gc.SSARegNum(v)
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.Op386LEAL1, ssa.Op386LEAL2, ssa.Op386LEAL4, ssa.Op386LEAL8:
+ r := gc.SSARegNum(v.Args[0])
+ i := gc.SSARegNum(v.Args[1])
+ p := gc.Prog(x86.ALEAL)
+ switch v.Op {
+ case ssa.Op386LEAL1:
+ p.From.Scale = 1
+ if i == x86.REG_SP {
+ r, i = i, r
+ }
+ case ssa.Op386LEAL2:
+ p.From.Scale = 2
+ case ssa.Op386LEAL4:
+ p.From.Scale = 4
+ case ssa.Op386LEAL8:
+ p.From.Scale = 8
+ }
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r
+ p.From.Index = i
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.Op386LEAL:
+ p := gc.Prog(x86.ALEAL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.Op386CMPL, ssa.Op386CMPW, ssa.Op386CMPB,
+ ssa.Op386TESTL, ssa.Op386TESTW, ssa.Op386TESTB:
+ opregreg(v.Op.Asm(), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[0]))
+ case ssa.Op386UCOMISS, ssa.Op386UCOMISD:
+ // The Go assembler has swapped operands for UCOMISx relative to CMP;
+ // we must account for that right here.
+ opregreg(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]))
+ case ssa.Op386CMPLconst, ssa.Op386CMPWconst, ssa.Op386CMPBconst:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = v.AuxInt
+ case ssa.Op386TESTLconst, ssa.Op386TESTWconst, ssa.Op386TESTBconst:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ case ssa.Op386MOVLconst:
+ x := gc.SSARegNum(v)
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ // If flags are live at this instruction, suppress the
+ // MOV $0,AX -> XOR AX,AX optimization.
+ if v.Aux != nil {
+ p.Mark |= x86.PRESERVEFLAGS
+ }
+ case ssa.Op386MOVSSconst, ssa.Op386MOVSDconst:
+ x := gc.SSARegNum(v)
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ case ssa.Op386MOVSSconst1, ssa.Op386MOVSDconst1:
+ var literal string
+ if v.Op == ssa.Op386MOVSDconst1 {
+ literal = fmt.Sprintf("$f64.%016x", uint64(v.AuxInt))
+ } else {
+ literal = fmt.Sprintf("$f32.%08x", math.Float32bits(float32(math.Float64frombits(uint64(v.AuxInt)))))
+ }
+ p := gc.Prog(x86.ALEAL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Sym = obj.Linklookup(gc.Ctxt, literal, 0)
+ p.From.Sym.Local = true
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.Op386MOVSSconst2, ssa.Op386MOVSDconst2:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+
+ case ssa.Op386MOVSSload, ssa.Op386MOVSDload, ssa.Op386MOVLload, ssa.Op386MOVWload, ssa.Op386MOVBload, ssa.Op386MOVBLSXload, ssa.Op386MOVWLSXload:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.Op386MOVSDloadidx8:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.From, v)
+ p.From.Scale = 8
+ p.From.Index = gc.SSARegNum(v.Args[1])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.Op386MOVLloadidx4, ssa.Op386MOVSSloadidx4:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.From, v)
+ p.From.Scale = 4
+ p.From.Index = gc.SSARegNum(v.Args[1])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.Op386MOVWloadidx2:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.From, v)
+ p.From.Scale = 2
+ p.From.Index = gc.SSARegNum(v.Args[1])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.Op386MOVBloadidx1, ssa.Op386MOVWloadidx1, ssa.Op386MOVLloadidx1, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1:
+ r := gc.SSARegNum(v.Args[0])
+ i := gc.SSARegNum(v.Args[1])
+ if i == x86.REG_SP {
+ r, i = i, r
+ }
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r
+ p.From.Scale = 1
+ p.From.Index = i
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.Op386MOVSSstore, ssa.Op386MOVSDstore, ssa.Op386MOVLstore, ssa.Op386MOVWstore, ssa.Op386MOVBstore:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[1])
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.To, v)
+ case ssa.Op386MOVSDstoreidx8:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[2])
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Scale = 8
+ p.To.Index = gc.SSARegNum(v.Args[1])
+ gc.AddAux(&p.To, v)
+ case ssa.Op386MOVSSstoreidx4, ssa.Op386MOVLstoreidx4:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[2])
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Scale = 4
+ p.To.Index = gc.SSARegNum(v.Args[1])
+ gc.AddAux(&p.To, v)
+ case ssa.Op386MOVWstoreidx2:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[2])
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Scale = 2
+ p.To.Index = gc.SSARegNum(v.Args[1])
+ gc.AddAux(&p.To, v)
+ case ssa.Op386MOVBstoreidx1, ssa.Op386MOVWstoreidx1, ssa.Op386MOVLstoreidx1, ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1:
+ r := gc.SSARegNum(v.Args[0])
+ i := gc.SSARegNum(v.Args[1])
+ if i == x86.REG_SP {
+ r, i = i, r
+ }
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[2])
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = r
+ p.To.Scale = 1
+ p.To.Index = i
+ gc.AddAux(&p.To, v)
+ case ssa.Op386MOVLstoreconst, ssa.Op386MOVWstoreconst, ssa.Op386MOVBstoreconst:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ sc := v.AuxValAndOff()
+ p.From.Offset = sc.Val()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux2(&p.To, v, sc.Off())
+ case ssa.Op386MOVLstoreconstidx1, ssa.Op386MOVLstoreconstidx4, ssa.Op386MOVWstoreconstidx1, ssa.Op386MOVWstoreconstidx2, ssa.Op386MOVBstoreconstidx1:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ sc := v.AuxValAndOff()
+ p.From.Offset = sc.Val()
+ r := gc.SSARegNum(v.Args[0])
+ i := gc.SSARegNum(v.Args[1])
+ switch v.Op {
+ case ssa.Op386MOVBstoreconstidx1, ssa.Op386MOVWstoreconstidx1, ssa.Op386MOVLstoreconstidx1:
+ p.To.Scale = 1
+ if i == x86.REG_SP {
+ r, i = i, r
+ }
+ case ssa.Op386MOVWstoreconstidx2:
+ p.To.Scale = 2
+ case ssa.Op386MOVLstoreconstidx4:
+ p.To.Scale = 4
+ }
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = r
+ p.To.Index = i
+ gc.AddAux2(&p.To, v, sc.Off())
+ case ssa.Op386MOVWLSX, ssa.Op386MOVBLSX, ssa.Op386MOVWLZX, ssa.Op386MOVBLZX,
+ ssa.Op386CVTSL2SS, ssa.Op386CVTSL2SD,
+ ssa.Op386CVTTSS2SL, ssa.Op386CVTTSD2SL,
+ ssa.Op386CVTSS2SD, ssa.Op386CVTSD2SS:
+ opregreg(v.Op.Asm(), gc.SSARegNum(v), gc.SSARegNum(v.Args[0]))
+ case ssa.Op386DUFFZERO:
+ p := gc.Prog(obj.ADUFFZERO)
+ p.To.Type = obj.TYPE_ADDR
+ p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
+ p.To.Offset = v.AuxInt
+ case ssa.Op386DUFFCOPY:
+ p := gc.Prog(obj.ADUFFCOPY)
+ p.To.Type = obj.TYPE_ADDR
+ p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
+ p.To.Offset = v.AuxInt
+
+ case ssa.OpCopy, ssa.Op386MOVLconvert: // TODO: use MOVLreg for reg->reg copies instead of OpCopy?
+ if v.Type.IsMemory() {
+ return
+ }
+ x := gc.SSARegNum(v.Args[0])
+ y := gc.SSARegNum(v)
+ if x != y {
+ opregreg(moveByType(v.Type), y, x)
+ }
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Unimplementedf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ p := gc.Prog(loadByType(v.Type))
+ n, off := gc.AutoVar(v.Args[0])
+ p.From.Type = obj.TYPE_MEM
+ p.From.Node = n
+ p.From.Sym = gc.Linksym(n.Sym)
+ p.From.Offset = off
+ if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
+ p.From.Name = obj.NAME_PARAM
+ p.From.Offset += n.Xoffset
+ } else {
+ p.From.Name = obj.NAME_AUTO
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Unimplementedf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ p := gc.Prog(storeByType(v.Type))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ n, off := gc.AutoVar(v)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Node = n
+ p.To.Sym = gc.Linksym(n.Sym)
+ p.To.Offset = off
+ if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
+ p.To.Name = obj.NAME_PARAM
+ p.To.Offset += n.Xoffset
+ } else {
+ p.To.Name = obj.NAME_AUTO
+ }
+ case ssa.OpPhi:
+ gc.CheckLoweredPhi(v)
+ case ssa.OpInitMem:
+ // memory arg needs no code
+ case ssa.OpArg:
+ // input args need no code
+ case ssa.Op386LoweredGetClosurePtr:
+ // Closure pointer is DX.
+ gc.CheckLoweredGetClosurePtr(v)
+ case ssa.Op386LoweredGetG:
+ r := gc.SSARegNum(v)
+ // See the comments in cmd/internal/obj/x86/obj6.go
+ // near CanUse1InsnTLS for a detailed explanation of these instructions.
+ if x86.CanUse1InsnTLS(gc.Ctxt) {
+ // MOVL (TLS), r
+ p := gc.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = x86.REG_TLS
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ } else {
+ // MOVL TLS, r
+ // MOVL (r)(TLS*1), r
+ p := gc.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_TLS
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ q := gc.Prog(x86.AMOVL)
+ q.From.Type = obj.TYPE_MEM
+ q.From.Reg = r
+ q.From.Index = x86.REG_TLS
+ q.From.Scale = 1
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = r
+ }
+ case ssa.Op386CALLstatic:
+ if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
+ // Deferred calls will appear to be returning to
+ // the CALL deferreturn(SB) that we are about to emit.
+ // However, the stack trace code will show the line
+ // of the instruction byte before the return PC.
+ // To avoid that being an unrelated instruction,
+ // insert an actual hardware NOP that will have the right line number.
+ // This is different from obj.ANOP, which is a virtual no-op
+ // that doesn't make it into the instruction stream.
+ ginsnop()
+ }
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym))
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.Op386CALLclosure:
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.Op386CALLdefer:
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.Op386CALLgo:
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(gc.Newproc.Sym)
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.Op386CALLinter:
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.Op386NEGL,
+ ssa.Op386BSWAPL,
+ ssa.Op386NOTL:
+ r := gc.SSARegNum(v)
+ if r != gc.SSARegNum(v.Args[0]) {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ p := gc.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.Op386BSFL, ssa.Op386BSFW,
+ ssa.Op386BSRL, ssa.Op386BSRW,
+ ssa.Op386SQRTSD:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpSP, ssa.OpSB, ssa.OpSelect0, ssa.OpSelect1:
+ // nothing to do
+ case ssa.Op386SETEQ, ssa.Op386SETNE,
+ ssa.Op386SETL, ssa.Op386SETLE,
+ ssa.Op386SETG, ssa.Op386SETGE,
+ ssa.Op386SETGF, ssa.Op386SETGEF,
+ ssa.Op386SETB, ssa.Op386SETBE,
+ ssa.Op386SETORD, ssa.Op386SETNAN,
+ ssa.Op386SETA, ssa.Op386SETAE:
+ p := gc.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+
+ case ssa.Op386SETNEF:
+ p := gc.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ q := gc.Prog(x86.ASETPS)
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = x86.REG_AX
+ opregreg(x86.AORL, gc.SSARegNum(v), x86.REG_AX)
+
+ case ssa.Op386SETEQF:
+ p := gc.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ q := gc.Prog(x86.ASETPC)
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = x86.REG_AX
+ opregreg(x86.AANDL, gc.SSARegNum(v), x86.REG_AX)
+
+ case ssa.Op386InvertFlags:
+ v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
+ case ssa.Op386FlagEQ, ssa.Op386FlagLT_ULT, ssa.Op386FlagLT_UGT, ssa.Op386FlagGT_ULT, ssa.Op386FlagGT_UGT:
+ v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
+ case ssa.Op386REPSTOSL:
+ gc.Prog(x86.AREP)
+ gc.Prog(x86.ASTOSL)
+ case ssa.Op386REPMOVSL:
+ gc.Prog(x86.AREP)
+ gc.Prog(x86.AMOVSL)
+ case ssa.OpVarDef:
+ gc.Gvardef(v.Aux.(*gc.Node))
+ case ssa.OpVarKill:
+ gc.Gvarkill(v.Aux.(*gc.Node))
+ case ssa.OpVarLive:
+ gc.Gvarlive(v.Aux.(*gc.Node))
+ case ssa.OpKeepAlive:
+ if !v.Args[0].Type.IsPtrShaped() {
+ v.Fatalf("keeping non-pointer alive %v", v.Args[0])
+ }
+ n, off := gc.AutoVar(v.Args[0])
+ if n == nil {
+ v.Fatalf("KeepLive with non-spilled value %s %s", v, v.Args[0])
+ }
+ if off != 0 {
+ v.Fatalf("KeepLive with non-zero offset spill location %s:%d", n, off)
+ }
+ gc.Gvarlive(n)
+ case ssa.Op386LoweredNilCheck:
+ // Optimization - if the subsequent block has a load or store
+ // at the same address, we don't need to issue this instruction.
+ mem := v.Args[1]
+ for _, w := range v.Block.Succs[0].Block().Values {
+ if w.Op == ssa.OpPhi {
+ if w.Type.IsMemory() {
+ mem = w
+ }
+ continue
+ }
+ if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() {
+ // w doesn't use a store - can't be a memory op.
+ continue
+ }
+ if w.Args[len(w.Args)-1] != mem {
+ v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w)
+ }
+ switch w.Op {
+ case ssa.Op386MOVLload, ssa.Op386MOVWload, ssa.Op386MOVBload,
+ ssa.Op386MOVLstore, ssa.Op386MOVWstore, ssa.Op386MOVBstore,
+ ssa.Op386MOVBLSXload, ssa.Op386MOVWLSXload,
+ ssa.Op386MOVSSload, ssa.Op386MOVSDload,
+ ssa.Op386MOVSSstore, ssa.Op386MOVSDstore:
+ if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
+ if gc.Debug_checknil != 0 && int(v.Line) > 1 {
+ gc.Warnl(v.Line, "removed nil check")
+ }
+ return
+ }
+ case ssa.Op386MOVLstoreconst, ssa.Op386MOVWstoreconst, ssa.Op386MOVBstoreconst:
+ off := ssa.ValAndOff(v.AuxInt).Off()
+ if w.Args[0] == v.Args[0] && w.Aux == nil && off >= 0 && off < minZeroPage {
+ if gc.Debug_checknil != 0 && int(v.Line) > 1 {
+ gc.Warnl(v.Line, "removed nil check")
+ }
+ return
+ }
+ }
+ if w.Type.IsMemory() {
+ if w.Op == ssa.OpVarDef || w.Op == ssa.OpVarKill || w.Op == ssa.OpVarLive {
+ // these ops are OK
+ mem = w
+ continue
+ }
+ // We can't delay the nil check past the next store.
+ break
+ }
+ }
+ // Issue a load which will fault if the input is nil.
+		// TODO: We currently use the 2-byte instruction TESTB AX, (reg).
+		// Should we use the 3-byte TESTB $0, (reg) instead? It is larger
+		// but doesn't have a false dependency on AX.
+		// Or maybe allocate an output register and use MOVL (reg), reg2?
+		// That trades clobbering the flags for clobbering a register.
+ p := gc.Prog(x86.ATESTB)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_AX
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.To, v)
+ if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
+ gc.Warnl(v.Line, "generated nil check")
+ }
+ case ssa.Op386FCHS:
+ v.Fatalf("FCHS in non-387 mode")
+ default:
+ v.Unimplementedf("genValue not implemented: %s", v.LongString())
+ }
+}
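
A quick illustration (plain Go, separate from the patch) of the pattern the nil-check elision above exploits: the load immediately following the check reads a small offset from the same pointer, so it would fault on nil anyway and the explicit TESTB can be dropped. The type and function names here are invented for the example.

package main

import "fmt"

// T is a hypothetical struct; p.a loads at offset 0, which is below
// minZeroPage, so the load subsumes the nil check for p.
type T struct{ a, b int }

func f(p *T) int {
	return p.a // this load makes a separate nil-check instruction redundant
}

func main() { fmt.Println(f(&T{a: 1})) }
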
+
+var blockJump = [...]struct {
+ asm, invasm obj.As
+}{
+ ssa.Block386EQ: {x86.AJEQ, x86.AJNE},
+ ssa.Block386NE: {x86.AJNE, x86.AJEQ},
+ ssa.Block386LT: {x86.AJLT, x86.AJGE},
+ ssa.Block386GE: {x86.AJGE, x86.AJLT},
+ ssa.Block386LE: {x86.AJLE, x86.AJGT},
+ ssa.Block386GT: {x86.AJGT, x86.AJLE},
+ ssa.Block386ULT: {x86.AJCS, x86.AJCC},
+ ssa.Block386UGE: {x86.AJCC, x86.AJCS},
+ ssa.Block386UGT: {x86.AJHI, x86.AJLS},
+ ssa.Block386ULE: {x86.AJLS, x86.AJHI},
+ ssa.Block386ORD: {x86.AJPC, x86.AJPS},
+ ssa.Block386NAN: {x86.AJPS, x86.AJPC},
+}
+
+var eqfJumps = [2][2]gc.FloatingEQNEJump{
+ {{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0]
+ {{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1]
+}
+var nefJumps = [2][2]gc.FloatingEQNEJump{
+ {{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0]
+ {{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1]
+}
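
The SETEQF/SETNEF cases above and these jump tables work around the same x86 quirk: UCOMISS/UCOMISD report an unordered (NaN) comparison by setting ZF, PF, and CF all to 1, so a bare SETEQ or JEQ would treat NaN as equal. The extra parity-flag test (SETPC/ANDL, SETPS/ORL, or the paired jumps) preserves Go's IEEE semantics, which a plain Go program can demonstrate:

package main

import (
	"fmt"
	"math"
)

func main() {
	// With only SETEQ, x == x would wrongly be true for NaN, because the
	// hardware flags for "unordered" look like "equal". The parity fix-up
	// in the code above keeps Go's required results:
	x := math.NaN()
	fmt.Println(x == x, x != x) // false true
}
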
+
+func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+ s.SetLineno(b.Line)
+
+ if gc.Thearch.Use387 {
+ // Empty the 387's FP stack before the block ends.
+ flush387(s)
+ }
+
+ switch b.Kind {
+ case ssa.BlockPlain, ssa.BlockCall, ssa.BlockCheck:
+ if b.Succs[0].Block() != next {
+ p := gc.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockDefer:
+		// defer returns in AX:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := gc.Prog(x86.ATESTL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_AX
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX
+ p = gc.Prog(x86.AJNE)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := gc.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockExit:
+ gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
+ case ssa.BlockRet:
+ gc.Prog(obj.ARET)
+ case ssa.BlockRetJmp:
+ p := gc.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))
+
+ case ssa.Block386EQF:
+ gc.SSAGenFPJump(s, b, next, &eqfJumps)
+
+ case ssa.Block386NEF:
+ gc.SSAGenFPJump(s, b, next, &nefJumps)
+
+ case ssa.Block386EQ, ssa.Block386NE,
+ ssa.Block386LT, ssa.Block386GE,
+ ssa.Block386LE, ssa.Block386GT,
+ ssa.Block386ULT, ssa.Block386UGT,
+ ssa.Block386ULE, ssa.Block386UGE:
+ jmp := blockJump[b.Kind]
+ likely := b.Likely
+ var p *obj.Prog
+ switch next {
+ case b.Succs[0].Block():
+ p = gc.Prog(jmp.invasm)
+ likely *= -1
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ case b.Succs[1].Block():
+ p = gc.Prog(jmp.asm)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ default:
+ p = gc.Prog(jmp.asm)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ q := gc.Prog(obj.AJMP)
+ q.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
+ }
+
+ // liblink reorders the instruction stream as it sees fit.
+ // Pass along what we know so liblink can make use of it.
+ // TODO: Once we've fully switched to SSA,
+ // make liblink leave our output alone.
+ switch likely {
+ case ssa.BranchUnlikely:
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0
+ case ssa.BranchLikely:
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 1
+ }
+
+ default:
+ b.Unimplementedf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
+ }
+}
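
A minimal model (illustrative only, not compiler code) of the fallthrough logic in the two-way branch case above: when the block's first successor is the next block to be emitted, the condition is inverted so a single conditional jump suffices, and the likelihood hint flips sign with it; when neither successor falls through, a conditional jump plus an unconditional JMP are emitted.

package main

import "fmt"

func main() {
	type jmp struct{ asm, invasm string }
	eq := jmp{"JEQ", "JNE"}
	succ0, succ1 := "b3", "b7"
	for _, next := range []string{succ0, succ1, "other"} {
		switch next {
		case succ0: // fall through to succ0: invert the condition
			fmt.Println(eq.invasm, succ1)
		case succ1: // fall through to succ1: use the condition as-is
			fmt.Println(eq.asm, succ0)
		default: // no fallthrough: branch plus jump
			fmt.Println(eq.asm, succ0)
			fmt.Println("JMP", succ1)
		}
	}
}
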
diff --git a/src/cmd/internal/obj/arm/a.out.go b/src/cmd/internal/obj/arm/a.out.go
index 4234b5967c..9fcc483b9a 100644
--- a/src/cmd/internal/obj/arm/a.out.go
+++ b/src/cmd/internal/obj/arm/a.out.go
@@ -234,6 +234,8 @@ const (
ASQRTD
AABSF
AABSD
+ ANEGF
+ ANEGD
ASRL
ASRA
diff --git a/src/cmd/internal/obj/arm/anames.go b/src/cmd/internal/obj/arm/anames.go
index 0ef68a6be2..6d7db2dee6 100644
--- a/src/cmd/internal/obj/arm/anames.go
+++ b/src/cmd/internal/obj/arm/anames.go
@@ -59,6 +59,8 @@ var Anames = []string{
"SQRTD",
"ABSF",
"ABSD",
+ "NEGF",
+ "NEGD",
"SRL",
"SRA",
"SLL",
diff --git a/src/cmd/internal/obj/arm/asm5.go b/src/cmd/internal/obj/arm/asm5.go
index d37091fe45..2158cf19fa 100644
--- a/src/cmd/internal/obj/arm/asm5.go
+++ b/src/cmd/internal/obj/arm/asm5.go
@@ -1434,6 +1434,8 @@ func buildop(ctxt *obj.Link) {
opset(AMOVDF, r0)
opset(AABSF, r0)
opset(AABSD, r0)
+ opset(ANEGF, r0)
+ opset(ANEGD, r0)
case ACMPF:
opset(ACMPD, r0)
@@ -1930,7 +1932,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
r := int(p.Reg)
if r == 0 {
r = rt
- if p.As == AMOVF || p.As == AMOVD || p.As == AMOVFD || p.As == AMOVDF || p.As == ASQRTF || p.As == ASQRTD || p.As == AABSF || p.As == AABSD {
+ if p.As == AMOVF || p.As == AMOVD || p.As == AMOVFD || p.As == AMOVDF || p.As == ASQRTF || p.As == ASQRTD || p.As == AABSF || p.As == AABSD || p.As == ANEGF || p.As == ANEGD {
r = 0
}
}
@@ -2508,6 +2510,10 @@ func oprrr(ctxt *obj.Link, a obj.As, sc int) uint32 {
return o | 0xe<<24 | 0xb<<20 | 0<<16 | 0xb<<8 | 0xc<<4
case AABSF:
return o | 0xe<<24 | 0xb<<20 | 0<<16 | 0xa<<8 | 0xc<<4
+ case ANEGD:
+ return o | 0xe<<24 | 0xb<<20 | 1<<16 | 0xb<<8 | 0x4<<4
+ case ANEGF:
+ return o | 0xe<<24 | 0xb<<20 | 1<<16 | 0xa<<8 | 0x4<<4
case ACMPD:
return o | 0xe<<24 | 0xb<<20 | 4<<16 | 0xb<<8 | 0xc<<4
case ACMPF:
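
As a cross-check (an observation, not part of the patch): assuming the always-execute condition AL (0xe) ends up in bits 28-31, as it does for these unconditional VFP ops, the ANEGD bits above yield exactly the instruction word the soft-float interpreter matches later in this merge (the 0xeeb10b40 case in softfloat_arm.go).

package main

import "fmt"

func main() {
	// ANEGD base bits from oprrr above, plus cond=AL (0xe) in the top nibble.
	word := uint32(0xe)<<28 | 0xe<<24 | 0xb<<20 | 1<<16 | 0xb<<8 | 0x4<<4
	fmt.Printf("%#x\n", word) // 0xeeb10b40: VNEG.F64, as decoded by softfloat_arm.go
}
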
diff --git a/src/cmd/internal/obj/arm/obj5.go b/src/cmd/internal/obj/arm/obj5.go
index 9cf2f2918e..a3d421eb6b 100644
--- a/src/cmd/internal/obj/arm/obj5.go
+++ b/src/cmd/internal/obj/arm/obj5.go
@@ -669,7 +669,9 @@ func softfloat(ctxt *obj.Link, cursym *obj.LSym) {
ASQRTF,
ASQRTD,
AABSF,
- AABSD:
+ AABSD,
+ ANEGF,
+ ANEGD:
goto soft
default:
diff --git a/src/cmd/internal/obj/arm64/a.out.go b/src/cmd/internal/obj/arm64/a.out.go
index ab05894950..9a02452158 100644
--- a/src/cmd/internal/obj/arm64/a.out.go
+++ b/src/cmd/internal/obj/arm64/a.out.go
@@ -274,6 +274,7 @@ const (
C_ADDCON // 12-bit unsigned, shifted left by 0 or 12
C_MOVCON // generated by a 16-bit constant, optionally inverted and/or shifted by multiple of 16
C_BITCON // bitfield and logical immediate masks
+ C_ABCON0 // could be C_ADDCON0 or C_BITCON
C_ABCON // could be C_ADDCON or C_BITCON
C_MBCON // could be C_MOVCON or C_BITCON
C_LCON // 32-bit constant
@@ -713,3 +714,10 @@ const (
AB = obj.AJMP
ABL = obj.ACALL
)
+
+const (
+ // shift types
+ SHIFT_LL = 0 << 22
+ SHIFT_LR = 1 << 22
+ SHIFT_AR = 2 << 22
+)
diff --git a/src/cmd/internal/obj/arm64/anames7.go b/src/cmd/internal/obj/arm64/anames7.go
index eb348d4850..d55b34f9ae 100644
--- a/src/cmd/internal/obj/arm64/anames7.go
+++ b/src/cmd/internal/obj/arm64/anames7.go
@@ -20,6 +20,7 @@ var cnames7 = []string{
"ADDCON",
"MOVCON",
"BITCON",
+ "ABCON0",
"ABCON",
"MBCON",
"LCON",
diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go
index 28bebaa3f7..fbf0378591 100644
--- a/src/cmd/internal/obj/arm64/asm7.go
+++ b/src/cmd/internal/obj/arm64/asm7.go
@@ -161,10 +161,12 @@ var optab = []Optab{
{AADD, C_ADDCON, C_RSP, C_RSP, 2, 4, 0, 0, 0},
{AADD, C_ADDCON, C_NONE, C_RSP, 2, 4, 0, 0, 0},
{ACMP, C_ADDCON, C_RSP, C_NONE, 2, 4, 0, 0, 0},
- // TODO: these don't work properly.
- // {AADD, C_MBCON, C_RSP, C_RSP, 2, 4, 0, 0, 0},
- // {AADD, C_MBCON, C_NONE, C_RSP, 2, 4, 0, 0, 0},
- // {ACMP, C_MBCON, C_RSP, C_NONE, 2, 4, 0, 0, 0},
+ {AADD, C_MOVCON, C_RSP, C_RSP, 62, 8, 0, 0, 0},
+ {AADD, C_MOVCON, C_NONE, C_RSP, 62, 8, 0, 0, 0},
+ {ACMP, C_MOVCON, C_RSP, C_NONE, 62, 8, 0, 0, 0},
+ {AADD, C_BITCON, C_RSP, C_RSP, 62, 8, 0, 0, 0},
+ {AADD, C_BITCON, C_NONE, C_RSP, 62, 8, 0, 0, 0},
+ {ACMP, C_BITCON, C_RSP, C_NONE, 62, 8, 0, 0, 0},
{AADD, C_VCON, C_RSP, C_RSP, 13, 8, 0, LFROM, 0},
{AADD, C_VCON, C_NONE, C_RSP, 13, 8, 0, LFROM, 0},
{ACMP, C_VCON, C_REG, C_NONE, 13, 8, 0, LFROM, 0},
@@ -188,11 +190,14 @@ var optab = []Optab{
{AAND, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0},
{ABIC, C_REG, C_REG, C_REG, 1, 4, 0, 0, 0},
{ABIC, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0},
- // TODO: these don't work properly.
- // {AAND, C_BITCON, C_REG, C_REG, 53, 4, 0, 0, 0},
- // {AAND, C_BITCON, C_NONE, C_REG, 53, 4, 0, 0, 0},
- // {ABIC, C_BITCON, C_REG, C_REG, 53, 4, 0, 0, 0},
- // {ABIC, C_BITCON, C_NONE, C_REG, 53, 4, 0, 0, 0},
+ {AAND, C_BITCON, C_REG, C_REG, 53, 4, 0, 0, 0},
+ {AAND, C_BITCON, C_NONE, C_REG, 53, 4, 0, 0, 0},
+ {ABIC, C_BITCON, C_REG, C_REG, 53, 4, 0, 0, 0},
+ {ABIC, C_BITCON, C_NONE, C_REG, 53, 4, 0, 0, 0},
+ {AAND, C_MOVCON, C_REG, C_REG, 62, 8, 0, 0, 0},
+ {AAND, C_MOVCON, C_NONE, C_REG, 62, 8, 0, 0, 0},
+ {ABIC, C_MOVCON, C_REG, C_REG, 62, 8, 0, 0, 0},
+ {ABIC, C_MOVCON, C_NONE, C_REG, 62, 8, 0, 0, 0},
{AAND, C_VCON, C_REG, C_REG, 28, 8, 0, LFROM, 0},
{AAND, C_VCON, C_NONE, C_REG, 28, 8, 0, LFROM, 0},
{ABIC, C_VCON, C_REG, C_REG, 28, 8, 0, LFROM, 0},
@@ -216,8 +221,8 @@ var optab = []Optab{
// TODO: these don't work properly.
// { AMOVW, C_ADDCON, C_NONE, C_REG, 2, 4, 0 , 0},
// { AMOVD, C_ADDCON, C_NONE, C_REG, 2, 4, 0 , 0},
- // { AMOVW, C_BITCON, C_NONE, C_REG, 53, 4, 0 , 0},
- // { AMOVD, C_BITCON, C_NONE, C_REG, 53, 4, 0 , 0},
+ {AMOVW, C_BITCON, C_NONE, C_REG, 32, 4, 0, 0, 0},
+ {AMOVD, C_BITCON, C_NONE, C_REG, 32, 4, 0, 0, 0},
{AMOVK, C_VCON, C_NONE, C_REG, 33, 4, 0, 0, 0},
{AMOVD, C_AACON, C_NONE, C_REG, 4, 4, REGFROM, 0, 0},
@@ -715,15 +720,18 @@ func flushpool(ctxt *obj.Link, p *obj.Prog, skip int) {
*/
func addpool(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
c := aclass(ctxt, a)
+ lit := ctxt.Instoffset
t := *ctxt.NewProg()
t.As = AWORD
sz := 4
- // MOVW foo(SB), R is actually
- // MOV addr, REGTEMP
- // MOVW REGTEMP, R
+ // MOVD foo(SB), R is actually
+ // MOVD addr, REGTMP
+ // MOVD REGTMP, R
// where addr is the address of the DWORD containing the address of foo.
- if p.As == AMOVD || c == C_ADDR || c == C_VCON {
+ if p.As == AMOVD || c == C_ADDR || c == C_VCON || int64(lit) != int64(int32(lit)) || uint64(lit) != uint64(uint32(lit)) {
+		// Conservative: we don't know whether signed or unsigned extension
+		// is wanted, so in the ambiguous case store a 64-bit literal.
t.As = ADWORD
sz = 8
}
@@ -740,29 +748,12 @@ func addpool(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
t.To.Type = a.Type
t.To.Name = a.Name
- /* This is here to work around a bug where we generate negative
- operands that match C_MOVCON, but we use them with
- instructions that only accept unsigned immediates. This
- will cause oplook to return a variant of the instruction
- that loads the negative constant from memory, rather than
- using the immediate form. Because of that load, we get here,
- so we need to know what to do with C_MOVCON.
-
- The correct fix is to use the "negation" instruction variant,
- e.g. CMN $1, R instead of CMP $-1, R, or SUB $1, R instead
- of ADD $-1, R. */
- case C_MOVCON,
-
- /* This is here because MOV uint12<<12, R is disabled in optab.
- Because of this, we need to load the constant from memory. */
- C_ADDCON,
-
- /* These are here because they are disabled in optab.
- Because of this, we need to load the constant from memory. */
- C_BITCON,
- C_ABCON,
- C_MBCON,
- C_PSAUTO,
+ /* This is here because MOV uint12<<12, R is disabled in optab.
+ Because of this, we need to load the constant from memory. */
+ case C_ADDCON:
+ fallthrough
+
+ case C_PSAUTO,
C_PPAUTO,
C_UAUTO4K,
C_UAUTO8K,
@@ -790,7 +781,7 @@ func addpool(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
}
t.To.Type = obj.TYPE_CONST
- t.To.Offset = ctxt.Instoffset
+ t.To.Offset = lit
break
}
@@ -844,11 +835,137 @@ func isaddcon(v int64) bool {
return v <= 0xFFF
}
-func isbitcon(v uint64) bool {
- /* fancy bimm32 or bimm64? */
- // TODO(aram):
- return false
- // return findmask(v) != nil || (v>>32) == 0 && findmask(v|(v<<32)) != nil
+// isbitcon reports whether a constant can be encoded into a logical instruction.
+// A bitcon has a binary form of a repeated bit sequence of length 2, 4, 8, 16, 32, or 64,
+// where the unit itself is a rotation (w.r.t. the unit length) of a contiguous sequence of ones.
+// Special cases: 0 and -1 are not bitcons.
+// This function runs against virtually all constants, so it needs to be fast;
+// for that reason, bitcon testing and bitcon encoding are separate functions.
+func isbitcon(x uint64) bool {
+ if x == 1<<64-1 || x == 0 {
+ return false
+ }
+ // determine the period and sign-extend a unit to 64 bits
+ switch {
+ case x != x>>32|x<<32:
+ // period is 64
+ // nothing to do
+ case x != x>>16|x<<48:
+ // period is 32
+ x = uint64(int64(int32(x)))
+ case x != x>>8|x<<56:
+ // period is 16
+ x = uint64(int64(int16(x)))
+ case x != x>>4|x<<60:
+ // period is 8
+ x = uint64(int64(int8(x)))
+ default:
+ // period is 4 or 2, always true
+ // 0001, 0010, 0100, 1000 -- 0001 rotate
+ // 0011, 0110, 1100, 1001 -- 0011 rotate
+ // 0111, 1011, 1101, 1110 -- 0111 rotate
+ // 0101, 1010 -- 01 rotate, repeat
+ return true
+ }
+ return sequenceOfOnes(x) || sequenceOfOnes(^x)
+}
+
+// sequenceOfOnes reports whether a constant is a contiguous run of ones in binary, allowing leading and trailing zeros.
+func sequenceOfOnes(x uint64) bool {
+ y := x & -x // lowest set bit of x. x is good iff x+y is a power of 2
+ y += x
+ return (y-1)&y == 0
+}
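
A standalone sketch exercising the two predicates above. The function bodies are transcribed from this patch (with comments compressed); the sample constants are arbitrary.

package main

import "fmt"

func sequenceOfOnes(x uint64) bool {
	y := x & -x // lowest set bit of x; x is good iff x+y is a power of 2
	y += x
	return (y-1)&y == 0
}

func isbitcon(x uint64) bool {
	if x == 1<<64-1 || x == 0 {
		return false
	}
	switch {
	case x != x>>32|x<<32: // period 64
	case x != x>>16|x<<48: // period 32
		x = uint64(int64(int32(x)))
	case x != x>>8|x<<56: // period 16
		x = uint64(int64(int16(x)))
	case x != x>>4|x<<60: // period 8
		x = uint64(int64(int8(x)))
	default: // period 4 or 2: always encodable
		return true
	}
	return sequenceOfOnes(x) || sequenceOfOnes(^x)
}

func main() {
	for _, c := range []uint64{0xff, 0xff0, 0x0f0f0f0f0f0f0f0f, 0x5555555555555555, 0x12345678} {
		fmt.Printf("%#016x bitcon=%v\n", c, isbitcon(c)) // last one is false
	}
}
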
+
+// bitconEncode returns the encoding of a bitcon used in logical instructions.
+// x is known to be a bitcon.
+// A bitcon is a sequence of n ones at the low bits (i.e. 1<<n - 1), right rotated
+// by R bits, and repeated with a period of 64, 32, 16, 8, 4, or 2.
+// It is encoded in logical instructions with 3 bitfields
+// N (1 bit) : R (6 bits) : S (6 bits), where
+// N=1            -- period=64
+// N=0, S=0xxxxx  -- period=32
+// N=0, S=10xxxx  -- period=16
+// N=0, S=110xxx  -- period=8
+// N=0, S=1110xx  -- period=4
+// N=0, S=11110x  -- period=2
+// R is the rotate amount, and the low bits of S hold n-1.
+func bitconEncode(x uint64, mode int) uint32 {
+ var period uint32
+ // determine the period and sign-extend a unit to 64 bits
+ switch {
+ case x != x>>32|x<<32:
+ period = 64
+ case x != x>>16|x<<48:
+ period = 32
+ x = uint64(int64(int32(x)))
+ case x != x>>8|x<<56:
+ period = 16
+ x = uint64(int64(int16(x)))
+ case x != x>>4|x<<60:
+ period = 8
+ x = uint64(int64(int8(x)))
+ case x != x>>2|x<<62:
+ period = 4
+ x = uint64(int64(x<<60) >> 60)
+ default:
+ period = 2
+ x = uint64(int64(x<<62) >> 62)
+ }
+ neg := false
+ if int64(x) < 0 {
+ x = ^x
+ neg = true
+ }
+ y := x & -x // lowest set bit of x.
+ s := log2(y)
+ n := log2(x+y) - s // x (or ^x) is a sequence of n ones left shifted by s bits
+ if neg {
+ // ^x is a sequence of n ones left shifted by s bits
+ // adjust n, s for x
+ s = n + s
+ n = period - n
+ }
+
+ N := uint32(0)
+ if mode == 64 && period == 64 {
+ N = 1
+ }
+ R := (period - s) & (period - 1) & uint32(mode-1) // shift amount of right rotate
+	S := (n - 1) | 63&^(period<<1-1) // low bits = #ones - 1, high bits encode the period
+ return N<<22 | R<<16 | S<<10
+}
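
A worked example of the rules above, with values hand-checked (illustrative only): $0xff0 is a run of eight ones rotated right by 60, so in 64-bit mode it encodes as N=1, R=60, S=7. The loop-based log2 here is a simplified stand-in for the patch's branchless version.

package main

import "fmt"

func log2(x uint64) (n uint32) { // floor(log2), x > 0
	for x > 1 {
		x >>= 1
		n++
	}
	return
}

func main() {
	// Follow bitconEncode's steps for $0xff0 in 64-bit mode.
	// The period is 64: no smaller rotation maps x onto itself.
	x := uint64(0xff0)
	y := x & -x        // 0x10: lowest set bit
	s := log2(y)       // 4: the run of ones starts at bit 4
	n := log2(x+y) - s // 8: length of the run
	N := uint32(1)     // period 64 in 64-bit mode
	R := (64 - s) & 63 // 60: right-rotate amount
	S := n - 1         // 7: high bits of S stay 0 for period 64
	fmt.Printf("N:R:S = %d:%d:%d -> %#x\n", N, R, S, N<<22|R<<16|S<<10)
}
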
+
+func log2(x uint64) uint32 {
+ if x == 0 {
+ panic("log2 of 0")
+ }
+ n := uint32(0)
+ if x >= 1<<32 {
+ x >>= 32
+ n += 32
+ }
+ if x >= 1<<16 {
+ x >>= 16
+ n += 16
+ }
+ if x >= 1<<8 {
+ x >>= 8
+ n += 8
+ }
+ if x >= 1<<4 {
+ x >>= 4
+ n += 4
+ }
+ if x >= 1<<2 {
+ x >>= 2
+ n += 2
+ }
+ if x >= 1<<1 {
+ x >>= 1
+ n += 1
+ }
+ return n
}
func autoclass(l int64) int {
@@ -1019,6 +1136,9 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
}
if isaddcon(v) {
if v <= 0xFFF {
+ if isbitcon(uint64(v)) {
+ return C_ABCON0
+ }
return C_ADDCON0
}
if isbitcon(uint64(v)) {
@@ -1085,6 +1205,10 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
return C_GOK
}
+func oclass(a *obj.Addr) int {
+ return int(a.Class) - 1
+}
+
func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
a1 := int(p.Optab)
if a1 != 0 {
@@ -1151,17 +1275,17 @@ func cmp(a int, b int) bool {
}
case C_ADDCON0:
- if b == C_ZCON {
+ if b == C_ZCON || b == C_ABCON0 {
return true
}
case C_ADDCON:
- if b == C_ZCON || b == C_ADDCON0 || b == C_ABCON {
+ if b == C_ZCON || b == C_ABCON0 || b == C_ADDCON0 || b == C_ABCON {
return true
}
case C_BITCON:
- if b == C_ABCON || b == C_MBCON {
+ if b == C_ABCON0 || b == C_ABCON || b == C_MBCON {
return true
}
@@ -1171,7 +1295,7 @@ func cmp(a int, b int) bool {
}
case C_LCON:
- if b == C_ZCON || b == C_BITCON || b == C_ADDCON || b == C_ADDCON0 || b == C_ABCON || b == C_MBCON || b == C_MOVCON {
+ if b == C_ZCON || b == C_BITCON || b == C_ADDCON || b == C_ADDCON0 || b == C_ABCON || b == C_ABCON0 || b == C_MBCON || b == C_MOVCON {
return true
}
@@ -2306,34 +2430,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o2 = olsr12u(ctxt, int32(opldr12(ctxt, p.As)), ((v-hi)>>uint(s))&0xFFF, REGTMP, int(p.To.Reg))
case 32: /* mov $con, R -> movz/movn */
- r := 32
-
- if p.As == AMOVD {
- r = 64
- }
- d := p.From.Offset
- s := movcon(d)
- if s < 0 || s >= r {
- d = ^d
- s = movcon(d)
- if s < 0 || s >= r {
- ctxt.Diag("impossible move wide: %#x\n%v", uint64(p.From.Offset), p)
- }
- if p.As == AMOVD {
- o1 = opirr(ctxt, AMOVN)
- } else {
- o1 = opirr(ctxt, AMOVNW)
- }
- } else {
- if p.As == AMOVD {
- o1 = opirr(ctxt, AMOVZ)
- } else {
- o1 = opirr(ctxt, AMOVZW)
- }
- }
-
- rt := int(p.To.Reg)
- o1 |= uint32((((d >> uint(s*16)) & 0xFFFF) << 5) | int64((uint32(s)&3)<<21) | int64(rt&31))
+ o1 = omovconst(ctxt, p.As, p, &p.From, int(p.To.Reg))
case 33: /* movk $uimm16 << pos */
o1 = opirr(ctxt, p.As)
@@ -2601,8 +2698,26 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 |= uint32((p.From.Offset & 0x7F) << 5)
- case 53: /* and/or/eor/bic/... $bimmN, Rn, Rd -> op (N,r,s), Rn, Rd */
- ctxt.Diag("bitmask immediate not implemented\n%v", p)
+ case 53: /* and/or/eor/bic/... $bitcon, Rn, Rd */
+ a := p.As
+ rt := int(p.To.Reg)
+ r := int(p.Reg)
+ if r == 0 {
+ r = rt
+ }
+ mode := 64
+ v := uint64(p.From.Offset)
+ switch p.As {
+ case AANDW, AORRW, AEORW, AANDSW:
+ mode = 32
+ case ABIC, AORN, AEON, ABICS:
+ v = ^v
+ case ABICW, AORNW, AEONW, ABICSW:
+ v = ^v
+ mode = 32
+ }
+ o1 = opirr(ctxt, a)
+ o1 |= bitconEncode(v, mode) | uint32(r&31)<<5 | uint32(rt&31)
case 54: /* floating point arith */
o1 = oprrr(ctxt, p.As)
@@ -2694,6 +2809,31 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = ADR(0, uint32(d), uint32(p.To.Reg))
+ case 62: /* op $movcon, [R], R -> mov $movcon, REGTMP + op REGTMP, [R], R */
+ if p.Reg == REGTMP {
+ ctxt.Diag("cannot use REGTMP as source: %v\n", p)
+ }
+ o1 = omovconst(ctxt, AMOVD, p, &p.From, REGTMP)
+
+ rt := int(p.To.Reg)
+ if p.To.Type == obj.TYPE_NONE {
+ rt = REGZERO
+ }
+ r := int(p.Reg)
+ if r == 0 {
+ r = rt
+ }
+ if p.To.Type != obj.TYPE_NONE && (p.To.Reg == REGSP || r == REGSP) {
+ o2 = opxrrr(ctxt, p.As)
+ o2 |= REGTMP & 31 << 16
+ o2 |= LSL0_64
+ } else {
+ o2 = oprrr(ctxt, p.As)
+ o2 |= REGTMP & 31 << 16 /* shift is 0 */
+ }
+ o2 |= uint32(r&31) << 5
+ o2 |= uint32(rt & 31)
+
/* reloc ops */
case 64: /* movT R,addr -> adrp + add + movT R, (REGTMP) */
o1 = ADR(1, 0, REGTMP)
@@ -3374,28 +3514,28 @@ func opirr(ctxt *obj.Link, a obj.As) uint32 {
return 1<<31 | 0x10<<24
/* op $bimm, Rn, Rd */
- case AAND:
+ case AAND, ABIC:
return S64 | 0<<29 | 0x24<<23
- case AANDW:
+ case AANDW, ABICW:
return S32 | 0<<29 | 0x24<<23 | 0<<22
- case AORR:
+ case AORR, AORN:
return S64 | 1<<29 | 0x24<<23
- case AORRW:
+ case AORRW, AORNW:
return S32 | 1<<29 | 0x24<<23 | 0<<22
- case AEOR:
+ case AEOR, AEON:
return S64 | 2<<29 | 0x24<<23
- case AEORW:
+ case AEORW, AEONW:
return S32 | 2<<29 | 0x24<<23 | 0<<22
- case AANDS:
+ case AANDS, ABICS:
return S64 | 3<<29 | 0x24<<23
- case AANDSW:
+ case AANDSW, ABICSW:
return S32 | 3<<29 | 0x24<<23 | 0<<22
case AASR:
@@ -4100,6 +4240,52 @@ func omovlit(ctxt *obj.Link, as obj.As, p *obj.Prog, a *obj.Addr, dr int) uint32
return uint32(o1)
}
+// omovconst loads a constant (of class MOVCON or BITCON) from a into register rt.
+func omovconst(ctxt *obj.Link, as obj.As, p *obj.Prog, a *obj.Addr, rt int) (o1 uint32) {
+ if c := oclass(a); c == C_BITCON || c == C_ABCON || c == C_ABCON0 {
+ // or $bitcon, REGZERO, rt
+ mode := 64
+ var as1 obj.As
+ switch as {
+ case AMOVW:
+ as1 = AORRW
+ mode = 32
+ case AMOVD:
+ as1 = AORR
+ }
+ o1 = opirr(ctxt, as1)
+ o1 |= bitconEncode(uint64(a.Offset), mode) | uint32(REGZERO&31)<<5 | uint32(rt&31)
+ return o1
+ }
+
+ r := 32
+ if as == AMOVD {
+ r = 64
+ }
+ d := a.Offset
+ s := movcon(d)
+ if s < 0 || s >= r {
+ d = ^d
+ s = movcon(d)
+ if s < 0 || s >= r {
+ ctxt.Diag("impossible move wide: %#x\n%v", uint64(a.Offset), p)
+ }
+ if as == AMOVD {
+ o1 = opirr(ctxt, AMOVN)
+ } else {
+ o1 = opirr(ctxt, AMOVNW)
+ }
+ } else {
+ if as == AMOVD {
+ o1 = opirr(ctxt, AMOVZ)
+ } else {
+ o1 = opirr(ctxt, AMOVZW)
+ }
+ }
+ o1 |= uint32((((d >> uint(s*16)) & 0xFFFF) << 5) | int64((uint32(s)&3)<<21) | int64(rt&31))
+ return o1
+}
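
A sketch of the MOVZ/MOVN selection in omovconst above. The movcon helper is reproduced from elsewhere in asm7.go; treat the transcription as an assumption rather than a verbatim copy.

package main

import "fmt"

// movcon reports which 16-bit half-word holds the constant,
// or -1 if it does not fit a single MOVZ/MOVN half-word.
// (Assumed transcription of the asm7.go helper used above.)
func movcon(v int64) int {
	for s := 0; s < 64; s += 16 {
		if uint64(v)&^(uint64(0xFFFF)<<uint(s)) == 0 {
			return s / 16
		}
	}
	return -1
}

func main() {
	// MOVD $-0x10000, R1: d itself fits no half-word, but ^d does,
	// so the assembler emits MOVN (move wide with NOT).
	d := int64(-0x10000) // 0xffffffffffff0000
	if s := movcon(d); s >= 0 {
		fmt.Printf("MOVZ $%#x<<%d\n", uint64(d)>>uint(16*s), 16*s)
	} else if s = movcon(^d); s >= 0 {
		fmt.Printf("MOVN $%#x<<%d\n", uint64(^d)>>uint(16*s), 16*s)
	} else {
		fmt.Println("needs a MOVK sequence or the literal pool")
	}
}
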
+
func opbfm(ctxt *obj.Link, a obj.As, r int, s int, rf int, rt int) uint32 {
var c uint32
o := opirr(ctxt, a)
diff --git a/src/cmd/internal/obj/arm64/obj7.go b/src/cmd/internal/obj/arm64/obj7.go
index ffa1b416d6..3f606dd697 100644
--- a/src/cmd/internal/obj/arm64/obj7.go
+++ b/src/cmd/internal/obj/arm64/obj7.go
@@ -279,20 +279,30 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
// Rewrite negative immediates as positive immediates with
// complementary instruction.
switch p.As {
- case AADD,
- AADDW,
- ASUB,
- ASUBW,
- ACMP,
- ACMPW,
- ACMN,
- ACMNW:
- if p.From.Type == obj.NAME_EXTERN && p.From.Offset < 0 {
+ case AADD, ASUB, ACMP, ACMN:
+ if p.From.Type == obj.TYPE_CONST && p.From.Offset < 0 && p.From.Offset != -1<<63 {
p.From.Offset = -p.From.Offset
p.As = complements[p.As]
}
+ case AADDW, ASUBW, ACMPW, ACMNW:
+ if p.From.Type == obj.TYPE_CONST && p.From.Offset < 0 && int32(p.From.Offset) != -1<<31 {
+ p.From.Offset = -p.From.Offset
+ p.As = complements[p.As]
+ }
+ }
- break
+	// For 32-bit logical instructions with a constant, rewrite the
+	// high 32 bits to be a repetition of the low 32 bits, so that
+	// the BITCON test can be shared between the 32-bit and 64-bit
+	// cases. 32-bit ops zero the high 32 bits of the destination
+	// register anyway.
+ switch p.As {
+ case AANDW, AORRW, AEORW, AANDSW:
+ if p.From.Type == obj.TYPE_CONST {
+ v := p.From.Offset & 0xffffffff
+ p.From.Offset = v | v<<32
+ }
}
if ctxt.Flag_dynlink {
diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go
index b6861f4c1e..3c66eecbf0 100644
--- a/src/cmd/internal/obj/link.go
+++ b/src/cmd/internal/obj/link.go
@@ -112,13 +112,17 @@ import (
// val = int32(y)
//
// reg<<shift, reg>>shift, reg->shift, reg@>shift
-// Shifted register value, for ARM.
+// Shifted register value, for ARM and ARM64.
// In this form, reg must be a register and shift can be a register or an integer constant.
// Encoding:
// type = TYPE_SHIFT
+// On ARM:
// offset = (reg&15) | shifttype<<5 | count
// shifttype = 0, 1, 2, 3 for <<, >>, ->, @>
// count = (reg&15)<<8 | 1<<4 for a register shift count, (n&31)<<7 for an integer constant.
+// On ARM64:
+// offset = (reg&31)<<16 | shifttype<<22 | (count&63)<<10
+// shifttype = 0, 1, 2 for <<, >>, ->
//
// (reg, reg)
// A destination register pair. When used as the last argument of an instruction,
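
A quick round-trip through the new ARM64 layout (illustrative; the decode mirrors the arm64 case added to Dconv in util.go below):

package main

import "fmt"

func main() {
	// Encode "R3<<5" per the ARM64 TYPE_SHIFT layout documented above:
	// offset = (reg&31)<<16 | shifttype<<22 | (count&63)<<10
	reg, shifttype, count := int64(3), int64(0), int64(5)
	off := (reg&31)<<16 | shifttype<<22 | (count&63)<<10

	// Decode it back the way Dconv's arm64 case does.
	ops := "<<>>->@>"
	op := ops[((off>>22)&3)<<1:]
	fmt.Printf("R%d%c%c%d\n", (off>>16)&31, op[0], op[1], (off>>10)&63) // R3<<5
}
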
diff --git a/src/cmd/internal/obj/ppc64/a.out.go b/src/cmd/internal/obj/ppc64/a.out.go
index 8e58c59238..eb0baaf6df 100644
--- a/src/cmd/internal/obj/ppc64/a.out.go
+++ b/src/cmd/internal/obj/ppc64/a.out.go
@@ -185,6 +185,15 @@ const (
NOSCHED = 1 << 9
)
+// Bit settings from the CR
+
+const (
+ C_COND_LT = iota // 0 result is negative
+ C_COND_GT // 1 result is positive
+ C_COND_EQ // 2 result is zero
+ C_COND_SO // 3 summary overflow
+)
+
const (
C_NONE = iota
C_REG
@@ -210,8 +219,8 @@ const (
C_LAUTO
C_SEXT
C_LEXT
- C_ZOREG
- C_SOREG
+ C_ZOREG // conjecture: either (1) register + zeroed offset, or (2) "R0" implies zero or C_REG
+ C_SOREG // register + signed offset
C_LOREG
C_FPSCR
C_MSR
@@ -315,6 +324,8 @@ const (
AFMOVDU
AFMOVS
AFMOVSU
+ AFMOVSX
+ AFMOVSZ
AFMSUB
AFMSUBCC
AFMSUBS
diff --git a/src/cmd/internal/obj/ppc64/anames.go b/src/cmd/internal/obj/ppc64/anames.go
index eb42c9a953..a2e5cf46d8 100644
--- a/src/cmd/internal/obj/ppc64/anames.go
+++ b/src/cmd/internal/obj/ppc64/anames.go
@@ -91,6 +91,8 @@ var Anames = []string{
"FMOVDU",
"FMOVS",
"FMOVSU",
+ "FMOVSX",
+ "FMOVSZ",
"FMSUB",
"FMSUBCC",
"FMSUBS",
diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go
index 5366809d2f..52cae12333 100644
--- a/src/cmd/internal/obj/ppc64/asm9.go
+++ b/src/cmd/internal/obj/ppc64/asm9.go
@@ -53,7 +53,7 @@ type Optab struct {
a2 uint8
a3 uint8
a4 uint8
- type_ int8
+ type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
size int8
param int16
}
@@ -310,6 +310,12 @@ var optab = []Optab{
{AFMOVD, C_FREG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
{AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
{AFMOVD, C_FREG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
+ {AFMOVSX, C_ZOREG, C_REG, C_NONE, C_FREG, 45, 4, 0},
+ {AFMOVSX, C_ZOREG, C_NONE, C_NONE, C_FREG, 45, 4, 0},
+ {AFMOVSX, C_FREG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
+ {AFMOVSX, C_FREG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
+ {AFMOVSZ, C_ZOREG, C_REG, C_NONE, C_FREG, 45, 4, 0},
+ {AFMOVSZ, C_ZOREG, C_NONE, C_NONE, C_FREG, 45, 4, 0},
{ASYNC, C_NONE, C_NONE, C_NONE, C_NONE, 46, 4, 0},
{AWORD, C_LCON, C_NONE, C_NONE, C_NONE, 40, 4, 0},
{ADWORD, C_LCON, C_NONE, C_NONE, C_NONE, 31, 8, 0},
@@ -920,7 +926,7 @@ func buildop(ctxt *obj.Link) {
switch r {
default:
ctxt.Diag("unknown op in build: %v", obj.Aconv(r))
- log.Fatalf("bad code")
+ log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", obj.Aconv(r))
case ADCBF: /* unary indexed: op (b+a); op (b) */
opset(ADCBI, r0)
@@ -1265,6 +1271,8 @@ func buildop(ctxt *obj.Link) {
case AADD,
AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra; andis. $uimm,Rs,Ra */
+ AFMOVSX,
+ AFMOVSZ,
ALSW,
AMOVW,
/* load/store/move word with sign extension; special 32-bit move; move 32-bit literals */
@@ -3238,6 +3246,10 @@ func oploadx(ctxt *obj.Link, a obj.As) uint32 {
return OPVCC(31, 535, 0, 0) /* lfsx */
case AFMOVSU:
return OPVCC(31, 567, 0, 0) /* lfsux */
+ case AFMOVSX:
+ return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
+ case AFMOVSZ:
+ return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
case AMOVH:
return OPVCC(31, 343, 0, 0) /* lhax */
case AMOVHU:
@@ -3332,6 +3344,8 @@ func opstorex(ctxt *obj.Link, a obj.As) uint32 {
return OPVCC(31, 663, 0, 0) /* stfsx */
case AFMOVSU:
return OPVCC(31, 695, 0, 0) /* stfsux */
+ case AFMOVSX:
+ return OPVCC(31, 983, 0, 0) /* stfiwx */
case AMOVHZ, AMOVH:
return OPVCC(31, 407, 0, 0) /* sthx */
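
For reference, the base instruction words the new entries produce. The opvcc helper below mirrors asm9.go's OPVCC (primary opcode, extended opcode, OE bit, Rc bit); the transcription is an assumption made for this illustration.

package main

import "fmt"

func opvcc(o, xo, oe, rc uint32) uint32 {
	return o<<26 | xo<<1 | oe<<10 | rc&1
}

func main() {
	// lfiwax (31/855), lfiwzx (31/887), stfiwx (31/983), with RT/RA/RB zero.
	fmt.Printf("lfiwax %#x lfiwzx %#x stfiwx %#x\n",
		opvcc(31, 855, 0, 0), opvcc(31, 887, 0, 0), opvcc(31, 983, 0, 0))
}
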
diff --git a/src/cmd/internal/obj/util.go b/src/cmd/internal/obj/util.go
index 18813c35a8..c8f8760af1 100644
--- a/src/cmd/internal/obj/util.go
+++ b/src/cmd/internal/obj/util.go
@@ -286,14 +286,23 @@ func Dconv(p *Prog, a *Addr) string {
case TYPE_SHIFT:
v := int(a.Offset)
- op := "<<>>->@>"[((v>>5)&3)<<1:]
- if v&(1<<4) != 0 {
- str = fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15)
- } else {
- str = fmt.Sprintf("R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31)
- }
- if a.Reg != 0 {
- str += fmt.Sprintf("(%v)", Rconv(int(a.Reg)))
+ ops := "<<>>->@>"
+ switch goarch := Getgoarch(); goarch {
+ case "arm":
+ op := ops[((v>>5)&3)<<1:]
+ if v&(1<<4) != 0 {
+ str = fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15)
+ } else {
+ str = fmt.Sprintf("R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31)
+ }
+ if a.Reg != 0 {
+ str += fmt.Sprintf("(%v)", Rconv(int(a.Reg)))
+ }
+ case "arm64":
+ op := ops[((v>>22)&3)<<1:]
+ str = fmt.Sprintf("R%d%c%c%d", (v>>16)&31, op[0], op[1], (v>>10)&63)
+ default:
+ panic("TYPE_SHIFT is not supported on " + goarch)
}
case TYPE_REGREG:
diff --git a/src/cmd/internal/obj/x86/asm6.go b/src/cmd/internal/obj/x86/asm6.go
index 676da40ba5..8a299fefe3 100644
--- a/src/cmd/internal/obj/x86/asm6.go
+++ b/src/cmd/internal/obj/x86/asm6.go
@@ -2835,7 +2835,9 @@ func asmandsz(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r int, rex int, m64 int)
goto bad
}
if p.Mode == 32 && ctxt.Flag_shared {
- base = REG_CX
+ // The base register has already been set. It holds the PC
+ // of this instruction returned by a PC-reading thunk.
+ // See obj6.go:rewriteToPcrel.
} else {
base = REG_NONE
}
@@ -2880,7 +2882,9 @@ func asmandsz(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r int, rex int, m64 int)
ctxt.Diag("bad addr: %v", p)
}
if p.Mode == 32 && ctxt.Flag_shared {
- base = REG_CX
+ // The base register has already been set. It holds the PC
+ // of this instruction returned by a PC-reading thunk.
+ // See obj6.go:rewriteToPcrel.
} else {
base = REG_NONE
}
@@ -4016,25 +4020,26 @@ func doasm(ctxt *obj.Link, p *obj.Prog) {
obj.Hnacl:
if ctxt.Flag_shared {
// Note that this is not generating the same insns as the other cases.
- // MOV TLS, R_to
+ // MOV TLS, dst
// becomes
- // call __x86.get_pc_thunk.cx
- // movl (gotpc + g@gotntpoff)(%ecx),$R_To
+ // call __x86.get_pc_thunk.dst
+ // movl (gotpc + g@gotntpoff)(dst), dst
// which is encoded as
- // call __x86.get_pc_thunk.cx
- // movq 0(%ecx), R_to
+ // call __x86.get_pc_thunk.dst
+ // movq 0(dst), dst
// and R_CALL & R_TLS_IE relocs. This all assumes the only tls variable we access
// is g, which we can't check here, but will when we assemble the second
// instruction.
+ dst := p.To.Reg
ctxt.AsmBuf.Put1(0xe8)
r = obj.Addrel(ctxt.Cursym)
r.Off = int32(p.Pc + int64(ctxt.AsmBuf.Len()))
r.Type = obj.R_CALL
r.Siz = 4
- r.Sym = obj.Linklookup(ctxt, "__x86.get_pc_thunk.cx", 0)
+ r.Sym = obj.Linklookup(ctxt, "__x86.get_pc_thunk."+strings.ToLower(Rconv(int(dst))), 0)
ctxt.AsmBuf.PutInt32(0)
- ctxt.AsmBuf.Put2(0x8B, byte(2<<6|reg[REG_CX]|(reg[p.To.Reg]<<3)))
+ ctxt.AsmBuf.Put2(0x8B, byte(2<<6|reg[dst]|(reg[dst]<<3)))
r = obj.Addrel(ctxt.Cursym)
r.Off = int32(p.Pc + int64(ctxt.AsmBuf.Len()))
r.Type = obj.R_TLS_IE
diff --git a/src/cmd/internal/obj/x86/obj6.go b/src/cmd/internal/obj/x86/obj6.go
index 75638a0183..c4b0d89cda 100644
--- a/src/cmd/internal/obj/x86/obj6.go
+++ b/src/cmd/internal/obj/x86/obj6.go
@@ -36,6 +36,7 @@ import (
"fmt"
"log"
"math"
+ "strings"
)
func CanUse1InsnTLS(ctxt *obj.Link) bool {
@@ -333,6 +334,13 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog) {
lea = ALEAL
mov = AMOVL
reg = REG_CX
+ if p.As == ALEAL && p.To.Reg != p.From.Reg && p.To.Reg != p.From.Index {
+ // Special case: clobber the destination register with
+ // the PC so we don't have to clobber CX.
+ // The SSA backend depends on CX not being clobbered across LEAL.
+ // See cmd/compile/internal/ssa/gen/386.rules (search for Flag_shared).
+ reg = p.To.Reg
+ }
}
if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO {
@@ -391,7 +399,7 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog) {
dest = p.To
p.As = mov
p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_CX
+ p.To.Reg = reg
p.To.Sym = nil
p.To.Name = obj.NAME_NONE
}
@@ -412,7 +420,7 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog) {
q.As = pAs
q.To = dest
q.From.Type = obj.TYPE_REG
- q.From.Reg = REG_CX
+ q.From.Reg = reg
}
}
if p.From3 != nil && p.From3.Name == obj.NAME_EXTERN {
@@ -509,7 +517,7 @@ func rewriteToPcrel(ctxt *obj.Link, p *obj.Prog) {
return
}
// Any Prog (aside from the above special cases) with an Addr with Name ==
- // NAME_EXTERN, NAME_STATIC or NAME_GOTREF has a CALL __x86.get_pc_thunk.cx
+ // NAME_EXTERN, NAME_STATIC or NAME_GOTREF has a CALL __x86.get_pc_thunk.XX
// inserted before it.
isName := func(a *obj.Addr) bool {
if a.Sym == nil || (a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR) || a.Reg != 0 {
@@ -542,12 +550,18 @@ func rewriteToPcrel(ctxt *obj.Link, p *obj.Prog) {
if !isName(&p.From) && !isName(&p.To) && (p.From3 == nil || !isName(p.From3)) {
return
}
+ var dst int16 = REG_CX
+ if (p.As == ALEAL || p.As == AMOVL) && p.To.Reg != p.From.Reg && p.To.Reg != p.From.Index {
+ dst = p.To.Reg
+ // Why? See the comment near the top of rewriteToUseGot above.
+ // AMOVLs might be introduced by the GOT rewrites.
+ }
q := obj.Appendp(ctxt, p)
q.RegTo2 = 1
r := obj.Appendp(ctxt, q)
r.RegTo2 = 1
q.As = obj.ACALL
- q.To.Sym = obj.Linklookup(ctxt, "__x86.get_pc_thunk.cx", 0)
+ q.To.Sym = obj.Linklookup(ctxt, "__x86.get_pc_thunk."+strings.ToLower(Rconv(int(dst))), 0)
q.To.Type = obj.TYPE_MEM
q.To.Name = obj.NAME_EXTERN
q.To.Sym.Local = true
@@ -557,6 +571,15 @@ func rewriteToPcrel(ctxt *obj.Link, p *obj.Prog) {
r.From3 = p.From3
r.Reg = p.Reg
r.To = p.To
+ if isName(&p.From) {
+ r.From.Reg = dst
+ }
+ if isName(&p.To) {
+ r.To.Reg = dst
+ }
+ if p.From3 != nil && isName(p.From3) {
+ r.From3.Reg = dst
+ }
obj.Nopout(p)
}
diff --git a/src/cmd/link/internal/x86/asm.go b/src/cmd/link/internal/x86/asm.go
index cc8f96f27f..b826d70619 100644
--- a/src/cmd/link/internal/x86/asm.go
+++ b/src/cmd/link/internal/x86/asm.go
@@ -55,21 +55,37 @@ func gentext() {
return
}
- thunkfunc := ld.Linklookup(ld.Ctxt, "__x86.get_pc_thunk.cx", 0)
- thunkfunc.Type = obj.STEXT
- thunkfunc.Attr |= ld.AttrLocal
- thunkfunc.Attr |= ld.AttrReachable
- o := func(op ...uint8) {
- for _, op1 := range op {
- ld.Adduint8(ld.Ctxt, thunkfunc, op1)
+ // Generate little thunks that load the PC of the next instruction into a register.
+ for _, r := range [...]struct {
+ name string
+ num uint8
+ }{
+ {"ax", 0},
+ {"cx", 1},
+ {"dx", 2},
+ {"bx", 3},
+ // sp
+ {"bp", 5},
+ {"si", 6},
+ {"di", 7},
+ } {
+ thunkfunc := ld.Linklookup(ld.Ctxt, "__x86.get_pc_thunk."+r.name, 0)
+ thunkfunc.Type = obj.STEXT
+ thunkfunc.Attr |= ld.AttrLocal
+ thunkfunc.Attr |= ld.AttrReachable //TODO: remove?
+ o := func(op ...uint8) {
+ for _, op1 := range op {
+ ld.Adduint8(ld.Ctxt, thunkfunc, op1)
+ }
}
- }
- // 8b 0c 24 mov (%esp),%ecx
- o(0x8b, 0x0c, 0x24)
- // c3 ret
- o(0xc3)
+ // 8b 04 24 mov (%esp),%eax
+ // Destination register is in bits 3-5 of the middle byte, so add that in.
+ o(0x8b, 0x04+r.num<<3, 0x24)
+ // c3 ret
+ o(0xc3)
- ld.Ctxt.Textp = append(ld.Ctxt.Textp, thunkfunc)
+ ld.Ctxt.Textp = append(ld.Ctxt.Textp, thunkfunc)
+ }
addmoduledata := ld.Linklookup(ld.Ctxt, "runtime.addmoduledata", 0)
if addmoduledata.Type == obj.STEXT {
@@ -84,7 +100,7 @@ func gentext() {
initfunc.Type = obj.STEXT
initfunc.Attr |= ld.AttrLocal
initfunc.Attr |= ld.AttrReachable
- o = func(op ...uint8) {
+ o := func(op ...uint8) {
for _, op1 := range op {
ld.Adduint8(ld.Ctxt, initfunc, op1)
}
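
The `0x04 + r.num<<3` arithmetic in the thunk generator above builds a ModRM byte: mod=00, reg=destination, rm=100 (so an SIB byte follows, and 0x24 selects %esp as the base). A small sketch printing each generated thunk's bytes:

package main

import "fmt"

func main() {
	// mov (%esp), reg is 8b /r; the destination register number
	// occupies bits 3-5 of the ModRM byte.
	regs := []struct {
		name string
		num  uint8
	}{{"ax", 0}, {"cx", 1}, {"dx", 2}, {"bx", 3}, {"bp", 5}, {"si", 6}, {"di", 7}}
	for _, r := range regs {
		fmt.Printf("__x86.get_pc_thunk.%s: 8b %02x 24 c3\n", r.name, 0x04+r.num<<3)
	}
}
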
diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s
index ea11b2b2fb..b9dabc004f 100644
--- a/src/runtime/asm_386.s
+++ b/src/runtime/asm_386.s
@@ -193,9 +193,7 @@ TEXT runtime·asminit(SB),NOSPLIT,$0-0
// Other operating systems use double precision.
// Change to double precision to match them,
// and to match other hardware that only has double.
- PUSHL $0x27F
- FLDCW 0(SP)
- POPL AX
+ FLDCW runtime·controlWord64(SB)
RET
/*
@@ -1637,3 +1635,21 @@ TEXT runtime·addmoduledata(SB),NOSPLIT,$0-0
MOVL AX, moduledata_next(DX)
MOVL AX, runtime·lastmoduledatap(SB)
RET
+
+TEXT runtime·uint32tofloat64(SB),NOSPLIT,$8-12
+ MOVL a+0(FP), AX
+ MOVL AX, 0(SP)
+ MOVL $0, 4(SP)
+ FMOVV 0(SP), F0
+ FMOVDP F0, ret+4(FP)
+ RET
+
+TEXT runtime·float64touint32(SB),NOSPLIT,$12-12
+ FMOVD a+0(FP), F0
+ FSTCW 0(SP)
+ FLDCW runtime·controlWord64trunc(SB)
+ FMOVVP F0, 4(SP)
+ FLDCW 0(SP)
+ MOVL 4(SP), AX
+ MOVL AX, ret+8(FP)
+ RET
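
For reference (plain Go, not part of the patch): the FLDCW swap in float64touint32 exists because the x87 store instruction rounds using the current control word, while Go conversions must truncate toward zero.

package main

import "fmt"

func main() {
	// The semantics the assembly must match: conversion truncates,
	// it does not round to nearest.
	a, b := 2.9, 3.5
	fmt.Println(uint32(a), uint32(b)) // 2 3
}
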
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index 3b238cba1c..1efbace63c 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -220,10 +220,11 @@ var gcphase uint32
// The compiler knows about this variable.
// If you change it, you must change the compiler too.
var writeBarrier struct {
- enabled bool // compiler emits a check of this before calling write barrier
- needed bool // whether we need a write barrier for current GC phase
- cgo bool // whether we need a write barrier for a cgo check
- alignme uint64 // guarantee alignment so that compiler can use a 32 or 64-bit load
+ enabled bool // compiler emits a check of this before calling write barrier
+ pad [3]byte // compiler uses 32-bit load for "enabled" field
+ needed bool // whether we need a write barrier for current GC phase
+ cgo bool // whether we need a write barrier for a cgo check
+ alignme uint64 // guarantee alignment so that compiler can use a 32 or 64-bit load
}
// gcBlackenEnabled is 1 if mutator assists and background mark
diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go
index 302f58de5f..3c9eed5905 100644
--- a/src/runtime/runtime1.go
+++ b/src/runtime/runtime1.go
@@ -68,7 +68,6 @@ func goargs() {
if GOOS == "windows" {
return
}
-
argslice = make([]string, argc)
for i := int32(0); i < argc; i++ {
argslice[i] = gostringnocopy(argv_index(argv, i))
diff --git a/src/runtime/softfloat_arm.go b/src/runtime/softfloat_arm.go
index 5f609c80d3..3cbb4b3fc0 100644
--- a/src/runtime/softfloat_arm.go
+++ b/src/runtime/softfloat_arm.go
@@ -446,6 +446,23 @@ execute:
}
return 1
+ case 0xeeb10b40: // D[regd] = neg D[regm]
+ m.freglo[regd] = m.freglo[regm]
+ m.freghi[regd] = m.freghi[regm] ^ 1<<31
+
+ if fptrace > 0 {
+ print("*** D[", regd, "] = neg D[", regm, "] ", hex(m.freghi[regd]), "-", hex(m.freglo[regd]), "\n")
+ }
+ return 1
+
+ case 0xeeb10a40: // F[regd] = neg F[regm]
+ m.freglo[regd] = m.freglo[regm] ^ 1<<31
+
+ if fptrace > 0 {
+ print("*** F[", regd, "] = neg F[", regm, "] ", hex(m.freglo[regd]), "\n")
+ }
+ return 1
+
case 0xeeb40bc0: // D[regd] :: D[regm] (CMPD)
cmp, nan := fcmp64(fgetd(regd), fgetd(regm))
m.fflag = fstatus(nan, cmp)
@@ -464,6 +481,24 @@ execute:
}
return 1
+ case 0xeeb50bc0: // D[regd] :: 0 (CMPD)
+ cmp, nan := fcmp64(fgetd(regd), 0)
+ m.fflag = fstatus(nan, cmp)
+
+ if fptrace > 0 {
+ print("*** cmp D[", regd, "]::0 ", hex(m.fflag), "\n")
+ }
+ return 1
+
+ case 0xeeb50ac0: // F[regd] :: 0 (CMPF)
+ cmp, nan := fcmp64(f32to64(m.freglo[regd]), 0)
+ m.fflag = fstatus(nan, cmp)
+
+ if fptrace > 0 {
+ print("*** cmp F[", regd, "]::0 ", hex(m.fflag), "\n")
+ }
+ return 1
+
case 0xeeb70ac0: // D[regd] = F[regm] (MOVFD)
fputd(regd, f32to64(m.freglo[regm]))
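
The two neg cases above only flip the IEEE sign bit; for doubles that bit lives in the high word (freghi), hence the `^ 1<<31`. The equivalent in plain Go:

package main

import (
	"fmt"
	"math"
)

func main() {
	// Negating a float64 is a single bit flip: bit 63 overall,
	// i.e. bit 31 of the high 32-bit word the soft-float code keeps.
	bits := math.Float64bits(1.5)
	fmt.Println(math.Float64frombits(bits ^ 1<<63)) // -1.5
}
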
diff --git a/src/runtime/vlrt.go b/src/runtime/vlrt.go
index cd37828ae4..7300f55dad 100644
--- a/src/runtime/vlrt.go
+++ b/src/runtime/vlrt.go
@@ -255,3 +255,17 @@ func slowdodiv(n, d uint64) (q, r uint64) {
}
return q, n
}
+
+// Floating point control word values for GOARCH=386 GO386=387.
+// Bits 0-5 are bits to disable floating-point exceptions.
+// Bits 8-9 are the precision control:
+// 0 = single precision a.k.a. float32
+// 2 = double precision a.k.a. float64
+// Bits 10-11 are the rounding mode:
+// 0 = round to nearest (even on a tie)
+// 3 = round toward zero
+var (
+	controlWord64      uint16 = 0x3f + 2<<8 + 0<<10
+	controlWord32      uint16 = 0x3f + 0<<8 + 0<<10
+	controlWord64trunc uint16 = 0x3f + 2<<8 + 3<<10
+)
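
Decoding the new constants per the field layout described in the comment above (illustrative):

package main

import "fmt"

func main() {
	// Bits 0-5: exception mask; bits 8-9: precision; bits 10-11: rounding.
	for _, cw := range []uint16{0xe3f /* 64trunc */, 0x23f /* 64 */, 0x3f /* 32 */} {
		fmt.Printf("%#04x: mask %#x precision %d rounding %d\n",
			cw, cw&0x3f, cw>>8&3, cw>>10&3)
	}
}
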
diff --git a/test/live.go b/test/live.go
index da0606db71..8675840d0e 100644
--- a/test/live.go
+++ b/test/live.go
@@ -1,4 +1,4 @@
-// +build !amd64
+// +build !amd64,!arm,!amd64p32,!386,!arm64
// errorcheck -0 -l -live -wb=0
// Copyright 2014 The Go Authors. All rights reserved.
diff --git a/test/live_ssa.go b/test/live_ssa.go
index bd709246f6..881f139a20 100644
--- a/test/live_ssa.go
+++ b/test/live_ssa.go
@@ -1,4 +1,4 @@
-// +build amd64
+// +build amd64 arm amd64p32 386 arm64
// errorcheck -0 -l -live -wb=0
// Copyright 2014 The Go Authors. All rights reserved.
diff --git a/test/nilptr3.go b/test/nilptr3.go
index 8922729ec8..a81efb7d8e 100644
--- a/test/nilptr3.go
+++ b/test/nilptr3.go
@@ -2,7 +2,7 @@
// Fails on ppc64x because of incomplete optimization.
// See issues 9058.
// Same reason for mips64x and s390x.
-// +build !ppc64,!ppc64le,!mips64,!mips64le,!amd64,!s390x
+// +build !ppc64,!ppc64le,!mips64,!mips64le,!amd64,!s390x,!arm,!amd64p32,!386,!arm64
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
diff --git a/test/nilptr3_ssa.go b/test/nilptr3_ssa.go
index 0d690eb446..0974a84333 100644
--- a/test/nilptr3_ssa.go
+++ b/test/nilptr3_ssa.go
@@ -1,7 +1,5 @@
// errorcheck -0 -d=nil
-// Fails on ppc64x because of incomplete optimization.
-// See issues 9058.
-// +build !ppc64,!ppc64le,amd64
+// +build amd64 arm amd64p32 386 arm64
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
diff --git a/test/sliceopt.go b/test/sliceopt.go
index a830ab7cf6..9dc8a4444c 100644
--- a/test/sliceopt.go
+++ b/test/sliceopt.go
@@ -1,4 +1,4 @@
-// +build !amd64
+// +build !amd64,!arm,!amd64p32,!386,!arm64
// errorcheck -0 -d=append,slice
// Copyright 2015 The Go Authors. All rights reserved.