author    Cherry Zhang <cherryyz@google.com>  2016-07-21 12:42:49 -0400
committer Cherry Zhang <cherryyz@google.com>  2016-07-23 21:25:58 +0000
commit    ae9570a5b95b0b321f91f504661e9c36dc1caa0e (patch)
tree      26758ce906029ef4e90641c9059a5c6fd7a13dd7
parent    7bca2c599d593544d449bafc2fe1978c93b94a2f (diff)
[dev.ssa] cmd/compile: initial ARM64 SSA port
Mostly copied from ARM port, with instruction names and Prog fields
adjusted, and 64-bit int ops added. Not complete. Fib compiles and
runs correctly.

Change-Id: Id3ecb0d4b571200a035344b3e8e4408769f76221
Reviewed-on: https://go-review.googlesource.com/25130
Reviewed-by: David Chase <drchase@google.com>
Run-TryBot: David Chase <drchase@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
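As a smoke test of the new backend, a function like the "Fib" the message
mentions is enough to exercise the ported 64-bit int ops, compares, and
calls; a minimal sketch (the actual test source is not part of this change):

	func fib(n int) int {
		if n < 2 {
			return n
		}
		return fib(n-1) + fib(n-2)
	}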
-rw-r--r--  src/cmd/compile/internal/arm64/galign.go        6
-rw-r--r--  src/cmd/compile/internal/arm64/ssa.go         628
-rw-r--r--  src/cmd/compile/internal/ssa/config.go         11
-rw-r--r--  src/cmd/compile/internal/ssa/gen/ARM64.rules  323
-rw-r--r--  src/cmd/compile/internal/ssa/gen/ARM64Ops.go  347
-rw-r--r--  src/cmd/compile/internal/ssa/opGen.go        1629
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteARM64.go 4607
7 files changed, 7551 insertions(+), 0 deletions(-)
diff --git a/src/cmd/compile/internal/arm64/galign.go b/src/cmd/compile/internal/arm64/galign.go
index 7acc4e08eb..12f9b0515f 100644
--- a/src/cmd/compile/internal/arm64/galign.go
+++ b/src/cmd/compile/internal/arm64/galign.go
@@ -6,6 +6,7 @@ package arm64
import (
"cmd/compile/internal/gc"
+ "cmd/compile/internal/ssa"
"cmd/internal/obj/arm64"
)
@@ -61,6 +62,11 @@ func Main() {
gc.Thearch.Doregbits = doregbits
gc.Thearch.Regnames = regnames
+ gc.Thearch.SSARegToReg = ssaRegToReg
+ gc.Thearch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
+ gc.Thearch.SSAGenValue = ssaGenValue
+ gc.Thearch.SSAGenBlock = ssaGenBlock
+
gc.Main()
gc.Exit(0)
}
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
new file mode 100644
index 0000000000..90dab7f4dc
--- /dev/null
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -0,0 +1,628 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm64
+
+import (
+ "math"
+
+ "cmd/compile/internal/gc"
+ "cmd/compile/internal/ssa"
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm64"
+)
+
+var ssaRegToReg = []int16{
+ arm64.REG_R0,
+ arm64.REG_R1,
+ arm64.REG_R2,
+ arm64.REG_R3,
+ arm64.REG_R4,
+ arm64.REG_R5,
+ arm64.REG_R6,
+ arm64.REG_R7,
+ arm64.REG_R8,
+ arm64.REG_R9,
+ arm64.REG_R10,
+ arm64.REG_R11,
+ arm64.REG_R12,
+ arm64.REG_R13,
+ arm64.REG_R14,
+ arm64.REG_R15,
+ arm64.REG_R16,
+ arm64.REG_R17,
+ arm64.REG_R18,
+ arm64.REG_R19,
+ arm64.REG_R20,
+ arm64.REG_R21,
+ arm64.REG_R22,
+ arm64.REG_R23,
+ arm64.REG_R24,
+ arm64.REG_R25,
+ arm64.REG_R26,
+ // R27 = REGTMP not used in regalloc
+ arm64.REGG, // R28
+ arm64.REG_R29,
+ // R30 = REGLINK not used in regalloc
+ arm64.REGSP, // R31
+
+ arm64.REG_F0,
+ arm64.REG_F1,
+ arm64.REG_F2,
+ arm64.REG_F3,
+ arm64.REG_F4,
+ arm64.REG_F5,
+ arm64.REG_F6,
+ arm64.REG_F7,
+ arm64.REG_F8,
+ arm64.REG_F9,
+ arm64.REG_F10,
+ arm64.REG_F11,
+ arm64.REG_F12,
+ arm64.REG_F13,
+ arm64.REG_F14,
+ arm64.REG_F15,
+ arm64.REG_F16,
+ arm64.REG_F17,
+ arm64.REG_F18,
+ arm64.REG_F19,
+ arm64.REG_F20,
+ arm64.REG_F21,
+ arm64.REG_F22,
+ arm64.REG_F23,
+ arm64.REG_F24,
+ arm64.REG_F25,
+ arm64.REG_F26,
+ arm64.REG_F27,
+ arm64.REG_F28,
+ arm64.REG_F29,
+ arm64.REG_F30,
+ arm64.REG_F31,
+
+ arm64.REG_NZCV, // flag
+ 0, // SB isn't a real register. We fill an Addr.Reg field with 0 in this case.
+}
+
+// Smallest possible faulting page at address zero,
+// see ../../../../runtime/mheap.go:/minPhysPageSize
+const minZeroPage = 4096
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t ssa.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return arm64.AFMOVS
+ case 8:
+ return arm64.AFMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return arm64.AMOVB
+ } else {
+ return arm64.AMOVBU
+ }
+ case 2:
+ if t.IsSigned() {
+ return arm64.AMOVH
+ } else {
+ return arm64.AMOVHU
+ }
+ case 4:
+ if t.IsSigned() {
+ return arm64.AMOVW
+ } else {
+ return arm64.AMOVWU
+ }
+ case 8:
+ return arm64.AMOVD
+ }
+ }
+ panic("bad load type")
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t ssa.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return arm64.AFMOVS
+ case 8:
+ return arm64.AFMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ return arm64.AMOVB
+ case 2:
+ return arm64.AMOVH
+ case 4:
+ return arm64.AMOVW
+ case 8:
+ return arm64.AMOVD
+ }
+ }
+ panic("bad store type")
+}
+
+func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+ s.SetLineno(v.Line)
+ switch v.Op {
+ case ssa.OpInitMem:
+ // memory arg needs no code
+ case ssa.OpArg:
+ // input args need no code
+ case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
+ // nothing to do
+ case ssa.OpCopy, ssa.OpARM64MOVDconvert, ssa.OpARM64MOVDreg:
+ if v.Type.IsMemory() {
+ return
+ }
+ x := gc.SSARegNum(v.Args[0])
+ y := gc.SSARegNum(v)
+ if x == y {
+ return
+ }
+ as := arm64.AMOVD
+ if v.Type.IsFloat() {
+ switch v.Type.Size() {
+ case 4:
+ as = arm64.AFMOVS
+ case 8:
+ as = arm64.AFMOVD
+ default:
+ panic("bad float size")
+ }
+ }
+ p := gc.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Unimplementedf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ p := gc.Prog(loadByType(v.Type))
+ n, off := gc.AutoVar(v.Args[0])
+ p.From.Type = obj.TYPE_MEM
+ p.From.Node = n
+ p.From.Sym = gc.Linksym(n.Sym)
+ p.From.Offset = off
+ if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
+ p.From.Name = obj.NAME_PARAM
+ p.From.Offset += n.Xoffset
+ } else {
+ p.From.Name = obj.NAME_AUTO
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpPhi:
+ gc.CheckLoweredPhi(v)
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Unimplementedf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ p := gc.Prog(storeByType(v.Type))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ n, off := gc.AutoVar(v)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Node = n
+ p.To.Sym = gc.Linksym(n.Sym)
+ p.To.Offset = off
+ if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
+ p.To.Name = obj.NAME_PARAM
+ p.To.Offset += n.Xoffset
+ } else {
+ p.To.Name = obj.NAME_AUTO
+ }
+ case ssa.OpARM64ADD,
+ ssa.OpARM64SUB,
+ ssa.OpARM64AND,
+ ssa.OpARM64OR,
+ ssa.OpARM64XOR,
+ ssa.OpARM64BIC,
+ ssa.OpARM64MUL,
+ ssa.OpARM64DIV,
+ ssa.OpARM64UDIV,
+ ssa.OpARM64DIVW,
+ ssa.OpARM64UDIVW,
+ ssa.OpARM64MOD,
+ ssa.OpARM64UMOD,
+ ssa.OpARM64MODW,
+ ssa.OpARM64UMODW,
+ ssa.OpARM64FADDS,
+ ssa.OpARM64FADDD,
+ ssa.OpARM64FSUBS,
+ ssa.OpARM64FSUBD,
+ ssa.OpARM64FMULS,
+ ssa.OpARM64FMULD,
+ ssa.OpARM64FDIVS,
+ ssa.OpARM64FDIVD:
+ r := gc.SSARegNum(v)
+ r1 := gc.SSARegNum(v.Args[0])
+ r2 := gc.SSARegNum(v.Args[1])
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpARM64ADDconst,
+ ssa.OpARM64SUBconst,
+ ssa.OpARM64ANDconst,
+ ssa.OpARM64ORconst,
+ ssa.OpARM64XORconst,
+ ssa.OpARM64BICconst:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpARM64MOVDconst:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpARM64FMOVSconst,
+ ssa.OpARM64FMOVDconst:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpARM64CMP,
+ ssa.OpARM64CMPW,
+ ssa.OpARM64CMN,
+ ssa.OpARM64CMNW,
+ ssa.OpARM64FCMPS,
+ ssa.OpARM64FCMPD:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[1])
+ p.Reg = gc.SSARegNum(v.Args[0])
+ case ssa.OpARM64CMPconst,
+ ssa.OpARM64CMPWconst,
+ ssa.OpARM64CMNconst,
+ ssa.OpARM64CMNWconst:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = gc.SSARegNum(v.Args[0])
+ case ssa.OpARM64MOVDaddr:
+ p := gc.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+
+ var wantreg string
+ // MOVD $sym+off(base), R
+ // the assembler expands it as follows:
+ // - base is SP: add constant offset to SP (R31);
+ //   when the constant is large, the tmp register (R27) may be used
+ // - base is SB: load external address from constant pool (use relocation)
+ switch v.Aux.(type) {
+ default:
+ v.Fatalf("aux is of unknown type %T", v.Aux)
+ case *ssa.ExternSymbol:
+ wantreg = "SB"
+ gc.AddAux(&p.From, v)
+ case *ssa.ArgSymbol, *ssa.AutoSymbol:
+ wantreg = "SP"
+ gc.AddAux(&p.From, v)
+ case nil:
+ // No sym, just MOVD $off(SP), R
+ wantreg = "SP"
+ p.From.Reg = arm64.REGSP
+ p.From.Offset = v.AuxInt
+ }
+ if reg := gc.SSAReg(v.Args[0]); reg.Name() != wantreg {
+ v.Fatalf("bad reg %s for symbol type %T, want %s", reg.Name(), v.Aux, wantreg)
+ }
+ case ssa.OpARM64MOVBload,
+ ssa.OpARM64MOVBUload,
+ ssa.OpARM64MOVHload,
+ ssa.OpARM64MOVHUload,
+ ssa.OpARM64MOVWload,
+ ssa.OpARM64MOVWUload,
+ ssa.OpARM64MOVDload,
+ ssa.OpARM64FMOVSload,
+ ssa.OpARM64FMOVDload:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpARM64MOVBstore,
+ ssa.OpARM64MOVHstore,
+ ssa.OpARM64MOVWstore,
+ ssa.OpARM64MOVDstore,
+ ssa.OpARM64FMOVSstore,
+ ssa.OpARM64FMOVDstore:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[1])
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.To, v)
+ case ssa.OpARM64MOVBreg,
+ ssa.OpARM64MOVBUreg,
+ ssa.OpARM64MOVHreg,
+ ssa.OpARM64MOVHUreg,
+ ssa.OpARM64MOVWreg,
+ ssa.OpARM64MOVWUreg:
+ a := v.Args[0]
+ for a.Op == ssa.OpCopy || a.Op == ssa.OpARM64MOVDreg {
+ a = a.Args[0]
+ }
+ if a.Op == ssa.OpLoadReg {
+ t := a.Type
+ switch {
+ case v.Op == ssa.OpARM64MOVBreg && t.Size() == 1 && t.IsSigned(),
+ v.Op == ssa.OpARM64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpARM64MOVHreg && t.Size() == 2 && t.IsSigned(),
+ v.Op == ssa.OpARM64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
+ v.Op == ssa.OpARM64MOVWreg && t.Size() == 4 && t.IsSigned(),
+ v.Op == ssa.OpARM64MOVWUreg && t.Size() == 4 && !t.IsSigned():
+ // arg is a proper-typed load, already zero/sign-extended, don't extend again
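+ // e.g. MOVBU from memory already zero-extends the byte to 64 bits,
+ // so MOVBUreg of it needs at most a register-to-register MOVD,
+ // and even that is skipped when source and destination coincide.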
+ if gc.SSARegNum(v) == gc.SSARegNum(v.Args[0]) {
+ return
+ }
+ p := gc.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ return
+ default:
+ }
+ }
+ fallthrough
+ case ssa.OpARM64MVN,
+ ssa.OpARM64NEG,
+ ssa.OpARM64FNEGS,
+ ssa.OpARM64FNEGD,
+ ssa.OpARM64FSQRTD,
+ ssa.OpARM64FCVTZSSW,
+ ssa.OpARM64FCVTZSDW,
+ ssa.OpARM64FCVTZUSW,
+ ssa.OpARM64FCVTZUDW,
+ ssa.OpARM64FCVTZSS,
+ ssa.OpARM64FCVTZSD,
+ ssa.OpARM64FCVTZUS,
+ ssa.OpARM64FCVTZUD,
+ ssa.OpARM64SCVTFWS,
+ ssa.OpARM64SCVTFWD,
+ ssa.OpARM64SCVTFS,
+ ssa.OpARM64SCVTFD,
+ ssa.OpARM64UCVTFWS,
+ ssa.OpARM64UCVTFWD,
+ ssa.OpARM64UCVTFS,
+ ssa.OpARM64UCVTFD,
+ ssa.OpARM64FCVTSD,
+ ssa.OpARM64FCVTDS:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpARM64CALLstatic:
+ if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
+ // Deferred calls will appear to be returning to
+ // the CALL deferreturn(SB) that we are about to emit.
+ // However, the stack trace code will show the line
+ // of the instruction byte before the return PC.
+ // To avoid that being an unrelated instruction,
+ // insert an actual hardware NOP that will have the right line number.
+ // This is different from obj.ANOP, which is a virtual no-op
+ // that doesn't make it into the instruction stream.
+ ginsnop()
+ }
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym))
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.OpARM64CALLclosure:
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = 0
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.OpARM64CALLdefer:
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.OpARM64CALLgo:
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(gc.Newproc.Sym)
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.OpARM64CALLinter:
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = 0
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.OpARM64LoweredNilCheck:
+ // Issue a load which will fault if arg is nil.
+ p := gc.Prog(arm64.AMOVB)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REGTMP
+ if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
+ gc.Warnl(v.Line, "generated nil check")
+ }
+ case ssa.OpVarDef:
+ gc.Gvardef(v.Aux.(*gc.Node))
+ case ssa.OpVarKill:
+ gc.Gvarkill(v.Aux.(*gc.Node))
+ case ssa.OpVarLive:
+ gc.Gvarlive(v.Aux.(*gc.Node))
+ case ssa.OpKeepAlive:
+ if !v.Args[0].Type.IsPtrShaped() {
+ v.Fatalf("keeping non-pointer alive %v", v.Args[0])
+ }
+ n, off := gc.AutoVar(v.Args[0])
+ if n == nil {
+ v.Fatalf("KeepLive with non-spilled value %s %s", v, v.Args[0])
+ }
+ if off != 0 {
+ v.Fatalf("KeepLive with non-zero offset spill location %s:%d", n, off)
+ }
+ gc.Gvarlive(n)
+ case ssa.OpARM64Equal,
+ ssa.OpARM64NotEqual,
+ ssa.OpARM64LessThan,
+ ssa.OpARM64LessEqual,
+ ssa.OpARM64GreaterThan,
+ ssa.OpARM64GreaterEqual,
+ ssa.OpARM64LessThanU,
+ ssa.OpARM64LessEqualU,
+ ssa.OpARM64GreaterThanU,
+ ssa.OpARM64GreaterEqualU:
+ // generate boolean values using CSET
+ p := gc.Prog(arm64.ACSET)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = condBits[v.Op]
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
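+ // e.g. (LessThan flags) assembles to "CSET LT, Rout" (a sketch of the
+ // printed form), setting Rout to 1 if the condition holds, else 0.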
+ case ssa.OpSelect0, ssa.OpSelect1:
+ // nothing to do
+ case ssa.OpARM64LoweredGetClosurePtr:
+ // Closure pointer is R26 (arm64.REGCTXT).
+ gc.CheckLoweredGetClosurePtr(v)
+ case ssa.OpARM64FlagEQ,
+ ssa.OpARM64FlagLT_ULT,
+ ssa.OpARM64FlagLT_UGT,
+ ssa.OpARM64FlagGT_ULT,
+ ssa.OpARM64FlagGT_UGT:
+ v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
+ case ssa.OpARM64InvertFlags:
+ v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
+ default:
+ v.Unimplementedf("genValue not implemented: %s", v.LongString())
+ }
+}
+
+var condBits = map[ssa.Op]int16{
+ ssa.OpARM64Equal: arm64.COND_EQ,
+ ssa.OpARM64NotEqual: arm64.COND_NE,
+ ssa.OpARM64LessThan: arm64.COND_LT,
+ ssa.OpARM64LessThanU: arm64.COND_LO,
+ ssa.OpARM64LessEqual: arm64.COND_LE,
+ ssa.OpARM64LessEqualU: arm64.COND_LS,
+ ssa.OpARM64GreaterThan: arm64.COND_GT,
+ ssa.OpARM64GreaterThanU: arm64.COND_HI,
+ ssa.OpARM64GreaterEqual: arm64.COND_GE,
+ ssa.OpARM64GreaterEqualU: arm64.COND_HS,
+}
+
+var blockJump = map[ssa.BlockKind]struct {
+ asm, invasm obj.As
+}{
+ ssa.BlockARM64EQ: {arm64.ABEQ, arm64.ABNE},
+ ssa.BlockARM64NE: {arm64.ABNE, arm64.ABEQ},
+ ssa.BlockARM64LT: {arm64.ABLT, arm64.ABGE},
+ ssa.BlockARM64GE: {arm64.ABGE, arm64.ABLT},
+ ssa.BlockARM64LE: {arm64.ABLE, arm64.ABGT},
+ ssa.BlockARM64GT: {arm64.ABGT, arm64.ABLE},
+ ssa.BlockARM64ULT: {arm64.ABLO, arm64.ABHS},
+ ssa.BlockARM64UGE: {arm64.ABHS, arm64.ABLO},
+ ssa.BlockARM64UGT: {arm64.ABHI, arm64.ABLS},
+ ssa.BlockARM64ULE: {arm64.ABLS, arm64.ABHI},
+}
+
+func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+ s.SetLineno(b.Line)
+
+ switch b.Kind {
+ case ssa.BlockPlain, ssa.BlockCall, ssa.BlockCheck:
+ if b.Succs[0].Block() != next {
+ p := gc.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+
+ case ssa.BlockDefer:
+ // defer returns in R0:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := gc.Prog(arm64.ACMP)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0
+ p.Reg = arm64.REG_R0
+ p = gc.Prog(arm64.ABNE)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := gc.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+
+ case ssa.BlockExit:
+ gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
+
+ case ssa.BlockRet:
+ gc.Prog(obj.ARET)
+
+ case ssa.BlockRetJmp:
+ p := gc.Prog(obj.ARET)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))
+
+ case ssa.BlockARM64EQ, ssa.BlockARM64NE,
+ ssa.BlockARM64LT, ssa.BlockARM64GE,
+ ssa.BlockARM64LE, ssa.BlockARM64GT,
+ ssa.BlockARM64ULT, ssa.BlockARM64UGT,
+ ssa.BlockARM64ULE, ssa.BlockARM64UGE:
+ jmp := blockJump[b.Kind]
+ var p *obj.Prog
+ switch next {
+ case b.Succs[0].Block():
+ p = gc.Prog(jmp.invasm)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ case b.Succs[1].Block():
+ p = gc.Prog(jmp.asm)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ default:
+ p = gc.Prog(jmp.asm)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ q := gc.Prog(obj.AJMP)
+ q.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
+ }
+
+ default:
+ b.Unimplementedf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go
index 9c1daa9c7b..4a9817423f 100644
--- a/src/cmd/compile/internal/ssa/config.go
+++ b/src/cmd/compile/internal/ssa/config.go
@@ -162,6 +162,17 @@ func NewConfig(arch string, fe Frontend, ctxt *obj.Link, optimize bool) *Config
c.flagRegMask = flagRegMaskARM
c.FPReg = framepointerRegARM
c.hasGReg = true
+ case "arm64":
+ c.IntSize = 8
+ c.PtrSize = 8
+ c.lowerBlock = rewriteBlockARM64
+ c.lowerValue = rewriteValueARM64
+ c.registers = registersARM64[:]
+ c.gpRegMask = gpRegMaskARM64
+ c.fpRegMask = fpRegMaskARM64
+ c.flagRegMask = flagRegMaskARM64
+ c.FPReg = framepointerRegARM64
+ c.hasGReg = true
case "ppc64le":
c.IntSize = 8
c.PtrSize = 8
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules
new file mode 100644
index 0000000000..9290b3c186
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules
@@ -0,0 +1,323 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+(AddPtr x y) -> (ADD x y)
+(Add64 x y) -> (ADD x y)
+(Add32 x y) -> (ADD x y)
+(Add16 x y) -> (ADD x y)
+(Add8 x y) -> (ADD x y)
+(Add32F x y) -> (FADDS x y)
+(Add64F x y) -> (FADDD x y)
+
+(SubPtr x y) -> (SUB x y)
+(Sub64 x y) -> (SUB x y)
+(Sub32 x y) -> (SUB x y)
+(Sub16 x y) -> (SUB x y)
+(Sub8 x y) -> (SUB x y)
+(Sub32F x y) -> (FSUBS x y)
+(Sub64F x y) -> (FSUBD x y)
+
+(Mul64 x y) -> (MUL x y)
+(Mul32 x y) -> (MUL x y)
+(Mul16 x y) -> (MUL x y)
+(Mul8 x y) -> (MUL x y)
+(Mul32F x y) -> (FMULS x y)
+(Mul64F x y) -> (FMULD x y)
+
+(Div64 x y) -> (DIV x y)
+(Div64u x y) -> (UDIV x y)
+(Div32 x y) -> (DIVW x y)
+(Div32u x y) -> (UDIVW x y)
+(Div16 x y) -> (DIVW (SignExt16to32 x) (SignExt16to32 y))
+(Div16u x y) -> (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Div8 x y) -> (DIVW (SignExt8to32 x) (SignExt8to32 y))
+(Div8u x y) -> (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y))
+(Div32F x y) -> (FDIVS x y)
+(Div64F x y) -> (FDIVD x y)
+
+(Mod64 x y) -> (MOD x y)
+(Mod64u x y) -> (UMOD x y)
+(Mod32 x y) -> (MODW x y)
+(Mod32u x y) -> (UMODW x y)
+(Mod16 x y) -> (MODW (SignExt16to32 x) (SignExt16to32 y))
+(Mod16u x y) -> (UMODW (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Mod8 x y) -> (MODW (SignExt8to32 x) (SignExt8to32 y))
+(Mod8u x y) -> (UMODW (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+(And64 x y) -> (AND x y)
+(And32 x y) -> (AND x y)
+(And16 x y) -> (AND x y)
+(And8 x y) -> (AND x y)
+
+(Or64 x y) -> (OR x y)
+(Or32 x y) -> (OR x y)
+(Or16 x y) -> (OR x y)
+(Or8 x y) -> (OR x y)
+
+(Xor64 x y) -> (XOR x y)
+(Xor32 x y) -> (XOR x y)
+(Xor16 x y) -> (XOR x y)
+(Xor8 x y) -> (XOR x y)
+
+// unary ops
+(Neg64 x) -> (NEG x)
+(Neg32 x) -> (NEG x)
+(Neg16 x) -> (NEG x)
+(Neg8 x) -> (NEG x)
+(Neg32F x) -> (FNEGS x)
+(Neg64F x) -> (FNEGD x)
+
+(Com64 x) -> (MVN x)
+(Com32 x) -> (MVN x)
+(Com16 x) -> (MVN x)
+(Com8 x) -> (MVN x)
+
+// boolean ops -- booleans are represented with 0=false, 1=true
+(AndB x y) -> (AND x y)
+(OrB x y) -> (OR x y)
+(EqB x y) -> (XORconst [1] (XOR <config.fe.TypeBool()> x y))
+(NeqB x y) -> (XOR x y)
+(Not x) -> (XORconst [1] x)
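+// e.g. with b and c each 0 or 1: !b lowers to a single EOR $1, and
+// b == c to an EOR followed by EOR $1 (per the Not/EqB rules above).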
+
+// constants
+(Const64 [val]) -> (MOVDconst [val])
+(Const32 [val]) -> (MOVDconst [val])
+(Const16 [val]) -> (MOVDconst [val])
+(Const8 [val]) -> (MOVDconst [val])
+(Const32F [val]) -> (FMOVSconst [val])
+(Const64F [val]) -> (FMOVDconst [val])
+(ConstNil) -> (MOVDconst [0])
+(ConstBool [b]) -> (MOVDconst [b])
+
+// truncations
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 x) -> x
+(Trunc32to8 x) -> x
+(Trunc32to16 x) -> x
+(Trunc64to8 x) -> x
+(Trunc64to16 x) -> x
+(Trunc64to32 x) -> x
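+// e.g. int32(x) for an int64 x emits no instruction here; a later use
+// that needs the extended value inserts an explicit MOVWreg.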
+
+// Zero-/Sign-extensions
+(ZeroExt8to16 x) -> (MOVBUreg x)
+(ZeroExt8to32 x) -> (MOVBUreg x)
+(ZeroExt16to32 x) -> (MOVHUreg x)
+(ZeroExt8to64 x) -> (MOVBUreg x)
+(ZeroExt16to64 x) -> (MOVHUreg x)
+(ZeroExt32to64 x) -> (MOVWUreg x)
+
+(SignExt8to16 x) -> (MOVBreg x)
+(SignExt8to32 x) -> (MOVBreg x)
+(SignExt16to32 x) -> (MOVHreg x)
+(SignExt8to64 x) -> (MOVBreg x)
+(SignExt16to64 x) -> (MOVHreg x)
+(SignExt32to64 x) -> (MOVWreg x)
+
+// float <-> int conversion
+(Cvt32to32F x) -> (SCVTFWS x)
+(Cvt32to64F x) -> (SCVTFWD x)
+(Cvt64to32F x) -> (SCVTFS x)
+(Cvt64to64F x) -> (SCVTFD x)
+(Cvt32Uto32F x) -> (UCVTFWS x)
+(Cvt32Uto64F x) -> (UCVTFWD x)
+//(Cvt64Uto32F x) -> (UCVTFS x)
+//(Cvt64Uto64F x) -> (UCVTFD x)
+(Cvt32Fto32 x) -> (FCVTZSSW x)
+(Cvt64Fto32 x) -> (FCVTZSDW x)
+(Cvt32Fto64 x) -> (FCVTZSS x)
+(Cvt64Fto64 x) -> (FCVTZSD x)
+(Cvt32Fto32U x) -> (FCVTZUSW x)
+(Cvt64Fto32U x) -> (FCVTZUDW x)
+//(Cvt32Fto64U x) -> (FCVTZUS x)
+//(Cvt64Fto64U x) -> (FCVTZUD x)
+(Cvt32Fto64F x) -> (FCVTSD x)
+(Cvt64Fto32F x) -> (FCVTDS x)
+
+// comparisons
+(Eq8 x y) -> (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Eq16 x y) -> (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Eq32 x y) -> (Equal (CMPW x y))
+(Eq64 x y) -> (Equal (CMP x y))
+(EqPtr x y) -> (Equal (CMP x y))
+(Eq32F x y) -> (Equal (FCMPS x y))
+(Eq64F x y) -> (Equal (FCMPD x y))
+
+(Neq8 x y) -> (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Neq16 x y) -> (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Neq32 x y) -> (NotEqual (CMPW x y))
+(Neq64 x y) -> (NotEqual (CMP x y))
+(NeqPtr x y) -> (NotEqual (CMP x y))
+(Neq32F x y) -> (NotEqual (FCMPS x y))
+(Neq64F x y) -> (NotEqual (FCMPD x y))
+
+(Less8 x y) -> (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Less16 x y) -> (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Less32 x y) -> (LessThan (CMPW x y))
+(Less64 x y) -> (LessThan (CMP x y))
+(Less32F x y) -> (GreaterThan (FCMPS y x)) // reverse operands to work around NaN
+(Less64F x y) -> (GreaterThan (FCMPD y x)) // reverse operands to work around NaN
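+// (An unordered FCMP result sets NZCV=0011, which satisfies the LT condition
+// but not GT, so GT of the reversed compare gives the required false when
+// either operand is NaN.)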
+
+(Less8U x y) -> (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Less16U x y) -> (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Less32U x y) -> (LessThanU (CMPW x y))
+(Less64U x y) -> (LessThanU (CMP x y))
+
+(Leq8 x y) -> (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Leq16 x y) -> (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Leq32 x y) -> (LessEqual (CMPW x y))
+(Leq64 x y) -> (LessEqual (CMP x y))
+(Leq32F x y) -> (GreaterEqual (FCMPS y x)) // reverse operands to work around NaN
+(Leq64F x y) -> (GreaterEqual (FCMPD y x)) // reverse operands to work around NaN
+
+(Leq8U x y) -> (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Leq16U x y) -> (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Leq32U x y) -> (LessEqualU (CMPW x y))
+(Leq64U x y) -> (LessEqualU (CMP x y))
+
+(Greater8 x y) -> (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Greater16 x y) -> (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Greater32 x y) -> (GreaterThan (CMPW x y))
+(Greater64 x y) -> (GreaterThan (CMP x y))
+(Greater32F x y) -> (GreaterThan (FCMPS x y))
+(Greater64F x y) -> (GreaterThan (FCMPD x y))
+
+(Greater8U x y) -> (GreaterThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Greater16U x y) -> (GreaterThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Greater32U x y) -> (GreaterThanU (CMPW x y))
+(Greater64U x y) -> (GreaterThanU (CMP x y))
+
+(Geq8 x y) -> (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Geq16 x y) -> (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Geq32 x y) -> (GreaterEqual (CMPW x y))
+(Geq64 x y) -> (GreaterEqual (CMP x y))
+(Geq32F x y) -> (GreaterEqual (FCMPS x y))
+(Geq64F x y) -> (GreaterEqual (FCMPD x y))
+
+(Geq8U x y) -> (GreaterEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Geq16U x y) -> (GreaterEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Geq32U x y) -> (GreaterEqualU (CMPW x y))
+(Geq64U x y) -> (GreaterEqualU (CMP x y))
+
+(OffPtr [off] ptr:(SP)) -> (MOVDaddr [off] ptr)
+(OffPtr [off] ptr) -> (ADDconst [off] ptr)
+
+(Addr {sym} base) -> (MOVDaddr {sym} base)
+
+// loads
+(Load <t> ptr mem) && t.IsBoolean() -> (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && isSigned(t)) -> (MOVBload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && !isSigned(t)) -> (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) -> (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) -> (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && isSigned(t)) -> (MOVWload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && !isSigned(t)) -> (MOVWUload ptr mem)
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVDload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) -> (FMOVSload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)
+
+// stores
+(Store [1] ptr val mem) -> (MOVBstore ptr val mem)
+(Store [2] ptr val mem) -> (MOVHstore ptr val mem)
+(Store [4] ptr val mem) && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
+(Store [8] ptr val mem) && !is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
+(Store [4] ptr val mem) && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
+(Store [8] ptr val mem) && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
+
+// calls
+(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
+(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
+(DeferCall [argwid] mem) -> (CALLdefer [argwid] mem)
+(GoCall [argwid] mem) -> (CALLgo [argwid] mem)
+(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)
+
+// checks
+(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
+(IsNonNil ptr) -> (NotEqual (CMPconst [0] ptr))
+(IsInBounds idx len) -> (LessThanU (CMP idx len))
+(IsSliceInBounds idx len) -> (LessEqualU (CMP idx len))
+
+// pseudo-ops
+(GetClosurePtr) -> (LoweredGetClosurePtr)
+(Convert x mem) -> (MOVDconvert x mem)
+
+// Absorb pseudo-ops into blocks.
+(If (Equal cc) yes no) -> (EQ cc yes no)
+(If (NotEqual cc) yes no) -> (NE cc yes no)
+(If (LessThan cc) yes no) -> (LT cc yes no)
+(If (LessThanU cc) yes no) -> (ULT cc yes no)
+(If (LessEqual cc) yes no) -> (LE cc yes no)
+(If (LessEqualU cc) yes no) -> (ULE cc yes no)
+(If (GreaterThan cc) yes no) -> (GT cc yes no)
+(If (GreaterThanU cc) yes no) -> (UGT cc yes no)
+(If (GreaterEqual cc) yes no) -> (GE cc yes no)
+(If (GreaterEqualU cc) yes no) -> (UGE cc yes no)
+
+(If cond yes no) -> (NE (CMPconst [0] cond) yes no)
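+// e.g. a plain "if b { ... }" on a boolean b compares b against 0 and
+// branches on not-equal.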
+
+// Absorb boolean tests into block
+(NE (CMPconst [0] (Equal cc)) yes no) -> (EQ cc yes no)
+(NE (CMPconst [0] (NotEqual cc)) yes no) -> (NE cc yes no)
+(NE (CMPconst [0] (LessThan cc)) yes no) -> (LT cc yes no)
+(NE (CMPconst [0] (LessThanU cc)) yes no) -> (ULT cc yes no)
+(NE (CMPconst [0] (LessEqual cc)) yes no) -> (LE cc yes no)
+(NE (CMPconst [0] (LessEqualU cc)) yes no) -> (ULE cc yes no)
+(NE (CMPconst [0] (GreaterThan cc)) yes no) -> (GT cc yes no)
+(NE (CMPconst [0] (GreaterThanU cc)) yes no) -> (UGT cc yes no)
+(NE (CMPconst [0] (GreaterEqual cc)) yes no) -> (GE cc yes no)
+(NE (CMPconst [0] (GreaterEqualU cc)) yes no) -> (UGE cc yes no)
+
+// Optimizations
+
+// fold offset into address
+(ADDconst [off1] (MOVDaddr [off2] {sym} ptr)) -> (MOVDaddr [off1+off2] {sym} ptr)
+
+// fold address into load/store
+(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVBload [off1+off2] {sym} ptr mem)
+(MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVBUload [off1+off2] {sym} ptr mem)
+(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVHload [off1+off2] {sym} ptr mem)
+(MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVHUload [off1+off2] {sym} ptr mem)
+(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVWload [off1+off2] {sym} ptr mem)
+(MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVWUload [off1+off2] {sym} ptr mem)
+(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVDload [off1+off2] {sym} ptr mem)
+(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) -> (FMOVSload [off1+off2] {sym} ptr mem)
+(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) -> (FMOVDload [off1+off2] {sym} ptr mem)
+
+(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) -> (MOVBstore [off1+off2] {sym} ptr val mem)
+(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) -> (MOVHstore [off1+off2] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) -> (MOVWstore [off1+off2] {sym} ptr val mem)
+(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) -> (MOVDstore [off1+off2] {sym} ptr val mem)
+(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) -> (FMOVSstore [off1+off2] {sym} ptr val mem)
+(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) -> (FMOVDstore [off1+off2] {sym} ptr val mem)
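+// e.g. (MOVWload [4] {s} (ADDconst [8] ptr) mem) becomes
+// (MOVWload [12] {s} ptr mem), eliminating the explicit ADD.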
+
+(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+ (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
+ (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
+ (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
+ (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
+ (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
new file mode 100644
index 0000000000..9925a65b00
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
@@ -0,0 +1,347 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - *const instructions may use a constant larger than the instruction can encode.
+//   In this case the assembler expands it to multiple instructions, using the tmp
+//   register (R27).
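+//   e.g. AND $0x12345678, Rx may assemble (a sketch) to a MOVD of the
+//   constant into R27 followed by AND R27, Rx.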
+
+// Suffixes encode the bit width of various instructions.
+// D (double word) = 64 bit
+// W (word) = 32 bit
+// H (half word) = 16 bit
+// HU = 16 bit unsigned
+// B (byte) = 8 bit
+// BU = 8 bit unsigned
+// S (single) = 32 bit float
+// D (double) = 64 bit float
+
+// Note: registers not used in regalloc are not included in this list,
+// so that regmask stays within int64
+// Be careful when hand coding regmasks.
+var regNamesARM64 = []string{
+ "R0",
+ "R1",
+ "R2",
+ "R3",
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "R10",
+ "R11",
+ "R12",
+ "R13",
+ "R14",
+ "R15",
+ "R16",
+ "R17",
+ "R18", // platform register?
+ "R19",
+ "R20",
+ "R21",
+ "R22",
+ "R23",
+ "R24",
+ "R25",
+ "R26",
+ // R27 = REGTMP not used in regalloc
+ "g", // aka R28
+ "R29", // frame pointer?
+ // R30 = REGLINK not used in regalloc
+ "SP", // aka R31
+
+ "F0",
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ "F13",
+ "F14",
+ "F15",
+ "F16",
+ "F17",
+ "F18",
+ "F19",
+ "F20",
+ "F21",
+ "F22",
+ "F23",
+ "F24",
+ "F25",
+ "F26",
+ "F27",
+ "F28", // 0.0
+ "F29", // 0.5
+ "F30", // 1.0
+ "F31", // 2.0
+
+ // pseudo-registers
+ "FLAGS",
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesARM64) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesARM64 {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ // Common individual register masks
+ var (
+ gp = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29")
+ gpg = gp | buildReg("g")
+ gpsp = gp | buildReg("SP")
+ gpspg = gpg | buildReg("SP")
+ gpspsbg = gpspg | buildReg("SB")
+ flags = buildReg("FLAGS")
+ fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27")
+ callerSave = gp | fp | flags | buildReg("g") // runtime.setg (and anything calling it) may clobber g
+ )
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: []regMask{}, outputs: []regMask{gp}}
+ gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
+ gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}}
+ gp1flags = regInfo{inputs: []regMask{gpg}, outputs: []regMask{flags}}
+ //gp1flags1 = regInfo{inputs: []regMask{gp, flags}, outputs: []regMask{gp}}
+ gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}}
+ //gp21cf = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}, clobbers: flags} // cf: clobbers flags
+ gp2flags = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{flags}}
+ //gp2flags1 = regInfo{inputs: []regMask{gp, gp, flags}, outputs: []regMask{gp}}
+ //gp22 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp, gp}}
+ //gp31 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}}
+ //gp3flags = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{flags}}
+ //gp3flags1 = regInfo{inputs: []regMask{gp, gp, gp, flags}, outputs: []regMask{gp}}
+ gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}
+ gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{}}
+ //gp2load = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
+ //gp2store = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{}}
+ fp01 = regInfo{inputs: []regMask{}, outputs: []regMask{fp}}
+ fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
+ //fp1flags = regInfo{inputs: []regMask{fp}, outputs: []regMask{flags}}
+ fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}}
+ gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{flags}}
+ fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}}
+ fpstore = regInfo{inputs: []regMask{gpspsbg, fp}, outputs: []regMask{}}
+ readflags = regInfo{inputs: []regMask{flags}, outputs: []regMask{gp}}
+ )
+ ops := []opData{
+ // binary ops
+ {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
+ {name: "ADDconst", argLength: 1, reg: gp11sp, asm: "ADD", aux: "Int32"}, // arg0 + auxInt
+ {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0 - arg1
+ {name: "SUBconst", argLength: 1, reg: gp11, asm: "SUB", aux: "Int32"}, // arg0 - auxInt
+ {name: "MUL", argLength: 2, reg: gp21, asm: "MUL", commutative: true}, // arg0 * arg1
+ {name: "DIV", argLength: 2, reg: gp21, asm: "SDIV"}, // arg0 / arg1, signed
+ {name: "UDIV", argLength: 2, reg: gp21, asm: "UDIV"}, // arg0 / arg1, unsighed
+ {name: "DIVW", argLength: 2, reg: gp21, asm: "SDIVW"}, // arg0 / arg1, signed, 32 bit
+ {name: "UDIVW", argLength: 2, reg: gp21, asm: "UDIVW"}, // arg0 / arg1, unsighed, 32 bit
+ {name: "MOD", argLength: 2, reg: gp21, asm: "REM"}, // arg0 % arg1, signed
+ {name: "UMOD", argLength: 2, reg: gp21, asm: "UREM"}, // arg0 % arg1, unsigned
+ {name: "MODW", argLength: 2, reg: gp21, asm: "REMW"}, // arg0 % arg1, signed, 32 bit
+ {name: "UMODW", argLength: 2, reg: gp21, asm: "UREMW"}, // arg0 % arg1, unsigned, 32 bit
+
+ {name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true}, // arg0 + arg1
+ {name: "FADDD", argLength: 2, reg: fp21, asm: "FADDD", commutative: true}, // arg0 + arg1
+ {name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS"}, // arg0 - arg1
+ {name: "FSUBD", argLength: 2, reg: fp21, asm: "FSUBD"}, // arg0 - arg1
+ {name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true}, // arg0 * arg1
+ {name: "FMULD", argLength: 2, reg: fp21, asm: "FMULD", commutative: true}, // arg0 * arg1
+ {name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS"}, // arg0 / arg1
+ {name: "FDIVD", argLength: 2, reg: fp21, asm: "FDIVD"}, // arg0 / arg1
+
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1
+ {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int32"}, // arg0 & auxInt
+ {name: "OR", argLength: 2, reg: gp21, asm: "ORR", commutative: true}, // arg0 | arg1
+ {name: "ORconst", argLength: 1, reg: gp11, asm: "ORR", aux: "Int32"}, // arg0 | auxInt
+ {name: "XOR", argLength: 2, reg: gp21, asm: "EOR", commutative: true}, // arg0 ^ arg1
+ {name: "XORconst", argLength: 1, reg: gp11, asm: "EOR", aux: "Int32"}, // arg0 ^ auxInt
+ {name: "BIC", argLength: 2, reg: gp21, asm: "BIC"}, // arg0 &^ arg1
+ {name: "BICconst", argLength: 1, reg: gp11, asm: "BIC", aux: "Int32"}, // arg0 &^ auxInt
+
+ // unary ops
+ {name: "MVN", argLength: 1, reg: gp11, asm: "MVN"}, // ^arg0
+ {name: "NEG", argLength: 1, reg: gp11, asm: "NEG"}, // -arg0
+ {name: "FNEGS", argLength: 1, reg: fp11, asm: "FNEGS"}, // -arg0, float32
+ {name: "FNEGD", argLength: 1, reg: fp11, asm: "FNEGD"}, // -arg0, float64
+ {name: "FSQRTD", argLength: 1, reg: fp11, asm: "FSQRTD"}, // sqrt(arg0), float64
+
+ // comparisons
+ {name: "CMP", argLength: 2, reg: gp2flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPconst", argLength: 1, reg: gp1flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to auxInt
+ {name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1, 32 bit
+ {name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", aux: "Int32", typ: "Flags"}, // arg0 compare to auxInt, 32 bit
+ {name: "CMN", argLength: 2, reg: gp2flags, asm: "CMN", typ: "Flags"}, // arg0 compare to -arg1
+ {name: "CMNconst", argLength: 1, reg: gp1flags, asm: "CMN", aux: "Int32", typ: "Flags"}, // arg0 compare to -auxInt
+ {name: "CMNW", argLength: 2, reg: gp2flags, asm: "CMNW", typ: "Flags"}, // arg0 compare to -arg1, 32 bit
+ {name: "CMNWconst", argLength: 1, reg: gp1flags, asm: "CMNW", aux: "Int32", typ: "Flags"}, // arg0 compare to -auxInt, 32 bit
+ {name: "FCMPS", argLength: 2, reg: fp2flags, asm: "FCMPS", typ: "Flags"}, // arg0 compare to arg1, float32
+ {name: "FCMPD", argLength: 2, reg: fp2flags, asm: "FCMPD", typ: "Flags"}, // arg0 compare to arg1, float64
+
+ // moves
+ {name: "MOVDconst", argLength: 0, reg: gp01, aux: "Int32", asm: "MOVD", typ: "UInt32", rematerializeable: true}, // 32 low bits of auxint
+ {name: "FMOVSconst", argLength: 0, reg: fp01, aux: "Float64", asm: "FMOVS", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float
+ {name: "FMOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "FMOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float
+
+ {name: "MOVDaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVD", rematerializeable: true}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB
+
+ {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "Int32"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVWU", typ: "UInt32"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVDload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVD", typ: "UInt64"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "FMOVSload", argLength: 2, reg: fpload, aux: "SymOff", asm: "FMOVS", typ: "Float32"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "FMOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "FMOVD", typ: "Float64"}, // load from arg0 + auxInt + aux. arg1=mem.
+
+ {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVDstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVD", typ: "Mem"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "FMOVSstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "FMOVS", typ: "Mem"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "FMOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "FMOVD", typ: "Mem"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+
+ // conversions
+ {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte
+ {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte
+ {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half
+ {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half
+ {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0, sign-extended from word
+ {name: "MOVWUreg", argLength: 1, reg: gp11, asm: "MOVWU"}, // move from arg0, unsign-extended from word
+ {name: "MOVDreg", argLength: 1, reg: gp11, asm: "MOVD"}, // move from arg0
+
+ {name: "SCVTFWS", argLength: 1, reg: gpfp, asm: "SCVTFWS"}, // int32 -> float32
+ {name: "SCVTFWD", argLength: 1, reg: gpfp, asm: "SCVTFWD"}, // int32 -> float64
+ {name: "UCVTFWS", argLength: 1, reg: gpfp, asm: "UCVTFWS"}, // uint32 -> float32
+ {name: "UCVTFWD", argLength: 1, reg: gpfp, asm: "UCVTFWD"}, // uint32 -> float64
+ {name: "SCVTFS", argLength: 1, reg: gpfp, asm: "SCVTFS"}, // int64 -> float32
+ {name: "SCVTFD", argLength: 1, reg: gpfp, asm: "SCVTFD"}, // int64 -> float64
+ {name: "UCVTFS", argLength: 1, reg: gpfp, asm: "UCVTFS"}, // uint64 -> float32
+ {name: "UCVTFD", argLength: 1, reg: gpfp, asm: "UCVTFD"}, // uint64 -> float64
+ {name: "FCVTZSSW", argLength: 1, reg: fpgp, asm: "FCVTZSSW"}, // float32 -> int32
+ {name: "FCVTZSDW", argLength: 1, reg: fpgp, asm: "FCVTZSDW"}, // float64 -> int32
+ {name: "FCVTZUSW", argLength: 1, reg: fpgp, asm: "FCVTZUSW"}, // float32 -> uint32
+ {name: "FCVTZUDW", argLength: 1, reg: fpgp, asm: "FCVTZUDW"}, // float64 -> uint32
+ {name: "FCVTZSS", argLength: 1, reg: fpgp, asm: "FCVTZSS"}, // float32 -> int64
+ {name: "FCVTZSD", argLength: 1, reg: fpgp, asm: "FCVTZSD"}, // float64 -> int64
+ {name: "FCVTZUS", argLength: 1, reg: fpgp, asm: "FCVTZUS"}, // float32 -> uint64
+ {name: "FCVTZUD", argLength: 1, reg: fpgp, asm: "FCVTZUD"}, // float64 -> uint64
+ {name: "FCVTSD", argLength: 1, reg: fp11, asm: "FCVTSD"}, // float32 -> float64
+ {name: "FCVTDS", argLength: 1, reg: fp11, asm: "FCVTDS"}, // float64 -> float32
+
+ // function calls
+ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff"}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R26"), 0}, clobbers: callerSave}, aux: "Int64"}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLdefer", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "Int64"}, // call deferproc. arg0=mem, auxint=argsize, returns mem
+ {name: "CALLgo", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "Int64"}, // call newproc. arg0=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64"}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // pseudo-ops
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}}, // panic if arg0 is nil. arg1=mem.
+
+ {name: "Equal", argLength: 1, reg: readflags}, // bool, true flags encode x==y false otherwise.
+ {name: "NotEqual", argLength: 1, reg: readflags}, // bool, true flags encode x!=y false otherwise.
+ {name: "LessThan", argLength: 1, reg: readflags}, // bool, true flags encode signed x<y false otherwise.
+ {name: "LessEqual", argLength: 1, reg: readflags}, // bool, true flags encode signed x<=y false otherwise.
+ {name: "GreaterThan", argLength: 1, reg: readflags}, // bool, true flags encode signed x>y false otherwise.
+ {name: "GreaterEqual", argLength: 1, reg: readflags}, // bool, true flags encode signed x>=y false otherwise.
+ {name: "LessThanU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x<y false otherwise.
+ {name: "LessEqualU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x<=y false otherwise.
+ {name: "GreaterThanU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x>y false otherwise.
+ {name: "GreaterEqualU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x>=y false otherwise.
+
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of R26 (arm64.REGCTXT, the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R26")}}},
+
+ // MOVDconvert converts between pointers and integers.
+ // We have a special op for this so as to not confuse GC
+ // (particularly stack maps). It takes a memory arg so it
+ // gets correctly ordered with respect to GC safepoints.
+ // arg0=ptr/int arg1=mem, output=int/ptr
+ {name: "MOVDconvert", argLength: 2, reg: gp11, asm: "MOVD"},
+
+ // Constant flag values. For any comparison, there are 5 possible
+ // outcomes: the three from the signed total order (<,==,>) and the
+ // three from the unsigned total order. The == cases overlap.
+ // Note: there's a sixth "unordered" outcome for floating-point
+ // comparisons, but we don't use such a beast yet.
+ // These ops are for temporary use by rewrite rules. They
+ // cannot appear in the generated assembly.
+ {name: "FlagEQ"}, // equal
+ {name: "FlagLT_ULT"}, // signed < and unsigned <
+ {name: "FlagLT_UGT"}, // signed < and unsigned >
+ {name: "FlagGT_UGT"}, // signed > and unsigned <
+ {name: "FlagGT_ULT"}, // signed > and unsigned >
+
+ // (InvertFlags (CMP a b)) == (CMP b a)
+ // InvertFlags is a pseudo-op which can't appear in assembly output.
+ {name: "InvertFlags", argLength: 1}, // reverse direction of arg0
+ }
+
+ blocks := []blockData{
+ {name: "EQ"},
+ {name: "NE"},
+ {name: "LT"},
+ {name: "LE"},
+ {name: "GT"},
+ {name: "GE"},
+ {name: "ULT"},
+ {name: "ULE"},
+ {name: "UGT"},
+ {name: "UGE"},
+ }
+
+ archs = append(archs, arch{
+ name: "ARM64",
+ pkg: "cmd/internal/obj/arm64",
+ genfile: "../../arm64/ssa.go",
+ ops: ops,
+ blocks: blocks,
+ regnames: regNamesARM64,
+ gpregmask: gp,
+ fpregmask: fp,
+ flagmask: flags,
+ framepointerreg: -1, // not used
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index 60350ebaee..f898931479 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -6,6 +6,7 @@ package ssa
import (
"cmd/internal/obj"
"cmd/internal/obj/arm"
+ "cmd/internal/obj/arm64"
"cmd/internal/obj/ppc64"
"cmd/internal/obj/x86"
)
@@ -54,6 +55,17 @@ const (
BlockARMUGT
BlockARMUGE
+ BlockARM64EQ
+ BlockARM64NE
+ BlockARM64LT
+ BlockARM64LE
+ BlockARM64GT
+ BlockARM64GE
+ BlockARM64ULT
+ BlockARM64ULE
+ BlockARM64UGT
+ BlockARM64UGE
+
BlockPPC64EQ
BlockPPC64NE
BlockPPC64LT
@@ -120,6 +132,17 @@ var blockString = [...]string{
BlockARMUGT: "UGT",
BlockARMUGE: "UGE",
+ BlockARM64EQ: "EQ",
+ BlockARM64NE: "NE",
+ BlockARM64LT: "LT",
+ BlockARM64LE: "LE",
+ BlockARM64GT: "GT",
+ BlockARM64GE: "GE",
+ BlockARM64ULT: "ULT",
+ BlockARM64ULE: "ULE",
+ BlockARM64UGT: "UGT",
+ BlockARM64UGE: "UGE",
+
BlockPPC64EQ: "EQ",
BlockPPC64NE: "NE",
BlockPPC64LT: "LT",
@@ -774,6 +797,119 @@ const (
OpARMFlagGT_ULT
OpARMInvertFlags
+ OpARM64ADD
+ OpARM64ADDconst
+ OpARM64SUB
+ OpARM64SUBconst
+ OpARM64MUL
+ OpARM64DIV
+ OpARM64UDIV
+ OpARM64DIVW
+ OpARM64UDIVW
+ OpARM64MOD
+ OpARM64UMOD
+ OpARM64MODW
+ OpARM64UMODW
+ OpARM64FADDS
+ OpARM64FADDD
+ OpARM64FSUBS
+ OpARM64FSUBD
+ OpARM64FMULS
+ OpARM64FMULD
+ OpARM64FDIVS
+ OpARM64FDIVD
+ OpARM64AND
+ OpARM64ANDconst
+ OpARM64OR
+ OpARM64ORconst
+ OpARM64XOR
+ OpARM64XORconst
+ OpARM64BIC
+ OpARM64BICconst
+ OpARM64MVN
+ OpARM64NEG
+ OpARM64FNEGS
+ OpARM64FNEGD
+ OpARM64FSQRTD
+ OpARM64CMP
+ OpARM64CMPconst
+ OpARM64CMPW
+ OpARM64CMPWconst
+ OpARM64CMN
+ OpARM64CMNconst
+ OpARM64CMNW
+ OpARM64CMNWconst
+ OpARM64FCMPS
+ OpARM64FCMPD
+ OpARM64MOVDconst
+ OpARM64FMOVSconst
+ OpARM64FMOVDconst
+ OpARM64MOVDaddr
+ OpARM64MOVBload
+ OpARM64MOVBUload
+ OpARM64MOVHload
+ OpARM64MOVHUload
+ OpARM64MOVWload
+ OpARM64MOVWUload
+ OpARM64MOVDload
+ OpARM64FMOVSload
+ OpARM64FMOVDload
+ OpARM64MOVBstore
+ OpARM64MOVHstore
+ OpARM64MOVWstore
+ OpARM64MOVDstore
+ OpARM64FMOVSstore
+ OpARM64FMOVDstore
+ OpARM64MOVBreg
+ OpARM64MOVBUreg
+ OpARM64MOVHreg
+ OpARM64MOVHUreg
+ OpARM64MOVWreg
+ OpARM64MOVWUreg
+ OpARM64MOVDreg
+ OpARM64SCVTFWS
+ OpARM64SCVTFWD
+ OpARM64UCVTFWS
+ OpARM64UCVTFWD
+ OpARM64SCVTFS
+ OpARM64SCVTFD
+ OpARM64UCVTFS
+ OpARM64UCVTFD
+ OpARM64FCVTZSSW
+ OpARM64FCVTZSDW
+ OpARM64FCVTZUSW
+ OpARM64FCVTZUDW
+ OpARM64FCVTZSS
+ OpARM64FCVTZSD
+ OpARM64FCVTZUS
+ OpARM64FCVTZUD
+ OpARM64FCVTSD
+ OpARM64FCVTDS
+ OpARM64CALLstatic
+ OpARM64CALLclosure
+ OpARM64CALLdefer
+ OpARM64CALLgo
+ OpARM64CALLinter
+ OpARM64LoweredNilCheck
+ OpARM64Equal
+ OpARM64NotEqual
+ OpARM64LessThan
+ OpARM64LessEqual
+ OpARM64GreaterThan
+ OpARM64GreaterEqual
+ OpARM64LessThanU
+ OpARM64LessEqualU
+ OpARM64GreaterThanU
+ OpARM64GreaterEqualU
+ OpARM64LoweredGetClosurePtr
+ OpARM64MOVDconvert
+ OpARM64FlagEQ
+ OpARM64FlagLT_ULT
+ OpARM64FlagLT_UGT
+ OpARM64FlagGT_UGT
+ OpARM64FlagGT_ULT
+ OpARM64InvertFlags
+
OpPPC64ADD
OpPPC64ADDconst
OpPPC64FADD
@@ -9825,6 +9961,1429 @@ var opcodeTable = [...]opInfo{
name: "ADD",
argLen: 2,
commutative: true,
+ asm: arm64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ {1, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "ADDconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741823}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29 SP
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "SUB",
+ argLen: 2,
+ asm: arm64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ {1, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "SUBconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "MUL",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ {1, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "DIV",
+ argLen: 2,
+ asm: arm64.ASDIV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ {1, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "UDIV",
+ argLen: 2,
+ asm: arm64.AUDIV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ {1, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "DIVW",
+ argLen: 2,
+ asm: arm64.ASDIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ {1, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "UDIVW",
+ argLen: 2,
+ asm: arm64.AUDIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ {1, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "MOD",
+ argLen: 2,
+ asm: arm64.AREM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ {1, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "UMOD",
+ argLen: 2,
+ asm: arm64.AUREM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ {1, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "MODW",
+ argLen: 2,
+ asm: arm64.AREMW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ {1, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "UMODW",
+ argLen: 2,
+ asm: arm64.AUREMW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ {1, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "FADDS",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ {1, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FADDD",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ {1, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FSUBS",
+ argLen: 2,
+ asm: arm64.AFSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ {1, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FSUBD",
+ argLen: 2,
+ asm: arm64.AFSUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ {1, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FMULS",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFMULS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ {1, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FMULD",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ {1, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FDIVS",
+ argLen: 2,
+ asm: arm64.AFDIVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ {1, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FDIVD",
+ argLen: 2,
+ asm: arm64.AFDIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ {1, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ {1, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "ANDconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ {1, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "ORconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ {1, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "XORconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm64.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "BIC",
+ argLen: 2,
+ asm: arm64.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ {1, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "BICconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm64.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "MVN",
+ argLen: 1,
+ asm: arm64.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "NEG",
+ argLen: 1,
+ asm: arm64.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "FNEGS",
+ argLen: 1,
+ asm: arm64.AFNEGS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FNEGD",
+ argLen: 1,
+ asm: arm64.AFNEGD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FSQRTD",
+ argLen: 1,
+ asm: arm64.AFSQRTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "CMP",
+ argLen: 2,
+ asm: arm64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ {1, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 4611686018427387904}, // FLAGS
+ },
+ },
+ },
+ {
+ name: "CMPconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 4611686018427387904}, // FLAGS
+ },
+ },
+ },
+ {
+ name: "CMPW",
+ argLen: 2,
+ asm: arm64.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ {1, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 4611686018427387904}, // FLAGS
+ },
+ },
+ },
+ {
+ name: "CMPWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm64.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 4611686018427387904}, // FLAGS
+ },
+ },
+ },
+ {
+ name: "CMN",
+ argLen: 2,
+ asm: arm64.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ {1, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 4611686018427387904}, // FLAGS
+ },
+ },
+ },
+ {
+ name: "CMNconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm64.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 4611686018427387904}, // FLAGS
+ },
+ },
+ },
+ {
+ name: "CMNW",
+ argLen: 2,
+ asm: arm64.ACMNW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ {1, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 4611686018427387904}, // FLAGS
+ },
+ },
+ },
+ {
+ name: "CMNWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm64.ACMNW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 4611686018427387904}, // FLAGS
+ },
+ },
+ },
+ {
+ name: "FCMPS",
+ argLen: 2,
+ asm: arm64.AFCMPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ {1, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 4611686018427387904}, // FLAGS
+ },
+ },
+ },
+ {
+ name: "FCMPD",
+ argLen: 2,
+ asm: arm64.AFCMPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ {1, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 4611686018427387904}, // FLAGS
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxInt32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
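+ // rematerializeable tells the register allocator it may recompute this
+ // constant at a use site rather than spilling and reloading it.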
+ {
+ name: "FMOVSconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FMOVDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "MOVDaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037391646720}, // SP SB
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
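+ // MOVDaddr forms an address relative to the stack pointer or static base,
+ // hence an input mask covering only SP and SB.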
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "MOVBUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm64.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "MOVHUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm64.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "MOVWUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm64.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "FMOVSload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FMOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ {0, 9223372037928517631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29 SP SB
+ },
+ },
+ },
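+ // For the store ops, input 1 is the value and input 0 is the address; the
+ // third argument is memory, which needs no register, so there are no outputs.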
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ {0, 9223372037928517631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ {0, 9223372037928517631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ {0, 9223372037928517631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29 SP SB
+ },
+ },
+ },
+ {
+ name: "FMOVSstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ {0, 9223372037928517631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29 SP SB
+ },
+ },
+ },
+ {
+ name: "FMOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ {0, 9223372037928517631}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "MOVBUreg",
+ argLen: 1,
+ asm: arm64.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "MOVHUreg",
+ argLen: 1,
+ asm: arm64.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "MOVWUreg",
+ argLen: 1,
+ asm: arm64.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "MOVDreg",
+ argLen: 1,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "SCVTFWS",
+ argLen: 1,
+ asm: arm64.ASCVTFWS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "SCVTFWD",
+ argLen: 1,
+ asm: arm64.ASCVTFWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "UCVTFWS",
+ argLen: 1,
+ asm: arm64.AUCVTFWS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "UCVTFWD",
+ argLen: 1,
+ asm: arm64.AUCVTFWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "SCVTFS",
+ argLen: 1,
+ asm: arm64.ASCVTFS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "SCVTFD",
+ argLen: 1,
+ asm: arm64.ASCVTFD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "UCVTFS",
+ argLen: 1,
+ asm: arm64.AUCVTFS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "UCVTFD",
+ argLen: 1,
+ asm: arm64.AUCVTFD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FCVTZSSW",
+ argLen: 1,
+ asm: arm64.AFCVTZSSW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "FCVTZSDW",
+ argLen: 1,
+ asm: arm64.AFCVTZSDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "FCVTZUSW",
+ argLen: 1,
+ asm: arm64.AFCVTZUSW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "FCVTZUDW",
+ argLen: 1,
+ asm: arm64.AFCVTZUDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "FCVTZSS",
+ argLen: 1,
+ asm: arm64.AFCVTZSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "FCVTZSD",
+ argLen: 1,
+ asm: arm64.AFCVTZSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "FCVTZUS",
+ argLen: 1,
+ asm: arm64.AFCVTZUS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "FCVTZUD",
+ argLen: 1,
+ asm: arm64.AFCVTZUD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "FCVTSD",
+ argLen: 1,
+ asm: arm64.AFCVTSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "FCVTDS",
+ argLen: 1,
+ asm: arm64.AFCVTDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ outputs: []outputInfo{
+ {0, 288230375077969920}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxSymOff,
+ argLen: 1,
+ reg: regInfo{
+ clobbers: 4899916394042228735, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 FLAGS
+ },
+ },
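+ // The call ops clobber every allocatable register (all GP and FP registers
+ // plus FLAGS), so the allocator must spill any value live across the call.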
+ {
+ name: "CALLclosure",
+ auxType: auxInt64,
+ argLen: 3,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 67108864}, // R26
+ {0, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29 SP
+ },
+ clobbers: 4899916394042228735, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 FLAGS
+ },
+ },
+ {
+ name: "CALLdefer",
+ auxType: auxInt64,
+ argLen: 1,
+ reg: regInfo{
+ clobbers: 4899916394042228735, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 FLAGS
+ },
+ },
+ {
+ name: "CALLgo",
+ auxType: auxInt64,
+ argLen: 1,
+ reg: regInfo{
+ clobbers: 4899916394042228735, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 FLAGS
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxInt64,
+ argLen: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ clobbers: 4899916394042228735, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 FLAGS
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ },
+ },
+ {
+ name: "Equal",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018427387904}, // FLAGS
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "NotEqual",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018427387904}, // FLAGS
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "LessThan",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018427387904}, // FLAGS
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "LessEqual",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018427387904}, // FLAGS
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "GreaterThan",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018427387904}, // FLAGS
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "GreaterEqual",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018427387904}, // FLAGS
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "LessThanU",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018427387904}, // FLAGS
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "LessEqualU",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018427387904}, // FLAGS
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "GreaterThanU",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018427387904}, // FLAGS
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "GreaterEqualU",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018427387904}, // FLAGS
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 67108864}, // R26
+ },
+ },
+ },
+ {
+ name: "MOVDconvert",
+ argLen: 2,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870911}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 g R29
+ },
+ outputs: []outputInfo{
+ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R29
+ },
+ },
+ },
+ {
+ name: "FlagEQ",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT_ULT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT_UGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT_UGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT_ULT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "InvertFlags",
+ argLen: 1,
+ reg: regInfo{},
+ },
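+ // The Flag* and InvertFlags ops are pseudo-ops with empty regInfo: they
+ // model statically known or operand-swapped flag states for the rewrite
+ // rules and emit no machine code themselves.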
+
+ {
+ name: "ADD",
+ argLen: 2,
+ commutative: true,
asm: ppc64.AADD,
reg: regInfo{
inputs: []inputInfo{
@@ -12354,6 +13913,76 @@ var gpRegMaskARM = regMask(5119)
var fpRegMaskARM = regMask(4294901760)
var flagRegMaskARM = regMask(4294967296)
var framepointerRegARM = int8(-1)
+var registersARM64 = [...]Register{
+ {0, "R0"},
+ {1, "R1"},
+ {2, "R2"},
+ {3, "R3"},
+ {4, "R4"},
+ {5, "R5"},
+ {6, "R6"},
+ {7, "R7"},
+ {8, "R8"},
+ {9, "R9"},
+ {10, "R10"},
+ {11, "R11"},
+ {12, "R12"},
+ {13, "R13"},
+ {14, "R14"},
+ {15, "R15"},
+ {16, "R16"},
+ {17, "R17"},
+ {18, "R18"},
+ {19, "R19"},
+ {20, "R20"},
+ {21, "R21"},
+ {22, "R22"},
+ {23, "R23"},
+ {24, "R24"},
+ {25, "R25"},
+ {26, "R26"},
+ {27, "g"},
+ {28, "R29"},
+ {29, "SP"},
+ {30, "F0"},
+ {31, "F1"},
+ {32, "F2"},
+ {33, "F3"},
+ {34, "F4"},
+ {35, "F5"},
+ {36, "F6"},
+ {37, "F7"},
+ {38, "F8"},
+ {39, "F9"},
+ {40, "F10"},
+ {41, "F11"},
+ {42, "F12"},
+ {43, "F13"},
+ {44, "F14"},
+ {45, "F15"},
+ {46, "F16"},
+ {47, "F17"},
+ {48, "F18"},
+ {49, "F19"},
+ {50, "F20"},
+ {51, "F21"},
+ {52, "F22"},
+ {53, "F23"},
+ {54, "F24"},
+ {55, "F25"},
+ {56, "F26"},
+ {57, "F27"},
+ {58, "F28"},
+ {59, "F29"},
+ {60, "F30"},
+ {61, "F31"},
+ {62, "FLAGS"},
+ {63, "SB"},
+}
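+// The masks below are bitmaps over the registersARM64 table above:
+// gpRegMaskARM64 sets bits 0-26 and 28 (R0-R26, R29) and omits bit 27 (g),
+// fpRegMaskARM64 sets bits 30-57 (F0-F27), and flagRegMaskARM64 is bit 62.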
+var gpRegMaskARM64 = regMask(402653183)
+var fpRegMaskARM64 = regMask(288230375077969920)
+var flagRegMaskARM64 = regMask(4611686018427387904)
+var framepointerRegARM64 = int8(-1)
var registersPPC64 = [...]Register{
{0, "R0"},
{1, "SP"},
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
new file mode 100644
index 0000000000..72717f66be
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -0,0 +1,4607 @@
+// autogenerated from gen/ARM64.rules: do not edit!
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "math"
+
+var _ = math.MinInt8 // in case not otherwise used
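+// rewriteValueARM64 dispatches on v.Op to the generated per-op rewrite
+// function and reports whether any rewrite rule fired.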
+func rewriteValueARM64(v *Value, config *Config) bool {
+ switch v.Op {
+ case OpARM64ADDconst:
+ return rewriteValueARM64_OpARM64ADDconst(v, config)
+ case OpAdd16:
+ return rewriteValueARM64_OpAdd16(v, config)
+ case OpAdd32:
+ return rewriteValueARM64_OpAdd32(v, config)
+ case OpAdd32F:
+ return rewriteValueARM64_OpAdd32F(v, config)
+ case OpAdd64:
+ return rewriteValueARM64_OpAdd64(v, config)
+ case OpAdd64F:
+ return rewriteValueARM64_OpAdd64F(v, config)
+ case OpAdd8:
+ return rewriteValueARM64_OpAdd8(v, config)
+ case OpAddPtr:
+ return rewriteValueARM64_OpAddPtr(v, config)
+ case OpAddr:
+ return rewriteValueARM64_OpAddr(v, config)
+ case OpAnd16:
+ return rewriteValueARM64_OpAnd16(v, config)
+ case OpAnd32:
+ return rewriteValueARM64_OpAnd32(v, config)
+ case OpAnd64:
+ return rewriteValueARM64_OpAnd64(v, config)
+ case OpAnd8:
+ return rewriteValueARM64_OpAnd8(v, config)
+ case OpAndB:
+ return rewriteValueARM64_OpAndB(v, config)
+ case OpClosureCall:
+ return rewriteValueARM64_OpClosureCall(v, config)
+ case OpCom16:
+ return rewriteValueARM64_OpCom16(v, config)
+ case OpCom32:
+ return rewriteValueARM64_OpCom32(v, config)
+ case OpCom64:
+ return rewriteValueARM64_OpCom64(v, config)
+ case OpCom8:
+ return rewriteValueARM64_OpCom8(v, config)
+ case OpConst16:
+ return rewriteValueARM64_OpConst16(v, config)
+ case OpConst32:
+ return rewriteValueARM64_OpConst32(v, config)
+ case OpConst32F:
+ return rewriteValueARM64_OpConst32F(v, config)
+ case OpConst64:
+ return rewriteValueARM64_OpConst64(v, config)
+ case OpConst64F:
+ return rewriteValueARM64_OpConst64F(v, config)
+ case OpConst8:
+ return rewriteValueARM64_OpConst8(v, config)
+ case OpConstBool:
+ return rewriteValueARM64_OpConstBool(v, config)
+ case OpConstNil:
+ return rewriteValueARM64_OpConstNil(v, config)
+ case OpConvert:
+ return rewriteValueARM64_OpConvert(v, config)
+ case OpCvt32Fto32:
+ return rewriteValueARM64_OpCvt32Fto32(v, config)
+ case OpCvt32Fto32U:
+ return rewriteValueARM64_OpCvt32Fto32U(v, config)
+ case OpCvt32Fto64:
+ return rewriteValueARM64_OpCvt32Fto64(v, config)
+ case OpCvt32Fto64F:
+ return rewriteValueARM64_OpCvt32Fto64F(v, config)
+ case OpCvt32Uto32F:
+ return rewriteValueARM64_OpCvt32Uto32F(v, config)
+ case OpCvt32Uto64F:
+ return rewriteValueARM64_OpCvt32Uto64F(v, config)
+ case OpCvt32to32F:
+ return rewriteValueARM64_OpCvt32to32F(v, config)
+ case OpCvt32to64F:
+ return rewriteValueARM64_OpCvt32to64F(v, config)
+ case OpCvt64Fto32:
+ return rewriteValueARM64_OpCvt64Fto32(v, config)
+ case OpCvt64Fto32F:
+ return rewriteValueARM64_OpCvt64Fto32F(v, config)
+ case OpCvt64Fto32U:
+ return rewriteValueARM64_OpCvt64Fto32U(v, config)
+ case OpCvt64Fto64:
+ return rewriteValueARM64_OpCvt64Fto64(v, config)
+ case OpCvt64to32F:
+ return rewriteValueARM64_OpCvt64to32F(v, config)
+ case OpCvt64to64F:
+ return rewriteValueARM64_OpCvt64to64F(v, config)
+ case OpDeferCall:
+ return rewriteValueARM64_OpDeferCall(v, config)
+ case OpDiv16:
+ return rewriteValueARM64_OpDiv16(v, config)
+ case OpDiv16u:
+ return rewriteValueARM64_OpDiv16u(v, config)
+ case OpDiv32:
+ return rewriteValueARM64_OpDiv32(v, config)
+ case OpDiv32F:
+ return rewriteValueARM64_OpDiv32F(v, config)
+ case OpDiv32u:
+ return rewriteValueARM64_OpDiv32u(v, config)
+ case OpDiv64:
+ return rewriteValueARM64_OpDiv64(v, config)
+ case OpDiv64F:
+ return rewriteValueARM64_OpDiv64F(v, config)
+ case OpDiv64u:
+ return rewriteValueARM64_OpDiv64u(v, config)
+ case OpDiv8:
+ return rewriteValueARM64_OpDiv8(v, config)
+ case OpDiv8u:
+ return rewriteValueARM64_OpDiv8u(v, config)
+ case OpEq16:
+ return rewriteValueARM64_OpEq16(v, config)
+ case OpEq32:
+ return rewriteValueARM64_OpEq32(v, config)
+ case OpEq32F:
+ return rewriteValueARM64_OpEq32F(v, config)
+ case OpEq64:
+ return rewriteValueARM64_OpEq64(v, config)
+ case OpEq64F:
+ return rewriteValueARM64_OpEq64F(v, config)
+ case OpEq8:
+ return rewriteValueARM64_OpEq8(v, config)
+ case OpEqB:
+ return rewriteValueARM64_OpEqB(v, config)
+ case OpEqPtr:
+ return rewriteValueARM64_OpEqPtr(v, config)
+ case OpARM64FMOVDload:
+ return rewriteValueARM64_OpARM64FMOVDload(v, config)
+ case OpARM64FMOVDstore:
+ return rewriteValueARM64_OpARM64FMOVDstore(v, config)
+ case OpARM64FMOVSload:
+ return rewriteValueARM64_OpARM64FMOVSload(v, config)
+ case OpARM64FMOVSstore:
+ return rewriteValueARM64_OpARM64FMOVSstore(v, config)
+ case OpGeq16:
+ return rewriteValueARM64_OpGeq16(v, config)
+ case OpGeq16U:
+ return rewriteValueARM64_OpGeq16U(v, config)
+ case OpGeq32:
+ return rewriteValueARM64_OpGeq32(v, config)
+ case OpGeq32F:
+ return rewriteValueARM64_OpGeq32F(v, config)
+ case OpGeq32U:
+ return rewriteValueARM64_OpGeq32U(v, config)
+ case OpGeq64:
+ return rewriteValueARM64_OpGeq64(v, config)
+ case OpGeq64F:
+ return rewriteValueARM64_OpGeq64F(v, config)
+ case OpGeq64U:
+ return rewriteValueARM64_OpGeq64U(v, config)
+ case OpGeq8:
+ return rewriteValueARM64_OpGeq8(v, config)
+ case OpGeq8U:
+ return rewriteValueARM64_OpGeq8U(v, config)
+ case OpGetClosurePtr:
+ return rewriteValueARM64_OpGetClosurePtr(v, config)
+ case OpGoCall:
+ return rewriteValueARM64_OpGoCall(v, config)
+ case OpGreater16:
+ return rewriteValueARM64_OpGreater16(v, config)
+ case OpGreater16U:
+ return rewriteValueARM64_OpGreater16U(v, config)
+ case OpGreater32:
+ return rewriteValueARM64_OpGreater32(v, config)
+ case OpGreater32F:
+ return rewriteValueARM64_OpGreater32F(v, config)
+ case OpGreater32U:
+ return rewriteValueARM64_OpGreater32U(v, config)
+ case OpGreater64:
+ return rewriteValueARM64_OpGreater64(v, config)
+ case OpGreater64F:
+ return rewriteValueARM64_OpGreater64F(v, config)
+ case OpGreater64U:
+ return rewriteValueARM64_OpGreater64U(v, config)
+ case OpGreater8:
+ return rewriteValueARM64_OpGreater8(v, config)
+ case OpGreater8U:
+ return rewriteValueARM64_OpGreater8U(v, config)
+ case OpInterCall:
+ return rewriteValueARM64_OpInterCall(v, config)
+ case OpIsInBounds:
+ return rewriteValueARM64_OpIsInBounds(v, config)
+ case OpIsNonNil:
+ return rewriteValueARM64_OpIsNonNil(v, config)
+ case OpIsSliceInBounds:
+ return rewriteValueARM64_OpIsSliceInBounds(v, config)
+ case OpLeq16:
+ return rewriteValueARM64_OpLeq16(v, config)
+ case OpLeq16U:
+ return rewriteValueARM64_OpLeq16U(v, config)
+ case OpLeq32:
+ return rewriteValueARM64_OpLeq32(v, config)
+ case OpLeq32F:
+ return rewriteValueARM64_OpLeq32F(v, config)
+ case OpLeq32U:
+ return rewriteValueARM64_OpLeq32U(v, config)
+ case OpLeq64:
+ return rewriteValueARM64_OpLeq64(v, config)
+ case OpLeq64F:
+ return rewriteValueARM64_OpLeq64F(v, config)
+ case OpLeq64U:
+ return rewriteValueARM64_OpLeq64U(v, config)
+ case OpLeq8:
+ return rewriteValueARM64_OpLeq8(v, config)
+ case OpLeq8U:
+ return rewriteValueARM64_OpLeq8U(v, config)
+ case OpLess16:
+ return rewriteValueARM64_OpLess16(v, config)
+ case OpLess16U:
+ return rewriteValueARM64_OpLess16U(v, config)
+ case OpLess32:
+ return rewriteValueARM64_OpLess32(v, config)
+ case OpLess32F:
+ return rewriteValueARM64_OpLess32F(v, config)
+ case OpLess32U:
+ return rewriteValueARM64_OpLess32U(v, config)
+ case OpLess64:
+ return rewriteValueARM64_OpLess64(v, config)
+ case OpLess64F:
+ return rewriteValueARM64_OpLess64F(v, config)
+ case OpLess64U:
+ return rewriteValueARM64_OpLess64U(v, config)
+ case OpLess8:
+ return rewriteValueARM64_OpLess8(v, config)
+ case OpLess8U:
+ return rewriteValueARM64_OpLess8U(v, config)
+ case OpLoad:
+ return rewriteValueARM64_OpLoad(v, config)
+ case OpARM64MOVBUload:
+ return rewriteValueARM64_OpARM64MOVBUload(v, config)
+ case OpARM64MOVBload:
+ return rewriteValueARM64_OpARM64MOVBload(v, config)
+ case OpARM64MOVBstore:
+ return rewriteValueARM64_OpARM64MOVBstore(v, config)
+ case OpARM64MOVDload:
+ return rewriteValueARM64_OpARM64MOVDload(v, config)
+ case OpARM64MOVDstore:
+ return rewriteValueARM64_OpARM64MOVDstore(v, config)
+ case OpARM64MOVHUload:
+ return rewriteValueARM64_OpARM64MOVHUload(v, config)
+ case OpARM64MOVHload:
+ return rewriteValueARM64_OpARM64MOVHload(v, config)
+ case OpARM64MOVHstore:
+ return rewriteValueARM64_OpARM64MOVHstore(v, config)
+ case OpARM64MOVWUload:
+ return rewriteValueARM64_OpARM64MOVWUload(v, config)
+ case OpARM64MOVWload:
+ return rewriteValueARM64_OpARM64MOVWload(v, config)
+ case OpARM64MOVWstore:
+ return rewriteValueARM64_OpARM64MOVWstore(v, config)
+ case OpMod16:
+ return rewriteValueARM64_OpMod16(v, config)
+ case OpMod16u:
+ return rewriteValueARM64_OpMod16u(v, config)
+ case OpMod32:
+ return rewriteValueARM64_OpMod32(v, config)
+ case OpMod32u:
+ return rewriteValueARM64_OpMod32u(v, config)
+ case OpMod64:
+ return rewriteValueARM64_OpMod64(v, config)
+ case OpMod64u:
+ return rewriteValueARM64_OpMod64u(v, config)
+ case OpMod8:
+ return rewriteValueARM64_OpMod8(v, config)
+ case OpMod8u:
+ return rewriteValueARM64_OpMod8u(v, config)
+ case OpMul16:
+ return rewriteValueARM64_OpMul16(v, config)
+ case OpMul32:
+ return rewriteValueARM64_OpMul32(v, config)
+ case OpMul32F:
+ return rewriteValueARM64_OpMul32F(v, config)
+ case OpMul64:
+ return rewriteValueARM64_OpMul64(v, config)
+ case OpMul64F:
+ return rewriteValueARM64_OpMul64F(v, config)
+ case OpMul8:
+ return rewriteValueARM64_OpMul8(v, config)
+ case OpNeg16:
+ return rewriteValueARM64_OpNeg16(v, config)
+ case OpNeg32:
+ return rewriteValueARM64_OpNeg32(v, config)
+ case OpNeg32F:
+ return rewriteValueARM64_OpNeg32F(v, config)
+ case OpNeg64:
+ return rewriteValueARM64_OpNeg64(v, config)
+ case OpNeg64F:
+ return rewriteValueARM64_OpNeg64F(v, config)
+ case OpNeg8:
+ return rewriteValueARM64_OpNeg8(v, config)
+ case OpNeq16:
+ return rewriteValueARM64_OpNeq16(v, config)
+ case OpNeq32:
+ return rewriteValueARM64_OpNeq32(v, config)
+ case OpNeq32F:
+ return rewriteValueARM64_OpNeq32F(v, config)
+ case OpNeq64:
+ return rewriteValueARM64_OpNeq64(v, config)
+ case OpNeq64F:
+ return rewriteValueARM64_OpNeq64F(v, config)
+ case OpNeq8:
+ return rewriteValueARM64_OpNeq8(v, config)
+ case OpNeqB:
+ return rewriteValueARM64_OpNeqB(v, config)
+ case OpNeqPtr:
+ return rewriteValueARM64_OpNeqPtr(v, config)
+ case OpNilCheck:
+ return rewriteValueARM64_OpNilCheck(v, config)
+ case OpNot:
+ return rewriteValueARM64_OpNot(v, config)
+ case OpOffPtr:
+ return rewriteValueARM64_OpOffPtr(v, config)
+ case OpOr16:
+ return rewriteValueARM64_OpOr16(v, config)
+ case OpOr32:
+ return rewriteValueARM64_OpOr32(v, config)
+ case OpOr64:
+ return rewriteValueARM64_OpOr64(v, config)
+ case OpOr8:
+ return rewriteValueARM64_OpOr8(v, config)
+ case OpOrB:
+ return rewriteValueARM64_OpOrB(v, config)
+ case OpSignExt16to32:
+ return rewriteValueARM64_OpSignExt16to32(v, config)
+ case OpSignExt16to64:
+ return rewriteValueARM64_OpSignExt16to64(v, config)
+ case OpSignExt32to64:
+ return rewriteValueARM64_OpSignExt32to64(v, config)
+ case OpSignExt8to16:
+ return rewriteValueARM64_OpSignExt8to16(v, config)
+ case OpSignExt8to32:
+ return rewriteValueARM64_OpSignExt8to32(v, config)
+ case OpSignExt8to64:
+ return rewriteValueARM64_OpSignExt8to64(v, config)
+ case OpStaticCall:
+ return rewriteValueARM64_OpStaticCall(v, config)
+ case OpStore:
+ return rewriteValueARM64_OpStore(v, config)
+ case OpSub16:
+ return rewriteValueARM64_OpSub16(v, config)
+ case OpSub32:
+ return rewriteValueARM64_OpSub32(v, config)
+ case OpSub32F:
+ return rewriteValueARM64_OpSub32F(v, config)
+ case OpSub64:
+ return rewriteValueARM64_OpSub64(v, config)
+ case OpSub64F:
+ return rewriteValueARM64_OpSub64F(v, config)
+ case OpSub8:
+ return rewriteValueARM64_OpSub8(v, config)
+ case OpSubPtr:
+ return rewriteValueARM64_OpSubPtr(v, config)
+ case OpTrunc16to8:
+ return rewriteValueARM64_OpTrunc16to8(v, config)
+ case OpTrunc32to16:
+ return rewriteValueARM64_OpTrunc32to16(v, config)
+ case OpTrunc32to8:
+ return rewriteValueARM64_OpTrunc32to8(v, config)
+ case OpTrunc64to16:
+ return rewriteValueARM64_OpTrunc64to16(v, config)
+ case OpTrunc64to32:
+ return rewriteValueARM64_OpTrunc64to32(v, config)
+ case OpTrunc64to8:
+ return rewriteValueARM64_OpTrunc64to8(v, config)
+ case OpXor16:
+ return rewriteValueARM64_OpXor16(v, config)
+ case OpXor32:
+ return rewriteValueARM64_OpXor32(v, config)
+ case OpXor64:
+ return rewriteValueARM64_OpXor64(v, config)
+ case OpXor8:
+ return rewriteValueARM64_OpXor8(v, config)
+ case OpZeroExt16to32:
+ return rewriteValueARM64_OpZeroExt16to32(v, config)
+ case OpZeroExt16to64:
+ return rewriteValueARM64_OpZeroExt16to64(v, config)
+ case OpZeroExt32to64:
+ return rewriteValueARM64_OpZeroExt32to64(v, config)
+ case OpZeroExt8to16:
+ return rewriteValueARM64_OpZeroExt8to16(v, config)
+ case OpZeroExt8to32:
+ return rewriteValueARM64_OpZeroExt8to32(v, config)
+ case OpZeroExt8to64:
+ return rewriteValueARM64_OpZeroExt8to64(v, config)
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ADDconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADDconst [off1] (MOVDaddr [off2] {sym} ptr))
+ // cond:
+ // result: (MOVDaddr [off1+off2] {sym} ptr)
+ for {
+ off1 := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym := v_0.Aux
+ ptr := v_0.Args[0]
+ v.reset(OpARM64MOVDaddr)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ return true
+ }
+ return false
+}
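+// The match/cond/result comments in each function mirror the gen/ARM64.rules
+// line it was generated from; reconstructed from those comments, the rule for
+// the function above is:
+//   (ADDconst [off1] (MOVDaddr [off2] {sym} ptr)) -> (MOVDaddr [off1+off2] {sym} ptr)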
+func rewriteValueARM64_OpAdd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add16 x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64ADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
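+// Add8, Add16, and Add32 all lower to the same 64-bit ADD: a narrow add only
+// needs its low bits to be correct, and consumers that need more insert
+// explicit sign or zero extensions.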
+func rewriteValueARM64_OpAdd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32 x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64ADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpAdd32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32F x y)
+ // cond:
+ // result: (FADDS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64FADDS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpAdd64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add64 x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64ADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpAdd64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add64F x y)
+ // cond:
+ // result: (FADDD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64FADDD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpAdd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add8 x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64ADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpAddPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AddPtr x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64ADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpAddr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Addr {sym} base)
+ // cond:
+ // result: (MOVDaddr {sym} base)
+ for {
+ sym := v.Aux
+ base := v.Args[0]
+ v.reset(OpARM64MOVDaddr)
+ v.Aux = sym
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueARM64_OpAnd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And16 x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64AND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpAnd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And32 x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64AND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpAnd64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And64 x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64AND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpAnd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And8 x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64AND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpAndB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AndB x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64AND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpClosureCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ClosureCall [argwid] entry closure mem)
+ // cond:
+ // result: (CALLclosure [argwid] entry closure mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ closure := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64CALLclosure)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(closure)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM64_OpCom16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com16 x)
+ // cond:
+ // result: (MVN x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MVN)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCom32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com32 x)
+ // cond:
+ // result: (MVN x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MVN)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCom64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com64 x)
+ // cond:
+ // result: (MVN x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MVN)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCom8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com8 x)
+ // cond:
+ // result: (MVN x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MVN)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpConst16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const16 [val])
+ // cond:
+ // result: (MOVDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM64_OpConst32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32 [val])
+ // cond:
+ // result: (MOVDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM64_OpConst32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32F [val])
+ // cond:
+ // result: (FMOVSconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARM64FMOVSconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM64_OpConst64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const64 [val])
+ // cond:
+ // result: (MOVDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM64_OpConst64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const64F [val])
+ // cond:
+ // result: (FMOVDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARM64FMOVDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM64_OpConst8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const8 [val])
+ // cond:
+ // result: (MOVDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM64_OpConstBool(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstBool [b])
+ // cond:
+ // result: (MOVDconst [b])
+ for {
+ b := v.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = b
+ return true
+ }
+}
+func rewriteValueARM64_OpConstNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstNil)
+ // cond:
+ // result: (MOVDconst [0])
+ for {
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = 0
+ return true
+ }
+}
+func rewriteValueARM64_OpConvert(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Convert x mem)
+ // cond:
+ // result: (MOVDconvert x mem)
+ for {
+ x := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVDconvert)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+}
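+
+// The Cvt rewrites map one-to-one onto ARM64 conversion instructions:
+// FCVTZS*/FCVTZU* truncate a float to a signed/unsigned integer,
+// SCVTF*/UCVTF* convert a signed/unsigned integer to float, FCVTSD and
+// FCVTDS change precision, and a W in the name marks the 32-bit integer
+// variant.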
+func rewriteValueARM64_OpCvt32Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto32 x)
+ // cond:
+ // result: (FCVTZSSW x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64FCVTZSSW)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt32Fto32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto32U x)
+ // cond:
+ // result: (FCVTZUSW x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64FCVTZUSW)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt32Fto64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto64 x)
+ // cond:
+ // result: (FCVTZSS x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64FCVTZSS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt32Fto64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto64F x)
+ // cond:
+ // result: (FCVTSD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64FCVTSD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt32Uto32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Uto32F x)
+ // cond:
+ // result: (UCVTFWS x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64UCVTFWS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt32Uto64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Uto64F x)
+ // cond:
+ // result: (UCVTFWD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64UCVTFWD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt32to32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to32F x)
+ // cond:
+ // result: (SCVTFWS x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64SCVTFWS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt32to64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to64F x)
+ // cond:
+ // result: (SCVTFWD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64SCVTFWD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt64Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32 x)
+ // cond:
+ // result: (FCVTZSDW x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64FCVTZSDW)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt64Fto32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32F x)
+ // cond:
+ // result: (FCVTDS x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64FCVTDS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt64Fto32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32U x)
+ // cond:
+ // result: (FCVTZUDW x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64FCVTZUDW)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt64Fto64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto64 x)
+ // cond:
+ // result: (FCVTZSD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64FCVTZSD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt64to32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64to32F x)
+ // cond:
+ // result: (SCVTFS x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64SCVTFS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpCvt64to64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64to64F x)
+ // cond:
+ // result: (SCVTFD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64SCVTFD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpDeferCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (DeferCall [argwid] mem)
+ // cond:
+ // result: (CALLdefer [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(OpARM64CALLdefer)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
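+
+// ARM64 divides only in 32- and 64-bit widths (DIVW/UDIVW and DIV/UDIV,
+// with MODW/UMODW and MOD/UMOD for remainders), so the 8- and 16-bit Div
+// and Mod ops widen their operands with the matching sign or zero
+// extension first.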
+func rewriteValueARM64_OpDiv16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16 x y)
+ // cond:
+ // result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64DIVW)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16u x y)
+ // cond:
+ // result: (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64UDIVW)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32 x y)
+ // cond:
+ // result: (DIVW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64DIVW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32F x y)
+ // cond:
+ // result: (FDIVS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64FDIVS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32u x y)
+ // cond:
+ // result: (UDIVW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64UDIVW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64 x y)
+ // cond:
+ // result: (DIV x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64DIV)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64F x y)
+ // cond:
+ // result: (FDIVD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64FDIVD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv64u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64u x y)
+ // cond:
+ // result: (UDIV x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64UDIV)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8 x y)
+ // cond:
+ // result: (DIVW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64DIVW)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8u x y)
+ // cond:
+ // result: (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64UDIVW)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
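+
+// Comparisons lower to a flag-setting compare (CMP/CMPW for integers,
+// FCMPS/FCMPD for floats) whose result feeds a boolean pseudo-op such as
+// Equal or GreaterThan; 8- and 16-bit operands are extended to 32 bits so
+// the compare sees the full value.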
+func rewriteValueARM64_OpEq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq16 x y)
+ // cond:
+ // result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32 x y)
+ // cond:
+ // result: (Equal (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32F x y)
+ // cond:
+ // result: (Equal (FCMPS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq64 x y)
+ // cond:
+ // result: (Equal (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq64F x y)
+ // cond:
+ // result: (Equal (FCMPD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq8 x y)
+ // cond:
+ // result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqB x y)
+ // cond:
+ // result: (XORconst [1] (XOR <config.fe.TypeBool()> x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64XORconst)
+ v.AuxInt = 1
+ v0 := b.NewValue0(v.Line, OpARM64XOR, config.fe.TypeBool())
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqPtr x y)
+ // cond:
+ // result: (Equal (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
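+
+// The MOV*/FMOV* load and store rewrites below fold a constant offset
+// (ADDconst) or a static base address (MOVDaddr) into the instruction's
+// AuxInt/Aux fields, merging symbols only when canMergeSym permits.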
+func rewriteValueARM64_OpARM64FMOVDload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (FMOVDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64FMOVDload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64FMOVDload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (FMOVDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64FMOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64FMOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVSload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (FMOVSload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64FMOVSload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64FMOVSload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVSstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (FMOVSstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpGeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16 x y)
+ // cond:
+ // result: (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16U x y)
+ // cond:
+ // result: (GreaterEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterEqualU)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32 x y)
+ // cond:
+ // result: (GreaterEqual (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32F x y)
+ // cond:
+ // result: (GreaterEqual (FCMPS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32U x y)
+ // cond:
+ // result: (GreaterEqualU (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterEqualU)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGeq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64 x y)
+ // cond:
+ // result: (GreaterEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64F x y)
+ // cond:
+ // result: (GreaterEqual (FCMPD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGeq64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64U x y)
+ // cond:
+ // result: (GreaterEqualU (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterEqualU)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8 x y)
+ // cond:
+ // result: (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8U x y)
+ // cond:
+ // result: (GreaterEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterEqualU)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGetClosurePtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GetClosurePtr)
+ // cond:
+ // result: (LoweredGetClosurePtr)
+ for {
+ v.reset(OpARM64LoweredGetClosurePtr)
+ return true
+ }
+}
+func rewriteValueARM64_OpGoCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GoCall [argwid] mem)
+ // cond:
+ // result: (CALLgo [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(OpARM64CALLgo)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM64_OpGreater16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16 x y)
+ // cond:
+ // result: (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterThan)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGreater16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16U x y)
+ // cond:
+ // result: (GreaterThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterThanU)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGreater32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32 x y)
+ // cond:
+ // result: (GreaterThan (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterThan)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGreater32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32F x y)
+ // cond:
+ // result: (GreaterThan (FCMPS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterThan)
+ v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGreater32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32U x y)
+ // cond:
+ // result: (GreaterThanU (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterThanU)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGreater64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64 x y)
+ // cond:
+ // result: (GreaterThan (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterThan)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGreater64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64F x y)
+ // cond:
+ // result: (GreaterThan (FCMPD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterThan)
+ v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGreater64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64U x y)
+ // cond:
+ // result: (GreaterThanU (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterThanU)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGreater8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8 x y)
+ // cond:
+ // result: (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterThan)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpGreater8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8U x y)
+ // cond:
+ // result: (GreaterThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterThanU)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpInterCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (InterCall [argwid] entry mem)
+ // cond:
+ // result: (CALLinter [argwid] entry mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64CALLinter)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(mem)
+ return true
+ }
+}
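+
+// Bounds checks are unsigned compares: a negative index, viewed as a large
+// unsigned value, automatically fails idx < len (or idx <= len for slices).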
+func rewriteValueARM64_OpIsInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsInBounds idx len)
+ // cond:
+ // result: (LessThanU (CMP idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpIsNonNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsNonNil ptr)
+ // cond:
+ // result: (NotEqual (CMPconst [0] ptr))
+ for {
+ ptr := v.Args[0]
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(ptr)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpIsSliceInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsSliceInBounds idx len)
+ // cond:
+ // result: (LessEqualU (CMP idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(OpARM64LessEqualU)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16 x y)
+ // cond:
+ // result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16U x y)
+ // cond:
+ // result: (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessEqualU)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32 x y)
+ // cond:
+ // result: (LessEqual (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
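+
+// Floating-point Leq and Less swap their operands and reverse the
+// comparison (x <= y becomes y >= x) so that any comparison involving a
+// NaN evaluates to false under the unordered condition flags.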
+func rewriteValueARM64_OpLeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32F x y)
+ // cond:
+ // result: (GreaterEqual (FCMPS y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32U x y)
+ // cond:
+ // result: (LessEqualU (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessEqualU)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64 x y)
+ // cond:
+ // result: (LessEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64F x y)
+ // cond:
+ // result: (GreaterEqual (FCMPD y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64U x y)
+ // cond:
+ // result: (LessEqualU (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessEqualU)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8 x y)
+ // cond:
+ // result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8U x y)
+ // cond:
+ // result: (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessEqualU)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16 x y)
+ // cond:
+ // result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessThan)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16U x y)
+ // cond:
+ // result: (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32 x y)
+ // cond:
+ // result: (LessThan (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessThan)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32F x y)
+ // cond:
+ // result: (GreaterThan (FCMPS y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterThan)
+ v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32U x y)
+ // cond:
+ // result: (LessThanU (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64 x y)
+ // cond:
+ // result: (LessThan (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessThan)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64F x y)
+ // cond:
+ // result: (GreaterThan (FCMPD y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64GreaterThan)
+ v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64U x y)
+ // cond:
+ // result: (LessThanU (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8 x y)
+ // cond:
+ // result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessThan)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8U x y)
+ // cond:
+ // result: (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
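+
+// Load picks an instruction by type: booleans and unsigned integers get
+// zero-extending loads (MOVBU/MOVHU/MOVWU), signed integers get
+// sign-extending loads, 64-bit integers and pointers use MOVD, and floats
+// use FMOVS/FMOVD.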
+func rewriteValueARM64_OpLoad(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && isSigned(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is8BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpARM64MOVBload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && !isSigned(t))
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is8BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && isSigned(t))
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is16BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpARM64MOVHload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && !isSigned(t))
+ // result: (MOVHUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is16BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpARM64MOVHUload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && isSigned(t))
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpARM64MOVWload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && !isSigned(t))
+ // result: (MOVWUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpARM64MOVWUload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpARM64MOVDload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (FMOVSload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpARM64FMOVSload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (FMOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpARM64FMOVDload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBUload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVBUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVBUload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVBload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVBload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVDload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVDload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (MOVDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHUload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVHUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVHUload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVHUload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVHload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVHload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVHload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (MOVHstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWUload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVWUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVWUload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVWUload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVWload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVWload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpMod16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16 x y)
+ // cond:
+ // result: (MODW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64MODW)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16u x y)
+ // cond:
+ // result: (UMODW (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64UMODW)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32 x y)
+ // cond:
+ // result: (MODW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64MODW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32u x y)
+ // cond:
+ // result: (UMODW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64UMODW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod64 x y)
+ // cond:
+ // result: (MOD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64MOD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod64u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod64u x y)
+ // cond:
+ // result: (UMOD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64UMOD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8 x y)
+ // cond:
+ // result: (MODW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64MODW)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8u x y)
+ // cond:
+ // result: (UMODW (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64UMODW)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
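+// Mul lowering: every integer width maps to the single MUL op. The
+// low-order bits of a product do not depend on the width at which the
+// operands are interpreted, and only those low-order bits are meaningful
+// for the sub-word results.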
+func rewriteValueARM64_OpMul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul16 x y)
+ // cond:
+ // result: (MUL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64MUL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpMul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32 x y)
+ // cond:
+ // result: (MUL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64MUL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpMul32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32F x y)
+ // cond:
+ // result: (FMULS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64FMULS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpMul64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul64 x y)
+ // cond:
+ // result: (MUL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64MUL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpMul64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul64F x y)
+ // cond:
+ // result: (FMULD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64FMULD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpMul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul8 x y)
+ // cond:
+ // result: (MUL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64MUL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
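+// Neg lowering: integer negation of any width uses NEG; the float cases
+// use the dedicated FNEGS/FNEGD ops.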
+func rewriteValueARM64_OpNeg16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg16 x)
+ // cond:
+ // result: (NEG x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64NEG)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeg32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg32 x)
+ // cond:
+ // result: (NEG x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64NEG)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeg32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg32F x)
+ // cond:
+ // result: (FNEGS x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64FNEGS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeg64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg64 x)
+ // cond:
+ // result: (NEG x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64NEG)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeg64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg64F x)
+ // cond:
+ // result: (FNEGD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64FNEGD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeg8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg8 x)
+ // cond:
+ // result: (NEG x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64NEG)
+ v.AddArg(x)
+ return true
+ }
+}
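+// Neq lowering: each comparison produces a flags value (CMPW/CMP for
+// integers, FCMPS/FCMPD for floats) consumed by NotEqual. Sub-word
+// operands are zero-extended to 32 bits first; zero-extension preserves
+// (in)equality, so no sign extension is needed even for signed types.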
+func rewriteValueARM64_OpNeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq16 x y)
+ // cond:
+ // result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32 x y)
+ // cond:
+ // result: (NotEqual (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32F x y)
+ // cond:
+ // result: (NotEqual (FCMPS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq64 x y)
+ // cond:
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq64F x y)
+ // cond:
+ // result: (NotEqual (FCMPD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq8 x y)
+ // cond:
+ // result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
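+// NeqB: booleans are materialized as 0 or 1, so the XOR of the two values
+// is exactly their inequality.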
+func rewriteValueARM64_OpNeqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqB x y)
+ // cond:
+ // result: (XOR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64XOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqPtr x y)
+ // cond:
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNilCheck(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NilCheck ptr mem)
+ // cond:
+ // result: (LoweredNilCheck ptr mem)
+ for {
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64LoweredNilCheck)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+}
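+// Not: with booleans represented as 0 or 1, XORing with the constant 1
+// flips the value.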
+func rewriteValueARM64_OpNot(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Not x)
+ // cond:
+ // result: (XORconst [1] x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64XORconst)
+ v.AuxInt = 1
+ v.AddArg(x)
+ return true
+ }
+}
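+// OffPtr: an offset from SP is kept as a MOVDaddr address computation,
+// presumably so stack references stay recognizable downstream; any other
+// base pointer gets a plain ADDconst.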
+func rewriteValueARM64_OpOffPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OffPtr [off] ptr:(SP))
+ // cond:
+ // result: (MOVDaddr [off] ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ if ptr.Op != OpSP {
+ break
+ }
+ v.reset(OpARM64MOVDaddr)
+ v.AuxInt = off
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // cond:
+ // result: (ADDconst [off] ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = off
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValueARM64_OpOr16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or16 x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64OR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpOr32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or32 x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64OR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpOr64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or64 x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64OR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpOr8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or8 x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64OR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpOrB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OrB x y)
+ // cond:
+ // result: (OR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64OR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
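+// SignExt lowering: MOVBreg, MOVHreg, and MOVWreg sign-extend from 8, 16,
+// and 32 bits respectively. Each instruction extends all the way to 64
+// bits, which is why, e.g., SignExt16to32 and SignExt16to64 share MOVHreg.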
+func rewriteValueARM64_OpSignExt16to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt16to32 x)
+ // cond:
+ // result: (MOVHreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MOVHreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpSignExt16to64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt16to64 x)
+ // cond:
+ // result: (MOVHreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MOVHreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpSignExt32to64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt32to64 x)
+ // cond:
+ // result: (MOVWreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MOVWreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpSignExt8to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt8to16 x)
+ // cond:
+ // result: (MOVBreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MOVBreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpSignExt8to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt8to32 x)
+ // cond:
+ // result: (MOVBreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MOVBreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpSignExt8to64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SignExt8to64 x)
+ // cond:
+ // result: (MOVBreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MOVBreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpStaticCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (StaticCall [argwid] {target} mem)
+ // cond:
+ // result: (CALLstatic [argwid] {target} mem)
+ for {
+ argwid := v.AuxInt
+ target := v.Aux
+ mem := v.Args[0]
+ v.reset(OpARM64CALLstatic)
+ v.AuxInt = argwid
+ v.Aux = target
+ v.AddArg(mem)
+ return true
+ }
+}
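+// Store dispatches on the store width (AuxInt) and on whether the stored
+// value is floating point: MOVBstore/MOVHstore/MOVWstore/MOVDstore for
+// integers, FMOVSstore/FMOVDstore for floats.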
+func rewriteValueARM64_OpStore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Store [1] ptr val mem)
+ // cond:
+ // result: (MOVBstore ptr val mem)
+ for {
+ if v.AuxInt != 1 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVBstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [2] ptr val mem)
+ // cond:
+ // result: (MOVHstore ptr val mem)
+ for {
+ if v.AuxInt != 2 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVHstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [4] ptr val mem)
+ // cond: !is32BitFloat(val.Type)
+ // result: (MOVWstore ptr val mem)
+ for {
+ if v.AuxInt != 4 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(!is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [8] ptr val mem)
+ // cond: !is64BitFloat(val.Type)
+ // result: (MOVDstore ptr val mem)
+ for {
+ if v.AuxInt != 8 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(!is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [4] ptr val mem)
+ // cond: is32BitFloat(val.Type)
+ // result: (FMOVSstore ptr val mem)
+ for {
+ if v.AuxInt != 4 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARM64FMOVSstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Store [8] ptr val mem)
+ // cond: is64BitFloat(val.Type)
+ // result: (FMOVDstore ptr val mem)
+ for {
+ if v.AuxInt != 8 {
+ break
+ }
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARM64FMOVDstore)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpSub16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub16 x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpSub32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32 x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpSub32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub32F x y)
+ // cond:
+ // result: (FSUBS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64FSUBS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpSub64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub64 x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpSub64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub64F x y)
+ // cond:
+ // result: (FSUBD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64FSUBD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpSub8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Sub8 x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpSubPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SubPtr x y)
+ // cond:
+ // result: (SUB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64SUB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
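+// Trunc lowering: truncations compile to a plain copy. Consumers of a
+// sub-word value look only at its low-order bits, so no instruction is
+// needed.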
+func rewriteValueARM64_OpTrunc16to8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc16to8 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpTrunc32to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc32to16 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpTrunc32to8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc32to8 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpTrunc64to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc64to16 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpTrunc64to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc64to32 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpTrunc64to8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Trunc64to8 x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpXor16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Xor16 x y)
+ // cond:
+ // result: (XOR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64XOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpXor32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Xor32 x y)
+ // cond:
+ // result: (XOR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64XOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpXor64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Xor64 x y)
+ // cond:
+ // result: (XOR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64XOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM64_OpXor8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Xor8 x y)
+ // cond:
+ // result: (XOR x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64XOR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
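+// ZeroExt lowering: the unsigned counterparts of the sign-extension rules
+// above, using MOVBUreg/MOVHUreg/MOVWUreg.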
+func rewriteValueARM64_OpZeroExt16to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt16to32 x)
+ // cond:
+ // result: (MOVHUreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MOVHUreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpZeroExt16to64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt16to64 x)
+ // cond:
+ // result: (MOVHUreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MOVHUreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpZeroExt32to64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt32to64 x)
+ // cond:
+ // result: (MOVWUreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MOVWUreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpZeroExt8to16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt8to16 x)
+ // cond:
+ // result: (MOVBUreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MOVBUreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpZeroExt8to32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt8to32 x)
+ // cond:
+ // result: (MOVBUreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MOVBUreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM64_OpZeroExt8to64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ZeroExt8to64 x)
+ // cond:
+ // result: (MOVBUreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64MOVBUreg)
+ v.AddArg(x)
+ return true
+ }
+}
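+// rewriteBlockARM64 lowers generic If blocks to ARM64 conditional blocks.
+// A control value produced by one of the flag-reading pseudo-ops (Equal,
+// LessThan, ...) branches directly on the underlying flags; the final
+// catch-all materializes any other boolean with CMPconst [0] and branches
+// on NE. The BlockARM64NE cases then collapse the redundant
+// (CMPconst [0] (Equal cc)) and similar patterns that the catch-all
+// creates. A sketch of the net effect, assuming the condition has already
+// been lowered elsewhere to a flag-reading pseudo-op:
+//	If (LessThan (CMP x y)) yes no
+//	-> LT (CMP x y) yes no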
+func rewriteBlockARM64(b *Block) bool {
+ switch b.Kind {
+ case BlockIf:
+ // match: (If (Equal cc) yes no)
+ // cond:
+ // result: (EQ cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64Equal {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64EQ
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (NotEqual cc) yes no)
+ // cond:
+ // result: (NE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64NotEqual {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64NE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (LessThan cc) yes no)
+ // cond:
+ // result: (LT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64LessThan {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64LT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (LessThanU cc) yes no)
+ // cond:
+ // result: (ULT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64LessThanU {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64ULT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (LessEqual cc) yes no)
+ // cond:
+ // result: (LE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64LessEqual {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64LE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (LessEqualU cc) yes no)
+ // cond:
+ // result: (ULE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64LessEqualU {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64ULE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (GreaterThan cc) yes no)
+ // cond:
+ // result: (GT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64GreaterThan {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64GT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (GreaterThanU cc) yes no)
+ // cond:
+ // result: (UGT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64GreaterThanU {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64UGT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (GreaterEqual cc) yes no)
+ // cond:
+ // result: (GE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64GreaterEqual {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64GE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If (GreaterEqualU cc) yes no)
+ // cond:
+ // result: (UGE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64GreaterEqualU {
+ break
+ }
+ cc := v.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64UGE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (If cond yes no)
+ // cond:
+ // result: (NE (CMPconst [0] cond) yes no)
+ for {
+ v := b.Control
+ cond := b.Control
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64NE
+ v0 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(cond)
+ b.SetControl(v0)
+ _ = yes
+ _ = no
+ return true
+ }
+ case BlockARM64NE:
+ // match: (NE (CMPconst [0] (Equal cc)) yes no)
+ // cond:
+ // result: (EQ cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64CMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64Equal {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64EQ
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (NotEqual cc)) yes no)
+ // cond:
+ // result: (NE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64CMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64NotEqual {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64NE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessThan cc)) yes no)
+ // cond:
+ // result: (LT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64CMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64LessThan {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64LT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessThanU cc)) yes no)
+ // cond:
+ // result: (ULT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64CMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64LessThanU {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64ULT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessEqual cc)) yes no)
+ // cond:
+ // result: (LE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64CMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64LessEqual {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64LE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessEqualU cc)) yes no)
+ // cond:
+ // result: (ULE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64CMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64LessEqualU {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64ULE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterThan cc)) yes no)
+ // cond:
+ // result: (GT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64CMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64GreaterThan {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64GT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterThanU cc)) yes no)
+ // cond:
+ // result: (UGT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64CMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64GreaterThanU {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64UGT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterEqual cc)) yes no)
+ // cond:
+ // result: (GE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64CMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64GreaterEqual {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64GE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterEqualU cc)) yes no)
+ // cond:
+ // result: (UGE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARM64CMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64GreaterEqualU {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARM64UGE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ }
+ return false
+}