Diffstat (limited to 'src/cmd/compile/internal/arm64/ssa.go')
-rw-r--r--  src/cmd/compile/internal/arm64/ssa.go  628
1 file changed, 628 insertions, 0 deletions
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
new file mode 100644
index 0000000000..90dab7f4dc
--- /dev/null
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -0,0 +1,628 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm64
+
+import (
+ "math"
+
+ "cmd/compile/internal/gc"
+ "cmd/compile/internal/ssa"
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm64"
+)
+
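+// ssaRegToReg maps an SSA register number (the slice index) to the
+// assembler's register constant.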
+var ssaRegToReg = []int16{
+ arm64.REG_R0,
+ arm64.REG_R1,
+ arm64.REG_R2,
+ arm64.REG_R3,
+ arm64.REG_R4,
+ arm64.REG_R5,
+ arm64.REG_R6,
+ arm64.REG_R7,
+ arm64.REG_R8,
+ arm64.REG_R9,
+ arm64.REG_R10,
+ arm64.REG_R11,
+ arm64.REG_R12,
+ arm64.REG_R13,
+ arm64.REG_R14,
+ arm64.REG_R15,
+ arm64.REG_R16,
+ arm64.REG_R17,
+ arm64.REG_R18,
+ arm64.REG_R19,
+ arm64.REG_R20,
+ arm64.REG_R21,
+ arm64.REG_R22,
+ arm64.REG_R23,
+ arm64.REG_R24,
+ arm64.REG_R25,
+ arm64.REG_R26,
+ // R27 = REGTMP, not used in regalloc
+ arm64.REGG, // R28
+ arm64.REG_R29,
+ // R30 = REGLINK, not used in regalloc
+ arm64.REGSP, // R31
+
+ arm64.REG_F0,
+ arm64.REG_F1,
+ arm64.REG_F2,
+ arm64.REG_F3,
+ arm64.REG_F4,
+ arm64.REG_F5,
+ arm64.REG_F6,
+ arm64.REG_F7,
+ arm64.REG_F8,
+ arm64.REG_F9,
+ arm64.REG_F10,
+ arm64.REG_F11,
+ arm64.REG_F12,
+ arm64.REG_F13,
+ arm64.REG_F14,
+ arm64.REG_F15,
+ arm64.REG_F16,
+ arm64.REG_F17,
+ arm64.REG_F18,
+ arm64.REG_F19,
+ arm64.REG_F20,
+ arm64.REG_F21,
+ arm64.REG_F22,
+ arm64.REG_F23,
+ arm64.REG_F24,
+ arm64.REG_F25,
+ arm64.REG_F26,
+ arm64.REG_F27,
+ arm64.REG_F28,
+ arm64.REG_F29,
+ arm64.REG_F30,
+ arm64.REG_F31,
+
+ arm64.REG_NZCV, // flag
+ 0, // SB isn't a real register. We fill an Addr.Reg field with 0 in this case.
+}
+
+// Smallest possible faulting page at address zero,
+// see ../../../../runtime/mheap.go:/minPhysPageSize
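+// A nil-pointer dereference at an offset below this value can be assumed
+// to fault.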
+const minZeroPage = 4096
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t ssa.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return arm64.AFMOVS
+ case 8:
+ return arm64.AFMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return arm64.AMOVB
+ } else {
+ return arm64.AMOVBU
+ }
+ case 2:
+ if t.IsSigned() {
+ return arm64.AMOVH
+ } else {
+ return arm64.AMOVHU
+ }
+ case 4:
+ if t.IsSigned() {
+ return arm64.AMOVW
+ } else {
+ return arm64.AMOVWU
+ }
+ case 8:
+ return arm64.AMOVD
+ }
+ }
+ panic("bad load type")
+}
+
+// storeByType returns the store instruction of the given type.
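+// Unlike loads, stores need no signedness distinction: the same bits are
+// written either way.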
+func storeByType(t ssa.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return arm64.AFMOVS
+ case 8:
+ return arm64.AFMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ return arm64.AMOVB
+ case 2:
+ return arm64.AMOVH
+ case 4:
+ return arm64.AMOVW
+ case 8:
+ return arm64.AMOVD
+ }
+ }
+ panic("bad store type")
+}
+
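+// ssaGenValue emits machine instructions for a single SSA value.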
+func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+ s.SetLineno(v.Line)
+ switch v.Op {
+ case ssa.OpInitMem:
+ // memory arg needs no code
+ case ssa.OpArg:
+ // input args need no code
+ case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
+ // nothing to do
+ case ssa.OpCopy, ssa.OpARM64MOVDconvert, ssa.OpARM64MOVDreg:
+ if v.Type.IsMemory() {
+ return
+ }
+ x := gc.SSARegNum(v.Args[0])
+ y := gc.SSARegNum(v)
+ if x == y {
+ return
+ }
+ as := arm64.AMOVD
+ if v.Type.IsFloat() {
+ switch v.Type.Size() {
+ case 4:
+ as = arm64.AFMOVS
+ case 8:
+ as = arm64.AFMOVD
+ default:
+ panic("bad float size")
+ }
+ }
+ p := gc.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Unimplementedf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ p := gc.Prog(loadByType(v.Type))
+ n, off := gc.AutoVar(v.Args[0])
+ p.From.Type = obj.TYPE_MEM
+ p.From.Node = n
+ p.From.Sym = gc.Linksym(n.Sym)
+ p.From.Offset = off
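+ // parameters are addressed in the arguments area (NAME_PARAM);
+ // locals are addressed in the autos area (NAME_AUTO)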
+ if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
+ p.From.Name = obj.NAME_PARAM
+ p.From.Offset += n.Xoffset
+ } else {
+ p.From.Name = obj.NAME_AUTO
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpPhi:
+ gc.CheckLoweredPhi(v)
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Unimplementedf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ p := gc.Prog(storeByType(v.Type))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ n, off := gc.AutoVar(v)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Node = n
+ p.To.Sym = gc.Linksym(n.Sym)
+ p.To.Offset = off
+ if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
+ p.To.Name = obj.NAME_PARAM
+ p.To.Offset += n.Xoffset
+ } else {
+ p.To.Name = obj.NAME_AUTO
+ }
+ case ssa.OpARM64ADD,
+ ssa.OpARM64SUB,
+ ssa.OpARM64AND,
+ ssa.OpARM64OR,
+ ssa.OpARM64XOR,
+ ssa.OpARM64BIC,
+ ssa.OpARM64MUL,
+ ssa.OpARM64DIV,
+ ssa.OpARM64UDIV,
+ ssa.OpARM64DIVW,
+ ssa.OpARM64UDIVW,
+ ssa.OpARM64MOD,
+ ssa.OpARM64UMOD,
+ ssa.OpARM64MODW,
+ ssa.OpARM64UMODW,
+ ssa.OpARM64FADDS,
+ ssa.OpARM64FADDD,
+ ssa.OpARM64FSUBS,
+ ssa.OpARM64FSUBD,
+ ssa.OpARM64FMULS,
+ ssa.OpARM64FMULD,
+ ssa.OpARM64FDIVS,
+ ssa.OpARM64FDIVD:
+ r := gc.SSARegNum(v)
+ r1 := gc.SSARegNum(v.Args[0])
+ r2 := gc.SSARegNum(v.Args[1])
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpARM64ADDconst,
+ ssa.OpARM64SUBconst,
+ ssa.OpARM64ANDconst,
+ ssa.OpARM64ORconst,
+ ssa.OpARM64XORconst,
+ ssa.OpARM64BICconst:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpARM64MOVDconst:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpARM64FMOVSconst,
+ ssa.OpARM64FMOVDconst:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpARM64CMP,
+ ssa.OpARM64CMPW,
+ ssa.OpARM64CMN,
+ ssa.OpARM64CMNW,
+ ssa.OpARM64FCMPS,
+ ssa.OpARM64FCMPD:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[1])
+ p.Reg = gc.SSARegNum(v.Args[0])
+ case ssa.OpARM64CMPconst,
+ ssa.OpARM64CMPWconst,
+ ssa.OpARM64CMNconst,
+ ssa.OpARM64CMNWconst:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = gc.SSARegNum(v.Args[0])
+ case ssa.OpARM64MOVDaddr:
+ p := gc.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+
+ var wantreg string
+ // MOVD $sym+off(base), R
+ // the assembler expands it as follows:
+ // - base is SP: add constant offset to SP (R31)
+ // when constant is large, the tmp register (R27) may be used
+ // - base is SB: load external address from constant pool (use relocation)
+ switch v.Aux.(type) {
+ default:
+ v.Fatalf("aux is of unknown type %T", v.Aux)
+ case *ssa.ExternSymbol:
+ wantreg = "SB"
+ gc.AddAux(&p.From, v)
+ case *ssa.ArgSymbol, *ssa.AutoSymbol:
+ wantreg = "SP"
+ gc.AddAux(&p.From, v)
+ case nil:
+ // No sym, just MOVD $off(SP), R
+ wantreg = "SP"
+ p.From.Reg = arm64.REGSP
+ p.From.Offset = v.AuxInt
+ }
+ if reg := gc.SSAReg(v.Args[0]); reg.Name() != wantreg {
+ v.Fatalf("bad reg %s for symbol type %T, want %s", reg.Name(), v.Aux, wantreg)
+ }
+ case ssa.OpARM64MOVBload,
+ ssa.OpARM64MOVBUload,
+ ssa.OpARM64MOVHload,
+ ssa.OpARM64MOVHUload,
+ ssa.OpARM64MOVWload,
+ ssa.OpARM64MOVWUload,
+ ssa.OpARM64MOVDload,
+ ssa.OpARM64FMOVSload,
+ ssa.OpARM64FMOVDload:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpARM64MOVBstore,
+ ssa.OpARM64MOVHstore,
+ ssa.OpARM64MOVWstore,
+ ssa.OpARM64MOVDstore,
+ ssa.OpARM64FMOVSstore,
+ ssa.OpARM64FMOVDstore:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[1])
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.To, v)
+ case ssa.OpARM64MOVBreg,
+ ssa.OpARM64MOVBUreg,
+ ssa.OpARM64MOVHreg,
+ ssa.OpARM64MOVHUreg,
+ ssa.OpARM64MOVWreg,
+ ssa.OpARM64MOVWUreg:
+ a := v.Args[0]
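+ // look through register-to-register moves to find the value that was loaded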
+ for a.Op == ssa.OpCopy || a.Op == ssa.OpARM64MOVDreg {
+ a = a.Args[0]
+ }
+ if a.Op == ssa.OpLoadReg {
+ t := a.Type
+ switch {
+ case v.Op == ssa.OpARM64MOVBreg && t.Size() == 1 && t.IsSigned(),
+ v.Op == ssa.OpARM64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpARM64MOVHreg && t.Size() == 2 && t.IsSigned(),
+ v.Op == ssa.OpARM64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
+ v.Op == ssa.OpARM64MOVWreg && t.Size() == 4 && t.IsSigned(),
+ v.Op == ssa.OpARM64MOVWUreg && t.Size() == 4 && !t.IsSigned():
+ // arg is a proper-typed load, already zero/sign-extended, don't extend again
+ if gc.SSARegNum(v) == gc.SSARegNum(v.Args[0]) {
+ return
+ }
+ p := gc.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ return
+ default:
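+ // not a proper-typed load; the extension below is still needed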
+ }
+ }
+ fallthrough
+ case ssa.OpARM64MVN,
+ ssa.OpARM64NEG,
+ ssa.OpARM64FNEGS,
+ ssa.OpARM64FNEGD,
+ ssa.OpARM64FSQRTD,
+ ssa.OpARM64FCVTZSSW,
+ ssa.OpARM64FCVTZSDW,
+ ssa.OpARM64FCVTZUSW,
+ ssa.OpARM64FCVTZUDW,
+ ssa.OpARM64FCVTZSS,
+ ssa.OpARM64FCVTZSD,
+ ssa.OpARM64FCVTZUS,
+ ssa.OpARM64FCVTZUD,
+ ssa.OpARM64SCVTFWS,
+ ssa.OpARM64SCVTFWD,
+ ssa.OpARM64SCVTFS,
+ ssa.OpARM64SCVTFD,
+ ssa.OpARM64UCVTFWS,
+ ssa.OpARM64UCVTFWD,
+ ssa.OpARM64UCVTFS,
+ ssa.OpARM64UCVTFD,
+ ssa.OpARM64FCVTSD,
+ ssa.OpARM64FCVTDS:
+ p := gc.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpARM64CALLstatic:
+ if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
+ // Deferred calls will appear to be returning to
+ // the CALL deferreturn(SB) that we are about to emit.
+ // However, the stack trace code will show the line
+ // of the instruction byte before the return PC.
+ // To avoid that being an unrelated instruction,
+ // insert an actual hardware NOP that will have the right line number.
+ // This is different from obj.ANOP, which is a virtual no-op
+ // that doesn't make it into the instruction stream.
+ ginsnop()
+ }
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym))
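+ // track the largest outgoing argument area (v.AuxInt is the call's
+ // argument size) so the frame is sized to fit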
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.OpARM64CALLclosure:
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = 0
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.OpARM64CALLdefer:
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.OpARM64CALLgo:
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(gc.Newproc.Sym)
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.OpARM64CALLinter:
+ p := gc.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = 0
+ p.To.Reg = gc.SSARegNum(v.Args[0])
+ if gc.Maxarg < v.AuxInt {
+ gc.Maxarg = v.AuxInt
+ }
+ case ssa.OpARM64LoweredNilCheck:
+ // Issue a load which will fault if arg is nil.
+ p := gc.Prog(arm64.AMOVB)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = gc.SSARegNum(v.Args[0])
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REGTMP
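+ // the loaded value is unused; REGTMP serves only as a sink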
+ if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
+ gc.Warnl(v.Line, "generated nil check")
+ }
+ case ssa.OpVarDef:
+ gc.Gvardef(v.Aux.(*gc.Node))
+ case ssa.OpVarKill:
+ gc.Gvarkill(v.Aux.(*gc.Node))
+ case ssa.OpVarLive:
+ gc.Gvarlive(v.Aux.(*gc.Node))
+ case ssa.OpKeepAlive:
+ if !v.Args[0].Type.IsPtrShaped() {
+ v.Fatalf("keeping non-pointer alive %v", v.Args[0])
+ }
+ n, off := gc.AutoVar(v.Args[0])
+ if n == nil {
+ v.Fatalf("KeepLive with non-spilled value %s %s", v, v.Args[0])
+ }
+ if off != 0 {
+ v.Fatalf("KeepLive with non-zero offset spill location %s:%d", n, off)
+ }
+ gc.Gvarlive(n)
+ case ssa.OpARM64Equal,
+ ssa.OpARM64NotEqual,
+ ssa.OpARM64LessThan,
+ ssa.OpARM64LessEqual,
+ ssa.OpARM64GreaterThan,
+ ssa.OpARM64GreaterEqual,
+ ssa.OpARM64LessThanU,
+ ssa.OpARM64LessEqualU,
+ ssa.OpARM64GreaterThanU,
+ ssa.OpARM64GreaterEqualU:
+ // generate boolean values using CSET
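+ // CSET writes 1 to the destination register when the condition holds,
+ // 0 otherwise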
+ p := gc.Prog(arm64.ACSET)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = condBits[v.Op]
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = gc.SSARegNum(v)
+ case ssa.OpSelect0, ssa.OpSelect1:
+ // nothing to do
+ case ssa.OpARM64LoweredGetClosurePtr:
+ // Closure pointer is R26 (arm64.REGCTXT).
+ gc.CheckLoweredGetClosurePtr(v)
+ case ssa.OpARM64FlagEQ,
+ ssa.OpARM64FlagLT_ULT,
+ ssa.OpARM64FlagLT_UGT,
+ ssa.OpARM64FlagGT_ULT,
+ ssa.OpARM64FlagGT_UGT:
+ v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
+ case ssa.OpARM64InvertFlags:
+ v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
+ default:
+ v.Unimplementedf("genValue not implemented: %s", v.LongString())
+ }
+}
+
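+// condBits maps an SSA comparison op to the condition-code operand
+// consumed by CSET.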
+var condBits = map[ssa.Op]int16{
+ ssa.OpARM64Equal: arm64.COND_EQ,
+ ssa.OpARM64NotEqual: arm64.COND_NE,
+ ssa.OpARM64LessThan: arm64.COND_LT,
+ ssa.OpARM64LessThanU: arm64.COND_LO,
+ ssa.OpARM64LessEqual: arm64.COND_LE,
+ ssa.OpARM64LessEqualU: arm64.COND_LS,
+ ssa.OpARM64GreaterThan: arm64.COND_GT,
+ ssa.OpARM64GreaterThanU: arm64.COND_HI,
+ ssa.OpARM64GreaterEqual: arm64.COND_GE,
+ ssa.OpARM64GreaterEqualU: arm64.COND_HS,
+}
+
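+// blockJump gives, for each conditional block kind, the branch instruction
+// taken when the condition holds (asm) and its inversion (invasm). For
+// example, BlockARM64EQ emits BEQ when its "true" successor must be reached
+// by an explicit branch, and BNE when the "true" successor is the
+// fallthrough block.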
+var blockJump = map[ssa.BlockKind]struct {
+ asm, invasm obj.As
+}{
+ ssa.BlockARM64EQ: {arm64.ABEQ, arm64.ABNE},
+ ssa.BlockARM64NE: {arm64.ABNE, arm64.ABEQ},
+ ssa.BlockARM64LT: {arm64.ABLT, arm64.ABGE},
+ ssa.BlockARM64GE: {arm64.ABGE, arm64.ABLT},
+ ssa.BlockARM64LE: {arm64.ABLE, arm64.ABGT},
+ ssa.BlockARM64GT: {arm64.ABGT, arm64.ABLE},
+ ssa.BlockARM64ULT: {arm64.ABLO, arm64.ABHS},
+ ssa.BlockARM64UGE: {arm64.ABHS, arm64.ABLO},
+ ssa.BlockARM64UGT: {arm64.ABHI, arm64.ABLS},
+ ssa.BlockARM64ULE: {arm64.ABLS, arm64.ABHI},
+}
+
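+// ssaGenBlock emits the control flow ending block b. next is the block
+// laid out immediately after b, so a branch to it can be omitted.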
+func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+ s.SetLineno(b.Line)
+
+ switch b.Kind {
+ case ssa.BlockPlain, ssa.BlockCall, ssa.BlockCheck:
+ if b.Succs[0].Block() != next {
+ p := gc.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+
+ case ssa.BlockDefer:
+ // defer returns in R0:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := gc.Prog(arm64.ACMP)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0
+ p.Reg = arm64.REG_R0
+ p = gc.Prog(arm64.ABNE)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := gc.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+
+ case ssa.BlockExit:
+ gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
+
+ case ssa.BlockRet:
+ gc.Prog(obj.ARET)
+
+ case ssa.BlockRetJmp:
+ p := gc.Prog(obj.ARET)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))
+
+ case ssa.BlockARM64EQ, ssa.BlockARM64NE,
+ ssa.BlockARM64LT, ssa.BlockARM64GE,
+ ssa.BlockARM64LE, ssa.BlockARM64GT,
+ ssa.BlockARM64ULT, ssa.BlockARM64UGT,
+ ssa.BlockARM64ULE, ssa.BlockARM64UGE:
+ jmp := blockJump[b.Kind]
+ var p *obj.Prog
+ switch next {
+ case b.Succs[0].Block():
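+ // "true" successor is the fallthrough: branch to the "false"
+ // successor on the inverted condition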
+ p = gc.Prog(jmp.invasm)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ case b.Succs[1].Block():
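+ // "false" successor is the fallthrough: branch to the "true"
+ // successor on the condition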
+ p = gc.Prog(jmp.asm)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ default:
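+ // neither successor is the fallthrough: conditionally branch to the
+ // "true" successor, then jump unconditionally to the "false" one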
+ p = gc.Prog(jmp.asm)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ q := gc.Prog(obj.AJMP)
+ q.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
+ }
+
+ default:
+ b.Unimplementedf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
+ }
+}