author    Russ Cox <rsc@golang.org>  2020-12-23 00:57:10 -0500
committer Russ Cox <rsc@golang.org>  2020-12-23 06:39:29 +0000
commit    6c34d2f42077bd7757c942c8d1b466366190b45a (patch)
tree      df1ccc0d4b96b79fc74f123ea84edf924a142ad4 /src/cmd/compile/internal/amd64
parent    de65151e507e7b3c8e46d74f223d7c562177bedc (diff)
[dev.regabi] cmd/compile: split out package ssagen [generated]
[git-generate]
cd src/cmd/compile/internal/gc
rf '
	# maxOpenDefers is declared in ssa.go but used only by walk.
	mv maxOpenDefers walk.go

	# gc.Arch -> ssagen.Arch
	# It is not as nice but will do for now.
	mv Arch ArchInfo
	mv thearch Arch
	mv Arch ArchInfo arch.go

	# Pull dwarf out of pgen.go.
	mv debuginfo declPos createDwarfVars preInliningDcls \
		createSimpleVars createSimpleVar \
		createComplexVars createComplexVar \
		dwarf.go

	# Pull high-level compilation out of pgen.go,
	# leaving only the SSA code.
	mv compilequeue funccompile compile compilenow \
		compileFunctions isInlinableButNotInlined \
		initLSym \
		compile.go

	mv BoundsCheckFunc GCWriteBarrierReg ssa.go
	mv largeStack largeStackFrames CheckLargeStacks pgen.go

	# All that is left in dcl.go is the nowritebarrierrecCheck
	mv dcl.go nowb.go

	# Export API and unexport non-API.
	mv initssaconfig InitConfig
	mv isIntrinsicCall IsIntrinsicCall
	mv ssaDumpInline DumpInline
	mv initSSATables InitTables
	mv initSSAEnv InitEnv
	mv compileSSA Compile
	mv stackOffset StackOffset
	mv canSSAType TypeOK
	mv SSAGenState State
	mv FwdRefAux fwdRefAux
	mv cgoSymABIs CgoSymABIs
	mv readSymABIs ReadSymABIs
	mv initLSym InitLSym

	mv useABIWrapGen symabiDefs CgoSymABIs ReadSymABIs InitLSym selectLSym makeABIWrapper setupTextLSym abi.go

	mv arch.go abi.go nowb.go phi.go pgen.go pgen_test.go ssa.go cmd/compile/internal/ssagen
'
rm go.go gsubr.go

Change-Id: I47fad6cbf1d1e583fd9139003a08401d7cd048a1
Reviewed-on: https://go-review.googlesource.com/c/go/+/279476
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
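In practice, the rename means an architecture backend now populates ssagen.ArchInfo rather than gc.Arch, and shared compiler code reaches the backend hooks through the ssagen.Arch global rather than gc.thearch. A minimal sketch of the resulting wiring as seen from the front end; the placement is inferred from the rf script and the galign.go hunk below, and the surrounding function is hypothetical:

	package gc

	import (
		"cmd/compile/internal/amd64"
		"cmd/compile/internal/ssagen"
	)

	func configureBackend() {
		// ssagen.Arch is the package-level ArchInfo: the rf script above
		// renames gc.Arch to ArchInfo and gc.thearch to Arch, then moves
		// both into ssagen/arch.go. Each backend's Init fills it in.
		amd64.Init(&ssagen.Arch)

		// Front-end code then reads backend facts from the struct, e.g.
		// the widest value the backend is willing to handle:
		_ = ssagen.Arch.MAXWIDTH
	}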
Diffstat (limited to 'src/cmd/compile/internal/amd64')
-rw-r--r--  src/cmd/compile/internal/amd64/galign.go |  4 +-
-rw-r--r--  src/cmd/compile/internal/amd64/ssa.go    | 80 ++++++++++----------
2 files changed, 42 insertions(+), 42 deletions(-)
diff --git a/src/cmd/compile/internal/amd64/galign.go b/src/cmd/compile/internal/amd64/galign.go
index af58440502..ce1c402902 100644
--- a/src/cmd/compile/internal/amd64/galign.go
+++ b/src/cmd/compile/internal/amd64/galign.go
@@ -5,13 +5,13 @@
package amd64
import (
- "cmd/compile/internal/gc"
+ "cmd/compile/internal/ssagen"
"cmd/internal/obj/x86"
)
var leaptr = x86.ALEAQ
-func Init(arch *gc.Arch) {
+func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &x86.Linkamd64
arch.REGSP = x86.REGSP
arch.MAXWIDTH = 1 << 50
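The ssa.go changes below are mechanical: every *gc.SSAGenState becomes *ssagen.State, and the gc.AddAux/AddAux2 address helpers move to ssagen with it. The recurring load shape, pulled out here as a sketch for reference (the helper bodies are not part of this diff; AddAux is understood to fold the value's Aux symbol and AuxInt offset into the operand, with AddAux2 taking the offset explicitly):

	package amd64

	import (
		"cmd/compile/internal/ssa"
		"cmd/compile/internal/ssagen"
		"cmd/internal/obj"
	)

	// loadPattern mirrors the memory-load cases in ssaGenValue below:
	// build the instruction, address the source through Args[0], fold
	// in the aux symbol/offset, and target the result register.
	func loadPattern(s *ssagen.State, v *ssa.Value) {
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		ssagen.AddAux(&p.From, v) // was gc.AddAux before this CL
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	}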
diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index 0150bd296a..da355c49d1 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -9,17 +9,17 @@ import (
"math"
"cmd/compile/internal/base"
- "cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
)
// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
-func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
flive := b.FlagsLiveAtEnd
for _, c := range b.ControlValues() {
flive = c.Type.IsFlags() || flive
@@ -112,7 +112,7 @@ func moveByType(t *types.Type) obj.As {
// dest := dest(To) op src(From)
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
-func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
+func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
@@ -166,7 +166,7 @@ func duff(size int64) (int64, int64) {
return off, adj
}
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpAMD64VFMADD231SD:
p := s.Prog(v.Op.Asm())
@@ -632,12 +632,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = o
}
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
case ssa.OpAMD64LEAQ, ssa.OpAMD64LEAL, ssa.OpAMD64LEAW:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64CMPQ, ssa.OpAMD64CMPL, ssa.OpAMD64CMPW, ssa.OpAMD64CMPB,
@@ -673,7 +673,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[1].Reg()
case ssa.OpAMD64CMPQconstload, ssa.OpAMD64CMPLconstload, ssa.OpAMD64CMPWconstload, ssa.OpAMD64CMPBconstload:
@@ -681,20 +681,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux2(&p.From, v, sc.Off())
+ ssagen.AddAux2(&p.From, v, sc.Off())
p.To.Type = obj.TYPE_CONST
p.To.Offset = sc.Val()
case ssa.OpAMD64CMPQloadidx8, ssa.OpAMD64CMPQloadidx1, ssa.OpAMD64CMPLloadidx4, ssa.OpAMD64CMPLloadidx1, ssa.OpAMD64CMPWloadidx2, ssa.OpAMD64CMPWloadidx1, ssa.OpAMD64CMPBloadidx1:
p := s.Prog(v.Op.Asm())
memIdx(&p.From, v)
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[2].Reg()
case ssa.OpAMD64CMPQconstloadidx8, ssa.OpAMD64CMPQconstloadidx1, ssa.OpAMD64CMPLconstloadidx4, ssa.OpAMD64CMPLconstloadidx1, ssa.OpAMD64CMPWconstloadidx2, ssa.OpAMD64CMPWconstloadidx1, ssa.OpAMD64CMPBconstloadidx1:
sc := v.AuxValAndOff()
p := s.Prog(v.Op.Asm())
memIdx(&p.From, v)
- gc.AddAux2(&p.From, v, sc.Off())
+ ssagen.AddAux2(&p.From, v, sc.Off())
p.To.Type = obj.TYPE_CONST
p.To.Offset = sc.Val()
case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst:
@@ -734,14 +734,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1,
ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8, ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4, ssa.OpAMD64MOVWloadidx2:
p := s.Prog(v.Op.Asm())
memIdx(&p.From, v)
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore,
@@ -753,7 +753,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpAMD64MOVBstoreidx1, ssa.OpAMD64MOVWstoreidx1, ssa.OpAMD64MOVLstoreidx1, ssa.OpAMD64MOVQstoreidx1, ssa.OpAMD64MOVSSstoreidx1, ssa.OpAMD64MOVSDstoreidx1,
ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8, ssa.OpAMD64MOVLstoreidx8, ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4, ssa.OpAMD64MOVWstoreidx2,
ssa.OpAMD64ADDLmodifyidx1, ssa.OpAMD64ADDLmodifyidx4, ssa.OpAMD64ADDLmodifyidx8, ssa.OpAMD64ADDQmodifyidx1, ssa.OpAMD64ADDQmodifyidx8,
@@ -765,7 +765,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
memIdx(&p.To, v)
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpAMD64ADDQconstmodify, ssa.OpAMD64ADDLconstmodify:
sc := v.AuxValAndOff()
off := sc.Off()
@@ -788,7 +788,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(asm)
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux2(&p.To, v, off)
+ ssagen.AddAux2(&p.To, v, off)
break
}
fallthrough
@@ -803,7 +803,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = val
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux2(&p.To, v, off)
+ ssagen.AddAux2(&p.To, v, off)
case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst:
p := s.Prog(v.Op.Asm())
@@ -812,7 +812,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = sc.Val()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux2(&p.To, v, sc.Off())
+ ssagen.AddAux2(&p.To, v, sc.Off())
case ssa.OpAMD64MOVQstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx8, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx4, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx2, ssa.OpAMD64MOVBstoreconstidx1,
ssa.OpAMD64ADDLconstmodifyidx1, ssa.OpAMD64ADDLconstmodifyidx4, ssa.OpAMD64ADDLconstmodifyidx8, ssa.OpAMD64ADDQconstmodifyidx1, ssa.OpAMD64ADDQconstmodifyidx8,
ssa.OpAMD64ANDLconstmodifyidx1, ssa.OpAMD64ANDLconstmodifyidx4, ssa.OpAMD64ANDLconstmodifyidx8, ssa.OpAMD64ANDQconstmodifyidx1, ssa.OpAMD64ANDQconstmodifyidx8,
@@ -837,7 +837,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Type = obj.TYPE_NONE
}
memIdx(&p.To, v)
- gc.AddAux2(&p.To, v, sc.Off())
+ ssagen.AddAux2(&p.To, v, sc.Off())
case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX,
ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ,
ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS:
@@ -867,7 +867,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[1].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
if v.Reg() != v.Args[0].Reg() {
@@ -893,7 +893,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = r
p.From.Index = i
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
if v.Reg() != v.Args[0].Reg() {
@@ -951,7 +951,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
return
}
p := s.Prog(loadByType(v.Type))
- gc.AddrAuto(&p.From, v.Args[0])
+ ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -963,16 +963,16 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
- gc.AddrAuto(&p.To, v)
+ ssagen.AddrAuto(&p.To, v)
case ssa.OpAMD64LoweredHasCPUFeature:
p := s.Prog(x86.AMOVBQZX)
p.From.Type = obj.TYPE_MEM
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64LoweredGetClosurePtr:
// Closure pointer is DX.
- gc.CheckLoweredGetClosurePtr(v)
+ ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpAMD64LoweredGetG:
r := v.Reg()
// See the comments in cmd/internal/obj/x86/obj6.go
@@ -1029,13 +1029,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
// arg0 is in DI. Set sym to match where regalloc put arg1.
- p.To.Sym = gc.GCWriteBarrierReg[v.Args[1].Reg()]
+ p.To.Sym = ssagen.GCWriteBarrierReg[v.Args[1].Reg()]
case ssa.OpAMD64LoweredPanicBoundsA, ssa.OpAMD64LoweredPanicBoundsB, ssa.OpAMD64LoweredPanicBoundsC:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(int64(2 * types.PtrSize)) // space used in callee args area by assembly stubs
case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL,
@@ -1117,7 +1117,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpAMD64SETNEF:
p := s.Prog(v.Op.Asm())
@@ -1173,7 +1173,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
- gc.AddAux(&p.From, v)
+ ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpAMD64XCHGB, ssa.OpAMD64XCHGL, ssa.OpAMD64XCHGQ:
@@ -1186,7 +1186,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = r
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[1].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpAMD64XADDLlock, ssa.OpAMD64XADDQlock:
r := v.Reg0()
if r != v.Args[0].Reg() {
@@ -1198,7 +1198,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = r
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[1].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpAMD64CMPXCHGLlock, ssa.OpAMD64CMPXCHGQlock:
if v.Args[1].Reg() != x86.REG_AX {
v.Fatalf("input[1] not in AX %s", v.LongString())
@@ -1209,7 +1209,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
p = s.Prog(x86.ASETEQ)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
@@ -1220,20 +1220,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
case ssa.OpClobber:
p := s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0xdeaddead
p.To.Type = obj.TYPE_MEM
p.To.Reg = x86.REG_SP
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
p = s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0xdeaddead
p.To.Type = obj.TYPE_MEM
p.To.Reg = x86.REG_SP
- gc.AddAux(&p.To, v)
+ ssagen.AddAux(&p.To, v)
p.To.Offset += 4
default:
v.Fatalf("genValue not implemented: %s", v.LongString())
@@ -1259,22 +1259,22 @@ var blockJump = [...]struct {
ssa.BlockAMD64NAN: {x86.AJPS, x86.AJPC},
}
-var eqfJumps = [2][2]gc.IndexJump{
+var eqfJumps = [2][2]ssagen.IndexJump{
{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0]
{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1]
}
-var nefJumps = [2][2]gc.IndexJump{
+var nefJumps = [2][2]ssagen.IndexJump{
{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0]
{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1]
}
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
// defer returns in rax:
@@ -1287,11 +1287,11 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
p.To.Reg = x86.REG_AX
p = s.Prog(x86.AJNE)
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
case ssa.BlockRet:
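The ssagen.Branch values that ssaGenBlock appends above exist because a jump may target a block whose instructions have not been emitted yet: the State records each unresolved Prog, and the ssagen driver patches the targets once every block has a starting instruction. A sketch of that resolution step, assuming a bstart table mapping block IDs to each block's first Prog (the driver itself is outside this diff):

	package sketch

	import (
		"cmd/compile/internal/ssa"
		"cmd/internal/obj"
	)

	// Branch mirrors ssagen.Branch as used in the hunks above: an
	// emitted jump Prog plus the block it must eventually target.
	type Branch struct {
		P *obj.Prog
		B *ssa.Block
	}

	// resolveBranches points each recorded branch at the first
	// instruction of its target block, once all blocks are emitted.
	func resolveBranches(branches []Branch, bstart map[ssa.ID]*obj.Prog) {
		for _, br := range branches {
			br.P.To.SetTarget(bstart[br.B.ID])
		}
	}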