author    Josh Bleecher Snyder <josharian@gmail.com>  2017-03-20 08:01:28 -0700
committer Josh Bleecher Snyder <josharian@gmail.com>  2017-03-22 17:18:40 +0000
commit    0a94daa3789a52bea9856f9f8b3fa32477eab28a (patch)
tree      4e5cdb454ad51d386167204e65b8c9bd38abe82d /src/cmd/compile/internal/mips64
parent    3b39f523e1181499827321cedd8b7370b14ee762 (diff)
cmd/compile: funnel SSA Prog creation through SSAGenState
Step one in eliminating Prog-related globals.

Passes toolstash-check -all.

Updates #15756

Change-Id: I3b777fb5a7716f2d9da3067fbd94c28ca894a465
Reviewed-on: https://go-review.googlesource.com/38450
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
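Note: the mips64 changes below only switch call sites from the package-level gc.Prog to a method on gc.SSAGenState; the method itself lives in cmd/compile/internal/gc and is not part of this diff. As a rough sketch of the assumed shape (not shown in this CL), the wrapper can simply delegate to the existing global constructor for now, which is consistent with the change passing toolstash-check:

    // Sketch only: assumed shape of the wrapper in cmd/compile/internal/gc.
    // At this step it just forwards to the package-level Prog, so the
    // generated code is unchanged; later CLs can move the underlying state
    // into SSAGenState and drop the global.
    func (s *SSAGenState) Prog(as obj.As) *obj.Prog {
    	return Prog(as)
    }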
Diffstat (limited to 'src/cmd/compile/internal/mips64')
-rw-r--r--  src/cmd/compile/internal/mips64/ssa.go  96
1 file changed, 48 insertions(+), 48 deletions(-)
diff --git a/src/cmd/compile/internal/mips64/ssa.go b/src/cmd/compile/internal/mips64/ssa.go
index 0dd2c1e33b..fea50e88ec 100644
--- a/src/cmd/compile/internal/mips64/ssa.go
+++ b/src/cmd/compile/internal/mips64/ssa.go
@@ -96,7 +96,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if isFPreg(x) && isFPreg(y) {
as = mips.AMOVD
}
- p := gc.Prog(as)
+ p := s.Prog(as)
p.From.Type = obj.TYPE_REG
p.From.Reg = x
p.To.Type = obj.TYPE_REG
@@ -104,7 +104,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if isHILO(x) && isHILO(y) || isHILO(x) && isFPreg(y) || isFPreg(x) && isHILO(y) {
// cannot move between special registers, use TMP as intermediate
p.To.Reg = mips.REGTMP
- p = gc.Prog(mips.AMOVV)
+ p = s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGTMP
p.To.Type = obj.TYPE_REG
@@ -121,14 +121,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
return
}
r := v.Reg()
- p := gc.Prog(loadByType(v.Type, r))
+ p := s.Prog(loadByType(v.Type, r))
gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = r
if isHILO(r) {
// cannot directly load, load to TMP and move
p.To.Reg = mips.REGTMP
- p = gc.Prog(mips.AMOVV)
+ p = s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGTMP
p.To.Type = obj.TYPE_REG
@@ -142,14 +142,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r := v.Args[0].Reg()
if isHILO(r) {
// cannot directly store, move to TMP and store
- p := gc.Prog(mips.AMOVV)
+ p := s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG
p.From.Reg = r
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REGTMP
r = mips.REGTMP
}
- p := gc.Prog(storeByType(v.Type, r))
+ p := s.Prog(storeByType(v.Type, r))
p.From.Type = obj.TYPE_REG
p.From.Reg = r
gc.AddrAuto(&p.To, v)
@@ -170,7 +170,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPS64MULD,
ssa.OpMIPS64DIVF,
ssa.OpMIPS64DIVD:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.Reg = v.Args[0].Reg()
@@ -178,7 +178,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg()
case ssa.OpMIPS64SGT,
ssa.OpMIPS64SGTU:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.Reg = v.Args[1].Reg()
@@ -195,7 +195,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPS64SRAVconst,
ssa.OpMIPS64SGTconst,
ssa.OpMIPS64SGTUconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.Reg = v.Args[0].Reg()
@@ -206,13 +206,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPS64DIVV,
ssa.OpMIPS64DIVVU:
// result in hi,lo
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.Reg = v.Args[0].Reg()
case ssa.OpMIPS64MOVVconst:
r := v.Reg()
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
@@ -220,7 +220,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if isFPreg(r) || isHILO(r) {
// cannot move into FP or special registers, use TMP as intermediate
p.To.Reg = mips.REGTMP
- p = gc.Prog(mips.AMOVV)
+ p = s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGTMP
p.To.Type = obj.TYPE_REG
@@ -228,7 +228,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
case ssa.OpMIPS64MOVFconst,
ssa.OpMIPS64MOVDconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG
@@ -239,12 +239,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPS64CMPGED,
ssa.OpMIPS64CMPGTF,
ssa.OpMIPS64CMPGTD:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.Reg = v.Args[1].Reg()
case ssa.OpMIPS64MOVVaddr:
- p := gc.Prog(mips.AMOVV)
+ p := s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_ADDR
var wantreg string
// MOVV $sym+off(base), R
@@ -281,7 +281,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPS64MOVVload,
ssa.OpMIPS64MOVFload,
ssa.OpMIPS64MOVDload:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
@@ -293,7 +293,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPS64MOVVstore,
ssa.OpMIPS64MOVFstore,
ssa.OpMIPS64MOVDstore:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
@@ -303,7 +303,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPS64MOVHstorezero,
ssa.OpMIPS64MOVWstorezero,
ssa.OpMIPS64MOVVstorezero:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_MEM
@@ -332,7 +332,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if v.Reg() == v.Args[0].Reg() {
return
}
- p := gc.Prog(mips.AMOVV)
+ p := s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
@@ -354,14 +354,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpMIPS64MOVDF,
ssa.OpMIPS64NEGF,
ssa.OpMIPS64NEGD:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpMIPS64NEGV:
// SUB from REGZERO
- p := gc.Prog(mips.ASUBVU)
+ p := s.Prog(mips.ASUBVU)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.Reg = mips.REGZERO
@@ -369,13 +369,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg()
case ssa.OpMIPS64DUFFZERO:
// runtime.duffzero expects start address - 8 in R1
- p := gc.Prog(mips.ASUBVU)
+ p := s.Prog(mips.ASUBVU)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 8
p.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REG_R1
- p = gc.Prog(obj.ADUFFZERO)
+ p = s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
@@ -402,23 +402,23 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
sz = 1
mov = mips.AMOVB
}
- p := gc.Prog(mips.ASUBVU)
+ p := s.Prog(mips.ASUBVU)
p.From.Type = obj.TYPE_CONST
p.From.Offset = sz
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REG_R1
- p2 := gc.Prog(mov)
+ p2 := s.Prog(mov)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = mips.REGZERO
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = mips.REG_R1
p2.To.Offset = sz
- p3 := gc.Prog(mips.AADDVU)
+ p3 := s.Prog(mips.AADDVU)
p3.From.Type = obj.TYPE_CONST
p3.From.Offset = sz
p3.To.Type = obj.TYPE_REG
p3.To.Reg = mips.REG_R1
- p4 := gc.Prog(mips.ABNE)
+ p4 := s.Prog(mips.ABNE)
p4.From.Type = obj.TYPE_REG
p4.From.Reg = v.Args[1].Reg()
p4.Reg = mips.REG_R1
@@ -448,33 +448,33 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
sz = 1
mov = mips.AMOVB
}
- p := gc.Prog(mips.ASUBVU)
+ p := s.Prog(mips.ASUBVU)
p.From.Type = obj.TYPE_CONST
p.From.Offset = sz
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REG_R1
- p2 := gc.Prog(mov)
+ p2 := s.Prog(mov)
p2.From.Type = obj.TYPE_MEM
p2.From.Reg = mips.REG_R1
p2.From.Offset = sz
p2.To.Type = obj.TYPE_REG
p2.To.Reg = mips.REGTMP
- p3 := gc.Prog(mov)
+ p3 := s.Prog(mov)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_MEM
p3.To.Reg = mips.REG_R2
- p4 := gc.Prog(mips.AADDVU)
+ p4 := s.Prog(mips.AADDVU)
p4.From.Type = obj.TYPE_CONST
p4.From.Offset = sz
p4.To.Type = obj.TYPE_REG
p4.To.Reg = mips.REG_R1
- p5 := gc.Prog(mips.AADDVU)
+ p5 := s.Prog(mips.AADDVU)
p5.From.Type = obj.TYPE_CONST
p5.From.Offset = sz
p5.To.Type = obj.TYPE_REG
p5.To.Reg = mips.REG_R2
- p6 := gc.Prog(mips.ABNE)
+ p6 := s.Prog(mips.ABNE)
p6.From.Type = obj.TYPE_REG
p6.From.Reg = v.Args[2].Reg()
p6.Reg = mips.REG_R1
@@ -484,7 +484,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
s.Call(v)
case ssa.OpMIPS64LoweredNilCheck:
// Issue a load which will fault if arg is nil.
- p := gc.Prog(mips.AMOVB)
+ p := s.Prog(mips.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
@@ -502,19 +502,19 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if v.Op == ssa.OpMIPS64FPFlagFalse {
branch = mips.ABFPT
}
- p := gc.Prog(mips.AMOVV)
+ p := s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
- p2 := gc.Prog(branch)
+ p2 := s.Prog(branch)
p2.To.Type = obj.TYPE_BRANCH
- p3 := gc.Prog(mips.AMOVV)
+ p3 := s.Prog(mips.AMOVV)
p3.From.Type = obj.TYPE_CONST
p3.From.Offset = 1
p3.To.Type = obj.TYPE_REG
p3.To.Reg = v.Reg()
- p4 := gc.Prog(obj.ANOP) // not a machine instruction, for branch to land
+ p4 := s.Prog(obj.ANOP) // not a machine instruction, for branch to land
gc.Patch(p2, p4)
case ssa.OpMIPS64LoweredGetClosurePtr:
// Closure pointer is R22 (mips.REGCTXT).
@@ -541,7 +541,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
- p := gc.Prog(obj.AJMP)
+ p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
@@ -549,23 +549,23 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
// defer returns in R1:
// 0 if we should continue executing
// 1 if we should jump to deferreturn call
- p := gc.Prog(mips.ABNE)
+ p := s.Prog(mips.ABNE)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO
p.Reg = mips.REG_R1
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
- p := gc.Prog(obj.AJMP)
+ p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
- gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
+ s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet:
- gc.Prog(obj.ARET)
+ s.Prog(obj.ARET)
case ssa.BlockRetJmp:
- p := gc.Prog(obj.ARET)
+ p := s.Prog(obj.ARET)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym)
@@ -577,18 +577,18 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
var p *obj.Prog
switch next {
case b.Succs[0].Block():
- p = gc.Prog(jmp.invasm)
+ p = s.Prog(jmp.invasm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
case b.Succs[1].Block():
- p = gc.Prog(jmp.asm)
+ p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
default:
- p = gc.Prog(jmp.asm)
+ p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
- q := gc.Prog(obj.AJMP)
+ q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
}
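For reference, the pattern applied uniformly above: every instruction the backend emits is now created through the SSAGenState that ssaGenValue and ssaGenBlock already receive, rather than through package-level state. An illustrative call site, using the same names as the diff:

    // Before: Prog creation via a package-level global.
    p := gc.Prog(v.Op.Asm())
    p.From.Type = obj.TYPE_REG
    p.From.Reg = v.Args[0].Reg()
    p.To.Type = obj.TYPE_REG
    p.To.Reg = v.Reg()

    // After: the same instruction, created through the per-compilation state.
    p := s.Prog(v.Op.Asm())
    p.From.Type = obj.TYPE_REG
    p.From.Reg = v.Args[0].Reg()
    p.To.Type = obj.TYPE_REG
    p.To.Reg = v.Reg()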