Diffstat (limited to 'src/cmd/compile/internal/gc/ssa.go')
-rw-r--r--  src/cmd/compile/internal/gc/ssa.go  3267
1 file changed, 1627 insertions(+), 1640 deletions(-)
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index 0b38e70cd2..cb73532b48 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -7,6 +7,7 @@ package gc
import (
"encoding/binary"
"fmt"
+ "go/constant"
"html"
"os"
"path/filepath"
@@ -14,6 +15,8 @@ import (
"bufio"
"bytes"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
@@ -37,7 +40,7 @@ const ssaDumpFile = "ssa.html"
const maxOpenDefers = 8
// ssaDumpInlined holds all inlined functions when ssaDump contains a function name.
-var ssaDumpInlined []*Node
+var ssaDumpInlined []ir.Node
func initssaconfig() {
types_ := ssa.NewTypes()
@@ -48,21 +51,21 @@ func initssaconfig() {
// Generate a few pointer types that are uncommon in the frontend but common in the backend.
// Caching is disabled in the backend, so generating these here avoids allocations.
- _ = types.NewPtr(types.Types[TINTER]) // *interface{}
- _ = types.NewPtr(types.NewPtr(types.Types[TSTRING])) // **string
- _ = types.NewPtr(types.NewSlice(types.Types[TINTER])) // *[]interface{}
- _ = types.NewPtr(types.NewPtr(types.Bytetype)) // **byte
- _ = types.NewPtr(types.NewSlice(types.Bytetype)) // *[]byte
- _ = types.NewPtr(types.NewSlice(types.Types[TSTRING])) // *[]string
- _ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[TUINT8]))) // ***uint8
- _ = types.NewPtr(types.Types[TINT16]) // *int16
- _ = types.NewPtr(types.Types[TINT64]) // *int64
- _ = types.NewPtr(types.Errortype) // *error
+ _ = types.NewPtr(types.Types[types.TINTER]) // *interface{}
+ _ = types.NewPtr(types.NewPtr(types.Types[types.TSTRING])) // **string
+ _ = types.NewPtr(types.NewSlice(types.Types[types.TINTER])) // *[]interface{}
+ _ = types.NewPtr(types.NewPtr(types.Bytetype)) // **byte
+ _ = types.NewPtr(types.NewSlice(types.Bytetype)) // *[]byte
+ _ = types.NewPtr(types.NewSlice(types.Types[types.TSTRING])) // *[]string
+ _ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[types.TUINT8]))) // ***uint8
+ _ = types.NewPtr(types.Types[types.TINT16]) // *int16
+ _ = types.NewPtr(types.Types[types.TINT64]) // *int64
+ _ = types.NewPtr(types.Errortype) // *error
types.NewPtrCacheEnabled = false
- ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, Ctxt, Debug.N == 0)
+ ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, base.Ctxt, base.Flag.N == 0)
ssaConfig.SoftFloat = thearch.SoftFloat
- ssaConfig.Race = flag_race
- ssaCaches = make([]ssa.Cache, nBackendWorkers)
+ ssaConfig.Race = base.Flag.Race
+ ssaCaches = make([]ssa.Cache, base.Flag.LowerC)
// Set up some runtime functions we'll need to call.
assertE2I = sysfunc("assertE2I")
@@ -183,9 +186,9 @@ func initssaconfig() {
// function/method/interface call), where the receiver of a method call is
// considered as the 0th parameter. This does not include the receiver of an
// interface call.
-func getParam(n *Node, i int) *types.Field {
- t := n.Left.Type
- if n.Op == OCALLMETH {
+func getParam(n ir.Node, i int) *types.Field {
+ t := n.Left().Type()
+ if n.Op() == ir.OCALLMETH {
if i == 0 {
return t.Recv()
}
@@ -239,8 +242,8 @@ func dvarint(x *obj.LSym, off int, v int64) int {
// - Size of the argument
// - Offset of where argument should be placed in the args frame when making call
func (s *state) emitOpenDeferInfo() {
- x := Ctxt.Lookup(s.curfn.Func.lsym.Name + ".opendefer")
- s.curfn.Func.lsym.Func().OpenCodedDeferInfo = x
+ x := base.Ctxt.Lookup(s.curfn.Func().LSym.Name + ".opendefer")
+ s.curfn.Func().LSym.Func().OpenCodedDeferInfo = x
off := 0
// Compute maxargsize (max size of arguments for all defers)
@@ -248,20 +251,20 @@ func (s *state) emitOpenDeferInfo() {
var maxargsize int64
for i := len(s.openDefers) - 1; i >= 0; i-- {
r := s.openDefers[i]
- argsize := r.n.Left.Type.ArgWidth()
+ argsize := r.n.Left().Type().ArgWidth()
if argsize > maxargsize {
maxargsize = argsize
}
}
off = dvarint(x, off, maxargsize)
- off = dvarint(x, off, -s.deferBitsTemp.Xoffset)
+ off = dvarint(x, off, -s.deferBitsTemp.Offset())
off = dvarint(x, off, int64(len(s.openDefers)))
// Write in reverse-order, for ease of running in that order at runtime
for i := len(s.openDefers) - 1; i >= 0; i-- {
r := s.openDefers[i]
- off = dvarint(x, off, r.n.Left.Type.ArgWidth())
- off = dvarint(x, off, -r.closureNode.Xoffset)
+ off = dvarint(x, off, r.n.Left().Type().ArgWidth())
+ off = dvarint(x, off, -r.closureNode.Offset())
numArgs := len(r.argNodes)
if r.rcvrNode != nil {
// If there's an interface receiver, treat/place it as the first
@@ -271,13 +274,13 @@ func (s *state) emitOpenDeferInfo() {
}
off = dvarint(x, off, int64(numArgs))
if r.rcvrNode != nil {
- off = dvarint(x, off, -r.rcvrNode.Xoffset)
+ off = dvarint(x, off, -r.rcvrNode.Offset())
off = dvarint(x, off, s.config.PtrSize)
off = dvarint(x, off, 0)
}
for j, arg := range r.argNodes {
f := getParam(r.n, j)
- off = dvarint(x, off, -arg.Xoffset)
+ off = dvarint(x, off, -arg.Offset())
off = dvarint(x, off, f.Type.Size())
off = dvarint(x, off, f.Offset)
}
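
Since emitOpenDeferInfo writes the table one varint at a time, the layout reads most naturally as a flat byte encoder. Below is a minimal sketch of that layout, assuming dvarint emits standard unsigned LEB128 varints (the encoding produced by encoding/binary's PutUvarint); the record type and field names are illustrative rather than the compiler's own, and the interface-receiver special case is folded into the argument list.

	package sketch

	import "encoding/binary"

	// openDeferRecord mirrors the per-defer fields serialized above
	// (hypothetical names, for illustration only).
	type openDeferRecord struct {
		argWidth   int64      // total size of the call's arguments
		closureOff int64      // frame offset of the closure argtmp (negated above so it is non-negative)
		args       [][3]int64 // per argument: {frame offset, size, offset in the args frame}
	}

	// encodeOpenDeferInfo sketches the flat varint layout: maxargsize,
	// the deferBits slot offset, the defer count, then per-defer records
	// in reverse source order (the order they run in at panic time).
	func encodeOpenDeferInfo(maxArgSize, deferBitsOff int64, defers []openDeferRecord) []byte {
		var buf []byte
		put := func(v int64) {
			var tmp [binary.MaxVarintLen64]byte
			n := binary.PutUvarint(tmp[:], uint64(v))
			buf = append(buf, tmp[:n]...)
		}
		put(maxArgSize)
		put(deferBitsOff)
		put(int64(len(defers)))
		for i := len(defers) - 1; i >= 0; i-- {
			d := defers[i]
			put(d.argWidth)
			put(d.closureOff)
			put(int64(len(d.args)))
			for _, a := range d.args {
				put(a[0]) // argument's frame offset
				put(a[1]) // argument size
				put(a[2]) // placement in the args frame when making the call
			}
		}
		return buf
	}
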
@@ -286,18 +289,18 @@ func (s *state) emitOpenDeferInfo() {
// buildssa builds an SSA function for fn.
// worker indicates which of the backend workers is doing the processing.
-func buildssa(fn *Node, worker int) *ssa.Func {
- name := fn.funcname()
+func buildssa(fn ir.Node, worker int) *ssa.Func {
+ name := ir.FuncName(fn)
printssa := false
if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", or a package.name e.g. "compress/gzip.(*Reader).Reset"
- printssa = name == ssaDump || myimportpath+"."+name == ssaDump
+ printssa = name == ssaDump || base.Ctxt.Pkgpath+"."+name == ssaDump
}
var astBuf *bytes.Buffer
if printssa {
astBuf = &bytes.Buffer{}
- fdumplist(astBuf, "buildssa-enter", fn.Func.Enter)
- fdumplist(astBuf, "buildssa-body", fn.Nbody)
- fdumplist(astBuf, "buildssa-exit", fn.Func.Exit)
+ ir.FDumpList(astBuf, "buildssa-enter", fn.Func().Enter)
+ ir.FDumpList(astBuf, "buildssa-body", fn.Body())
+ ir.FDumpList(astBuf, "buildssa-exit", fn.Func().Exit)
if ssaDumpStdout {
fmt.Println("generating SSA for", name)
fmt.Print(astBuf.String())
@@ -305,11 +308,11 @@ func buildssa(fn *Node, worker int) *ssa.Func {
}
var s state
- s.pushLine(fn.Pos)
+ s.pushLine(fn.Pos())
defer s.popLine()
- s.hasdefer = fn.Func.HasDefer()
- if fn.Func.Pragma&CgoUnsafeArgs != 0 {
+ s.hasdefer = fn.Func().HasDefer()
+ if fn.Func().Pragma&ir.CgoUnsafeArgs != 0 {
s.cgoUnsafeArgs = true
}
@@ -321,14 +324,14 @@ func buildssa(fn *Node, worker int) *ssa.Func {
s.f = ssa.NewFunc(&fe)
s.config = ssaConfig
- s.f.Type = fn.Type
+ s.f.Type = fn.Type()
s.f.Config = ssaConfig
s.f.Cache = &ssaCaches[worker]
s.f.Cache.Reset()
s.f.Name = name
s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH")
s.f.PrintOrHtmlSSA = printssa
- if fn.Func.Pragma&Nosplit != 0 {
+ if fn.Func().Pragma&ir.Nosplit != 0 {
s.f.NoSplit = true
}
s.panics = map[funcLine]*ssa.Block{}
@@ -336,12 +339,12 @@ func buildssa(fn *Node, worker int) *ssa.Func {
// Allocate starting block
s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
- s.f.Entry.Pos = fn.Pos
+ s.f.Entry.Pos = fn.Pos()
if printssa {
ssaDF := ssaDumpFile
if ssaDir != "" {
- ssaDF = filepath.Join(ssaDir, myimportpath+"."+name+".html")
+ ssaDF = filepath.Join(ssaDir, base.Ctxt.Pkgpath+"."+name+".html")
ssaD := filepath.Dir(ssaDF)
os.MkdirAll(ssaD, 0755)
}
@@ -353,20 +356,20 @@ func buildssa(fn *Node, worker int) *ssa.Func {
// Allocate starting values
s.labels = map[string]*ssaLabel{}
- s.labeledNodes = map[*Node]*ssaLabel{}
- s.fwdVars = map[*Node]*ssa.Value{}
+ s.labeledNodes = map[ir.Node]*ssaLabel{}
+ s.fwdVars = map[ir.Node]*ssa.Value{}
s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
- s.hasOpenDefers = Debug.N == 0 && s.hasdefer && !s.curfn.Func.OpenCodedDeferDisallowed()
+ s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.Func().OpenCodedDeferDisallowed()
switch {
- case s.hasOpenDefers && (Ctxt.Flag_shared || Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386":
+ case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386":
// Don't support open-coded defers for 386 ONLY when using shared
// libraries, because there is extra code (added by rewriteToUseGot())
// preceding the deferreturn/ret code that is generated by gencallret()
// that we don't track correctly.
s.hasOpenDefers = false
}
- if s.hasOpenDefers && s.curfn.Func.Exit.Len() > 0 {
+ if s.hasOpenDefers && s.curfn.Func().Exit.Len() > 0 {
// Skip doing open defers if there is any extra exit code (likely
// copying heap-allocated return values or race detection), since
// we will not generate that code in the case of the extra
@@ -374,7 +377,7 @@ func buildssa(fn *Node, worker int) *ssa.Func {
s.hasOpenDefers = false
}
if s.hasOpenDefers &&
- s.curfn.Func.numReturns*s.curfn.Func.numDefers > 15 {
+ s.curfn.Func().NumReturns*s.curfn.Func().NumDefers > 15 {
// Since we are generating defer calls at every exit for
// open-coded defers, skip doing open-coded defers if there are
// too many returns (especially if there are multiple defers).
@@ -383,54 +386,54 @@ func buildssa(fn *Node, worker int) *ssa.Func {
s.hasOpenDefers = false
}
- s.sp = s.entryNewValue0(ssa.OpSP, types.Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
- s.sb = s.entryNewValue0(ssa.OpSB, types.Types[TUINTPTR])
+ s.sp = s.entryNewValue0(ssa.OpSP, types.Types[types.TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
+ s.sb = s.entryNewValue0(ssa.OpSB, types.Types[types.TUINTPTR])
s.startBlock(s.f.Entry)
- s.vars[&memVar] = s.startmem
+ s.vars[memVar] = s.startmem
if s.hasOpenDefers {
// Create the deferBits variable and stack slot. deferBits is a
// bitmask showing which of the open-coded defers in this function
// have been activated.
- deferBitsTemp := tempAt(src.NoXPos, s.curfn, types.Types[TUINT8])
+ deferBitsTemp := tempAt(src.NoXPos, s.curfn, types.Types[types.TUINT8])
s.deferBitsTemp = deferBitsTemp
// For this value, AuxInt is initialized to zero by default
- startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[TUINT8])
- s.vars[&deferBitsVar] = startDeferBits
+ startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[types.TUINT8])
+ s.vars[deferBitsVar] = startDeferBits
s.deferBitsAddr = s.addr(deferBitsTemp)
- s.store(types.Types[TUINT8], s.deferBitsAddr, startDeferBits)
+ s.store(types.Types[types.TUINT8], s.deferBitsAddr, startDeferBits)
// Make sure that the deferBits stack slot is kept alive (for use
// by panics) and stores to deferBits are not eliminated, even if
// all checking code on deferBits in the function exit can be
// eliminated, because the defer statements were all
// unconditional.
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false)
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false)
}
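
The deferBits machinery set up above is easier to follow as straight-line code. Roughly, as a hand-written sketch rather than actual generated code: each open-coded defer site sets its bit, and the function's exit code tests the bits in reverse order and calls the recorded functions directly. maxOpenDefers is 8 because the whole mask fits in the single uint8 slot allocated here.

	func example(cond bool) {
		var deferBits uint8

		// "defer f0()" executes unconditionally: activate bit 0.
		deferBits |= 1 << 0
		if cond {
			// "defer f1()" is only reached sometimes: activate bit 1 here.
			deferBits |= 1 << 1
		}

		// Inlined exit code: test bits in reverse order, newest defer first.
		if deferBits&(1<<1) != 0 {
			f1()
		}
		if deferBits&(1<<0) != 0 {
			f0()
		}
	}

	func f0() {}
	func f1() {}
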
// Generate addresses of local declarations
- s.decladdrs = map[*Node]*ssa.Value{}
+ s.decladdrs = map[ir.Node]*ssa.Value{}
var args []ssa.Param
var results []ssa.Param
- for _, n := range fn.Func.Dcl {
+ for _, n := range fn.Func().Dcl {
switch n.Class() {
- case PPARAM:
- s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type), n, s.sp, s.startmem)
- args = append(args, ssa.Param{Type: n.Type, Offset: int32(n.Xoffset)})
- case PPARAMOUT:
- s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type), n, s.sp, s.startmem)
- results = append(results, ssa.Param{Type: n.Type, Offset: int32(n.Xoffset)})
+ case ir.PPARAM:
+ s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
+ args = append(args, ssa.Param{Type: n.Type(), Offset: int32(n.Offset())})
+ case ir.PPARAMOUT:
+ s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
+ results = append(results, ssa.Param{Type: n.Type(), Offset: int32(n.Offset())})
if s.canSSA(n) {
// Save ssa-able PPARAMOUT variables so we can
// store them back to the stack at the end of
// the function.
s.returns = append(s.returns, n)
}
- case PAUTO:
+ case ir.PAUTO:
// processed at each use, to prevent Addr coming
// before the decl.
- case PAUTOHEAP:
+ case ir.PAUTOHEAP:
// moved to heap - already handled by frontend
- case PFUNC:
+ case ir.PFUNC:
// local function - already handled by frontend
default:
s.Fatalf("local variable with class %v unimplemented", n.Class())
@@ -438,21 +441,21 @@ func buildssa(fn *Node, worker int) *ssa.Func {
}
// Populate SSAable arguments.
- for _, n := range fn.Func.Dcl {
- if n.Class() == PPARAM && s.canSSA(n) {
- v := s.newValue0A(ssa.OpArg, n.Type, n)
+ for _, n := range fn.Func().Dcl {
+ if n.Class() == ir.PPARAM && s.canSSA(n) {
+ v := s.newValue0A(ssa.OpArg, n.Type(), n)
s.vars[n] = v
s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself.
}
}
// Convert the AST-based IR to the SSA-based IR
- s.stmtList(fn.Func.Enter)
- s.stmtList(fn.Nbody)
+ s.stmtList(fn.Func().Enter)
+ s.stmtList(fn.Body())
// fallthrough to exit
if s.curBlock != nil {
- s.pushLine(fn.Func.Endlineno)
+ s.pushLine(fn.Func().Endlineno)
s.exit()
s.popLine()
}
@@ -475,10 +478,10 @@ func buildssa(fn *Node, worker int) *ssa.Func {
return s.f
}
-func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *Node) {
+func dumpSourcesColumn(writer *ssa.HTMLWriter, fn ir.Node) {
// Read sources of target function fn.
- fname := Ctxt.PosTable.Pos(fn.Pos).Filename()
- targetFn, err := readFuncLines(fname, fn.Pos.Line(), fn.Func.Endlineno.Line())
+ fname := base.Ctxt.PosTable.Pos(fn.Pos()).Filename()
+ targetFn, err := readFuncLines(fname, fn.Pos().Line(), fn.Func().Endlineno.Line())
if err != nil {
writer.Logf("cannot read sources for function %v: %v", fn, err)
}
@@ -487,14 +490,14 @@ func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *Node) {
var inlFns []*ssa.FuncLines
for _, fi := range ssaDumpInlined {
var elno src.XPos
- if fi.Name.Defn == nil {
+ if fi.Name().Defn == nil {
// Endlineno is filled from exported data.
- elno = fi.Func.Endlineno
+ elno = fi.Func().Endlineno
} else {
- elno = fi.Name.Defn.Func.Endlineno
+ elno = fi.Name().Defn.Func().Endlineno
}
- fname := Ctxt.PosTable.Pos(fi.Pos).Filename()
- fnLines, err := readFuncLines(fname, fi.Pos.Line(), elno.Line())
+ fname := base.Ctxt.PosTable.Pos(fi.Pos()).Filename()
+ fnLines, err := readFuncLines(fname, fi.Pos().Line(), elno.Line())
if err != nil {
writer.Logf("cannot read sources for inlined function %v: %v", fi, err)
continue
@@ -563,24 +566,24 @@ func (s *state) updateUnsetPredPos(b *ssa.Block) {
// Information about each open-coded defer.
type openDeferInfo struct {
// The ODEFER node representing the function call of the defer
- n *Node
+ n ir.Node
// If defer call is closure call, the address of the argtmp where the
// closure is stored.
closure *ssa.Value
// The node representing the argtmp where the closure is stored - used for
// function, method, or interface call, to store a closure that panic
// processing can use for this defer.
- closureNode *Node
+ closureNode ir.Node
// If defer call is interface call, the address of the argtmp where the
// receiver is stored
rcvr *ssa.Value
// The node representing the argtmp where the receiver is stored
- rcvrNode *Node
+ rcvrNode ir.Node
// The addresses of the argtmps where the evaluated arguments of the defer
// function call are stored.
argVals []*ssa.Value
// The nodes representing the argtmps where the args of the defer are stored
- argNodes []*Node
+ argNodes []ir.Node
}
type state struct {
@@ -591,11 +594,11 @@ type state struct {
f *ssa.Func
// Node for function
- curfn *Node
+ curfn ir.Node
// labels and labeled control flow nodes (OFOR, OFORUNTIL, OSWITCH, OSELECT) in f
labels map[string]*ssaLabel
- labeledNodes map[*Node]*ssaLabel
+ labeledNodes map[ir.Node]*ssaLabel
// unlabeled break and continue statement tracking
breakTo *ssa.Block // current target for plain break statement
@@ -607,18 +610,18 @@ type state struct {
// variable assignments in the current block (map from variable symbol to ssa value)
// *Node is the unique identifier (an ONAME Node) for the variable.
// TODO: keep a single varnum map, then make all of these maps slices instead?
- vars map[*Node]*ssa.Value
+ vars map[ir.Node]*ssa.Value
// fwdVars are variables that are used before they are defined in the current block.
// This map exists just to coalesce multiple references into a single FwdRef op.
// *Node is the unique identifier (an ONAME Node) for the variable.
- fwdVars map[*Node]*ssa.Value
+ fwdVars map[ir.Node]*ssa.Value
// all defined variables at the end of each block. Indexed by block ID.
- defvars []map[*Node]*ssa.Value
+ defvars []map[ir.Node]*ssa.Value
// addresses of PPARAM and PPARAMOUT variables.
- decladdrs map[*Node]*ssa.Value
+ decladdrs map[ir.Node]*ssa.Value
// starting values. Memory, stack pointer, and globals pointer
startmem *ssa.Value
@@ -626,7 +629,7 @@ type state struct {
sb *ssa.Value
// value representing address of where deferBits autotmp is stored
deferBitsAddr *ssa.Value
- deferBitsTemp *Node
+ deferBitsTemp ir.Node
// line number stack. The current line number is top of stack
line []src.XPos
@@ -638,7 +641,7 @@ type state struct {
panics map[funcLine]*ssa.Block
// list of PPARAMOUT (return) variables.
- returns []*Node
+ returns []ir.Node
cgoUnsafeArgs bool
hasdefer bool // whether the function contains a defer statement
@@ -690,18 +693,22 @@ func (s *state) Fatalf(msg string, args ...interface{}) {
func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() }
+func ssaMarker(name string) ir.Node {
+ return NewName(&types.Sym{Name: name})
+}
+
var (
- // dummy node for the memory variable
- memVar = Node{Op: ONAME, Sym: &types.Sym{Name: "mem"}}
-
- // dummy nodes for temporary variables
- ptrVar = Node{Op: ONAME, Sym: &types.Sym{Name: "ptr"}}
- lenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "len"}}
- newlenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "newlen"}}
- capVar = Node{Op: ONAME, Sym: &types.Sym{Name: "cap"}}
- typVar = Node{Op: ONAME, Sym: &types.Sym{Name: "typ"}}
- okVar = Node{Op: ONAME, Sym: &types.Sym{Name: "ok"}}
- deferBitsVar = Node{Op: ONAME, Sym: &types.Sym{Name: "deferBits"}}
+ // marker node for the memory variable
+ memVar = ssaMarker("mem")
+
+ // marker nodes for temporary variables
+ ptrVar = ssaMarker("ptr")
+ lenVar = ssaMarker("len")
+ newlenVar = ssaMarker("newlen")
+ capVar = ssaMarker("cap")
+ typVar = ssaMarker("typ")
+ okVar = ssaMarker("ok")
+ deferBitsVar = ssaMarker("deferBits")
)
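
The switch from package-level dummy Node structs (keyed by their addresses) to ssaMarker values works because these markers are never real IR: they only need to be distinct map keys for s.vars and friends, and the Sym name exists purely so debug dumps stay readable. The pattern in miniature, with illustrative types rather than the compiler's:

	type variable interface{ name() string }

	type marker struct{ n string }

	func (m *marker) name() string { return m.n }

	// One sentinel per pseudo-variable; identity, not the string, is the key.
	var memVar variable = &marker{"mem"}

	func demo() {
		vars := map[variable]int{}
		vars[memVar] = 42 // later lookups use the same sentinel value
		_ = vars[memVar]
	}
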
// startBlock sets the current block we're generating code in to b.
@@ -710,7 +717,7 @@ func (s *state) startBlock(b *ssa.Block) {
s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
}
s.curBlock = b
- s.vars = map[*Node]*ssa.Value{}
+ s.vars = map[ir.Node]*ssa.Value{}
for n := range s.fwdVars {
delete(s.fwdVars, n)
}
@@ -747,8 +754,8 @@ func (s *state) pushLine(line src.XPos) {
// the frontend may emit node with line number missing,
// use the parent line number in this case.
line = s.peekPos()
- if Debug.K != 0 {
- Warn("buildssa: unknown position (line 0)")
+ if base.Flag.K != 0 {
+ base.Warn("buildssa: unknown position (line 0)")
}
} else {
s.lastPos = line
@@ -914,7 +921,7 @@ func (s *state) constEmptyString(t *types.Type) *ssa.Value {
return s.f.ConstEmptyString(t)
}
func (s *state) constBool(c bool) *ssa.Value {
- return s.f.ConstBool(types.Types[TBOOL], c)
+ return s.f.ConstBool(types.Types[types.TBOOL], c)
}
func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
return s.f.ConstInt8(t, c)
@@ -967,7 +974,7 @@ func (s *state) newValueOrSfCall2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Valu
}
func (s *state) instrument(t *types.Type, addr *ssa.Value, wr bool) {
- if !s.curfn.Func.InstrumentBody() {
+ if !s.curfn.Func().InstrumentBody() {
return
}
@@ -983,13 +990,13 @@ func (s *state) instrument(t *types.Type, addr *ssa.Value, wr bool) {
var fn *obj.LSym
needWidth := false
- if flag_msan {
+ if base.Flag.MSan {
fn = msanread
if wr {
fn = msanwrite
}
needWidth = true
- } else if flag_race && t.NumComponents(types.CountBlankFields) > 1 {
+ } else if base.Flag.Race && t.NumComponents(types.CountBlankFields) > 1 {
// for composite objects we have to write every address
// because a write might happen to any subobject.
// composites with only one element don't have subobjects, though.
@@ -998,7 +1005,7 @@ func (s *state) instrument(t *types.Type, addr *ssa.Value, wr bool) {
fn = racewriterange
}
needWidth = true
- } else if flag_race {
+ } else if base.Flag.Race {
// for non-composite objects we can write just the start
// address, as any write must write the first byte.
fn = raceread
@@ -1011,7 +1018,7 @@ func (s *state) instrument(t *types.Type, addr *ssa.Value, wr bool) {
args := []*ssa.Value{addr}
if needWidth {
- args = append(args, s.constInt(types.Types[TUINTPTR], w))
+ args = append(args, s.constInt(types.Types[types.TUINTPTR], w))
}
s.rtcall(fn, true, nil, args...)
}
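
The composite-versus-scalar split above exists because the race detector must be told every address an access could touch. Any access to a one-component object necessarily includes its first byte, so reporting the start address suffices; an access to a multi-component object may touch only an inner field, so the range variants report the full extent. Schematically (hypothetical call shapes, for illustration only):

	// var x int64                    // one component:
	// raceread(&x)                   // any access must include the first byte
	//
	// var s struct{ a, b int64 }     // more than one component:
	// racereadrange(&s, sizeof(s))   // an access may touch only s.b, so pass the width
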
@@ -1026,14 +1033,14 @@ func (s *state) rawLoad(t *types.Type, src *ssa.Value) *ssa.Value {
}
func (s *state) store(t *types.Type, dst, val *ssa.Value) {
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem())
+ s.vars[memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem())
}
func (s *state) zero(t *types.Type, dst *ssa.Value) {
s.instrument(t, dst, true)
store := s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), dst, s.mem())
store.Aux = t
- s.vars[&memVar] = store
+ s.vars[memVar] = store
}
func (s *state) move(t *types.Type, dst, src *ssa.Value) {
@@ -1041,52 +1048,52 @@ func (s *state) move(t *types.Type, dst, src *ssa.Value) {
s.instrument(t, dst, true)
store := s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), dst, src, s.mem())
store.Aux = t
- s.vars[&memVar] = store
+ s.vars[memVar] = store
}
// stmtList converts the statement list n to SSA and adds it to s.
-func (s *state) stmtList(l Nodes) {
+func (s *state) stmtList(l ir.Nodes) {
for _, n := range l.Slice() {
s.stmt(n)
}
}
// stmt converts the statement n to SSA and adds it to s.
-func (s *state) stmt(n *Node) {
- if !(n.Op == OVARKILL || n.Op == OVARLIVE || n.Op == OVARDEF) {
+func (s *state) stmt(n ir.Node) {
+ if !(n.Op() == ir.OVARKILL || n.Op() == ir.OVARLIVE || n.Op() == ir.OVARDEF) {
// OVARKILL, OVARLIVE, and OVARDEF are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging.
- s.pushLine(n.Pos)
+ s.pushLine(n.Pos())
defer s.popLine()
}
// If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
// then this code is dead. Stop here.
- if s.curBlock == nil && n.Op != OLABEL {
+ if s.curBlock == nil && n.Op() != ir.OLABEL {
return
}
- s.stmtList(n.Ninit)
- switch n.Op {
+ s.stmtList(n.Init())
+ switch n.Op() {
- case OBLOCK:
- s.stmtList(n.List)
+ case ir.OBLOCK:
+ s.stmtList(n.List())
// No-ops
- case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:
+ case ir.OEMPTY, ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL:
// Expression statements
- case OCALLFUNC:
+ case ir.OCALLFUNC:
if isIntrinsicCall(n) {
s.intrinsicCall(n)
return
}
fallthrough
- case OCALLMETH, OCALLINTER:
+ case ir.OCALLMETH, ir.OCALLINTER:
s.callResult(n, callNormal)
- if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class() == PFUNC {
- if fn := n.Left.Sym.Name; compiling_runtime && fn == "throw" ||
- n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") {
+ if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.ONAME && n.Left().Class() == ir.PFUNC {
+ if fn := n.Left().Sym().Name; base.Flag.CompilingRuntime && fn == "throw" ||
+ n.Left().Sym().Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") {
m := s.mem()
b := s.endBlock()
b.Kind = ssa.BlockExit
@@ -1096,34 +1103,34 @@ func (s *state) stmt(n *Node) {
// go through SSA.
}
}
- case ODEFER:
- if Debug_defer > 0 {
+ case ir.ODEFER:
+ if base.Debug.Defer > 0 {
var defertype string
if s.hasOpenDefers {
defertype = "open-coded"
- } else if n.Esc == EscNever {
+ } else if n.Esc() == EscNever {
defertype = "stack-allocated"
} else {
defertype = "heap-allocated"
}
- Warnl(n.Pos, "%s defer", defertype)
+ base.WarnfAt(n.Pos(), "%s defer", defertype)
}
if s.hasOpenDefers {
- s.openDeferRecord(n.Left)
+ s.openDeferRecord(n.Left())
} else {
d := callDefer
- if n.Esc == EscNever {
+ if n.Esc() == EscNever {
d = callDeferStack
}
- s.callResult(n.Left, d)
+ s.callResult(n.Left(), d)
}
- case OGO:
- s.callResult(n.Left, callGo)
+ case ir.OGO:
+ s.callResult(n.Left(), callGo)
- case OAS2DOTTYPE:
- res, resok := s.dottype(n.Right, true)
+ case ir.OAS2DOTTYPE:
+ res, resok := s.dottype(n.Right(), true)
deref := false
- if !canSSAType(n.Right.Type) {
+ if !canSSAType(n.Right().Type()) {
if res.Op != ssa.OpLoad {
s.Fatalf("dottype of non-load")
}
@@ -1137,33 +1144,33 @@ func (s *state) stmt(n *Node) {
deref = true
res = res.Args[0]
}
- s.assign(n.List.First(), res, deref, 0)
- s.assign(n.List.Second(), resok, false, 0)
+ s.assign(n.List().First(), res, deref, 0)
+ s.assign(n.List().Second(), resok, false, 0)
return
- case OAS2FUNC:
+ case ir.OAS2FUNC:
// We come here only when it is an intrinsic call returning two values.
- if !isIntrinsicCall(n.Right) {
- s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Right)
- }
- v := s.intrinsicCall(n.Right)
- v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v)
- v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v)
- s.assign(n.List.First(), v1, false, 0)
- s.assign(n.List.Second(), v2, false, 0)
+ if !isIntrinsicCall(n.Right()) {
+ s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Right())
+ }
+ v := s.intrinsicCall(n.Right())
+ v1 := s.newValue1(ssa.OpSelect0, n.List().First().Type(), v)
+ v2 := s.newValue1(ssa.OpSelect1, n.List().Second().Type(), v)
+ s.assign(n.List().First(), v1, false, 0)
+ s.assign(n.List().Second(), v2, false, 0)
return
- case ODCL:
- if n.Left.Class() == PAUTOHEAP {
+ case ir.ODCL:
+ if n.Left().Class() == ir.PAUTOHEAP {
s.Fatalf("DCL %v", n)
}
- case OLABEL:
- sym := n.Sym
+ case ir.OLABEL:
+ sym := n.Sym()
lab := s.label(sym)
// Associate label with its control flow node, if any
- if ctl := n.labeledControl(); ctl != nil {
+ if ctl := labeledControl(n); ctl != nil {
s.labeledNodes[ctl] = lab
}
@@ -1180,8 +1187,8 @@ func (s *state) stmt(n *Node) {
}
s.startBlock(lab.target)
- case OGOTO:
- sym := n.Sym
+ case ir.OGOTO:
+ sym := n.Sym()
lab := s.label(sym)
if lab.target == nil {
@@ -1192,8 +1199,8 @@ func (s *state) stmt(n *Node) {
b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
b.AddEdgeTo(lab.target)
- case OAS:
- if n.Left == n.Right && n.Left.Op == ONAME {
+ case ir.OAS:
+ if n.Left() == n.Right() && n.Left().Op() == ir.ONAME {
// An x=x assignment. No point in doing anything
// here. In addition, skipping this assignment
// prevents generating:
@@ -1205,10 +1212,10 @@ func (s *state) stmt(n *Node) {
}
// Evaluate RHS.
- rhs := n.Right
+ rhs := n.Right()
if rhs != nil {
- switch rhs.Op {
- case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
+ switch rhs.Op() {
+ case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
@@ -1216,31 +1223,31 @@ func (s *state) stmt(n *Node) {
s.Fatalf("literal with nonzero value in SSA: %v", rhs)
}
rhs = nil
- case OAPPEND:
+ case ir.OAPPEND:
// Check whether we're writing the result of an append back to the same slice.
// If so, we handle it specially to avoid write barriers on the fast
// (non-growth) path.
- if !samesafeexpr(n.Left, rhs.List.First()) || Debug.N != 0 {
+ if !samesafeexpr(n.Left(), rhs.List().First()) || base.Flag.N != 0 {
break
}
// If the slice can be SSA'd, it'll be on the stack,
// so there will be no write barriers,
// so there's no need to attempt to prevent them.
- if s.canSSA(n.Left) {
- if Debug_append > 0 { // replicating old diagnostic message
- Warnl(n.Pos, "append: len-only update (in local slice)")
+ if s.canSSA(n.Left()) {
+ if base.Debug.Append > 0 { // replicating old diagnostic message
+ base.WarnfAt(n.Pos(), "append: len-only update (in local slice)")
}
break
}
- if Debug_append > 0 {
- Warnl(n.Pos, "append: len-only update")
+ if base.Debug.Append > 0 {
+ base.WarnfAt(n.Pos(), "append: len-only update")
}
s.append(rhs, true)
return
}
}
- if n.Left.isBlank() {
+ if ir.IsBlank(n.Left()) {
// _ = rhs
// Just evaluate rhs for side-effects.
if rhs != nil {
@@ -1250,10 +1257,10 @@ func (s *state) stmt(n *Node) {
}
var t *types.Type
- if n.Right != nil {
- t = n.Right.Type
+ if n.Right() != nil {
+ t = n.Right().Type()
} else {
- t = n.Left.Type
+ t = n.Left().Type()
}
var r *ssa.Value
@@ -1273,11 +1280,11 @@ func (s *state) stmt(n *Node) {
}
var skip skipMask
- if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
+ if rhs != nil && (rhs.Op() == ir.OSLICE || rhs.Op() == ir.OSLICE3 || rhs.Op() == ir.OSLICESTR) && samesafeexpr(rhs.Left(), n.Left()) {
// We're assigning a slicing operation back to its source.
// Don't write back fields we aren't changing. See issue #14855.
i, j, k := rhs.SliceBounds()
- if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64Val() == 0) {
+ if i != nil && (i.Op() == ir.OLITERAL && i.Val().Kind() == constant.Int && i.Int64Val() == 0) {
// [0:...] is the same as [:...]
i = nil
}
@@ -1302,15 +1309,15 @@ func (s *state) stmt(n *Node) {
}
}
- s.assign(n.Left, r, deref, skip)
+ s.assign(n.Left(), r, deref, skip)
- case OIF:
- if Isconst(n.Left, CTBOOL) {
- s.stmtList(n.Left.Ninit)
- if n.Left.BoolVal() {
- s.stmtList(n.Nbody)
+ case ir.OIF:
+ if ir.IsConst(n.Left(), constant.Bool) {
+ s.stmtList(n.Left().Init())
+ if n.Left().BoolVal() {
+ s.stmtList(n.Body())
} else {
- s.stmtList(n.Rlist)
+ s.stmtList(n.Rlist())
}
break
}
@@ -1321,64 +1328,64 @@ func (s *state) stmt(n *Node) {
likely = 1
}
var bThen *ssa.Block
- if n.Nbody.Len() != 0 {
+ if n.Body().Len() != 0 {
bThen = s.f.NewBlock(ssa.BlockPlain)
} else {
bThen = bEnd
}
var bElse *ssa.Block
- if n.Rlist.Len() != 0 {
+ if n.Rlist().Len() != 0 {
bElse = s.f.NewBlock(ssa.BlockPlain)
} else {
bElse = bEnd
}
- s.condBranch(n.Left, bThen, bElse, likely)
+ s.condBranch(n.Left(), bThen, bElse, likely)
- if n.Nbody.Len() != 0 {
+ if n.Body().Len() != 0 {
s.startBlock(bThen)
- s.stmtList(n.Nbody)
+ s.stmtList(n.Body())
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bEnd)
}
}
- if n.Rlist.Len() != 0 {
+ if n.Rlist().Len() != 0 {
s.startBlock(bElse)
- s.stmtList(n.Rlist)
+ s.stmtList(n.Rlist())
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bEnd)
}
}
s.startBlock(bEnd)
- case ORETURN:
- s.stmtList(n.List)
+ case ir.ORETURN:
+ s.stmtList(n.List())
b := s.exit()
b.Pos = s.lastPos.WithIsStmt()
- case ORETJMP:
- s.stmtList(n.List)
+ case ir.ORETJMP:
+ s.stmtList(n.List())
b := s.exit()
b.Kind = ssa.BlockRetJmp // override BlockRet
- b.Aux = n.Sym.Linksym()
+ b.Aux = n.Sym().Linksym()
- case OCONTINUE, OBREAK:
+ case ir.OCONTINUE, ir.OBREAK:
var to *ssa.Block
- if n.Sym == nil {
+ if n.Sym() == nil {
// plain break/continue
- switch n.Op {
- case OCONTINUE:
+ switch n.Op() {
+ case ir.OCONTINUE:
to = s.continueTo
- case OBREAK:
+ case ir.OBREAK:
to = s.breakTo
}
} else {
// labeled break/continue; look up the target
- sym := n.Sym
+ sym := n.Sym()
lab := s.label(sym)
- switch n.Op {
- case OCONTINUE:
+ switch n.Op() {
+ case ir.OCONTINUE:
to = lab.continueTarget
- case OBREAK:
+ case ir.OBREAK:
to = lab.breakTarget
}
}
@@ -1387,7 +1394,7 @@ func (s *state) stmt(n *Node) {
b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
b.AddEdgeTo(to)
- case OFOR, OFORUNTIL:
+ case ir.OFOR, ir.OFORUNTIL:
// OFOR: for Ninit; Left; Right { Nbody }
// cond (Left); body (Nbody); incr (Right)
//
@@ -1399,16 +1406,16 @@ func (s *state) stmt(n *Node) {
bEnd := s.f.NewBlock(ssa.BlockPlain)
// ensure empty for loops have correct position; issue #30167
- bBody.Pos = n.Pos
+ bBody.Pos = n.Pos()
// first, jump to condition test (OFOR) or body (OFORUNTIL)
b := s.endBlock()
- if n.Op == OFOR {
+ if n.Op() == ir.OFOR {
b.AddEdgeTo(bCond)
// generate code to test condition
s.startBlock(bCond)
- if n.Left != nil {
- s.condBranch(n.Left, bBody, bEnd, 1)
+ if n.Left() != nil {
+ s.condBranch(n.Left(), bBody, bEnd, 1)
} else {
b := s.endBlock()
b.Kind = ssa.BlockPlain
@@ -1433,7 +1440,7 @@ func (s *state) stmt(n *Node) {
// generate body
s.startBlock(bBody)
- s.stmtList(n.Nbody)
+ s.stmtList(n.Body())
// tear down continue/break
s.continueTo = prevContinue
@@ -1450,15 +1457,15 @@ func (s *state) stmt(n *Node) {
// generate incr (and, for OFORUNTIL, condition)
s.startBlock(bIncr)
- if n.Right != nil {
- s.stmt(n.Right)
+ if n.Right() != nil {
+ s.stmt(n.Right())
}
- if n.Op == OFOR {
+ if n.Op() == ir.OFOR {
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bCond)
// It can happen that bIncr ends in a block containing only VARKILL,
// and that muddles the debugging experience.
- if n.Op != OFORUNTIL && b.Pos == src.NoXPos {
+ if n.Op() != ir.OFORUNTIL && b.Pos == src.NoXPos {
b.Pos = bCond.Pos
}
}
@@ -1466,16 +1473,16 @@ func (s *state) stmt(n *Node) {
// bCond is unused in OFORUNTIL, so repurpose it.
bLateIncr := bCond
// test condition
- s.condBranch(n.Left, bLateIncr, bEnd, 1)
+ s.condBranch(n.Left(), bLateIncr, bEnd, 1)
// generate late increment
s.startBlock(bLateIncr)
- s.stmtList(n.List)
+ s.stmtList(n.List())
s.endBlock().AddEdgeTo(bBody)
}
s.startBlock(bEnd)
- case OSWITCH, OSELECT:
+ case ir.OSWITCH, ir.OSELECT:
// These have been mostly rewritten by the front end into their Nbody fields.
// Our main task is to correctly hook up any break statements.
bEnd := s.f.NewBlock(ssa.BlockPlain)
@@ -1489,7 +1496,7 @@ func (s *state) stmt(n *Node) {
}
// generate body code
- s.stmtList(n.Nbody)
+ s.stmtList(n.Body())
s.breakTo = prevBreak
if lab != nil {
@@ -1506,40 +1513,40 @@ func (s *state) stmt(n *Node) {
}
s.startBlock(bEnd)
- case OVARDEF:
- if !s.canSSA(n.Left) {
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left, s.mem(), false)
+ case ir.OVARDEF:
+ if !s.canSSA(n.Left()) {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left(), s.mem(), false)
}
- case OVARKILL:
+ case ir.OVARKILL:
// Insert a varkill op to record that a variable is no longer live.
// We only care about liveness info at call sites, so putting the
// varkill in the store chain is enough to keep it correctly ordered
// with respect to call ops.
- if !s.canSSA(n.Left) {
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.Left, s.mem(), false)
+ if !s.canSSA(n.Left()) {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.Left(), s.mem(), false)
}
- case OVARLIVE:
+ case ir.OVARLIVE:
// Insert a varlive op to record that a variable is still live.
- if !n.Left.Name.Addrtaken() {
- s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
+ if !n.Left().Name().Addrtaken() {
+ s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left())
}
- switch n.Left.Class() {
- case PAUTO, PPARAM, PPARAMOUT:
+ switch n.Left().Class() {
+ case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
default:
- s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left)
+ s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left())
}
- s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left, s.mem())
+ s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left(), s.mem())
- case OCHECKNIL:
- p := s.expr(n.Left)
+ case ir.OCHECKNIL:
+ p := s.expr(n.Left())
s.nilCheck(p)
- case OINLMARK:
- s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Xoffset, s.mem())
+ case ir.OINLMARK:
+ s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Offset(), s.mem())
default:
- s.Fatalf("unhandled stmt %v", n.Op)
+ s.Fatalf("unhandled stmt %v", n.Op())
}
}
@@ -1569,14 +1576,14 @@ func (s *state) exit() *ssa.Block {
// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
// variables back to the stack.
- s.stmtList(s.curfn.Func.Exit)
+ s.stmtList(s.curfn.Func().Exit)
// Store SSAable PPARAMOUT variables back to stack locations.
for _, n := range s.returns {
addr := s.decladdrs[n]
- val := s.variable(n, n.Type)
- s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
- s.store(n.Type, addr, val)
+ val := s.variable(n, n.Type())
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
+ s.store(n.Type(), addr, val)
// TODO: if val is ever spilled, we'd like to use the
// PPARAMOUT slot for spilling it. That won't happen
// currently.
@@ -1594,180 +1601,180 @@ func (s *state) exit() *ssa.Block {
}
type opAndType struct {
- op Op
+ op ir.Op
etype types.EType
}
var opToSSA = map[opAndType]ssa.Op{
- opAndType{OADD, TINT8}: ssa.OpAdd8,
- opAndType{OADD, TUINT8}: ssa.OpAdd8,
- opAndType{OADD, TINT16}: ssa.OpAdd16,
- opAndType{OADD, TUINT16}: ssa.OpAdd16,
- opAndType{OADD, TINT32}: ssa.OpAdd32,
- opAndType{OADD, TUINT32}: ssa.OpAdd32,
- opAndType{OADD, TINT64}: ssa.OpAdd64,
- opAndType{OADD, TUINT64}: ssa.OpAdd64,
- opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
- opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,
-
- opAndType{OSUB, TINT8}: ssa.OpSub8,
- opAndType{OSUB, TUINT8}: ssa.OpSub8,
- opAndType{OSUB, TINT16}: ssa.OpSub16,
- opAndType{OSUB, TUINT16}: ssa.OpSub16,
- opAndType{OSUB, TINT32}: ssa.OpSub32,
- opAndType{OSUB, TUINT32}: ssa.OpSub32,
- opAndType{OSUB, TINT64}: ssa.OpSub64,
- opAndType{OSUB, TUINT64}: ssa.OpSub64,
- opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
- opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,
-
- opAndType{ONOT, TBOOL}: ssa.OpNot,
-
- opAndType{ONEG, TINT8}: ssa.OpNeg8,
- opAndType{ONEG, TUINT8}: ssa.OpNeg8,
- opAndType{ONEG, TINT16}: ssa.OpNeg16,
- opAndType{ONEG, TUINT16}: ssa.OpNeg16,
- opAndType{ONEG, TINT32}: ssa.OpNeg32,
- opAndType{ONEG, TUINT32}: ssa.OpNeg32,
- opAndType{ONEG, TINT64}: ssa.OpNeg64,
- opAndType{ONEG, TUINT64}: ssa.OpNeg64,
- opAndType{ONEG, TFLOAT32}: ssa.OpNeg32F,
- opAndType{ONEG, TFLOAT64}: ssa.OpNeg64F,
-
- opAndType{OBITNOT, TINT8}: ssa.OpCom8,
- opAndType{OBITNOT, TUINT8}: ssa.OpCom8,
- opAndType{OBITNOT, TINT16}: ssa.OpCom16,
- opAndType{OBITNOT, TUINT16}: ssa.OpCom16,
- opAndType{OBITNOT, TINT32}: ssa.OpCom32,
- opAndType{OBITNOT, TUINT32}: ssa.OpCom32,
- opAndType{OBITNOT, TINT64}: ssa.OpCom64,
- opAndType{OBITNOT, TUINT64}: ssa.OpCom64,
-
- opAndType{OIMAG, TCOMPLEX64}: ssa.OpComplexImag,
- opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
- opAndType{OREAL, TCOMPLEX64}: ssa.OpComplexReal,
- opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal,
-
- opAndType{OMUL, TINT8}: ssa.OpMul8,
- opAndType{OMUL, TUINT8}: ssa.OpMul8,
- opAndType{OMUL, TINT16}: ssa.OpMul16,
- opAndType{OMUL, TUINT16}: ssa.OpMul16,
- opAndType{OMUL, TINT32}: ssa.OpMul32,
- opAndType{OMUL, TUINT32}: ssa.OpMul32,
- opAndType{OMUL, TINT64}: ssa.OpMul64,
- opAndType{OMUL, TUINT64}: ssa.OpMul64,
- opAndType{OMUL, TFLOAT32}: ssa.OpMul32F,
- opAndType{OMUL, TFLOAT64}: ssa.OpMul64F,
-
- opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F,
- opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F,
-
- opAndType{ODIV, TINT8}: ssa.OpDiv8,
- opAndType{ODIV, TUINT8}: ssa.OpDiv8u,
- opAndType{ODIV, TINT16}: ssa.OpDiv16,
- opAndType{ODIV, TUINT16}: ssa.OpDiv16u,
- opAndType{ODIV, TINT32}: ssa.OpDiv32,
- opAndType{ODIV, TUINT32}: ssa.OpDiv32u,
- opAndType{ODIV, TINT64}: ssa.OpDiv64,
- opAndType{ODIV, TUINT64}: ssa.OpDiv64u,
-
- opAndType{OMOD, TINT8}: ssa.OpMod8,
- opAndType{OMOD, TUINT8}: ssa.OpMod8u,
- opAndType{OMOD, TINT16}: ssa.OpMod16,
- opAndType{OMOD, TUINT16}: ssa.OpMod16u,
- opAndType{OMOD, TINT32}: ssa.OpMod32,
- opAndType{OMOD, TUINT32}: ssa.OpMod32u,
- opAndType{OMOD, TINT64}: ssa.OpMod64,
- opAndType{OMOD, TUINT64}: ssa.OpMod64u,
-
- opAndType{OAND, TINT8}: ssa.OpAnd8,
- opAndType{OAND, TUINT8}: ssa.OpAnd8,
- opAndType{OAND, TINT16}: ssa.OpAnd16,
- opAndType{OAND, TUINT16}: ssa.OpAnd16,
- opAndType{OAND, TINT32}: ssa.OpAnd32,
- opAndType{OAND, TUINT32}: ssa.OpAnd32,
- opAndType{OAND, TINT64}: ssa.OpAnd64,
- opAndType{OAND, TUINT64}: ssa.OpAnd64,
-
- opAndType{OOR, TINT8}: ssa.OpOr8,
- opAndType{OOR, TUINT8}: ssa.OpOr8,
- opAndType{OOR, TINT16}: ssa.OpOr16,
- opAndType{OOR, TUINT16}: ssa.OpOr16,
- opAndType{OOR, TINT32}: ssa.OpOr32,
- opAndType{OOR, TUINT32}: ssa.OpOr32,
- opAndType{OOR, TINT64}: ssa.OpOr64,
- opAndType{OOR, TUINT64}: ssa.OpOr64,
-
- opAndType{OXOR, TINT8}: ssa.OpXor8,
- opAndType{OXOR, TUINT8}: ssa.OpXor8,
- opAndType{OXOR, TINT16}: ssa.OpXor16,
- opAndType{OXOR, TUINT16}: ssa.OpXor16,
- opAndType{OXOR, TINT32}: ssa.OpXor32,
- opAndType{OXOR, TUINT32}: ssa.OpXor32,
- opAndType{OXOR, TINT64}: ssa.OpXor64,
- opAndType{OXOR, TUINT64}: ssa.OpXor64,
-
- opAndType{OEQ, TBOOL}: ssa.OpEqB,
- opAndType{OEQ, TINT8}: ssa.OpEq8,
- opAndType{OEQ, TUINT8}: ssa.OpEq8,
- opAndType{OEQ, TINT16}: ssa.OpEq16,
- opAndType{OEQ, TUINT16}: ssa.OpEq16,
- opAndType{OEQ, TINT32}: ssa.OpEq32,
- opAndType{OEQ, TUINT32}: ssa.OpEq32,
- opAndType{OEQ, TINT64}: ssa.OpEq64,
- opAndType{OEQ, TUINT64}: ssa.OpEq64,
- opAndType{OEQ, TINTER}: ssa.OpEqInter,
- opAndType{OEQ, TSLICE}: ssa.OpEqSlice,
- opAndType{OEQ, TFUNC}: ssa.OpEqPtr,
- opAndType{OEQ, TMAP}: ssa.OpEqPtr,
- opAndType{OEQ, TCHAN}: ssa.OpEqPtr,
- opAndType{OEQ, TPTR}: ssa.OpEqPtr,
- opAndType{OEQ, TUINTPTR}: ssa.OpEqPtr,
- opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
- opAndType{OEQ, TFLOAT64}: ssa.OpEq64F,
- opAndType{OEQ, TFLOAT32}: ssa.OpEq32F,
-
- opAndType{ONE, TBOOL}: ssa.OpNeqB,
- opAndType{ONE, TINT8}: ssa.OpNeq8,
- opAndType{ONE, TUINT8}: ssa.OpNeq8,
- opAndType{ONE, TINT16}: ssa.OpNeq16,
- opAndType{ONE, TUINT16}: ssa.OpNeq16,
- opAndType{ONE, TINT32}: ssa.OpNeq32,
- opAndType{ONE, TUINT32}: ssa.OpNeq32,
- opAndType{ONE, TINT64}: ssa.OpNeq64,
- opAndType{ONE, TUINT64}: ssa.OpNeq64,
- opAndType{ONE, TINTER}: ssa.OpNeqInter,
- opAndType{ONE, TSLICE}: ssa.OpNeqSlice,
- opAndType{ONE, TFUNC}: ssa.OpNeqPtr,
- opAndType{ONE, TMAP}: ssa.OpNeqPtr,
- opAndType{ONE, TCHAN}: ssa.OpNeqPtr,
- opAndType{ONE, TPTR}: ssa.OpNeqPtr,
- opAndType{ONE, TUINTPTR}: ssa.OpNeqPtr,
- opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
- opAndType{ONE, TFLOAT64}: ssa.OpNeq64F,
- opAndType{ONE, TFLOAT32}: ssa.OpNeq32F,
-
- opAndType{OLT, TINT8}: ssa.OpLess8,
- opAndType{OLT, TUINT8}: ssa.OpLess8U,
- opAndType{OLT, TINT16}: ssa.OpLess16,
- opAndType{OLT, TUINT16}: ssa.OpLess16U,
- opAndType{OLT, TINT32}: ssa.OpLess32,
- opAndType{OLT, TUINT32}: ssa.OpLess32U,
- opAndType{OLT, TINT64}: ssa.OpLess64,
- opAndType{OLT, TUINT64}: ssa.OpLess64U,
- opAndType{OLT, TFLOAT64}: ssa.OpLess64F,
- opAndType{OLT, TFLOAT32}: ssa.OpLess32F,
-
- opAndType{OLE, TINT8}: ssa.OpLeq8,
- opAndType{OLE, TUINT8}: ssa.OpLeq8U,
- opAndType{OLE, TINT16}: ssa.OpLeq16,
- opAndType{OLE, TUINT16}: ssa.OpLeq16U,
- opAndType{OLE, TINT32}: ssa.OpLeq32,
- opAndType{OLE, TUINT32}: ssa.OpLeq32U,
- opAndType{OLE, TINT64}: ssa.OpLeq64,
- opAndType{OLE, TUINT64}: ssa.OpLeq64U,
- opAndType{OLE, TFLOAT64}: ssa.OpLeq64F,
- opAndType{OLE, TFLOAT32}: ssa.OpLeq32F,
+ opAndType{ir.OADD, types.TINT8}: ssa.OpAdd8,
+ opAndType{ir.OADD, types.TUINT8}: ssa.OpAdd8,
+ opAndType{ir.OADD, types.TINT16}: ssa.OpAdd16,
+ opAndType{ir.OADD, types.TUINT16}: ssa.OpAdd16,
+ opAndType{ir.OADD, types.TINT32}: ssa.OpAdd32,
+ opAndType{ir.OADD, types.TUINT32}: ssa.OpAdd32,
+ opAndType{ir.OADD, types.TINT64}: ssa.OpAdd64,
+ opAndType{ir.OADD, types.TUINT64}: ssa.OpAdd64,
+ opAndType{ir.OADD, types.TFLOAT32}: ssa.OpAdd32F,
+ opAndType{ir.OADD, types.TFLOAT64}: ssa.OpAdd64F,
+
+ opAndType{ir.OSUB, types.TINT8}: ssa.OpSub8,
+ opAndType{ir.OSUB, types.TUINT8}: ssa.OpSub8,
+ opAndType{ir.OSUB, types.TINT16}: ssa.OpSub16,
+ opAndType{ir.OSUB, types.TUINT16}: ssa.OpSub16,
+ opAndType{ir.OSUB, types.TINT32}: ssa.OpSub32,
+ opAndType{ir.OSUB, types.TUINT32}: ssa.OpSub32,
+ opAndType{ir.OSUB, types.TINT64}: ssa.OpSub64,
+ opAndType{ir.OSUB, types.TUINT64}: ssa.OpSub64,
+ opAndType{ir.OSUB, types.TFLOAT32}: ssa.OpSub32F,
+ opAndType{ir.OSUB, types.TFLOAT64}: ssa.OpSub64F,
+
+ opAndType{ir.ONOT, types.TBOOL}: ssa.OpNot,
+
+ opAndType{ir.ONEG, types.TINT8}: ssa.OpNeg8,
+ opAndType{ir.ONEG, types.TUINT8}: ssa.OpNeg8,
+ opAndType{ir.ONEG, types.TINT16}: ssa.OpNeg16,
+ opAndType{ir.ONEG, types.TUINT16}: ssa.OpNeg16,
+ opAndType{ir.ONEG, types.TINT32}: ssa.OpNeg32,
+ opAndType{ir.ONEG, types.TUINT32}: ssa.OpNeg32,
+ opAndType{ir.ONEG, types.TINT64}: ssa.OpNeg64,
+ opAndType{ir.ONEG, types.TUINT64}: ssa.OpNeg64,
+ opAndType{ir.ONEG, types.TFLOAT32}: ssa.OpNeg32F,
+ opAndType{ir.ONEG, types.TFLOAT64}: ssa.OpNeg64F,
+
+ opAndType{ir.OBITNOT, types.TINT8}: ssa.OpCom8,
+ opAndType{ir.OBITNOT, types.TUINT8}: ssa.OpCom8,
+ opAndType{ir.OBITNOT, types.TINT16}: ssa.OpCom16,
+ opAndType{ir.OBITNOT, types.TUINT16}: ssa.OpCom16,
+ opAndType{ir.OBITNOT, types.TINT32}: ssa.OpCom32,
+ opAndType{ir.OBITNOT, types.TUINT32}: ssa.OpCom32,
+ opAndType{ir.OBITNOT, types.TINT64}: ssa.OpCom64,
+ opAndType{ir.OBITNOT, types.TUINT64}: ssa.OpCom64,
+
+ opAndType{ir.OIMAG, types.TCOMPLEX64}: ssa.OpComplexImag,
+ opAndType{ir.OIMAG, types.TCOMPLEX128}: ssa.OpComplexImag,
+ opAndType{ir.OREAL, types.TCOMPLEX64}: ssa.OpComplexReal,
+ opAndType{ir.OREAL, types.TCOMPLEX128}: ssa.OpComplexReal,
+
+ opAndType{ir.OMUL, types.TINT8}: ssa.OpMul8,
+ opAndType{ir.OMUL, types.TUINT8}: ssa.OpMul8,
+ opAndType{ir.OMUL, types.TINT16}: ssa.OpMul16,
+ opAndType{ir.OMUL, types.TUINT16}: ssa.OpMul16,
+ opAndType{ir.OMUL, types.TINT32}: ssa.OpMul32,
+ opAndType{ir.OMUL, types.TUINT32}: ssa.OpMul32,
+ opAndType{ir.OMUL, types.TINT64}: ssa.OpMul64,
+ opAndType{ir.OMUL, types.TUINT64}: ssa.OpMul64,
+ opAndType{ir.OMUL, types.TFLOAT32}: ssa.OpMul32F,
+ opAndType{ir.OMUL, types.TFLOAT64}: ssa.OpMul64F,
+
+ opAndType{ir.ODIV, types.TFLOAT32}: ssa.OpDiv32F,
+ opAndType{ir.ODIV, types.TFLOAT64}: ssa.OpDiv64F,
+
+ opAndType{ir.ODIV, types.TINT8}: ssa.OpDiv8,
+ opAndType{ir.ODIV, types.TUINT8}: ssa.OpDiv8u,
+ opAndType{ir.ODIV, types.TINT16}: ssa.OpDiv16,
+ opAndType{ir.ODIV, types.TUINT16}: ssa.OpDiv16u,
+ opAndType{ir.ODIV, types.TINT32}: ssa.OpDiv32,
+ opAndType{ir.ODIV, types.TUINT32}: ssa.OpDiv32u,
+ opAndType{ir.ODIV, types.TINT64}: ssa.OpDiv64,
+ opAndType{ir.ODIV, types.TUINT64}: ssa.OpDiv64u,
+
+ opAndType{ir.OMOD, types.TINT8}: ssa.OpMod8,
+ opAndType{ir.OMOD, types.TUINT8}: ssa.OpMod8u,
+ opAndType{ir.OMOD, types.TINT16}: ssa.OpMod16,
+ opAndType{ir.OMOD, types.TUINT16}: ssa.OpMod16u,
+ opAndType{ir.OMOD, types.TINT32}: ssa.OpMod32,
+ opAndType{ir.OMOD, types.TUINT32}: ssa.OpMod32u,
+ opAndType{ir.OMOD, types.TINT64}: ssa.OpMod64,
+ opAndType{ir.OMOD, types.TUINT64}: ssa.OpMod64u,
+
+ opAndType{ir.OAND, types.TINT8}: ssa.OpAnd8,
+ opAndType{ir.OAND, types.TUINT8}: ssa.OpAnd8,
+ opAndType{ir.OAND, types.TINT16}: ssa.OpAnd16,
+ opAndType{ir.OAND, types.TUINT16}: ssa.OpAnd16,
+ opAndType{ir.OAND, types.TINT32}: ssa.OpAnd32,
+ opAndType{ir.OAND, types.TUINT32}: ssa.OpAnd32,
+ opAndType{ir.OAND, types.TINT64}: ssa.OpAnd64,
+ opAndType{ir.OAND, types.TUINT64}: ssa.OpAnd64,
+
+ opAndType{ir.OOR, types.TINT8}: ssa.OpOr8,
+ opAndType{ir.OOR, types.TUINT8}: ssa.OpOr8,
+ opAndType{ir.OOR, types.TINT16}: ssa.OpOr16,
+ opAndType{ir.OOR, types.TUINT16}: ssa.OpOr16,
+ opAndType{ir.OOR, types.TINT32}: ssa.OpOr32,
+ opAndType{ir.OOR, types.TUINT32}: ssa.OpOr32,
+ opAndType{ir.OOR, types.TINT64}: ssa.OpOr64,
+ opAndType{ir.OOR, types.TUINT64}: ssa.OpOr64,
+
+ opAndType{ir.OXOR, types.TINT8}: ssa.OpXor8,
+ opAndType{ir.OXOR, types.TUINT8}: ssa.OpXor8,
+ opAndType{ir.OXOR, types.TINT16}: ssa.OpXor16,
+ opAndType{ir.OXOR, types.TUINT16}: ssa.OpXor16,
+ opAndType{ir.OXOR, types.TINT32}: ssa.OpXor32,
+ opAndType{ir.OXOR, types.TUINT32}: ssa.OpXor32,
+ opAndType{ir.OXOR, types.TINT64}: ssa.OpXor64,
+ opAndType{ir.OXOR, types.TUINT64}: ssa.OpXor64,
+
+ opAndType{ir.OEQ, types.TBOOL}: ssa.OpEqB,
+ opAndType{ir.OEQ, types.TINT8}: ssa.OpEq8,
+ opAndType{ir.OEQ, types.TUINT8}: ssa.OpEq8,
+ opAndType{ir.OEQ, types.TINT16}: ssa.OpEq16,
+ opAndType{ir.OEQ, types.TUINT16}: ssa.OpEq16,
+ opAndType{ir.OEQ, types.TINT32}: ssa.OpEq32,
+ opAndType{ir.OEQ, types.TUINT32}: ssa.OpEq32,
+ opAndType{ir.OEQ, types.TINT64}: ssa.OpEq64,
+ opAndType{ir.OEQ, types.TUINT64}: ssa.OpEq64,
+ opAndType{ir.OEQ, types.TINTER}: ssa.OpEqInter,
+ opAndType{ir.OEQ, types.TSLICE}: ssa.OpEqSlice,
+ opAndType{ir.OEQ, types.TFUNC}: ssa.OpEqPtr,
+ opAndType{ir.OEQ, types.TMAP}: ssa.OpEqPtr,
+ opAndType{ir.OEQ, types.TCHAN}: ssa.OpEqPtr,
+ opAndType{ir.OEQ, types.TPTR}: ssa.OpEqPtr,
+ opAndType{ir.OEQ, types.TUINTPTR}: ssa.OpEqPtr,
+ opAndType{ir.OEQ, types.TUNSAFEPTR}: ssa.OpEqPtr,
+ opAndType{ir.OEQ, types.TFLOAT64}: ssa.OpEq64F,
+ opAndType{ir.OEQ, types.TFLOAT32}: ssa.OpEq32F,
+
+ opAndType{ir.ONE, types.TBOOL}: ssa.OpNeqB,
+ opAndType{ir.ONE, types.TINT8}: ssa.OpNeq8,
+ opAndType{ir.ONE, types.TUINT8}: ssa.OpNeq8,
+ opAndType{ir.ONE, types.TINT16}: ssa.OpNeq16,
+ opAndType{ir.ONE, types.TUINT16}: ssa.OpNeq16,
+ opAndType{ir.ONE, types.TINT32}: ssa.OpNeq32,
+ opAndType{ir.ONE, types.TUINT32}: ssa.OpNeq32,
+ opAndType{ir.ONE, types.TINT64}: ssa.OpNeq64,
+ opAndType{ir.ONE, types.TUINT64}: ssa.OpNeq64,
+ opAndType{ir.ONE, types.TINTER}: ssa.OpNeqInter,
+ opAndType{ir.ONE, types.TSLICE}: ssa.OpNeqSlice,
+ opAndType{ir.ONE, types.TFUNC}: ssa.OpNeqPtr,
+ opAndType{ir.ONE, types.TMAP}: ssa.OpNeqPtr,
+ opAndType{ir.ONE, types.TCHAN}: ssa.OpNeqPtr,
+ opAndType{ir.ONE, types.TPTR}: ssa.OpNeqPtr,
+ opAndType{ir.ONE, types.TUINTPTR}: ssa.OpNeqPtr,
+ opAndType{ir.ONE, types.TUNSAFEPTR}: ssa.OpNeqPtr,
+ opAndType{ir.ONE, types.TFLOAT64}: ssa.OpNeq64F,
+ opAndType{ir.ONE, types.TFLOAT32}: ssa.OpNeq32F,
+
+ opAndType{ir.OLT, types.TINT8}: ssa.OpLess8,
+ opAndType{ir.OLT, types.TUINT8}: ssa.OpLess8U,
+ opAndType{ir.OLT, types.TINT16}: ssa.OpLess16,
+ opAndType{ir.OLT, types.TUINT16}: ssa.OpLess16U,
+ opAndType{ir.OLT, types.TINT32}: ssa.OpLess32,
+ opAndType{ir.OLT, types.TUINT32}: ssa.OpLess32U,
+ opAndType{ir.OLT, types.TINT64}: ssa.OpLess64,
+ opAndType{ir.OLT, types.TUINT64}: ssa.OpLess64U,
+ opAndType{ir.OLT, types.TFLOAT64}: ssa.OpLess64F,
+ opAndType{ir.OLT, types.TFLOAT32}: ssa.OpLess32F,
+
+ opAndType{ir.OLE, types.TINT8}: ssa.OpLeq8,
+ opAndType{ir.OLE, types.TUINT8}: ssa.OpLeq8U,
+ opAndType{ir.OLE, types.TINT16}: ssa.OpLeq16,
+ opAndType{ir.OLE, types.TUINT16}: ssa.OpLeq16U,
+ opAndType{ir.OLE, types.TINT32}: ssa.OpLeq32,
+ opAndType{ir.OLE, types.TUINT32}: ssa.OpLeq32U,
+ opAndType{ir.OLE, types.TINT64}: ssa.OpLeq64,
+ opAndType{ir.OLE, types.TUINT64}: ssa.OpLeq64U,
+ opAndType{ir.OLE, types.TFLOAT64}: ssa.OpLeq64F,
+ opAndType{ir.OLE, types.TFLOAT32}: ssa.OpLeq32F,
}
func (s *state) concreteEtype(t *types.Type) types.EType {
@@ -1775,25 +1782,25 @@ func (s *state) concreteEtype(t *types.Type) types.EType {
switch e {
default:
return e
- case TINT:
+ case types.TINT:
if s.config.PtrSize == 8 {
- return TINT64
+ return types.TINT64
}
- return TINT32
- case TUINT:
+ return types.TINT32
+ case types.TUINT:
if s.config.PtrSize == 8 {
- return TUINT64
+ return types.TUINT64
}
- return TUINT32
- case TUINTPTR:
+ return types.TUINT32
+ case types.TUINTPTR:
if s.config.PtrSize == 8 {
- return TUINT64
+ return types.TUINT64
}
- return TUINT32
+ return types.TUINT32
}
}
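
opToSSA is keyed only by fixed-size element types, which is why ssaOp below narrows the type through concreteEtype first. On a 64-bit target, for example:

	// etype := s.concreteEtype(types.Types[types.TINT]) // TINT -> types.TINT64 when PtrSize == 8
	// op := opToSSA[opAndType{ir.OADD, etype}]          // ssa.OpAdd64
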
-func (s *state) ssaOp(op Op, t *types.Type) ssa.Op {
+func (s *state) ssaOp(op ir.Op, t *types.Type) ssa.Op {
etype := s.concreteEtype(t)
x, ok := opToSSA[opAndType{op, etype}]
if !ok {
@@ -1804,28 +1811,28 @@ func (s *state) ssaOp(op Op, t *types.Type) ssa.Op {
func floatForComplex(t *types.Type) *types.Type {
switch t.Etype {
- case TCOMPLEX64:
- return types.Types[TFLOAT32]
- case TCOMPLEX128:
- return types.Types[TFLOAT64]
+ case types.TCOMPLEX64:
+ return types.Types[types.TFLOAT32]
+ case types.TCOMPLEX128:
+ return types.Types[types.TFLOAT64]
}
- Fatalf("unexpected type: %v", t)
+ base.Fatalf("unexpected type: %v", t)
return nil
}
func complexForFloat(t *types.Type) *types.Type {
switch t.Etype {
- case TFLOAT32:
- return types.Types[TCOMPLEX64]
- case TFLOAT64:
- return types.Types[TCOMPLEX128]
+ case types.TFLOAT32:
+ return types.Types[types.TCOMPLEX64]
+ case types.TFLOAT64:
+ return types.Types[types.TCOMPLEX128]
}
- Fatalf("unexpected type: %v", t)
+ base.Fatalf("unexpected type: %v", t)
return nil
}
type opAndTwoTypes struct {
- op Op
+ op ir.Op
etype1 types.EType
etype2 types.EType
}
@@ -1843,145 +1850,145 @@ type twoOpsAndType struct {
var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
- twoTypes{TINT8, TFLOAT32}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32},
- twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32},
- twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32},
- twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64},
-
- twoTypes{TINT8, TFLOAT64}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32},
- twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32},
- twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32},
- twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64},
-
- twoTypes{TFLOAT32, TINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
- twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
- twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32},
- twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64},
-
- twoTypes{TFLOAT64, TINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
- twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
- twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32},
- twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64},
+ twoTypes{types.TINT8, types.TFLOAT32}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, types.TINT32},
+ twoTypes{types.TINT16, types.TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, types.TINT32},
+ twoTypes{types.TINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, types.TINT32},
+ twoTypes{types.TINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, types.TINT64},
+
+ twoTypes{types.TINT8, types.TFLOAT64}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, types.TINT32},
+ twoTypes{types.TINT16, types.TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, types.TINT32},
+ twoTypes{types.TINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, types.TINT32},
+ twoTypes{types.TINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, types.TINT64},
+
+ twoTypes{types.TFLOAT32, types.TINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
+ twoTypes{types.TFLOAT32, types.TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
+ twoTypes{types.TFLOAT32, types.TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, types.TINT32},
+ twoTypes{types.TFLOAT32, types.TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, types.TINT64},
+
+ twoTypes{types.TFLOAT64, types.TINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
+ twoTypes{types.TFLOAT64, types.TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
+ twoTypes{types.TFLOAT64, types.TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, types.TINT32},
+ twoTypes{types.TFLOAT64, types.TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, types.TINT64},
// unsigned
- twoTypes{TUINT8, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32},
- twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32},
- twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned
- twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto32F, branchy code expansion instead
-
- twoTypes{TUINT8, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32},
- twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32},
- twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned
- twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto64F, branchy code expansion instead
-
- twoTypes{TFLOAT32, TUINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
- twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
- twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
- twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt32Fto64U, branchy code expansion instead
-
- twoTypes{TFLOAT64, TUINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
- twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
- twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
- twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt64Fto64U, branchy code expansion instead
+ twoTypes{types.TUINT8, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, types.TINT32},
+ twoTypes{types.TUINT16, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, types.TINT32},
+ twoTypes{types.TUINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, types.TINT64}, // go wide to dodge unsigned
+ twoTypes{types.TUINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto32F, branchy code expansion instead
+
+ twoTypes{types.TUINT8, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, types.TINT32},
+ twoTypes{types.TUINT16, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, types.TINT32},
+ twoTypes{types.TUINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, types.TINT64}, // go wide to dodge unsigned
+ twoTypes{types.TUINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto64F, branchy code expansion instead
+
+ twoTypes{types.TFLOAT32, types.TUINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
+ twoTypes{types.TFLOAT32, types.TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
+ twoTypes{types.TFLOAT32, types.TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
+ twoTypes{types.TFLOAT32, types.TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt32Fto64U, branchy code expansion instead
+
+ twoTypes{types.TFLOAT64, types.TUINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
+ twoTypes{types.TFLOAT64, types.TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
+ twoTypes{types.TFLOAT64, types.TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
+ twoTypes{types.TFLOAT64, types.TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt64Fto64U, branchy code expansion instead
// float
- twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32},
- twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, TFLOAT64},
- twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, TFLOAT32},
- twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
+ twoTypes{types.TFLOAT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, types.TFLOAT32},
+ twoTypes{types.TFLOAT64, types.TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, types.TFLOAT64},
+ twoTypes{types.TFLOAT32, types.TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, types.TFLOAT32},
+ twoTypes{types.TFLOAT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, types.TFLOAT64},
}
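
The "go wide to dodge unsigned" entries above compensate for the lack of an unsigned 32-bit→float instruction on 64-bit targets: every uint32 value is representable as a non-negative int64, so the value is zero-extended and converted with the signed op. A minimal stand-alone sketch of the source-level equivalent:

```go
package main

import "fmt"

// uint32ToFloat64 spells out what the twoOpsAndType entry
// {OpZeroExt32to64, OpCvt64to64F, TINT64} composes: widen first, then
// apply a signed conversion that is now safe.
func uint32ToFloat64(u uint32) float64 {
	wide := int64(u)     // OpZeroExt32to64: zero-extend; always >= 0
	return float64(wide) // OpCvt64to64F: signed 64-bit conversion
}

func main() {
	fmt.Println(uint32ToFloat64(0xFFFFFFFF)) // 4.294967295e+09
}
```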
	// this map is used only on 32-bit archs; it contains only the entries
	// that differ from the map above: on 32-bit archs, don't use
	// int64<->float conversions for uint32
var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
- twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32},
- twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32},
- twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32},
- twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32},
+ twoTypes{types.TUINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, types.TUINT32},
+ twoTypes{types.TUINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, types.TUINT32},
+ twoTypes{types.TFLOAT32, types.TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, types.TUINT32},
+ twoTypes{types.TFLOAT64, types.TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, types.TUINT32},
}
	// uint64<->float conversions, used only on machines that have hardware
	// instructions for them
var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
- twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, TUINT64},
- twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, TUINT64},
- twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, TUINT64},
- twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, TUINT64},
+ twoTypes{types.TUINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, types.TUINT64},
+ twoTypes{types.TUINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, types.TUINT64},
+ twoTypes{types.TFLOAT32, types.TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, types.TUINT64},
+ twoTypes{types.TFLOAT64, types.TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, types.TUINT64},
}
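
The OpInvalid entries in the first map mark uint64<->float conversions that get a branchy expansion on machines lacking these instructions. The exact expansion lives in the per-architecture rules; a sketch of the usual halve-and-double shape it takes for uint64→float64:

```go
package main

import "fmt"

// uint64ToFloat64 is one standard shape of the "branchy code expansion":
// values that fit in int64 take the signed conversion directly; otherwise
// halve the value (folding the dropped bit into bit 0 so rounding still
// sees it), convert, and double.
func uint64ToFloat64(u uint64) float64 {
	if int64(u) >= 0 {
		return float64(int64(u))
	}
	half := u>>1 | u&1 // keep the low bit as a sticky rounding bit
	return float64(int64(half)) * 2
}

func main() {
	fmt.Println(uint64ToFloat64(1<<63 + 1)) // 9.223372036854776e+18
}
```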
var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
- opAndTwoTypes{OLSH, TINT8, TUINT8}: ssa.OpLsh8x8,
- opAndTwoTypes{OLSH, TUINT8, TUINT8}: ssa.OpLsh8x8,
- opAndTwoTypes{OLSH, TINT8, TUINT16}: ssa.OpLsh8x16,
- opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
- opAndTwoTypes{OLSH, TINT8, TUINT32}: ssa.OpLsh8x32,
- opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
- opAndTwoTypes{OLSH, TINT8, TUINT64}: ssa.OpLsh8x64,
- opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,
-
- opAndTwoTypes{OLSH, TINT16, TUINT8}: ssa.OpLsh16x8,
- opAndTwoTypes{OLSH, TUINT16, TUINT8}: ssa.OpLsh16x8,
- opAndTwoTypes{OLSH, TINT16, TUINT16}: ssa.OpLsh16x16,
- opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
- opAndTwoTypes{OLSH, TINT16, TUINT32}: ssa.OpLsh16x32,
- opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
- opAndTwoTypes{OLSH, TINT16, TUINT64}: ssa.OpLsh16x64,
- opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,
-
- opAndTwoTypes{OLSH, TINT32, TUINT8}: ssa.OpLsh32x8,
- opAndTwoTypes{OLSH, TUINT32, TUINT8}: ssa.OpLsh32x8,
- opAndTwoTypes{OLSH, TINT32, TUINT16}: ssa.OpLsh32x16,
- opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
- opAndTwoTypes{OLSH, TINT32, TUINT32}: ssa.OpLsh32x32,
- opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
- opAndTwoTypes{OLSH, TINT32, TUINT64}: ssa.OpLsh32x64,
- opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,
-
- opAndTwoTypes{OLSH, TINT64, TUINT8}: ssa.OpLsh64x8,
- opAndTwoTypes{OLSH, TUINT64, TUINT8}: ssa.OpLsh64x8,
- opAndTwoTypes{OLSH, TINT64, TUINT16}: ssa.OpLsh64x16,
- opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
- opAndTwoTypes{OLSH, TINT64, TUINT32}: ssa.OpLsh64x32,
- opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
- opAndTwoTypes{OLSH, TINT64, TUINT64}: ssa.OpLsh64x64,
- opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,
-
- opAndTwoTypes{ORSH, TINT8, TUINT8}: ssa.OpRsh8x8,
- opAndTwoTypes{ORSH, TUINT8, TUINT8}: ssa.OpRsh8Ux8,
- opAndTwoTypes{ORSH, TINT8, TUINT16}: ssa.OpRsh8x16,
- opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
- opAndTwoTypes{ORSH, TINT8, TUINT32}: ssa.OpRsh8x32,
- opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
- opAndTwoTypes{ORSH, TINT8, TUINT64}: ssa.OpRsh8x64,
- opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,
-
- opAndTwoTypes{ORSH, TINT16, TUINT8}: ssa.OpRsh16x8,
- opAndTwoTypes{ORSH, TUINT16, TUINT8}: ssa.OpRsh16Ux8,
- opAndTwoTypes{ORSH, TINT16, TUINT16}: ssa.OpRsh16x16,
- opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
- opAndTwoTypes{ORSH, TINT16, TUINT32}: ssa.OpRsh16x32,
- opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
- opAndTwoTypes{ORSH, TINT16, TUINT64}: ssa.OpRsh16x64,
- opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,
-
- opAndTwoTypes{ORSH, TINT32, TUINT8}: ssa.OpRsh32x8,
- opAndTwoTypes{ORSH, TUINT32, TUINT8}: ssa.OpRsh32Ux8,
- opAndTwoTypes{ORSH, TINT32, TUINT16}: ssa.OpRsh32x16,
- opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
- opAndTwoTypes{ORSH, TINT32, TUINT32}: ssa.OpRsh32x32,
- opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
- opAndTwoTypes{ORSH, TINT32, TUINT64}: ssa.OpRsh32x64,
- opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,
-
- opAndTwoTypes{ORSH, TINT64, TUINT8}: ssa.OpRsh64x8,
- opAndTwoTypes{ORSH, TUINT64, TUINT8}: ssa.OpRsh64Ux8,
- opAndTwoTypes{ORSH, TINT64, TUINT16}: ssa.OpRsh64x16,
- opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
- opAndTwoTypes{ORSH, TINT64, TUINT32}: ssa.OpRsh64x32,
- opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
- opAndTwoTypes{ORSH, TINT64, TUINT64}: ssa.OpRsh64x64,
- opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
-}
-
-func (s *state) ssaShiftOp(op Op, t *types.Type, u *types.Type) ssa.Op {
+ opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT8}: ssa.OpLsh8x8,
+ opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT8}: ssa.OpLsh8x8,
+ opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT16}: ssa.OpLsh8x16,
+ opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT16}: ssa.OpLsh8x16,
+ opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT32}: ssa.OpLsh8x32,
+ opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT32}: ssa.OpLsh8x32,
+ opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT64}: ssa.OpLsh8x64,
+ opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT64}: ssa.OpLsh8x64,
+
+ opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT8}: ssa.OpLsh16x8,
+ opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT8}: ssa.OpLsh16x8,
+ opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT16}: ssa.OpLsh16x16,
+ opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT16}: ssa.OpLsh16x16,
+ opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT32}: ssa.OpLsh16x32,
+ opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT32}: ssa.OpLsh16x32,
+ opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT64}: ssa.OpLsh16x64,
+ opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT64}: ssa.OpLsh16x64,
+
+ opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT8}: ssa.OpLsh32x8,
+ opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT8}: ssa.OpLsh32x8,
+ opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT16}: ssa.OpLsh32x16,
+ opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT16}: ssa.OpLsh32x16,
+ opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT32}: ssa.OpLsh32x32,
+ opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT32}: ssa.OpLsh32x32,
+ opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT64}: ssa.OpLsh32x64,
+ opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT64}: ssa.OpLsh32x64,
+
+ opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT8}: ssa.OpLsh64x8,
+ opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT8}: ssa.OpLsh64x8,
+ opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT16}: ssa.OpLsh64x16,
+ opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT16}: ssa.OpLsh64x16,
+ opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT32}: ssa.OpLsh64x32,
+ opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT32}: ssa.OpLsh64x32,
+ opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT64}: ssa.OpLsh64x64,
+ opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT64}: ssa.OpLsh64x64,
+
+ opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT8}: ssa.OpRsh8x8,
+ opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT8}: ssa.OpRsh8Ux8,
+ opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT16}: ssa.OpRsh8x16,
+ opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT16}: ssa.OpRsh8Ux16,
+ opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT32}: ssa.OpRsh8x32,
+ opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT32}: ssa.OpRsh8Ux32,
+ opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT64}: ssa.OpRsh8x64,
+ opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT64}: ssa.OpRsh8Ux64,
+
+ opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT8}: ssa.OpRsh16x8,
+ opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT8}: ssa.OpRsh16Ux8,
+ opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT16}: ssa.OpRsh16x16,
+ opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT16}: ssa.OpRsh16Ux16,
+ opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT32}: ssa.OpRsh16x32,
+ opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT32}: ssa.OpRsh16Ux32,
+ opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT64}: ssa.OpRsh16x64,
+ opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT64}: ssa.OpRsh16Ux64,
+
+ opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT8}: ssa.OpRsh32x8,
+ opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT8}: ssa.OpRsh32Ux8,
+ opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT16}: ssa.OpRsh32x16,
+ opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT16}: ssa.OpRsh32Ux16,
+ opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT32}: ssa.OpRsh32x32,
+ opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT32}: ssa.OpRsh32Ux32,
+ opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT64}: ssa.OpRsh32x64,
+ opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT64}: ssa.OpRsh32Ux64,
+
+ opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT8}: ssa.OpRsh64x8,
+ opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT8}: ssa.OpRsh64Ux8,
+ opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT16}: ssa.OpRsh64x16,
+ opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT16}: ssa.OpRsh64Ux16,
+ opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT32}: ssa.OpRsh64x32,
+ opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT32}: ssa.OpRsh64Ux32,
+ opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT64}: ssa.OpRsh64x64,
+ opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT64}: ssa.OpRsh64Ux64,
+}
+
+func (s *state) ssaShiftOp(op ir.Op, t *types.Type, u *types.Type) ssa.Op {
etype1 := s.concreteEtype(t)
etype2 := s.concreteEtype(u)
x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
@@ -1992,117 +1999,121 @@ func (s *state) ssaShiftOp(op Op, t *types.Type, u *types.Type) ssa.Op {
}
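
ssaShiftOp's table is keyed by (op, value kind, shift-count kind) because Go permits shift counts of any unsigned width, and because the value's signedness matters only for right shifts: ORSH selects an arithmetic op (OpRsh8x8) for signed operands and a logical op (OpRsh8Ux8) for unsigned ones, while OLSH shares one op per width. A stand-alone illustration:

```go
package main

import "fmt"

func main() {
	var s int8 = -8
	var u uint8 = 0xF8 // same bit pattern as s

	fmt.Println(s >> 1)     // -4: OpRsh8x8, arithmetic shift, sign bit copied
	fmt.Println(u >> 1)     // 124: OpRsh8Ux8, logical shift, zero-filled
	fmt.Println(s<<1, u<<1) // -16 240: OpLsh8x8 serves both signednesses
}
```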
// expr converts the expression n to ssa, adds it to s and returns the ssa result.
-func (s *state) expr(n *Node) *ssa.Value {
- if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) {
+func (s *state) expr(n ir.Node) *ssa.Value {
+ if hasUniquePos(n) {
// ONAMEs and named OLITERALs have the line number
// of the decl, not the use. See issue 14742.
- s.pushLine(n.Pos)
+ s.pushLine(n.Pos())
defer s.popLine()
}
- s.stmtList(n.Ninit)
- switch n.Op {
- case OBYTES2STRTMP:
- slice := s.expr(n.Left)
+ s.stmtList(n.Init())
+ switch n.Op() {
+ case ir.OBYTES2STRTMP:
+ slice := s.expr(n.Left())
ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
- len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
- return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
- case OSTR2BYTESTMP:
- str := s.expr(n.Left)
+ len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
+ return s.newValue2(ssa.OpStringMake, n.Type(), ptr, len)
+ case ir.OSTR2BYTESTMP:
+ str := s.expr(n.Left())
ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
- len := s.newValue1(ssa.OpStringLen, types.Types[TINT], str)
- return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len)
- case OCFUNC:
- aux := n.Left.Sym.Linksym()
- return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
- case ONAME:
- if n.Class() == PFUNC {
+ len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], str)
+ return s.newValue3(ssa.OpSliceMake, n.Type(), ptr, len, len)
+ case ir.OCFUNC:
+ aux := n.Left().Sym().Linksym()
+ return s.entryNewValue1A(ssa.OpAddr, n.Type(), aux, s.sb)
+ case ir.OMETHEXPR:
+ sym := funcsym(n.Sym()).Linksym()
+ return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb)
+ case ir.ONAME:
+ if n.Class() == ir.PFUNC {
// "value" of a function is the address of the function's closure
- sym := funcsym(n.Sym).Linksym()
- return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), sym, s.sb)
+ sym := funcsym(n.Sym()).Linksym()
+ return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb)
}
if s.canSSA(n) {
- return s.variable(n, n.Type)
+ return s.variable(n, n.Type())
}
addr := s.addr(n)
- return s.load(n.Type, addr)
- case OCLOSUREVAR:
+ return s.load(n.Type(), addr)
+ case ir.OCLOSUREVAR:
addr := s.addr(n)
- return s.load(n.Type, addr)
- case OLITERAL:
- switch u := n.Val().U.(type) {
- case *Mpint:
- i := u.Int64()
- switch n.Type.Size() {
+ return s.load(n.Type(), addr)
+ case ir.ONIL:
+ t := n.Type()
+ switch {
+ case t.IsSlice():
+ return s.constSlice(t)
+ case t.IsInterface():
+ return s.constInterface(t)
+ default:
+ return s.constNil(t)
+ }
+ case ir.OLITERAL:
+ switch u := n.Val(); u.Kind() {
+ case constant.Int:
+ i := ir.Int64Val(n.Type(), u)
+ switch n.Type().Size() {
case 1:
- return s.constInt8(n.Type, int8(i))
+ return s.constInt8(n.Type(), int8(i))
case 2:
- return s.constInt16(n.Type, int16(i))
+ return s.constInt16(n.Type(), int16(i))
case 4:
- return s.constInt32(n.Type, int32(i))
+ return s.constInt32(n.Type(), int32(i))
case 8:
- return s.constInt64(n.Type, i)
+ return s.constInt64(n.Type(), i)
default:
- s.Fatalf("bad integer size %d", n.Type.Size())
+ s.Fatalf("bad integer size %d", n.Type().Size())
return nil
}
- case string:
- if u == "" {
- return s.constEmptyString(n.Type)
+ case constant.String:
+ i := constant.StringVal(u)
+ if i == "" {
+ return s.constEmptyString(n.Type())
}
- return s.entryNewValue0A(ssa.OpConstString, n.Type, u)
- case bool:
- return s.constBool(u)
- case *NilVal:
- t := n.Type
- switch {
- case t.IsSlice():
- return s.constSlice(t)
- case t.IsInterface():
- return s.constInterface(t)
- default:
- return s.constNil(t)
- }
- case *Mpflt:
- switch n.Type.Size() {
+ return s.entryNewValue0A(ssa.OpConstString, n.Type(), i)
+ case constant.Bool:
+ return s.constBool(constant.BoolVal(u))
+ case constant.Float:
+ f, _ := constant.Float64Val(u)
+ switch n.Type().Size() {
case 4:
- return s.constFloat32(n.Type, u.Float32())
+ return s.constFloat32(n.Type(), f)
case 8:
- return s.constFloat64(n.Type, u.Float64())
+ return s.constFloat64(n.Type(), f)
default:
- s.Fatalf("bad float size %d", n.Type.Size())
+ s.Fatalf("bad float size %d", n.Type().Size())
return nil
}
- case *Mpcplx:
- r := &u.Real
- i := &u.Imag
- switch n.Type.Size() {
+ case constant.Complex:
+ re, _ := constant.Float64Val(constant.Real(u))
+ im, _ := constant.Float64Val(constant.Imag(u))
+ switch n.Type().Size() {
case 8:
- pt := types.Types[TFLOAT32]
- return s.newValue2(ssa.OpComplexMake, n.Type,
- s.constFloat32(pt, r.Float32()),
- s.constFloat32(pt, i.Float32()))
+ pt := types.Types[types.TFLOAT32]
+ return s.newValue2(ssa.OpComplexMake, n.Type(),
+ s.constFloat32(pt, re),
+ s.constFloat32(pt, im))
case 16:
- pt := types.Types[TFLOAT64]
- return s.newValue2(ssa.OpComplexMake, n.Type,
- s.constFloat64(pt, r.Float64()),
- s.constFloat64(pt, i.Float64()))
+ pt := types.Types[types.TFLOAT64]
+ return s.newValue2(ssa.OpComplexMake, n.Type(),
+ s.constFloat64(pt, re),
+ s.constFloat64(pt, im))
default:
- s.Fatalf("bad float size %d", n.Type.Size())
+ s.Fatalf("bad complex size %d", n.Type().Size())
return nil
}
-
default:
- s.Fatalf("unhandled OLITERAL %v", n.Val().Ctype())
+ s.Fatalf("unhandled OLITERAL %v", u.Kind())
return nil
}
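
The OLITERAL case now dispatches on go/constant's Kind in place of the old Mpint/Mpflt/Mpcplx wrappers. A self-contained sketch of that API shape, independent of the compiler's own types:

```go
package main

import (
	"fmt"
	"go/constant"
)

// describe mirrors the Kind switch above: classify the constant.Value,
// then extract a concrete Go value from it.
func describe(v constant.Value) string {
	switch v.Kind() {
	case constant.Int:
		i, _ := constant.Int64Val(v)
		return fmt.Sprintf("int %d", i)
	case constant.String:
		return fmt.Sprintf("string %q", constant.StringVal(v))
	case constant.Bool:
		return fmt.Sprintf("bool %v", constant.BoolVal(v))
	case constant.Float:
		f, _ := constant.Float64Val(v) // second result reports exactness
		return fmt.Sprintf("float %g", f)
	case constant.Complex:
		re, _ := constant.Float64Val(constant.Real(v))
		im, _ := constant.Float64Val(constant.Imag(v))
		return fmt.Sprintf("complex (%g, %g)", re, im)
	}
	return "unhandled"
}

func main() {
	fmt.Println(describe(constant.MakeInt64(42)))
	fmt.Println(describe(constant.MakeString("ssa")))
}
```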
- case OCONVNOP:
- to := n.Type
- from := n.Left.Type
+ case ir.OCONVNOP:
+ to := n.Type()
+ from := n.Left().Type()
// Assume everything will work out, so set up our return value.
	// Anything interesting that happens from here on is a fatal error.
- x := s.expr(n.Left)
+ x := s.expr(n.Left())
// Special case for not confusing GC and liveness.
// We don't want pointers accidentally classified
@@ -2115,7 +2126,7 @@ func (s *state) expr(n *Node) *ssa.Value {
v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type
// CONVNOP closure
- if to.Etype == TFUNC && from.IsPtrShaped() {
+ if to.Etype == types.TFUNC && from.IsPtrShaped() {
return v
}
@@ -2130,7 +2141,7 @@ func (s *state) expr(n *Node) *ssa.Value {
}
// map <--> *hmap
- if to.Etype == TMAP && from.IsPtr() &&
+ if to.Etype == types.TMAP && from.IsPtr() &&
to.MapType().Hmap == from.Elem() {
return v
}
@@ -2161,13 +2172,13 @@ func (s *state) expr(n *Node) *ssa.Value {
// integer, same width, same sign
return v
- case OCONV:
- x := s.expr(n.Left)
- ft := n.Left.Type // from type
- tt := n.Type // to type
- if ft.IsBoolean() && tt.IsKind(TUINT8) {
+ case ir.OCONV:
+ x := s.expr(n.Left())
+ ft := n.Left().Type() // from type
+ tt := n.Type() // to type
+ if ft.IsBoolean() && tt.IsKind(types.TUINT8) {
// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
- return s.newValue1(ssa.OpCopy, n.Type, x)
+ return s.newValue1(ssa.OpCopy, n.Type(), x)
}
if ft.IsInteger() && tt.IsInteger() {
var op ssa.Op
@@ -2228,7 +2239,7 @@ func (s *state) expr(n *Node) *ssa.Value {
s.Fatalf("weird integer sign extension %v -> %v", ft, tt)
}
}
- return s.newValue1(op, n.Type, x)
+ return s.newValue1(op, n.Type(), x)
}
if ft.IsFloat() || tt.IsFloat() {
@@ -2275,12 +2286,12 @@ func (s *state) expr(n *Node) *ssa.Value {
if op2 == ssa.OpCopy {
return x
}
- return s.newValueOrSfCall1(op2, n.Type, x)
+ return s.newValueOrSfCall1(op2, n.Type(), x)
}
if op2 == ssa.OpCopy {
- return s.newValueOrSfCall1(op1, n.Type, x)
+ return s.newValueOrSfCall1(op1, n.Type(), x)
}
- return s.newValueOrSfCall1(op2, n.Type, s.newValueOrSfCall1(op1, types.Types[it], x))
+ return s.newValueOrSfCall1(op2, n.Type(), s.newValueOrSfCall1(op1, types.Types[it], x))
}
// Tricky 64-bit unsigned cases.
if ft.IsInteger() {
@@ -2329,56 +2340,56 @@ func (s *state) expr(n *Node) *ssa.Value {
s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
}
- s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype)
+ s.Fatalf("unhandled OCONV %s -> %s", n.Left().Type().Etype, n.Type().Etype)
return nil
- case ODOTTYPE:
+ case ir.ODOTTYPE:
res, _ := s.dottype(n, false)
return res
// binary ops
- case OLT, OEQ, ONE, OLE, OGE, OGT:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- if n.Left.Type.IsComplex() {
- pt := floatForComplex(n.Left.Type)
- op := s.ssaOp(OEQ, pt)
- r := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
- i := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
- c := s.newValue2(ssa.OpAndB, types.Types[TBOOL], r, i)
- switch n.Op {
- case OEQ:
+ case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
+ if n.Left().Type().IsComplex() {
+ pt := floatForComplex(n.Left().Type())
+ op := s.ssaOp(ir.OEQ, pt)
+ r := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
+ i := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
+ c := s.newValue2(ssa.OpAndB, types.Types[types.TBOOL], r, i)
+ switch n.Op() {
+ case ir.OEQ:
return c
- case ONE:
- return s.newValue1(ssa.OpNot, types.Types[TBOOL], c)
+ case ir.ONE:
+ return s.newValue1(ssa.OpNot, types.Types[types.TBOOL], c)
default:
- s.Fatalf("ordered complex compare %v", n.Op)
+ s.Fatalf("ordered complex compare %v", n.Op())
}
}
// Convert OGE and OGT into OLE and OLT.
- op := n.Op
+ op := n.Op()
switch op {
- case OGE:
- op, a, b = OLE, b, a
- case OGT:
- op, a, b = OLT, b, a
+ case ir.OGE:
+ op, a, b = ir.OLE, b, a
+ case ir.OGT:
+ op, a, b = ir.OLT, b, a
}
- if n.Left.Type.IsFloat() {
+ if n.Left().Type().IsFloat() {
// float comparison
- return s.newValueOrSfCall2(s.ssaOp(op, n.Left.Type), types.Types[TBOOL], a, b)
+ return s.newValueOrSfCall2(s.ssaOp(op, n.Left().Type()), types.Types[types.TBOOL], a, b)
}
// integer comparison
- return s.newValue2(s.ssaOp(op, n.Left.Type), types.Types[TBOOL], a, b)
- case OMUL:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- if n.Type.IsComplex() {
+ return s.newValue2(s.ssaOp(op, n.Left().Type()), types.Types[types.TBOOL], a, b)
+ case ir.OMUL:
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
+ if n.Type().IsComplex() {
mulop := ssa.OpMul64F
addop := ssa.OpAdd64F
subop := ssa.OpSub64F
- pt := floatForComplex(n.Type) // Could be Float32 or Float64
- wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancellation error
+ pt := floatForComplex(n.Type()) // Could be Float32 or Float64
+ wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
areal := s.newValue1(ssa.OpComplexReal, pt, a)
breal := s.newValue1(ssa.OpComplexReal, pt, b)
@@ -2400,19 +2411,19 @@ func (s *state) expr(n *Node) *ssa.Value {
ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
}
- return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
+ return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
}
- if n.Type.IsFloat() {
- return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ if n.Type().IsFloat() {
+ return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
- return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
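
The complex OMUL path widens float32 parts to float64 (wt), forms areal*breal - aimag*bimag and areal*bimag + aimag*breal there, and narrows only at the end, limiting cancellation error. The same sequence in plain Go:

```go
package main

import "fmt"

// mulComplex64 sketches the widen-multiply-narrow sequence the OMUL case
// emits for complex64 operands: compute in float64 (wt), narrow at the end.
func mulComplex64(a, b complex64) complex64 {
	ar, ai := float64(real(a)), float64(imag(a))
	br, bi := float64(real(b)), float64(imag(b))
	re := ar*br - ai*bi // subop in float64 limits cancellation error
	im := ar*bi + ai*br // addop in float64
	return complex64(complex(re, im)) // Cvt64Fto32F back to float32 parts
}

func main() {
	fmt.Println(mulComplex64(1+2i, 3+4i)) // (-5+10i)
}
```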
- case ODIV:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- if n.Type.IsComplex() {
+ case ir.ODIV:
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
+ if n.Type().IsComplex() {
// TODO this is not executed because the front-end substitutes a runtime call.
// That probably ought to change; with modest optimization the widen/narrow
// conversions could all be elided in larger expression trees.
@@ -2420,8 +2431,8 @@ func (s *state) expr(n *Node) *ssa.Value {
addop := ssa.OpAdd64F
subop := ssa.OpSub64F
divop := ssa.OpDiv64F
- pt := floatForComplex(n.Type) // Could be Float32 or Float64
- wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancellation error
+ pt := floatForComplex(n.Type()) // Could be Float32 or Float64
+ wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
areal := s.newValue1(ssa.OpComplexReal, pt, a)
breal := s.newValue1(ssa.OpComplexReal, pt, b)
@@ -2450,50 +2461,50 @@ func (s *state) expr(n *Node) *ssa.Value {
xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
}
- return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
+ return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
}
- if n.Type.IsFloat() {
- return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ if n.Type().IsFloat() {
+ return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
return s.intDivide(n, a, b)
- case OMOD:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
+ case ir.OMOD:
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
return s.intDivide(n, a, b)
- case OADD, OSUB:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- if n.Type.IsComplex() {
- pt := floatForComplex(n.Type)
- op := s.ssaOp(n.Op, pt)
- return s.newValue2(ssa.OpComplexMake, n.Type,
+ case ir.OADD, ir.OSUB:
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
+ if n.Type().IsComplex() {
+ pt := floatForComplex(n.Type())
+ op := s.ssaOp(n.Op(), pt)
+ return s.newValue2(ssa.OpComplexMake, n.Type(),
s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
}
- if n.Type.IsFloat() {
- return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
- }
- return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
- case OAND, OOR, OXOR:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
- case OANDNOT:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- b = s.newValue1(s.ssaOp(OBITNOT, b.Type), b.Type, b)
- return s.newValue2(s.ssaOp(OAND, n.Type), a.Type, a, b)
- case OLSH, ORSH:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
+ if n.Type().IsFloat() {
+ return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+ }
+ return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+ case ir.OAND, ir.OOR, ir.OXOR:
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
+ return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+ case ir.OANDNOT:
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
+ b = s.newValue1(s.ssaOp(ir.OBITNOT, b.Type), b.Type, b)
+ return s.newValue2(s.ssaOp(ir.OAND, n.Type()), a.Type, a, b)
+ case ir.OLSH, ir.ORSH:
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
bt := b.Type
if bt.IsSigned() {
- cmp := s.newValue2(s.ssaOp(OLE, bt), types.Types[TBOOL], s.zeroVal(bt), b)
+ cmp := s.newValue2(s.ssaOp(ir.OLE, bt), types.Types[types.TBOOL], s.zeroVal(bt), b)
s.check(cmp, panicshift)
bt = bt.ToUnsigned()
}
- return s.newValue2(s.ssaShiftOp(n.Op, n.Type, bt), a.Type, a, b)
- case OANDAND, OOROR:
+ return s.newValue2(s.ssaShiftOp(n.Op(), n.Type(), bt), a.Type, a, b)
+ case ir.OANDAND, ir.OOROR:
// To implement OANDAND (and OOROR), we introduce a
// new temporary variable to hold the result. The
// variable is associated with the OANDAND node in the
@@ -2507,7 +2518,7 @@ func (s *state) expr(n *Node) *ssa.Value {
// }
// Using var in the subsequent block introduces the
// necessary phi variable.
- el := s.expr(n.Left)
+ el := s.expr(n.Left())
s.vars[n] = el
b := s.endBlock()
@@ -2520,83 +2531,83 @@ func (s *state) expr(n *Node) *ssa.Value {
bRight := s.f.NewBlock(ssa.BlockPlain)
bResult := s.f.NewBlock(ssa.BlockPlain)
- if n.Op == OANDAND {
+ if n.Op() == ir.OANDAND {
b.AddEdgeTo(bRight)
b.AddEdgeTo(bResult)
- } else if n.Op == OOROR {
+ } else if n.Op() == ir.OOROR {
b.AddEdgeTo(bResult)
b.AddEdgeTo(bRight)
}
s.startBlock(bRight)
- er := s.expr(n.Right)
+ er := s.expr(n.Right())
s.vars[n] = er
b = s.endBlock()
b.AddEdgeTo(bResult)
s.startBlock(bResult)
- return s.variable(n, types.Types[TBOOL])
- case OCOMPLEX:
- r := s.expr(n.Left)
- i := s.expr(n.Right)
- return s.newValue2(ssa.OpComplexMake, n.Type, r, i)
+ return s.variable(n, types.Types[types.TBOOL])
+ case ir.OCOMPLEX:
+ r := s.expr(n.Left())
+ i := s.expr(n.Right())
+ return s.newValue2(ssa.OpComplexMake, n.Type(), r, i)
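
The OANDAND/OOROR lowering above reads naturally as ordinary Go once the blocks are named: the temporary keyed by the node is written in two different blocks, and the final read becomes a phi. A sketch with hypothetical closures standing in for the operand subexpressions:

```go
package sketch

// andand mirrors the block structure built for a && b: el is written in
// the entry block, er in bRight, and the final read merges them (the phi).
func andand(left, right func() bool) bool {
	v := left() // s.vars[n] = el
	if v {      // b.AddEdgeTo(bRight) on true, bResult on false
		v = right() // s.vars[n] = er
	}
	return v // s.variable(n, TBOOL): phi of el and er
}
```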
// unary ops
- case ONEG:
- a := s.expr(n.Left)
- if n.Type.IsComplex() {
- tp := floatForComplex(n.Type)
- negop := s.ssaOp(n.Op, tp)
- return s.newValue2(ssa.OpComplexMake, n.Type,
+ case ir.ONEG:
+ a := s.expr(n.Left())
+ if n.Type().IsComplex() {
+ tp := floatForComplex(n.Type())
+ negop := s.ssaOp(n.Op(), tp)
+ return s.newValue2(ssa.OpComplexMake, n.Type(),
s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
}
- return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
- case ONOT, OBITNOT:
- a := s.expr(n.Left)
- return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
- case OIMAG, OREAL:
- a := s.expr(n.Left)
- return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a)
- case OPLUS:
- return s.expr(n.Left)
+ return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
+ case ir.ONOT, ir.OBITNOT:
+ a := s.expr(n.Left())
+ return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
+ case ir.OIMAG, ir.OREAL:
+ a := s.expr(n.Left())
+ return s.newValue1(s.ssaOp(n.Op(), n.Left().Type()), n.Type(), a)
+ case ir.OPLUS:
+ return s.expr(n.Left())
- case OADDR:
- return s.addr(n.Left)
+ case ir.OADDR:
+ return s.addr(n.Left())
- case ORESULT:
+ case ir.ORESULT:
if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
// Do the old thing
- addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
- return s.rawLoad(n.Type, addr)
+ addr := s.constOffPtrSP(types.NewPtr(n.Type()), n.Offset())
+ return s.rawLoad(n.Type(), addr)
}
- which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Xoffset)
+ which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Offset())
if which == -1 {
// Do the old thing // TODO: Panic instead.
- addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
- return s.rawLoad(n.Type, addr)
+ addr := s.constOffPtrSP(types.NewPtr(n.Type()), n.Offset())
+ return s.rawLoad(n.Type(), addr)
}
- if canSSAType(n.Type) {
- return s.newValue1I(ssa.OpSelectN, n.Type, which, s.prevCall)
+ if canSSAType(n.Type()) {
+ return s.newValue1I(ssa.OpSelectN, n.Type(), which, s.prevCall)
} else {
- addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(n.Type), which, s.prevCall)
- return s.rawLoad(n.Type, addr)
+ addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(n.Type()), which, s.prevCall)
+ return s.rawLoad(n.Type(), addr)
}
- case ODEREF:
- p := s.exprPtr(n.Left, n.Bounded(), n.Pos)
- return s.load(n.Type, p)
+ case ir.ODEREF:
+ p := s.exprPtr(n.Left(), n.Bounded(), n.Pos())
+ return s.load(n.Type(), p)
- case ODOT:
- if n.Left.Op == OSTRUCTLIT {
+ case ir.ODOT:
+ if n.Left().Op() == ir.OSTRUCTLIT {
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
- if !isZero(n.Left) {
- s.Fatalf("literal with nonzero value in SSA: %v", n.Left)
+ if !isZero(n.Left()) {
+ s.Fatalf("literal with nonzero value in SSA: %v", n.Left())
}
- return s.zeroVal(n.Type)
+ return s.zeroVal(n.Type())
}
// If n is addressable and can't be represented in
// SSA, then load just the selected field. This
@@ -2604,110 +2615,110 @@ func (s *state) expr(n *Node) *ssa.Value {
// instrumentation.
if islvalue(n) && !s.canSSA(n) {
p := s.addr(n)
- return s.load(n.Type, p)
+ return s.load(n.Type(), p)
}
- v := s.expr(n.Left)
- return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v)
+ v := s.expr(n.Left())
+ return s.newValue1I(ssa.OpStructSelect, n.Type(), int64(fieldIdx(n)), v)
- case ODOTPTR:
- p := s.exprPtr(n.Left, n.Bounded(), n.Pos)
- p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type), n.Xoffset, p)
- return s.load(n.Type, p)
+ case ir.ODOTPTR:
+ p := s.exprPtr(n.Left(), n.Bounded(), n.Pos())
+ p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type()), n.Offset(), p)
+ return s.load(n.Type(), p)
- case OINDEX:
+ case ir.OINDEX:
switch {
- case n.Left.Type.IsString():
- if n.Bounded() && Isconst(n.Left, CTSTR) && Isconst(n.Right, CTINT) {
+ case n.Left().Type().IsString():
+ if n.Bounded() && ir.IsConst(n.Left(), constant.String) && ir.IsConst(n.Right(), constant.Int) {
// Replace "abc"[1] with 'b'.
// Delayed until now because "abc"[1] is not an ideal constant.
// See test/fixedbugs/issue11370.go.
- return s.newValue0I(ssa.OpConst8, types.Types[TUINT8], int64(int8(n.Left.StringVal()[n.Right.Int64Val()])))
+ return s.newValue0I(ssa.OpConst8, types.Types[types.TUINT8], int64(int8(n.Left().StringVal()[n.Right().Int64Val()])))
}
- a := s.expr(n.Left)
- i := s.expr(n.Right)
- len := s.newValue1(ssa.OpStringLen, types.Types[TINT], a)
+ a := s.expr(n.Left())
+ i := s.expr(n.Right())
+ len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], a)
i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
ptrtyp := s.f.Config.Types.BytePtr
ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
- if Isconst(n.Right, CTINT) {
- ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64Val(), ptr)
+ if ir.IsConst(n.Right(), constant.Int) {
+ ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right().Int64Val(), ptr)
} else {
ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
}
- return s.load(types.Types[TUINT8], ptr)
- case n.Left.Type.IsSlice():
+ return s.load(types.Types[types.TUINT8], ptr)
+ case n.Left().Type().IsSlice():
p := s.addr(n)
- return s.load(n.Left.Type.Elem(), p)
- case n.Left.Type.IsArray():
- if canSSAType(n.Left.Type) {
+ return s.load(n.Left().Type().Elem(), p)
+ case n.Left().Type().IsArray():
+ if canSSAType(n.Left().Type()) {
// SSA can handle arrays of length at most 1.
- bound := n.Left.Type.NumElem()
- a := s.expr(n.Left)
- i := s.expr(n.Right)
+ bound := n.Left().Type().NumElem()
+ a := s.expr(n.Left())
+ i := s.expr(n.Right())
if bound == 0 {
// Bounds check will never succeed. Might as well
// use constants for the bounds check.
- z := s.constInt(types.Types[TINT], 0)
+ z := s.constInt(types.Types[types.TINT], 0)
s.boundsCheck(z, z, ssa.BoundsIndex, false)
				// The return value won't be live; return junk.
- return s.newValue0(ssa.OpUnknown, n.Type)
+ return s.newValue0(ssa.OpUnknown, n.Type())
}
- len := s.constInt(types.Types[TINT], bound)
+ len := s.constInt(types.Types[types.TINT], bound)
s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) // checks i == 0
- return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a)
+ return s.newValue1I(ssa.OpArraySelect, n.Type(), 0, a)
}
p := s.addr(n)
- return s.load(n.Left.Type.Elem(), p)
+ return s.load(n.Left().Type().Elem(), p)
default:
- s.Fatalf("bad type for index %v", n.Left.Type)
+ s.Fatalf("bad type for index %v", n.Left().Type())
return nil
}
- case OLEN, OCAP:
+ case ir.OLEN, ir.OCAP:
switch {
- case n.Left.Type.IsSlice():
+ case n.Left().Type().IsSlice():
op := ssa.OpSliceLen
- if n.Op == OCAP {
+ if n.Op() == ir.OCAP {
op = ssa.OpSliceCap
}
- return s.newValue1(op, types.Types[TINT], s.expr(n.Left))
- case n.Left.Type.IsString(): // string; not reachable for OCAP
- return s.newValue1(ssa.OpStringLen, types.Types[TINT], s.expr(n.Left))
- case n.Left.Type.IsMap(), n.Left.Type.IsChan():
- return s.referenceTypeBuiltin(n, s.expr(n.Left))
+ return s.newValue1(op, types.Types[types.TINT], s.expr(n.Left()))
+ case n.Left().Type().IsString(): // string; not reachable for OCAP
+ return s.newValue1(ssa.OpStringLen, types.Types[types.TINT], s.expr(n.Left()))
+ case n.Left().Type().IsMap(), n.Left().Type().IsChan():
+ return s.referenceTypeBuiltin(n, s.expr(n.Left()))
default: // array
- return s.constInt(types.Types[TINT], n.Left.Type.NumElem())
+ return s.constInt(types.Types[types.TINT], n.Left().Type().NumElem())
}
- case OSPTR:
- a := s.expr(n.Left)
- if n.Left.Type.IsSlice() {
- return s.newValue1(ssa.OpSlicePtr, n.Type, a)
+ case ir.OSPTR:
+ a := s.expr(n.Left())
+ if n.Left().Type().IsSlice() {
+ return s.newValue1(ssa.OpSlicePtr, n.Type(), a)
} else {
- return s.newValue1(ssa.OpStringPtr, n.Type, a)
+ return s.newValue1(ssa.OpStringPtr, n.Type(), a)
}
- case OITAB:
- a := s.expr(n.Left)
- return s.newValue1(ssa.OpITab, n.Type, a)
+ case ir.OITAB:
+ a := s.expr(n.Left())
+ return s.newValue1(ssa.OpITab, n.Type(), a)
- case OIDATA:
- a := s.expr(n.Left)
- return s.newValue1(ssa.OpIData, n.Type, a)
+ case ir.OIDATA:
+ a := s.expr(n.Left())
+ return s.newValue1(ssa.OpIData, n.Type(), a)
- case OEFACE:
- tab := s.expr(n.Left)
- data := s.expr(n.Right)
- return s.newValue2(ssa.OpIMake, n.Type, tab, data)
+ case ir.OEFACE:
+ tab := s.expr(n.Left())
+ data := s.expr(n.Right())
+ return s.newValue2(ssa.OpIMake, n.Type(), tab, data)
- case OSLICEHEADER:
- p := s.expr(n.Left)
- l := s.expr(n.List.First())
- c := s.expr(n.List.Second())
- return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
+ case ir.OSLICEHEADER:
+ p := s.expr(n.Left())
+ l := s.expr(n.List().First())
+ c := s.expr(n.List().Second())
+ return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
- case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR:
- v := s.expr(n.Left)
+ case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR:
+ v := s.expr(n.Left())
var i, j, k *ssa.Value
low, high, max := n.SliceBounds()
if low != nil {
@@ -2720,10 +2731,10 @@ func (s *state) expr(n *Node) *ssa.Value {
k = s.expr(max)
}
p, l, c := s.slice(v, i, j, k, n.Bounded())
- return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
+ return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
- case OSLICESTR:
- v := s.expr(n.Left)
+ case ir.OSLICESTR:
+ v := s.expr(n.Left())
var i, j *ssa.Value
low, high, _ := n.SliceBounds()
if low != nil {
@@ -2733,42 +2744,42 @@ func (s *state) expr(n *Node) *ssa.Value {
j = s.expr(high)
}
p, l, _ := s.slice(v, i, j, nil, n.Bounded())
- return s.newValue2(ssa.OpStringMake, n.Type, p, l)
+ return s.newValue2(ssa.OpStringMake, n.Type(), p, l)
- case OCALLFUNC:
+ case ir.OCALLFUNC:
if isIntrinsicCall(n) {
return s.intrinsicCall(n)
}
fallthrough
- case OCALLINTER, OCALLMETH:
+ case ir.OCALLINTER, ir.OCALLMETH:
return s.callResult(n, callNormal)
- case OGETG:
- return s.newValue1(ssa.OpGetG, n.Type, s.mem())
+ case ir.OGETG:
+ return s.newValue1(ssa.OpGetG, n.Type(), s.mem())
- case OAPPEND:
+ case ir.OAPPEND:
return s.append(n, false)
- case OSTRUCTLIT, OARRAYLIT:
+ case ir.OSTRUCTLIT, ir.OARRAYLIT:
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
if !isZero(n) {
s.Fatalf("literal with nonzero value in SSA: %v", n)
}
- return s.zeroVal(n.Type)
+ return s.zeroVal(n.Type())
- case ONEWOBJ:
- if n.Type.Elem().Size() == 0 {
- return s.newValue1A(ssa.OpAddr, n.Type, zerobaseSym, s.sb)
+ case ir.ONEWOBJ:
+ if n.Type().Elem().Size() == 0 {
+ return s.newValue1A(ssa.OpAddr, n.Type(), zerobaseSym, s.sb)
}
- typ := s.expr(n.Left)
- vv := s.rtcall(newobject, true, []*types.Type{n.Type}, typ)
+ typ := s.expr(n.Left())
+ vv := s.rtcall(newobject, true, []*types.Type{n.Type()}, typ)
return vv[0]
default:
- s.Fatalf("unhandled expr %v", n.Op)
+ s.Fatalf("unhandled expr %v", n.Op())
return nil
}
}
@@ -2779,7 +2790,7 @@ func (s *state) expr(n *Node) *ssa.Value {
// If inplace is true, it writes the result of the OAPPEND expression n
// back to the slice being appended to, and returns nil.
// inplace MUST be set to false if the slice can be SSA'd.
-func (s *state) append(n *Node, inplace bool) *ssa.Value {
+func (s *state) append(n ir.Node, inplace bool) *ssa.Value {
// If inplace is false, process as expression "append(s, e1, e2, e3)":
//
// ptr, len, cap := s
@@ -2813,16 +2824,16 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
// *(ptr+len+1) = e2
// *(ptr+len+2) = e3
- et := n.Type.Elem()
+ et := n.Type().Elem()
pt := types.NewPtr(et)
// Evaluate slice
- sn := n.List.First() // the slice node is the first in the list
+ sn := n.List().First() // the slice node is the first in the list
var slice, addr *ssa.Value
if inplace {
addr = s.addr(sn)
- slice = s.load(n.Type, addr)
+ slice = s.load(n.Type(), addr)
} else {
slice = s.expr(sn)
}
@@ -2832,20 +2843,20 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
assign := s.f.NewBlock(ssa.BlockPlain)
// Decide if we need to grow
- nargs := int64(n.List.Len() - 1)
+ nargs := int64(n.List().Len() - 1)
p := s.newValue1(ssa.OpSlicePtr, pt, slice)
- l := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
- c := s.newValue1(ssa.OpSliceCap, types.Types[TINT], slice)
- nl := s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
+ l := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
+ c := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], slice)
+ nl := s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, s.constInt(types.Types[types.TINT], nargs))
- cmp := s.newValue2(s.ssaOp(OLT, types.Types[TUINT]), types.Types[TBOOL], c, nl)
- s.vars[&ptrVar] = p
+ cmp := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT]), types.Types[types.TBOOL], c, nl)
+ s.vars[ptrVar] = p
if !inplace {
- s.vars[&newlenVar] = nl
- s.vars[&capVar] = c
+ s.vars[newlenVar] = nl
+ s.vars[capVar] = c
} else {
- s.vars[&lenVar] = l
+ s.vars[lenVar] = l
}
b := s.endBlock()
@@ -2857,24 +2868,24 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
// Call growslice
s.startBlock(grow)
- taddr := s.expr(n.Left)
- r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[TINT], types.Types[TINT]}, taddr, p, l, c, nl)
+ taddr := s.expr(n.Left())
+ r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[types.TINT], types.Types[types.TINT]}, taddr, p, l, c, nl)
if inplace {
- if sn.Op == ONAME && sn.Class() != PEXTERN {
+ if sn.Op() == ir.ONAME && sn.Class() != ir.PEXTERN {
// Tell liveness we're about to build a new slice
- s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
}
capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, sliceCapOffset, addr)
- s.store(types.Types[TINT], capaddr, r[2])
+ s.store(types.Types[types.TINT], capaddr, r[2])
s.store(pt, addr, r[0])
// load the value we just stored to avoid having to spill it
- s.vars[&ptrVar] = s.load(pt, addr)
- s.vars[&lenVar] = r[1] // avoid a spill in the fast path
+ s.vars[ptrVar] = s.load(pt, addr)
+ s.vars[lenVar] = r[1] // avoid a spill in the fast path
} else {
- s.vars[&ptrVar] = r[0]
- s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], r[1], s.constInt(types.Types[TINT], nargs))
- s.vars[&capVar] = r[2]
+ s.vars[ptrVar] = r[0]
+ s.vars[newlenVar] = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], r[1], s.constInt(types.Types[types.TINT], nargs))
+ s.vars[capVar] = r[2]
}
b = s.endBlock()
@@ -2884,10 +2895,10 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
s.startBlock(assign)
if inplace {
- l = s.variable(&lenVar, types.Types[TINT]) // generates phi for len
- nl = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
+ l = s.variable(lenVar, types.Types[types.TINT]) // generates phi for len
+ nl = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, s.constInt(types.Types[types.TINT], nargs))
lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, sliceLenOffset, addr)
- s.store(types.Types[TINT], lenaddr, nl)
+ s.store(types.Types[types.TINT], lenaddr, nl)
}
// Evaluate args
@@ -2898,8 +2909,8 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
store bool
}
args := make([]argRec, 0, nargs)
- for _, n := range n.List.Slice()[1:] {
- if canSSAType(n.Type) {
+ for _, n := range n.List().Slice()[1:] {
+ if canSSAType(n.Type()) {
args = append(args, argRec{v: s.expr(n), store: true})
} else {
v := s.addr(n)
@@ -2907,14 +2918,14 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
}
}
- p = s.variable(&ptrVar, pt) // generates phi for ptr
+ p = s.variable(ptrVar, pt) // generates phi for ptr
if !inplace {
- nl = s.variable(&newlenVar, types.Types[TINT]) // generates phi for nl
- c = s.variable(&capVar, types.Types[TINT]) // generates phi for cap
+ nl = s.variable(newlenVar, types.Types[types.TINT]) // generates phi for nl
+ c = s.variable(capVar, types.Types[types.TINT]) // generates phi for cap
}
p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
for i, arg := range args {
- addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[TINT], int64(i)))
+ addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[types.TINT], int64(i)))
if arg.store {
s.storeType(et, addr, arg.v, 0, true)
} else {
@@ -2922,29 +2933,29 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
}
}
- delete(s.vars, &ptrVar)
+ delete(s.vars, ptrVar)
if inplace {
- delete(s.vars, &lenVar)
+ delete(s.vars, lenVar)
return nil
}
- delete(s.vars, &newlenVar)
- delete(s.vars, &capVar)
+ delete(s.vars, newlenVar)
+ delete(s.vars, capVar)
// make result
- return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c)
+ return s.newValue3(ssa.OpSliceMake, n.Type(), p, nl, c)
}
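
For reference, the non-inplace expansion that append implements corresponds to source along these lines; growCap is a hypothetical stand-in for runtime.growslice's capacity policy, and the real code keeps ptr/len/cap as separate SSA values rather than a slice header:

```go
package sketch

func growCap(need int) int { return 2 * need } // hypothetical policy

// appendExpansion spells out append(s, es...) the way the SSA expansion
// does: compare cap against the new length as unsigned ints, grow if
// needed, then store each argument at ptr+len+i.
func appendExpansion(s []int, es ...int) []int {
	nl := len(s) + len(es)       // nl = l + nargs
	if uint(cap(s)) < uint(nl) { // cmp: OLT on TUINT
		t := make([]int, len(s), growCap(nl)) // growslice
		copy(t, s)
		s = t
	}
	s = s[:nl]
	for i, e := range es {
		s[nl-len(es)+i] = e // *(ptr+len+i) = e_i
	}
	return s
}
```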
// condBranch evaluates the boolean expression cond and branches to yes
// if cond is true and no if cond is false.
// This function is intended to handle && and || better than just calling
// s.expr(cond) and branching on the result.
-func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
- switch cond.Op {
- case OANDAND:
+func (s *state) condBranch(cond ir.Node, yes, no *ssa.Block, likely int8) {
+ switch cond.Op() {
+ case ir.OANDAND:
mid := s.f.NewBlock(ssa.BlockPlain)
- s.stmtList(cond.Ninit)
- s.condBranch(cond.Left, mid, no, max8(likely, 0))
+ s.stmtList(cond.Init())
+ s.condBranch(cond.Left(), mid, no, max8(likely, 0))
s.startBlock(mid)
- s.condBranch(cond.Right, yes, no, likely)
+ s.condBranch(cond.Right(), yes, no, likely)
return
// Note: if likely==1, then both recursive calls pass 1.
// If likely==-1, then we don't have enough information to decide
@@ -2952,19 +2963,19 @@ func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
		// the likelihood of the first branch.
// TODO: have the frontend give us branch prediction hints for
// OANDAND and OOROR nodes (if it ever has such info).
- case OOROR:
+ case ir.OOROR:
mid := s.f.NewBlock(ssa.BlockPlain)
- s.stmtList(cond.Ninit)
- s.condBranch(cond.Left, yes, mid, min8(likely, 0))
+ s.stmtList(cond.Init())
+ s.condBranch(cond.Left(), yes, mid, min8(likely, 0))
s.startBlock(mid)
- s.condBranch(cond.Right, yes, no, likely)
+ s.condBranch(cond.Right(), yes, no, likely)
return
// Note: if likely==-1, then both recursive calls pass -1.
// If likely==1, then we don't have enough info to decide
// the likelihood of the first branch.
- case ONOT:
- s.stmtList(cond.Ninit)
- s.condBranch(cond.Left, no, yes, -likely)
+ case ir.ONOT:
+ s.stmtList(cond.Init())
+ s.condBranch(cond.Left(), no, yes, -likely)
return
}
c := s.expr(cond)
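
The OANDAND case above splits `if a && b` into two chained branches with a shared false target; written out as Go with closures for the operands:

```go
package sketch

// condBranchAndAnd mirrors the OANDAND case: evaluate the left operand,
// fall through to a middle block for the right operand only when it is
// true, and route both failure edges to the same "no" target.
func condBranchAndAnd(left, right func() bool, yes, no func()) {
	if !left() { // condBranch(cond.Left(), mid, no, max8(likely, 0))
		no()
		return
	}
	if !right() { // condBranch(cond.Right(), yes, no, likely), in block mid
		no()
		return
	}
	yes()
}
```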
@@ -2989,17 +3000,17 @@ const (
// If deref is true, then we do left = *right instead (and right has already been nil-checked).
// If deref is true and right == nil, just do left = 0.
// skip indicates assignments (at the top level) that can be avoided.
-func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) {
- if left.Op == ONAME && left.isBlank() {
+func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask) {
+ if left.Op() == ir.ONAME && ir.IsBlank(left) {
return
}
- t := left.Type
+ t := left.Type()
dowidth(t)
if s.canSSA(left) {
if deref {
s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
}
- if left.Op == ODOT {
+ if left.Op() == ir.ODOT {
// We're assigning to a field of an ssa-able value.
// We need to build a new structure with the new value for the
// field we're assigning and the old values for the other fields.
@@ -3010,12 +3021,12 @@ func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask)
// For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
// Grab information about the structure type.
- t := left.Left.Type
+ t := left.Left().Type()
nf := t.NumFields()
idx := fieldIdx(left)
// Grab old value of structure.
- old := s.expr(left.Left)
+ old := s.expr(left.Left())
// Make new structure.
new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
@@ -3030,23 +3041,23 @@ func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask)
}
// Recursively assign the new value we've made to the base of the dot op.
- s.assign(left.Left, new, false, 0)
+ s.assign(left.Left(), new, false, 0)
// TODO: do we need to update named values here?
return
}
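
The ODOT rewrite has a direct source-level reading: assigning one field of an SSA-able struct value rebuilds the entire value. With a hypothetical three-field struct matching the comment's example:

```go
package sketch

type T struct{ a, b, c int }

// setB is what `x.b = v` becomes when x lives in SSA form: a fresh
// StructMake with the old values for every field except the one assigned.
func setB(x T, v int) T {
	return T{x.a, v, x.c} // x = T{x.a, 5, x.c} in the comment's example
}
```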
- if left.Op == OINDEX && left.Left.Type.IsArray() {
- s.pushLine(left.Pos)
+ if left.Op() == ir.OINDEX && left.Left().Type().IsArray() {
+ s.pushLine(left.Pos())
defer s.popLine()
// We're assigning to an element of an ssa-able array.
// a[i] = v
- t := left.Left.Type
+ t := left.Left().Type()
n := t.NumElem()
- i := s.expr(left.Right) // index
+ i := s.expr(left.Right()) // index
if n == 0 {
// The bounds check must fail. Might as well
// ignore the actual index and just use zeros.
- z := s.constInt(types.Types[TINT], 0)
+ z := s.constInt(types.Types[types.TINT], 0)
s.boundsCheck(z, z, ssa.BoundsIndex, false)
return
}
@@ -3054,10 +3065,10 @@ func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask)
s.Fatalf("assigning to non-1-length array")
}
// Rewrite to a = [1]{v}
- len := s.constInt(types.Types[TINT], 1)
+ len := s.constInt(types.Types[types.TINT], 1)
s.boundsCheck(i, len, ssa.BoundsIndex, false) // checks i == 0
v := s.newValue1(ssa.OpArrayMake1, t, right)
- s.assign(left.Left, v, false, 0)
+ s.assign(left.Left(), v, false, 0)
return
}
// Update variable assignment.
@@ -3068,8 +3079,8 @@ func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask)
// If this assignment clobbers an entire local variable, then emit
// OpVarDef so liveness analysis knows the variable is redefined.
- if base := clobberBase(left); base.Op == ONAME && base.Class() != PEXTERN && skip == 0 {
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !base.IsAutoTmp())
+ if base := clobberBase(left); base.Op() == ir.ONAME && base.Class() != ir.PEXTERN && skip == 0 {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !ir.IsAutoTmp(base))
}
// Left is not ssa-able. Compute its address.
@@ -3080,7 +3091,7 @@ func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask)
// is valid, even though they have type uintptr (#19168).
// Mark it pointer type to signal the writebarrier pass to
// insert a write barrier.
- t = types.Types[TUNSAFEPTR]
+ t = types.Types[types.TUNSAFEPTR]
}
if deref {
// Treat as a mem->mem move.
@@ -3092,7 +3103,7 @@ func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask)
return
}
// Treat as a store.
- s.storeType(t, addr, right, skip, !left.IsAutoTmp())
+ s.storeType(t, addr, right, skip, !ir.IsAutoTmp(left))
}
// zeroVal returns the zero value for type t.
@@ -3123,10 +3134,10 @@ func (s *state) zeroVal(t *types.Type) *ssa.Value {
case t.IsComplex():
switch t.Size() {
case 8:
- z := s.constFloat32(types.Types[TFLOAT32], 0)
+ z := s.constFloat32(types.Types[types.TFLOAT32], 0)
return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
case 16:
- z := s.constFloat64(types.Types[TFLOAT64], 0)
+ z := s.constFloat64(types.Types[types.TFLOAT64], 0)
return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
default:
s.Fatalf("bad sized complex type %v", t)
@@ -3180,38 +3191,38 @@ var softFloatOps map[ssa.Op]sfRtCallDef
func softfloatInit() {
// Some of these operations get transformed by sfcall.
softFloatOps = map[ssa.Op]sfRtCallDef{
- ssa.OpAdd32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32},
- ssa.OpAdd64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64},
- ssa.OpSub32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32},
- ssa.OpSub64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64},
- ssa.OpMul32F: sfRtCallDef{sysfunc("fmul32"), TFLOAT32},
- ssa.OpMul64F: sfRtCallDef{sysfunc("fmul64"), TFLOAT64},
- ssa.OpDiv32F: sfRtCallDef{sysfunc("fdiv32"), TFLOAT32},
- ssa.OpDiv64F: sfRtCallDef{sysfunc("fdiv64"), TFLOAT64},
-
- ssa.OpEq64F: sfRtCallDef{sysfunc("feq64"), TBOOL},
- ssa.OpEq32F: sfRtCallDef{sysfunc("feq32"), TBOOL},
- ssa.OpNeq64F: sfRtCallDef{sysfunc("feq64"), TBOOL},
- ssa.OpNeq32F: sfRtCallDef{sysfunc("feq32"), TBOOL},
- ssa.OpLess64F: sfRtCallDef{sysfunc("fgt64"), TBOOL},
- ssa.OpLess32F: sfRtCallDef{sysfunc("fgt32"), TBOOL},
- ssa.OpLeq64F: sfRtCallDef{sysfunc("fge64"), TBOOL},
- ssa.OpLeq32F: sfRtCallDef{sysfunc("fge32"), TBOOL},
-
- ssa.OpCvt32to32F: sfRtCallDef{sysfunc("fint32to32"), TFLOAT32},
- ssa.OpCvt32Fto32: sfRtCallDef{sysfunc("f32toint32"), TINT32},
- ssa.OpCvt64to32F: sfRtCallDef{sysfunc("fint64to32"), TFLOAT32},
- ssa.OpCvt32Fto64: sfRtCallDef{sysfunc("f32toint64"), TINT64},
- ssa.OpCvt64Uto32F: sfRtCallDef{sysfunc("fuint64to32"), TFLOAT32},
- ssa.OpCvt32Fto64U: sfRtCallDef{sysfunc("f32touint64"), TUINT64},
- ssa.OpCvt32to64F: sfRtCallDef{sysfunc("fint32to64"), TFLOAT64},
- ssa.OpCvt64Fto32: sfRtCallDef{sysfunc("f64toint32"), TINT32},
- ssa.OpCvt64to64F: sfRtCallDef{sysfunc("fint64to64"), TFLOAT64},
- ssa.OpCvt64Fto64: sfRtCallDef{sysfunc("f64toint64"), TINT64},
- ssa.OpCvt64Uto64F: sfRtCallDef{sysfunc("fuint64to64"), TFLOAT64},
- ssa.OpCvt64Fto64U: sfRtCallDef{sysfunc("f64touint64"), TUINT64},
- ssa.OpCvt32Fto64F: sfRtCallDef{sysfunc("f32to64"), TFLOAT64},
- ssa.OpCvt64Fto32F: sfRtCallDef{sysfunc("f64to32"), TFLOAT32},
+ ssa.OpAdd32F: sfRtCallDef{sysfunc("fadd32"), types.TFLOAT32},
+ ssa.OpAdd64F: sfRtCallDef{sysfunc("fadd64"), types.TFLOAT64},
+ ssa.OpSub32F: sfRtCallDef{sysfunc("fadd32"), types.TFLOAT32},
+ ssa.OpSub64F: sfRtCallDef{sysfunc("fadd64"), types.TFLOAT64},
+ ssa.OpMul32F: sfRtCallDef{sysfunc("fmul32"), types.TFLOAT32},
+ ssa.OpMul64F: sfRtCallDef{sysfunc("fmul64"), types.TFLOAT64},
+ ssa.OpDiv32F: sfRtCallDef{sysfunc("fdiv32"), types.TFLOAT32},
+ ssa.OpDiv64F: sfRtCallDef{sysfunc("fdiv64"), types.TFLOAT64},
+
+ ssa.OpEq64F: sfRtCallDef{sysfunc("feq64"), types.TBOOL},
+ ssa.OpEq32F: sfRtCallDef{sysfunc("feq32"), types.TBOOL},
+ ssa.OpNeq64F: sfRtCallDef{sysfunc("feq64"), types.TBOOL},
+ ssa.OpNeq32F: sfRtCallDef{sysfunc("feq32"), types.TBOOL},
+ ssa.OpLess64F: sfRtCallDef{sysfunc("fgt64"), types.TBOOL},
+ ssa.OpLess32F: sfRtCallDef{sysfunc("fgt32"), types.TBOOL},
+ ssa.OpLeq64F: sfRtCallDef{sysfunc("fge64"), types.TBOOL},
+ ssa.OpLeq32F: sfRtCallDef{sysfunc("fge32"), types.TBOOL},
+
+ ssa.OpCvt32to32F: sfRtCallDef{sysfunc("fint32to32"), types.TFLOAT32},
+ ssa.OpCvt32Fto32: sfRtCallDef{sysfunc("f32toint32"), types.TINT32},
+ ssa.OpCvt64to32F: sfRtCallDef{sysfunc("fint64to32"), types.TFLOAT32},
+ ssa.OpCvt32Fto64: sfRtCallDef{sysfunc("f32toint64"), types.TINT64},
+ ssa.OpCvt64Uto32F: sfRtCallDef{sysfunc("fuint64to32"), types.TFLOAT32},
+ ssa.OpCvt32Fto64U: sfRtCallDef{sysfunc("f32touint64"), types.TUINT64},
+ ssa.OpCvt32to64F: sfRtCallDef{sysfunc("fint32to64"), types.TFLOAT64},
+ ssa.OpCvt64Fto32: sfRtCallDef{sysfunc("f64toint32"), types.TINT32},
+ ssa.OpCvt64to64F: sfRtCallDef{sysfunc("fint64to64"), types.TFLOAT64},
+ ssa.OpCvt64Fto64: sfRtCallDef{sysfunc("f64toint64"), types.TINT64},
+ ssa.OpCvt64Uto64F: sfRtCallDef{sysfunc("fuint64to64"), types.TFLOAT64},
+ ssa.OpCvt64Fto64U: sfRtCallDef{sysfunc("f64touint64"), types.TUINT64},
+ ssa.OpCvt32Fto64F: sfRtCallDef{sysfunc("f32to64"), types.TFLOAT64},
+ ssa.OpCvt64Fto32F: sfRtCallDef{sysfunc("f64to32"), types.TFLOAT32},
}
}
@@ -3227,7 +3238,7 @@ func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) {
args[0], args[1] = args[1], args[0]
case ssa.OpSub32F,
ssa.OpSub64F:
- args[1] = s.newValue1(s.ssaOp(ONEG, types.Types[callDef.rtype]), args[1].Type, args[1])
+ args[1] = s.newValue1(s.ssaOp(ir.ONEG, types.Types[callDef.rtype]), args[1].Type, args[1])
}
result := s.rtcall(callDef.rtfn, true, []*types.Type{types.Types[callDef.rtype]}, args...)[0]
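
Worth noting while reading the table above: the Sub entries deliberately reuse the fadd helpers, and the Neq/Less/Leq entries reuse feq/fgt/fge. sfcall rewrites the operation before dispatching; the operand swap at the top of the function covers Less/Leq, the ONEG rewrite shown here turns subtraction into addition, and the Neq result is inverted after the call (not visible in this hunk). A small standalone program, separate from this CL, checking the identities being relied on:

package main

import "fmt"

func main() {
	a, b := 1.5, 2.25
	// Identities the softfloat table relies on: subtraction is addition
	// of the negation, and !=, <, <= derive from ==, >, >=.
	fmt.Println(a-b == a+(-b))         // true
	fmt.Println((a != b) == !(a == b)) // true
	fmt.Println((a < b) == (b > a))    // true
	fmt.Println((a <= b) == (b >= a))  // true
}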
@@ -3243,7 +3254,7 @@ var intrinsics map[intrinsicKey]intrinsicBuilder
// An intrinsicBuilder converts a call node n into an ssa value that
// implements that call as an intrinsic. args is a list of arguments to the func.
-type intrinsicBuilder func(s *state, n *Node, args []*ssa.Value) *ssa.Value
+type intrinsicBuilder func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value
type intrinsicKey struct {
arch *sys.Arch
@@ -3308,173 +3319,173 @@ func init() {
/******** runtime ********/
if !instrumenting {
add("runtime", "slicebytetostringtmp",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
// Compiler frontend optimizations emit OBYTES2STRTMP nodes
// for the backend instead of slicebytetostringtmp calls
// when not instrumenting.
- return s.newValue2(ssa.OpStringMake, n.Type, args[0], args[1])
+ return s.newValue2(ssa.OpStringMake, n.Type(), args[0], args[1])
},
all...)
}
addF("runtime/internal/math", "MulUintptr",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
- return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[TUINT], types.Types[TUINT]), args[0], args[1])
+ return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
}
- return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[TUINT], types.Types[TUINT]), args[0], args[1])
+ return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
},
sys.AMD64, sys.I386, sys.MIPS64)
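
For context, MulUintptr reports a full-width product plus an overflow flag, which is exactly the tuple OpMul32uover/OpMul64uover produce. A standalone sketch of the same semantics, reconstructed portably with math/bits (mulUintptr here is an illustrative stand-in, not the runtime's actual code):

package main

import (
	"fmt"
	"math/bits"
)

// mulUintptr mirrors the intrinsic's result tuple: the low word of the
// product plus an overflow flag derived from the high word.
func mulUintptr(a, b uint64) (uint64, bool) {
	hi, lo := bits.Mul64(a, b)
	return lo, hi != 0
}

func main() {
	fmt.Println(mulUintptr(1<<32, 1<<31)) // 9223372036854775808 false
	fmt.Println(mulUintptr(1<<32, 1<<32)) // 0 true
}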
add("runtime", "KeepAlive",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
- s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
+ s.vars[memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
return nil
},
all...)
add("runtime", "getclosureptr",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr)
},
all...)
add("runtime", "getcallerpc",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr)
},
all...)
add("runtime", "getcallersp",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr)
},
all...)
/******** runtime/internal/sys ********/
addF("runtime/internal/sys", "Ctz32",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/sys", "Ctz64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/sys", "Bswap32",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBswap32, types.Types[TUINT32], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBswap32, types.Types[types.TUINT32], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
addF("runtime/internal/sys", "Bswap64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBswap64, types.Types[TUINT64], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBswap64, types.Types[types.TUINT64], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
/******** runtime/internal/atomic ********/
addF("runtime/internal/atomic", "Load",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
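
All of the atomic load/store/RMW builders above and below follow one shape: the SSA op yields a (result, memory) tuple, Select1 re-threads the memory state (now keyed by the memVar value itself rather than its address, per this CL's ir.Node migration), and Select0 extracts the result. Spelled out once for the two-operand case, with op, resultType, ptr and result as placeholder names:

	v := s.newValue2(op, types.NewTuple(resultType, types.TypeMem), ptr, s.mem())
	s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) // new memory state
	result := s.newValue1(ssa.OpSelect0, resultType, v)           // the loaded/old value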
addF("runtime/internal/atomic", "Load8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[TUINT8], types.TypeMem), args[0], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT8], v)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[types.TUINT8], types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT8], v)
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Load64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "LoadAcq",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "LoadAcq64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.PPC64)
addF("runtime/internal/atomic", "Loadp",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "StorepNoWB",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "StoreRel",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "StoreRel64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.PPC64)
addF("runtime/internal/atomic", "Xchg",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Xchg64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
- type atomicOpEmitter func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType)
+ type atomicOpEmitter func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType)
makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.EType, emit atomicOpEmitter) intrinsicBuilder {
- return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
			// Whether the target supports the atomics extension is detected dynamically
- addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), arm64HasATOMICS, s.sb)
- v := s.load(types.Types[TBOOL], addr)
+ addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), arm64HasATOMICS, s.sb)
+ v := s.load(types.Types[types.TBOOL], addr)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
@@ -3497,7 +3508,7 @@ func init() {
// Merge results.
s.startBlock(bEnd)
- if rtyp == TNIL {
+ if rtyp == types.TNIL {
return nil
} else {
return s.variable(n, types.Types[rtyp])
@@ -3505,115 +3516,115 @@ func init() {
}
}
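
The guarded builder above emits an if/else diamond around a flag loaded at function entry. A runnable sketch of that shape, with hasLSE standing in for arm64HasATOMICS (the detection itself is hypothetical here):

package main

import (
	"fmt"
	"sync/atomic"
)

// hasLSE stands in for the arm64HasATOMICS flag the generated code
// loads at entry; real detection happens at runtime startup.
var hasLSE = false

// swapGuarded shows the control flow the builder emits: both arms
// compute the same result, and only the instruction sequence differs
// on real hardware (a single LSE instruction vs. an LL/SC loop).
func swapGuarded(p *uint32, v uint32) uint32 {
	if hasLSE {
		return atomic.SwapUint32(p, v) // would lower to the *Variant (LSE) op
	}
	return atomic.SwapUint32(p, v) // would lower to the ordinary (LL/SC) op
}

func main() {
	x := uint32(1)
	old := swapGuarded(&x, 2)
	fmt.Println(old, x) // 1 2
}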
- atomicXchgXaddEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
+ atomicXchgXaddEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
}
addF("runtime/internal/atomic", "Xchg",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange32, ssa.OpAtomicExchange32Variant, TUINT32, TUINT32, atomicXchgXaddEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange32, ssa.OpAtomicExchange32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Xchg64",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange64, ssa.OpAtomicExchange64Variant, TUINT64, TUINT64, atomicXchgXaddEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange64, ssa.OpAtomicExchange64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Xadd",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Xadd64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Xadd",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, TUINT32, TUINT32, atomicXchgXaddEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Xadd64",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, TUINT64, TUINT64, atomicXchgXaddEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Cas",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
},
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Cas64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
},
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "CasRel",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
},
sys.PPC64)
- atomicCasEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
- v := s.newValue4(op, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ atomicCasEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
+ v := s.newValue4(op, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
}
addF("runtime/internal/atomic", "Cas",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap32, ssa.OpAtomicCompareAndSwap32Variant, TUINT32, TBOOL, atomicCasEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap32, ssa.OpAtomicCompareAndSwap32Variant, types.TUINT32, types.TBOOL, atomicCasEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Cas64",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap64, ssa.OpAtomicCompareAndSwap64Variant, TUINT64, TBOOL, atomicCasEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap64, ssa.OpAtomicCompareAndSwap64Variant, types.TUINT64, types.TBOOL, atomicCasEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "And8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "And",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "Or8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "Or",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
- atomicAndOrEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
- s.vars[&memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem())
+ atomicAndOrEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
+ s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem())
}
addF("runtime/internal/atomic", "And8",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd8, ssa.OpAtomicAnd8Variant, TNIL, TNIL, atomicAndOrEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd8, ssa.OpAtomicAnd8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "And",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, TNIL, TNIL, atomicAndOrEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Or8",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr8, ssa.OpAtomicOr8Variant, TNIL, TNIL, atomicAndOrEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr8, ssa.OpAtomicOr8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Or",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, TNIL, TNIL, atomicAndOrEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
@@ -3648,57 +3659,57 @@ func init() {
/******** math ********/
addF("math", "Sqrt",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpSqrt, types.Types[TFLOAT64], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpSqrt, types.Types[types.TFLOAT64], args[0])
},
sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm)
addF("math", "Trunc",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpTrunc, types.Types[TFLOAT64], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpTrunc, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
addF("math", "Ceil",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpCeil, types.Types[TFLOAT64], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCeil, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
addF("math", "Floor",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpFloor, types.Types[TFLOAT64], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpFloor, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
addF("math", "Round",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpRound, types.Types[TFLOAT64], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpRound, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X)
addF("math", "RoundToEven",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpRoundToEven, types.Types[TFLOAT64], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpRoundToEven, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.S390X, sys.Wasm)
addF("math", "Abs",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpAbs, types.Types[TFLOAT64], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpAbs, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.ARM, sys.PPC64, sys.Wasm)
addF("math", "Copysign",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue2(ssa.OpCopysign, types.Types[TFLOAT64], args[0], args[1])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpCopysign, types.Types[types.TFLOAT64], args[0], args[1])
},
sys.PPC64, sys.Wasm)
addF("math", "FMA",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue3(ssa.OpFMA, types.Types[TFLOAT64], args[0], args[1], args[2])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
},
sys.ARM64, sys.PPC64, sys.S390X)
addF("math", "FMA",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if !s.config.UseFMA {
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
- return s.variable(n, types.Types[TFLOAT64])
+ return s.variable(n, types.Types[types.TFLOAT64])
}
- v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[TBOOL], x86HasFMA)
+ v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasFMA)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
@@ -3711,7 +3722,7 @@ func init() {
// We have the intrinsic - use it directly.
s.startBlock(bTrue)
- s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[TFLOAT64], args[0], args[1], args[2])
+ s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
@@ -3721,17 +3732,17 @@ func init() {
// Merge results.
s.startBlock(bEnd)
- return s.variable(n, types.Types[TFLOAT64])
+ return s.variable(n, types.Types[types.TFLOAT64])
},
sys.AMD64)
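
The UseFMA guard exists because fused and unfused results can legitimately differ, so the choice must be made deterministically. A standalone demonstration of the difference (FMA rounds once, x*y+z rounds twice):

package main

import (
	"fmt"
	"math"
)

func main() {
	x := 1 + 0x1p-52
	y := 1 - 0x1p-52
	// The exact product is 1 - 2^-104; the unfused path rounds it to
	// 1.0 before subtracting, losing the tail that FMA preserves.
	fmt.Println(math.FMA(x, y, -1)) // -4.930380657631324e-32
	fmt.Println(x*y - 1)            // 0
}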
addF("math", "FMA",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if !s.config.UseFMA {
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
- return s.variable(n, types.Types[TFLOAT64])
+ return s.variable(n, types.Types[types.TFLOAT64])
}
- addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), armHasVFPv4, s.sb)
- v := s.load(types.Types[TBOOL], addr)
+ addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), armHasVFPv4, s.sb)
+ v := s.load(types.Types[types.TBOOL], addr)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
@@ -3744,7 +3755,7 @@ func init() {
// We have the intrinsic - use it directly.
s.startBlock(bTrue)
- s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[TFLOAT64], args[0], args[1], args[2])
+ s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
@@ -3754,13 +3765,13 @@ func init() {
// Merge results.
s.startBlock(bEnd)
- return s.variable(n, types.Types[TFLOAT64])
+ return s.variable(n, types.Types[types.TFLOAT64])
},
sys.ARM)
- makeRoundAMD64 := func(op ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[TBOOL], x86HasSSE41)
+ makeRoundAMD64 := func(op ssa.Op) func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasSSE41)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
@@ -3773,7 +3784,7 @@ func init() {
// We have the intrinsic - use it directly.
s.startBlock(bTrue)
- s.vars[n] = s.newValue1(op, types.Types[TFLOAT64], args[0])
+ s.vars[n] = s.newValue1(op, types.Types[types.TFLOAT64], args[0])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
@@ -3783,7 +3794,7 @@ func init() {
// Merge results.
s.startBlock(bEnd)
- return s.variable(n, types.Types[TFLOAT64])
+ return s.variable(n, types.Types[types.TFLOAT64])
}
}
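
Both arms of the SSE4.1 diamond must produce identical results. A quick standalone check of RoundToEven's halfway behavior, independent of this CL:

package main

import (
	"fmt"
	"math"
)

func main() {
	// Halfway cases go to the nearest even integer on either path,
	// whether ROUNDSD or the pure Go fallback runs.
	for _, v := range []float64{0.5, 1.5, 2.5, -1.5} {
		fmt.Println(math.RoundToEven(v)) // 0 2 2 -2
	}
}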
addF("math", "RoundToEven",
@@ -3801,55 +3812,55 @@ func init() {
/******** math/bits ********/
addF("math/bits", "TrailingZeros64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "TrailingZeros32",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "TrailingZeros16",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0])
- c := s.constInt32(types.Types[TUINT32], 1<<16)
- y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c)
- return s.newValue1(ssa.OpCtz32, types.Types[TINT], y)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
+ c := s.constInt32(types.Types[types.TUINT32], 1<<16)
+ y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
+ return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
},
sys.MIPS)
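
The MIPS lowering above widens to 32 bits and ORs in 1<<16 so the ctz input can never be zero; the S390X/PPC64 variant just below does the same at 64 bits. A standalone check of the trick:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// OR-ing in 1<<16 guarantees a nonzero 32-bit value, so a 32-bit
	// count-trailing-zeros yields exactly 16 when the uint16 was zero.
	for _, x := range []uint16{0, 1, 0x8000} {
		y := uint32(x) | 1<<16
		fmt.Println(bits.TrailingZeros32(y)) // 16, 0, 15
	}
}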
addF("math/bits", "TrailingZeros16",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpCtz16, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz16, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.I386, sys.ARM, sys.ARM64, sys.Wasm)
addF("math/bits", "TrailingZeros16",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0])
- c := s.constInt64(types.Types[TUINT64], 1<<16)
- y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c)
- return s.newValue1(ssa.OpCtz64, types.Types[TINT], y)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
+ c := s.constInt64(types.Types[types.TUINT64], 1<<16)
+ y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
+ return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
},
sys.S390X, sys.PPC64)
addF("math/bits", "TrailingZeros8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0])
- c := s.constInt32(types.Types[TUINT32], 1<<8)
- y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c)
- return s.newValue1(ssa.OpCtz32, types.Types[TINT], y)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
+ c := s.constInt32(types.Types[types.TUINT32], 1<<8)
+ y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
+ return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
},
sys.MIPS)
addF("math/bits", "TrailingZeros8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpCtz8, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz8, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM, sys.ARM64, sys.Wasm)
addF("math/bits", "TrailingZeros8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0])
- c := s.constInt64(types.Types[TUINT64], 1<<8)
- y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c)
- return s.newValue1(ssa.OpCtz64, types.Types[TINT], y)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
+ c := s.constInt64(types.Types[types.TUINT64], 1<<8)
+ y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
+ return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
},
sys.S390X)
alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...)
@@ -3857,116 +3868,116 @@ func init() {
// ReverseBytes inlines correctly, no need to intrinsify it.
// ReverseBytes16 lowers to a rotate, no need for anything special here.
addF("math/bits", "Len64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len32",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64)
addF("math/bits", "Len32",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
- return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
+ return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
}
- x := s.newValue1(ssa.OpZeroExt32to64, types.Types[TUINT64], args[0])
- return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
+ x := s.newValue1(ssa.OpZeroExt32to64, types.Types[types.TUINT64], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
},
sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len16",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
- x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0])
- return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x)
+ x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
+ return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
}
- x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0])
- return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
+ x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
},
sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len16",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitLen16, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitLen16, types.Types[types.TINT], args[0])
},
sys.AMD64)
addF("math/bits", "Len8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
- x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0])
- return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x)
+ x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
+ return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
}
- x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0])
- return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
+ x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
},
sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitLen8, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitLen8, types.Types[types.TINT], args[0])
},
sys.AMD64)
addF("math/bits", "Len",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
- return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
+ return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
}
- return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
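
The narrow Len variants lower to a wider BitLen op after zero-extension, which is safe because zero-extension preserves bit length. A standalone check:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// The high bits added by zero-extension are zero, so the bit
	// length is unchanged at every width.
	x := uint16(0x20)
	fmt.Println(bits.Len16(x))         // 6
	fmt.Println(bits.Len32(uint32(x))) // 6
	fmt.Println(bits.Len64(uint64(x))) // 6
}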
// LeadingZeros is handled because it trivially calls Len.
addF("math/bits", "Reverse64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse32",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse16",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitRev16, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitRev16, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitRev8, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitRev8, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
- return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0])
+ return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
}
- return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0])
+ return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "RotateLeft8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue2(ssa.OpRotateLeft8, types.Types[TUINT8], args[0], args[1])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpRotateLeft8, types.Types[types.TUINT8], args[0], args[1])
},
sys.AMD64)
addF("math/bits", "RotateLeft16",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue2(ssa.OpRotateLeft16, types.Types[TUINT16], args[0], args[1])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpRotateLeft16, types.Types[types.TUINT16], args[0], args[1])
},
sys.AMD64)
addF("math/bits", "RotateLeft32",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue2(ssa.OpRotateLeft32, types.Types[TUINT32], args[0], args[1])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpRotateLeft32, types.Types[types.TUINT32], args[0], args[1])
},
sys.AMD64, sys.ARM, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
addF("math/bits", "RotateLeft64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue2(ssa.OpRotateLeft64, types.Types[TUINT64], args[0], args[1])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpRotateLeft64, types.Types[types.TUINT64], args[0], args[1])
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...)
- makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[TBOOL], x86HasPOPCNT)
+ makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasPOPCNT)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
@@ -3983,7 +3994,7 @@ func init() {
if s.config.PtrSize == 4 {
op = op32
}
- s.vars[n] = s.newValue1(op, types.Types[TINT], args[0])
+ s.vars[n] = s.newValue1(op, types.Types[types.TINT], args[0])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
@@ -3993,67 +4004,67 @@ func init() {
// Merge results.
s.startBlock(bEnd)
- return s.variable(n, types.Types[TINT])
+ return s.variable(n, types.Types[types.TINT])
}
}
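
As with the rounding intrinsics, both sides of the POPCNT guard must agree, and the plain OnesCount picks the 32- or 64-bit op by pointer size. A standalone check of the expected counts:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// The count is the same whichever arm of the guard runs.
	fmt.Println(bits.OnesCount64(0xFF00FF00FF00FF00)) // 32
	fmt.Println(bits.OnesCount(0xF0F0))               // 8
}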
addF("math/bits", "OnesCount64",
makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount64),
sys.AMD64)
addF("math/bits", "OnesCount64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpPopCount64, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpPopCount64, types.Types[types.TINT], args[0])
},
sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
addF("math/bits", "OnesCount32",
makeOnesCountAMD64(ssa.OpPopCount32, ssa.OpPopCount32),
sys.AMD64)
addF("math/bits", "OnesCount32",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpPopCount32, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpPopCount32, types.Types[types.TINT], args[0])
},
sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
addF("math/bits", "OnesCount16",
makeOnesCountAMD64(ssa.OpPopCount16, ssa.OpPopCount16),
sys.AMD64)
addF("math/bits", "OnesCount16",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpPopCount16, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpPopCount16, types.Types[types.TINT], args[0])
},
sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
addF("math/bits", "OnesCount8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpPopCount8, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpPopCount8, types.Types[types.TINT], args[0])
},
sys.S390X, sys.PPC64, sys.Wasm)
addF("math/bits", "OnesCount",
makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32),
sys.AMD64)
addF("math/bits", "Mul64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
},
sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64)
alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE)
addF("math/bits", "Add64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
},
sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X)
alias("math/bits", "Add", "math/bits", "Add64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X)
addF("math/bits", "Sub64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
},
sys.AMD64, sys.ARM64, sys.S390X)
alias("math/bits", "Sub", "math/bits", "Sub64", sys.ArchAMD64, sys.ArchARM64, sys.ArchS390X)
addF("math/bits", "Div64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
// check for divide-by-zero/overflow and panic with appropriate message
- cmpZero := s.newValue2(s.ssaOp(ONE, types.Types[TUINT64]), types.Types[TBOOL], args[2], s.zeroVal(types.Types[TUINT64]))
+ cmpZero := s.newValue2(s.ssaOp(ir.ONE, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[2], s.zeroVal(types.Types[types.TUINT64]))
s.check(cmpZero, panicdivide)
- cmpOverflow := s.newValue2(s.ssaOp(OLT, types.Types[TUINT64]), types.Types[TBOOL], args[0], args[2])
+ cmpOverflow := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[0], args[2])
s.check(cmpOverflow, panicoverflow)
- return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
+ return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
},
sys.AMD64)
alias("math/bits", "Div", "math/bits", "Div64", sys.ArchAMD64)
@@ -4107,8 +4118,8 @@ func init() {
/******** math/big ********/
add("math/big", "mulWW",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
},
sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64LE, sys.ArchPPC64, sys.ArchS390X)
}
@@ -4120,10 +4131,10 @@ func findIntrinsic(sym *types.Sym) intrinsicBuilder {
return nil
}
pkg := sym.Pkg.Path
- if sym.Pkg == localpkg {
- pkg = myimportpath
+ if sym.Pkg == ir.LocalPkg {
+ pkg = base.Ctxt.Pkgpath
}
- if flag_race && pkg == "sync/atomic" {
+ if base.Flag.Race && pkg == "sync/atomic" {
// The race detector needs to be able to intercept these calls.
// We can't intrinsify them.
return nil
@@ -4145,16 +4156,16 @@ func findIntrinsic(sym *types.Sym) intrinsicBuilder {
return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}]
}
-func isIntrinsicCall(n *Node) bool {
- if n == nil || n.Left == nil {
+func isIntrinsicCall(n ir.Node) bool {
+ if n == nil || n.Left() == nil {
return false
}
- return findIntrinsic(n.Left.Sym) != nil
+ return findIntrinsic(n.Left().Sym()) != nil
}
// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
-func (s *state) intrinsicCall(n *Node) *ssa.Value {
- v := findIntrinsic(n.Left.Sym)(s, n, s.intrinsicArgs(n))
+func (s *state) intrinsicCall(n ir.Node) *ssa.Value {
+ v := findIntrinsic(n.Left().Sym())(s, n, s.intrinsicArgs(n))
if ssa.IntrinsicsDebug > 0 {
x := v
if x == nil {
@@ -4163,29 +4174,29 @@ func (s *state) intrinsicCall(n *Node) *ssa.Value {
if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
x = x.Args[0]
}
- Warnl(n.Pos, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString())
+ base.WarnfAt(n.Pos(), "intrinsic substitution for %v with %s", n.Left().Sym().Name, x.LongString())
}
return v
}
// intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
-func (s *state) intrinsicArgs(n *Node) []*ssa.Value {
+func (s *state) intrinsicArgs(n ir.Node) []*ssa.Value {
// Construct map of temps; see comments in s.call about the structure of n.
- temps := map[*Node]*ssa.Value{}
- for _, a := range n.List.Slice() {
- if a.Op != OAS {
- s.Fatalf("non-assignment as a temp function argument %v", a.Op)
+ temps := map[ir.Node]*ssa.Value{}
+ for _, a := range n.List().Slice() {
+ if a.Op() != ir.OAS {
+ s.Fatalf("non-assignment as a temp function argument %v", a.Op())
}
- l, r := a.Left, a.Right
- if l.Op != ONAME {
- s.Fatalf("non-ONAME temp function argument %v", a.Op)
+ l, r := a.Left(), a.Right()
+ if l.Op() != ir.ONAME {
+ s.Fatalf("non-ONAME temp function argument %v", a.Op())
}
// Evaluate and store to "temporary".
// Walk ensures these temporaries are dead outside of n.
temps[l] = s.expr(r)
}
- args := make([]*ssa.Value, n.Rlist.Len())
- for i, n := range n.Rlist.Slice() {
+ args := make([]*ssa.Value, n.Rlist().Len())
+ for i, n := range n.Rlist().Slice() {
// Store a value to an argument slot.
if x, ok := temps[n]; ok {
// This is a previously computed temporary.
@@ -4204,62 +4215,62 @@ func (s *state) intrinsicArgs(n *Node) []*ssa.Value {
// call. We will also record funcdata information on where the args are stored
// (as well as the deferBits variable), and this will enable us to run the proper
// defer calls during panics.
-func (s *state) openDeferRecord(n *Node) {
+func (s *state) openDeferRecord(n ir.Node) {
// Do any needed expression evaluation for the args (including the
// receiver, if any). This may be evaluating something like 'autotmp_3 =
// once.mutex'. Such a statement will create a mapping in s.vars[] from
// the autotmp name to the evaluated SSA arg value, but won't do any
// stores to the stack.
- s.stmtList(n.List)
+ s.stmtList(n.List())
var args []*ssa.Value
- var argNodes []*Node
+ var argNodes []ir.Node
opendefer := &openDeferInfo{
n: n,
}
- fn := n.Left
- if n.Op == OCALLFUNC {
+ fn := n.Left()
+ if n.Op() == ir.OCALLFUNC {
// We must always store the function value in a stack slot for the
// runtime panic code to use. But in the defer exit code, we will
// call the function directly if it is a static function.
closureVal := s.expr(fn)
- closure := s.openDeferSave(nil, fn.Type, closureVal)
- opendefer.closureNode = closure.Aux.(*Node)
- if !(fn.Op == ONAME && fn.Class() == PFUNC) {
+ closure := s.openDeferSave(nil, fn.Type(), closureVal)
+ opendefer.closureNode = closure.Aux.(ir.Node)
+ if !(fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC) {
opendefer.closure = closure
}
- } else if n.Op == OCALLMETH {
- if fn.Op != ODOTMETH {
- Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
+ } else if n.Op() == ir.OCALLMETH {
+ if fn.Op() != ir.ODOTMETH {
+ base.Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
}
closureVal := s.getMethodClosure(fn)
// We must always store the function value in a stack slot for the
// runtime panic code to use. But in the defer exit code, we will
// call the method directly.
- closure := s.openDeferSave(nil, fn.Type, closureVal)
- opendefer.closureNode = closure.Aux.(*Node)
+ closure := s.openDeferSave(nil, fn.Type(), closureVal)
+ opendefer.closureNode = closure.Aux.(ir.Node)
} else {
- if fn.Op != ODOTINTER {
- Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
+ if fn.Op() != ir.ODOTINTER {
+ base.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
}
closure, rcvr := s.getClosureAndRcvr(fn)
opendefer.closure = s.openDeferSave(nil, closure.Type, closure)
// Important to get the receiver type correct, so it is recognized
// as a pointer for GC purposes.
- opendefer.rcvr = s.openDeferSave(nil, fn.Type.Recv().Type, rcvr)
- opendefer.closureNode = opendefer.closure.Aux.(*Node)
- opendefer.rcvrNode = opendefer.rcvr.Aux.(*Node)
+ opendefer.rcvr = s.openDeferSave(nil, fn.Type().Recv().Type, rcvr)
+ opendefer.closureNode = opendefer.closure.Aux.(ir.Node)
+ opendefer.rcvrNode = opendefer.rcvr.Aux.(ir.Node)
}
- for _, argn := range n.Rlist.Slice() {
+ for _, argn := range n.Rlist().Slice() {
var v *ssa.Value
- if canSSAType(argn.Type) {
- v = s.openDeferSave(nil, argn.Type, s.expr(argn))
+ if canSSAType(argn.Type()) {
+ v = s.openDeferSave(nil, argn.Type(), s.expr(argn))
} else {
- v = s.openDeferSave(argn, argn.Type, nil)
+ v = s.openDeferSave(argn, argn.Type(), nil)
}
args = append(args, v)
- argNodes = append(argNodes, v.Aux.(*Node))
+ argNodes = append(argNodes, v.Aux.(ir.Node))
}
opendefer.argVals = args
opendefer.argNodes = argNodes
@@ -4268,10 +4279,10 @@ func (s *state) openDeferRecord(n *Node) {
// Update deferBits only after evaluation and storage to stack of
// args/receiver/interface is successful.
- bitvalue := s.constInt8(types.Types[TUINT8], 1<<uint(index))
- newDeferBits := s.newValue2(ssa.OpOr8, types.Types[TUINT8], s.variable(&deferBitsVar, types.Types[TUINT8]), bitvalue)
- s.vars[&deferBitsVar] = newDeferBits
- s.store(types.Types[TUINT8], s.deferBitsAddr, newDeferBits)
+ bitvalue := s.constInt8(types.Types[types.TUINT8], 1<<uint(index))
+ newDeferBits := s.newValue2(ssa.OpOr8, types.Types[types.TUINT8], s.variable(deferBitsVar, types.Types[types.TUINT8]), bitvalue)
+ s.vars[deferBitsVar] = newDeferBits
+ s.store(types.Types[types.TUINT8], s.deferBitsAddr, newDeferBits)
}
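
The deferBits update above (set bit i once defer i's args are safely stored) pairs with openDeferExit below (test and clear bits in reverse order). The bookkeeping in miniature, as a standalone program:

package main

import "fmt"

func main() {
	// Bit i records that defer i was reached; the exit path walks the
	// bits in reverse, clearing each before the call so a panicking
	// defer is not re-run.
	var deferBits uint8
	for i := 0; i < 3; i++ {
		deferBits |= 1 << uint(i)
	}
	for i := 2; i >= 0; i-- {
		bit := uint8(1) << uint(i)
		if deferBits&bit == 0 {
			continue
		}
		deferBits &^= bit
		fmt.Println("run defer", i)
	}
}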
// openDeferSave generates SSA nodes to store a value (with type t) for an
@@ -4281,16 +4292,16 @@ func (s *state) openDeferRecord(n *Node) {
// type t is non-SSAable, then n must be non-nil (and val should be nil) and n is
// evaluated (via s.addr() below) to get the value that is to be stored. The
// function returns an SSA value representing a pointer to the autotmp location.
-func (s *state) openDeferSave(n *Node, t *types.Type, val *ssa.Value) *ssa.Value {
+func (s *state) openDeferSave(n ir.Node, t *types.Type, val *ssa.Value) *ssa.Value {
canSSA := canSSAType(t)
var pos src.XPos
if canSSA {
pos = val.Pos
} else {
- pos = n.Pos
+ pos = n.Pos()
}
argTemp := tempAt(pos.WithNotStmt(), s.curfn, t)
- argTemp.Name.SetOpenDeferSlot(true)
+ argTemp.Name().SetOpenDeferSlot(true)
var addrArgTemp *ssa.Value
// Use OpVarLive to make sure stack slots for the args, etc. are not
// removed by dead-store elimination
@@ -4299,16 +4310,16 @@ func (s *state) openDeferSave(n *Node, t *types.Type, val *ssa.Value) *ssa.Value
// declared in the entry block, so that it will be live for the
// defer exit code (which will actually access it only if the
// associated defer call has been activated).
- s.defvars[s.f.Entry.ID][&memVar] = s.entryNewValue1A(ssa.OpVarDef, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][&memVar])
- s.defvars[s.f.Entry.ID][&memVar] = s.entryNewValue1A(ssa.OpVarLive, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][&memVar])
- addrArgTemp = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(argTemp.Type), argTemp, s.sp, s.defvars[s.f.Entry.ID][&memVar])
+ s.defvars[s.f.Entry.ID][memVar] = s.entryNewValue1A(ssa.OpVarDef, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar])
+ s.defvars[s.f.Entry.ID][memVar] = s.entryNewValue1A(ssa.OpVarLive, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar])
+ addrArgTemp = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.defvars[s.f.Entry.ID][memVar])
} else {
// Special case if we're still in the entry block. We can't use
// the above code, since s.defvars[s.f.Entry.ID] isn't defined
// until we end the entry block with s.endBlock().
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, argTemp, s.mem(), false)
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argTemp, s.mem(), false)
- addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type), argTemp, s.sp, s.mem(), false)
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, argTemp, s.mem(), false)
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argTemp, s.mem(), false)
+ addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.mem(), false)
}
if t.HasPointers() {
// Since we may use this argTemp during exit depending on the
@@ -4316,7 +4327,7 @@ func (s *state) openDeferSave(n *Node, t *types.Type, val *ssa.Value) *ssa.Value
// Therefore, we must make sure it is zeroed out in the entry
// block if it contains pointers, else GC may wrongly follow an
// uninitialized pointer value.
- argTemp.Name.SetNeedzero(true)
+ argTemp.Name().SetNeedzero(true)
}
if !canSSA {
a := s.addr(n)
@@ -4339,7 +4350,7 @@ func (s *state) openDeferExit() {
s.startBlock(deferExit)
s.lastDeferExit = deferExit
s.lastDeferCount = len(s.openDefers)
- zeroval := s.constInt8(types.Types[TUINT8], 0)
+ zeroval := s.constInt8(types.Types[types.TUINT8], 0)
testLateExpansion := ssa.LateCallExpansionEnabledWithin(s.f)
// Test for and run defers in reverse order
for i := len(s.openDefers) - 1; i >= 0; i-- {
@@ -4347,12 +4358,12 @@ func (s *state) openDeferExit() {
bCond := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
- deferBits := s.variable(&deferBitsVar, types.Types[TUINT8])
+ deferBits := s.variable(deferBitsVar, types.Types[types.TUINT8])
// Generate code to check if the bit associated with the current
// defer is set.
- bitval := s.constInt8(types.Types[TUINT8], 1<<uint(i))
- andval := s.newValue2(ssa.OpAnd8, types.Types[TUINT8], deferBits, bitval)
- eqVal := s.newValue2(ssa.OpEq8, types.Types[TBOOL], andval, zeroval)
+ bitval := s.constInt8(types.Types[types.TUINT8], 1<<uint(i))
+ andval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, bitval)
+ eqVal := s.newValue2(ssa.OpEq8, types.Types[types.TBOOL], andval, zeroval)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(eqVal)
@@ -4363,19 +4374,19 @@ func (s *state) openDeferExit() {
// Clear this bit in deferBits and force store back to stack, so
// we will not try to re-run this defer call if this defer call panics.
- nbitval := s.newValue1(ssa.OpCom8, types.Types[TUINT8], bitval)
- maskedval := s.newValue2(ssa.OpAnd8, types.Types[TUINT8], deferBits, nbitval)
- s.store(types.Types[TUINT8], s.deferBitsAddr, maskedval)
+ nbitval := s.newValue1(ssa.OpCom8, types.Types[types.TUINT8], bitval)
+ maskedval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, nbitval)
+ s.store(types.Types[types.TUINT8], s.deferBitsAddr, maskedval)
// Use this value for following tests, so we keep previous
// bits cleared.
- s.vars[&deferBitsVar] = maskedval
+ s.vars[deferBitsVar] = maskedval
// Generate code to call the function call of the defer, using the
// closure/receiver/args that were stored in argtmps at the point
// of the defer statement.
- argStart := Ctxt.FixedFrameSize()
- fn := r.n.Left
- stksize := fn.Type.ArgWidth()
+ argStart := base.Ctxt.FixedFrameSize()
+ fn := r.n.Left()
+ stksize := fn.Type().ArgWidth()
var ACArgs []ssa.Param
var ACResults []ssa.Param
var callArgs []*ssa.Value
@@ -4383,11 +4394,11 @@ func (s *state) openDeferExit() {
// rcvr in case of OCALLINTER
v := s.load(r.rcvr.Type.Elem(), r.rcvr)
addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
- ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart)})
+ ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(argStart)})
if testLateExpansion {
callArgs = append(callArgs, v)
} else {
- s.store(types.Types[TUINTPTR], addr, v)
+ s.store(types.Types[types.TUINTPTR], addr, v)
}
}
for j, argAddrVal := range r.argVals {
@@ -4416,7 +4427,7 @@ func (s *state) openDeferExit() {
if r.closure != nil {
v := s.load(r.closure.Type.Elem(), r.closure)
s.maybeNilCheckClosure(v, callDefer)
- codeptr := s.rawLoad(types.Types[TUINTPTR], v)
+ codeptr := s.rawLoad(types.Types[types.TUINTPTR], v)
aux := ssa.ClosureAuxCall(ACArgs, ACResults)
if testLateExpansion {
callArgs = append(callArgs, s.mem())
@@ -4426,7 +4437,7 @@ func (s *state) openDeferExit() {
call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, aux, codeptr, v, s.mem())
}
} else {
- aux := ssa.StaticAuxCall(fn.Sym.Linksym(), ACArgs, ACResults)
+ aux := ssa.StaticAuxCall(fn.Sym().Linksym(), ACArgs, ACResults)
if testLateExpansion {
callArgs = append(callArgs, s.mem())
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
@@ -4438,25 +4449,25 @@ func (s *state) openDeferExit() {
}
call.AuxInt = stksize
if testLateExpansion {
- s.vars[&memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
+ s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
} else {
- s.vars[&memVar] = call
+ s.vars[memVar] = call
}
// Make sure that the stack slots with pointers are kept live
// through the call (which is a pre-emption point). Also, we will
// use the first call of the last defer exit to compute liveness
// for the deferreturn, so we want all stack slots to be live.
if r.closureNode != nil {
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
}
if r.rcvrNode != nil {
- if r.rcvrNode.Type.HasPointers() {
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false)
+ if r.rcvrNode.Type().HasPointers() {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false)
}
}
for _, argNode := range r.argNodes {
- if argNode.Type.HasPointers() {
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false)
+ if argNode.Type().HasPointers() {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false)
}
}
@@ -4465,42 +4476,42 @@ func (s *state) openDeferExit() {
}
}
-func (s *state) callResult(n *Node, k callKind) *ssa.Value {
+func (s *state) callResult(n ir.Node, k callKind) *ssa.Value {
return s.call(n, k, false)
}
-func (s *state) callAddr(n *Node, k callKind) *ssa.Value {
+func (s *state) callAddr(n ir.Node, k callKind) *ssa.Value {
return s.call(n, k, true)
}
// Calls the function n using the specified call type.
// Returns the address of the return value (or nil if none).
-func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
+func (s *state) call(n ir.Node, k callKind, returnResultAddr bool) *ssa.Value {
s.prevCall = nil
var sym *types.Sym // target symbol (if static)
var closure *ssa.Value // ptr to closure to run (if dynamic)
var codeptr *ssa.Value // ptr to target code (if dynamic)
var rcvr *ssa.Value // receiver to set
- fn := n.Left
+ fn := n.Left()
var ACArgs []ssa.Param
var ACResults []ssa.Param
var callArgs []*ssa.Value
- res := n.Left.Type.Results()
+ res := n.Left().Type().Results()
if k == callNormal {
nf := res.NumFields()
for i := 0; i < nf; i++ {
fp := res.Field(i)
- ACResults = append(ACResults, ssa.Param{Type: fp.Type, Offset: int32(fp.Offset + Ctxt.FixedFrameSize())})
+ ACResults = append(ACResults, ssa.Param{Type: fp.Type, Offset: int32(fp.Offset + base.Ctxt.FixedFrameSize())})
}
}
testLateExpansion := false
- switch n.Op {
- case OCALLFUNC:
+ switch n.Op() {
+ case ir.OCALLFUNC:
testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
- if k == callNormal && fn.Op == ONAME && fn.Class() == PFUNC {
- sym = fn.Sym
+ if k == callNormal && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC {
+ sym = fn.Sym()
break
}
closure = s.expr(fn)
@@ -4509,54 +4520,54 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
// not the point of defer statement.
s.maybeNilCheckClosure(closure, k)
}
- case OCALLMETH:
- if fn.Op != ODOTMETH {
+ case ir.OCALLMETH:
+ if fn.Op() != ir.ODOTMETH {
s.Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
}
testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
if k == callNormal {
- sym = fn.Sym
+ sym = fn.Sym()
break
}
closure = s.getMethodClosure(fn)
// Note: receiver is already present in n.Rlist, so we don't
// want to set it here.
- case OCALLINTER:
- if fn.Op != ODOTINTER {
- s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
+ case ir.OCALLINTER:
+ if fn.Op() != ir.ODOTINTER {
+ s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
}
testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
var iclosure *ssa.Value
iclosure, rcvr = s.getClosureAndRcvr(fn)
if k == callNormal {
- codeptr = s.load(types.Types[TUINTPTR], iclosure)
+ codeptr = s.load(types.Types[types.TUINTPTR], iclosure)
} else {
closure = iclosure
}
}
- dowidth(fn.Type)
- stksize := fn.Type.ArgWidth() // includes receiver, args, and results
+ dowidth(fn.Type())
+ stksize := fn.Type().ArgWidth() // includes receiver, args, and results
// Run all assignments of temps.
// The temps are introduced to avoid overwriting argument
// slots when arguments themselves require function calls.
- s.stmtList(n.List)
+ s.stmtList(n.List())
var call *ssa.Value
if k == callDeferStack {
testLateExpansion = ssa.LateCallExpansionEnabledWithin(s.f)
// Make a defer struct d on the stack.
t := deferstruct(stksize)
- d := tempAt(n.Pos, s.curfn, t)
+ d := tempAt(n.Pos(), s.curfn, t)
- s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
addr := s.addr(d)
// Must match reflect.go:deferstruct and src/runtime/runtime2.go:_defer.
// 0: siz
- s.store(types.Types[TUINT32],
- s.newValue1I(ssa.OpOffPtr, types.Types[TUINT32].PtrTo(), t.FieldOff(0), addr),
- s.constInt32(types.Types[TUINT32], int32(stksize)))
+ s.store(types.Types[types.TUINT32],
+ s.newValue1I(ssa.OpOffPtr, types.Types[types.TUINT32].PtrTo(), t.FieldOff(0), addr),
+ s.constInt32(types.Types[types.TUINT32], int32(stksize)))
// 1: started, set in deferprocStack
// 2: heap, set in deferprocStack
// 3: openDefer
@@ -4573,17 +4584,17 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
// 11: fd
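// A hedged sketch of the record shape the numbered stores above imply;
// the authoritative layout is reflect.go:deferstruct and
// runtime/runtime2.go:_defer (fields 4 through 10 are elided by this
// diff, so they are left out here too):
//
//	siz       int32   // field 0: stored explicitly above
//	started   bool    // field 1: set in deferprocStack
//	heap      bool    // field 2: set in deferprocStack
//	openDefer bool    // field 3
//	...               // fields 4 through 10
//	fd        uintptr // field 11 (pointer-sized)
//	                  // the receiver and arguments follow at t.FieldOff(12)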
// Then, store all the arguments of the defer call.
- ft := fn.Type
+ ft := fn.Type()
off := t.FieldOff(12)
- args := n.Rlist.Slice()
+ args := n.Rlist().Slice()
// Set receiver (for interface calls). Always a pointer.
if rcvr != nil {
p := s.newValue1I(ssa.OpOffPtr, ft.Recv().Type.PtrTo(), off, addr)
- s.store(types.Types[TUINTPTR], p, rcvr)
+ s.store(types.Types[types.TUINTPTR], p, rcvr)
}
// Set receiver (for method calls).
- if n.Op == OCALLMETH {
+ if n.Op() == ir.OCALLMETH {
f := ft.Recv()
s.storeArgWithBase(args[0], f.Type, addr, off+f.Offset)
args = args[1:]
@@ -4595,15 +4606,15 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
}
// Call runtime.deferprocStack with pointer to _defer record.
- ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(Ctxt.FixedFrameSize())})
+ ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(base.Ctxt.FixedFrameSize())})
aux := ssa.StaticAuxCall(deferprocStack, ACArgs, ACResults)
if testLateExpansion {
callArgs = append(callArgs, addr, s.mem())
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...)
} else {
- arg0 := s.constOffPtrSP(types.Types[TUINTPTR], Ctxt.FixedFrameSize())
- s.store(types.Types[TUINTPTR], arg0, addr)
+ arg0 := s.constOffPtrSP(types.Types[types.TUINTPTR], base.Ctxt.FixedFrameSize())
+ s.store(types.Types[types.TUINTPTR], arg0, addr)
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
}
if stksize < int64(Widthptr) {
@@ -4616,24 +4627,24 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
} else {
// Store arguments to stack, including defer/go arguments and receiver for method calls.
// These are written in SP-offset order.
- argStart := Ctxt.FixedFrameSize()
+ argStart := base.Ctxt.FixedFrameSize()
// Defer/go args.
if k != callNormal {
// Write argsize and closure (args to newproc/deferproc).
- argsize := s.constInt32(types.Types[TUINT32], int32(stksize))
- ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINT32], Offset: int32(argStart)})
+ argsize := s.constInt32(types.Types[types.TUINT32], int32(stksize))
+ ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINT32], Offset: int32(argStart)})
if testLateExpansion {
callArgs = append(callArgs, argsize)
} else {
addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart)
- s.store(types.Types[TUINT32], addr, argsize)
+ s.store(types.Types[types.TUINT32], addr, argsize)
}
- ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart) + int32(Widthptr)})
+ ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(argStart) + int32(Widthptr)})
if testLateExpansion {
callArgs = append(callArgs, closure)
} else {
addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr))
- s.store(types.Types[TUINTPTR], addr, closure)
+ s.store(types.Types[types.TUINTPTR], addr, closure)
}
stksize += 2 * int64(Widthptr)
argStart += 2 * int64(Widthptr)
@@ -4642,18 +4653,18 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
// Set receiver (for interface calls).
if rcvr != nil {
addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
- ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart)})
+ ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(argStart)})
if testLateExpansion {
callArgs = append(callArgs, rcvr)
} else {
- s.store(types.Types[TUINTPTR], addr, rcvr)
+ s.store(types.Types[types.TUINTPTR], addr, rcvr)
}
}
// Write args.
- t := n.Left.Type
- args := n.Rlist.Slice()
- if n.Op == OCALLMETH {
+ t := n.Left().Type()
+ args := n.Rlist().Slice()
+ if n.Op() == ir.OCALLMETH {
f := t.Recv()
ACArg, arg := s.putArg(args[0], f.Type, argStart+f.Offset, testLateExpansion)
ACArgs = append(ACArgs, ACArg)
@@ -4693,7 +4704,7 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
// can't always figure that out currently, and it's
// critical that we not clobber any arguments already
// stored onto the stack.
- codeptr = s.rawLoad(types.Types[TUINTPTR], closure)
+ codeptr = s.rawLoad(types.Types[types.TUINTPTR], closure)
if testLateExpansion {
aux := ssa.ClosureAuxCall(ACArgs, ACResults)
call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure)
@@ -4718,18 +4729,18 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(sym.Linksym(), ACArgs, ACResults), s.mem())
}
default:
- s.Fatalf("bad call type %v %v", n.Op, n)
+ s.Fatalf("bad call type %v %v", n.Op(), n)
}
call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
}
if testLateExpansion {
s.prevCall = call
- s.vars[&memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
+ s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
} else {
- s.vars[&memVar] = call
+ s.vars[memVar] = call
}
// Insert OVARLIVE nodes
- s.stmtList(n.Nbody)
+ s.stmtList(n.Body())
// Finish block for defers
if k == callDefer || k == callDeferStack {
@@ -4757,13 +4768,13 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
if testLateExpansion {
return s.newValue1I(ssa.OpSelectNAddr, pt, 0, call)
}
- return s.constOffPtrSP(pt, fp.Offset+Ctxt.FixedFrameSize())
+ return s.constOffPtrSP(pt, fp.Offset+base.Ctxt.FixedFrameSize())
}
if testLateExpansion {
return s.newValue1I(ssa.OpSelectN, fp.Type, 0, call)
}
- return s.load(n.Type, s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+Ctxt.FixedFrameSize()))
+ return s.load(n.Type(), s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+base.Ctxt.FixedFrameSize()))
}
// maybeNilCheckClosure checks if a nil check of a closure is needed in some
@@ -4777,28 +4788,28 @@ func (s *state) maybeNilCheckClosure(closure *ssa.Value, k callKind) {
}
// getMethodClosure returns a value representing the closure for a method call
-func (s *state) getMethodClosure(fn *Node) *ssa.Value {
+func (s *state) getMethodClosure(fn ir.Node) *ssa.Value {
// Make a name n2 for the function.
// fn.Sym might be sync.(*Mutex).Unlock.
// Make a PFUNC node out of that, then evaluate it.
// We get back an SSA value representing &sync.(*Mutex).Unlock·f.
// We can then pass that to defer or go.
- n2 := newnamel(fn.Pos, fn.Sym)
- n2.Name.Curfn = s.curfn
- n2.SetClass(PFUNC)
+ n2 := ir.NewNameAt(fn.Pos(), fn.Sym())
+ n2.Name().Curfn = s.curfn
+ n2.SetClass(ir.PFUNC)
// n2.Sym already existed, so it's already marked as a function.
- n2.Pos = fn.Pos
- n2.Type = types.Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it.
+ n2.SetPos(fn.Pos())
+ n2.SetType(types.Types[types.TUINT8]) // fake type for a static closure. Could use runtime.funcval if we had it.
return s.expr(n2)
}
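A source-level analogue of what this arranges for `defer mu.Unlock()` (illustrative only): the method is split into a static function value plus the receiver passed as an ordinary first argument, which is exactly the closure/args shape defer and go expect.

	package main

	import "sync"

	func main() {
		var mu sync.Mutex
		mu.Lock()
		unlock := (*sync.Mutex).Unlock // static method expression, akin to &sync.(*Mutex).Unlock·f
		defer unlock(&mu)              // the receiver travels as the first argument
	}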
// getClosureAndRcvr returns values for the appropriate closure and receiver of an
// interface call
-func (s *state) getClosureAndRcvr(fn *Node) (*ssa.Value, *ssa.Value) {
- i := s.expr(fn.Left)
- itab := s.newValue1(ssa.OpITab, types.Types[TUINTPTR], i)
+func (s *state) getClosureAndRcvr(fn ir.Node) (*ssa.Value, *ssa.Value) {
+ i := s.expr(fn.Left())
+ itab := s.newValue1(ssa.OpITab, types.Types[types.TUINTPTR], i)
s.nilCheck(itab)
- itabidx := fn.Xoffset + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab
+ itabidx := fn.Offset() + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab
closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i)
return closure, rcvr
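The `fn.Offset() + 2*int64(Widthptr) + 8` arithmetic assumes the runtime.itab layout, where the method-pointer array starts two words plus a uint32-and-padding into the struct; a sketch (field names from the runtime, offsets shown symbolically, and the formula holds on both 32- and 64-bit):

	type itab struct {
		inter uintptr    // *interfacetype, offset 0
		_type uintptr    // *_type, offset Widthptr
		hash  uint32     // offset 2*Widthptr
		_     [4]byte    // padding
		fun   [1]uintptr // method pointers begin at 2*Widthptr + 8
	}

fn.Offset() then selects the particular method's slot within fun.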
@@ -4808,9 +4819,9 @@ func (s *state) getClosureAndRcvr(fn *Node) (*ssa.Value, *ssa.Value) {
// -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
func etypesign(e types.EType) int8 {
switch e {
- case TINT8, TINT16, TINT32, TINT64, TINT:
+ case types.TINT8, types.TINT16, types.TINT32, types.TINT64, types.TINT:
return -1
- case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR:
+ case types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINT, types.TUINTPTR, types.TUNSAFEPTR:
return +1
}
return 0
@@ -4818,25 +4829,25 @@ func etypesign(e types.EType) int8 {
// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
// The value that the returned Value represents is guaranteed to be non-nil.
-func (s *state) addr(n *Node) *ssa.Value {
- if n.Op != ONAME {
- s.pushLine(n.Pos)
+func (s *state) addr(n ir.Node) *ssa.Value {
+ if n.Op() != ir.ONAME {
+ s.pushLine(n.Pos())
defer s.popLine()
}
- t := types.NewPtr(n.Type)
- switch n.Op {
- case ONAME:
+ t := types.NewPtr(n.Type())
+ switch n.Op() {
+ case ir.ONAME:
switch n.Class() {
- case PEXTERN:
+ case ir.PEXTERN:
// global variable
- v := s.entryNewValue1A(ssa.OpAddr, t, n.Sym.Linksym(), s.sb)
+ v := s.entryNewValue1A(ssa.OpAddr, t, n.Sym().Linksym(), s.sb)
// TODO: Make OpAddr use AuxInt as well as Aux.
- if n.Xoffset != 0 {
- v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v)
+ if n.Offset() != 0 {
+ v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Offset(), v)
}
return v
- case PPARAM:
+ case ir.PPARAM:
// parameter slot
v := s.decladdrs[n]
if v != nil {
@@ -4848,10 +4859,10 @@ func (s *state) addr(n *Node) *ssa.Value {
}
s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
return nil
- case PAUTO:
- return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), !n.IsAutoTmp())
+ case ir.PAUTO:
+ return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), !ir.IsAutoTmp(n))
- case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
+ case ir.PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
// ensure that we reuse symbols for out parameters so
// that cse works on their addresses
return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true)
@@ -4859,51 +4870,51 @@ func (s *state) addr(n *Node) *ssa.Value {
s.Fatalf("variable address class %v not implemented", n.Class())
return nil
}
- case ORESULT:
+ case ir.ORESULT:
// load return from callee
if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
- return s.constOffPtrSP(t, n.Xoffset)
+ return s.constOffPtrSP(t, n.Offset())
}
- which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Xoffset)
+ which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Offset())
if which == -1 {
// Do the old thing // TODO: Panic instead.
- return s.constOffPtrSP(t, n.Xoffset)
+ return s.constOffPtrSP(t, n.Offset())
}
x := s.newValue1I(ssa.OpSelectNAddr, t, which, s.prevCall)
return x
- case OINDEX:
- if n.Left.Type.IsSlice() {
- a := s.expr(n.Left)
- i := s.expr(n.Right)
- len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], a)
+ case ir.OINDEX:
+ if n.Left().Type().IsSlice() {
+ a := s.expr(n.Left())
+ i := s.expr(n.Right())
+ len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], a)
i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
p := s.newValue1(ssa.OpSlicePtr, t, a)
return s.newValue2(ssa.OpPtrIndex, t, p, i)
} else { // array
- a := s.addr(n.Left)
- i := s.expr(n.Right)
- len := s.constInt(types.Types[TINT], n.Left.Type.NumElem())
+ a := s.addr(n.Left())
+ i := s.expr(n.Right())
+ len := s.constInt(types.Types[types.TINT], n.Left().Type().NumElem())
i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
- return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left.Type.Elem()), a, i)
- }
- case ODEREF:
- return s.exprPtr(n.Left, n.Bounded(), n.Pos)
- case ODOT:
- p := s.addr(n.Left)
- return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
- case ODOTPTR:
- p := s.exprPtr(n.Left, n.Bounded(), n.Pos)
- return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
- case OCLOSUREVAR:
- return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset,
+ return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left().Type().Elem()), a, i)
+ }
+ case ir.ODEREF:
+ return s.exprPtr(n.Left(), n.Bounded(), n.Pos())
+ case ir.ODOT:
+ p := s.addr(n.Left())
+ return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
+ case ir.ODOTPTR:
+ p := s.exprPtr(n.Left(), n.Bounded(), n.Pos())
+ return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
+ case ir.OCLOSUREVAR:
+ return s.newValue1I(ssa.OpOffPtr, t, n.Offset(),
s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr))
- case OCONVNOP:
- addr := s.addr(n.Left)
+ case ir.OCONVNOP:
+ addr := s.addr(n.Left())
return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
- case OCALLFUNC, OCALLINTER, OCALLMETH:
+ case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH:
return s.callAddr(n, callNormal)
- case ODOTTYPE:
+ case ir.ODOTTYPE:
v, _ := s.dottype(n, false)
if v.Op != ssa.OpLoad {
s.Fatalf("dottype of non-load")
@@ -4913,36 +4924,36 @@ func (s *state) addr(n *Node) *ssa.Value {
}
return v.Args[0]
default:
- s.Fatalf("unhandled addr %v", n.Op)
+ s.Fatalf("unhandled addr %v", n.Op())
return nil
}
}
// canSSA reports whether n is SSA-able.
// n must be an ONAME (or an ODOT sequence with an ONAME base).
-func (s *state) canSSA(n *Node) bool {
- if Debug.N != 0 {
+func (s *state) canSSA(n ir.Node) bool {
+ if base.Flag.N != 0 {
return false
}
- for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) {
- n = n.Left
+ for n.Op() == ir.ODOT || (n.Op() == ir.OINDEX && n.Left().Type().IsArray()) {
+ n = n.Left()
}
- if n.Op != ONAME {
+ if n.Op() != ir.ONAME {
return false
}
- if n.Name.Addrtaken() {
+ if n.Name().Addrtaken() {
return false
}
- if n.isParamHeapCopy() {
+ if isParamHeapCopy(n) {
return false
}
- if n.Class() == PAUTOHEAP {
+ if n.Class() == ir.PAUTOHEAP {
s.Fatalf("canSSA of PAUTOHEAP %v", n)
}
switch n.Class() {
- case PEXTERN:
+ case ir.PEXTERN:
return false
- case PPARAMOUT:
+ case ir.PPARAMOUT:
if s.hasdefer {
// TODO: handle this case? Named return values must be
// in memory so that the deferred function can see them.
@@ -4957,13 +4968,13 @@ func (s *state) canSSA(n *Node) bool {
return false
}
}
- if n.Class() == PPARAM && n.Sym != nil && n.Sym.Name == ".this" {
+ if n.Class() == ir.PPARAM && n.Sym() != nil && n.Sym().Name == ".this" {
// wrappers generated by genwrapper need to update
// the .this pointer in place.
// TODO: treat as a PPARAMOUT?
return false
}
- return canSSAType(n.Type)
+ return canSSAType(n.Type())
// TODO: try to make more variables SSAable?
}
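An illustrative example (not from this CL) of the distinctions canSSA draws:

	func g() (named int) { // named is PPARAMOUT: stays in memory because g has a defer
		x := 1  // an ordinary local like x is SSA-able
		p := &x // taking its address marks x Addrtaken: it drops out of SSA form
		*p = 2
		defer func() {}()
		return x
	}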
@@ -4977,7 +4988,7 @@ func canSSAType(t *types.Type) bool {
return false
}
switch t.Etype {
- case TARRAY:
+ case types.TARRAY:
// We can't do larger arrays because dynamic indexing is
// not supported on SSA variables.
// TODO: allow if all indexes are constant.
@@ -4985,7 +4996,7 @@ func canSSAType(t *types.Type) bool {
return canSSAType(t.Elem())
}
return false
- case TSTRUCT:
+ case types.TSTRUCT:
if t.NumFields() > ssa.MaxStruct {
return false
}
@@ -5001,7 +5012,7 @@ func canSSAType(t *types.Type) bool {
}
// exprPtr evaluates n to a pointer and nil-checks it.
-func (s *state) exprPtr(n *Node, bounded bool, lineno src.XPos) *ssa.Value {
+func (s *state) exprPtr(n ir.Node, bounded bool, lineno src.XPos) *ssa.Value {
p := s.expr(n)
if bounded || n.NonNil() {
if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 {
@@ -5017,7 +5028,7 @@ func (s *state) exprPtr(n *Node, bounded bool, lineno src.XPos) *ssa.Value {
// Used only for automatically inserted nil checks,
// not for user code like 'x != nil'.
func (s *state) nilCheck(ptr *ssa.Value) {
- if disable_checknil != 0 || s.curfn.Func.NilCheckDisabled() {
+ if base.Debug.DisableNil != 0 || s.curfn.Func().NilCheckDisabled() {
return
}
s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
@@ -5032,7 +5043,7 @@ func (s *state) nilCheck(ptr *ssa.Value) {
func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
idx = s.extendIndex(idx, len, kind, bounded)
- if bounded || Debug.B != 0 {
+ if bounded || base.Flag.B != 0 {
// If bounded or bounds checking is flag-disabled, then no check necessary,
// just return the extended index.
//
@@ -5082,9 +5093,9 @@ func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bo
var cmp *ssa.Value
if kind == ssa.BoundsIndex || kind == ssa.BoundsIndexU {
- cmp = s.newValue2(ssa.OpIsInBounds, types.Types[TBOOL], idx, len)
+ cmp = s.newValue2(ssa.OpIsInBounds, types.Types[types.TBOOL], idx, len)
} else {
- cmp = s.newValue2(ssa.OpIsSliceInBounds, types.Types[TBOOL], idx, len)
+ cmp = s.newValue2(ssa.OpIsSliceInBounds, types.Types[types.TBOOL], idx, len)
}
b := s.endBlock()
b.Kind = ssa.BlockIf
@@ -5105,12 +5116,12 @@ func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bo
s.startBlock(bNext)
// In Spectre index mode, apply an appropriate mask to avoid speculative out-of-bounds accesses.
- if spectreIndex {
+ if base.Flag.Cfg.SpectreIndex {
op := ssa.OpSpectreIndex
if kind != ssa.BoundsIndex && kind != ssa.BoundsIndexU {
op = ssa.OpSpectreSliceIndex
}
- idx = s.newValue2(op, types.Types[TINT], idx, len)
+ idx = s.newValue2(op, types.Types[types.TINT], idx, len)
}
return idx
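A semantic model of the masking applied above (hedged: the real lowering is a branch-free conditional move, not this branch): the architecturally dead out-of-bounds path is forced to a safe index, so a speculative load cannot read outside the object.

	func spectreIndexModel(i, n int64) int64 {
		if 0 <= i && i < n { // emitted as a compare-and-CMOV, never a branch
			return i
		}
		return 0 // misspeculated accesses read element 0 instead
	}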
@@ -5124,7 +5135,7 @@ func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
b.Likely = ssa.BranchLikely
bNext := s.f.NewBlock(ssa.BlockPlain)
line := s.peekPos()
- pos := Ctxt.PosTable.Pos(line)
+ pos := base.Ctxt.PosTable.Pos(line)
fl := funcLine{f: fn, base: pos.Base(), line: pos.Line()}
bPanic := s.panics[fl]
if bPanic == nil {
@@ -5140,7 +5151,7 @@ func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
s.startBlock(bNext)
}
-func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value {
+func (s *state) intDivide(n ir.Node, a, b *ssa.Value) *ssa.Value {
needcheck := true
switch b.Op {
case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
@@ -5150,10 +5161,10 @@ func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value {
}
if needcheck {
// do a size-appropriate check for zero
- cmp := s.newValue2(s.ssaOp(ONE, n.Type), types.Types[TBOOL], b, s.zeroVal(n.Type))
+ cmp := s.newValue2(s.ssaOp(ir.ONE, n.Type()), types.Types[types.TBOOL], b, s.zeroVal(n.Type()))
s.check(cmp, panicdivide)
}
- return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
// rtcall issues a call to the given runtime function fn with the listed args.
@@ -5163,7 +5174,7 @@ func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value {
func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value {
s.prevCall = nil
// Write args to the stack
- off := Ctxt.FixedFrameSize()
+ off := base.Ctxt.FixedFrameSize()
testLateExpansion := ssa.LateCallExpansionEnabledWithin(s.f)
var ACArgs []ssa.Param
var ACResults []ssa.Param
@@ -5199,10 +5210,10 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
callArgs = append(callArgs, s.mem())
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...)
- s.vars[&memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
+ s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
} else {
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
- s.vars[&memVar] = call
+ s.vars[memVar] = call
}
if !returns {
@@ -5210,7 +5221,7 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
b := s.endBlock()
b.Kind = ssa.BlockExit
b.SetControl(call)
- call.AuxInt = off - Ctxt.FixedFrameSize()
+ call.AuxInt = off - base.Ctxt.FixedFrameSize()
if len(results) > 0 {
s.Fatalf("panic call can't have results")
}
@@ -5252,7 +5263,7 @@ func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask,
if skip == 0 && (!t.HasPointers() || ssa.IsStackAddr(left)) {
// Known to not have write barrier. Store the whole type.
- s.vars[&memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt)
+ s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt)
return
}
@@ -5281,24 +5292,24 @@ func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip ski
if skip&skipLen != 0 {
return
}
- len := s.newValue1(ssa.OpStringLen, types.Types[TINT], right)
+ len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], right)
lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
- s.store(types.Types[TINT], lenAddr, len)
+ s.store(types.Types[types.TINT], lenAddr, len)
case t.IsSlice():
if skip&skipLen == 0 {
- len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], right)
+ len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], right)
lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
- s.store(types.Types[TINT], lenAddr, len)
+ s.store(types.Types[types.TINT], lenAddr, len)
}
if skip&skipCap == 0 {
- cap := s.newValue1(ssa.OpSliceCap, types.Types[TINT], right)
+ cap := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], right)
capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left)
- s.store(types.Types[TINT], capAddr, cap)
+ s.store(types.Types[types.TINT], capAddr, cap)
}
case t.IsInterface():
// itab field doesn't need a write barrier (even though it is a pointer).
itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
- s.store(types.Types[TUINTPTR], left, itab)
+ s.store(types.Types[types.TUINTPTR], left, itab)
case t.IsStruct():
n := t.NumFields()
for i := 0; i < n; i++ {
@@ -5359,7 +5370,7 @@ func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
// putArg evaluates n for the purpose of passing it as an argument to a function and returns the corresponding Param for the call.
// If forLateExpandedCall is true, it returns the argument value to pass to the call operation.
// If forLateExpandedCall is false, then the value is stored at the specified stack offset, and the returned value is nil.
-func (s *state) putArg(n *Node, t *types.Type, off int64, forLateExpandedCall bool) (ssa.Param, *ssa.Value) {
+func (s *state) putArg(n ir.Node, t *types.Type, off int64, forLateExpandedCall bool) (ssa.Param, *ssa.Value) {
var a *ssa.Value
if forLateExpandedCall {
if !canSSAType(t) {
@@ -5373,7 +5384,7 @@ func (s *state) putArg(n *Node, t *types.Type, off int64, forLateExpandedCall bo
return ssa.Param{Type: t, Offset: int32(off)}, a
}
-func (s *state) storeArgWithBase(n *Node, t *types.Type, base *ssa.Value, off int64) {
+func (s *state) storeArgWithBase(n ir.Node, t *types.Type, base *ssa.Value, off int64) {
pt := types.NewPtr(t)
var addr *ssa.Value
if base == s.sp {
@@ -5402,11 +5413,11 @@ func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value)
switch {
case t.IsSlice():
ptr = s.newValue1(ssa.OpSlicePtr, types.NewPtr(t.Elem()), v)
- len = s.newValue1(ssa.OpSliceLen, types.Types[TINT], v)
- cap = s.newValue1(ssa.OpSliceCap, types.Types[TINT], v)
+ len = s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], v)
+ cap = s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], v)
case t.IsString():
- ptr = s.newValue1(ssa.OpStringPtr, types.NewPtr(types.Types[TUINT8]), v)
- len = s.newValue1(ssa.OpStringLen, types.Types[TINT], v)
+ ptr = s.newValue1(ssa.OpStringPtr, types.NewPtr(types.Types[types.TUINT8]), v)
+ len = s.newValue1(ssa.OpStringLen, types.Types[types.TINT], v)
cap = len
case t.IsPtr():
if !t.Elem().IsArray() {
@@ -5414,7 +5425,7 @@ func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value)
}
s.nilCheck(v)
ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), v)
- len = s.constInt(types.Types[TINT], t.Elem().NumElem())
+ len = s.constInt(types.Types[types.TINT], t.Elem().NumElem())
cap = len
default:
s.Fatalf("bad type in slice %v\n", t)
@@ -5422,7 +5433,7 @@ func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value)
// Set default values
if i == nil {
- i = s.constInt(types.Types[TINT], 0)
+ i = s.constInt(types.Types[types.TINT], 0)
}
if j == nil {
j = len
@@ -5460,18 +5471,18 @@ func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value)
}
// Word-sized integer operations.
- subOp := s.ssaOp(OSUB, types.Types[TINT])
- mulOp := s.ssaOp(OMUL, types.Types[TINT])
- andOp := s.ssaOp(OAND, types.Types[TINT])
+ subOp := s.ssaOp(ir.OSUB, types.Types[types.TINT])
+ mulOp := s.ssaOp(ir.OMUL, types.Types[types.TINT])
+ andOp := s.ssaOp(ir.OAND, types.Types[types.TINT])
// Calculate the length (rlen) and capacity (rcap) of the new slice.
// For strings the capacity of the result is unimportant. However,
// we use rcap to test if we've generated a zero-length slice.
// Use length of strings for that.
- rlen := s.newValue2(subOp, types.Types[TINT], j, i)
+ rlen := s.newValue2(subOp, types.Types[types.TINT], j, i)
rcap := rlen
if j != k && !t.IsString() {
- rcap = s.newValue2(subOp, types.Types[TINT], k, i)
+ rcap = s.newValue2(subOp, types.Types[types.TINT], k, i)
}
if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 {
@@ -5493,15 +5504,15 @@ func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value)
//
// Where mask(x) is 0 if x==0 and -1 if x>0 and stride is the width
// of the element type.
- stride := s.constInt(types.Types[TINT], ptr.Type.Elem().Width)
+ stride := s.constInt(types.Types[types.TINT], ptr.Type.Elem().Width)
// The delta is the number of bytes to offset ptr by.
- delta := s.newValue2(mulOp, types.Types[TINT], i, stride)
+ delta := s.newValue2(mulOp, types.Types[types.TINT], i, stride)
// If we're slicing to the point where the capacity is zero,
// zero out the delta.
- mask := s.newValue1(ssa.OpSlicemask, types.Types[TINT], rcap)
- delta = s.newValue2(andOp, types.Types[TINT], delta, mask)
+ mask := s.newValue1(ssa.OpSlicemask, types.Types[types.TINT], rcap)
+ delta = s.newValue2(andOp, types.Types[types.TINT], delta, mask)
// Compute rptr = ptr + delta.
rptr := s.newValue2(ssa.OpAddPtr, ptr.Type, ptr, delta)
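A scalar sketch of the rptr computation for s[i:j:k], using the mask semantics stated above (mask(x) is 0 when x == 0 and -1 when x > 0):

	func slicePtr(ptr uintptr, i, rcap, stride int64) uintptr {
		delta := i * stride // byte offset of the new first element
		var mask int64      // ssa.OpSlicemask: 0 if rcap == 0, -1 otherwise
		if rcap > 0 {
			mask = -1
		}
		// a zero-capacity result keeps the old pointer, so rptr never
		// points one past the end of the backing array
		return ptr + uintptr(delta&mask)
	}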
@@ -5534,15 +5545,15 @@ var u64_f32 = u642fcvtTab{
one: (*state).constInt64,
}
-func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint64Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
}
-func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint64Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
}
-func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// if x >= 0 {
// result = (floatY) x
// } else {
@@ -5568,7 +5579,7 @@ func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt
// equal to 10000000001; that rounds up, and the 1 cannot
// be lost else it would round down if the LSB of the
// candidate mantissa is 0.
- cmp := s.newValue2(cvttab.leq, types.Types[TBOOL], s.zeroVal(ft), x)
+ cmp := s.newValue2(cvttab.leq, types.Types[types.TBOOL], s.zeroVal(ft), x)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
@@ -5598,7 +5609,7 @@ func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
- return s.variable(n, n.Type)
+ return s.variable(n, n.Type())
}
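This is the classic unsigned-to-float conversion for machines with only signed converts; a plain-Go sketch, including the low-bit trick the comment above describes for correct rounding:

	func uint64ToFloat64(x uint64) float64 {
		if int64(x) >= 0 { // x < 1<<63: the signed convert is exact
			return float64(int64(x))
		}
		// halve, but fold the discarded low bit back in so a value just
		// above a rounding boundary still rounds up after doubling
		y := (x >> 1) | (x & 1)
		r := float64(int64(y))
		return r + r
	}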
type u322fcvtTab struct {
@@ -5615,21 +5626,21 @@ var u32_f32 = u322fcvtTab{
cvtF2F: ssa.OpCvt64Fto32F,
}
-func (s *state) uint32Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint32Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
}
-func (s *state) uint32Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint32Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
}
-func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// if x >= 0 {
// result = floatY(x)
// } else {
// result = floatY(float64(x) + (1<<32))
// }
- cmp := s.newValue2(ssa.OpLeq32, types.Types[TBOOL], s.zeroVal(ft), x)
+ cmp := s.newValue2(ssa.OpLeq32, types.Types[types.TBOOL], s.zeroVal(ft), x)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
@@ -5648,9 +5659,9 @@ func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt
b.AddEdgeTo(bElse)
s.startBlock(bElse)
- a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[TFLOAT64], x)
- twoToThe32 := s.constFloat64(types.Types[TFLOAT64], float64(1<<32))
- a2 := s.newValue2(ssa.OpAdd64F, types.Types[TFLOAT64], a1, twoToThe32)
+ a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[types.TFLOAT64], x)
+ twoToThe32 := s.constFloat64(types.Types[types.TFLOAT64], float64(1<<32))
+ a2 := s.newValue2(ssa.OpAdd64F, types.Types[types.TFLOAT64], a1, twoToThe32)
a3 := s.newValue1(cvttab.cvtF2F, tt, a2)
s.vars[n] = a3
@@ -5658,12 +5669,12 @@ func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
- return s.variable(n, n.Type)
+ return s.variable(n, n.Type())
}
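The 32-bit variant is simpler because every uint32 value is exactly representable in a float64; a sketch mirroring the pseudocode above:

	func uint32ToFloat32(x uint32) float32 {
		if int32(x) >= 0 {
			return float32(int32(x))
		}
		// int32(x) equals x - 1<<32 here, so adding 1<<32 back recovers x exactly
		return float32(float64(int32(x)) + (1 << 32))
	}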
// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
-func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
- if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() {
+func (s *state) referenceTypeBuiltin(n ir.Node, x *ssa.Value) *ssa.Value {
+ if !n.Left().Type().IsMap() && !n.Left().Type().IsChan() {
s.Fatalf("node must be a map or a channel")
}
// if n == nil {
@@ -5674,9 +5685,9 @@ func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
// // cap
// return *(((*int)n)+1)
// }
- lenType := n.Type
- nilValue := s.constNil(types.Types[TUINTPTR])
- cmp := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], x, nilValue)
+ lenType := n.Type()
+ nilValue := s.constNil(types.Types[types.TUINTPTR])
+ cmp := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], x, nilValue)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
@@ -5695,11 +5706,11 @@ func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
b.AddEdgeTo(bElse)
s.startBlock(bElse)
- switch n.Op {
- case OLEN:
+ switch n.Op() {
+ case ir.OLEN:
// length is stored in the first word for map/chan
s.vars[n] = s.load(lenType, x)
- case OCAP:
+ case ir.OCAP:
// capacity is stored in the second word for chan
sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x)
s.vars[n] = s.load(lenType, sw)
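// Why one load suffices (a hedged sketch of the runtime layouts this
// depends on): the length is the first word of both headers, and the
// channel capacity is the second.
//
//	type hchan struct { qcount, dataqsiz uint; ... } // len(ch), cap(ch)
//	type hmap  struct { count int; ... }             // len(m)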
@@ -5760,22 +5771,22 @@ var f64_u32 = f2uCvtTab{
cutoff: 1 << 31,
}
-func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) float32ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f32_u64, n, x, ft, tt)
}
-func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) float64ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f64_u64, n, x, ft, tt)
}
-func (s *state) float32ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) float32ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f32_u32, n, x, ft, tt)
}
-func (s *state) float64ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) float64ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f64_u32, n, x, ft, tt)
}
-func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// cutoff:=1<<(intY_Size-1)
// if x < floatX(cutoff) {
// result = uintY(x)
@@ -5785,7 +5796,7 @@ func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *ty
// result = z | -(cutoff)
// }
cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
- cmp := s.newValue2(cvttab.ltf, types.Types[TBOOL], x, cutoff)
+ cmp := s.newValue2(cvttab.ltf, types.Types[types.TBOOL], x, cutoff)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
@@ -5813,31 +5824,31 @@ func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *ty
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
- return s.variable(n, n.Type)
+ return s.variable(n, n.Type())
}
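A plain-Go sketch of the cutoff scheme described in the comment above, for the float64-to-uint64 case: values at or above 1<<63 are shifted down into signed range, converted, and the top bit is restored.

	func float64ToUint64(x float64) uint64 {
		const cutoff = float64(1 << 63)
		if x < cutoff {
			return uint64(int64(x)) // fits the signed convert
		}
		y := x - cutoff      // now y < 1<<63
		z := uint64(int64(y))
		return z | (1 << 63) // restore the high bit: z | -(cutoff)
	}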
// dottype generates SSA for a type assertion node.
// commaok indicates whether to panic or return a bool.
// If commaok is false, resok will be nil.
-func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
- iface := s.expr(n.Left) // input interface
- target := s.expr(n.Right) // target type
+func (s *state) dottype(n ir.Node, commaok bool) (res, resok *ssa.Value) {
+ iface := s.expr(n.Left()) // input interface
+ target := s.expr(n.Right()) // target type
byteptr := s.f.Config.Types.BytePtr
- if n.Type.IsInterface() {
- if n.Type.IsEmptyInterface() {
+ if n.Type().IsInterface() {
+ if n.Type().IsEmptyInterface() {
// Converting to an empty interface.
// Input could be an empty or nonempty interface.
- if Debug_typeassert > 0 {
- Warnl(n.Pos, "type assertion inlined")
+ if base.Debug.TypeAssert > 0 {
+ base.WarnfAt(n.Pos(), "type assertion inlined")
}
// Get itab/type field from input.
itab := s.newValue1(ssa.OpITab, byteptr, iface)
// Conversion succeeds iff that field is not nil.
- cond := s.newValue2(ssa.OpNeqPtr, types.Types[TBOOL], itab, s.constNil(byteptr))
+ cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr))
- if n.Left.Type.IsEmptyInterface() && commaok {
+ if n.Left().Type().IsEmptyInterface() && commaok {
// Converting empty interface to empty interface with ,ok is just a nil check.
return iface, cond
}
@@ -5859,15 +5870,15 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
// On success, return (perhaps modified) input interface.
s.startBlock(bOk)
- if n.Left.Type.IsEmptyInterface() {
+ if n.Left().Type().IsEmptyInterface() {
res = iface // Use input interface unchanged.
return
}
// Load type out of itab, build interface with existing idata.
off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
typ := s.load(byteptr, off)
- idata := s.newValue1(ssa.OpIData, n.Type, iface)
- res = s.newValue2(ssa.OpIMake, n.Type, typ, idata)
+ idata := s.newValue1(ssa.OpIData, n.Type(), iface)
+ res = s.newValue2(ssa.OpIMake, n.Type(), typ, idata)
return
}
@@ -5875,12 +5886,12 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
// nonempty -> empty
// Need to load type from itab
off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
- s.vars[&typVar] = s.load(byteptr, off)
+ s.vars[typVar] = s.load(byteptr, off)
s.endBlock()
// itab is nil, might as well use that as the nil result.
s.startBlock(bFail)
- s.vars[&typVar] = itab
+ s.vars[typVar] = itab
s.endBlock()
// Merge point.
@@ -5888,60 +5899,60 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
bOk.AddEdgeTo(bEnd)
bFail.AddEdgeTo(bEnd)
s.startBlock(bEnd)
- idata := s.newValue1(ssa.OpIData, n.Type, iface)
- res = s.newValue2(ssa.OpIMake, n.Type, s.variable(&typVar, byteptr), idata)
+ idata := s.newValue1(ssa.OpIData, n.Type(), iface)
+ res = s.newValue2(ssa.OpIMake, n.Type(), s.variable(typVar, byteptr), idata)
resok = cond
- delete(s.vars, &typVar)
+ delete(s.vars, typVar)
return
}
// converting to a nonempty interface needs a runtime call.
- if Debug_typeassert > 0 {
- Warnl(n.Pos, "type assertion not inlined")
+ if base.Debug.TypeAssert > 0 {
+ base.WarnfAt(n.Pos(), "type assertion not inlined")
}
- if n.Left.Type.IsEmptyInterface() {
+ if n.Left().Type().IsEmptyInterface() {
if commaok {
- call := s.rtcall(assertE2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
+ call := s.rtcall(assertE2I2, true, []*types.Type{n.Type(), types.Types[types.TBOOL]}, target, iface)
return call[0], call[1]
}
- return s.rtcall(assertE2I, true, []*types.Type{n.Type}, target, iface)[0], nil
+ return s.rtcall(assertE2I, true, []*types.Type{n.Type()}, target, iface)[0], nil
}
if commaok {
- call := s.rtcall(assertI2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
+ call := s.rtcall(assertI2I2, true, []*types.Type{n.Type(), types.Types[types.TBOOL]}, target, iface)
return call[0], call[1]
}
- return s.rtcall(assertI2I, true, []*types.Type{n.Type}, target, iface)[0], nil
+ return s.rtcall(assertI2I, true, []*types.Type{n.Type()}, target, iface)[0], nil
}
- if Debug_typeassert > 0 {
- Warnl(n.Pos, "type assertion inlined")
+ if base.Debug.TypeAssert > 0 {
+ base.WarnfAt(n.Pos(), "type assertion inlined")
}
// Converting to a concrete type.
- direct := isdirectiface(n.Type)
+ direct := isdirectiface(n.Type())
itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
- if Debug_typeassert > 0 {
- Warnl(n.Pos, "type assertion inlined")
+ if base.Debug.TypeAssert > 0 {
+ base.WarnfAt(n.Pos(), "type assertion inlined")
}
var targetITab *ssa.Value
- if n.Left.Type.IsEmptyInterface() {
+ if n.Left().Type().IsEmptyInterface() {
// Looking for pointer to target type.
targetITab = target
} else {
// Looking for pointer to itab for target type and source interface.
- targetITab = s.expr(n.List.First())
+ targetITab = s.expr(n.List().First())
}
- var tmp *Node // temporary for use with large types
+ var tmp ir.Node // temporary for use with large types
var addr *ssa.Value // address of tmp
- if commaok && !canSSAType(n.Type) {
+ if commaok && !canSSAType(n.Type()) {
// unSSAable type, use temporary.
// TODO: get rid of some of these temporaries.
- tmp = tempAt(n.Pos, s.curfn, n.Type)
- s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem())
+ tmp = tempAt(n.Pos(), s.curfn, n.Type())
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem())
addr = s.addr(tmp)
}
- cond := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], itab, targetITab)
+ cond := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], itab, targetITab)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cond)
@@ -5955,8 +5966,8 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
if !commaok {
// on failure, panic by calling panicdottype
s.startBlock(bFail)
- taddr := s.expr(n.Right.Right)
- if n.Left.Type.IsEmptyInterface() {
+ taddr := s.expr(n.Right().Right())
+ if n.Left().Type().IsEmptyInterface() {
s.rtcall(panicdottypeE, false, nil, itab, target, taddr)
} else {
s.rtcall(panicdottypeI, false, nil, itab, target, taddr)
@@ -5965,10 +5976,10 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
// on success, return data from interface
s.startBlock(bOk)
if direct {
- return s.newValue1(ssa.OpIData, n.Type, iface), nil
+ return s.newValue1(ssa.OpIData, n.Type(), iface), nil
}
- p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
- return s.load(n.Type, p), nil
+ p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
+ return s.load(n.Type(), p), nil
}
// commaok is the more complicated case because we have
@@ -5976,52 +5987,52 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
bEnd := s.f.NewBlock(ssa.BlockPlain)
// Note that we need a new valVar each time (unlike okVar where we can
// reuse the variable) because it might have a different type every time.
- valVar := &Node{Op: ONAME, Sym: &types.Sym{Name: "val"}}
+ valVar := ssaMarker("val")
// type assertion succeeded
s.startBlock(bOk)
if tmp == nil {
if direct {
- s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type, iface)
+ s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type(), iface)
} else {
- p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
- s.vars[valVar] = s.load(n.Type, p)
+ p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
+ s.vars[valVar] = s.load(n.Type(), p)
}
} else {
- p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
- s.move(n.Type, addr, p)
+ p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
+ s.move(n.Type(), addr, p)
}
- s.vars[&okVar] = s.constBool(true)
+ s.vars[okVar] = s.constBool(true)
s.endBlock()
bOk.AddEdgeTo(bEnd)
// type assertion failed
s.startBlock(bFail)
if tmp == nil {
- s.vars[valVar] = s.zeroVal(n.Type)
+ s.vars[valVar] = s.zeroVal(n.Type())
} else {
- s.zero(n.Type, addr)
+ s.zero(n.Type(), addr)
}
- s.vars[&okVar] = s.constBool(false)
+ s.vars[okVar] = s.constBool(false)
s.endBlock()
bFail.AddEdgeTo(bEnd)
// merge point
s.startBlock(bEnd)
if tmp == nil {
- res = s.variable(valVar, n.Type)
+ res = s.variable(valVar, n.Type())
delete(s.vars, valVar)
} else {
- res = s.load(n.Type, addr)
- s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp, s.mem())
+ res = s.load(n.Type(), addr)
+ s.vars[memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp, s.mem())
}
- resok = s.variable(&okVar, types.Types[TBOOL])
- delete(s.vars, &okVar)
+ resok = s.variable(okVar, types.Types[types.TBOOL])
+ delete(s.vars, okVar)
return res, resok
}
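A compact model of what the commaok branches above compute for a concrete target type (names hypothetical): compare the interface's type word against the target itab, then either take the data word or produce the zero value.

	func assertConcreteModel(itabWord, targetITab, dataWord uintptr) (uintptr, bool) {
		if itabWord == targetITab {
			return dataWord, true // loaded through dataWord unless the type is pointer-shaped (isdirectiface)
		}
		return 0, false // zero value of the target type, ok == false
	}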
// variable returns the value of a variable at the current location.
-func (s *state) variable(name *Node, t *types.Type) *ssa.Value {
+func (s *state) variable(name ir.Node, t *types.Type) *ssa.Value {
v := s.vars[name]
if v != nil {
return v
@@ -6044,27 +6055,27 @@ func (s *state) variable(name *Node, t *types.Type) *ssa.Value {
}
func (s *state) mem() *ssa.Value {
- return s.variable(&memVar, types.TypeMem)
+ return s.variable(memVar, types.TypeMem)
}
-func (s *state) addNamedValue(n *Node, v *ssa.Value) {
- if n.Class() == Pxxx {
- // Don't track our dummy nodes (&memVar etc.).
+func (s *state) addNamedValue(n ir.Node, v *ssa.Value) {
+ if n.Class() == ir.Pxxx {
+ // Don't track our marker nodes (memVar etc.).
return
}
- if n.IsAutoTmp() {
+ if ir.IsAutoTmp(n) {
// Don't track temporary variables.
return
}
- if n.Class() == PPARAMOUT {
+ if n.Class() == ir.PPARAMOUT {
// Don't track named output values. This prevents return values
// from being assigned too early. See #14591 and #14762. TODO: allow this.
return
}
- if n.Class() == PAUTO && n.Xoffset != 0 {
- s.Fatalf("AUTO var with offset %v %d", n, n.Xoffset)
+ if n.Class() == ir.PAUTO && n.Offset() != 0 {
+ s.Fatalf("AUTO var with offset %v %d", n, n.Offset())
}
- loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0}
+ loc := ssa.LocalSlot{N: n, Type: n.Type(), Off: 0}
values, ok := s.f.NamedValues[loc]
if !ok {
s.f.Names = append(s.f.Names, loc)
@@ -6100,7 +6111,7 @@ type SSAGenState struct {
bstart []*obj.Prog
// Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include PPC and Sparc V8.
- ScratchFpMem *Node
+ ScratchFpMem ir.Node
maxarg int64 // largest frame size for arguments to calls made by the function
@@ -6183,16 +6194,16 @@ func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) {
}
-// byXoffset implements sort.Interface for []*Node using Xoffset as the ordering.
-type byXoffset []*Node
+// byXoffset implements sort.Interface for []ir.Node using Xoffset as the ordering.
+type byXoffset []ir.Node
func (s byXoffset) Len() int { return len(s) }
-func (s byXoffset) Less(i, j int) bool { return s[i].Xoffset < s[j].Xoffset }
+func (s byXoffset) Less(i, j int) bool { return s[i].Offset() < s[j].Offset() }
func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func emitStackObjects(e *ssafn, pp *Progs) {
- var vars []*Node
- for _, n := range e.curfn.Func.Dcl {
- if livenessShouldTrack(n) && n.Name.Addrtaken() {
+ var vars []ir.Node
+ for _, n := range e.curfn.Func().Dcl {
+ if livenessShouldTrack(n) && n.Name().Addrtaken() {
vars = append(vars, n)
}
}
@@ -6205,18 +6216,18 @@ func emitStackObjects(e *ssafn, pp *Progs) {
// Populate the stack object data.
// Format must match runtime/stack.go:stackObjectRecord.
- x := e.curfn.Func.lsym.Func().StackObjects
+ x := e.curfn.Func().LSym.Func().StackObjects
off := 0
off = duintptr(x, off, uint64(len(vars)))
for _, v := range vars {
// Note: arguments and return values have non-negative Xoffset,
// in which case the offset is relative to argp.
// Locals have a negative Xoffset, in which case the offset is relative to varp.
- off = duintptr(x, off, uint64(v.Xoffset))
- if !typesym(v.Type).Siggen() {
- e.Fatalf(v.Pos, "stack object's type symbol not generated for type %s", v.Type)
+ off = duintptr(x, off, uint64(v.Offset()))
+ if !typesym(v.Type()).Siggen() {
+ e.Fatalf(v.Pos(), "stack object's type symbol not generated for type %s", v.Type())
}
- off = dsymptr(x, off, dtypesym(v.Type), 0)
+ off = dsymptr(x, off, dtypesym(v.Type()), 0)
}
// Emit a funcdata pointing at the stack object data.
@@ -6226,9 +6237,9 @@ func emitStackObjects(e *ssafn, pp *Progs) {
p.To.Name = obj.NAME_EXTERN
p.To.Sym = x
- if debuglive != 0 {
+ if base.Flag.Live != 0 {
for _, v := range vars {
- Warnl(v.Pos, "stack object %v %s", v, v.Type.String())
+ base.WarnfAt(v.Pos(), "stack object %v %s", v, v.Type().String())
}
}
}
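The blob written above has a simple shape; a hedged sketch of the record stream (the authoritative definition is runtime/stack.go:stackObjectRecord):

	// funcdata emitted by emitStackObjects, per the writes above:
	//	uintptr: number of objects
	//	then, for each object:
	//	  uintptr: frame offset (non-negative: argp-relative; negative: varp-relative)
	//	  ptr:     the object's type descriptor symbol (dtypesym)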
@@ -6242,7 +6253,7 @@ func genssa(f *ssa.Func, pp *Progs) {
s.livenessMap = liveness(e, f, pp)
emitStackObjects(e, pp)
- openDeferInfo := e.curfn.Func.lsym.Func().OpenCodedDeferInfo
+ openDeferInfo := e.curfn.Func().LSym.Func().OpenCodedDeferInfo
if openDeferInfo != nil {
// This function uses open-coded defers -- write out the funcdata
// info that we computed at the end of genssa.
@@ -6268,7 +6279,7 @@ func genssa(f *ssa.Func, pp *Progs) {
s.ScratchFpMem = e.scratchFpMem
- if Ctxt.Flag_locationlists {
+ if base.Ctxt.Flag_locationlists {
if cap(f.Cache.ValueToProgAfter) < f.NumValues() {
f.Cache.ValueToProgAfter = make([]*obj.Prog, f.NumValues())
}
@@ -6364,7 +6375,7 @@ func genssa(f *ssa.Func, pp *Progs) {
thearch.SSAGenValue(&s, v)
}
- if Ctxt.Flag_locationlists {
+ if base.Ctxt.Flag_locationlists {
valueToProgAfter[v.ID] = s.pp.next
}
@@ -6388,7 +6399,7 @@ func genssa(f *ssa.Func, pp *Progs) {
}
// Emit control flow instructions for block
var next *ssa.Block
- if i < len(f.Blocks)-1 && Debug.N == 0 {
+ if i < len(f.Blocks)-1 && base.Flag.N == 0 {
// If -N, leave next==nil so every block with successors
// ends in a JMP (except call blocks - plive doesn't like
// select{send,recv} followed by a JMP call). Helps keep
@@ -6447,7 +6458,7 @@ func genssa(f *ssa.Func, pp *Progs) {
// some of the inline marks.
// Use this instruction instead.
p.Pos = p.Pos.WithIsStmt() // promote position to a statement
- pp.curfn.Func.lsym.Func().AddInlMark(p, inlMarks[m])
+ pp.curfn.Func().LSym.Func().AddInlMark(p, inlMarks[m])
// Make the inline mark a real nop, so it doesn't generate any code.
m.As = obj.ANOP
m.Pos = src.NoXPos
@@ -6459,18 +6470,19 @@ func genssa(f *ssa.Func, pp *Progs) {
// Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction).
for _, p := range inlMarkList {
if p.As != obj.ANOP {
- pp.curfn.Func.lsym.Func().AddInlMark(p, inlMarks[p])
+ pp.curfn.Func().LSym.Func().AddInlMark(p, inlMarks[p])
}
}
}
- if Ctxt.Flag_locationlists {
- e.curfn.Func.DebugInfo = ssa.BuildFuncDebug(Ctxt, f, Debug_locationlist > 1, stackOffset)
+ if base.Ctxt.Flag_locationlists {
+ debugInfo := ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, stackOffset)
+ e.curfn.Func().DebugInfo = debugInfo
bstart := s.bstart
// Note that at this moment, Prog.Pc is a sequence number; it's
// not a real PC until after assembly, so this mapping has to
// be done later.
- e.curfn.Func.DebugInfo.GetPC = func(b, v ssa.ID) int64 {
+ debugInfo.GetPC = func(b, v ssa.ID) int64 {
switch v {
case ssa.BlockStart.ID:
if b == f.Entry.ID {
@@ -6479,7 +6491,7 @@ func genssa(f *ssa.Func, pp *Progs) {
}
return bstart[b].Pc
case ssa.BlockEnd.ID:
- return e.curfn.Func.lsym.Size
+ return e.curfn.Func().LSym.Size
default:
return valueToProgAfter[v].Pc
}
@@ -6563,7 +6575,7 @@ func defframe(s *SSAGenState, e *ssafn) {
// Fill in argument and frame size.
pp.Text.To.Type = obj.TYPE_TEXTSIZE
- pp.Text.To.Val = int32(Rnd(e.curfn.Type.ArgWidth(), int64(Widthreg)))
+ pp.Text.To.Val = int32(Rnd(e.curfn.Type().ArgWidth(), int64(Widthreg)))
pp.Text.To.Offset = frame
// Insert code to zero ambiguously live variables so that the
@@ -6577,20 +6589,20 @@ func defframe(s *SSAGenState, e *ssafn) {
var state uint32
- // Iterate through declarations. They are sorted in decreasing Xoffset order.
+ // Iterate through declarations. They are sorted in decreasing Offset order.
- for _, n := range e.curfn.Func.Dcl {
- if !n.Name.Needzero() {
+ for _, n := range e.curfn.Func().Dcl {
+ if !n.Name().Needzero() {
continue
}
- if n.Class() != PAUTO {
- e.Fatalf(n.Pos, "needzero class %d", n.Class())
+ if n.Class() != ir.PAUTO {
+ e.Fatalf(n.Pos(), "needzero class %d", n.Class())
}
- if n.Type.Size()%int64(Widthptr) != 0 || n.Xoffset%int64(Widthptr) != 0 || n.Type.Size() == 0 {
- e.Fatalf(n.Pos, "var %L has size %d offset %d", n, n.Type.Size(), n.Xoffset)
+ if n.Type().Size()%int64(Widthptr) != 0 || n.Offset()%int64(Widthptr) != 0 || n.Type().Size() == 0 {
+ e.Fatalf(n.Pos(), "var %L has size %d offset %d", n, n.Type().Size(), n.Offset())
}
- if lo != hi && n.Xoffset+n.Type.Size() >= lo-int64(2*Widthreg) {
+ if lo != hi && n.Offset()+n.Type().Size() >= lo-int64(2*Widthreg) {
// Merge with range we already have.
- lo = n.Xoffset
+ lo = n.Offset()
continue
}
@@ -6598,8 +6610,8 @@ func defframe(s *SSAGenState, e *ssafn) {
p = thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
// Set new range.
- lo = n.Xoffset
- hi = lo + n.Type.Size()
+ lo = n.Offset()
+ hi = lo + n.Type().Size()
}
// Zero final range.
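// Editor's sketch of the merging rule above as ordinary Go, outside the
// compiler: ranges whose gap is at most 2*widthreg bytes are coalesced, so a
// few wasted zero stores are traded for fewer ZeroRange sequences. The input
// mirrors Dcl order: (offset, size) pairs sorted by decreasing offset; the
// helper name is hypothetical.
func mergeZeroRanges(vars [][2]int64, widthreg int64) [][2]int64 {
	var out [][2]int64
	lo, hi := int64(0), int64(0) // current range; empty while lo == hi
	for _, v := range vars {
		off, size := v[0], v[1]
		if lo != hi && off+size >= lo-2*widthreg {
			lo = off // close enough: extend the current range downward
			continue
		}
		if lo != hi {
			out = append(out, [2]int64{lo, hi}) // flush, like ZeroRange above
		}
		lo, hi = off, off+size
	}
	if lo != hi {
		out = append(out, [2]int64{lo, hi}) // zero final range
	}
	return out
}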
@@ -6665,16 +6677,16 @@ func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
case *obj.LSym:
a.Name = obj.NAME_EXTERN
a.Sym = n
- case *Node:
- if n.Class() == PPARAM || n.Class() == PPARAMOUT {
+ case ir.Node:
+ if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
a.Name = obj.NAME_PARAM
- a.Sym = n.Orig.Sym.Linksym()
- a.Offset += n.Xoffset
+ a.Sym = n.Orig().Sym().Linksym()
+ a.Offset += n.Offset()
break
}
a.Name = obj.NAME_AUTO
- a.Sym = n.Sym.Linksym()
- a.Offset += n.Xoffset
+ a.Sym = n.Sym().Linksym()
+ a.Offset += n.Offset()
default:
v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
}
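// Editor's sketch of the class dispatch above, with a hypothetical helper
// (assuming the package's existing ir and obj imports): parameters and
// results are addressed through the argument frame, every other auto through
// the local frame.
func setAddrName(a *obj.Addr, class ir.Class) {
	if class == ir.PPARAM || class == ir.PPARAMOUT {
		a.Name = obj.NAME_PARAM // offset is argp-relative
		return
	}
	a.Name = obj.NAME_AUTO // offset is varp-relative
}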
@@ -6692,17 +6704,17 @@ func (s *state) extendIndex(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bo
// high word and branch to out-of-bounds failure if it is not 0.
var lo *ssa.Value
if idx.Type.IsSigned() {
- lo = s.newValue1(ssa.OpInt64Lo, types.Types[TINT], idx)
+ lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TINT], idx)
} else {
- lo = s.newValue1(ssa.OpInt64Lo, types.Types[TUINT], idx)
+ lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TUINT], idx)
}
- if bounded || Debug.B != 0 {
+ if bounded || base.Flag.B != 0 {
return lo
}
bNext := s.f.NewBlock(ssa.BlockPlain)
bPanic := s.f.NewBlock(ssa.BlockExit)
- hi := s.newValue1(ssa.OpInt64Hi, types.Types[TUINT32], idx)
- cmp := s.newValue2(ssa.OpEq32, types.Types[TBOOL], hi, s.constInt32(types.Types[TUINT32], 0))
+ hi := s.newValue1(ssa.OpInt64Hi, types.Types[types.TUINT32], idx)
+ cmp := s.newValue2(ssa.OpEq32, types.Types[types.TBOOL], hi, s.constInt32(types.Types[types.TUINT32], 0))
if !idx.Type.IsSigned() {
switch kind {
case ssa.BoundsIndex:
@@ -6771,7 +6783,7 @@ func (s *state) extendIndex(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bo
s.Fatalf("bad unsigned index extension %s", idx.Type)
}
}
- return s.newValue1(op, types.Types[TINT], idx)
+ return s.newValue1(op, types.Types[types.TINT], idx)
}
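// Editor's sketch of the 64-bit-index check above, in plain Go with a
// hypothetical uint64 stand-in for the SSA value: on a 32-bit target the
// index is split into halves, and a nonzero high word can never be a valid
// index, so it funnels to the bounds panic.
func split64BitIndex(idx uint64) (lo uint32, inBounds bool) {
	hi := uint32(idx >> 32)
	return uint32(idx), hi == 0 // hi != 0 => branch to bPanic
}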
// CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
@@ -6798,27 +6810,27 @@ func CheckLoweredPhi(v *ssa.Value) {
func CheckLoweredGetClosurePtr(v *ssa.Value) {
entry := v.Block.Func.Entry
if entry != v.Block || entry.Values[0] != v {
- Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
+ base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
}
}
- // AutoVar returns a *Node and int64 representing the auto variable and offset within it
+ // AutoVar returns an ir.Node and int64 representing the auto variable and offset within it
// where v should be spilled.
-func AutoVar(v *ssa.Value) (*Node, int64) {
+func AutoVar(v *ssa.Value) (ir.Node, int64) {
loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot)
if v.Type.Size() > loc.Type.Size() {
v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
}
- return loc.N.(*Node), loc.Off
+ return loc.N, loc.Off
}
func AddrAuto(a *obj.Addr, v *ssa.Value) {
n, off := AutoVar(v)
a.Type = obj.TYPE_MEM
- a.Sym = n.Sym.Linksym()
+ a.Sym = n.Sym().Linksym()
a.Reg = int16(thearch.REGSP)
- a.Offset = n.Xoffset + off
- if n.Class() == PPARAM || n.Class() == PPARAMOUT {
+ a.Offset = n.Offset() + off
+ if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
a.Name = obj.NAME_PARAM
} else {
a.Name = obj.NAME_AUTO
@@ -6831,9 +6843,9 @@ func (s *SSAGenState) AddrScratch(a *obj.Addr) {
}
a.Type = obj.TYPE_MEM
a.Name = obj.NAME_AUTO
- a.Sym = s.ScratchFpMem.Sym.Linksym()
+ a.Sym = s.ScratchFpMem.Sym().Linksym()
a.Reg = int16(thearch.REGSP)
- a.Offset = s.ScratchFpMem.Xoffset
+ a.Offset = s.ScratchFpMem.Offset()
}
// Call returns a new CALL instruction for the SSA value v.
@@ -6860,7 +6872,7 @@ func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
p.To.Type = obj.TYPE_MEM
default:
- Fatalf("unknown indirect call family")
+ base.Fatalf("unknown indirect call family")
}
p.To.Reg = v.Args[0].Reg()
}
@@ -6875,7 +6887,7 @@ func (s *SSAGenState) PrepareCall(v *ssa.Value) {
if !idx.StackMapValid() {
// See Liveness.hasStackMap.
if sym, ok := v.Aux.(*ssa.AuxCall); !ok || !(sym.Fn == typedmemclr || sym.Fn == typedmemmove) {
- Fatalf("missing stack map index for %v", v.LongString())
+ base.Fatalf("missing stack map index for %v", v.LongString())
}
}
@@ -6915,9 +6927,9 @@ func (s *SSAGenState) UseArgs(n int64) {
}
// fieldIdx finds the index of the field referred to by the ODOT node n.
-func fieldIdx(n *Node) int {
- t := n.Left.Type
- f := n.Sym
+func fieldIdx(n ir.Node) int {
+ t := n.Left().Type()
+ f := n.Sym()
if !t.IsStruct() {
panic("ODOT's LHS is not a struct")
}
@@ -6928,7 +6940,7 @@ func fieldIdx(n *Node) int {
i++
continue
}
- if t1.Offset != n.Xoffset {
+ if t1.Offset != n.Offset() {
panic("field offset doesn't match")
}
return i
@@ -6942,9 +6954,9 @@ func fieldIdx(n *Node) int {
// ssafn holds frontend information about a function that the backend is processing.
// It also exports a bunch of compiler services for the ssa backend.
type ssafn struct {
- curfn *Node
+ curfn ir.Node
strings map[string]*obj.LSym // map from constant string to data symbols
- scratchFpMem *Node // temp for floating point register / memory moves on some architectures
+ scratchFpMem ir.Node // temp for floating point register / memory moves on some architectures
stksize int64 // stack size for current frame
stkptrsize int64 // prefix of stack containing pointers
log bool // print ssa debug to the stdout
@@ -6959,19 +6971,19 @@ func (e *ssafn) StringData(s string) *obj.LSym {
if e.strings == nil {
e.strings = make(map[string]*obj.LSym)
}
- data := stringsym(e.curfn.Pos, s)
+ data := stringsym(e.curfn.Pos(), s)
e.strings[s] = data
return data
}
-func (e *ssafn) Auto(pos src.XPos, t *types.Type) ssa.GCNode {
+func (e *ssafn) Auto(pos src.XPos, t *types.Type) ir.Node {
- n := tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
+ n := tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func().Dcl list
return n
}
func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
- ptrType := types.NewPtr(types.Types[TUINT8])
- lenType := types.Types[TINT]
+ ptrType := types.NewPtr(types.Types[types.TUINT8])
+ lenType := types.Types[types.TINT]
// Split this string up into two separate variables.
p := e.SplitSlot(&name, ".ptr", 0, ptrType)
l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType)
@@ -6979,12 +6991,12 @@ func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
}
func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
- n := name.N.(*Node)
- u := types.Types[TUINTPTR]
- t := types.NewPtr(types.Types[TUINT8])
+ n := name.N
+ u := types.Types[types.TUINTPTR]
+ t := types.NewPtr(types.Types[types.TUINT8])
// Split this interface up into two separate variables.
f := ".itab"
- if n.Type.IsEmptyInterface() {
+ if n.Type().IsEmptyInterface() {
f = ".type"
}
c := e.SplitSlot(&name, f, 0, u) // see comment in plive.go:onebitwalktype1.
@@ -6994,7 +7006,7 @@ func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot
func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
ptrType := types.NewPtr(name.Type.Elem())
- lenType := types.Types[TINT]
+ lenType := types.Types[types.TINT]
p := e.SplitSlot(&name, ".ptr", 0, ptrType)
l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType)
c := e.SplitSlot(&name, ".cap", ptrType.Size()+lenType.Size(), lenType)
@@ -7005,9 +7017,9 @@ func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot)
s := name.Type.Size() / 2
var t *types.Type
if s == 8 {
- t = types.Types[TFLOAT64]
+ t = types.Types[types.TFLOAT64]
} else {
- t = types.Types[TFLOAT32]
+ t = types.Types[types.TFLOAT32]
}
r := e.SplitSlot(&name, ".real", 0, t)
i := e.SplitSlot(&name, ".imag", t.Size(), t)
@@ -7017,14 +7029,14 @@ func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot)
func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
var t *types.Type
if name.Type.IsSigned() {
- t = types.Types[TINT32]
+ t = types.Types[types.TINT32]
} else {
- t = types.Types[TUINT32]
+ t = types.Types[types.TUINT32]
}
if thearch.LinkArch.ByteOrder == binary.BigEndian {
- return e.SplitSlot(&name, ".hi", 0, t), e.SplitSlot(&name, ".lo", t.Size(), types.Types[TUINT32])
+ return e.SplitSlot(&name, ".hi", 0, t), e.SplitSlot(&name, ".lo", t.Size(), types.Types[types.TUINT32])
}
- return e.SplitSlot(&name, ".hi", t.Size(), t), e.SplitSlot(&name, ".lo", 0, types.Types[TUINT32])
+ return e.SplitSlot(&name, ".hi", t.Size(), t), e.SplitSlot(&name, ".lo", 0, types.Types[types.TUINT32])
}
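// Editor's sketch of the offset choice above: within the 8-byte slot, the
// high half sits at offset 0 on big-endian targets and at offset 4 on
// little-endian ones, with the low half taking the other four bytes. The
// helper name is hypothetical.
func int64HalfOffsets(bigEndian bool) (hiOff, loOff int64) {
	if bigEndian {
		return 0, 4 // .hi first in memory
	}
	return 4, 0 // .lo first in memory
}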
func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
@@ -7036,10 +7048,10 @@ func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
}
func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
- n := name.N.(*Node)
+ n := name.N
at := name.Type
if at.NumElem() != 1 {
- e.Fatalf(n.Pos, "bad array size")
+ e.Fatalf(n.Pos(), "bad array size")
}
et := at.Elem()
return e.SplitSlot(&name, "[0]", 0, et)
@@ -7051,30 +7063,22 @@ func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
// SplitSlot returns a slot representing the data of parent starting at offset.
func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
- node := parent.N.(*Node)
+ node := parent.N
- if node.Class() != PAUTO || node.Name.Addrtaken() {
+ if node.Class() != ir.PAUTO || node.Name().Addrtaken() {
// addressed things and non-autos retain their parents (i.e., cannot truly be split)
return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset}
}
- s := &types.Sym{Name: node.Sym.Name + suffix, Pkg: localpkg}
-
- n := &Node{
- Name: new(Name),
- Op: ONAME,
- Pos: parent.N.(*Node).Pos,
- }
- n.Orig = n
-
- s.Def = asTypesNode(n)
- asNode(s.Def).Name.SetUsed(true)
- n.Sym = s
- n.Type = t
- n.SetClass(PAUTO)
- n.Esc = EscNever
- n.Name.Curfn = e.curfn
- e.curfn.Func.Dcl = append(e.curfn.Func.Dcl, n)
+ s := &types.Sym{Name: node.Sym().Name + suffix, Pkg: ir.LocalPkg}
+ n := ir.NewNameAt(parent.N.Pos(), s)
+ s.Def = n
+ ir.AsNode(s.Def).Name().SetUsed(true)
+ n.SetType(t)
+ n.SetClass(ir.PAUTO)
+ n.SetEsc(EscNever)
+ n.Name().Curfn = e.curfn
+ e.curfn.Func().Dcl = append(e.curfn.Func().Dcl, n)
dowidth(t)
return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
}
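// Editor's note on the two outcomes above, using a hypothetical local: for an
// unaddressed PAUTO string "s", SplitSlot manufactures fresh autos "s.ptr"
// and "s.len", each owning its own slot (Off 0). If "s" is address-taken,
// the split is refused and both halves stay inside the parent slot,
// distinguished only by their Off within it.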
@@ -7084,7 +7088,7 @@ func (e *ssafn) CanSSA(t *types.Type) bool {
}
func (e *ssafn) Line(pos src.XPos) string {
- return linestr(pos)
+ return base.FmtPos(pos)
}
// Log logs a message from the compiler.
@@ -7100,23 +7104,23 @@ func (e *ssafn) Log() bool {
- // Fatal reports a compiler error and exits.
+ // Fatalf reports a compiler error and exits.
func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) {
- lineno = pos
- nargs := append([]interface{}{e.curfn.funcname()}, args...)
- Fatalf("'%s': "+msg, nargs...)
+ base.Pos = pos
+ nargs := append([]interface{}{ir.FuncName(e.curfn)}, args...)
+ base.Fatalf("'%s': "+msg, nargs...)
}
// Warnl reports a "warning", which is usually flag-triggered
// logging output for the benefit of tests.
func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
- Warnl(pos, fmt_, args...)
+ base.WarnfAt(pos, fmt_, args...)
}
func (e *ssafn) Debug_checknil() bool {
- return Debug_checknil != 0
+ return base.Debug.Nil != 0
}
func (e *ssafn) UseWriteBarrier() bool {
- return use_writebarrier
+ return base.Flag.WB
}
func (e *ssafn) Syslook(name string) *obj.LSym {
@@ -7137,36 +7141,19 @@ func (e *ssafn) Syslook(name string) *obj.LSym {
}
func (e *ssafn) SetWBPos(pos src.XPos) {
- e.curfn.Func.setWBPos(pos)
+ e.curfn.Func().SetWBPos(pos)
}
func (e *ssafn) MyImportPath() string {
- return myimportpath
-}
-
-func (n *Node) Typ() *types.Type {
- return n.Type
-}
-func (n *Node) StorageClass() ssa.StorageClass {
- switch n.Class() {
- case PPARAM:
- return ssa.ClassParam
- case PPARAMOUT:
- return ssa.ClassParamOut
- case PAUTO:
- return ssa.ClassAuto
- default:
- Fatalf("untranslatable storage class for %v: %s", n, n.Class())
- return 0
- }
+ return base.Ctxt.Pkgpath
}
-func clobberBase(n *Node) *Node {
- if n.Op == ODOT && n.Left.Type.NumFields() == 1 {
- return clobberBase(n.Left)
+func clobberBase(n ir.Node) ir.Node {
+ if n.Op() == ir.ODOT && n.Left().Type().NumFields() == 1 {
+ return clobberBase(n.Left())
}
- if n.Op == OINDEX && n.Left.Type.IsArray() && n.Left.Type.NumElem() == 1 {
- return clobberBase(n.Left)
+ if n.Op() == ir.OINDEX && n.Left().Type().IsArray() && n.Left().Type().NumElem() == 1 {
+ return clobberBase(n.Left())
}
return n
}
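// Editor's sketch of clobberBase on hypothetical types: when a struct has
// exactly one field, or an array exactly one element, a write to the member
// overwrites the whole variable, so the clobber can be recorded against the
// base instead.
type oneField struct{ f int }

func clobberBaseExample() {
	var t oneField
	t.f = 1 // clobberBase of the ODOT t.f resolves to t itself
	var a [1]int
	a[0] = 1 // clobberBase of the OINDEX a[0] resolves to a itself
}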