Diffstat (limited to 'src/cmd/compile/internal/gc/walk.go')
-rw-r--r--  src/cmd/compile/internal/gc/walk.go | 2745
1 file changed, 1374 insertions(+), 1371 deletions(-)
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
index a7b6e7fcb3..db8791ee05 100644
--- a/src/cmd/compile/internal/gc/walk.go
+++ b/src/cmd/compile/internal/gc/walk.go
@@ -5,12 +5,16 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/sys"
"encoding/binary"
"fmt"
+ "go/constant"
+ "go/token"
"strings"
)
@@ -18,79 +22,80 @@ import (
const tmpstringbufsize = 32
const zeroValSize = 1024 // must match value of runtime/map.go:maxZero
-func walk(fn *Node) {
+func walk(fn ir.Node) {
Curfn = fn
+ errorsBefore := base.Errors()
- if Debug.W != 0 {
- s := fmt.Sprintf("\nbefore walk %v", Curfn.Func.Nname.Sym)
- dumplist(s, Curfn.Nbody)
+ if base.Flag.W != 0 {
+ s := fmt.Sprintf("\nbefore walk %v", Curfn.Func().Nname.Sym())
+ ir.DumpList(s, Curfn.Body())
}
- lno := lineno
+ lno := base.Pos
// Final typecheck for any unused variables.
- for i, ln := range fn.Func.Dcl {
- if ln.Op == ONAME && (ln.Class() == PAUTO || ln.Class() == PAUTOHEAP) {
+ for i, ln := range fn.Func().Dcl {
+ if ln.Op() == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) {
ln = typecheck(ln, ctxExpr|ctxAssign)
- fn.Func.Dcl[i] = ln
+ fn.Func().Dcl[i] = ln
}
}
// Propagate the used flag for typeswitch variables up to the NONAME in its definition.
- for _, ln := range fn.Func.Dcl {
- if ln.Op == ONAME && (ln.Class() == PAUTO || ln.Class() == PAUTOHEAP) && ln.Name.Defn != nil && ln.Name.Defn.Op == OTYPESW && ln.Name.Used() {
- ln.Name.Defn.Left.Name.SetUsed(true)
+ for _, ln := range fn.Func().Dcl {
+ if ln.Op() == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) && ln.Name().Defn != nil && ln.Name().Defn.Op() == ir.OTYPESW && ln.Name().Used() {
+ ln.Name().Defn.Left().Name().SetUsed(true)
}
}
- for _, ln := range fn.Func.Dcl {
- if ln.Op != ONAME || (ln.Class() != PAUTO && ln.Class() != PAUTOHEAP) || ln.Sym.Name[0] == '&' || ln.Name.Used() {
+ for _, ln := range fn.Func().Dcl {
+ if ln.Op() != ir.ONAME || (ln.Class() != ir.PAUTO && ln.Class() != ir.PAUTOHEAP) || ln.Sym().Name[0] == '&' || ln.Name().Used() {
continue
}
- if defn := ln.Name.Defn; defn != nil && defn.Op == OTYPESW {
- if defn.Left.Name.Used() {
+ if defn := ln.Name().Defn; defn != nil && defn.Op() == ir.OTYPESW {
+ if defn.Left().Name().Used() {
continue
}
- yyerrorl(defn.Left.Pos, "%v declared but not used", ln.Sym)
- defn.Left.Name.SetUsed(true) // suppress repeats
+ base.ErrorfAt(defn.Left().Pos(), "%v declared but not used", ln.Sym())
+ defn.Left().Name().SetUsed(true) // suppress repeats
} else {
- yyerrorl(ln.Pos, "%v declared but not used", ln.Sym)
+ base.ErrorfAt(ln.Pos(), "%v declared but not used", ln.Sym())
}
}
- lineno = lno
- if nerrors != 0 {
+ base.Pos = lno
+ if base.Errors() > errorsBefore {
return
}
- walkstmtlist(Curfn.Nbody.Slice())
- if Debug.W != 0 {
- s := fmt.Sprintf("after walk %v", Curfn.Func.Nname.Sym)
- dumplist(s, Curfn.Nbody)
+ walkstmtlist(Curfn.Body().Slice())
+ if base.Flag.W != 0 {
+ s := fmt.Sprintf("after walk %v", Curfn.Func().Nname.Sym())
+ ir.DumpList(s, Curfn.Body())
}
zeroResults()
heapmoves()
- if Debug.W != 0 && Curfn.Func.Enter.Len() > 0 {
- s := fmt.Sprintf("enter %v", Curfn.Func.Nname.Sym)
- dumplist(s, Curfn.Func.Enter)
+ if base.Flag.W != 0 && Curfn.Func().Enter.Len() > 0 {
+ s := fmt.Sprintf("enter %v", Curfn.Func().Nname.Sym())
+ ir.DumpList(s, Curfn.Func().Enter)
}
}
-func walkstmtlist(s []*Node) {
+func walkstmtlist(s []ir.Node) {
for i := range s {
s[i] = walkstmt(s[i])
}
}
-func paramoutheap(fn *Node) bool {
- for _, ln := range fn.Func.Dcl {
+func paramoutheap(fn ir.Node) bool {
+ for _, ln := range fn.Func().Dcl {
switch ln.Class() {
- case PPARAMOUT:
- if ln.isParamStackCopy() || ln.Name.Addrtaken() {
+ case ir.PPARAMOUT:
+ if isParamStackCopy(ln) || ln.Name().Addrtaken() {
return true
}
- case PAUTO:
+ case ir.PAUTO:
// stop early - parameters are over
return false
}
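
Most of this change is mechanical: direct field access on the old *gc.Node (n.Op, n.Left, n.Ninit) becomes method calls on the new ir.Node interface (n.Op(), n.Left(), n.PtrInit()). A minimal sketch of that accessor pattern, with hypothetical types standing in for the real ir package:

    package main

    import "fmt"

    // Node mimics the getter/setter style adopted here; the real
    // ir.Node interface has many more methods.
    type Node interface {
        Op() string
        Left() Node
        SetLeft(Node)
    }

    type expr struct {
        op   string
        left Node
    }

    func (n *expr) Op() string     { return n.op }
    func (n *expr) Left() Node     { return n.left }
    func (n *expr) SetLeft(l Node) { n.left = l }

    func main() {
        n := &expr{op: "OADD"}
        n.SetLeft(&expr{op: "OLITERAL"})
        fmt.Println(n.Op(), n.Left().Op()) // OADD OLITERAL
    }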
@@ -101,237 +106,237 @@ func paramoutheap(fn *Node) bool {
// The result of walkstmt MUST be assigned back to n, e.g.
// n.Left = walkstmt(n.Left)
-func walkstmt(n *Node) *Node {
+func walkstmt(n ir.Node) ir.Node {
if n == nil {
return n
}
setlineno(n)
- walkstmtlist(n.Ninit.Slice())
+ walkstmtlist(n.Init().Slice())
- switch n.Op {
+ switch n.Op() {
default:
- if n.Op == ONAME {
- yyerror("%v is not a top level statement", n.Sym)
+ if n.Op() == ir.ONAME {
+ base.Errorf("%v is not a top level statement", n.Sym())
} else {
- yyerror("%v is not a top level statement", n.Op)
- }
- Dump("nottop", n)
-
- case OAS,
- OASOP,
- OAS2,
- OAS2DOTTYPE,
- OAS2RECV,
- OAS2FUNC,
- OAS2MAPR,
- OCLOSE,
- OCOPY,
- OCALLMETH,
- OCALLINTER,
- OCALL,
- OCALLFUNC,
- ODELETE,
- OSEND,
- OPRINT,
- OPRINTN,
- OPANIC,
- OEMPTY,
- ORECOVER,
- OGETG:
+ base.Errorf("%v is not a top level statement", n.Op())
+ }
+ ir.Dump("nottop", n)
+
+ case ir.OAS,
+ ir.OASOP,
+ ir.OAS2,
+ ir.OAS2DOTTYPE,
+ ir.OAS2RECV,
+ ir.OAS2FUNC,
+ ir.OAS2MAPR,
+ ir.OCLOSE,
+ ir.OCOPY,
+ ir.OCALLMETH,
+ ir.OCALLINTER,
+ ir.OCALL,
+ ir.OCALLFUNC,
+ ir.ODELETE,
+ ir.OSEND,
+ ir.OPRINT,
+ ir.OPRINTN,
+ ir.OPANIC,
+ ir.OEMPTY,
+ ir.ORECOVER,
+ ir.OGETG:
if n.Typecheck() == 0 {
- Fatalf("missing typecheck: %+v", n)
+ base.Fatalf("missing typecheck: %+v", n)
}
- wascopy := n.Op == OCOPY
- init := n.Ninit
- n.Ninit.Set(nil)
+ wascopy := n.Op() == ir.OCOPY
+ init := n.Init()
+ n.PtrInit().Set(nil)
n = walkexpr(n, &init)
n = addinit(n, init.Slice())
- if wascopy && n.Op == OCONVNOP {
- n.Op = OEMPTY // don't leave plain values as statements.
+ if wascopy && n.Op() == ir.OCONVNOP {
+ n.SetOp(ir.OEMPTY) // don't leave plain values as statements.
}
// special case for a receive where we throw away
// the value received.
- case ORECV:
+ case ir.ORECV:
if n.Typecheck() == 0 {
- Fatalf("missing typecheck: %+v", n)
+ base.Fatalf("missing typecheck: %+v", n)
}
- init := n.Ninit
- n.Ninit.Set(nil)
+ init := n.Init()
+ n.PtrInit().Set(nil)
- n.Left = walkexpr(n.Left, &init)
- n = mkcall1(chanfn("chanrecv1", 2, n.Left.Type), nil, &init, n.Left, nodnil())
+ n.SetLeft(walkexpr(n.Left(), &init))
+ n = mkcall1(chanfn("chanrecv1", 2, n.Left().Type()), nil, &init, n.Left(), nodnil())
n = walkexpr(n, &init)
n = addinit(n, init.Slice())
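
Here a bare receive used as a statement is lowered straight to the runtime: since the received value is discarded, walk emits chanrecv1(c, nil) rather than materializing an element. In source terms (a sketch; chanrecv1 is the runtime entry point named above):

    package main

    // The receive statement discards its result, so it compiles to a
    // direct runtime.chanrecv1(c, nil) call with a nil element pointer.
    func drain(c chan int) {
        <-c
    }

    func main() {
        c := make(chan int, 1)
        c <- 42
        drain(c)
    }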
- case OBREAK,
- OCONTINUE,
- OFALL,
- OGOTO,
- OLABEL,
- ODCLCONST,
- ODCLTYPE,
- OCHECKNIL,
- OVARDEF,
- OVARKILL,
- OVARLIVE:
+ case ir.OBREAK,
+ ir.OCONTINUE,
+ ir.OFALL,
+ ir.OGOTO,
+ ir.OLABEL,
+ ir.ODCLCONST,
+ ir.ODCLTYPE,
+ ir.OCHECKNIL,
+ ir.OVARDEF,
+ ir.OVARKILL,
+ ir.OVARLIVE:
break
- case ODCL:
- v := n.Left
- if v.Class() == PAUTOHEAP {
- if compiling_runtime {
- yyerror("%v escapes to heap, not allowed in runtime", v)
+ case ir.ODCL:
+ v := n.Left()
+ if v.Class() == ir.PAUTOHEAP {
+ if base.Flag.CompilingRuntime {
+ base.Errorf("%v escapes to heap, not allowed in runtime", v)
}
if prealloc[v] == nil {
- prealloc[v] = callnew(v.Type)
+ prealloc[v] = callnew(v.Type())
}
- nn := nod(OAS, v.Name.Param.Heapaddr, prealloc[v])
+ nn := ir.Nod(ir.OAS, v.Name().Param.Heapaddr, prealloc[v])
nn.SetColas(true)
nn = typecheck(nn, ctxStmt)
return walkstmt(nn)
}
- case OBLOCK:
- walkstmtlist(n.List.Slice())
+ case ir.OBLOCK:
+ walkstmtlist(n.List().Slice())
- case OCASE:
- yyerror("case statement out of place")
+ case ir.OCASE:
+ base.Errorf("case statement out of place")
- case ODEFER:
- Curfn.Func.SetHasDefer(true)
- Curfn.Func.numDefers++
- if Curfn.Func.numDefers > maxOpenDefers {
+ case ir.ODEFER:
+ Curfn.Func().SetHasDefer(true)
+ Curfn.Func().NumDefers++
+ if Curfn.Func().NumDefers > maxOpenDefers {
// Don't allow open-coded defers if there are more than
// 8 defers in the function, since we use a single
// byte to record active defers.
- Curfn.Func.SetOpenCodedDeferDisallowed(true)
+ Curfn.Func().SetOpenCodedDeferDisallowed(true)
}
- if n.Esc != EscNever {
+ if n.Esc() != EscNever {
// If n.Esc is not EscNever, then this defer occurs in a loop,
// so open-coded defers cannot be used in this function.
- Curfn.Func.SetOpenCodedDeferDisallowed(true)
+ Curfn.Func().SetOpenCodedDeferDisallowed(true)
}
fallthrough
- case OGO:
- switch n.Left.Op {
- case OPRINT, OPRINTN:
- n.Left = wrapCall(n.Left, &n.Ninit)
-
- case ODELETE:
- if mapfast(n.Left.List.First().Type) == mapslow {
- n.Left = wrapCall(n.Left, &n.Ninit)
+ case ir.OGO:
+ switch n.Left().Op() {
+ case ir.OPRINT, ir.OPRINTN:
+ n.SetLeft(wrapCall(n.Left(), n.PtrInit()))
+
+ case ir.ODELETE:
+ if mapfast(n.Left().List().First().Type()) == mapslow {
+ n.SetLeft(wrapCall(n.Left(), n.PtrInit()))
} else {
- n.Left = walkexpr(n.Left, &n.Ninit)
+ n.SetLeft(walkexpr(n.Left(), n.PtrInit()))
}
- case OCOPY:
- n.Left = copyany(n.Left, &n.Ninit, true)
+ case ir.OCOPY:
+ n.SetLeft(copyany(n.Left(), n.PtrInit(), true))
- case OCALLFUNC, OCALLMETH, OCALLINTER:
- if n.Left.Nbody.Len() > 0 {
- n.Left = wrapCall(n.Left, &n.Ninit)
+ case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
+ if n.Left().Body().Len() > 0 {
+ n.SetLeft(wrapCall(n.Left(), n.PtrInit()))
} else {
- n.Left = walkexpr(n.Left, &n.Ninit)
+ n.SetLeft(walkexpr(n.Left(), n.PtrInit()))
}
default:
- n.Left = walkexpr(n.Left, &n.Ninit)
+ n.SetLeft(walkexpr(n.Left(), n.PtrInit()))
}
- case OFOR, OFORUNTIL:
- if n.Left != nil {
- walkstmtlist(n.Left.Ninit.Slice())
- init := n.Left.Ninit
- n.Left.Ninit.Set(nil)
- n.Left = walkexpr(n.Left, &init)
- n.Left = addinit(n.Left, init.Slice())
+ case ir.OFOR, ir.OFORUNTIL:
+ if n.Left() != nil {
+ walkstmtlist(n.Left().Init().Slice())
+ init := n.Left().Init()
+ n.Left().PtrInit().Set(nil)
+ n.SetLeft(walkexpr(n.Left(), &init))
+ n.SetLeft(addinit(n.Left(), init.Slice()))
}
- n.Right = walkstmt(n.Right)
- if n.Op == OFORUNTIL {
- walkstmtlist(n.List.Slice())
+ n.SetRight(walkstmt(n.Right()))
+ if n.Op() == ir.OFORUNTIL {
+ walkstmtlist(n.List().Slice())
}
- walkstmtlist(n.Nbody.Slice())
+ walkstmtlist(n.Body().Slice())
- case OIF:
- n.Left = walkexpr(n.Left, &n.Ninit)
- walkstmtlist(n.Nbody.Slice())
- walkstmtlist(n.Rlist.Slice())
+ case ir.OIF:
+ n.SetLeft(walkexpr(n.Left(), n.PtrInit()))
+ walkstmtlist(n.Body().Slice())
+ walkstmtlist(n.Rlist().Slice())
- case ORETURN:
- Curfn.Func.numReturns++
- if n.List.Len() == 0 {
+ case ir.ORETURN:
+ Curfn.Func().NumReturns++
+ if n.List().Len() == 0 {
break
}
- if (Curfn.Type.FuncType().Outnamed && n.List.Len() > 1) || paramoutheap(Curfn) {
+ if (Curfn.Type().FuncType().Outnamed && n.List().Len() > 1) || paramoutheap(Curfn) {
// assign to the function out parameters,
// so that reorder3 can fix up conflicts
- var rl []*Node
+ var rl []ir.Node
- for _, ln := range Curfn.Func.Dcl {
+ for _, ln := range Curfn.Func().Dcl {
cl := ln.Class()
- if cl == PAUTO || cl == PAUTOHEAP {
+ if cl == ir.PAUTO || cl == ir.PAUTOHEAP {
break
}
- if cl == PPARAMOUT {
- if ln.isParamStackCopy() {
- ln = walkexpr(typecheck(nod(ODEREF, ln.Name.Param.Heapaddr, nil), ctxExpr), nil)
+ if cl == ir.PPARAMOUT {
+ if isParamStackCopy(ln) {
+ ln = walkexpr(typecheck(ir.Nod(ir.ODEREF, ln.Name().Param.Heapaddr, nil), ctxExpr), nil)
}
rl = append(rl, ln)
}
}
- if got, want := n.List.Len(), len(rl); got != want {
+ if got, want := n.List().Len(), len(rl); got != want {
// order should have rewritten multi-value function calls
// with explicit OAS2FUNC nodes.
- Fatalf("expected %v return arguments, have %v", want, got)
+ base.Fatalf("expected %v return arguments, have %v", want, got)
}
// move function calls out, to make reorder3's job easier.
- walkexprlistsafe(n.List.Slice(), &n.Ninit)
+ walkexprlistsafe(n.List().Slice(), n.PtrInit())
- ll := ascompatee(n.Op, rl, n.List.Slice(), &n.Ninit)
- n.List.Set(reorder3(ll))
+ ll := ascompatee(n.Op(), rl, n.List().Slice(), n.PtrInit())
+ n.PtrList().Set(reorder3(ll))
break
}
- walkexprlist(n.List.Slice(), &n.Ninit)
+ walkexprlist(n.List().Slice(), n.PtrInit())
// For each return parameter (lhs), assign the corresponding result (rhs).
- lhs := Curfn.Type.Results()
- rhs := n.List.Slice()
- res := make([]*Node, lhs.NumFields())
+ lhs := Curfn.Type().Results()
+ rhs := n.List().Slice()
+ res := make([]ir.Node, lhs.NumFields())
for i, nl := range lhs.FieldSlice() {
- nname := asNode(nl.Nname)
- if nname.isParamHeapCopy() {
- nname = nname.Name.Param.Stackcopy
+ nname := ir.AsNode(nl.Nname)
+ if isParamHeapCopy(nname) {
+ nname = nname.Name().Param.Stackcopy
}
- a := nod(OAS, nname, rhs[i])
- res[i] = convas(a, &n.Ninit)
+ a := ir.Nod(ir.OAS, nname, rhs[i])
+ res[i] = convas(a, n.PtrInit())
}
- n.List.Set(res)
+ n.PtrList().Set(res)
- case ORETJMP:
+ case ir.ORETJMP:
break
- case OINLMARK:
+ case ir.OINLMARK:
break
- case OSELECT:
+ case ir.OSELECT:
walkselect(n)
- case OSWITCH:
+ case ir.OSWITCH:
walkswitch(n)
- case ORANGE:
+ case ir.ORANGE:
n = walkrange(n)
}
- if n.Op == ONAME {
- Fatalf("walkstmt ended up with name: %+v", n)
+ if n.Op() == ir.ONAME {
+ base.Fatalf("walkstmt ended up with name: %+v", n)
}
return n
}
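
The ODEFER case above disables open-coded defers past maxOpenDefers because the active defer sites are recorded in a single byte, one bit each, which caps the scheme at 8 sites. A standalone sketch of that bookkeeping, assuming the one-bit-per-site encoding described in the comment:

    package main

    import "fmt"

    func main() {
        // One bit per defer site: a uint8 tracks at most 8 of them.
        var active uint8
        for site := 0; site < 8; site++ {
            active |= 1 << site // this defer statement executed
        }
        // At function exit, run recorded defers in reverse order.
        for site := 7; site >= 0; site-- {
            if active&(1<<site) != 0 {
                fmt.Println("run defer at site", site)
            }
        }
    }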
@@ -341,20 +346,20 @@ func walkstmt(n *Node) *Node {
// the types expressions are calculated.
// compile-time constants are evaluated.
// complex side effects like statements are appended to init
-func walkexprlist(s []*Node, init *Nodes) {
+func walkexprlist(s []ir.Node, init *ir.Nodes) {
for i := range s {
s[i] = walkexpr(s[i], init)
}
}
-func walkexprlistsafe(s []*Node, init *Nodes) {
+func walkexprlistsafe(s []ir.Node, init *ir.Nodes) {
for i, n := range s {
s[i] = safeexpr(n, init)
s[i] = walkexpr(s[i], init)
}
}
-func walkexprlistcheap(s []*Node, init *Nodes) {
+func walkexprlistcheap(s []ir.Node, init *ir.Nodes) {
for i, n := range s {
s[i] = cheapexpr(n, init)
s[i] = walkexpr(s[i], init)
@@ -377,7 +382,7 @@ func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) {
return "convT16", false
case from.Size() == 4 && from.Align == 4 && !from.HasPointers():
return "convT32", false
- case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !from.HasPointers():
+ case from.Size() == 8 && from.Align == types.Types[types.TUINT64].Align && !from.HasPointers():
return "convT64", false
}
if sc := from.SoleComponent(); sc != nil {
@@ -402,205 +407,204 @@ func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) {
return "convT2I", true
}
}
- Fatalf("unknown conv func %c2%c", from.Tie(), to.Tie())
+ base.Fatalf("unknown conv func %c2%c", from.Tie(), to.Tie())
panic("unreachable")
}
// The result of walkexpr MUST be assigned back to n, e.g.
// n.Left = walkexpr(n.Left, init)
-func walkexpr(n *Node, init *Nodes) *Node {
+func walkexpr(n ir.Node, init *ir.Nodes) ir.Node {
if n == nil {
return n
}
// Eagerly checkwidth all expressions for the back end.
- if n.Type != nil && !n.Type.WidthCalculated() {
- switch n.Type.Etype {
- case TBLANK, TNIL, TIDEAL:
+ if n.Type() != nil && !n.Type().WidthCalculated() {
+ switch n.Type().Etype {
+ case types.TBLANK, types.TNIL, types.TIDEAL:
default:
- checkwidth(n.Type)
+ checkwidth(n.Type())
}
}
- if init == &n.Ninit {
+ if init == n.PtrInit() {
// not okay to use n->ninit when walking n,
// because we might replace n with some other node
// and would lose the init list.
- Fatalf("walkexpr init == &n->ninit")
+ base.Fatalf("walkexpr init == &n->ninit")
}
- if n.Ninit.Len() != 0 {
- walkstmtlist(n.Ninit.Slice())
- init.AppendNodes(&n.Ninit)
+ if n.Init().Len() != 0 {
+ walkstmtlist(n.Init().Slice())
+ init.AppendNodes(n.PtrInit())
}
lno := setlineno(n)
- if Debug.w > 1 {
- Dump("before walk expr", n)
+ if base.Flag.LowerW > 1 {
+ ir.Dump("before walk expr", n)
}
if n.Typecheck() != 1 {
- Fatalf("missed typecheck: %+v", n)
+ base.Fatalf("missed typecheck: %+v", n)
}
- if n.Type.IsUntyped() {
- Fatalf("expression has untyped type: %+v", n)
+ if n.Type().IsUntyped() {
+ base.Fatalf("expression has untyped type: %+v", n)
}
- if n.Op == ONAME && n.Class() == PAUTOHEAP {
- nn := nod(ODEREF, n.Name.Param.Heapaddr, nil)
+ if n.Op() == ir.ONAME && n.Class() == ir.PAUTOHEAP {
+ nn := ir.Nod(ir.ODEREF, n.Name().Param.Heapaddr, nil)
nn = typecheck(nn, ctxExpr)
nn = walkexpr(nn, init)
- nn.Left.MarkNonNil()
+ nn.Left().MarkNonNil()
return nn
}
opswitch:
- switch n.Op {
+ switch n.Op() {
default:
- Dump("walk", n)
- Fatalf("walkexpr: switch 1 unknown op %+S", n)
+ ir.Dump("walk", n)
+ base.Fatalf("walkexpr: switch 1 unknown op %+S", n)
- case ONONAME, OEMPTY, OGETG, ONEWOBJ:
+ case ir.ONONAME, ir.OEMPTY, ir.OGETG, ir.ONEWOBJ, ir.OMETHEXPR:
- case OTYPE, ONAME, OLITERAL:
+ case ir.OTYPE, ir.ONAME, ir.OLITERAL, ir.ONIL:
// TODO(mdempsky): Just return n; see discussion on CL 38655.
// Perhaps refactor to use Node.mayBeShared for these instead.
// If these return early, make sure to still call
// stringsym for constant strings.
- case ONOT, ONEG, OPLUS, OBITNOT, OREAL, OIMAG, ODOTMETH, ODOTINTER,
- ODEREF, OSPTR, OITAB, OIDATA, OADDR:
- n.Left = walkexpr(n.Left, init)
+ case ir.ONOT, ir.ONEG, ir.OPLUS, ir.OBITNOT, ir.OREAL, ir.OIMAG, ir.ODOTMETH, ir.ODOTINTER,
+ ir.ODEREF, ir.OSPTR, ir.OITAB, ir.OIDATA, ir.OADDR:
+ n.SetLeft(walkexpr(n.Left(), init))
- case OEFACE, OAND, OANDNOT, OSUB, OMUL, OADD, OOR, OXOR, OLSH, ORSH:
- n.Left = walkexpr(n.Left, init)
- n.Right = walkexpr(n.Right, init)
+ case ir.OEFACE, ir.OAND, ir.OANDNOT, ir.OSUB, ir.OMUL, ir.OADD, ir.OOR, ir.OXOR, ir.OLSH, ir.ORSH:
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetRight(walkexpr(n.Right(), init))
- case ODOT, ODOTPTR:
+ case ir.ODOT, ir.ODOTPTR:
usefield(n)
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
- case ODOTTYPE, ODOTTYPE2:
- n.Left = walkexpr(n.Left, init)
+ case ir.ODOTTYPE, ir.ODOTTYPE2:
+ n.SetLeft(walkexpr(n.Left(), init))
// Set up interface type addresses for back end.
- n.Right = typename(n.Type)
- if n.Op == ODOTTYPE {
- n.Right.Right = typename(n.Left.Type)
+ n.SetRight(typename(n.Type()))
+ if n.Op() == ir.ODOTTYPE {
+ n.Right().SetRight(typename(n.Left().Type()))
}
- if !n.Type.IsInterface() && !n.Left.Type.IsEmptyInterface() {
- n.List.Set1(itabname(n.Type, n.Left.Type))
+ if !n.Type().IsInterface() && !n.Left().Type().IsEmptyInterface() {
+ n.PtrList().Set1(itabname(n.Type(), n.Left().Type()))
}
- case OLEN, OCAP:
+ case ir.OLEN, ir.OCAP:
if isRuneCount(n) {
// Replace len([]rune(string)) with runtime.countrunes(string).
- n = mkcall("countrunes", n.Type, init, conv(n.Left.Left, types.Types[TSTRING]))
+ n = mkcall("countrunes", n.Type(), init, conv(n.Left().Left(), types.Types[types.TSTRING]))
break
}
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
// replace len(*[10]int) with 10.
// delayed until now to preserve side effects.
- t := n.Left.Type
+ t := n.Left().Type()
if t.IsPtr() {
t = t.Elem()
}
if t.IsArray() {
- safeexpr(n.Left, init)
- setintconst(n, t.NumElem())
+ safeexpr(n.Left(), init)
+ n = origIntConst(n, t.NumElem())
n.SetTypecheck(1)
}
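
Two rewrites happen in the OLEN/OCAP case: len([]rune(s)) becomes a countrunes call so the rune slice is never allocated, and len of a (possibly dereferenced) array folds to its constant bound once side effects are preserved. The first is observable only as a missing allocation; unicode/utf8 exposes the same computation:

    package main

    import (
        "fmt"
        "unicode/utf8"
    )

    func main() {
        s := "héllo, 世界"
        // len([]rune(s)) is lowered to a countrunes-style scan; no
        // []rune is built. utf8.RuneCountInString computes the same.
        fmt.Println(len([]rune(s)), utf8.RuneCountInString(s))
    }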
- case OCOMPLEX:
+ case ir.OCOMPLEX:
// Use results from call expression as arguments for complex.
- if n.Left == nil && n.Right == nil {
- n.Left = n.List.First()
- n.Right = n.List.Second()
+ if n.Left() == nil && n.Right() == nil {
+ n.SetLeft(n.List().First())
+ n.SetRight(n.List().Second())
}
- n.Left = walkexpr(n.Left, init)
- n.Right = walkexpr(n.Right, init)
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetRight(walkexpr(n.Right(), init))
- case OEQ, ONE, OLT, OLE, OGT, OGE:
+ case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
n = walkcompare(n, init)
- case OANDAND, OOROR:
- n.Left = walkexpr(n.Left, init)
+ case ir.OANDAND, ir.OOROR:
+ n.SetLeft(walkexpr(n.Left(), init))
// cannot put side effects from n.Right on init,
// because they cannot run before n.Left is checked.
// save elsewhere and store on the eventual n.Right.
- var ll Nodes
+ var ll ir.Nodes
- n.Right = walkexpr(n.Right, &ll)
- n.Right = addinit(n.Right, ll.Slice())
+ n.SetRight(walkexpr(n.Right(), &ll))
+ n.SetRight(addinit(n.Right(), ll.Slice()))
- case OPRINT, OPRINTN:
+ case ir.OPRINT, ir.OPRINTN:
n = walkprint(n, init)
- case OPANIC:
- n = mkcall("gopanic", nil, init, n.Left)
+ case ir.OPANIC:
+ n = mkcall("gopanic", nil, init, n.Left())
- case ORECOVER:
- n = mkcall("gorecover", n.Type, init, nod(OADDR, nodfp, nil))
+ case ir.ORECOVER:
+ n = mkcall("gorecover", n.Type(), init, ir.Nod(ir.OADDR, nodfp, nil))
- case OCLOSUREVAR, OCFUNC:
+ case ir.OCLOSUREVAR, ir.OCFUNC:
- case OCALLINTER, OCALLFUNC, OCALLMETH:
- if n.Op == OCALLINTER {
+ case ir.OCALLINTER, ir.OCALLFUNC, ir.OCALLMETH:
+ if n.Op() == ir.OCALLINTER {
usemethod(n)
markUsedIfaceMethod(n)
}
- if n.Op == OCALLFUNC && n.Left.Op == OCLOSURE {
+ if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.OCLOSURE {
// Transform direct call of a closure to call of a normal function.
// transformclosure already did all preparation work.
// Prepend captured variables to argument list.
- n.List.Prepend(n.Left.Func.Enter.Slice()...)
-
- n.Left.Func.Enter.Set(nil)
+ n.PtrList().Prepend(n.Left().Func().ClosureEnter.Slice()...)
+ n.Left().Func().ClosureEnter.Set(nil)
// Replace OCLOSURE with ONAME/PFUNC.
- n.Left = n.Left.Func.Closure.Func.Nname
+ n.SetLeft(n.Left().Func().Nname)
// Update type of OCALLFUNC node.
// Output arguments had not changed, but their offsets could.
- if n.Left.Type.NumResults() == 1 {
- n.Type = n.Left.Type.Results().Field(0).Type
+ if n.Left().Type().NumResults() == 1 {
+ n.SetType(n.Left().Type().Results().Field(0).Type)
} else {
- n.Type = n.Left.Type.Results()
+ n.SetType(n.Left().Type().Results())
}
}
walkCall(n, init)
- case OAS, OASOP:
- init.AppendNodes(&n.Ninit)
+ case ir.OAS, ir.OASOP:
+ init.AppendNodes(n.PtrInit())
// Recognize m[k] = append(m[k], ...) so we can reuse
// the mapassign call.
- mapAppend := n.Left.Op == OINDEXMAP && n.Right.Op == OAPPEND
- if mapAppend && !samesafeexpr(n.Left, n.Right.List.First()) {
- Fatalf("not same expressions: %v != %v", n.Left, n.Right.List.First())
+ mapAppend := n.Left().Op() == ir.OINDEXMAP && n.Right().Op() == ir.OAPPEND
+ if mapAppend && !samesafeexpr(n.Left(), n.Right().List().First()) {
+ base.Fatalf("not same expressions: %v != %v", n.Left(), n.Right().List().First())
}
- n.Left = walkexpr(n.Left, init)
- n.Left = safeexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetLeft(safeexpr(n.Left(), init))
if mapAppend {
- n.Right.List.SetFirst(n.Left)
+ n.Right().List().SetFirst(n.Left())
}
- if n.Op == OASOP {
+ if n.Op() == ir.OASOP {
// Rewrite x op= y into x = x op y.
- n.Right = nod(n.SubOp(), n.Left, n.Right)
- n.Right = typecheck(n.Right, ctxExpr)
+ n.SetRight(ir.Nod(n.SubOp(), n.Left(), n.Right()))
+ n.SetRight(typecheck(n.Right(), ctxExpr))
- n.Op = OAS
+ n.SetOp(ir.OAS)
n.ResetAux()
}
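
The OASOP arm rewrites x op= y into x = x op y and then continues as a plain OAS; both spellings are identical from this point on. For instance:

    package main

    import "fmt"

    func main() {
        x, y := 10, 3
        x -= y         // OASOP, walked as the line below
        x = x - y      // the rewritten OAS form
        fmt.Println(x) // 4
    }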
@@ -608,35 +612,35 @@ opswitch:
break
}
- if n.Right == nil {
+ if n.Right() == nil {
// TODO(austin): Check all "implicit zeroing"
break
}
- if !instrumenting && isZero(n.Right) {
+ if !instrumenting && isZero(n.Right()) {
break
}
- switch n.Right.Op {
+ switch n.Right().Op() {
default:
- n.Right = walkexpr(n.Right, init)
+ n.SetRight(walkexpr(n.Right(), init))
- case ORECV:
+ case ir.ORECV:
// x = <-c; n.Left is x, n.Right.Left is c.
// order.stmt made sure x is addressable.
- n.Right.Left = walkexpr(n.Right.Left, init)
+ n.Right().SetLeft(walkexpr(n.Right().Left(), init))
- n1 := nod(OADDR, n.Left, nil)
- r := n.Right.Left // the channel
- n = mkcall1(chanfn("chanrecv1", 2, r.Type), nil, init, r, n1)
+ n1 := ir.Nod(ir.OADDR, n.Left(), nil)
+ r := n.Right().Left() // the channel
+ n = mkcall1(chanfn("chanrecv1", 2, r.Type()), nil, init, r, n1)
n = walkexpr(n, init)
break opswitch
- case OAPPEND:
+ case ir.OAPPEND:
// x = append(...)
- r := n.Right
- if r.Type.Elem().NotInHeap() {
- yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", r.Type.Elem())
+ r := n.Right()
+ if r.Type().Elem().NotInHeap() {
+ base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", r.Type().Elem())
}
switch {
case isAppendOfMake(r):
@@ -647,86 +651,86 @@ opswitch:
default:
r = walkappend(r, init, n)
}
- n.Right = r
- if r.Op == OAPPEND {
+ n.SetRight(r)
+ if r.Op() == ir.OAPPEND {
// Left in place for back end.
// Do not add a new write barrier.
// Set up address of type for back end.
- r.Left = typename(r.Type.Elem())
+ r.SetLeft(typename(r.Type().Elem()))
break opswitch
}
// Otherwise, lowered for race detector.
// Treat as ordinary assignment.
}
- if n.Left != nil && n.Right != nil {
+ if n.Left() != nil && n.Right() != nil {
n = convas(n, init)
}
- case OAS2:
- init.AppendNodes(&n.Ninit)
- walkexprlistsafe(n.List.Slice(), init)
- walkexprlistsafe(n.Rlist.Slice(), init)
- ll := ascompatee(OAS, n.List.Slice(), n.Rlist.Slice(), init)
+ case ir.OAS2:
+ init.AppendNodes(n.PtrInit())
+ walkexprlistsafe(n.List().Slice(), init)
+ walkexprlistsafe(n.Rlist().Slice(), init)
+ ll := ascompatee(ir.OAS, n.List().Slice(), n.Rlist().Slice(), init)
ll = reorder3(ll)
n = liststmt(ll)
// a,b,... = fn()
- case OAS2FUNC:
- init.AppendNodes(&n.Ninit)
+ case ir.OAS2FUNC:
+ init.AppendNodes(n.PtrInit())
- r := n.Right
- walkexprlistsafe(n.List.Slice(), init)
+ r := n.Right()
+ walkexprlistsafe(n.List().Slice(), init)
r = walkexpr(r, init)
if isIntrinsicCall(r) {
- n.Right = r
+ n.SetRight(r)
break
}
init.Append(r)
- ll := ascompatet(n.List, r.Type)
+ ll := ascompatet(n.List(), r.Type())
n = liststmt(ll)
// x, y = <-c
// order.stmt made sure x is addressable or blank.
- case OAS2RECV:
- init.AppendNodes(&n.Ninit)
-
- r := n.Right
- walkexprlistsafe(n.List.Slice(), init)
- r.Left = walkexpr(r.Left, init)
- var n1 *Node
- if n.List.First().isBlank() {
+ case ir.OAS2RECV:
+ init.AppendNodes(n.PtrInit())
+
+ r := n.Right()
+ walkexprlistsafe(n.List().Slice(), init)
+ r.SetLeft(walkexpr(r.Left(), init))
+ var n1 ir.Node
+ if ir.IsBlank(n.List().First()) {
n1 = nodnil()
} else {
- n1 = nod(OADDR, n.List.First(), nil)
+ n1 = ir.Nod(ir.OADDR, n.List().First(), nil)
}
- fn := chanfn("chanrecv2", 2, r.Left.Type)
- ok := n.List.Second()
- call := mkcall1(fn, types.Types[TBOOL], init, r.Left, n1)
- n = nod(OAS, ok, call)
+ fn := chanfn("chanrecv2", 2, r.Left().Type())
+ ok := n.List().Second()
+ call := mkcall1(fn, types.Types[types.TBOOL], init, r.Left(), n1)
+ n = ir.Nod(ir.OAS, ok, call)
n = typecheck(n, ctxStmt)
// a,b = m[i]
- case OAS2MAPR:
- init.AppendNodes(&n.Ninit)
+ case ir.OAS2MAPR:
+ init.AppendNodes(n.PtrInit())
- r := n.Right
- walkexprlistsafe(n.List.Slice(), init)
- r.Left = walkexpr(r.Left, init)
- r.Right = walkexpr(r.Right, init)
- t := r.Left.Type
+ r := n.Right()
+ walkexprlistsafe(n.List().Slice(), init)
+ r.SetLeft(walkexpr(r.Left(), init))
+ r.SetRight(walkexpr(r.Right(), init))
+ t := r.Left().Type()
fast := mapfast(t)
- var key *Node
+ var key ir.Node
if fast != mapslow {
// fast versions take key by value
- key = r.Right
+ key = r.Right()
} else {
// standard version takes key by reference
// order.expr made sure key is addressable.
- key = nod(OADDR, r.Right, nil)
+ key = ir.Nod(ir.OADDR, r.Right(), nil)
}
// from:
@@ -734,71 +738,71 @@ opswitch:
// to:
// var,b = mapaccess2*(t, m, i)
// a = *var
- a := n.List.First()
+ a := n.List().First()
if w := t.Elem().Width; w <= zeroValSize {
fn := mapfn(mapaccess2[fast], t)
- r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key)
+ r = mkcall1(fn, fn.Type().Results(), init, typename(t), r.Left(), key)
} else {
fn := mapfn("mapaccess2_fat", t)
z := zeroaddr(w)
- r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key, z)
+ r = mkcall1(fn, fn.Type().Results(), init, typename(t), r.Left(), key, z)
}
// mapaccess2* returns a typed bool, but due to spec changes,
// the boolean result of i.(T) is now untyped so we make it the
// same type as the variable on the lhs.
- if ok := n.List.Second(); !ok.isBlank() && ok.Type.IsBoolean() {
- r.Type.Field(1).Type = ok.Type
+ if ok := n.List().Second(); !ir.IsBlank(ok) && ok.Type().IsBoolean() {
+ r.Type().Field(1).Type = ok.Type()
}
- n.Right = r
- n.Op = OAS2FUNC
+ n.SetRight(r)
+ n.SetOp(ir.OAS2FUNC)
// don't generate a = *var if a is _
- if !a.isBlank() {
+ if !ir.IsBlank(a) {
var_ := temp(types.NewPtr(t.Elem()))
var_.SetTypecheck(1)
var_.MarkNonNil() // mapaccess always returns a non-nil pointer
- n.List.SetFirst(var_)
+ n.List().SetFirst(var_)
n = walkexpr(n, init)
init.Append(n)
- n = nod(OAS, a, nod(ODEREF, var_, nil))
+ n = ir.Nod(ir.OAS, a, ir.Nod(ir.ODEREF, var_, nil))
}
n = typecheck(n, ctxStmt)
n = walkexpr(n, init)
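
So a comma-ok map read is split into a pointer-returning runtime call plus a dereference, with the fat variant handling elements larger than zeroValSize. A behavioral sketch of the rewrite for a, ok = m[k] (mapaccess2 below is a stand-in for the runtime family used above):

    package main

    import "fmt"

    // Stand-in for runtime.mapaccess2*: returns a pointer to the
    // element, or to a zero value when the key is absent.
    func mapaccess2(m map[string]int, k string) (*int, bool) {
        if v, ok := m[k]; ok {
            v := v
            return &v, true
        }
        zero := 0
        return &zero, false
    }

    func main() {
        m := map[string]int{"a": 1}
        p, ok := mapaccess2(m, "a") // var, ok = mapaccess2(t, m, k)
        a := *p                     // a = *var (skipped when a is _)
        fmt.Println(a, ok)          // 1 true
    }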
- case ODELETE:
- init.AppendNodes(&n.Ninit)
- map_ := n.List.First()
- key := n.List.Second()
+ case ir.ODELETE:
+ init.AppendNodes(n.PtrInit())
+ map_ := n.List().First()
+ key := n.List().Second()
map_ = walkexpr(map_, init)
key = walkexpr(key, init)
- t := map_.Type
+ t := map_.Type()
fast := mapfast(t)
if fast == mapslow {
// order.stmt made sure key is addressable.
- key = nod(OADDR, key, nil)
+ key = ir.Nod(ir.OADDR, key, nil)
}
n = mkcall1(mapfndel(mapdelete[fast], t), nil, init, typename(t), map_, key)
- case OAS2DOTTYPE:
- walkexprlistsafe(n.List.Slice(), init)
- n.Right = walkexpr(n.Right, init)
+ case ir.OAS2DOTTYPE:
+ walkexprlistsafe(n.List().Slice(), init)
+ n.SetRight(walkexpr(n.Right(), init))
- case OCONVIFACE:
- n.Left = walkexpr(n.Left, init)
+ case ir.OCONVIFACE:
+ n.SetLeft(walkexpr(n.Left(), init))
- fromType := n.Left.Type
- toType := n.Type
+ fromType := n.Left().Type()
+ toType := n.Type()
- if !fromType.IsInterface() && !Curfn.Func.Nname.isBlank() { // skip unnamed functions (func _())
- markTypeUsedInInterface(fromType, Curfn.Func.lsym)
+ if !fromType.IsInterface() && !ir.IsBlank(Curfn.Func().Nname) { // skip unnamed functions (func _())
+ markTypeUsedInInterface(fromType, Curfn.Func().LSym)
}
// typeword generates the type word of the interface value.
- typeword := func() *Node {
+ typeword := func() ir.Node {
if toType.IsEmptyInterface() {
return typename(fromType)
}
@@ -807,58 +811,58 @@ opswitch:
// Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped.
if isdirectiface(fromType) {
- l := nod(OEFACE, typeword(), n.Left)
- l.Type = toType
+ l := ir.Nod(ir.OEFACE, typeword(), n.Left())
+ l.SetType(toType)
l.SetTypecheck(n.Typecheck())
n = l
break
}
if staticuint64s == nil {
- staticuint64s = newname(Runtimepkg.Lookup("staticuint64s"))
- staticuint64s.SetClass(PEXTERN)
+ staticuint64s = NewName(Runtimepkg.Lookup("staticuint64s"))
+ staticuint64s.SetClass(ir.PEXTERN)
// The actual type is [256]uint64, but we use [256*8]uint8 so we can address
// individual bytes.
- staticuint64s.Type = types.NewArray(types.Types[TUINT8], 256*8)
- zerobase = newname(Runtimepkg.Lookup("zerobase"))
- zerobase.SetClass(PEXTERN)
- zerobase.Type = types.Types[TUINTPTR]
+ staticuint64s.SetType(types.NewArray(types.Types[types.TUINT8], 256*8))
+ zerobase = NewName(Runtimepkg.Lookup("zerobase"))
+ zerobase.SetClass(ir.PEXTERN)
+ zerobase.SetType(types.Types[types.TUINTPTR])
}
// Optimize convT2{E,I} for many cases in which T is not pointer-shaped,
// by using an existing addressable value identical to n.Left
// or creating one on the stack.
- var value *Node
+ var value ir.Node
switch {
case fromType.Size() == 0:
// n.Left is zero-sized. Use zerobase.
- cheapexpr(n.Left, init) // Evaluate n.Left for side-effects. See issue 19246.
+ cheapexpr(n.Left(), init) // Evaluate n.Left for side-effects. See issue 19246.
value = zerobase
case fromType.IsBoolean() || (fromType.Size() == 1 && fromType.IsInteger()):
// n.Left is a bool/byte. Use staticuint64s[n.Left * 8] on little-endian
// and staticuint64s[n.Left * 8 + 7] on big-endian.
- n.Left = cheapexpr(n.Left, init)
+ n.SetLeft(cheapexpr(n.Left(), init))
// byteindex widens n.Left so that the multiplication doesn't overflow.
- index := nod(OLSH, byteindex(n.Left), nodintconst(3))
+ index := ir.Nod(ir.OLSH, byteindex(n.Left()), nodintconst(3))
if thearch.LinkArch.ByteOrder == binary.BigEndian {
- index = nod(OADD, index, nodintconst(7))
+ index = ir.Nod(ir.OADD, index, nodintconst(7))
}
- value = nod(OINDEX, staticuint64s, index)
+ value = ir.Nod(ir.OINDEX, staticuint64s, index)
value.SetBounded(true)
- case n.Left.Class() == PEXTERN && n.Left.Name != nil && n.Left.Name.Readonly():
+ case n.Left().Class() == ir.PEXTERN && n.Left().Name() != nil && n.Left().Name().Readonly():
// n.Left is a readonly global; use it directly.
- value = n.Left
- case !fromType.IsInterface() && n.Esc == EscNone && fromType.Width <= 1024:
+ value = n.Left()
+ case !fromType.IsInterface() && n.Esc() == EscNone && fromType.Width <= 1024:
// n.Left does not escape. Use a stack temporary initialized to n.Left.
value = temp(fromType)
- init.Append(typecheck(nod(OAS, value, n.Left), ctxStmt))
+ init.Append(typecheck(ir.Nod(ir.OAS, value, n.Left()), ctxStmt))
}
if value != nil {
// Value is identical to n.Left.
// Construct the interface directly: {type/itab, &value}.
- l := nod(OEFACE, typeword(), typecheck(nod(OADDR, value, nil), ctxExpr))
- l.Type = toType
+ l := ir.Nod(ir.OEFACE, typeword(), typecheck(ir.Nod(ir.OADDR, value, nil), ctxExpr))
+ l.SetType(toType)
l.SetTypecheck(n.Typecheck())
n = l
break
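
For bool and byte-sized integers the conversion borrows its data word from the runtime's read-only staticuint64s table, declared above as [256*8]uint8 so individual bytes are addressable: index v*8 on little-endian, v*8+7 on big-endian, makes &table[idx] point at a byte equal to v. The arithmetic, as a sketch:

    package main

    import "fmt"

    // table stands in for runtime.staticuint64s: entry i holds
    // uint64(i), so on little-endian its first byte equals i.
    var table [256 * 8]uint8

    func init() {
        for i := 0; i < 256; i++ {
            table[i*8] = uint8(i)
        }
    }

    func main() {
        v := uint8(42)
        idx := int(v) << 3 // v*8; a big-endian target adds 7
        // &table[idx] can serve as the interface data pointer, so
        // boxing v needs no allocation.
        fmt.Println(table[idx] == v) // true
    }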
@@ -873,20 +877,20 @@ opswitch:
if toType.IsEmptyInterface() && fromType.IsInterface() && !fromType.IsEmptyInterface() {
// Evaluate the input interface.
c := temp(fromType)
- init.Append(nod(OAS, c, n.Left))
+ init.Append(ir.Nod(ir.OAS, c, n.Left()))
// Get the itab out of the interface.
- tmp := temp(types.NewPtr(types.Types[TUINT8]))
- init.Append(nod(OAS, tmp, typecheck(nod(OITAB, c, nil), ctxExpr)))
+ tmp := temp(types.NewPtr(types.Types[types.TUINT8]))
+ init.Append(ir.Nod(ir.OAS, tmp, typecheck(ir.Nod(ir.OITAB, c, nil), ctxExpr)))
// Get the type out of the itab.
- nif := nod(OIF, typecheck(nod(ONE, tmp, nodnil()), ctxExpr), nil)
- nif.Nbody.Set1(nod(OAS, tmp, itabType(tmp)))
+ nif := ir.Nod(ir.OIF, typecheck(ir.Nod(ir.ONE, tmp, nodnil()), ctxExpr), nil)
+ nif.PtrBody().Set1(ir.Nod(ir.OAS, tmp, itabType(tmp)))
init.Append(nif)
// Build the result.
- e := nod(OEFACE, tmp, ifaceData(n.Pos, c, types.NewPtr(types.Types[TUINT8])))
- e.Type = toType // assign type manually, typecheck doesn't understand OEFACE.
+ e := ir.Nod(ir.OEFACE, tmp, ifaceData(n.Pos(), c, types.NewPtr(types.Types[types.TUINT8])))
+ e.SetType(toType) // assign type manually, typecheck doesn't understand OEFACE.
e.SetTypecheck(1)
n = e
break
@@ -901,20 +905,20 @@ opswitch:
fn := syslook(fnname)
dowidth(fromType)
fn = substArgTypes(fn, fromType)
- dowidth(fn.Type)
- call := nod(OCALL, fn, nil)
- call.List.Set1(n.Left)
+ dowidth(fn.Type())
+ call := ir.Nod(ir.OCALL, fn, nil)
+ call.PtrList().Set1(n.Left())
call = typecheck(call, ctxExpr)
call = walkexpr(call, init)
call = safeexpr(call, init)
- e := nod(OEFACE, typeword(), call)
- e.Type = toType
+ e := ir.Nod(ir.OEFACE, typeword(), call)
+ e.SetType(toType)
e.SetTypecheck(1)
n = e
break
}
- var tab *Node
+ var tab ir.Node
if fromType.IsInterface() {
// convI2I
tab = typename(toType)
@@ -923,7 +927,7 @@ opswitch:
tab = typeword()
}
- v := n.Left
+ v := n.Left()
if needsaddr {
// Types of large or unknown size are passed by reference.
// Orderexpr arranged for n.Left to be a temporary for all
@@ -932,49 +936,49 @@ opswitch:
// with non-interface cases, is not visible to order.stmt, so we
// have to fall back on allocating a temp here.
if !islvalue(v) {
- v = copyexpr(v, v.Type, init)
+ v = copyexpr(v, v.Type(), init)
}
- v = nod(OADDR, v, nil)
+ v = ir.Nod(ir.OADDR, v, nil)
}
dowidth(fromType)
fn := syslook(fnname)
fn = substArgTypes(fn, fromType, toType)
- dowidth(fn.Type)
- n = nod(OCALL, fn, nil)
- n.List.Set2(tab, v)
+ dowidth(fn.Type())
+ n = ir.Nod(ir.OCALL, fn, nil)
+ n.PtrList().Set2(tab, v)
n = typecheck(n, ctxExpr)
n = walkexpr(n, init)
- case OCONV, OCONVNOP:
- n.Left = walkexpr(n.Left, init)
- if n.Op == OCONVNOP && checkPtr(Curfn, 1) {
- if n.Type.IsPtr() && n.Left.Type.IsUnsafePtr() { // unsafe.Pointer to *T
+ case ir.OCONV, ir.OCONVNOP:
+ n.SetLeft(walkexpr(n.Left(), init))
+ if n.Op() == ir.OCONVNOP && checkPtr(Curfn, 1) {
+ if n.Type().IsPtr() && n.Left().Type().IsUnsafePtr() { // unsafe.Pointer to *T
n = walkCheckPtrAlignment(n, init, nil)
break
}
- if n.Type.IsUnsafePtr() && n.Left.Type.IsUintptr() { // uintptr to unsafe.Pointer
+ if n.Type().IsUnsafePtr() && n.Left().Type().IsUintptr() { // uintptr to unsafe.Pointer
n = walkCheckPtrArithmetic(n, init)
break
}
}
- param, result := rtconvfn(n.Left.Type, n.Type)
- if param == Txxx {
+ param, result := rtconvfn(n.Left().Type(), n.Type())
+ if param == types.Txxx {
break
}
- fn := basicnames[param] + "to" + basicnames[result]
- n = conv(mkcall(fn, types.Types[result], init, conv(n.Left, types.Types[param])), n.Type)
+ fn := ir.BasicTypeNames[param] + "to" + ir.BasicTypeNames[result]
+ n = conv(mkcall(fn, types.Types[result], init, conv(n.Left(), types.Types[param])), n.Type())
- case ODIV, OMOD:
- n.Left = walkexpr(n.Left, init)
- n.Right = walkexpr(n.Right, init)
+ case ir.ODIV, ir.OMOD:
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetRight(walkexpr(n.Right(), init))
// rewrite complex div into function call.
- et := n.Left.Type.Etype
+ et := n.Left().Type().Etype
- if isComplex[et] && n.Op == ODIV {
- t := n.Type
- n = mkcall("complex128div", types.Types[TCOMPLEX128], init, conv(n.Left, types.Types[TCOMPLEX128]), conv(n.Right, types.Types[TCOMPLEX128]))
+ if isComplex[et] && n.Op() == ir.ODIV {
+ t := n.Type()
+ n = mkcall("complex128div", types.Types[types.TCOMPLEX128], init, conv(n.Left(), types.Types[types.TCOMPLEX128]), conv(n.Right(), types.Types[types.TCOMPLEX128]))
n = conv(n, t)
break
}
@@ -987,21 +991,21 @@ opswitch:
// rewrite 64-bit div and mod on 32-bit architectures.
// TODO: Remove this code once we can introduce
// runtime calls late in SSA processing.
- if Widthreg < 8 && (et == TINT64 || et == TUINT64) {
- if n.Right.Op == OLITERAL {
+ if Widthreg < 8 && (et == types.TINT64 || et == types.TUINT64) {
+ if n.Right().Op() == ir.OLITERAL {
// Leave div/mod by constant powers of 2 or small 16-bit constants.
// The SSA backend will handle those.
switch et {
- case TINT64:
- c := n.Right.Int64Val()
+ case types.TINT64:
+ c := n.Right().Int64Val()
if c < 0 {
c = -c
}
if c != 0 && c&(c-1) == 0 {
break opswitch
}
- case TUINT64:
- c := uint64(n.Right.Int64Val())
+ case types.TUINT64:
+ c := n.Right().Uint64Val()
if c < 1<<16 {
break opswitch
}
@@ -1011,75 +1015,75 @@ opswitch:
}
}
var fn string
- if et == TINT64 {
+ if et == types.TINT64 {
fn = "int64"
} else {
fn = "uint64"
}
- if n.Op == ODIV {
+ if n.Op() == ir.ODIV {
fn += "div"
} else {
fn += "mod"
}
- n = mkcall(fn, n.Type, init, conv(n.Left, types.Types[et]), conv(n.Right, types.Types[et]))
+ n = mkcall(fn, n.Type(), init, conv(n.Left(), types.Types[et]), conv(n.Right(), types.Types[et]))
}
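
On 32-bit targets (Widthreg < 8) the 64-bit div and mod become runtime calls (int64div, int64mod, uint64div, uint64mod), except when the divisor is a constant the SSA backend can strength-reduce: powers of two for signed operands, anything below 1<<16 for unsigned. A sketch of that screen:

    package main

    import "fmt"

    // ssaCanHandle reports whether n op c is left for SSA to lower
    // with shifts or magic multiplies instead of a runtime call.
    func ssaCanHandle(signed bool, c int64) bool {
        if signed {
            if c < 0 {
                c = -c
            }
            return c != 0 && c&(c-1) == 0 // power of two
        }
        return uint64(c) < 1<<16 // small unsigned constant
    }

    func main() {
        fmt.Println(ssaCanHandle(true, -8))  // true: arithmetic shift
        fmt.Println(ssaCanHandle(false, 10)) // true: magic multiply
        fmt.Println(ssaCanHandle(true, 10))  // false: runtime int64div
    }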
- case OINDEX:
- n.Left = walkexpr(n.Left, init)
+ case ir.OINDEX:
+ n.SetLeft(walkexpr(n.Left(), init))
// save the original node for bounds checking elision.
// If it was a ODIV/OMOD walk might rewrite it.
- r := n.Right
+ r := n.Right()
- n.Right = walkexpr(n.Right, init)
+ n.SetRight(walkexpr(n.Right(), init))
// if range of type cannot exceed static array bound,
// disable bounds check.
if n.Bounded() {
break
}
- t := n.Left.Type
+ t := n.Left().Type()
if t != nil && t.IsPtr() {
t = t.Elem()
}
if t.IsArray() {
n.SetBounded(bounded(r, t.NumElem()))
- if Debug.m != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
- Warn("index bounds check elided")
+ if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Right(), constant.Int) {
+ base.Warn("index bounds check elided")
}
- if smallintconst(n.Right) && !n.Bounded() {
- yyerror("index out of bounds")
+ if smallintconst(n.Right()) && !n.Bounded() {
+ base.Errorf("index out of bounds")
}
- } else if Isconst(n.Left, CTSTR) {
- n.SetBounded(bounded(r, int64(len(n.Left.StringVal()))))
- if Debug.m != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
- Warn("index bounds check elided")
+ } else if ir.IsConst(n.Left(), constant.String) {
+ n.SetBounded(bounded(r, int64(len(n.Left().StringVal()))))
+ if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Right(), constant.Int) {
+ base.Warn("index bounds check elided")
}
- if smallintconst(n.Right) && !n.Bounded() {
- yyerror("index out of bounds")
+ if smallintconst(n.Right()) && !n.Bounded() {
+ base.Errorf("index out of bounds")
}
}
- if Isconst(n.Right, CTINT) {
- if n.Right.Val().U.(*Mpint).CmpInt64(0) < 0 || n.Right.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
- yyerror("index out of bounds")
+ if ir.IsConst(n.Right(), constant.Int) {
+ if v := n.Right().Val(); constant.Sign(v) < 0 || doesoverflow(v, types.Types[types.TINT]) {
+ base.Errorf("index out of bounds")
}
}
- case OINDEXMAP:
+ case ir.OINDEXMAP:
// Replace m[k] with *map{access1,assign}(maptype, m, &k)
- n.Left = walkexpr(n.Left, init)
- n.Right = walkexpr(n.Right, init)
- map_ := n.Left
- key := n.Right
- t := map_.Type
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetRight(walkexpr(n.Right(), init))
+ map_ := n.Left()
+ key := n.Right()
+ t := map_.Type()
if n.IndexMapLValue() {
// This m[k] expression is on the left-hand side of an assignment.
fast := mapfast(t)
if fast == mapslow {
// standard version takes key by reference.
// order.expr made sure key is addressable.
- key = nod(OADDR, key, nil)
+ key = ir.Nod(ir.OADDR, key, nil)
}
n = mkcall1(mapfn(mapassign[fast], t), nil, init, typename(t), map_, key)
} else {
@@ -1088,7 +1092,7 @@ opswitch:
if fast == mapslow {
// standard version takes key by reference.
// order.expr made sure key is addressable.
- key = nod(OADDR, key, nil)
+ key = ir.Nod(ir.OADDR, key, nil)
}
if w := t.Elem().Width; w <= zeroValSize {
@@ -1098,26 +1102,26 @@ opswitch:
n = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Elem()), init, typename(t), map_, key, z)
}
}
- n.Type = types.NewPtr(t.Elem())
+ n.SetType(types.NewPtr(t.Elem()))
n.MarkNonNil() // mapaccess1* and mapassign always return non-nil pointers.
- n = nod(ODEREF, n, nil)
- n.Type = t.Elem()
+ n = ir.Nod(ir.ODEREF, n, nil)
+ n.SetType(t.Elem())
n.SetTypecheck(1)
- case ORECV:
- Fatalf("walkexpr ORECV") // should see inside OAS only
+ case ir.ORECV:
+ base.Fatalf("walkexpr ORECV") // should see inside OAS only
- case OSLICEHEADER:
- n.Left = walkexpr(n.Left, init)
- n.List.SetFirst(walkexpr(n.List.First(), init))
- n.List.SetSecond(walkexpr(n.List.Second(), init))
+ case ir.OSLICEHEADER:
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.List().SetFirst(walkexpr(n.List().First(), init))
+ n.List().SetSecond(walkexpr(n.List().Second(), init))
- case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
- checkSlice := checkPtr(Curfn, 1) && n.Op == OSLICE3ARR && n.Left.Op == OCONVNOP && n.Left.Left.Type.IsUnsafePtr()
+ case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
+ checkSlice := checkPtr(Curfn, 1) && n.Op() == ir.OSLICE3ARR && n.Left().Op() == ir.OCONVNOP && n.Left().Left().Type().IsUnsafePtr()
if checkSlice {
- n.Left.Left = walkexpr(n.Left.Left, init)
+ n.Left().SetLeft(walkexpr(n.Left().Left(), init))
} else {
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
}
low, high, max := n.SliceBounds()
low = walkexpr(low, init)
@@ -1129,15 +1133,15 @@ opswitch:
max = walkexpr(max, init)
n.SetSliceBounds(low, high, max)
if checkSlice {
- n.Left = walkCheckPtrAlignment(n.Left, init, max)
+ n.SetLeft(walkCheckPtrAlignment(n.Left(), init, max))
}
- if n.Op.IsSlice3() {
- if max != nil && max.Op == OCAP && samesafeexpr(n.Left, max.Left) {
+ if n.Op().IsSlice3() {
+ if max != nil && max.Op() == ir.OCAP && samesafeexpr(n.Left(), max.Left()) {
// Reduce x[i:j:cap(x)] to x[i:j].
- if n.Op == OSLICE3 {
- n.Op = OSLICE
+ if n.Op() == ir.OSLICE3 {
+ n.SetOp(ir.OSLICE)
} else {
- n.Op = OSLICEARR
+ n.SetOp(ir.OSLICEARR)
}
n = reduceSlice(n)
}
@@ -1145,84 +1149,84 @@ opswitch:
n = reduceSlice(n)
}
- case ONEW:
- if n.Type.Elem().NotInHeap() {
- yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type.Elem())
+ case ir.ONEW:
+ if n.Type().Elem().NotInHeap() {
+ base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type().Elem())
}
- if n.Esc == EscNone {
- if n.Type.Elem().Width >= maxImplicitStackVarSize {
- Fatalf("large ONEW with EscNone: %v", n)
+ if n.Esc() == EscNone {
+ if n.Type().Elem().Width >= maxImplicitStackVarSize {
+ base.Fatalf("large ONEW with EscNone: %v", n)
}
- r := temp(n.Type.Elem())
- r = nod(OAS, r, nil) // zero temp
+ r := temp(n.Type().Elem())
+ r = ir.Nod(ir.OAS, r, nil) // zero temp
r = typecheck(r, ctxStmt)
init.Append(r)
- r = nod(OADDR, r.Left, nil)
+ r = ir.Nod(ir.OADDR, r.Left(), nil)
r = typecheck(r, ctxExpr)
n = r
} else {
- n = callnew(n.Type.Elem())
+ n = callnew(n.Type().Elem())
}
- case OADDSTR:
+ case ir.OADDSTR:
n = addstr(n, init)
- case OAPPEND:
+ case ir.OAPPEND:
// order should make sure we only see OAS(node, OAPPEND), which we handle above.
- Fatalf("append outside assignment")
+ base.Fatalf("append outside assignment")
- case OCOPY:
- n = copyany(n, init, instrumenting && !compiling_runtime)
+ case ir.OCOPY:
+ n = copyany(n, init, instrumenting && !base.Flag.CompilingRuntime)
// cannot use chanfn - closechan takes any, not chan any
- case OCLOSE:
+ case ir.OCLOSE:
fn := syslook("closechan")
- fn = substArgTypes(fn, n.Left.Type)
- n = mkcall1(fn, nil, init, n.Left)
+ fn = substArgTypes(fn, n.Left().Type())
+ n = mkcall1(fn, nil, init, n.Left())
- case OMAKECHAN:
+ case ir.OMAKECHAN:
// When size fits into int, use makechan instead of
// makechan64, which is faster and shorter on 32 bit platforms.
- size := n.Left
+ size := n.Left()
fnname := "makechan64"
- argtype := types.Types[TINT64]
+ argtype := types.Types[types.TINT64]
// Type checking guarantees that TIDEAL size is positive and fits in an int.
// The case of size overflow when converting TUINT or TUINTPTR to TINT
// will be handled by the negative range checks in makechan during runtime.
- if size.Type.IsKind(TIDEAL) || maxintval[size.Type.Etype].Cmp(maxintval[TUINT]) <= 0 {
+ if size.Type().IsKind(types.TIDEAL) || size.Type().Size() <= types.Types[types.TUINT].Size() {
fnname = "makechan"
- argtype = types.Types[TINT]
+ argtype = types.Types[types.TINT]
}
- n = mkcall1(chanfn(fnname, 1, n.Type), n.Type, init, typename(n.Type), conv(size, argtype))
+ n = mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, typename(n.Type()), conv(size, argtype))
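
make(chan T, n) therefore lowers to makechan when the size expression already fits in int (an ideal constant, or an integer type no wider than uint) and to makechan64 otherwise; the split only matters on 32-bit platforms, where int is narrower than int64. In source terms:

    package main

    func main() {
        n := 10          // int-sized: lowered to runtime.makechan
        var m int64 = 10 // may exceed int on 32-bit: makechan64
        _ = make(chan byte, n)
        _ = make(chan byte, m)
    }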
- case OMAKEMAP:
- t := n.Type
+ case ir.OMAKEMAP:
+ t := n.Type()
hmapType := hmap(t)
- hint := n.Left
+ hint := n.Left()
// var h *hmap
- var h *Node
- if n.Esc == EscNone {
+ var h ir.Node
+ if n.Esc() == EscNone {
// Allocate hmap on stack.
// var hv hmap
hv := temp(hmapType)
- zero := nod(OAS, hv, nil)
+ zero := ir.Nod(ir.OAS, hv, nil)
zero = typecheck(zero, ctxStmt)
init.Append(zero)
// h = &hv
- h = nod(OADDR, hv, nil)
+ h = ir.Nod(ir.OADDR, hv, nil)
// Allocate one bucket pointed to by hmap.buckets on stack if hint
// is not larger than BUCKETSIZE. In case hint is larger than
// BUCKETSIZE runtime.makemap will allocate the buckets on the heap.
// Maximum key and elem size is 128 bytes, larger objects
// are stored with an indirection. So max bucket size is 2048+eps.
- if !Isconst(hint, CTINT) ||
- hint.Val().U.(*Mpint).CmpInt64(BUCKETSIZE) <= 0 {
+ if !ir.IsConst(hint, constant.Int) ||
+ constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(BUCKETSIZE)) {
// In case hint is larger than BUCKETSIZE runtime.makemap
// will allocate the buckets on the heap, see #20184
@@ -1233,21 +1237,21 @@ opswitch:
// h.buckets = b
// }
- nif := nod(OIF, nod(OLE, hint, nodintconst(BUCKETSIZE)), nil)
+ nif := ir.Nod(ir.OIF, ir.Nod(ir.OLE, hint, nodintconst(BUCKETSIZE)), nil)
nif.SetLikely(true)
// var bv bmap
bv := temp(bmap(t))
- zero = nod(OAS, bv, nil)
- nif.Nbody.Append(zero)
+ zero = ir.Nod(ir.OAS, bv, nil)
+ nif.PtrBody().Append(zero)
// b = &bv
- b := nod(OADDR, bv, nil)
+ b := ir.Nod(ir.OADDR, bv, nil)
// h.buckets = b
bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
- na := nod(OAS, nodSym(ODOT, h, bsym), b)
- nif.Nbody.Append(na)
+ na := ir.Nod(ir.OAS, nodSym(ir.ODOT, h, bsym), b)
+ nif.PtrBody().Append(na)
nif = typecheck(nif, ctxStmt)
nif = walkstmt(nif)
@@ -1255,7 +1259,7 @@ opswitch:
}
}
- if Isconst(hint, CTINT) && hint.Val().U.(*Mpint).CmpInt64(BUCKETSIZE) <= 0 {
+ if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(BUCKETSIZE)) {
// Handling make(map[any]any) and
// make(map[any]any, hint) where hint <= BUCKETSIZE
// special allows for faster map initialization and
@@ -1263,13 +1267,13 @@ opswitch:
// For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false
// and no buckets will be allocated by makemap. Therefore,
// no buckets need to be allocated in this code path.
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
// Only need to initialize h.hash0 since
// hmap h has been allocated on the stack already.
// h.hash0 = fastrand()
- rand := mkcall("fastrand", types.Types[TUINT32], init)
+ rand := mkcall("fastrand", types.Types[types.TUINT32], init)
hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap
- a := nod(OAS, nodSym(ODOT, h, hashsym), rand)
+ a := ir.Nod(ir.OAS, nodSym(ir.ODOT, h, hashsym), rand)
a = typecheck(a, ctxStmt)
a = walkexpr(a, init)
init.Append(a)
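
Taken together: a non-escaping make(map, hint) with constant hint <= BUCKETSIZE (8, matching the runtime's bucket size) gets a stack-allocated hmap and first bucket, and the only runtime work left is seeding h.hash0 from fastrand. User code on which this path fires, assuming escape analysis keeps m local:

    package main

    import "fmt"

    func sumSquares() int {
        // Small constant hint, map never escapes: hmap and one bucket
        // live on the stack; no runtime.makemap call is emitted.
        m := make(map[int]int, 8)
        for i := 0; i < 8; i++ {
            m[i] = i * i
        }
        s := 0
        for _, v := range m {
            s += v
        }
        return s
    }

    func main() { fmt.Println(sumSquares()) }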
@@ -1279,10 +1283,10 @@ opswitch:
// hmap on the heap and initialize hmap's hash0 field.
fn := syslook("makemap_small")
fn = substArgTypes(fn, t.Key(), t.Elem())
- n = mkcall1(fn, n.Type, init)
+ n = mkcall1(fn, n.Type(), init)
}
} else {
- if n.Esc != EscNone {
+ if n.Esc() != EscNone {
h = nodnil()
}
// Map initialization with a variable or large hint is
@@ -1293,42 +1297,42 @@ opswitch:
// When hint fits into int, use makemap instead of
// makemap64, which is faster and shorter on 32 bit platforms.
fnname := "makemap64"
- argtype := types.Types[TINT64]
+ argtype := types.Types[types.TINT64]
// Type checking guarantees that TIDEAL hint is positive and fits in an int.
// See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function.
// The case of hint overflow when converting TUINT or TUINTPTR to TINT
// will be handled by the negative range checks in makemap during runtime.
- if hint.Type.IsKind(TIDEAL) || maxintval[hint.Type.Etype].Cmp(maxintval[TUINT]) <= 0 {
+ if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() {
fnname = "makemap"
- argtype = types.Types[TINT]
+ argtype = types.Types[types.TINT]
}
fn := syslook(fnname)
fn = substArgTypes(fn, hmapType, t.Key(), t.Elem())
- n = mkcall1(fn, n.Type, init, typename(n.Type), conv(hint, argtype), h)
+ n = mkcall1(fn, n.Type(), init, typename(n.Type()), conv(hint, argtype), h)
}
- case OMAKESLICE:
- l := n.Left
- r := n.Right
+ case ir.OMAKESLICE:
+ l := n.Left()
+ r := n.Right()
if r == nil {
r = safeexpr(l, init)
l = r
}
- t := n.Type
+ t := n.Type()
if t.Elem().NotInHeap() {
- yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
+ base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
}
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
if why := heapAllocReason(n); why != "" {
- Fatalf("%v has EscNone, but %v", n, why)
+ base.Fatalf("%v has EscNone, but %v", n, why)
}
// var arr [r]T
// n = arr[:l]
i := indexconst(r)
if i < 0 {
- Fatalf("walkexpr: invalid index %v", r)
+ base.Fatalf("walkexpr: invalid index %v", r)
}
// cap is constrained to [0,2^31) or [0,2^63) depending on whether
@@ -1338,21 +1342,21 @@ opswitch:
// if len < 0 { panicmakeslicelen() }
// panicmakeslicecap()
// }
- nif := nod(OIF, nod(OGT, conv(l, types.Types[TUINT64]), nodintconst(i)), nil)
- niflen := nod(OIF, nod(OLT, l, nodintconst(0)), nil)
- niflen.Nbody.Set1(mkcall("panicmakeslicelen", nil, init))
- nif.Nbody.Append(niflen, mkcall("panicmakeslicecap", nil, init))
+ nif := ir.Nod(ir.OIF, ir.Nod(ir.OGT, conv(l, types.Types[types.TUINT64]), nodintconst(i)), nil)
+ niflen := ir.Nod(ir.OIF, ir.Nod(ir.OLT, l, nodintconst(0)), nil)
+ niflen.PtrBody().Set1(mkcall("panicmakeslicelen", nil, init))
+ nif.PtrBody().Append(niflen, mkcall("panicmakeslicecap", nil, init))
nif = typecheck(nif, ctxStmt)
init.Append(nif)
t = types.NewArray(t.Elem(), i) // [r]T
var_ := temp(t)
- a := nod(OAS, var_, nil) // zero temp
+ a := ir.Nod(ir.OAS, var_, nil) // zero temp
a = typecheck(a, ctxStmt)
init.Append(a)
- r := nod(OSLICE, var_, nil) // arr[:l]
+ r := ir.Nod(ir.OSLICE, var_, nil) // arr[:l]
r.SetSliceBounds(nil, l, nil)
- r = conv(r, n.Type) // in case n.Type is named.
+ r = conv(r, n.Type()) // in case n.Type is named.
r = typecheck(r, ctxExpr)
r = walkexpr(r, init)
n = r
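
So a non-escaping make([]T, l, r) with constant capacity r becomes a zeroed stack array [r]T sliced to arr[:l], behind explicit len/cap range checks. The generated shape, written out by hand for r = 16:

    package main

    import "fmt"

    func lower(l int) int {
        if uint64(l) > 16 { // l outside [0, r]
            if l < 0 {
                panic("makeslice: len out of range")
            }
            panic("makeslice: cap out of range")
        }
        var arr [16]byte // var arr [r]T, zeroed on the stack
        s := arr[:l]     // n = arr[:l]
        return len(s)
    }

    func main() { fmt.Println(lower(8)) }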
@@ -1364,43 +1368,43 @@ opswitch:
len, cap := l, r
fnname := "makeslice64"
- argtype := types.Types[TINT64]
+ argtype := types.Types[types.TINT64]
// Type checking guarantees that TIDEAL len/cap are positive and fit in an int.
// The case of len or cap overflow when converting TUINT or TUINTPTR to TINT
// will be handled by the negative range checks in makeslice during runtime.
- if (len.Type.IsKind(TIDEAL) || maxintval[len.Type.Etype].Cmp(maxintval[TUINT]) <= 0) &&
- (cap.Type.IsKind(TIDEAL) || maxintval[cap.Type.Etype].Cmp(maxintval[TUINT]) <= 0) {
+ if (len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size()) &&
+ (cap.Type().IsKind(types.TIDEAL) || cap.Type().Size() <= types.Types[types.TUINT].Size()) {
fnname = "makeslice"
- argtype = types.Types[TINT]
+ argtype = types.Types[types.TINT]
}
- m := nod(OSLICEHEADER, nil, nil)
- m.Type = t
+ m := ir.Nod(ir.OSLICEHEADER, nil, nil)
+ m.SetType(t)
fn := syslook(fnname)
- m.Left = mkcall1(fn, types.Types[TUNSAFEPTR], init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype))
- m.Left.MarkNonNil()
- m.List.Set2(conv(len, types.Types[TINT]), conv(cap, types.Types[TINT]))
+ m.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype)))
+ m.Left().MarkNonNil()
+ m.PtrList().Set2(conv(len, types.Types[types.TINT]), conv(cap, types.Types[types.TINT]))
m = typecheck(m, ctxExpr)
m = walkexpr(m, init)
n = m
}
- case OMAKESLICECOPY:
- if n.Esc == EscNone {
- Fatalf("OMAKESLICECOPY with EscNone: %v", n)
+ case ir.OMAKESLICECOPY:
+ if n.Esc() == EscNone {
+ base.Fatalf("OMAKESLICECOPY with EscNone: %v", n)
}
- t := n.Type
+ t := n.Type()
if t.Elem().NotInHeap() {
- yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
+ base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
}
- length := conv(n.Left, types.Types[TINT])
- copylen := nod(OLEN, n.Right, nil)
- copyptr := nod(OSPTR, n.Right, nil)
+ length := conv(n.Left(), types.Types[types.TINT])
+ copylen := ir.Nod(ir.OLEN, n.Right(), nil)
+ copyptr := ir.Nod(ir.OSPTR, n.Right(), nil)
if !t.Elem().HasPointers() && n.Bounded() {
// When len(to)==len(from) and elements have no pointers:
@@ -1409,25 +1413,25 @@ opswitch:
// We do not check for overflow of len(to)*elem.Width here
// since len(from) is an existing checked slice capacity
// with same elem.Width for the from slice.
- size := nod(OMUL, conv(length, types.Types[TUINTPTR]), conv(nodintconst(t.Elem().Width), types.Types[TUINTPTR]))
+ size := ir.Nod(ir.OMUL, conv(length, types.Types[types.TUINTPTR]), conv(nodintconst(t.Elem().Width), types.Types[types.TUINTPTR]))
// instantiate mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer
fn := syslook("mallocgc")
- sh := nod(OSLICEHEADER, nil, nil)
- sh.Left = mkcall1(fn, types.Types[TUNSAFEPTR], init, size, nodnil(), nodbool(false))
- sh.Left.MarkNonNil()
- sh.List.Set2(length, length)
- sh.Type = t
+ sh := ir.Nod(ir.OSLICEHEADER, nil, nil)
+ sh.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, nodnil(), nodbool(false)))
+ sh.Left().MarkNonNil()
+ sh.PtrList().Set2(length, length)
+ sh.SetType(t)
s := temp(t)
- r := typecheck(nod(OAS, s, sh), ctxStmt)
+ r := typecheck(ir.Nod(ir.OAS, s, sh), ctxStmt)
r = walkexpr(r, init)
init.Append(r)
// instantiate memmove(to *any, frm *any, size uintptr)
fn = syslook("memmove")
fn = substArgTypes(fn, t.Elem(), t.Elem())
- ncopy := mkcall1(fn, nil, init, nod(OSPTR, s, nil), copyptr, size)
+ ncopy := mkcall1(fn, nil, init, ir.Nod(ir.OSPTR, s, nil), copyptr, size)
ncopy = typecheck(ncopy, ctxStmt)
ncopy = walkexpr(ncopy, init)
init.Append(ncopy)
@@ -1436,96 +1440,96 @@ opswitch:
} else { // Replace make+copy with runtime.makeslicecopy.
// instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
fn := syslook("makeslicecopy")
- s := nod(OSLICEHEADER, nil, nil)
- s.Left = mkcall1(fn, types.Types[TUNSAFEPTR], init, typename(t.Elem()), length, copylen, conv(copyptr, types.Types[TUNSAFEPTR]))
- s.Left.MarkNonNil()
- s.List.Set2(length, length)
- s.Type = t
+ s := ir.Nod(ir.OSLICEHEADER, nil, nil)
+ s.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), length, copylen, conv(copyptr, types.Types[types.TUNSAFEPTR])))
+ s.Left().MarkNonNil()
+ s.PtrList().Set2(length, length)
+ s.SetType(t)
n = typecheck(s, ctxExpr)
n = walkexpr(n, init)
}
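
For reference, a minimal sketch (ordinary Go, not taken from this diff) of the source shape the OMAKESLICECOPY case lowers. The fused node becomes mallocgc+memmove when the element type has no pointers and the copy is known in-bounds, and a single runtime.makeslicecopy call otherwise:

	// makeThenCopy is a hypothetical example; order/walk fuse the make
	// and the copy below into one OMAKESLICECOPY node.
	func makeThenCopy(from []byte) []byte {
		to := make([]byte, len(from)) // OMAKESLICE with no separate cap
		copy(to, from)                // OCOPY from the same-length source
		return to
	}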
- case ORUNESTR:
+ case ir.ORUNESTR:
a := nodnil()
- if n.Esc == EscNone {
- t := types.NewArray(types.Types[TUINT8], 4)
- a = nod(OADDR, temp(t), nil)
+ if n.Esc() == EscNone {
+ t := types.NewArray(types.Types[types.TUINT8], 4)
+ a = ir.Nod(ir.OADDR, temp(t), nil)
}
// intstring(*[4]byte, rune)
- n = mkcall("intstring", n.Type, init, a, conv(n.Left, types.Types[TINT64]))
+ n = mkcall("intstring", n.Type(), init, a, conv(n.Left(), types.Types[types.TINT64]))
- case OBYTES2STR, ORUNES2STR:
+ case ir.OBYTES2STR, ir.ORUNES2STR:
a := nodnil()
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
// Create temporary buffer for string on stack.
- t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
- a = nod(OADDR, temp(t), nil)
+ t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
+ a = ir.Nod(ir.OADDR, temp(t), nil)
}
- if n.Op == ORUNES2STR {
+ if n.Op() == ir.ORUNES2STR {
// slicerunetostring(*[32]byte, []rune) string
- n = mkcall("slicerunetostring", n.Type, init, a, n.Left)
+ n = mkcall("slicerunetostring", n.Type(), init, a, n.Left())
} else {
// slicebytetostring(*[32]byte, ptr *byte, n int) string
- n.Left = cheapexpr(n.Left, init)
- ptr, len := n.Left.backingArrayPtrLen()
- n = mkcall("slicebytetostring", n.Type, init, a, ptr, len)
+ n.SetLeft(cheapexpr(n.Left(), init))
+ ptr, len := backingArrayPtrLen(n.Left())
+ n = mkcall("slicebytetostring", n.Type(), init, a, ptr, len)
}
- case OBYTES2STRTMP:
- n.Left = walkexpr(n.Left, init)
+ case ir.OBYTES2STRTMP:
+ n.SetLeft(walkexpr(n.Left(), init))
if !instrumenting {
// Let the backend handle OBYTES2STRTMP directly
// to avoid a function call to slicebytetostringtmp.
break
}
// slicebytetostringtmp(ptr *byte, n int) string
- n.Left = cheapexpr(n.Left, init)
- ptr, len := n.Left.backingArrayPtrLen()
- n = mkcall("slicebytetostringtmp", n.Type, init, ptr, len)
+ n.SetLeft(cheapexpr(n.Left(), init))
+ ptr, len := backingArrayPtrLen(n.Left())
+ n = mkcall("slicebytetostringtmp", n.Type(), init, ptr, len)
- case OSTR2BYTES:
- s := n.Left
- if Isconst(s, CTSTR) {
+ case ir.OSTR2BYTES:
+ s := n.Left()
+ if ir.IsConst(s, constant.String) {
sc := s.StringVal()
// Allocate a [n]byte of the right size.
- t := types.NewArray(types.Types[TUINT8], int64(len(sc)))
- var a *Node
- if n.Esc == EscNone && len(sc) <= int(maxImplicitStackVarSize) {
- a = nod(OADDR, temp(t), nil)
+ t := types.NewArray(types.Types[types.TUINT8], int64(len(sc)))
+ var a ir.Node
+ if n.Esc() == EscNone && len(sc) <= int(maxImplicitStackVarSize) {
+ a = ir.Nod(ir.OADDR, temp(t), nil)
} else {
a = callnew(t)
}
p := temp(t.PtrTo()) // *[n]byte
- init.Append(typecheck(nod(OAS, p, a), ctxStmt))
+ init.Append(typecheck(ir.Nod(ir.OAS, p, a), ctxStmt))
// Copy from the static string data to the [n]byte.
if len(sc) > 0 {
- as := nod(OAS,
- nod(ODEREF, p, nil),
- nod(ODEREF, convnop(nod(OSPTR, s, nil), t.PtrTo()), nil))
+ as := ir.Nod(ir.OAS,
+ ir.Nod(ir.ODEREF, p, nil),
+ ir.Nod(ir.ODEREF, convnop(ir.Nod(ir.OSPTR, s, nil), t.PtrTo()), nil))
as = typecheck(as, ctxStmt)
as = walkstmt(as)
init.Append(as)
}
// Slice the [n]byte to a []byte.
- n.Op = OSLICEARR
- n.Left = p
+ n.SetOp(ir.OSLICEARR)
+ n.SetLeft(p)
n = walkexpr(n, init)
break
}
a := nodnil()
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
// Create temporary buffer for slice on stack.
- t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
- a = nod(OADDR, temp(t), nil)
+ t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
+ a = ir.Nod(ir.OADDR, temp(t), nil)
}
// stringtoslicebyte(*[32]byte, string) []byte
- n = mkcall("stringtoslicebyte", n.Type, init, a, conv(s, types.Types[TSTRING]))
+ n = mkcall("stringtoslicebyte", n.Type(), init, a, conv(s, types.Types[types.TSTRING]))
- case OSTR2BYTESTMP:
+ case ir.OSTR2BYTESTMP:
// []byte(string) conversion that creates a slice
// referring to the actual string bytes.
// This conversion is handled later by the backend and
@@ -1533,43 +1537,43 @@ opswitch:
// that know that the slice won't be mutated.
// The only such case today is:
// for i, c := range []byte(string)
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
- case OSTR2RUNES:
+ case ir.OSTR2RUNES:
a := nodnil()
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
// Create temporary buffer for slice on stack.
- t := types.NewArray(types.Types[TINT32], tmpstringbufsize)
- a = nod(OADDR, temp(t), nil)
+ t := types.NewArray(types.Types[types.TINT32], tmpstringbufsize)
+ a = ir.Nod(ir.OADDR, temp(t), nil)
}
// stringtoslicerune(*[32]rune, string) []rune
- n = mkcall("stringtoslicerune", n.Type, init, a, conv(n.Left, types.Types[TSTRING]))
+ n = mkcall("stringtoslicerune", n.Type(), init, a, conv(n.Left(), types.Types[types.TSTRING]))
- case OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT, OPTRLIT:
- if isStaticCompositeLiteral(n) && !canSSAType(n.Type) {
+ case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT, ir.OPTRLIT:
+ if isStaticCompositeLiteral(n) && !canSSAType(n.Type()) {
// n can be directly represented in the read-only data section.
// Make direct reference to the static data. See issue 12841.
- vstat := readonlystaticname(n.Type)
+ vstat := readonlystaticname(n.Type())
fixedlit(inInitFunction, initKindStatic, n, vstat, init)
n = vstat
n = typecheck(n, ctxExpr)
break
}
- var_ := temp(n.Type)
+ var_ := temp(n.Type())
anylit(n, var_, init)
n = var_
- case OSEND:
- n1 := n.Right
- n1 = assignconv(n1, n.Left.Type.Elem(), "chan send")
+ case ir.OSEND:
+ n1 := n.Right()
+ n1 = assignconv(n1, n.Left().Type().Elem(), "chan send")
n1 = walkexpr(n1, init)
- n1 = nod(OADDR, n1, nil)
- n = mkcall1(chanfn("chansend1", 2, n.Left.Type), nil, init, n.Left, n1)
+ n1 = ir.Nod(ir.OADDR, n1, nil)
+ n = mkcall1(chanfn("chansend1", 2, n.Left().Type()), nil, init, n.Left(), n1)
- case OCLOSURE:
+ case ir.OCLOSURE:
n = walkclosure(n, init)
- case OCALLPART:
+ case ir.OCALLPART:
n = walkpartialcall(n, init)
}
@@ -1578,27 +1582,27 @@ opswitch:
// constants until walk. For example, if n is y%1 == 0, the
// walk of y%1 may have replaced it by 0.
// Check whether n with its updated args is itself now a constant.
- t := n.Type
- evconst(n)
- if n.Type != t {
- Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type)
+ t := n.Type()
+ n = evalConst(n)
+ if n.Type() != t {
+ base.Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type())
}
- if n.Op == OLITERAL {
+ if n.Op() == ir.OLITERAL {
n = typecheck(n, ctxExpr)
// Emit string symbol now to avoid emitting
// any concurrently during the backend.
- if s, ok := n.Val().U.(string); ok {
- _ = stringsym(n.Pos, s)
+ if v := n.Val(); v.Kind() == constant.String {
+ _ = stringsym(n.Pos(), constant.StringVal(v))
}
}
updateHasCall(n)
- if Debug.w != 0 && n != nil {
- Dump("after walk expr", n)
+ if base.Flag.LowerW != 0 && n != nil {
+ ir.Dump("after walk expr", n)
}
- lineno = lno
+ base.Pos = lno
return n
}
@@ -1615,14 +1619,14 @@ func markTypeUsedInInterface(t *types.Type, from *obj.LSym) {
// markUsedIfaceMethod marks that an interface method is used in the current
// function. n is the OCALLINTER node.

-func markUsedIfaceMethod(n *Node) {
- ityp := n.Left.Left.Type
+func markUsedIfaceMethod(n ir.Node) {
+ ityp := n.Left().Left().Type()
tsym := typenamesym(ityp).Linksym()
- r := obj.Addrel(Curfn.Func.lsym)
+ r := obj.Addrel(Curfn.Func().LSym)
r.Sym = tsym
// n.Left.Xoffset is the method index * Widthptr (the offset of the code
// pointer in the itab).
- midx := n.Left.Xoffset / int64(Widthptr)
+ midx := n.Left().Offset() / int64(Widthptr)
r.Add = ifaceMethodOffset(ityp, midx)
r.Type = objabi.R_USEIFACEMETHOD
}
@@ -1634,76 +1638,76 @@ func markUsedIfaceMethod(n *Node) {
// If no such function is necessary, it returns (Txxx, Txxx).
func rtconvfn(src, dst *types.Type) (param, result types.EType) {
if thearch.SoftFloat {
- return Txxx, Txxx
+ return types.Txxx, types.Txxx
}
switch thearch.LinkArch.Family {
case sys.ARM, sys.MIPS:
if src.IsFloat() {
switch dst.Etype {
- case TINT64, TUINT64:
- return TFLOAT64, dst.Etype
+ case types.TINT64, types.TUINT64:
+ return types.TFLOAT64, dst.Etype
}
}
if dst.IsFloat() {
switch src.Etype {
- case TINT64, TUINT64:
- return src.Etype, TFLOAT64
+ case types.TINT64, types.TUINT64:
+ return src.Etype, types.TFLOAT64
}
}
case sys.I386:
if src.IsFloat() {
switch dst.Etype {
- case TINT64, TUINT64:
- return TFLOAT64, dst.Etype
- case TUINT32, TUINT, TUINTPTR:
- return TFLOAT64, TUINT32
+ case types.TINT64, types.TUINT64:
+ return types.TFLOAT64, dst.Etype
+ case types.TUINT32, types.TUINT, types.TUINTPTR:
+ return types.TFLOAT64, types.TUINT32
}
}
if dst.IsFloat() {
switch src.Etype {
- case TINT64, TUINT64:
- return src.Etype, TFLOAT64
- case TUINT32, TUINT, TUINTPTR:
- return TUINT32, TFLOAT64
+ case types.TINT64, types.TUINT64:
+ return src.Etype, types.TFLOAT64
+ case types.TUINT32, types.TUINT, types.TUINTPTR:
+ return types.TUINT32, types.TFLOAT64
}
}
}
- return Txxx, Txxx
+ return types.Txxx, types.Txxx
}
// TODO(josharian): combine this with its caller and simplify
-func reduceSlice(n *Node) *Node {
+func reduceSlice(n ir.Node) ir.Node {
low, high, max := n.SliceBounds()
- if high != nil && high.Op == OLEN && samesafeexpr(n.Left, high.Left) {
+ if high != nil && high.Op() == ir.OLEN && samesafeexpr(n.Left(), high.Left()) {
// Reduce x[i:len(x)] to x[i:].
high = nil
}
n.SetSliceBounds(low, high, max)
- if (n.Op == OSLICE || n.Op == OSLICESTR) && low == nil && high == nil {
+ if (n.Op() == ir.OSLICE || n.Op() == ir.OSLICESTR) && low == nil && high == nil {
// Reduce x[:] to x.
- if Debug_slice > 0 {
- Warn("slice: omit slice operation")
+ if base.Debug.Slice > 0 {
+ base.Warn("slice: omit slice operation")
}
- return n.Left
+ return n.Left()
}
return n
}
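
Both reductions in one sketch:

	func reduced(x []int, i int) {
		_ = x[i:len(x)] // high bound is len of the same x: dropped, leaving x[i:]
		_ = x[:]        // no bounds remain: the OSLICE node is replaced by x itself
	}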
-func ascompatee1(l *Node, r *Node, init *Nodes) *Node {
+func ascompatee1(l ir.Node, r ir.Node, init *ir.Nodes) ir.Node {
// convas will turn map assigns into function calls,
// making it impossible for reorder3 to work.
- n := nod(OAS, l, r)
+ n := ir.Nod(ir.OAS, l, r)
- if l.Op == OINDEXMAP {
+ if l.Op() == ir.OINDEXMAP {
return n
}
return convas(n, init)
}
-func ascompatee(op Op, nl, nr []*Node, init *Nodes) []*Node {
+func ascompatee(op ir.Op, nl, nr []ir.Node, init *ir.Nodes) []ir.Node {
// check assign expression list to
// an expression list. called in
// expr-list = expr-list
@@ -1716,14 +1720,14 @@ func ascompatee(op Op, nl, nr []*Node, init *Nodes) []*Node {
nr[i1] = safeexpr(nr[i1], init)
}
- var nn []*Node
+ var nn []ir.Node
i := 0
for ; i < len(nl); i++ {
if i >= len(nr) {
break
}
// Do not generate 'x = x' during return. See issue 4014.
- if op == ORETURN && samesafeexpr(nl[i], nr[i]) {
+ if op == ir.ORETURN && samesafeexpr(nl[i], nr[i]) {
continue
}
nn = append(nn, ascompatee1(nl[i], nr[i], init))
@@ -1731,20 +1735,20 @@ func ascompatee(op Op, nl, nr []*Node, init *Nodes) []*Node {
// cannot happen: caller checked that lists had same length
if i < len(nl) || i < len(nr) {
- var nln, nrn Nodes
+ var nln, nrn ir.Nodes
nln.Set(nl)
nrn.Set(nr)
- Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), Curfn.funcname())
+ base.Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), ir.FuncName(Curfn))
}
return nn
}
// fncall reports whether assigning an rvalue of type rt to an lvalue l might involve a function call.
-func fncall(l *Node, rt *types.Type) bool {
- if l.HasCall() || l.Op == OINDEXMAP {
+func fncall(l ir.Node, rt *types.Type) bool {
+ if l.HasCall() || l.Op() == ir.OINDEXMAP {
return true
}
- if types.Identical(l.Type, rt) {
+ if types.Identical(l.Type(), rt) {
return false
}
// There might be a conversion required, which might involve a runtime call.
@@ -1754,14 +1758,14 @@ func fncall(l *Node, rt *types.Type) bool {
// check assign type list to
// an expression list. called in
// expr-list = func()
-func ascompatet(nl Nodes, nr *types.Type) []*Node {
+func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node {
if nl.Len() != nr.NumFields() {
- Fatalf("ascompatet: assignment count mismatch: %d = %d", nl.Len(), nr.NumFields())
+ base.Fatalf("ascompatet: assignment count mismatch: %d = %d", nl.Len(), nr.NumFields())
}
- var nn, mm Nodes
+ var nn, mm ir.Nodes
for i, l := range nl.Slice() {
- if l.isBlank() {
+ if ir.IsBlank(l) {
continue
}
r := nr.Field(i)
@@ -1771,23 +1775,23 @@ func ascompatet(nl Nodes, nr *types.Type) []*Node {
if fncall(l, r.Type) {
tmp := temp(r.Type)
tmp = typecheck(tmp, ctxExpr)
- a := nod(OAS, l, tmp)
+ a := ir.Nod(ir.OAS, l, tmp)
a = convas(a, &mm)
mm.Append(a)
l = tmp
}
- res := nod(ORESULT, nil, nil)
- res.Xoffset = Ctxt.FixedFrameSize() + r.Offset
- res.Type = r.Type
+ res := ir.Nod(ir.ORESULT, nil, nil)
+ res.SetOffset(base.Ctxt.FixedFrameSize() + r.Offset)
+ res.SetType(r.Type)
res.SetTypecheck(1)
- a := nod(OAS, l, res)
+ a := ir.Nod(ir.OAS, l, res)
a = convas(a, &nn)
updateHasCall(a)
if a.HasCall() {
- Dump("ascompatet ucount", a)
- Fatalf("ascompatet: too many function calls evaluating parameters")
+ ir.Dump("ascompatet ucount", a)
+ base.Fatalf("ascompatet: too many function calls evaluating parameters")
}
nn.Append(a)
@@ -1796,28 +1800,28 @@ func ascompatet(nl Nodes, nr *types.Type) []*Node {
}
// package all the arguments that match a ... T parameter into a []T.
-func mkdotargslice(typ *types.Type, args []*Node) *Node {
- var n *Node
+func mkdotargslice(typ *types.Type, args []ir.Node) ir.Node {
+ var n ir.Node
if len(args) == 0 {
n = nodnil()
- n.Type = typ
+ n.SetType(typ)
} else {
- n = nod(OCOMPLIT, nil, typenod(typ))
- n.List.Append(args...)
+ n = ir.Nod(ir.OCOMPLIT, nil, typenod(typ))
+ n.PtrList().Append(args...)
n.SetImplicit(true)
}
n = typecheck(n, ctxExpr)
- if n.Type == nil {
- Fatalf("mkdotargslice: typecheck failed")
+ if n.Type() == nil {
+ base.Fatalf("mkdotargslice: typecheck failed")
}
return n
}
// fixVariadicCall rewrites calls to variadic functions to use an
// explicit ... argument if one is not already present.
-func fixVariadicCall(call *Node) {
- fntype := call.Left.Type
+func fixVariadicCall(call ir.Node) {
+ fntype := call.Left().Type()
if !fntype.IsVariadic() || call.IsDDD() {
return
}
@@ -1825,33 +1829,33 @@ func fixVariadicCall(call *Node) {
vi := fntype.NumParams() - 1
vt := fntype.Params().Field(vi).Type
- args := call.List.Slice()
+ args := call.List().Slice()
extra := args[vi:]
slice := mkdotargslice(vt, extra)
for i := range extra {
extra[i] = nil // allow GC
}
- call.List.Set(append(args[:vi], slice))
+ call.PtrList().Set(append(args[:vi], slice))
call.SetIsDDD(true)
}
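
A sketch of the rewrite with a hypothetical variadic f; mkdotargslice packs the trailing arguments into one slice and the call's ... flag is set:

	func f(prefix string, rest ...int) {}

	func callF() {
		f("p", 1, 2, 3)           // as written by the user
		f("p", []int{1, 2, 3}...) // equivalent form after fixVariadicCall
	}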
-func walkCall(n *Node, init *Nodes) {
- if n.Rlist.Len() != 0 {
+func walkCall(n ir.Node, init *ir.Nodes) {
+ if n.Rlist().Len() != 0 {
return // already walked
}
- params := n.Left.Type.Params()
- args := n.List.Slice()
+ params := n.Left().Type().Params()
+ args := n.List().Slice()
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
walkexprlist(args, init)
// If this is a method call, add the receiver at the beginning of the args.
- if n.Op == OCALLMETH {
- withRecv := make([]*Node, len(args)+1)
- withRecv[0] = n.Left.Left
- n.Left.Left = nil
+ if n.Op() == ir.OCALLMETH {
+ withRecv := make([]ir.Node, len(args)+1)
+ withRecv[0] = n.Left().Left()
+ n.Left().SetLeft(nil)
copy(withRecv[1:], args)
args = withRecv
}
@@ -1860,14 +1864,14 @@ func walkCall(n *Node, init *Nodes) {
// store that argument into a temporary variable,
// to prevent those calls from clobbering arguments already on the stack.
// When instrumenting, all arguments might require function calls.
- var tempAssigns []*Node
+ var tempAssigns []ir.Node
for i, arg := range args {
updateHasCall(arg)
// Determine param type.
var t *types.Type
- if n.Op == OCALLMETH {
+ if n.Op() == ir.OCALLMETH {
if i == 0 {
- t = n.Left.Type.Recv().Type
+ t = n.Left().Type().Recv().Type
} else {
t = params.Field(i - 1).Type
}
@@ -1877,7 +1881,7 @@ func walkCall(n *Node, init *Nodes) {
if instrumenting || fncall(arg, t) {
// make assignment of fncall to tempAt
tmp := temp(t)
- a := nod(OAS, tmp, arg)
+ a := ir.Nod(ir.OAS, tmp, arg)
a = convas(a, init)
tempAssigns = append(tempAssigns, a)
// replace arg with temp
@@ -1885,19 +1889,19 @@ func walkCall(n *Node, init *Nodes) {
}
}
- n.List.Set(tempAssigns)
- n.Rlist.Set(args)
+ n.PtrList().Set(tempAssigns)
+ n.PtrRlist().Set(args)
}
// generate code for print
-func walkprint(nn *Node, init *Nodes) *Node {
+func walkprint(nn ir.Node, init *ir.Nodes) ir.Node {
// Hoist all the argument evaluation up before the lock.
- walkexprlistcheap(nn.List.Slice(), init)
+ walkexprlistcheap(nn.List().Slice(), init)
// For println, add " " between elements and "\n" at the end.
- if nn.Op == OPRINTN {
- s := nn.List.Slice()
- t := make([]*Node, 0, len(s)*2)
+ if nn.Op() == ir.OPRINTN {
+ s := nn.List().Slice()
+ t := make([]ir.Node, 0, len(s)*2)
for i, n := range s {
if i != 0 {
t = append(t, nodstr(" "))
@@ -1905,15 +1909,15 @@ func walkprint(nn *Node, init *Nodes) *Node {
t = append(t, n)
}
t = append(t, nodstr("\n"))
- nn.List.Set(t)
+ nn.PtrList().Set(t)
}
// Collapse runs of constant strings.
- s := nn.List.Slice()
- t := make([]*Node, 0, len(s))
+ s := nn.List().Slice()
+ t := make([]ir.Node, 0, len(s))
for i := 0; i < len(s); {
var strs []string
- for i < len(s) && Isconst(s[i], CTSTR) {
+ for i < len(s) && ir.IsConst(s[i], constant.String) {
strs = append(strs, s[i].StringVal())
i++
}
@@ -1925,74 +1929,75 @@ func walkprint(nn *Node, init *Nodes) *Node {
i++
}
}
- nn.List.Set(t)
+ nn.PtrList().Set(t)
- calls := []*Node{mkcall("printlock", nil, init)}
- for i, n := range nn.List.Slice() {
- if n.Op == OLITERAL {
- switch n.Val().Ctype() {
- case CTRUNE:
+ calls := []ir.Node{mkcall("printlock", nil, init)}
+ for i, n := range nn.List().Slice() {
+ if n.Op() == ir.OLITERAL {
+ if n.Type() == types.UntypedRune {
n = defaultlit(n, types.Runetype)
+ }
- case CTINT:
- n = defaultlit(n, types.Types[TINT64])
+ switch n.Val().Kind() {
+ case constant.Int:
+ n = defaultlit(n, types.Types[types.TINT64])
- case CTFLT:
- n = defaultlit(n, types.Types[TFLOAT64])
+ case constant.Float:
+ n = defaultlit(n, types.Types[types.TFLOAT64])
}
}
- if n.Op != OLITERAL && n.Type != nil && n.Type.Etype == TIDEAL {
- n = defaultlit(n, types.Types[TINT64])
+ if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().Etype == types.TIDEAL {
+ n = defaultlit(n, types.Types[types.TINT64])
}
n = defaultlit(n, nil)
- nn.List.SetIndex(i, n)
- if n.Type == nil || n.Type.Etype == TFORW {
+ nn.List().SetIndex(i, n)
+ if n.Type() == nil || n.Type().Etype == types.TFORW {
continue
}
- var on *Node
- switch n.Type.Etype {
- case TINTER:
- if n.Type.IsEmptyInterface() {
+ var on ir.Node
+ switch n.Type().Etype {
+ case types.TINTER:
+ if n.Type().IsEmptyInterface() {
on = syslook("printeface")
} else {
on = syslook("printiface")
}
- on = substArgTypes(on, n.Type) // any-1
- case TPTR:
- if n.Type.Elem().NotInHeap() {
+ on = substArgTypes(on, n.Type()) // any-1
+ case types.TPTR:
+ if n.Type().Elem().NotInHeap() {
on = syslook("printuintptr")
- n = nod(OCONV, n, nil)
- n.Type = types.Types[TUNSAFEPTR]
- n = nod(OCONV, n, nil)
- n.Type = types.Types[TUINTPTR]
+ n = ir.Nod(ir.OCONV, n, nil)
+ n.SetType(types.Types[types.TUNSAFEPTR])
+ n = ir.Nod(ir.OCONV, n, nil)
+ n.SetType(types.Types[types.TUINTPTR])
break
}
fallthrough
- case TCHAN, TMAP, TFUNC, TUNSAFEPTR:
+ case types.TCHAN, types.TMAP, types.TFUNC, types.TUNSAFEPTR:
on = syslook("printpointer")
- on = substArgTypes(on, n.Type) // any-1
- case TSLICE:
+ on = substArgTypes(on, n.Type()) // any-1
+ case types.TSLICE:
on = syslook("printslice")
- on = substArgTypes(on, n.Type) // any-1
- case TUINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINTPTR:
- if isRuntimePkg(n.Type.Sym.Pkg) && n.Type.Sym.Name == "hex" {
+ on = substArgTypes(on, n.Type()) // any-1
+ case types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR:
+ if isRuntimePkg(n.Type().Sym.Pkg) && n.Type().Sym.Name == "hex" {
on = syslook("printhex")
} else {
on = syslook("printuint")
}
- case TINT, TINT8, TINT16, TINT32, TINT64:
+ case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64:
on = syslook("printint")
- case TFLOAT32, TFLOAT64:
+ case types.TFLOAT32, types.TFLOAT64:
on = syslook("printfloat")
- case TCOMPLEX64, TCOMPLEX128:
+ case types.TCOMPLEX64, types.TCOMPLEX128:
on = syslook("printcomplex")
- case TBOOL:
+ case types.TBOOL:
on = syslook("printbool")
- case TSTRING:
+ case types.TSTRING:
cs := ""
- if Isconst(n, CTSTR) {
+ if ir.IsConst(n, constant.String) {
cs = n.StringVal()
}
switch cs {
@@ -2004,18 +2009,18 @@ func walkprint(nn *Node, init *Nodes) *Node {
on = syslook("printstring")
}
default:
- badtype(OPRINT, n.Type, nil)
+ badtype(ir.OPRINT, n.Type(), nil)
continue
}
- r := nod(OCALL, on, nil)
- if params := on.Type.Params().FieldSlice(); len(params) > 0 {
+ r := ir.Nod(ir.OCALL, on, nil)
+ if params := on.Type().Params().FieldSlice(); len(params) > 0 {
t := params[0].Type
- if !types.Identical(t, n.Type) {
- n = nod(OCONV, n, nil)
- n.Type = t
+ if !types.Identical(t, n.Type()) {
+ n = ir.Nod(ir.OCONV, n, nil)
+ n.SetType(t)
}
- r.List.Append(n)
+ r.PtrList().Append(n)
}
calls = append(calls, r)
}
@@ -2025,17 +2030,17 @@ func walkprint(nn *Node, init *Nodes) *Node {
typecheckslice(calls, ctxStmt)
walkexprlist(calls, init)
- r := nod(OEMPTY, nil, nil)
+ r := ir.Nod(ir.OEMPTY, nil, nil)
r = typecheck(r, ctxStmt)
r = walkexpr(r, init)
- r.Ninit.Set(calls)
+ r.PtrInit().Set(calls)
return r
}
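
Roughly, and relying on the printsp/printnl special cases that fall in the gap between the hunks above, a println call expands as sketched here:

	package main

	func main() {
		println("x =", 42)
		// walkprint lowers this to approximately:
		//   printlock()
		//   printstring("x = ") // "x =" and the inserted " " collapse into one constant
		//   printint(42)
		//   printnl()           // the lone trailing "\n"
		//   printunlock()
	}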
-func callnew(t *types.Type) *Node {
+func callnew(t *types.Type) ir.Node {
dowidth(t)
- n := nod(ONEWOBJ, typename(t), nil)
- n.Type = types.NewPtr(t)
+ n := ir.Nod(ir.ONEWOBJ, typename(t), nil)
+ n.SetType(types.NewPtr(t))
n.SetTypecheck(1)
n.MarkNonNil()
return n
@@ -2043,55 +2048,55 @@ func callnew(t *types.Type) *Node {
// isReflectHeaderDataField reports whether l is an expression p.Data
// where p has type reflect.SliceHeader or reflect.StringHeader.
-func isReflectHeaderDataField(l *Node) bool {
- if l.Type != types.Types[TUINTPTR] {
+func isReflectHeaderDataField(l ir.Node) bool {
+ if l.Type() != types.Types[types.TUINTPTR] {
return false
}
var tsym *types.Sym
- switch l.Op {
- case ODOT:
- tsym = l.Left.Type.Sym
- case ODOTPTR:
- tsym = l.Left.Type.Elem().Sym
+ switch l.Op() {
+ case ir.ODOT:
+ tsym = l.Left().Type().Sym
+ case ir.ODOTPTR:
+ tsym = l.Left().Type().Elem().Sym
default:
return false
}
- if tsym == nil || l.Sym.Name != "Data" || tsym.Pkg.Path != "reflect" {
+ if tsym == nil || l.Sym().Name != "Data" || tsym.Pkg.Path != "reflect" {
return false
}
return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader"
}
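
The shape being detected, sketched with the real reflect types; Data is declared uintptr, so without this check the store would not be treated as a pointer write:

	import (
		"reflect"
		"unsafe"
	)

	func setData(h *reflect.SliceHeader, p unsafe.Pointer) {
		h.Data = uintptr(p) // ODOTPTR of a reflect header, field "Data": matched
	}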
-func convas(n *Node, init *Nodes) *Node {
- if n.Op != OAS {
- Fatalf("convas: not OAS %v", n.Op)
+func convas(n ir.Node, init *ir.Nodes) ir.Node {
+ if n.Op() != ir.OAS {
+ base.Fatalf("convas: not OAS %v", n.Op())
}
defer updateHasCall(n)
n.SetTypecheck(1)
- if n.Left == nil || n.Right == nil {
+ if n.Left() == nil || n.Right() == nil {
return n
}
- lt := n.Left.Type
- rt := n.Right.Type
+ lt := n.Left().Type()
+ rt := n.Right().Type()
if lt == nil || rt == nil {
return n
}
- if n.Left.isBlank() {
- n.Right = defaultlit(n.Right, nil)
+ if ir.IsBlank(n.Left()) {
+ n.SetRight(defaultlit(n.Right(), nil))
return n
}
if !types.Identical(lt, rt) {
- n.Right = assignconv(n.Right, lt, "assignment")
- n.Right = walkexpr(n.Right, init)
+ n.SetRight(assignconv(n.Right(), lt, "assignment"))
+ n.SetRight(walkexpr(n.Right(), init))
}
- dowidth(n.Right.Type)
+ dowidth(n.Right().Type())
return n
}
@@ -2102,53 +2107,53 @@ func convas(n *Node, init *Nodes) *Node {
// be later use of an earlier lvalue.
//
// function calls have been removed.
-func reorder3(all []*Node) []*Node {
+func reorder3(all []ir.Node) []ir.Node {
// If a needed expression may be affected by an
// earlier assignment, make an early copy of that
// expression and use the copy instead.
- var early []*Node
+ var early []ir.Node
- var mapinit Nodes
+ var mapinit ir.Nodes
for i, n := range all {
- l := n.Left
+ l := n.Left()
// Save subexpressions needed on left side.
// Drill through non-dereferences.
for {
- if l.Op == ODOT || l.Op == OPAREN {
- l = l.Left
+ if l.Op() == ir.ODOT || l.Op() == ir.OPAREN {
+ l = l.Left()
continue
}
- if l.Op == OINDEX && l.Left.Type.IsArray() {
- l.Right = reorder3save(l.Right, all, i, &early)
- l = l.Left
+ if l.Op() == ir.OINDEX && l.Left().Type().IsArray() {
+ l.SetRight(reorder3save(l.Right(), all, i, &early))
+ l = l.Left()
continue
}
break
}
- switch l.Op {
+ switch l.Op() {
default:
- Fatalf("reorder3 unexpected lvalue %#v", l.Op)
+ base.Fatalf("reorder3 unexpected lvalue %#v", l.Op())
- case ONAME:
+ case ir.ONAME:
break
- case OINDEX, OINDEXMAP:
- l.Left = reorder3save(l.Left, all, i, &early)
- l.Right = reorder3save(l.Right, all, i, &early)
- if l.Op == OINDEXMAP {
+ case ir.OINDEX, ir.OINDEXMAP:
+ l.SetLeft(reorder3save(l.Left(), all, i, &early))
+ l.SetRight(reorder3save(l.Right(), all, i, &early))
+ if l.Op() == ir.OINDEXMAP {
all[i] = convas(all[i], &mapinit)
}
- case ODEREF, ODOTPTR:
- l.Left = reorder3save(l.Left, all, i, &early)
+ case ir.ODEREF, ir.ODOTPTR:
+ l.SetLeft(reorder3save(l.Left(), all, i, &early))
}
// Save expression on right side.
- all[i].Right = reorder3save(all[i].Right, all, i, &early)
+ all[i].SetRight(reorder3save(all[i].Right(), all, i, &early))
}
early = append(mapinit.Slice(), early...)
@@ -2161,31 +2166,31 @@ func reorder3(all []*Node) []*Node {
// replace *np with that temp.
// The result of reorder3save MUST be assigned back to n, e.g.
// n.Left = reorder3save(n.Left, all, i, early)
-func reorder3save(n *Node, all []*Node, i int, early *[]*Node) *Node {
+func reorder3save(n ir.Node, all []ir.Node, i int, early *[]ir.Node) ir.Node {
if !aliased(n, all[:i]) {
return n
}
- q := temp(n.Type)
- q = nod(OAS, q, n)
+ q := temp(n.Type())
+ q = ir.Nod(ir.OAS, q, n)
q = typecheck(q, ctxStmt)
*early = append(*early, q)
- return q.Left
+ return q.Left()
}
// what's the outer value that a write to n affects?
// outer value means containing struct or array.
-func outervalue(n *Node) *Node {
+func outervalue(n ir.Node) ir.Node {
for {
- switch n.Op {
- case OXDOT:
- Fatalf("OXDOT in walk")
- case ODOT, OPAREN, OCONVNOP:
- n = n.Left
+ switch n.Op() {
+ case ir.OXDOT:
+ base.Fatalf("OXDOT in walk")
+ case ir.ODOT, ir.OPAREN, ir.OCONVNOP:
+ n = n.Left()
continue
- case OINDEX:
- if n.Left.Type != nil && n.Left.Type.IsArray() {
- n = n.Left
+ case ir.OINDEX:
+ if n.Left().Type() != nil && n.Left().Type().IsArray() {
+ n = n.Left()
continue
}
}
@@ -2196,15 +2201,15 @@ func outervalue(n *Node) *Node {
// Is it possible that the computation of r might be
// affected by assignments in all?
-func aliased(r *Node, all []*Node) bool {
+func aliased(r ir.Node, all []ir.Node) bool {
if r == nil {
return false
}
// Treat all fields of a struct as referring to the whole struct.
// We could do better but we would have to keep track of the fields.
- for r.Op == ODOT {
- r = r.Left
+ for r.Op() == ir.ODOT {
+ r = r.Left()
}
// Look for obvious aliasing: a variable being assigned
@@ -2215,26 +2220,26 @@ func aliased(r *Node, all []*Node) bool {
memwrite := false
for _, as := range all {
// We can ignore assignments to blank.
- if as.Left.isBlank() {
+ if ir.IsBlank(as.Left()) {
continue
}
- l := outervalue(as.Left)
- if l.Op != ONAME {
+ l := outervalue(as.Left())
+ if l.Op() != ir.ONAME {
memwrite = true
continue
}
switch l.Class() {
default:
- Fatalf("unexpected class: %v, %v", l, l.Class())
+ base.Fatalf("unexpected class: %v, %v", l, l.Class())
- case PAUTOHEAP, PEXTERN:
+ case ir.PAUTOHEAP, ir.PEXTERN:
memwrite = true
continue
- case PAUTO, PPARAM, PPARAMOUT:
- if l.Name.Addrtaken() {
+ case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
+ if l.Name().Addrtaken() {
memwrite = true
continue
}
@@ -2270,51 +2275,51 @@ func aliased(r *Node, all []*Node) bool {
// does the evaluation of n only refer to variables
// whose addresses have not been taken?
// (and no other memory)
-func varexpr(n *Node) bool {
+func varexpr(n ir.Node) bool {
if n == nil {
return true
}
- switch n.Op {
- case OLITERAL:
+ switch n.Op() {
+ case ir.OLITERAL, ir.ONIL:
return true
- case ONAME:
+ case ir.ONAME:
switch n.Class() {
- case PAUTO, PPARAM, PPARAMOUT:
- if !n.Name.Addrtaken() {
+ case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
+ if !n.Name().Addrtaken() {
return true
}
}
return false
- case OADD,
- OSUB,
- OOR,
- OXOR,
- OMUL,
- ODIV,
- OMOD,
- OLSH,
- ORSH,
- OAND,
- OANDNOT,
- OPLUS,
- ONEG,
- OBITNOT,
- OPAREN,
- OANDAND,
- OOROR,
- OCONV,
- OCONVNOP,
- OCONVIFACE,
- ODOTTYPE:
- return varexpr(n.Left) && varexpr(n.Right)
-
- case ODOT: // but not ODOTPTR
+ case ir.OADD,
+ ir.OSUB,
+ ir.OOR,
+ ir.OXOR,
+ ir.OMUL,
+ ir.ODIV,
+ ir.OMOD,
+ ir.OLSH,
+ ir.ORSH,
+ ir.OAND,
+ ir.OANDNOT,
+ ir.OPLUS,
+ ir.ONEG,
+ ir.OBITNOT,
+ ir.OPAREN,
+ ir.OANDAND,
+ ir.OOROR,
+ ir.OCONV,
+ ir.OCONVNOP,
+ ir.OCONVIFACE,
+ ir.ODOTTYPE:
+ return varexpr(n.Left()) && varexpr(n.Right())
+
+ case ir.ODOT: // but not ODOTPTR
// Should have been handled in aliased.
- Fatalf("varexpr unexpected ODOT")
+ base.Fatalf("varexpr unexpected ODOT")
}
// Be conservative.
@@ -2322,26 +2327,26 @@ func varexpr(n *Node) bool {
}
// is the name l mentioned in r?
-func vmatch2(l *Node, r *Node) bool {
+func vmatch2(l ir.Node, r ir.Node) bool {
if r == nil {
return false
}
- switch r.Op {
+ switch r.Op() {
// match each right given left
- case ONAME:
+ case ir.ONAME:
return l == r
- case OLITERAL:
+ case ir.OLITERAL, ir.ONIL:
return false
}
- if vmatch2(l, r.Left) {
+ if vmatch2(l, r.Left()) {
return true
}
- if vmatch2(l, r.Right) {
+ if vmatch2(l, r.Right()) {
return true
}
- for _, n := range r.List.Slice() {
+ for _, n := range r.List().Slice() {
if vmatch2(l, n) {
return true
}
@@ -2351,15 +2356,15 @@ func vmatch2(l *Node, r *Node) bool {
// is any name mentioned in l also mentioned in r?
// called by sinit.go
-func vmatch1(l *Node, r *Node) bool {
+func vmatch1(l ir.Node, r ir.Node) bool {
// isolate all left sides
if l == nil || r == nil {
return false
}
- switch l.Op {
- case ONAME:
+ switch l.Op() {
+ case ir.ONAME:
switch l.Class() {
- case PPARAM, PAUTO:
+ case ir.PPARAM, ir.PAUTO:
break
default:
@@ -2372,17 +2377,17 @@ func vmatch1(l *Node, r *Node) bool {
return vmatch2(l, r)
- case OLITERAL:
+ case ir.OLITERAL, ir.ONIL:
return false
}
- if vmatch1(l.Left, r) {
+ if vmatch1(l.Left(), r) {
return true
}
- if vmatch1(l.Right, r) {
+ if vmatch1(l.Right(), r) {
return true
}
- for _, n := range l.List.Slice() {
+ for _, n := range l.List().Slice() {
if vmatch1(n, r) {
return true
}
@@ -2392,21 +2397,21 @@ func vmatch1(l *Node, r *Node) bool {
// paramstoheap returns code to allocate memory for heap-escaped parameters
// and to copy non-result parameters' values from the stack.
-func paramstoheap(params *types.Type) []*Node {
- var nn []*Node
+func paramstoheap(params *types.Type) []ir.Node {
+ var nn []ir.Node
for _, t := range params.Fields().Slice() {
- v := asNode(t.Nname)
- if v != nil && v.Sym != nil && strings.HasPrefix(v.Sym.Name, "~r") { // unnamed result
+ v := ir.AsNode(t.Nname)
+ if v != nil && v.Sym() != nil && strings.HasPrefix(v.Sym().Name, "~r") { // unnamed result
v = nil
}
if v == nil {
continue
}
- if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil {
- nn = append(nn, walkstmt(nod(ODCL, v, nil)))
- if stackcopy.Class() == PPARAM {
- nn = append(nn, walkstmt(typecheck(nod(OAS, v, stackcopy), ctxStmt)))
+ if stackcopy := v.Name().Param.Stackcopy; stackcopy != nil {
+ nn = append(nn, walkstmt(ir.Nod(ir.ODCL, v, nil)))
+ if stackcopy.Class() == ir.PPARAM {
+ nn = append(nn, walkstmt(typecheck(ir.Nod(ir.OAS, v, stackcopy), ctxStmt)))
}
}
}
@@ -2422,39 +2427,39 @@ func paramstoheap(params *types.Type) []*Node {
// even allocations to move params/results to the heap.
// The generated code is added to Curfn's Enter list.
func zeroResults() {
- for _, f := range Curfn.Type.Results().Fields().Slice() {
- v := asNode(f.Nname)
- if v != nil && v.Name.Param.Heapaddr != nil {
+ for _, f := range Curfn.Type().Results().Fields().Slice() {
+ v := ir.AsNode(f.Nname)
+ if v != nil && v.Name().Param.Heapaddr != nil {
// The local which points to the return value is the
// thing that needs zeroing. This is already handled
// by a Needzero annotation in plive.go:livenessepilogue.
continue
}
- if v.isParamHeapCopy() {
+ if isParamHeapCopy(v) {
// TODO(josharian/khr): Investigate whether we can switch to "continue" here,
// and document more in either case.
// In the review of CL 114797, Keith wrote (roughly):
// I don't think the zeroing below matters.
// The stack return value will never be marked as live anywhere in the function.
// It is not written to until deferreturn returns.
- v = v.Name.Param.Stackcopy
+ v = v.Name().Param.Stackcopy
}
// Zero the stack location containing f.
- Curfn.Func.Enter.Append(nodl(Curfn.Pos, OAS, v, nil))
+ Curfn.Func().Enter.Append(ir.NodAt(Curfn.Pos(), ir.OAS, v, nil))
}
}
// returnsfromheap returns code to copy values for heap-escaped parameters
// back to the stack.
-func returnsfromheap(params *types.Type) []*Node {
- var nn []*Node
+func returnsfromheap(params *types.Type) []ir.Node {
+ var nn []ir.Node
for _, t := range params.Fields().Slice() {
- v := asNode(t.Nname)
+ v := ir.AsNode(t.Nname)
if v == nil {
continue
}
- if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil && stackcopy.Class() == PPARAMOUT {
- nn = append(nn, walkstmt(typecheck(nod(OAS, stackcopy, v), ctxStmt)))
+ if stackcopy := v.Name().Param.Stackcopy; stackcopy != nil && stackcopy.Class() == ir.PPARAMOUT {
+ nn = append(nn, walkstmt(typecheck(ir.Nod(ir.OAS, stackcopy, v), ctxStmt)))
}
}
@@ -2465,65 +2470,65 @@ func returnsfromheap(params *types.Type) []*Node {
// between the stack and the heap. The generated code is added to Curfn's
// Enter and Exit lists.
func heapmoves() {
- lno := lineno
- lineno = Curfn.Pos
- nn := paramstoheap(Curfn.Type.Recvs())
- nn = append(nn, paramstoheap(Curfn.Type.Params())...)
- nn = append(nn, paramstoheap(Curfn.Type.Results())...)
- Curfn.Func.Enter.Append(nn...)
- lineno = Curfn.Func.Endlineno
- Curfn.Func.Exit.Append(returnsfromheap(Curfn.Type.Results())...)
- lineno = lno
+ lno := base.Pos
+ base.Pos = Curfn.Pos()
+ nn := paramstoheap(Curfn.Type().Recvs())
+ nn = append(nn, paramstoheap(Curfn.Type().Params())...)
+ nn = append(nn, paramstoheap(Curfn.Type().Results())...)
+ Curfn.Func().Enter.Append(nn...)
+ base.Pos = Curfn.Func().Endlineno
+ Curfn.Func().Exit.Append(returnsfromheap(Curfn.Type().Results())...)
+ base.Pos = lno
}
-func vmkcall(fn *Node, t *types.Type, init *Nodes, va []*Node) *Node {
- if fn.Type == nil || fn.Type.Etype != TFUNC {
- Fatalf("mkcall %v %v", fn, fn.Type)
+func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) ir.Node {
+ if fn.Type() == nil || fn.Type().Etype != types.TFUNC {
+ base.Fatalf("mkcall %v %v", fn, fn.Type())
}
- n := fn.Type.NumParams()
+ n := fn.Type().NumParams()
if n != len(va) {
- Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va))
+ base.Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va))
}
- r := nod(OCALL, fn, nil)
- r.List.Set(va)
- if fn.Type.NumResults() > 0 {
+ r := ir.Nod(ir.OCALL, fn, nil)
+ r.PtrList().Set(va)
+ if fn.Type().NumResults() > 0 {
r = typecheck(r, ctxExpr|ctxMultiOK)
} else {
r = typecheck(r, ctxStmt)
}
r = walkexpr(r, init)
- r.Type = t
+ r.SetType(t)
return r
}
-func mkcall(name string, t *types.Type, init *Nodes, args ...*Node) *Node {
+func mkcall(name string, t *types.Type, init *ir.Nodes, args ...ir.Node) ir.Node {
return vmkcall(syslook(name), t, init, args)
}
-func mkcall1(fn *Node, t *types.Type, init *Nodes, args ...*Node) *Node {
+func mkcall1(fn ir.Node, t *types.Type, init *ir.Nodes, args ...ir.Node) ir.Node {
return vmkcall(fn, t, init, args)
}
-func conv(n *Node, t *types.Type) *Node {
- if types.Identical(n.Type, t) {
+func conv(n ir.Node, t *types.Type) ir.Node {
+ if types.Identical(n.Type(), t) {
return n
}
- n = nod(OCONV, n, nil)
- n.Type = t
+ n = ir.Nod(ir.OCONV, n, nil)
+ n.SetType(t)
n = typecheck(n, ctxExpr)
return n
}
// convnop converts node n to type t using the OCONVNOP op
// and typechecks the result with ctxExpr.
-func convnop(n *Node, t *types.Type) *Node {
- if types.Identical(n.Type, t) {
+func convnop(n ir.Node, t *types.Type) ir.Node {
+ if types.Identical(n.Type(), t) {
return n
}
- n = nod(OCONVNOP, n, nil)
- n.Type = t
+ n = ir.Nod(ir.OCONVNOP, n, nil)
+ n.SetType(t)
n = typecheck(n, ctxExpr)
return n
}
@@ -2531,30 +2536,30 @@ func convnop(n *Node, t *types.Type) *Node {
// byteindex converts n, which is byte-sized, to an int used to index into an array.
// We cannot use conv, because we allow converting bool to int here,
// which is forbidden in user code.
-func byteindex(n *Node) *Node {
+func byteindex(n ir.Node) ir.Node {
// We cannot convert from bool to int directly.
// While converting from int8 to int is possible, it would yield
// the wrong result for negative values.
// Reinterpreting the value as an unsigned byte solves both cases.
- if !types.Identical(n.Type, types.Types[TUINT8]) {
- n = nod(OCONV, n, nil)
- n.Type = types.Types[TUINT8]
+ if !types.Identical(n.Type(), types.Types[types.TUINT8]) {
+ n = ir.Nod(ir.OCONV, n, nil)
+ n.SetType(types.Types[types.TUINT8])
n.SetTypecheck(1)
}
- n = nod(OCONV, n, nil)
- n.Type = types.Types[TINT]
+ n = ir.Nod(ir.OCONV, n, nil)
+ n.SetType(types.Types[types.TINT])
n.SetTypecheck(1)
return n
}
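
The sign-extension pitfall the comment describes, as runnable Go (the bool half of byteindex has no legal source spelling, which is why the compiler builds the OCONV nodes by hand):

	package main

	import "fmt"

	func main() {
		var b int8 = -1
		fmt.Println(int(b))        // -1: direct conversion sign-extends
		fmt.Println(int(uint8(b))) // 255: the round trip through uint8 that byteindex emits
	}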
-func chanfn(name string, n int, t *types.Type) *Node {
+func chanfn(name string, n int, t *types.Type) ir.Node {
if !t.IsChan() {
- Fatalf("chanfn %v", t)
+ base.Fatalf("chanfn %v", t)
}
fn := syslook(name)
switch n {
default:
- Fatalf("chanfn %d", n)
+ base.Fatalf("chanfn %d", n)
case 1:
fn = substArgTypes(fn, t.Elem())
case 2:
@@ -2563,18 +2568,18 @@ func chanfn(name string, n int, t *types.Type) *Node {
return fn
}
-func mapfn(name string, t *types.Type) *Node {
+func mapfn(name string, t *types.Type) ir.Node {
if !t.IsMap() {
- Fatalf("mapfn %v", t)
+ base.Fatalf("mapfn %v", t)
}
fn := syslook(name)
fn = substArgTypes(fn, t.Key(), t.Elem(), t.Key(), t.Elem())
return fn
}
-func mapfndel(name string, t *types.Type) *Node {
+func mapfndel(name string, t *types.Type) ir.Node {
if !t.IsMap() {
- Fatalf("mapfn %v", t)
+ base.Fatalf("mapfn %v", t)
}
fn := syslook(name)
fn = substArgTypes(fn, t.Key(), t.Elem(), t.Key())
@@ -2615,7 +2620,7 @@ func mapfast(t *types.Type) int {
if Widthptr == 4 {
return mapfast32ptr
}
- Fatalf("small pointer %v", t.Key())
+ base.Fatalf("small pointer %v", t.Key())
case AMEM64:
if !t.Key().HasPointers() {
return mapfast64
@@ -2631,25 +2636,25 @@ func mapfast(t *types.Type) int {
return mapslow
}
-func writebarrierfn(name string, l *types.Type, r *types.Type) *Node {
+func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node {
fn := syslook(name)
fn = substArgTypes(fn, l, r)
return fn
}
-func addstr(n *Node, init *Nodes) *Node {
+func addstr(n ir.Node, init *ir.Nodes) ir.Node {
// order.expr rewrote OADDSTR to have a list of strings.
- c := n.List.Len()
+ c := n.List().Len()
if c < 2 {
- Fatalf("addstr count %d too small", c)
+ base.Fatalf("addstr count %d too small", c)
}
buf := nodnil()
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
sz := int64(0)
- for _, n1 := range n.List.Slice() {
- if n1.Op == OLITERAL {
+ for _, n1 := range n.List().Slice() {
+ if n1.Op() == ir.OLITERAL {
sz += int64(len(n1.StringVal()))
}
}
@@ -2657,15 +2662,15 @@ func addstr(n *Node, init *Nodes) *Node {
// Don't allocate the buffer if the result won't fit.
if sz < tmpstringbufsize {
// Create temporary buffer for result string on stack.
- t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
- buf = nod(OADDR, temp(t), nil)
+ t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
+ buf = ir.Nod(ir.OADDR, temp(t), nil)
}
}
// build list of string arguments
- args := []*Node{buf}
- for _, n2 := range n.List.Slice() {
- args = append(args, conv(n2, types.Types[TSTRING]))
+ args := []ir.Node{buf}
+ for _, n2 := range n.List().Slice() {
+ args = append(args, conv(n2, types.Types[types.TSTRING]))
}
var fn string
@@ -2677,33 +2682,33 @@ func addstr(n *Node, init *Nodes) *Node {
// large numbers of strings are passed to the runtime as a slice.
fn = "concatstrings"
- t := types.NewSlice(types.Types[TSTRING])
- slice := nod(OCOMPLIT, nil, typenod(t))
+ t := types.NewSlice(types.Types[types.TSTRING])
+ slice := ir.Nod(ir.OCOMPLIT, nil, typenod(t))
if prealloc[n] != nil {
prealloc[slice] = prealloc[n]
}
- slice.List.Set(args[1:]) // skip buf arg
- args = []*Node{buf, slice}
- slice.Esc = EscNone
+ slice.PtrList().Set(args[1:]) // skip buf arg
+ args = []ir.Node{buf, slice}
+ slice.SetEsc(EscNone)
}
cat := syslook(fn)
- r := nod(OCALL, cat, nil)
- r.List.Set(args)
+ r := ir.Nod(ir.OCALL, cat, nil)
+ r.PtrList().Set(args)
r = typecheck(r, ctxExpr)
r = walkexpr(r, init)
- r.Type = n.Type
+ r.SetType(n.Type())
return r
}
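
A sketch of the arity split; buf stands for the optional 32-byte stack buffer used when the result does not escape:

	func concat(a, b, c, d, e, f string) {
		_ = a + b                 // concatstring2(buf, a, b)
		_ = a + b + c + d + e + f // more than five operands: concatstrings(buf, []string{a, b, c, d, e, f})
	}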
-func walkAppendArgs(n *Node, init *Nodes) {
- walkexprlistsafe(n.List.Slice(), init)
+func walkAppendArgs(n ir.Node, init *ir.Nodes) {
+ walkexprlistsafe(n.List().Slice(), init)
// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
// and n are name or literal, but those may index the slice we're
// modifying here. Fix explicitly.
- ls := n.List.Slice()
+ ls := n.List().Slice()
for i1, n1 := range ls {
ls[i1] = cheapexpr(n1, init)
}
@@ -2723,90 +2728,90 @@ func walkAppendArgs(n *Node, init *Nodes) {
// s
//
// l2 is allowed to be a string.
-func appendslice(n *Node, init *Nodes) *Node {
+func appendslice(n ir.Node, init *ir.Nodes) ir.Node {
walkAppendArgs(n, init)
- l1 := n.List.First()
- l2 := n.List.Second()
+ l1 := n.List().First()
+ l2 := n.List().Second()
l2 = cheapexpr(l2, init)
- n.List.SetSecond(l2)
+ n.List().SetSecond(l2)
- var nodes Nodes
+ var nodes ir.Nodes
// var s []T
- s := temp(l1.Type)
- nodes.Append(nod(OAS, s, l1)) // s = l1
+ s := temp(l1.Type())
+ nodes.Append(ir.Nod(ir.OAS, s, l1)) // s = l1
- elemtype := s.Type.Elem()
+ elemtype := s.Type().Elem()
// n := len(s) + len(l2)
- nn := temp(types.Types[TINT])
- nodes.Append(nod(OAS, nn, nod(OADD, nod(OLEN, s, nil), nod(OLEN, l2, nil))))
+ nn := temp(types.Types[types.TINT])
+ nodes.Append(ir.Nod(ir.OAS, nn, ir.Nod(ir.OADD, ir.Nod(ir.OLEN, s, nil), ir.Nod(ir.OLEN, l2, nil))))
// if uint(n) > uint(cap(s))
- nif := nod(OIF, nil, nil)
- nuint := conv(nn, types.Types[TUINT])
- scapuint := conv(nod(OCAP, s, nil), types.Types[TUINT])
- nif.Left = nod(OGT, nuint, scapuint)
+ nif := ir.Nod(ir.OIF, nil, nil)
+ nuint := conv(nn, types.Types[types.TUINT])
+ scapuint := conv(ir.Nod(ir.OCAP, s, nil), types.Types[types.TUINT])
+ nif.SetLeft(ir.Nod(ir.OGT, nuint, scapuint))
// instantiate growslice(typ *type, []any, int) []any
fn := syslook("growslice")
fn = substArgTypes(fn, elemtype, elemtype)
// s = growslice(T, s, n)
- nif.Nbody.Set1(nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(elemtype), s, nn)))
+ nif.PtrBody().Set1(ir.Nod(ir.OAS, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn)))
nodes.Append(nif)
// s = s[:n]
- nt := nod(OSLICE, s, nil)
+ nt := ir.Nod(ir.OSLICE, s, nil)
nt.SetSliceBounds(nil, nn, nil)
nt.SetBounded(true)
- nodes.Append(nod(OAS, s, nt))
+ nodes.Append(ir.Nod(ir.OAS, s, nt))
- var ncopy *Node
+ var ncopy ir.Node
if elemtype.HasPointers() {
// copy(s[len(l1):], l2)
- nptr1 := nod(OSLICE, s, nil)
- nptr1.Type = s.Type
- nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil)
+ nptr1 := ir.Nod(ir.OSLICE, s, nil)
+ nptr1.SetType(s.Type())
+ nptr1.SetSliceBounds(ir.Nod(ir.OLEN, l1, nil), nil, nil)
nptr1 = cheapexpr(nptr1, &nodes)
nptr2 := l2
- Curfn.Func.setWBPos(n.Pos)
+ Curfn.Func().SetWBPos(n.Pos())
// instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int
fn := syslook("typedslicecopy")
- fn = substArgTypes(fn, l1.Type.Elem(), l2.Type.Elem())
- ptr1, len1 := nptr1.backingArrayPtrLen()
- ptr2, len2 := nptr2.backingArrayPtrLen()
- ncopy = mkcall1(fn, types.Types[TINT], &nodes, typename(elemtype), ptr1, len1, ptr2, len2)
- } else if instrumenting && !compiling_runtime {
+ fn = substArgTypes(fn, l1.Type().Elem(), l2.Type().Elem())
+ ptr1, len1 := backingArrayPtrLen(nptr1)
+ ptr2, len2 := backingArrayPtrLen(nptr2)
+ ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, typename(elemtype), ptr1, len1, ptr2, len2)
+ } else if instrumenting && !base.Flag.CompilingRuntime {
// rely on runtime to instrument:
// copy(s[len(l1):], l2)
// l2 can be a slice or string.
- nptr1 := nod(OSLICE, s, nil)
- nptr1.Type = s.Type
- nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil)
+ nptr1 := ir.Nod(ir.OSLICE, s, nil)
+ nptr1.SetType(s.Type())
+ nptr1.SetSliceBounds(ir.Nod(ir.OLEN, l1, nil), nil, nil)
nptr1 = cheapexpr(nptr1, &nodes)
nptr2 := l2
- ptr1, len1 := nptr1.backingArrayPtrLen()
- ptr2, len2 := nptr2.backingArrayPtrLen()
+ ptr1, len1 := backingArrayPtrLen(nptr1)
+ ptr2, len2 := backingArrayPtrLen(nptr2)
fn := syslook("slicecopy")
- fn = substArgTypes(fn, ptr1.Type.Elem(), ptr2.Type.Elem())
- ncopy = mkcall1(fn, types.Types[TINT], &nodes, ptr1, len1, ptr2, len2, nodintconst(elemtype.Width))
+ fn = substArgTypes(fn, ptr1.Type().Elem(), ptr2.Type().Elem())
+ ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, ptr1, len1, ptr2, len2, nodintconst(elemtype.Width))
} else {
// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
- nptr1 := nod(OINDEX, s, nod(OLEN, l1, nil))
+ nptr1 := ir.Nod(ir.OINDEX, s, ir.Nod(ir.OLEN, l1, nil))
nptr1.SetBounded(true)
- nptr1 = nod(OADDR, nptr1, nil)
+ nptr1 = ir.Nod(ir.OADDR, nptr1, nil)
- nptr2 := nod(OSPTR, l2, nil)
+ nptr2 := ir.Nod(ir.OSPTR, l2, nil)
- nwid := cheapexpr(conv(nod(OLEN, l2, nil), types.Types[TUINTPTR]), &nodes)
- nwid = nod(OMUL, nwid, nodintconst(elemtype.Width))
+ nwid := cheapexpr(conv(ir.Nod(ir.OLEN, l2, nil), types.Types[types.TUINTPTR]), &nodes)
+ nwid = ir.Nod(ir.OMUL, nwid, nodintconst(elemtype.Width))
// instantiate func memmove(to *any, frm *any, length uintptr)
fn := syslook("memmove")
@@ -2823,21 +2828,21 @@ func appendslice(n *Node, init *Nodes) *Node {
// isAppendOfMake reports whether n is of the form append(x, make([]T, y)...).
// isAppendOfMake assumes n has already been typechecked.
-func isAppendOfMake(n *Node) bool {
- if Debug.N != 0 || instrumenting {
+func isAppendOfMake(n ir.Node) bool {
+ if base.Flag.N != 0 || instrumenting {
return false
}
if n.Typecheck() == 0 {
- Fatalf("missing typecheck: %+v", n)
+ base.Fatalf("missing typecheck: %+v", n)
}
- if n.Op != OAPPEND || !n.IsDDD() || n.List.Len() != 2 {
+ if n.Op() != ir.OAPPEND || !n.IsDDD() || n.List().Len() != 2 {
return false
}
- second := n.List.Second()
- if second.Op != OMAKESLICE || second.Right != nil {
+ second := n.List().Second()
+ if second.Op() != ir.OMAKESLICE || second.Right() != nil {
return false
}
@@ -2847,8 +2852,8 @@ func isAppendOfMake(n *Node) bool {
// typecheck made sure that constant arguments to make are not negative and fit into an int.
// Overflow of the len argument to make is handled by an explicit check of int(len) < 0 at runtime.
- y := second.Left
- if !Isconst(y, CTINT) && maxintval[y.Type.Etype].Cmp(maxintval[TUINT]) > 0 {
+ y := second.Left()
+ if !ir.IsConst(y, constant.Int) && y.Type().Size() > types.Types[types.TUINT].Size() {
return false
}
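
The matched form, sketched; extendslice (below) grows the destination in place and clears only the new tail, so the make temporary is never allocated:

	func growBy(x []int, y int) []int {
		return append(x, make([]int, y)...) // becomes: grow x by y, then memclr the tail
	}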
@@ -2882,93 +2887,93 @@ func isAppendOfMake(n *Node) bool {
// }
// }
// s
-func extendslice(n *Node, init *Nodes) *Node {
+func extendslice(n ir.Node, init *ir.Nodes) ir.Node {
// isAppendOfMake made sure all possible positive values of l2 fit into an uint.
// The case of l2 overflow when converting from e.g. uint to int is handled by an explicit
// check of l2 < 0 at runtime which is generated below.
- l2 := conv(n.List.Second().Left, types.Types[TINT])
+ l2 := conv(n.List().Second().Left(), types.Types[types.TINT])
l2 = typecheck(l2, ctxExpr)
- n.List.SetSecond(l2) // walkAppendArgs expects l2 in n.List.Second().
+ n.List().SetSecond(l2) // walkAppendArgs expects l2 in n.List.Second().
walkAppendArgs(n, init)
- l1 := n.List.First()
- l2 = n.List.Second() // re-read l2, as it may have been updated by walkAppendArgs
+ l1 := n.List().First()
+ l2 = n.List().Second() // re-read l2, as it may have been updated by walkAppendArgs
- var nodes []*Node
+ var nodes []ir.Node
// if l2 >= 0 (likely happens), do nothing
- nifneg := nod(OIF, nod(OGE, l2, nodintconst(0)), nil)
+ nifneg := ir.Nod(ir.OIF, ir.Nod(ir.OGE, l2, nodintconst(0)), nil)
nifneg.SetLikely(true)
// else panicmakeslicelen()
- nifneg.Rlist.Set1(mkcall("panicmakeslicelen", nil, init))
+ nifneg.PtrRlist().Set1(mkcall("panicmakeslicelen", nil, init))
nodes = append(nodes, nifneg)
// s := l1
- s := temp(l1.Type)
- nodes = append(nodes, nod(OAS, s, l1))
+ s := temp(l1.Type())
+ nodes = append(nodes, ir.Nod(ir.OAS, s, l1))
- elemtype := s.Type.Elem()
+ elemtype := s.Type().Elem()
// n := len(s) + l2
- nn := temp(types.Types[TINT])
- nodes = append(nodes, nod(OAS, nn, nod(OADD, nod(OLEN, s, nil), l2)))
+ nn := temp(types.Types[types.TINT])
+ nodes = append(nodes, ir.Nod(ir.OAS, nn, ir.Nod(ir.OADD, ir.Nod(ir.OLEN, s, nil), l2)))
// if uint(n) > uint(cap(s))
- nuint := conv(nn, types.Types[TUINT])
- capuint := conv(nod(OCAP, s, nil), types.Types[TUINT])
- nif := nod(OIF, nod(OGT, nuint, capuint), nil)
+ nuint := conv(nn, types.Types[types.TUINT])
+ capuint := conv(ir.Nod(ir.OCAP, s, nil), types.Types[types.TUINT])
+ nif := ir.Nod(ir.OIF, ir.Nod(ir.OGT, nuint, capuint), nil)
// instantiate growslice(typ *type, old []any, newcap int) []any
fn := syslook("growslice")
fn = substArgTypes(fn, elemtype, elemtype)
// s = growslice(T, s, n)
- nif.Nbody.Set1(nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(elemtype), s, nn)))
+ nif.PtrBody().Set1(ir.Nod(ir.OAS, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn)))
nodes = append(nodes, nif)
// s = s[:n]
- nt := nod(OSLICE, s, nil)
+ nt := ir.Nod(ir.OSLICE, s, nil)
nt.SetSliceBounds(nil, nn, nil)
nt.SetBounded(true)
- nodes = append(nodes, nod(OAS, s, nt))
+ nodes = append(nodes, ir.Nod(ir.OAS, s, nt))
// lptr := &l1[0]
- l1ptr := temp(l1.Type.Elem().PtrTo())
- tmp := nod(OSPTR, l1, nil)
- nodes = append(nodes, nod(OAS, l1ptr, tmp))
+ l1ptr := temp(l1.Type().Elem().PtrTo())
+ tmp := ir.Nod(ir.OSPTR, l1, nil)
+ nodes = append(nodes, ir.Nod(ir.OAS, l1ptr, tmp))
// sptr := &s[0]
sptr := temp(elemtype.PtrTo())
- tmp = nod(OSPTR, s, nil)
- nodes = append(nodes, nod(OAS, sptr, tmp))
+ tmp = ir.Nod(ir.OSPTR, s, nil)
+ nodes = append(nodes, ir.Nod(ir.OAS, sptr, tmp))
// hp := &s[len(l1)]
- hp := nod(OINDEX, s, nod(OLEN, l1, nil))
+ hp := ir.Nod(ir.OINDEX, s, ir.Nod(ir.OLEN, l1, nil))
hp.SetBounded(true)
- hp = nod(OADDR, hp, nil)
- hp = convnop(hp, types.Types[TUNSAFEPTR])
+ hp = ir.Nod(ir.OADDR, hp, nil)
+ hp = convnop(hp, types.Types[types.TUNSAFEPTR])
// hn := l2 * sizeof(elem(s))
- hn := nod(OMUL, l2, nodintconst(elemtype.Width))
- hn = conv(hn, types.Types[TUINTPTR])
+ hn := ir.Nod(ir.OMUL, l2, nodintconst(elemtype.Width))
+ hn = conv(hn, types.Types[types.TUINTPTR])
clrname := "memclrNoHeapPointers"
hasPointers := elemtype.HasPointers()
if hasPointers {
clrname = "memclrHasPointers"
- Curfn.Func.setWBPos(n.Pos)
+ Curfn.Func().SetWBPos(n.Pos())
}
- var clr Nodes
+ var clr ir.Nodes
clrfn := mkcall(clrname, nil, &clr, hp, hn)
clr.Append(clrfn)
if hasPointers {
// if l1ptr == sptr
- nifclr := nod(OIF, nod(OEQ, l1ptr, sptr), nil)
- nifclr.Nbody = clr
+ nifclr := ir.Nod(ir.OIF, ir.Nod(ir.OEQ, l1ptr, sptr), nil)
+ nifclr.SetBody(clr)
nodes = append(nodes, nifclr)
} else {
nodes = append(nodes, clr.Slice()...)
@@ -3001,14 +3006,14 @@ func extendslice(n *Node, init *Nodes) *Node {
// ...
// }
// s
-func walkappend(n *Node, init *Nodes, dst *Node) *Node {
- if !samesafeexpr(dst, n.List.First()) {
- n.List.SetFirst(safeexpr(n.List.First(), init))
- n.List.SetFirst(walkexpr(n.List.First(), init))
+func walkappend(n ir.Node, init *ir.Nodes, dst ir.Node) ir.Node {
+ if !samesafeexpr(dst, n.List().First()) {
+ n.List().SetFirst(safeexpr(n.List().First(), init))
+ n.List().SetFirst(walkexpr(n.List().First(), init))
}
- walkexprlistsafe(n.List.Slice()[1:], init)
+ walkexprlistsafe(n.List().Slice()[1:], init)
- nsrc := n.List.First()
+ nsrc := n.List().First()
// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
// and n are name or literal, but those may index the slice we're
@@ -3016,60 +3021,60 @@ func walkappend(n *Node, init *Nodes, dst *Node) *Node {
// Using cheapexpr also makes sure that the evaluation
// of all arguments (and especially any panics) happen
// before we begin to modify the slice in a visible way.
- ls := n.List.Slice()[1:]
+ ls := n.List().Slice()[1:]
for i, n := range ls {
n = cheapexpr(n, init)
- if !types.Identical(n.Type, nsrc.Type.Elem()) {
- n = assignconv(n, nsrc.Type.Elem(), "append")
+ if !types.Identical(n.Type(), nsrc.Type().Elem()) {
+ n = assignconv(n, nsrc.Type().Elem(), "append")
n = walkexpr(n, init)
}
ls[i] = n
}
- argc := n.List.Len() - 1
+ argc := n.List().Len() - 1
if argc < 1 {
return nsrc
}
// General case, with no function calls left as arguments.
// Leave for gen, except that instrumentation requires old form.
- if !instrumenting || compiling_runtime {
+ if !instrumenting || base.Flag.CompilingRuntime {
return n
}
- var l []*Node
+ var l []ir.Node
- ns := temp(nsrc.Type)
- l = append(l, nod(OAS, ns, nsrc)) // s = src
+ ns := temp(nsrc.Type())
+ l = append(l, ir.Nod(ir.OAS, ns, nsrc)) // s = src
na := nodintconst(int64(argc)) // const argc
- nx := nod(OIF, nil, nil) // if cap(s) - len(s) < argc
- nx.Left = nod(OLT, nod(OSUB, nod(OCAP, ns, nil), nod(OLEN, ns, nil)), na)
+ nx := ir.Nod(ir.OIF, nil, nil) // if cap(s) - len(s) < argc
+ nx.SetLeft(ir.Nod(ir.OLT, ir.Nod(ir.OSUB, ir.Nod(ir.OCAP, ns, nil), ir.Nod(ir.OLEN, ns, nil)), na))
fn := syslook("growslice") // growslice(<type>, old []T, mincap int) (ret []T)
- fn = substArgTypes(fn, ns.Type.Elem(), ns.Type.Elem())
+ fn = substArgTypes(fn, ns.Type().Elem(), ns.Type().Elem())
- nx.Nbody.Set1(nod(OAS, ns,
- mkcall1(fn, ns.Type, &nx.Ninit, typename(ns.Type.Elem()), ns,
- nod(OADD, nod(OLEN, ns, nil), na))))
+ nx.PtrBody().Set1(ir.Nod(ir.OAS, ns,
+ mkcall1(fn, ns.Type(), nx.PtrInit(), typename(ns.Type().Elem()), ns,
+ ir.Nod(ir.OADD, ir.Nod(ir.OLEN, ns, nil), na))))
l = append(l, nx)
- nn := temp(types.Types[TINT])
- l = append(l, nod(OAS, nn, nod(OLEN, ns, nil))) // n = len(s)
+ nn := temp(types.Types[types.TINT])
+ l = append(l, ir.Nod(ir.OAS, nn, ir.Nod(ir.OLEN, ns, nil))) // n = len(s)
- nx = nod(OSLICE, ns, nil) // ...s[:n+argc]
- nx.SetSliceBounds(nil, nod(OADD, nn, na), nil)
+ nx = ir.Nod(ir.OSLICE, ns, nil) // ...s[:n+argc]
+ nx.SetSliceBounds(nil, ir.Nod(ir.OADD, nn, na), nil)
nx.SetBounded(true)
- l = append(l, nod(OAS, ns, nx)) // s = s[:n+argc]
+ l = append(l, ir.Nod(ir.OAS, ns, nx)) // s = s[:n+argc]
- ls = n.List.Slice()[1:]
+ ls = n.List().Slice()[1:]
for i, n := range ls {
- nx = nod(OINDEX, ns, nn) // s[n] ...
+ nx = ir.Nod(ir.OINDEX, ns, nn) // s[n] ...
nx.SetBounded(true)
- l = append(l, nod(OAS, nx, n)) // s[n] = arg
+ l = append(l, ir.Nod(ir.OAS, nx, n)) // s[n] = arg
if i+1 < len(ls) {
- l = append(l, nod(OAS, nn, nod(OADD, nn, nodintconst(1)))) // n = n + 1
+ l = append(l, ir.Nod(ir.OAS, nn, ir.Nod(ir.OADD, nn, nodintconst(1)))) // n = n + 1
}
}
@@ -3090,15 +3095,15 @@ func walkappend(n *Node, init *Nodes, dst *Node) *Node {
//
// Also works if b is a string.
//
-func copyany(n *Node, init *Nodes, runtimecall bool) *Node {
- if n.Left.Type.Elem().HasPointers() {
- Curfn.Func.setWBPos(n.Pos)
- fn := writebarrierfn("typedslicecopy", n.Left.Type.Elem(), n.Right.Type.Elem())
- n.Left = cheapexpr(n.Left, init)
- ptrL, lenL := n.Left.backingArrayPtrLen()
- n.Right = cheapexpr(n.Right, init)
- ptrR, lenR := n.Right.backingArrayPtrLen()
- return mkcall1(fn, n.Type, init, typename(n.Left.Type.Elem()), ptrL, lenL, ptrR, lenR)
+func copyany(n ir.Node, init *ir.Nodes, runtimecall bool) ir.Node {
+ if n.Left().Type().Elem().HasPointers() {
+ Curfn.Func().SetWBPos(n.Pos())
+ fn := writebarrierfn("typedslicecopy", n.Left().Type().Elem(), n.Right().Type().Elem())
+ n.SetLeft(cheapexpr(n.Left(), init))
+ ptrL, lenL := backingArrayPtrLen(n.Left())
+ n.SetRight(cheapexpr(n.Right(), init))
+ ptrR, lenR := backingArrayPtrLen(n.Right())
+ return mkcall1(fn, n.Type(), init, typename(n.Left().Type().Elem()), ptrL, lenL, ptrR, lenR)
}
if runtimecall {
@@ -3106,53 +3111,53 @@ func copyany(n *Node, init *Nodes, runtimecall bool) *Node {
// copy(n.Left, n.Right)
// n.Right can be a slice or string.
- n.Left = cheapexpr(n.Left, init)
- ptrL, lenL := n.Left.backingArrayPtrLen()
- n.Right = cheapexpr(n.Right, init)
- ptrR, lenR := n.Right.backingArrayPtrLen()
+ n.SetLeft(cheapexpr(n.Left(), init))
+ ptrL, lenL := backingArrayPtrLen(n.Left())
+ n.SetRight(cheapexpr(n.Right(), init))
+ ptrR, lenR := backingArrayPtrLen(n.Right())
fn := syslook("slicecopy")
- fn = substArgTypes(fn, ptrL.Type.Elem(), ptrR.Type.Elem())
+ fn = substArgTypes(fn, ptrL.Type().Elem(), ptrR.Type().Elem())
- return mkcall1(fn, n.Type, init, ptrL, lenL, ptrR, lenR, nodintconst(n.Left.Type.Elem().Width))
+ return mkcall1(fn, n.Type(), init, ptrL, lenL, ptrR, lenR, nodintconst(n.Left().Type().Elem().Width))
}
- n.Left = walkexpr(n.Left, init)
- n.Right = walkexpr(n.Right, init)
- nl := temp(n.Left.Type)
- nr := temp(n.Right.Type)
- var l []*Node
- l = append(l, nod(OAS, nl, n.Left))
- l = append(l, nod(OAS, nr, n.Right))
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetRight(walkexpr(n.Right(), init))
+ nl := temp(n.Left().Type())
+ nr := temp(n.Right().Type())
+ var l []ir.Node
+ l = append(l, ir.Nod(ir.OAS, nl, n.Left()))
+ l = append(l, ir.Nod(ir.OAS, nr, n.Right()))
- nfrm := nod(OSPTR, nr, nil)
- nto := nod(OSPTR, nl, nil)
+ nfrm := ir.Nod(ir.OSPTR, nr, nil)
+ nto := ir.Nod(ir.OSPTR, nl, nil)
- nlen := temp(types.Types[TINT])
+ nlen := temp(types.Types[types.TINT])
// n = len(to)
- l = append(l, nod(OAS, nlen, nod(OLEN, nl, nil)))
+ l = append(l, ir.Nod(ir.OAS, nlen, ir.Nod(ir.OLEN, nl, nil)))
// if n > len(frm) { n = len(frm) }
- nif := nod(OIF, nil, nil)
+ nif := ir.Nod(ir.OIF, nil, nil)
- nif.Left = nod(OGT, nlen, nod(OLEN, nr, nil))
- nif.Nbody.Append(nod(OAS, nlen, nod(OLEN, nr, nil)))
+ nif.SetLeft(ir.Nod(ir.OGT, nlen, ir.Nod(ir.OLEN, nr, nil)))
+ nif.PtrBody().Append(ir.Nod(ir.OAS, nlen, ir.Nod(ir.OLEN, nr, nil)))
l = append(l, nif)
// if to.ptr != frm.ptr { memmove( ... ) }
- ne := nod(OIF, nod(ONE, nto, nfrm), nil)
+ ne := ir.Nod(ir.OIF, ir.Nod(ir.ONE, nto, nfrm), nil)
ne.SetLikely(true)
l = append(l, ne)
fn := syslook("memmove")
- fn = substArgTypes(fn, nl.Type.Elem(), nl.Type.Elem())
- nwid := temp(types.Types[TUINTPTR])
- setwid := nod(OAS, nwid, conv(nlen, types.Types[TUINTPTR]))
- ne.Nbody.Append(setwid)
- nwid = nod(OMUL, nwid, nodintconst(nl.Type.Elem().Width))
+ fn = substArgTypes(fn, nl.Type().Elem(), nl.Type().Elem())
+ nwid := temp(types.Types[types.TUINTPTR])
+ setwid := ir.Nod(ir.OAS, nwid, conv(nlen, types.Types[types.TUINTPTR]))
+ ne.PtrBody().Append(setwid)
+ nwid = ir.Nod(ir.OMUL, nwid, nodintconst(nl.Type().Elem().Width))
call := mkcall1(fn, nil, init, nto, nfrm, nwid)
- ne.Nbody.Append(call)
+ ne.PtrBody().Append(call)
typecheckslice(l, ctxStmt)
walkstmtlist(l)
@@ -3160,7 +3165,7 @@ func copyany(n *Node, init *Nodes, runtimecall bool) *Node {
return nlen
}
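The open-coded copy assembled above amounts to the following, schematically (ptrTo/ptrFrom and elemWidth are illustrative names for the OSPTR results and element size):

    n := len(to)
    if n > len(frm) {
        n = len(frm)
    }
    if ptrTo != ptrFrom {
        memmove(ptrTo, ptrFrom, uintptr(n)*elemWidth)
    }
    // the result of the copy expression is n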
-func eqfor(t *types.Type) (n *Node, needsize bool) {
+func eqfor(t *types.Type) (n ir.Node, needsize bool) {
// Should only arrive here with large memory or
// a struct/array containing a non-memory field/element.
// Small memory is handled inline, and single non-memory
@@ -3172,73 +3177,73 @@ func eqfor(t *types.Type) (n *Node, needsize bool) {
return n, true
case ASPECIAL:
sym := typesymprefix(".eq", t)
- n := newname(sym)
+ n := NewName(sym)
setNodeNameFunc(n)
- n.Type = functype(nil, []*Node{
+ n.SetType(functype(nil, []ir.Node{
anonfield(types.NewPtr(t)),
anonfield(types.NewPtr(t)),
- }, []*Node{
- anonfield(types.Types[TBOOL]),
- })
+ }, []ir.Node{
+ anonfield(types.Types[types.TBOOL]),
+ }))
return n, false
}
- Fatalf("eqfor %v", t)
+ base.Fatalf("eqfor %v", t)
return nil, false
}
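In other words, eqfor selects between two equality helpers, and needsize tells the caller whether to append the size argument. Roughly:

    func memequal(x, y unsafe.Pointer, size uintptr) bool // AMEM: needsize = true
    func type..eq.T(p, q *T) bool                         // ASPECIAL: generated alg, needsize = false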
// The result of walkcompare MUST be assigned back to n, e.g.
// n.Left = walkcompare(n.Left, init)
-func walkcompare(n *Node, init *Nodes) *Node {
- if n.Left.Type.IsInterface() && n.Right.Type.IsInterface() && n.Left.Op != OLITERAL && n.Right.Op != OLITERAL {
+func walkcompare(n ir.Node, init *ir.Nodes) ir.Node {
+ if n.Left().Type().IsInterface() && n.Right().Type().IsInterface() && n.Left().Op() != ir.ONIL && n.Right().Op() != ir.ONIL {
return walkcompareInterface(n, init)
}
- if n.Left.Type.IsString() && n.Right.Type.IsString() {
+ if n.Left().Type().IsString() && n.Right().Type().IsString() {
return walkcompareString(n, init)
}
- n.Left = walkexpr(n.Left, init)
- n.Right = walkexpr(n.Right, init)
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetRight(walkexpr(n.Right(), init))
// Given mixed interface/concrete comparison,
// rewrite into types-equal && data-equal.
// This is efficient, avoids allocations, and avoids runtime calls.
- if n.Left.Type.IsInterface() != n.Right.Type.IsInterface() {
+ if n.Left().Type().IsInterface() != n.Right().Type().IsInterface() {
// Preserve side-effects in case of short-circuiting; see #32187.
- l := cheapexpr(n.Left, init)
- r := cheapexpr(n.Right, init)
+ l := cheapexpr(n.Left(), init)
+ r := cheapexpr(n.Right(), init)
// Swap so that l is the interface value and r is the concrete value.
- if n.Right.Type.IsInterface() {
+ if n.Right().Type().IsInterface() {
l, r = r, l
}
// Handle both == and !=.
- eq := n.Op
- andor := OOROR
- if eq == OEQ {
- andor = OANDAND
+ eq := n.Op()
+ andor := ir.OOROR
+ if eq == ir.OEQ {
+ andor = ir.OANDAND
}
// Check for types equal.
// For empty interface, this is:
// l.tab == type(r)
// For non-empty interface, this is:
// l.tab != nil && l.tab._type == type(r)
- var eqtype *Node
- tab := nod(OITAB, l, nil)
- rtyp := typename(r.Type)
- if l.Type.IsEmptyInterface() {
- tab.Type = types.NewPtr(types.Types[TUINT8])
+ var eqtype ir.Node
+ tab := ir.Nod(ir.OITAB, l, nil)
+ rtyp := typename(r.Type())
+ if l.Type().IsEmptyInterface() {
+ tab.SetType(types.NewPtr(types.Types[types.TUINT8]))
tab.SetTypecheck(1)
- eqtype = nod(eq, tab, rtyp)
+ eqtype = ir.Nod(eq, tab, rtyp)
} else {
- nonnil := nod(brcom(eq), nodnil(), tab)
- match := nod(eq, itabType(tab), rtyp)
- eqtype = nod(andor, nonnil, match)
+ nonnil := ir.Nod(brcom(eq), nodnil(), tab)
+ match := ir.Nod(eq, itabType(tab), rtyp)
+ eqtype = ir.Nod(andor, nonnil, match)
}
// Check for data equal.
- eqdata := nod(eq, ifaceData(n.Pos, l, r.Type), r)
+ eqdata := ir.Nod(eq, ifaceData(n.Pos(), l, r.Type()), r)
// Put it all together.
- expr := nod(andor, eqtype, eqdata)
+ expr := ir.Nod(andor, eqtype, eqdata)
n = finishcompare(n, expr, init)
return n
}
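So for a non-empty interface value i compared against a concrete value c of type T, the rewrite is, schematically:

    i == c  =>  i.tab != nil && i.tab._type == type(T) && *(*T)(i.data) == c
    i != c  =>  i.tab == nil || i.tab._type != type(T) || *(*T)(i.data) != c

For an empty interface the tab word is itself the *_type, so the nil guard is dropped.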
@@ -3247,7 +3252,7 @@ func walkcompare(n *Node, init *Nodes) *Node {
// Otherwise back end handles it.
// While we're here, decide whether to
// inline or call an eq alg.
- t := n.Left.Type
+ t := n.Left().Type()
var inline bool
maxcmpsize := int64(4)
@@ -3259,19 +3264,19 @@ func walkcompare(n *Node, init *Nodes) *Node {
switch t.Etype {
default:
- if Debug_libfuzzer != 0 && t.IsInteger() {
- n.Left = cheapexpr(n.Left, init)
- n.Right = cheapexpr(n.Right, init)
+ if base.Debug.Libfuzzer != 0 && t.IsInteger() {
+ n.SetLeft(cheapexpr(n.Left(), init))
+ n.SetRight(cheapexpr(n.Right(), init))
// If exactly one comparison operand is
// constant, invoke the constcmp functions
// instead, and arrange for the constant
// operand to be the first argument.
- l, r := n.Left, n.Right
- if r.Op == OLITERAL {
+ l, r := n.Left(), n.Right()
+ if r.Op() == ir.OLITERAL {
l, r = r, l
}
- constcmp := l.Op == OLITERAL && r.Op != OLITERAL
+ constcmp := l.Op() == ir.OLITERAL && r.Op() != ir.OLITERAL
var fn string
var paramType *types.Type
@@ -3281,81 +3286,81 @@ func walkcompare(n *Node, init *Nodes) *Node {
if constcmp {
fn = "libfuzzerTraceConstCmp1"
}
- paramType = types.Types[TUINT8]
+ paramType = types.Types[types.TUINT8]
case 2:
fn = "libfuzzerTraceCmp2"
if constcmp {
fn = "libfuzzerTraceConstCmp2"
}
- paramType = types.Types[TUINT16]
+ paramType = types.Types[types.TUINT16]
case 4:
fn = "libfuzzerTraceCmp4"
if constcmp {
fn = "libfuzzerTraceConstCmp4"
}
- paramType = types.Types[TUINT32]
+ paramType = types.Types[types.TUINT32]
case 8:
fn = "libfuzzerTraceCmp8"
if constcmp {
fn = "libfuzzerTraceConstCmp8"
}
- paramType = types.Types[TUINT64]
+ paramType = types.Types[types.TUINT64]
default:
- Fatalf("unexpected integer size %d for %v", t.Size(), t)
+ base.Fatalf("unexpected integer size %d for %v", t.Size(), t)
}
init.Append(mkcall(fn, nil, init, tracecmpArg(l, paramType, init), tracecmpArg(r, paramType, init)))
}
return n
- case TARRAY:
+ case types.TARRAY:
// We can compare several elements at once with 2/4/8 byte integer compares
inline = t.NumElem() <= 1 || (issimple[t.Elem().Etype] && (t.NumElem() <= 4 || t.Elem().Width*t.NumElem() <= maxcmpsize))
- case TSTRUCT:
+ case types.TSTRUCT:
inline = t.NumComponents(types.IgnoreBlankFields) <= 4
}
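Under the libfuzzer instrumentation mode, the hooks appended in the default case report integer comparison operands to the fuzzer; for uint32 operands, schematically:

    libfuzzerTraceCmp4(uint32(x), uint32(y))      // both operands variable
    libfuzzerTraceConstCmp4(uint32(k), uint32(y)) // constant operand passed first

The comparison itself is left in place; only the trace call is appended to init.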
- cmpl := n.Left
- for cmpl != nil && cmpl.Op == OCONVNOP {
- cmpl = cmpl.Left
+ cmpl := n.Left()
+ for cmpl != nil && cmpl.Op() == ir.OCONVNOP {
+ cmpl = cmpl.Left()
}
- cmpr := n.Right
- for cmpr != nil && cmpr.Op == OCONVNOP {
- cmpr = cmpr.Left
+ cmpr := n.Right()
+ for cmpr != nil && cmpr.Op() == ir.OCONVNOP {
+ cmpr = cmpr.Left()
}
// We chose not to inline; call the equality function directly.
if !inline {
// eq algs take pointers; cmpl and cmpr must be addressable
if !islvalue(cmpl) || !islvalue(cmpr) {
- Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr)
+ base.Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr)
}
fn, needsize := eqfor(t)
- call := nod(OCALL, fn, nil)
- call.List.Append(nod(OADDR, cmpl, nil))
- call.List.Append(nod(OADDR, cmpr, nil))
+ call := ir.Nod(ir.OCALL, fn, nil)
+ call.PtrList().Append(ir.Nod(ir.OADDR, cmpl, nil))
+ call.PtrList().Append(ir.Nod(ir.OADDR, cmpr, nil))
if needsize {
- call.List.Append(nodintconst(t.Width))
+ call.PtrList().Append(nodintconst(t.Width))
}
res := call
- if n.Op != OEQ {
- res = nod(ONOT, res, nil)
+ if n.Op() != ir.OEQ {
+ res = ir.Nod(ir.ONOT, res, nil)
}
n = finishcompare(n, res, init)
return n
}
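So a non-inlined x == y on a struct or array type T becomes a direct call, with the result negated for !=; roughly:

    memequal(&x, &y, unsafe.Sizeof(x)) // plain memory comparison (needsize)
    type..eq.T(&x, &y)                 // generated helper for types with special fields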
// inline: build boolean expression comparing element by element
- andor := OANDAND
- if n.Op == ONE {
- andor = OOROR
+ andor := ir.OANDAND
+ if n.Op() == ir.ONE {
+ andor = ir.OOROR
}
- var expr *Node
- compare := func(el, er *Node) {
- a := nod(n.Op, el, er)
+ var expr ir.Node
+ compare := func(el, er ir.Node) {
+ a := ir.Nod(n.Op(), el, er)
if expr == nil {
expr = a
} else {
- expr = nod(andor, expr, a)
+ expr = ir.Nod(andor, expr, a)
}
}
cmpl = safeexpr(cmpl, init)
@@ -3367,8 +3372,8 @@ func walkcompare(n *Node, init *Nodes) *Node {
continue
}
compare(
- nodSym(OXDOT, cmpl, sym),
- nodSym(OXDOT, cmpr, sym),
+ nodSym(ir.OXDOT, cmpl, sym),
+ nodSym(ir.OXDOT, cmpr, sym),
)
}
} else {
@@ -3381,45 +3386,45 @@ func walkcompare(n *Node, init *Nodes) *Node {
var convType *types.Type
switch {
case remains >= 8 && combine64bit:
- convType = types.Types[TINT64]
+ convType = types.Types[types.TINT64]
step = 8 / t.Elem().Width
case remains >= 4 && combine32bit:
- convType = types.Types[TUINT32]
+ convType = types.Types[types.TUINT32]
step = 4 / t.Elem().Width
case remains >= 2 && combine16bit:
- convType = types.Types[TUINT16]
+ convType = types.Types[types.TUINT16]
step = 2 / t.Elem().Width
default:
step = 1
}
if step == 1 {
compare(
- nod(OINDEX, cmpl, nodintconst(i)),
- nod(OINDEX, cmpr, nodintconst(i)),
+ ir.Nod(ir.OINDEX, cmpl, nodintconst(i)),
+ ir.Nod(ir.OINDEX, cmpr, nodintconst(i)),
)
i++
remains -= t.Elem().Width
} else {
elemType := t.Elem().ToUnsigned()
- cmplw := nod(OINDEX, cmpl, nodintconst(i))
+ cmplw := ir.Nod(ir.OINDEX, cmpl, nodintconst(i))
cmplw = conv(cmplw, elemType) // convert to unsigned
cmplw = conv(cmplw, convType) // widen
- cmprw := nod(OINDEX, cmpr, nodintconst(i))
+ cmprw := ir.Nod(ir.OINDEX, cmpr, nodintconst(i))
cmprw = conv(cmprw, elemType)
cmprw = conv(cmprw, convType)
// For code like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
// ssa will generate a single large load.
for offset := int64(1); offset < step; offset++ {
- lb := nod(OINDEX, cmpl, nodintconst(i+offset))
+ lb := ir.Nod(ir.OINDEX, cmpl, nodintconst(i+offset))
lb = conv(lb, elemType)
lb = conv(lb, convType)
- lb = nod(OLSH, lb, nodintconst(8*t.Elem().Width*offset))
- cmplw = nod(OOR, cmplw, lb)
- rb := nod(OINDEX, cmpr, nodintconst(i+offset))
+ lb = ir.Nod(ir.OLSH, lb, nodintconst(8*t.Elem().Width*offset))
+ cmplw = ir.Nod(ir.OOR, cmplw, lb)
+ rb := ir.Nod(ir.OINDEX, cmpr, nodintconst(i+offset))
rb = conv(rb, elemType)
rb = conv(rb, convType)
- rb = nod(OLSH, rb, nodintconst(8*t.Elem().Width*offset))
- cmprw = nod(OOR, cmprw, rb)
+ rb = ir.Nod(ir.OLSH, rb, nodintconst(8*t.Elem().Width*offset))
+ cmprw = ir.Nod(ir.OOR, cmprw, rb)
}
compare(cmplw, cmprw)
i += step
@@ -3428,13 +3433,13 @@ func walkcompare(n *Node, init *Nodes) *Node {
}
}
if expr == nil {
- expr = nodbool(n.Op == OEQ)
+ expr = nodbool(n.Op() == ir.OEQ)
// We still need to use cmpl and cmpr, in case they contain
// an expression which might panic. See issue 23837.
- t := temp(cmpl.Type)
- a1 := nod(OAS, t, cmpl)
+ t := temp(cmpl.Type())
+ a1 := ir.Nod(ir.OAS, t, cmpl)
a1 = typecheck(a1, ctxStmt)
- a2 := nod(OAS, t, cmpr)
+ a2 := ir.Nod(ir.OAS, t, cmpr)
a2 = typecheck(a2, ctxStmt)
init.Append(a1, a2)
}
@@ -3442,48 +3447,48 @@ func walkcompare(n *Node, init *Nodes) *Node {
return n
}
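In the inline case the boolean is built element by element (joined with || instead of && for !=). For instance, p == q on a two-field struct, and a == b on [4]byte with combined loads, look schematically like:

    p.f0 == q.f0 && p.f1 == q.f1
    uint32(a[0])|uint32(a[1])<<8|... == uint32(b[0])|uint32(b[1])<<8|...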
-func tracecmpArg(n *Node, t *types.Type, init *Nodes) *Node {
+func tracecmpArg(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
// Ugly hack to avoid "constant -1 overflows uintptr" errors, etc.
- if n.Op == OLITERAL && n.Type.IsSigned() && n.Int64Val() < 0 {
- n = copyexpr(n, n.Type, init)
+ if n.Op() == ir.OLITERAL && n.Type().IsSigned() && n.Int64Val() < 0 {
+ n = copyexpr(n, n.Type(), init)
}
return conv(n, t)
}
-func walkcompareInterface(n *Node, init *Nodes) *Node {
- n.Right = cheapexpr(n.Right, init)
- n.Left = cheapexpr(n.Left, init)
- eqtab, eqdata := eqinterface(n.Left, n.Right)
- var cmp *Node
- if n.Op == OEQ {
- cmp = nod(OANDAND, eqtab, eqdata)
+func walkcompareInterface(n ir.Node, init *ir.Nodes) ir.Node {
+ n.SetRight(cheapexpr(n.Right(), init))
+ n.SetLeft(cheapexpr(n.Left(), init))
+ eqtab, eqdata := eqinterface(n.Left(), n.Right())
+ var cmp ir.Node
+ if n.Op() == ir.OEQ {
+ cmp = ir.Nod(ir.OANDAND, eqtab, eqdata)
} else {
- eqtab.Op = ONE
- cmp = nod(OOROR, eqtab, nod(ONOT, eqdata, nil))
+ eqtab.SetOp(ir.ONE)
+ cmp = ir.Nod(ir.OOROR, eqtab, ir.Nod(ir.ONOT, eqdata, nil))
}
return finishcompare(n, cmp, init)
}
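The eqtab/eqdata pair comes from eqinterface, so the rewrite is, schematically (efaceeq is the empty-interface variant):

    i1 == i2  =>  i1.tab == i2.tab && runtime.ifaceeq(i1.tab, i1.data, i2.data)
    i1 != i2  =>  i1.tab != i2.tab || !runtime.ifaceeq(i1.tab, i1.data, i2.data)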
-func walkcompareString(n *Node, init *Nodes) *Node {
+func walkcompareString(n ir.Node, init *ir.Nodes) ir.Node {
// Rewrite comparisons to short constant strings as length+byte-wise comparisons.
- var cs, ncs *Node // const string, non-const string
+ var cs, ncs ir.Node // const string, non-const string
switch {
- case Isconst(n.Left, CTSTR) && Isconst(n.Right, CTSTR):
+ case ir.IsConst(n.Left(), constant.String) && ir.IsConst(n.Right(), constant.String):
// ignore; will be constant evaluated
- case Isconst(n.Left, CTSTR):
- cs = n.Left
- ncs = n.Right
- case Isconst(n.Right, CTSTR):
- cs = n.Right
- ncs = n.Left
+ case ir.IsConst(n.Left(), constant.String):
+ cs = n.Left()
+ ncs = n.Right()
+ case ir.IsConst(n.Right(), constant.String):
+ cs = n.Right()
+ ncs = n.Left()
}
if cs != nil {
- cmp := n.Op
+ cmp := n.Op()
// Our comparison below assumes that the non-constant string
// is on the left-hand side, so rewrite "" cmp x to x cmp "".
// See issue 24817.
- if Isconst(n.Left, CTSTR) {
+ if ir.IsConst(n.Left(), constant.String) {
cmp = brrev(cmp)
}
@@ -3502,12 +3507,12 @@ func walkcompareString(n *Node, init *Nodes) *Node {
combine64bit = thearch.LinkArch.RegSize >= 8
}
- var and Op
+ var and ir.Op
switch cmp {
- case OEQ:
- and = OANDAND
- case ONE:
- and = OOROR
+ case ir.OEQ:
+ and = ir.OANDAND
+ case ir.ONE:
+ and = ir.OOROR
default:
// Don't do byte-wise comparisons for <, <=, etc.
// They're fairly complicated.
@@ -3518,13 +3523,13 @@ func walkcompareString(n *Node, init *Nodes) *Node {
if len(s) > 0 {
ncs = safeexpr(ncs, init)
}
- r := nod(cmp, nod(OLEN, ncs, nil), nodintconst(int64(len(s))))
+ r := ir.Nod(cmp, ir.Nod(ir.OLEN, ncs, nil), nodintconst(int64(len(s))))
remains := len(s)
for i := 0; remains > 0; {
if remains == 1 || !canCombineLoads {
cb := nodintconst(int64(s[i]))
- ncb := nod(OINDEX, ncs, nodintconst(int64(i)))
- r = nod(and, r, nod(cmp, ncb, cb))
+ ncb := ir.Nod(ir.OINDEX, ncs, nodintconst(int64(i)))
+ r = ir.Nod(and, r, ir.Nod(cmp, ncb, cb))
remains--
i++
continue
@@ -3533,31 +3538,31 @@ func walkcompareString(n *Node, init *Nodes) *Node {
var convType *types.Type
switch {
case remains >= 8 && combine64bit:
- convType = types.Types[TINT64]
+ convType = types.Types[types.TINT64]
step = 8
case remains >= 4:
- convType = types.Types[TUINT32]
+ convType = types.Types[types.TUINT32]
step = 4
case remains >= 2:
- convType = types.Types[TUINT16]
+ convType = types.Types[types.TUINT16]
step = 2
}
- ncsubstr := nod(OINDEX, ncs, nodintconst(int64(i)))
+ ncsubstr := ir.Nod(ir.OINDEX, ncs, nodintconst(int64(i)))
ncsubstr = conv(ncsubstr, convType)
csubstr := int64(s[i])
// Calculate large constant from bytes as sequence of shifts and ors.
// Like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
// ssa will combine this into a single large load.
for offset := 1; offset < step; offset++ {
- b := nod(OINDEX, ncs, nodintconst(int64(i+offset)))
+ b := ir.Nod(ir.OINDEX, ncs, nodintconst(int64(i+offset)))
b = conv(b, convType)
- b = nod(OLSH, b, nodintconst(int64(8*offset)))
- ncsubstr = nod(OOR, ncsubstr, b)
+ b = ir.Nod(ir.OLSH, b, nodintconst(int64(8*offset)))
+ ncsubstr = ir.Nod(ir.OOR, ncsubstr, b)
csubstr |= int64(s[i+offset]) << uint8(8*offset)
}
csubstrPart := nodintconst(csubstr)
// Compare "step" bytes as once
- r = nod(and, r, nod(cmp, csubstrPart, ncsubstr))
+ r = ir.Nod(and, r, ir.Nod(cmp, csubstrPart, ncsubstr))
remains -= step
i += step
}
@@ -3565,26 +3570,26 @@ func walkcompareString(n *Node, init *Nodes) *Node {
}
}
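So a comparison against a short constant string is fully open-coded. For example, s == "go" becomes, schematically:

    len(s) == 2 && s[0] == 'g' && s[1] == 'o'             // byte at a time
    len(s) == 2 && uint16(s[0])|uint16(s[1])<<8 == 0x6f67 // with combined loads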
- var r *Node
- if n.Op == OEQ || n.Op == ONE {
+ var r ir.Node
+ if n.Op() == ir.OEQ || n.Op() == ir.ONE {
// prepare for rewrite below
- n.Left = cheapexpr(n.Left, init)
- n.Right = cheapexpr(n.Right, init)
- eqlen, eqmem := eqstring(n.Left, n.Right)
+ n.SetLeft(cheapexpr(n.Left(), init))
+ n.SetRight(cheapexpr(n.Right(), init))
+ eqlen, eqmem := eqstring(n.Left(), n.Right())
// quick check of len before full compare for == or !=.
// memequal then tests equality up to length len.
- if n.Op == OEQ {
+ if n.Op() == ir.OEQ {
// len(left) == len(right) && memequal(left, right, len)
- r = nod(OANDAND, eqlen, eqmem)
+ r = ir.Nod(ir.OANDAND, eqlen, eqmem)
} else {
// len(left) != len(right) || !memequal(left, right, len)
- eqlen.Op = ONE
- r = nod(OOROR, eqlen, nod(ONOT, eqmem, nil))
+ eqlen.SetOp(ir.ONE)
+ r = ir.Nod(ir.OOROR, eqlen, ir.Nod(ir.ONOT, eqmem, nil))
}
} else {
// runtime cmpstring(s1, s2) compared against 0 with the original operator
- r = mkcall("cmpstring", types.Types[TINT], init, conv(n.Left, types.Types[TSTRING]), conv(n.Right, types.Types[TSTRING]))
- r = nod(n.Op, r, nodintconst(0))
+ r = mkcall("cmpstring", types.Types[types.TINT], init, conv(n.Left(), types.Types[types.TSTRING]), conv(n.Right(), types.Types[types.TSTRING]))
+ r = ir.Nod(n.Op(), r, nodintconst(0))
}
return finishcompare(n, r, init)
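The two general paths amount to, schematically:

    s1 == s2  =>  len(s1) == len(s2) && runtime.memequal(ptr(s1), ptr(s2), len(s1))
    s1 <  s2  =>  runtime.cmpstring(s1, s2) < 0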
@@ -3592,36 +3597,36 @@ func walkcompareString(n *Node, init *Nodes) *Node {
// The result of finishcompare MUST be assigned back to n, e.g.
// n.Left = finishcompare(n.Left, r, init)
-func finishcompare(n, r *Node, init *Nodes) *Node {
+func finishcompare(n, r ir.Node, init *ir.Nodes) ir.Node {
r = typecheck(r, ctxExpr)
- r = conv(r, n.Type)
+ r = conv(r, n.Type())
r = walkexpr(r, init)
return r
}
// bounded reports whether integer n must be in range [0, max).
-func bounded(n *Node, max int64) bool {
- if n.Type == nil || !n.Type.IsInteger() {
+func bounded(n ir.Node, max int64) bool {
+ if n.Type() == nil || !n.Type().IsInteger() {
return false
}
- sign := n.Type.IsSigned()
- bits := int32(8 * n.Type.Width)
+ sign := n.Type().IsSigned()
+ bits := int32(8 * n.Type().Width)
if smallintconst(n) {
v := n.Int64Val()
return 0 <= v && v < max
}
- switch n.Op {
- case OAND, OANDNOT:
+ switch n.Op() {
+ case ir.OAND, ir.OANDNOT:
v := int64(-1)
switch {
- case smallintconst(n.Left):
- v = n.Left.Int64Val()
- case smallintconst(n.Right):
- v = n.Right.Int64Val()
- if n.Op == OANDNOT {
+ case smallintconst(n.Left()):
+ v = n.Left().Int64Val()
+ case smallintconst(n.Right()):
+ v = n.Right().Int64Val()
+ if n.Op() == ir.OANDNOT {
v = ^v
if !sign {
v &= 1<<uint(bits) - 1
@@ -3632,26 +3637,26 @@ func bounded(n *Node, max int64) bool {
return true
}
- case OMOD:
- if !sign && smallintconst(n.Right) {
- v := n.Right.Int64Val()
+ case ir.OMOD:
+ if !sign && smallintconst(n.Right()) {
+ v := n.Right().Int64Val()
if 0 <= v && v <= max {
return true
}
}
- case ODIV:
- if !sign && smallintconst(n.Right) {
- v := n.Right.Int64Val()
+ case ir.ODIV:
+ if !sign && smallintconst(n.Right()) {
+ v := n.Right().Int64Val()
for bits > 0 && v >= 2 {
bits--
v >>= 1
}
}
- case ORSH:
- if !sign && smallintconst(n.Right) {
- v := n.Right.Int64Val()
+ case ir.ORSH:
+ if !sign && smallintconst(n.Right()) {
+ v := n.Right().Int64Val()
if v > int64(bits) {
return true
}
@@ -3667,8 +3672,8 @@ func bounded(n *Node, max int64) bool {
}
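A few examples of what bounded can prove, assuming i is an unsigned integer (this is what lets an index expression like a[i&7] skip its bounds check when len(a) is known to be at least 8):

    i & 7   // in [0, 8): masking
    i % 8   // in [0, 8): unsigned mod by a small constant
    i >> 29 // for a 32-bit i, in [0, 8)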
// usemethod checks interface method calls for uses of reflect.Type.Method.
-func usemethod(n *Node) {
- t := n.Left.Type
+func usemethod(n ir.Node) {
+ t := n.Left().Type()
// Looking for either of:
// Method(int) reflect.Method
@@ -3690,7 +3695,7 @@ func usemethod(n *Node) {
}
if res1 == nil {
- if p0.Type.Etype != TINT {
+ if p0.Type.Etype != types.TINT {
return
}
} else {
@@ -3706,64 +3711,64 @@ func usemethod(n *Node) {
// (including global variables such as numImports - was issue #19028).
// Also need to check for reflect package itself (see Issue #38515).
if s := res0.Type.Sym; s != nil && s.Name == "Method" && isReflectPkg(s.Pkg) {
- Curfn.Func.SetReflectMethod(true)
+ Curfn.Func().SetReflectMethod(true)
// The LSym is initialized at this point. We need to set the attribute on the LSym.
- Curfn.Func.lsym.Set(obj.AttrReflectMethod, true)
+ Curfn.Func().LSym.Set(obj.AttrReflectMethod, true)
}
}
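The shapes being matched are calls on a reflect.Type such as:

    t.Method(0)            // Method(int) reflect.Method
    t.MethodByName("Foo")  // MethodByName(string) reflect.Method

Marking the function with AttrReflectMethod tells the linker that method metadata may be reached through reflection, so those methods cannot be dead-code-eliminated.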
-func usefield(n *Node) {
+func usefield(n ir.Node) {
if objabi.Fieldtrack_enabled == 0 {
return
}
- switch n.Op {
+ switch n.Op() {
default:
- Fatalf("usefield %v", n.Op)
+ base.Fatalf("usefield %v", n.Op())
- case ODOT, ODOTPTR:
+ case ir.ODOT, ir.ODOTPTR:
break
}
- if n.Sym == nil {
+ if n.Sym() == nil {
// No field name. This DOTPTR was built by the compiler for access
// to runtime data structures. Ignore.
return
}
- t := n.Left.Type
+ t := n.Left().Type()
if t.IsPtr() {
t = t.Elem()
}
field := n.Opt().(*types.Field)
if field == nil {
- Fatalf("usefield %v %v without paramfld", n.Left.Type, n.Sym)
+ base.Fatalf("usefield %v %v without paramfld", n.Left().Type(), n.Sym())
}
- if field.Sym != n.Sym || field.Offset != n.Xoffset {
- Fatalf("field inconsistency: %v,%v != %v,%v", field.Sym, field.Offset, n.Sym, n.Xoffset)
+ if field.Sym != n.Sym() || field.Offset != n.Offset() {
+ base.Fatalf("field inconsistency: %v,%v != %v,%v", field.Sym, field.Offset, n.Sym(), n.Offset())
}
if !strings.Contains(field.Note, "go:\"track\"") {
return
}
- outer := n.Left.Type
+ outer := n.Left().Type()
if outer.IsPtr() {
outer = outer.Elem()
}
if outer.Sym == nil {
- yyerror("tracked field must be in named struct type")
+ base.Errorf("tracked field must be in named struct type")
}
if !types.IsExported(field.Sym.Name) {
- yyerror("tracked field must be exported (upper case)")
+ base.Errorf("tracked field must be exported (upper case)")
}
sym := tracksym(outer, field)
- if Curfn.Func.FieldTrack == nil {
- Curfn.Func.FieldTrack = make(map[*types.Sym]struct{})
+ if Curfn.Func().FieldTrack == nil {
+ Curfn.Func().FieldTrack = make(map[*types.Sym]struct{})
}
- Curfn.Func.FieldTrack[sym] = struct{}{}
+ Curfn.Func().FieldTrack[sym] = struct{}{}
}
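Field tracking applies only to exported, tagged fields of named struct types; a hypothetical tracked field looks like:

    type Payload struct {
        ID int `go:"track"`
    }

Each textual use is recorded in Curfn's FieldTrack set and later emitted for the linker's field-tracking support (GOEXPERIMENT=fieldtrack).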
-func candiscardlist(l Nodes) bool {
+func candiscardlist(l ir.Nodes) bool {
for _, n := range l.Slice() {
if !candiscard(n) {
return false
@@ -3772,96 +3777,94 @@ func candiscardlist(l Nodes) bool {
return true
}
-func candiscard(n *Node) bool {
+func candiscard(n ir.Node) bool {
if n == nil {
return true
}
- switch n.Op {
+ switch n.Op() {
default:
return false
// Discardable as long as the subpieces are.
- case ONAME,
- ONONAME,
- OTYPE,
- OPACK,
- OLITERAL,
- OADD,
- OSUB,
- OOR,
- OXOR,
- OADDSTR,
- OADDR,
- OANDAND,
- OBYTES2STR,
- ORUNES2STR,
- OSTR2BYTES,
- OSTR2RUNES,
- OCAP,
- OCOMPLIT,
- OMAPLIT,
- OSTRUCTLIT,
- OARRAYLIT,
- OSLICELIT,
- OPTRLIT,
- OCONV,
- OCONVIFACE,
- OCONVNOP,
- ODOT,
- OEQ,
- ONE,
- OLT,
- OLE,
- OGT,
- OGE,
- OKEY,
- OSTRUCTKEY,
- OLEN,
- OMUL,
- OLSH,
- ORSH,
- OAND,
- OANDNOT,
- ONEW,
- ONOT,
- OBITNOT,
- OPLUS,
- ONEG,
- OOROR,
- OPAREN,
- ORUNESTR,
- OREAL,
- OIMAG,
- OCOMPLEX:
+ case ir.ONAME,
+ ir.ONONAME,
+ ir.OTYPE,
+ ir.OPACK,
+ ir.OLITERAL,
+ ir.ONIL,
+ ir.OADD,
+ ir.OSUB,
+ ir.OOR,
+ ir.OXOR,
+ ir.OADDSTR,
+ ir.OADDR,
+ ir.OANDAND,
+ ir.OBYTES2STR,
+ ir.ORUNES2STR,
+ ir.OSTR2BYTES,
+ ir.OSTR2RUNES,
+ ir.OCAP,
+ ir.OCOMPLIT,
+ ir.OMAPLIT,
+ ir.OSTRUCTLIT,
+ ir.OARRAYLIT,
+ ir.OSLICELIT,
+ ir.OPTRLIT,
+ ir.OCONV,
+ ir.OCONVIFACE,
+ ir.OCONVNOP,
+ ir.ODOT,
+ ir.OEQ,
+ ir.ONE,
+ ir.OLT,
+ ir.OLE,
+ ir.OGT,
+ ir.OGE,
+ ir.OKEY,
+ ir.OSTRUCTKEY,
+ ir.OLEN,
+ ir.OMUL,
+ ir.OLSH,
+ ir.ORSH,
+ ir.OAND,
+ ir.OANDNOT,
+ ir.ONEW,
+ ir.ONOT,
+ ir.OBITNOT,
+ ir.OPLUS,
+ ir.ONEG,
+ ir.OOROR,
+ ir.OPAREN,
+ ir.ORUNESTR,
+ ir.OREAL,
+ ir.OIMAG,
+ ir.OCOMPLEX:
break
// Discardable as long as we know it's not division by zero.
- case ODIV, OMOD:
- if Isconst(n.Right, CTINT) && n.Right.Val().U.(*Mpint).CmpInt64(0) != 0 {
- break
- }
- if Isconst(n.Right, CTFLT) && n.Right.Val().U.(*Mpflt).CmpFloat64(0) != 0 {
+ case ir.ODIV, ir.OMOD:
+ if n.Right().Op() == ir.OLITERAL && constant.Sign(n.Right().Val()) != 0 {
break
}
return false
// Discardable as long as we know it won't fail because of a bad size.
- case OMAKECHAN, OMAKEMAP:
- if Isconst(n.Left, CTINT) && n.Left.Val().U.(*Mpint).CmpInt64(0) == 0 {
+ case ir.OMAKECHAN, ir.OMAKEMAP:
+ if ir.IsConst(n.Left(), constant.Int) && constant.Sign(n.Left().Val()) == 0 {
break
}
return false
// Difficult to tell what sizes are okay.
- case OMAKESLICE:
+ case ir.OMAKESLICE:
return false
- case OMAKESLICECOPY:
+ case ir.OMAKESLICECOPY:
return false
}
- if !candiscard(n.Left) || !candiscard(n.Right) || !candiscardlist(n.Ninit) || !candiscardlist(n.Nbody) || !candiscardlist(n.List) || !candiscardlist(n.Rlist) {
+ if !candiscard(n.Left()) || !candiscard(n.Right()) || !candiscardlist(n.Init()) || !candiscardlist(n.Body()) || !candiscardlist(n.List()) || !candiscardlist(n.Rlist()) {
return false
}
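By these rules, for example:

    x + y           // discardable if x and y are
    x / c           // discardable only when c is a nonzero constant
    make(chan T, 0) // discardable: a constant zero size cannot fail
    make([]T, n)    // never discardable: the size might be bad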
@@ -3888,67 +3891,67 @@ var wrapCall_prgen int
// The result of wrapCall MUST be assigned back to n, e.g.
// n.Left = wrapCall(n.Left, init)
-func wrapCall(n *Node, init *Nodes) *Node {
- if n.Ninit.Len() != 0 {
- walkstmtlist(n.Ninit.Slice())
- init.AppendNodes(&n.Ninit)
+func wrapCall(n ir.Node, init *ir.Nodes) ir.Node {
+ if n.Init().Len() != 0 {
+ walkstmtlist(n.Init().Slice())
+ init.AppendNodes(n.PtrInit())
}
- isBuiltinCall := n.Op != OCALLFUNC && n.Op != OCALLMETH && n.Op != OCALLINTER
+ isBuiltinCall := n.Op() != ir.OCALLFUNC && n.Op() != ir.OCALLMETH && n.Op() != ir.OCALLINTER
// Turn f(a, b, []T{c, d, e}...) back into f(a, b, c, d, e).
if !isBuiltinCall && n.IsDDD() {
- last := n.List.Len() - 1
- if va := n.List.Index(last); va.Op == OSLICELIT {
- n.List.Set(append(n.List.Slice()[:last], va.List.Slice()...))
+ last := n.List().Len() - 1
+ if va := n.List().Index(last); va.Op() == ir.OSLICELIT {
+ n.PtrList().Set(append(n.List().Slice()[:last], va.List().Slice()...))
n.SetIsDDD(false)
}
}
// origArgs records which arguments are unsafe.Pointer-to-uintptr conversions; they are unwrapped here and the conversions reapplied to the wrapper's parameters below.
- origArgs := make([]*Node, n.List.Len())
- t := nod(OTFUNC, nil, nil)
- for i, arg := range n.List.Slice() {
+ origArgs := make([]ir.Node, n.List().Len())
+ t := ir.Nod(ir.OTFUNC, nil, nil)
+ for i, arg := range n.List().Slice() {
s := lookupN("a", i)
- if !isBuiltinCall && arg.Op == OCONVNOP && arg.Type.IsUintptr() && arg.Left.Type.IsUnsafePtr() {
+ if !isBuiltinCall && arg.Op() == ir.OCONVNOP && arg.Type().IsUintptr() && arg.Left().Type().IsUnsafePtr() {
origArgs[i] = arg
- arg = arg.Left
- n.List.SetIndex(i, arg)
+ arg = arg.Left()
+ n.List().SetIndex(i, arg)
}
- t.List.Append(symfield(s, arg.Type))
+ t.PtrList().Append(symfield(s, arg.Type()))
}
wrapCall_prgen++
sym := lookupN("wrap·", wrapCall_prgen)
fn := dclfunc(sym, t)
- args := paramNnames(t.Type)
+ args := paramNnames(t.Type())
for i, origArg := range origArgs {
if origArg == nil {
continue
}
- arg := nod(origArg.Op, args[i], nil)
- arg.Type = origArg.Type
+ arg := ir.Nod(origArg.Op(), args[i], nil)
+ arg.SetType(origArg.Type())
args[i] = arg
}
- call := nod(n.Op, nil, nil)
+ call := ir.Nod(n.Op(), nil, nil)
if !isBuiltinCall {
- call.Op = OCALL
- call.Left = n.Left
+ call.SetOp(ir.OCALL)
+ call.SetLeft(n.Left())
call.SetIsDDD(n.IsDDD())
}
- call.List.Set(args)
- fn.Nbody.Set1(call)
+ call.PtrList().Set(args)
+ fn.PtrBody().Set1(call)
funcbody()
fn = typecheck(fn, ctxStmt)
- typecheckslice(fn.Nbody.Slice(), ctxStmt)
+ typecheckslice(fn.Body().Slice(), ctxStmt)
xtop = append(xtop, fn)
- call = nod(OCALL, nil, nil)
- call.Left = fn.Func.Nname
- call.List.Set(n.List.Slice())
+ call = ir.Nod(ir.OCALL, nil, nil)
+ call.SetLeft(fn.Func().Nname)
+ call.PtrList().Set(n.List().Slice())
call = typecheck(call, ctxStmt)
call = walkexpr(call, init)
return call
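The net effect, for a call f(a, b) that cannot be issued directly, is roughly:

    func wrap·N(a0 A, a1 B) { f(a0, a1) }
    wrap·N(a, b)

with any unsafe.Pointer-to-uintptr conversion peeled off the argument at the call site and reapplied to the corresponding parameter inside the wrapper, preserving the unsafe-uintptr conventions.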
@@ -3959,15 +3962,15 @@ func wrapCall(n *Node, init *Nodes) *Node {
// type syntax expression n.Type.
// The result of substArgTypes MUST be assigned back to old, e.g.
// n.Left = substArgTypes(n.Left, t1, t2)
-func substArgTypes(old *Node, types_ ...*types.Type) *Node {
- n := old.copy()
+func substArgTypes(old ir.Node, types_ ...*types.Type) ir.Node {
+ n := ir.Copy(old)
for _, t := range types_ {
dowidth(t)
}
- n.Type = types.SubstAny(n.Type, &types_)
+ n.SetType(types.SubstAny(n.Type(), &types_))
if len(types_) > 0 {
- Fatalf("substArgTypes: too many argument types")
+ base.Fatalf("substArgTypes: too many argument types")
}
return n
}
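For example, runtime helpers are declared with "any" placeholders that each call site stamps out; schematically (elem stands for the concrete element type):

    fn := syslook("growslice")         // func(typ *byte, old []any, cap int) []any
    fn = substArgTypes(fn, elem, elem) // successive "any"s become elem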
@@ -3989,18 +3992,18 @@ func canMergeLoads() bool {
// isRuneCount reports whether n is of the form len([]rune(string)).
// These are optimized into a call to runtime.countrunes.
-func isRuneCount(n *Node) bool {
- return Debug.N == 0 && !instrumenting && n.Op == OLEN && n.Left.Op == OSTR2RUNES
+func isRuneCount(n ir.Node) bool {
+ return base.Flag.N == 0 && !instrumenting && n.Op() == ir.OLEN && n.Left().Op() == ir.OSTR2RUNES
}
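That is, with optimization on and no instrumentation:

    len([]rune(s))  =>  runtime.countrunes(s)

so the intermediate []rune slice is never materialized.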
-func walkCheckPtrAlignment(n *Node, init *Nodes, count *Node) *Node {
- if !n.Type.IsPtr() {
- Fatalf("expected pointer type: %v", n.Type)
+func walkCheckPtrAlignment(n ir.Node, init *ir.Nodes, count ir.Node) ir.Node {
+ if !n.Type().IsPtr() {
+ base.Fatalf("expected pointer type: %v", n.Type())
}
- elem := n.Type.Elem()
+ elem := n.Type().Elem()
if count != nil {
if !elem.IsArray() {
- Fatalf("expected array type: %v", elem)
+ base.Fatalf("expected array type: %v", elem)
}
elem = elem.Elem()
}
@@ -4014,14 +4017,14 @@ func walkCheckPtrAlignment(n *Node, init *Nodes, count *Node) *Node {
count = nodintconst(1)
}
- n.Left = cheapexpr(n.Left, init)
- init.Append(mkcall("checkptrAlignment", nil, init, convnop(n.Left, types.Types[TUNSAFEPTR]), typename(elem), conv(count, types.Types[TUINTPTR])))
+ n.SetLeft(cheapexpr(n.Left(), init))
+ init.Append(mkcall("checkptrAlignment", nil, init, convnop(n.Left(), types.Types[types.TUNSAFEPTR]), typename(elem), conv(count, types.Types[types.TUINTPTR])))
return n
}
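Under checkptr instrumentation, a conversion such as (*T)(p) with p an unsafe.Pointer is therefore preceded by, schematically (elemType stands for the pointee's type descriptor):

    checkptrAlignment(unsafe.Pointer(p), elemType, 1)

which throws at run time if the target is misaligned or straddles multiple heap objects.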
var walkCheckPtrArithmeticMarker byte
-func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node {
+func walkCheckPtrArithmetic(n ir.Node, init *ir.Nodes) ir.Node {
// Calling cheapexpr(n, init) below leads to a recursive call
// to walkexpr, which leads us back here again. Use n.Opt to
// prevent infinite loops.
@@ -4030,19 +4033,19 @@ func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node {
} else if opt != nil {
// We use n.Opt() here because today it's not used for OCONVNOP. If that changes,
// there's no guarantee that temporarily replacing it is safe, so just hard fail here.
- Fatalf("unexpected Opt: %v", opt)
+ base.Fatalf("unexpected Opt: %v", opt)
}
n.SetOpt(&walkCheckPtrArithmeticMarker)
defer n.SetOpt(nil)
// TODO(mdempsky): Make stricter. We only need to exempt
// reflect.Value.Pointer and reflect.Value.UnsafeAddr.
- switch n.Left.Op {
- case OCALLFUNC, OCALLMETH, OCALLINTER:
+ switch n.Left().Op() {
+ case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
return n
}
- if n.Left.Op == ODOTPTR && isReflectHeaderDataField(n.Left) {
+ if n.Left().Op() == ir.ODOTPTR && isReflectHeaderDataField(n.Left()) {
return n
}
@@ -4052,30 +4055,30 @@ func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node {
// "It is valid both to add and to subtract offsets from a
// pointer in this way. It is also valid to use &^ to round
// pointers, usually for alignment."
- var originals []*Node
- var walk func(n *Node)
- walk = func(n *Node) {
- switch n.Op {
- case OADD:
- walk(n.Left)
- walk(n.Right)
- case OSUB, OANDNOT:
- walk(n.Left)
- case OCONVNOP:
- if n.Left.Type.IsUnsafePtr() {
- n.Left = cheapexpr(n.Left, init)
- originals = append(originals, convnop(n.Left, types.Types[TUNSAFEPTR]))
+ var originals []ir.Node
+ var walk func(n ir.Node)
+ walk = func(n ir.Node) {
+ switch n.Op() {
+ case ir.OADD:
+ walk(n.Left())
+ walk(n.Right())
+ case ir.OSUB, ir.OANDNOT:
+ walk(n.Left())
+ case ir.OCONVNOP:
+ if n.Left().Type().IsUnsafePtr() {
+ n.SetLeft(cheapexpr(n.Left(), init))
+ originals = append(originals, convnop(n.Left(), types.Types[types.TUNSAFEPTR]))
}
}
}
- walk(n.Left)
+ walk(n.Left())
n = cheapexpr(n, init)
- slice := mkdotargslice(types.NewSlice(types.Types[TUNSAFEPTR]), originals)
- slice.Esc = EscNone
+ slice := mkdotargslice(types.NewSlice(types.Types[types.TUNSAFEPTR]), originals)
+ slice.SetEsc(EscNone)
- init.Append(mkcall("checkptrArithmetic", nil, init, convnop(n, types.Types[TUNSAFEPTR]), slice))
+ init.Append(mkcall("checkptrArithmetic", nil, init, convnop(n, types.Types[types.TUNSAFEPTR]), slice))
// TODO(khr): Mark backing store of slice as dead. This will allow us to reuse
// the backing store for multiple calls to checkptrArithmetic.
@@ -4085,6 +4088,6 @@ func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node {
// checkPtr reports whether pointer checking should be enabled for
// function fn at a given level. See debugHelpFooter for defined
// levels.
-func checkPtr(fn *Node, level int) bool {
- return Debug_checkptr >= level && fn.Func.Pragma&NoCheckPtr == 0
+func checkPtr(fn ir.Node, level int) bool {
+ return base.Debug.Checkptr >= level && fn.Func().Pragma&ir.NoCheckPtr == 0
}
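A function can opt out of this instrumentation entirely with the //go:nocheckptr pragma, which sets the ir.NoCheckPtr bit tested here; a sketch (hot and T are illustrative):

    //go:nocheckptr
    func hot(p unsafe.Pointer) *T {
        return (*T)(p) // no checkptrAlignment call is inserted
    }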