author    Russ Cox <rsc@golang.org>  2020-11-25 01:11:56 -0500
committer Russ Cox <rsc@golang.org>  2020-11-25 17:30:43 +0000
commit    41f3af9d04362a56c1af186af134c704a03fa97b (patch)
tree      e67e7c1b67dd5c48a4c18fdb2f973bf789c7b2d1
parent    4d0d9c2c5c35377b0662f2fd0995867919552251 (diff)
[dev.regabi] cmd/compile: replace *Node type with an interface Node [generated]
The plan is to introduce a Node interface that replaces the old
*Node pointer-to-struct. The previous CL defined an interface INode
modeling a *Node. This CL:

 - Changes all references outside internal/ir to use INode,
   along with many references inside internal/ir as well.
 - Renames Node to node.
 - Renames INode to Node.

So now ir.Node is an interface implemented by *ir.node, which is
otherwise inaccessible, and the code outside package ir is now
(clearly) using only the interface.

The usual rule is never to redefine an existing name with a new
meaning, so that old code that hasn't been updated gets an
"unknown name" error instead of more mysterious errors or silent
misbehavior. That rule would caution against replacing
Node-the-struct with Node-the-interface, as in this CL, because
code that says *Node would now be using a pointer to an interface.
But this CL is being landed at the same time as another that moves
Node from gc to ir. So the net effect is to replace *gc.Node with
ir.Node, which does follow the rule: any lingering references to
gc.Node will be told it's gone, not silently start using pointers
to interfaces.

So the rule is followed by the CL sequence, just not this specific CL.

Overall, the loss of inlining caused by using interfaces cuts the
compiler speed by about 6%, a not insignificant amount. However, as
we convert the representation to concrete structs that are not the
giant Node over the next weeks, that speed should come back as more
of the compiler starts operating directly on concrete types and the
memory taken up by the graph of Nodes drops due to the more precise
structs. Honestly, I was expecting worse.

% benchstat bench.old bench.new
name                      old time/op       new time/op       delta
Template                  168ms ± 4%        182ms ± 2%        +8.34%   (p=0.000 n=9+9)
Unicode                   72.2ms ±10%       82.5ms ± 6%       +14.38%  (p=0.000 n=9+9)
GoTypes                   563ms ± 8%        598ms ± 2%        +6.14%   (p=0.006 n=9+9)
Compiler                  2.89s ± 4%        3.04s ± 2%        +5.37%   (p=0.000 n=10+9)
SSA                       6.45s ± 4%        7.25s ± 5%        +12.41%  (p=0.000 n=9+10)
Flate                     105ms ± 2%        115ms ± 1%        +9.66%   (p=0.000 n=10+8)
GoParser                  144ms ±10%        152ms ± 2%        +5.79%   (p=0.011 n=9+8)
Reflect                   345ms ± 9%        370ms ± 4%        +7.28%   (p=0.001 n=10+9)
Tar                       149ms ± 9%        161ms ± 5%        +8.05%   (p=0.001 n=10+9)
XML                       190ms ± 3%        209ms ± 2%        +9.54%   (p=0.000 n=9+8)
LinkCompiler              327ms ± 2%        325ms ± 2%        ~        (p=0.382 n=8+8)
ExternalLinkCompiler      1.77s ± 4%        1.73s ± 6%        ~        (p=0.113 n=9+10)
LinkWithoutDebugCompiler  214ms ± 4%        211ms ± 2%        ~        (p=0.360 n=10+8)
StdCmd                    14.8s ± 3%        15.9s ± 1%        +6.98%   (p=0.000 n=10+9)
[Geo mean]                480ms             510ms             +6.31%

name                      old user-time/op  new user-time/op  delta
Template                  223ms ± 3%        237ms ± 3%        +6.16%   (p=0.000 n=9+10)
Unicode                   103ms ± 6%        113ms ± 3%        +9.53%   (p=0.000 n=9+9)
GoTypes                   758ms ± 8%        800ms ± 2%        +5.55%   (p=0.003 n=10+9)
Compiler                  3.95s ± 2%        4.12s ± 2%        +4.34%   (p=0.000 n=10+9)
SSA                       9.43s ± 1%        9.74s ± 4%        +3.25%   (p=0.000 n=8+10)
Flate                     132ms ± 2%        141ms ± 2%        +6.89%   (p=0.000 n=9+9)
GoParser                  177ms ± 9%        183ms ± 4%        ~        (p=0.050 n=9+9)
Reflect                   467ms ±10%        495ms ± 7%        +6.17%   (p=0.029 n=10+10)
Tar                       183ms ± 9%        197ms ± 5%        +7.92%   (p=0.001 n=10+10)
XML                       249ms ± 5%        268ms ± 4%        +7.82%   (p=0.000 n=10+9)
LinkCompiler              544ms ± 5%        544ms ± 6%        ~        (p=0.863 n=9+9)
ExternalLinkCompiler      1.79s ± 4%        1.75s ± 6%        ~        (p=0.075 n=10+10)
LinkWithoutDebugCompiler  248ms ± 6%        246ms ± 2%        ~        (p=0.965 n=10+8)
[Geo mean]                483ms             504ms             +4.41%

[git-generate]
cd src/cmd/compile/internal/ir

: # We need to do the conversion in multiple steps, so we introduce
: # a temporary type alias that will start out meaning the pointer-to-struct
: # and then change to mean the interface.
rf '
	mv Node OldNode
	add node.go \
		type Node = *OldNode
'

: # It should work to do this ex in ir, but it misses test files, due to a bug in rf.
: # Run the command in gc to handle gc's tests, and then again in ssa for ssa's tests.
cd ../gc
rf '
	ex . ../arm ../riscv64 ../arm64 ../mips64 ../ppc64 ../mips ../wasm {
		import "cmd/compile/internal/ir"
		*ir.OldNode -> ir.Node
	}
'

cd ../ssa
rf '
	ex {
		import "cmd/compile/internal/ir"
		*ir.OldNode -> ir.Node
	}
'

: # Back in ir, finish conversion clumsily with sed,
: # because type checking and circular aliases do not mix.
cd ../ir
sed -i '' '
	/type Node = \*OldNode/d
	s/\*OldNode/Node/g
	s/^func (n Node)/func (n *OldNode)/
	s/OldNode/node/g
	s/type INode interface/type Node interface/
	s/var _ INode = (Node)(nil)/var _ Node = (*node)(nil)/
' *.go
gofmt -w *.go

sed -i '' '
	s/{Func{}, 136, 248}/{Func{}, 152, 280}/
	s/{Name{}, 32, 56}/{Name{}, 44, 80}/
	s/{Param{}, 24, 48}/{Param{}, 44, 88}/
	s/{node{}, 76, 128}/{node{}, 88, 152}/
' sizeof_test.go

cd ../ssa
sed -i '' '
	s/{LocalSlot{}, 28, 40}/{LocalSlot{}, 32, 48}/
' sizeof_test.go

cd ../gc
sed -i '' 's/\*ir.Node/ir.Node/' mkbuiltin.go

cd ../../../..
go install std cmd
cd cmd/compile
go test -u || go test -u

Change-Id: I196bbe3b648e4701662e4a2bada40bf155e2a553
Reviewed-on: https://go-review.googlesource.com/c/go/+/272935
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
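For orientation, the end state of this CL can be sketched in a few lines of Go. This is a minimal, hypothetical reduction: the method set shown (Op, Left, SetLeft) is invented for illustration and the real ir.Node has far more methods; only the var _ check at the bottom is taken verbatim from the sed script above.

package ir

// Op stands in for the real ir.Op opcode type.
type Op uint8

// Node is the exported interface that the rest of the compiler sees.
type Node interface {
	Op() Op
	Left() Node
	SetLeft(n Node)
}

// node is the old giant struct, renamed and unexported, so code
// outside package ir can reach it only through the Node interface.
type node struct {
	op   Op
	left Node
}

func (n *node) Op() Op         { return n.op }
func (n *node) Left() Node     { return n.left }
func (n *node) SetLeft(x Node) { n.left = x }

// Compile-time check that *node implements Node, as installed by
// the final sed pass.
var _ Node = (*node)(nil)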
-rw-r--r--  src/cmd/compile/fmtmap_test.go  23
-rw-r--r--  src/cmd/compile/internal/arm/ssa.go  2
-rw-r--r--  src/cmd/compile/internal/arm64/ssa.go  2
-rw-r--r--  src/cmd/compile/internal/gc/alg.go  46
-rw-r--r--  src/cmd/compile/internal/gc/bexport.go  2
-rw-r--r--  src/cmd/compile/internal/gc/bimport.go  4
-rw-r--r--  src/cmd/compile/internal/gc/builtin.go  182
-rw-r--r--  src/cmd/compile/internal/gc/closure.go  40
-rw-r--r--  src/cmd/compile/internal/gc/const.go  30
-rw-r--r--  src/cmd/compile/internal/gc/dcl.go  90
-rw-r--r--  src/cmd/compile/internal/gc/embed.go  10
-rw-r--r--  src/cmd/compile/internal/gc/escape.go  84
-rw-r--r--  src/cmd/compile/internal/gc/export.go  10
-rw-r--r--  src/cmd/compile/internal/gc/gen.go  8
-rw-r--r--  src/cmd/compile/internal/gc/go.go  12
-rw-r--r--  src/cmd/compile/internal/gc/gsubr.go  8
-rw-r--r--  src/cmd/compile/internal/gc/iexport.go  38
-rw-r--r--  src/cmd/compile/internal/gc/iimport.go  44
-rw-r--r--  src/cmd/compile/internal/gc/init.go  2
-rw-r--r--  src/cmd/compile/internal/gc/initorder.go  38
-rw-r--r--  src/cmd/compile/internal/gc/inl.go  96
-rw-r--r--  src/cmd/compile/internal/gc/main.go  4
-rw-r--r--  src/cmd/compile/internal/gc/mkbuiltin.go  2
-rw-r--r--  src/cmd/compile/internal/gc/noder.go  126
-rw-r--r--  src/cmd/compile/internal/gc/obj.go  16
-rw-r--r--  src/cmd/compile/internal/gc/order.go  74
-rw-r--r--  src/cmd/compile/internal/gc/pgen.go  56
-rw-r--r--  src/cmd/compile/internal/gc/pgen_test.go  14
-rw-r--r--  src/cmd/compile/internal/gc/phi.go  34
-rw-r--r--  src/cmd/compile/internal/gc/plive.go  28
-rw-r--r--  src/cmd/compile/internal/gc/racewalk.go  2
-rw-r--r--  src/cmd/compile/internal/gc/range.go  36
-rw-r--r--  src/cmd/compile/internal/gc/reflect.go  14
-rw-r--r--  src/cmd/compile/internal/gc/scc.go  14
-rw-r--r--  src/cmd/compile/internal/gc/scope.go  2
-rw-r--r--  src/cmd/compile/internal/gc/select.go  30
-rw-r--r--  src/cmd/compile/internal/gc/sinit.go  78
-rw-r--r--  src/cmd/compile/internal/gc/ssa.go  326
-rw-r--r--  src/cmd/compile/internal/gc/subr.go  74
-rw-r--r--  src/cmd/compile/internal/gc/swt.go  62
-rw-r--r--  src/cmd/compile/internal/gc/typecheck.go  118
-rw-r--r--  src/cmd/compile/internal/gc/unsafe.go  2
-rw-r--r--  src/cmd/compile/internal/gc/walk.go  202
-rw-r--r--  src/cmd/compile/internal/ir/dump.go  2
-rw-r--r--  src/cmd/compile/internal/ir/fmt.go  32
-rw-r--r--  src/cmd/compile/internal/ir/node.go  364
-rw-r--r--  src/cmd/compile/internal/ir/sizeof_test.go  8
-rw-r--r--  src/cmd/compile/internal/ir/val.go  6
-rw-r--r--  src/cmd/compile/internal/mips/ssa.go  2
-rw-r--r--  src/cmd/compile/internal/mips64/ssa.go  2
-rw-r--r--  src/cmd/compile/internal/ppc64/ssa.go  2
-rw-r--r--  src/cmd/compile/internal/riscv64/ssa.go  2
-rw-r--r--  src/cmd/compile/internal/ssa/config.go  2
-rw-r--r--  src/cmd/compile/internal/ssa/deadstore.go  20
-rw-r--r--  src/cmd/compile/internal/ssa/debug.go  12
-rw-r--r--  src/cmd/compile/internal/ssa/export_test.go  2
-rw-r--r--  src/cmd/compile/internal/ssa/location.go  2
-rw-r--r--  src/cmd/compile/internal/ssa/nilcheck.go  2
-rw-r--r--  src/cmd/compile/internal/ssa/regalloc.go  2
-rw-r--r--  src/cmd/compile/internal/ssa/sizeof_test.go  2
-rw-r--r--  src/cmd/compile/internal/ssa/stackalloc.go  2
-rw-r--r--  src/cmd/compile/internal/wasm/ssa.go  2
62 files changed, 1277 insertions, 1276 deletions
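Before the per-file diffs, one note on why the multi-step script in the commit message never breaks the build: the intermediate state relies on a Go type alias, so Node and *OldNode are the same type, not merely convertible. A hedged sketch of that transitional state follows; OldNode's fields and the asNode helper are invented for illustration.

package ir

// OldNode is the renamed pointer-to-struct representation.
type OldNode struct {
	// ... fields of the old giant Node struct ...
}

// Node is the temporary alias added by rf and deleted again by the
// final sed pass; while it exists, every *OldNode already is a Node.
type Node = *OldNode

// Because Node is an alias rather than a defined type, no conversion
// is needed in either direction.
func asNode(n *OldNode) Node { return n }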
diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go
index 432d26a7b8..7a375604fd 100644
--- a/src/cmd/compile/fmtmap_test.go
+++ b/src/cmd/compile/fmtmap_test.go
@@ -22,14 +22,7 @@ package main_test
var knownFormats = map[string]string{
"*bytes.Buffer %s": "",
"*cmd/compile/internal/gc.EscLocation %v": "",
- "*cmd/compile/internal/ir.Node %#v": "",
- "*cmd/compile/internal/ir.Node %+S": "",
- "*cmd/compile/internal/ir.Node %+v": "",
- "*cmd/compile/internal/ir.Node %L": "",
- "*cmd/compile/internal/ir.Node %S": "",
- "*cmd/compile/internal/ir.Node %j": "",
- "*cmd/compile/internal/ir.Node %p": "",
- "*cmd/compile/internal/ir.Node %v": "",
+ "*cmd/compile/internal/ir.node %v": "",
"*cmd/compile/internal/ssa.Block %s": "",
"*cmd/compile/internal/ssa.Block %v": "",
"*cmd/compile/internal/ssa.Func %s": "",
@@ -83,6 +76,14 @@ var knownFormats = map[string]string{
"cmd/compile/internal/ir.Class %d": "",
"cmd/compile/internal/ir.Class %v": "",
"cmd/compile/internal/ir.FmtMode %d": "",
+ "cmd/compile/internal/ir.Node %#v": "",
+ "cmd/compile/internal/ir.Node %+S": "",
+ "cmd/compile/internal/ir.Node %+v": "",
+ "cmd/compile/internal/ir.Node %L": "",
+ "cmd/compile/internal/ir.Node %S": "",
+ "cmd/compile/internal/ir.Node %j": "",
+ "cmd/compile/internal/ir.Node %p": "",
+ "cmd/compile/internal/ir.Node %v": "",
"cmd/compile/internal/ir.Nodes %#v": "",
"cmd/compile/internal/ir.Nodes %+v": "",
"cmd/compile/internal/ir.Nodes %.v": "",
@@ -160,9 +161,9 @@ var knownFormats = map[string]string{
"interface{} %q": "",
"interface{} %s": "",
"interface{} %v": "",
- "map[*cmd/compile/internal/ir.Node]*cmd/compile/internal/ssa.Value %v": "",
- "map[*cmd/compile/internal/ir.Node][]*cmd/compile/internal/ir.Node %v": "",
- "map[cmd/compile/internal/ssa.ID]uint32 %v": "",
+ "map[cmd/compile/internal/ir.Node]*cmd/compile/internal/ssa.Value %v": "",
+ "map[cmd/compile/internal/ir.Node][]cmd/compile/internal/ir.Node %v": "",
+ "map[cmd/compile/internal/ssa.ID]uint32 %v": "",
"map[int64]uint32 %v": "",
"math/big.Accuracy %s": "",
"reflect.Type %s": "",
diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go
index ff1dd8869e..b34e2973b2 100644
--- a/src/cmd/compile/internal/arm/ssa.go
+++ b/src/cmd/compile/internal/arm/ssa.go
@@ -546,7 +546,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
- case *ir.Node:
+ case ir.Node:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
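The hunk above (and its twins in the other back ends below) is the one behavioral subtlety of the rename: in a type switch, a case of interface type matches any dynamic type that implements it, whereas the old case matched exactly the concrete pointer *ir.Node. A toy illustration with invented stand-in types, not the compiler's own:

package main

import "fmt"

// LSym and node stand in for *obj.LSym and the unexported *ir.node.
type LSym struct{ name string }

type Node interface{ Op() string }

type node struct{ op string }

func (n *node) Op() string { return n.op }

// classify mirrors the aux switch in ssaGenValue: the Node case now
// matches any implementation of the interface.
func classify(aux interface{}) string {
	switch aux.(type) {
	case *LSym:
		return "SB" // static symbol: offset from static base
	case Node:
		return "SP" // stack slot: offset from stack pointer
	case nil:
		return "none"
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(classify(&LSym{name: "runtime.writeBarrier"})) // SB
	fmt.Println(classify(&node{op: "ONAME"}))                  // SP
}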
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
index 58c00dc3bd..d5bd9687cf 100644
--- a/src/cmd/compile/internal/arm64/ssa.go
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -396,7 +396,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
- case *ir.Node:
+ case ir.Node:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go
index ffd1682b35..d2762126ad 100644
--- a/src/cmd/compile/internal/gc/alg.go
+++ b/src/cmd/compile/internal/gc/alg.go
@@ -404,7 +404,7 @@ func genhash(t *types.Type) *obj.LSym {
return closure
}
-func hashfor(t *types.Type) *ir.Node {
+func hashfor(t *types.Type) ir.Node {
var sym *types.Sym
switch a, _ := algtype1(t); a {
@@ -432,10 +432,10 @@ func hashfor(t *types.Type) *ir.Node {
n := NewName(sym)
setNodeNameFunc(n)
- n.SetType(functype(nil, []*ir.Node{
+ n.SetType(functype(nil, []ir.Node{
anonfield(types.NewPtr(t)),
anonfield(types.Types[types.TUINTPTR]),
- }, []*ir.Node{
+ }, []ir.Node{
anonfield(types.Types[types.TUINTPTR]),
}))
return n
@@ -567,9 +567,9 @@ func geneq(t *types.Type) *obj.LSym {
//
// TODO(josharian): consider doing some loop unrolling
// for larger nelem as well, processing a few elements at a time in a loop.
- checkAll := func(unroll int64, last bool, eq func(pi, qi *ir.Node) *ir.Node) {
+ checkAll := func(unroll int64, last bool, eq func(pi, qi ir.Node) ir.Node) {
// checkIdx generates a node to check for equality at index i.
- checkIdx := func(i *ir.Node) *ir.Node {
+ checkIdx := func(i ir.Node) ir.Node {
// pi := p[i]
pi := ir.Nod(ir.OINDEX, np, i)
pi.SetBounded(true)
@@ -621,24 +621,24 @@ func geneq(t *types.Type) *obj.LSym {
// Do two loops. First, check that all the lengths match (cheap).
// Second, check that all the contents match (expensive).
// TODO: when the array size is small, unroll the length match checks.
- checkAll(3, false, func(pi, qi *ir.Node) *ir.Node {
+ checkAll(3, false, func(pi, qi ir.Node) ir.Node {
// Compare lengths.
eqlen, _ := eqstring(pi, qi)
return eqlen
})
- checkAll(1, true, func(pi, qi *ir.Node) *ir.Node {
+ checkAll(1, true, func(pi, qi ir.Node) ir.Node {
// Compare contents.
_, eqmem := eqstring(pi, qi)
return eqmem
})
case types.TFLOAT32, types.TFLOAT64:
- checkAll(2, true, func(pi, qi *ir.Node) *ir.Node {
+ checkAll(2, true, func(pi, qi ir.Node) ir.Node {
// p[i] == q[i]
return ir.Nod(ir.OEQ, pi, qi)
})
// TODO: pick apart structs, do them piecemeal too
default:
- checkAll(1, true, func(pi, qi *ir.Node) *ir.Node {
+ checkAll(1, true, func(pi, qi ir.Node) ir.Node {
// p[i] == q[i]
return ir.Nod(ir.OEQ, pi, qi)
})
@@ -648,9 +648,9 @@ func geneq(t *types.Type) *obj.LSym {
// Build a list of conditions to satisfy.
// The conditions are a list-of-lists. Conditions are reorderable
// within each inner list. The outer lists must be evaluated in order.
- var conds [][]*ir.Node
- conds = append(conds, []*ir.Node{})
- and := func(n *ir.Node) {
+ var conds [][]ir.Node
+ conds = append(conds, []ir.Node{})
+ and := func(n ir.Node) {
i := len(conds) - 1
conds[i] = append(conds[i], n)
}
@@ -670,7 +670,7 @@ func geneq(t *types.Type) *obj.LSym {
if !IsRegularMemory(f.Type) {
if EqCanPanic(f.Type) {
// Enforce ordering by starting a new set of reorderable conditions.
- conds = append(conds, []*ir.Node{})
+ conds = append(conds, []ir.Node{})
}
p := nodSym(ir.OXDOT, np, f.Sym)
q := nodSym(ir.OXDOT, nq, f.Sym)
@@ -684,7 +684,7 @@ func geneq(t *types.Type) *obj.LSym {
}
if EqCanPanic(f.Type) {
// Also enforce ordering after something that can panic.
- conds = append(conds, []*ir.Node{})
+ conds = append(conds, []ir.Node{})
}
i++
continue
@@ -709,9 +709,9 @@ func geneq(t *types.Type) *obj.LSym {
// Sort conditions to put runtime calls last.
// Preserve the rest of the ordering.
- var flatConds []*ir.Node
+ var flatConds []ir.Node
for _, c := range conds {
- isCall := func(n *ir.Node) bool {
+ isCall := func(n ir.Node) bool {
return n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC
}
sort.SliceStable(c, func(i, j int) bool {
@@ -785,7 +785,7 @@ func geneq(t *types.Type) *obj.LSym {
return closure
}
-func hasCall(n *ir.Node) bool {
+func hasCall(n ir.Node) bool {
if n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC {
return true
}
@@ -820,7 +820,7 @@ func hasCall(n *ir.Node) bool {
// eqfield returns the node
// p.field == q.field
-func eqfield(p *ir.Node, q *ir.Node, field *types.Sym) *ir.Node {
+func eqfield(p ir.Node, q ir.Node, field *types.Sym) ir.Node {
nx := nodSym(ir.OXDOT, p, field)
ny := nodSym(ir.OXDOT, q, field)
ne := ir.Nod(ir.OEQ, nx, ny)
@@ -833,7 +833,7 @@ func eqfield(p *ir.Node, q *ir.Node, field *types.Sym) *ir.Node {
// memequal(s.ptr, t.ptr, len(s))
// which can be used to construct string equality comparison.
// eqlen must be evaluated before eqmem, and shortcircuiting is required.
-func eqstring(s, t *ir.Node) (eqlen, eqmem *ir.Node) {
+func eqstring(s, t ir.Node) (eqlen, eqmem ir.Node) {
s = conv(s, types.Types[types.TSTRING])
t = conv(t, types.Types[types.TSTRING])
sptr := ir.Nod(ir.OSPTR, s, nil)
@@ -859,13 +859,13 @@ func eqstring(s, t *ir.Node) (eqlen, eqmem *ir.Node) {
// ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
// which can be used to construct interface equality comparison.
// eqtab must be evaluated before eqdata, and shortcircuiting is required.
-func eqinterface(s, t *ir.Node) (eqtab, eqdata *ir.Node) {
+func eqinterface(s, t ir.Node) (eqtab, eqdata ir.Node) {
if !types.Identical(s.Type(), t.Type()) {
base.Fatalf("eqinterface %v %v", s.Type(), t.Type())
}
// func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
// func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
- var fn *ir.Node
+ var fn ir.Node
if s.Type().IsEmptyInterface() {
fn = syslook("efaceeq")
} else {
@@ -893,7 +893,7 @@ func eqinterface(s, t *ir.Node) (eqtab, eqdata *ir.Node) {
// eqmem returns the node
// memequal(&p.field, &q.field [, size])
-func eqmem(p *ir.Node, q *ir.Node, field *types.Sym, size int64) *ir.Node {
+func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node {
nx := ir.Nod(ir.OADDR, nodSym(ir.OXDOT, p, field), nil)
ny := ir.Nod(ir.OADDR, nodSym(ir.OXDOT, q, field), nil)
nx = typecheck(nx, ctxExpr)
@@ -910,7 +910,7 @@ func eqmem(p *ir.Node, q *ir.Node, field *types.Sym, size int64) *ir.Node {
return call
}
-func eqmemfunc(size int64, t *types.Type) (fn *ir.Node, needsize bool) {
+func eqmemfunc(size int64, t *types.Type) (fn ir.Node, needsize bool) {
switch size {
default:
fn = syslook("memequal")
diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go
index e36903cbe0..a470b842ff 100644
--- a/src/cmd/compile/internal/gc/bexport.go
+++ b/src/cmd/compile/internal/gc/bexport.go
@@ -14,7 +14,7 @@ type exporter struct {
}
// markObject visits a reachable object.
-func (p *exporter) markObject(n *ir.Node) {
+func (p *exporter) markObject(n ir.Node) {
if n.Op() == ir.ONAME && n.Class() == ir.PFUNC {
inlFlood(n)
}
diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go
index 603710d6b1..c0c18e728e 100644
--- a/src/cmd/compile/internal/gc/bimport.go
+++ b/src/cmd/compile/internal/gc/bimport.go
@@ -9,11 +9,11 @@ import (
"cmd/internal/src"
)
-func npos(pos src.XPos, n *ir.Node) *ir.Node {
+func npos(pos src.XPos, n ir.Node) ir.Node {
n.SetPos(pos)
return n
}
-func builtinCall(op ir.Op) *ir.Node {
+func builtinCall(op ir.Op) ir.Node {
return ir.Nod(ir.OCALL, mkname(ir.BuiltinPkg.Lookup(ir.OpNames[op])), nil)
}
diff --git a/src/cmd/compile/internal/gc/builtin.go b/src/cmd/compile/internal/gc/builtin.go
index 5016905f22..a57c611559 100644
--- a/src/cmd/compile/internal/gc/builtin.go
+++ b/src/cmd/compile/internal/gc/builtin.go
@@ -210,132 +210,132 @@ func runtimeTypes() []*types.Type {
typs[1] = types.NewPtr(typs[0])
typs[2] = types.Types[types.TANY]
typs[3] = types.NewPtr(typs[2])
- typs[4] = functype(nil, []*ir.Node{anonfield(typs[1])}, []*ir.Node{anonfield(typs[3])})
+ typs[4] = functype(nil, []ir.Node{anonfield(typs[1])}, []ir.Node{anonfield(typs[3])})
typs[5] = types.Types[types.TUINTPTR]
typs[6] = types.Types[types.TBOOL]
typs[7] = types.Types[types.TUNSAFEPTR]
- typs[8] = functype(nil, []*ir.Node{anonfield(typs[5]), anonfield(typs[1]), anonfield(typs[6])}, []*ir.Node{anonfield(typs[7])})
+ typs[8] = functype(nil, []ir.Node{anonfield(typs[5]), anonfield(typs[1]), anonfield(typs[6])}, []ir.Node{anonfield(typs[7])})
typs[9] = functype(nil, nil, nil)
typs[10] = types.Types[types.TINTER]
- typs[11] = functype(nil, []*ir.Node{anonfield(typs[10])}, nil)
+ typs[11] = functype(nil, []ir.Node{anonfield(typs[10])}, nil)
typs[12] = types.Types[types.TINT32]
typs[13] = types.NewPtr(typs[12])
- typs[14] = functype(nil, []*ir.Node{anonfield(typs[13])}, []*ir.Node{anonfield(typs[10])})
+ typs[14] = functype(nil, []ir.Node{anonfield(typs[13])}, []ir.Node{anonfield(typs[10])})
typs[15] = types.Types[types.TINT]
- typs[16] = functype(nil, []*ir.Node{anonfield(typs[15]), anonfield(typs[15])}, nil)
+ typs[16] = functype(nil, []ir.Node{anonfield(typs[15]), anonfield(typs[15])}, nil)
typs[17] = types.Types[types.TUINT]
- typs[18] = functype(nil, []*ir.Node{anonfield(typs[17]), anonfield(typs[15])}, nil)
- typs[19] = functype(nil, []*ir.Node{anonfield(typs[6])}, nil)
+ typs[18] = functype(nil, []ir.Node{anonfield(typs[17]), anonfield(typs[15])}, nil)
+ typs[19] = functype(nil, []ir.Node{anonfield(typs[6])}, nil)
typs[20] = types.Types[types.TFLOAT64]
- typs[21] = functype(nil, []*ir.Node{anonfield(typs[20])}, nil)
+ typs[21] = functype(nil, []ir.Node{anonfield(typs[20])}, nil)
typs[22] = types.Types[types.TINT64]
- typs[23] = functype(nil, []*ir.Node{anonfield(typs[22])}, nil)
+ typs[23] = functype(nil, []ir.Node{anonfield(typs[22])}, nil)
typs[24] = types.Types[types.TUINT64]
- typs[25] = functype(nil, []*ir.Node{anonfield(typs[24])}, nil)
+ typs[25] = functype(nil, []ir.Node{anonfield(typs[24])}, nil)
typs[26] = types.Types[types.TCOMPLEX128]
- typs[27] = functype(nil, []*ir.Node{anonfield(typs[26])}, nil)
+ typs[27] = functype(nil, []ir.Node{anonfield(typs[26])}, nil)
typs[28] = types.Types[types.TSTRING]
- typs[29] = functype(nil, []*ir.Node{anonfield(typs[28])}, nil)
- typs[30] = functype(nil, []*ir.Node{anonfield(typs[2])}, nil)
- typs[31] = functype(nil, []*ir.Node{anonfield(typs[5])}, nil)
+ typs[29] = functype(nil, []ir.Node{anonfield(typs[28])}, nil)
+ typs[30] = functype(nil, []ir.Node{anonfield(typs[2])}, nil)
+ typs[31] = functype(nil, []ir.Node{anonfield(typs[5])}, nil)
typs[32] = types.NewArray(typs[0], 32)
typs[33] = types.NewPtr(typs[32])
- typs[34] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[28])})
- typs[35] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[28])})
- typs[36] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[28])})
- typs[37] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[28])})
+ typs[34] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[28])})
+ typs[35] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[28])})
+ typs[36] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[28])})
+ typs[37] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[28])})
typs[38] = types.NewSlice(typs[28])
- typs[39] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[38])}, []*ir.Node{anonfield(typs[28])})
- typs[40] = functype(nil, []*ir.Node{anonfield(typs[28]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[15])})
+ typs[39] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[38])}, []ir.Node{anonfield(typs[28])})
+ typs[40] = functype(nil, []ir.Node{anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[15])})
typs[41] = types.NewArray(typs[0], 4)
typs[42] = types.NewPtr(typs[41])
- typs[43] = functype(nil, []*ir.Node{anonfield(typs[42]), anonfield(typs[22])}, []*ir.Node{anonfield(typs[28])})
- typs[44] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[28])})
- typs[45] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[28])})
+ typs[43] = functype(nil, []ir.Node{anonfield(typs[42]), anonfield(typs[22])}, []ir.Node{anonfield(typs[28])})
+ typs[44] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []ir.Node{anonfield(typs[28])})
+ typs[45] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15])}, []ir.Node{anonfield(typs[28])})
typs[46] = types.Runetype
typs[47] = types.NewSlice(typs[46])
- typs[48] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[47])}, []*ir.Node{anonfield(typs[28])})
+ typs[48] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[47])}, []ir.Node{anonfield(typs[28])})
typs[49] = types.NewSlice(typs[0])
- typs[50] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[49])})
+ typs[50] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28])}, []ir.Node{anonfield(typs[49])})
typs[51] = types.NewArray(typs[46], 32)
typs[52] = types.NewPtr(typs[51])
- typs[53] = functype(nil, []*ir.Node{anonfield(typs[52]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[47])})
- typs[54] = functype(nil, []*ir.Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*ir.Node{anonfield(typs[15])})
- typs[55] = functype(nil, []*ir.Node{anonfield(typs[28]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[46]), anonfield(typs[15])})
- typs[56] = functype(nil, []*ir.Node{anonfield(typs[28])}, []*ir.Node{anonfield(typs[15])})
- typs[57] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[2])}, []*ir.Node{anonfield(typs[2])})
- typs[58] = functype(nil, []*ir.Node{anonfield(typs[2])}, []*ir.Node{anonfield(typs[7])})
- typs[59] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[2])})
- typs[60] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[2])}, []*ir.Node{anonfield(typs[2]), anonfield(typs[6])})
- typs[61] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
- typs[62] = functype(nil, []*ir.Node{anonfield(typs[1])}, nil)
+ typs[53] = functype(nil, []ir.Node{anonfield(typs[52]), anonfield(typs[28])}, []ir.Node{anonfield(typs[47])})
+ typs[54] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []ir.Node{anonfield(typs[15])})
+ typs[55] = functype(nil, []ir.Node{anonfield(typs[28]), anonfield(typs[15])}, []ir.Node{anonfield(typs[46]), anonfield(typs[15])})
+ typs[56] = functype(nil, []ir.Node{anonfield(typs[28])}, []ir.Node{anonfield(typs[15])})
+ typs[57] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[2])}, []ir.Node{anonfield(typs[2])})
+ typs[58] = functype(nil, []ir.Node{anonfield(typs[2])}, []ir.Node{anonfield(typs[7])})
+ typs[59] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[3])}, []ir.Node{anonfield(typs[2])})
+ typs[60] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[2])}, []ir.Node{anonfield(typs[2]), anonfield(typs[6])})
+ typs[61] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
+ typs[62] = functype(nil, []ir.Node{anonfield(typs[1])}, nil)
typs[63] = types.NewPtr(typs[5])
- typs[64] = functype(nil, []*ir.Node{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []*ir.Node{anonfield(typs[6])})
+ typs[64] = functype(nil, []ir.Node{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []ir.Node{anonfield(typs[6])})
typs[65] = types.Types[types.TUINT32]
- typs[66] = functype(nil, nil, []*ir.Node{anonfield(typs[65])})
+ typs[66] = functype(nil, nil, []ir.Node{anonfield(typs[65])})
typs[67] = types.NewMap(typs[2], typs[2])
- typs[68] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[67])})
- typs[69] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[67])})
- typs[70] = functype(nil, nil, []*ir.Node{anonfield(typs[67])})
- typs[71] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[3])})
- typs[72] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*ir.Node{anonfield(typs[3])})
- typs[73] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*ir.Node{anonfield(typs[3])})
- typs[74] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[3]), anonfield(typs[6])})
- typs[75] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*ir.Node{anonfield(typs[3]), anonfield(typs[6])})
- typs[76] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*ir.Node{anonfield(typs[3]), anonfield(typs[6])})
- typs[77] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil)
- typs[78] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil)
- typs[79] = functype(nil, []*ir.Node{anonfield(typs[3])}, nil)
- typs[80] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67])}, nil)
+ typs[68] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []ir.Node{anonfield(typs[67])})
+ typs[69] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []ir.Node{anonfield(typs[67])})
+ typs[70] = functype(nil, nil, []ir.Node{anonfield(typs[67])})
+ typs[71] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []ir.Node{anonfield(typs[3])})
+ typs[72] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []ir.Node{anonfield(typs[3])})
+ typs[73] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []ir.Node{anonfield(typs[3])})
+ typs[74] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []ir.Node{anonfield(typs[3]), anonfield(typs[6])})
+ typs[75] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []ir.Node{anonfield(typs[3]), anonfield(typs[6])})
+ typs[76] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []ir.Node{anonfield(typs[3]), anonfield(typs[6])})
+ typs[77] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil)
+ typs[78] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil)
+ typs[79] = functype(nil, []ir.Node{anonfield(typs[3])}, nil)
+ typs[80] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67])}, nil)
typs[81] = types.NewChan(typs[2], types.Cboth)
- typs[82] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[22])}, []*ir.Node{anonfield(typs[81])})
- typs[83] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[81])})
+ typs[82] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[22])}, []ir.Node{anonfield(typs[81])})
+ typs[83] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15])}, []ir.Node{anonfield(typs[81])})
typs[84] = types.NewChan(typs[2], types.Crecv)
- typs[85] = functype(nil, []*ir.Node{anonfield(typs[84]), anonfield(typs[3])}, nil)
- typs[86] = functype(nil, []*ir.Node{anonfield(typs[84]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[6])})
+ typs[85] = functype(nil, []ir.Node{anonfield(typs[84]), anonfield(typs[3])}, nil)
+ typs[86] = functype(nil, []ir.Node{anonfield(typs[84]), anonfield(typs[3])}, []ir.Node{anonfield(typs[6])})
typs[87] = types.NewChan(typs[2], types.Csend)
- typs[88] = functype(nil, []*ir.Node{anonfield(typs[87]), anonfield(typs[3])}, nil)
+ typs[88] = functype(nil, []ir.Node{anonfield(typs[87]), anonfield(typs[3])}, nil)
typs[89] = types.NewArray(typs[0], 3)
- typs[90] = tostruct([]*ir.Node{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
- typs[91] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
- typs[92] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
- typs[93] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[15])})
- typs[94] = functype(nil, []*ir.Node{anonfield(typs[87]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[6])})
- typs[95] = functype(nil, []*ir.Node{anonfield(typs[3]), anonfield(typs[84])}, []*ir.Node{anonfield(typs[6])})
+ typs[90] = tostruct([]ir.Node{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
+ typs[91] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
+ typs[92] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
+ typs[93] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []ir.Node{anonfield(typs[15])})
+ typs[94] = functype(nil, []ir.Node{anonfield(typs[87]), anonfield(typs[3])}, []ir.Node{anonfield(typs[6])})
+ typs[95] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[84])}, []ir.Node{anonfield(typs[6])})
typs[96] = types.NewPtr(typs[6])
- typs[97] = functype(nil, []*ir.Node{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []*ir.Node{anonfield(typs[6])})
- typs[98] = functype(nil, []*ir.Node{anonfield(typs[63])}, nil)
- typs[99] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*ir.Node{anonfield(typs[15]), anonfield(typs[6])})
- typs[100] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[7])})
- typs[101] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*ir.Node{anonfield(typs[7])})
- typs[102] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*ir.Node{anonfield(typs[7])})
+ typs[97] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []ir.Node{anonfield(typs[6])})
+ typs[98] = functype(nil, []ir.Node{anonfield(typs[63])}, nil)
+ typs[99] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []ir.Node{anonfield(typs[15]), anonfield(typs[6])})
+ typs[100] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []ir.Node{anonfield(typs[7])})
+ typs[101] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []ir.Node{anonfield(typs[7])})
+ typs[102] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []ir.Node{anonfield(typs[7])})
typs[103] = types.NewSlice(typs[2])
- typs[104] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[103])})
- typs[105] = functype(nil, []*ir.Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
- typs[106] = functype(nil, []*ir.Node{anonfield(typs[7]), anonfield(typs[5])}, nil)
- typs[107] = functype(nil, []*ir.Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*ir.Node{anonfield(typs[6])})
- typs[108] = functype(nil, []*ir.Node{anonfield(typs[3]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[6])})
- typs[109] = functype(nil, []*ir.Node{anonfield(typs[7]), anonfield(typs[7])}, []*ir.Node{anonfield(typs[6])})
- typs[110] = functype(nil, []*ir.Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*ir.Node{anonfield(typs[5])})
- typs[111] = functype(nil, []*ir.Node{anonfield(typs[7]), anonfield(typs[5])}, []*ir.Node{anonfield(typs[5])})
- typs[112] = functype(nil, []*ir.Node{anonfield(typs[22]), anonfield(typs[22])}, []*ir.Node{anonfield(typs[22])})
- typs[113] = functype(nil, []*ir.Node{anonfield(typs[24]), anonfield(typs[24])}, []*ir.Node{anonfield(typs[24])})
- typs[114] = functype(nil, []*ir.Node{anonfield(typs[20])}, []*ir.Node{anonfield(typs[22])})
- typs[115] = functype(nil, []*ir.Node{anonfield(typs[20])}, []*ir.Node{anonfield(typs[24])})
- typs[116] = functype(nil, []*ir.Node{anonfield(typs[20])}, []*ir.Node{anonfield(typs[65])})
- typs[117] = functype(nil, []*ir.Node{anonfield(typs[22])}, []*ir.Node{anonfield(typs[20])})
- typs[118] = functype(nil, []*ir.Node{anonfield(typs[24])}, []*ir.Node{anonfield(typs[20])})
- typs[119] = functype(nil, []*ir.Node{anonfield(typs[65])}, []*ir.Node{anonfield(typs[20])})
- typs[120] = functype(nil, []*ir.Node{anonfield(typs[26]), anonfield(typs[26])}, []*ir.Node{anonfield(typs[26])})
- typs[121] = functype(nil, []*ir.Node{anonfield(typs[5]), anonfield(typs[5])}, nil)
- typs[122] = functype(nil, []*ir.Node{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil)
+ typs[104] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []ir.Node{anonfield(typs[103])})
+ typs[105] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
+ typs[106] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[5])}, nil)
+ typs[107] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []ir.Node{anonfield(typs[6])})
+ typs[108] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[3])}, []ir.Node{anonfield(typs[6])})
+ typs[109] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[7])}, []ir.Node{anonfield(typs[6])})
+ typs[110] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []ir.Node{anonfield(typs[5])})
+ typs[111] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[5])}, []ir.Node{anonfield(typs[5])})
+ typs[112] = functype(nil, []ir.Node{anonfield(typs[22]), anonfield(typs[22])}, []ir.Node{anonfield(typs[22])})
+ typs[113] = functype(nil, []ir.Node{anonfield(typs[24]), anonfield(typs[24])}, []ir.Node{anonfield(typs[24])})
+ typs[114] = functype(nil, []ir.Node{anonfield(typs[20])}, []ir.Node{anonfield(typs[22])})
+ typs[115] = functype(nil, []ir.Node{anonfield(typs[20])}, []ir.Node{anonfield(typs[24])})
+ typs[116] = functype(nil, []ir.Node{anonfield(typs[20])}, []ir.Node{anonfield(typs[65])})
+ typs[117] = functype(nil, []ir.Node{anonfield(typs[22])}, []ir.Node{anonfield(typs[20])})
+ typs[118] = functype(nil, []ir.Node{anonfield(typs[24])}, []ir.Node{anonfield(typs[20])})
+ typs[119] = functype(nil, []ir.Node{anonfield(typs[65])}, []ir.Node{anonfield(typs[20])})
+ typs[120] = functype(nil, []ir.Node{anonfield(typs[26]), anonfield(typs[26])}, []ir.Node{anonfield(typs[26])})
+ typs[121] = functype(nil, []ir.Node{anonfield(typs[5]), anonfield(typs[5])}, nil)
+ typs[122] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil)
typs[123] = types.NewSlice(typs[7])
- typs[124] = functype(nil, []*ir.Node{anonfield(typs[7]), anonfield(typs[123])}, nil)
+ typs[124] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[123])}, nil)
typs[125] = types.Types[types.TUINT8]
- typs[126] = functype(nil, []*ir.Node{anonfield(typs[125]), anonfield(typs[125])}, nil)
+ typs[126] = functype(nil, []ir.Node{anonfield(typs[125]), anonfield(typs[125])}, nil)
typs[127] = types.Types[types.TUINT16]
- typs[128] = functype(nil, []*ir.Node{anonfield(typs[127]), anonfield(typs[127])}, nil)
- typs[129] = functype(nil, []*ir.Node{anonfield(typs[65]), anonfield(typs[65])}, nil)
- typs[130] = functype(nil, []*ir.Node{anonfield(typs[24]), anonfield(typs[24])}, nil)
+ typs[128] = functype(nil, []ir.Node{anonfield(typs[127]), anonfield(typs[127])}, nil)
+ typs[129] = functype(nil, []ir.Node{anonfield(typs[65]), anonfield(typs[65])}, nil)
+ typs[130] = functype(nil, []ir.Node{anonfield(typs[24]), anonfield(typs[24])}, nil)
return typs[:]
}
diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go
index 2dce7b7f03..2901ae41d6 100644
--- a/src/cmd/compile/internal/gc/closure.go
+++ b/src/cmd/compile/internal/gc/closure.go
@@ -13,7 +13,7 @@ import (
"fmt"
)
-func (p *noder) funcLit(expr *syntax.FuncLit) *ir.Node {
+func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node {
xtype := p.typeExpr(expr.Type)
ntype := p.typeExpr(expr.Type)
@@ -78,7 +78,7 @@ func (p *noder) funcLit(expr *syntax.FuncLit) *ir.Node {
// function associated with the closure.
// TODO: This creation of the named function should probably really be done in a
// separate pass from type-checking.
-func typecheckclosure(clo *ir.Node, top int) {
+func typecheckclosure(clo ir.Node, top int) {
fn := clo.Func()
dcl := fn.Decl
// Set current associated iota value, so iota can be used inside
@@ -140,7 +140,7 @@ var globClosgen int
// closurename generates a new unique name for a closure within
// outerfunc.
-func closurename(outerfunc *ir.Node) *types.Sym {
+func closurename(outerfunc ir.Node) *types.Sym {
outer := "glob."
prefix := "func"
gen := &globClosgen
@@ -172,7 +172,7 @@ var capturevarscomplete bool
// by value or by reference.
// We use value capturing for values <= 128 bytes that are never reassigned
// after capturing (effectively constant).
-func capturevars(dcl *ir.Node) {
+func capturevars(dcl ir.Node) {
lno := base.Pos
base.Pos = dcl.Pos()
fn := dcl.Func()
@@ -227,7 +227,7 @@ func capturevars(dcl *ir.Node) {
// transformclosure is called in a separate phase after escape analysis.
// It transform closure bodies to properly reference captured variables.
-func transformclosure(dcl *ir.Node) {
+func transformclosure(dcl ir.Node) {
lno := base.Pos
base.Pos = dcl.Pos()
fn := dcl.Func()
@@ -253,7 +253,7 @@ func transformclosure(dcl *ir.Node) {
// We are going to insert captured variables before input args.
var params []*types.Field
- var decls []*ir.Node
+ var decls []ir.Node
for _, v := range fn.ClosureVars.Slice() {
if !v.Name().Byval() {
// If v of type T is captured by reference,
@@ -284,7 +284,7 @@ func transformclosure(dcl *ir.Node) {
dcl.SetType(f.Type()) // update type of ODCLFUNC
} else {
// The closure is not called, so it is going to stay as closure.
- var body []*ir.Node
+ var body []ir.Node
offset := int64(Widthptr)
for _, v := range fn.ClosureVars.Slice() {
// cv refers to the field inside of closure OSTRUCTLIT.
@@ -332,13 +332,13 @@ func transformclosure(dcl *ir.Node) {
// hasemptycvars reports whether closure clo has an
// empty list of captured vars.
-func hasemptycvars(clo *ir.Node) bool {
+func hasemptycvars(clo ir.Node) bool {
return clo.Func().ClosureVars.Len() == 0
}
// closuredebugruntimecheck applies boilerplate checks for debug flags
// and compiling runtime
-func closuredebugruntimecheck(clo *ir.Node) {
+func closuredebugruntimecheck(clo ir.Node) {
if base.Debug.Closure > 0 {
if clo.Esc() == EscHeap {
base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func().ClosureVars)
@@ -354,7 +354,7 @@ func closuredebugruntimecheck(clo *ir.Node) {
// closureType returns the struct type used to hold all the information
// needed in the closure for clo (clo must be a OCLOSURE node).
// The address of a variable of the returned type can be cast to a func.
-func closureType(clo *ir.Node) *types.Type {
+func closureType(clo ir.Node) *types.Type {
// Create closure in the form of a composite literal.
// supposing the closure captures an int i and a string s
// and has one float64 argument and no results,
@@ -368,7 +368,7 @@ func closureType(clo *ir.Node) *types.Type {
// The information appears in the binary in the form of type descriptors;
// the struct is unnamed so that closures in multiple packages with the
// same struct type can share the descriptor.
- fields := []*ir.Node{
+ fields := []ir.Node{
namedfield(".F", types.Types[types.TUINTPTR]),
}
for _, v := range clo.Func().ClosureVars.Slice() {
@@ -383,7 +383,7 @@ func closureType(clo *ir.Node) *types.Type {
return typ
}
-func walkclosure(clo *ir.Node, init *ir.Nodes) *ir.Node {
+func walkclosure(clo ir.Node, init *ir.Nodes) ir.Node {
fn := clo.Func()
// If no closure vars, don't bother wrapping.
@@ -399,7 +399,7 @@ func walkclosure(clo *ir.Node, init *ir.Nodes) *ir.Node {
clos := ir.Nod(ir.OCOMPLIT, nil, typenod(typ))
clos.SetEsc(clo.Esc())
- clos.PtrList().Set(append([]*ir.Node{ir.Nod(ir.OCFUNC, fn.Nname, nil)}, fn.ClosureEnter.Slice()...))
+ clos.PtrList().Set(append([]ir.Node{ir.Nod(ir.OCFUNC, fn.Nname, nil)}, fn.ClosureEnter.Slice()...))
clos = ir.Nod(ir.OADDR, clos, nil)
clos.SetEsc(clo.Esc())
@@ -419,7 +419,7 @@ func walkclosure(clo *ir.Node, init *ir.Nodes) *ir.Node {
return walkexpr(clos, init)
}
-func typecheckpartialcall(dot *ir.Node, sym *types.Sym) {
+func typecheckpartialcall(dot ir.Node, sym *types.Sym) {
switch dot.Op() {
case ir.ODOTINTER, ir.ODOTMETH:
break
@@ -440,7 +440,7 @@ func typecheckpartialcall(dot *ir.Node, sym *types.Sym) {
// makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed
// for partial calls.
-func makepartialcall(dot *ir.Node, t0 *types.Type, meth *types.Sym) *ir.Node {
+func makepartialcall(dot ir.Node, t0 *types.Type, meth *types.Sym) ir.Node {
rcvrtype := dot.Left().Type()
sym := methodSymSuffix(rcvrtype, meth, "-fm")
@@ -484,7 +484,7 @@ func makepartialcall(dot *ir.Node, t0 *types.Type, meth *types.Sym) *ir.Node {
ptr := NewName(lookup(".this"))
declare(ptr, ir.PAUTO)
ptr.Name().SetUsed(true)
- var body []*ir.Node
+ var body []ir.Node
if rcvrtype.IsPtr() || rcvrtype.IsInterface() {
ptr.SetType(rcvrtype)
body = append(body, ir.Nod(ir.OAS, ptr, cv))
@@ -522,8 +522,8 @@ func makepartialcall(dot *ir.Node, t0 *types.Type, meth *types.Sym) *ir.Node {
// partialCallType returns the struct type used to hold all the information
// needed in the closure for n (n must be a OCALLPART node).
// The address of a variable of the returned type can be cast to a func.
-func partialCallType(n *ir.Node) *types.Type {
- t := tostruct([]*ir.Node{
+func partialCallType(n ir.Node) *types.Type {
+ t := tostruct([]ir.Node{
namedfield("F", types.Types[types.TUINTPTR]),
namedfield("R", n.Left().Type()),
})
@@ -531,7 +531,7 @@ func partialCallType(n *ir.Node) *types.Type {
return t
}
-func walkpartialcall(n *ir.Node, init *ir.Nodes) *ir.Node {
+func walkpartialcall(n ir.Node, init *ir.Nodes) ir.Node {
// Create closure in the form of a composite literal.
// For x.M with receiver (x) type T, the generated code looks like:
//
@@ -579,7 +579,7 @@ func walkpartialcall(n *ir.Node, init *ir.Nodes) *ir.Node {
// callpartMethod returns the *types.Field representing the method
// referenced by method value n.
-func callpartMethod(n *ir.Node) *types.Field {
+func callpartMethod(n ir.Node) *types.Field {
if n.Op() != ir.OCALLPART {
base.Fatalf("expected OCALLPART, got %v", n)
}
diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go
index 27e54b46c8..4beb85245f 100644
--- a/src/cmd/compile/internal/gc/const.go
+++ b/src/cmd/compile/internal/gc/const.go
@@ -84,8 +84,8 @@ func trunccmplxlit(v constant.Value, t *types.Type) constant.Value {
}
// TODO(mdempsky): Replace these with better APIs.
-func convlit(n *ir.Node, t *types.Type) *ir.Node { return convlit1(n, t, false, nil) }
-func defaultlit(n *ir.Node, t *types.Type) *ir.Node { return convlit1(n, t, false, nil) }
+func convlit(n ir.Node, t *types.Type) ir.Node { return convlit1(n, t, false, nil) }
+func defaultlit(n ir.Node, t *types.Type) ir.Node { return convlit1(n, t, false, nil) }
// convlit1 converts an untyped expression n to type t. If n already
// has a type, convlit1 has no effect.
@@ -98,7 +98,7 @@ func defaultlit(n *ir.Node, t *types.Type) *ir.Node { return convlit1(n, t, fals
//
// If there's an error converting n to t, context is used in the error
// message.
-func convlit1(n *ir.Node, t *types.Type, explicit bool, context func() string) *ir.Node {
+func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir.Node {
if explicit && t == nil {
base.Fatalf("explicit conversion missing type")
}
@@ -438,7 +438,7 @@ var tokenForOp = [...]token.Token{
// If n is not a constant, evalConst returns n.
// Otherwise, evalConst returns a new OLITERAL with the same value as n,
// and with .Orig pointing back to n.
-func evalConst(n *ir.Node) *ir.Node {
+func evalConst(n ir.Node) ir.Node {
nl, nr := n.Left(), n.Right()
// Pick off just the opcodes that can be constant evaluated.
@@ -525,7 +525,7 @@ func evalConst(n *ir.Node) *ir.Node {
}
return origConst(n, constant.MakeString(strings.Join(strs, "")))
}
- newList := make([]*ir.Node, 0, need)
+ newList := make([]ir.Node, 0, need)
for i := 0; i < len(s); i++ {
if ir.IsConst(s[i], constant.String) && i+1 < len(s) && ir.IsConst(s[i+1], constant.String) {
// merge from i up to but not including i2
@@ -619,7 +619,7 @@ var overflowNames = [...]string{
}
// origConst returns an OLITERAL with orig n and value v.
-func origConst(n *ir.Node, v constant.Value) *ir.Node {
+func origConst(n ir.Node, v constant.Value) ir.Node {
lno := setlineno(n)
v = convertVal(v, n.Type(), false)
base.Pos = lno
@@ -648,11 +648,11 @@ func origConst(n *ir.Node, v constant.Value) *ir.Node {
return n
}
-func origBoolConst(n *ir.Node, v bool) *ir.Node {
+func origBoolConst(n ir.Node, v bool) ir.Node {
return origConst(n, constant.MakeBool(v))
}
-func origIntConst(n *ir.Node, v int64) *ir.Node {
+func origIntConst(n ir.Node, v int64) ir.Node {
return origConst(n, constant.MakeInt64(v))
}
@@ -662,7 +662,7 @@ func origIntConst(n *ir.Node, v int64) *ir.Node {
// force means must assign concrete (non-ideal) type.
// The results of defaultlit2 MUST be assigned back to l and r, e.g.
// n.Left, n.Right = defaultlit2(n.Left, n.Right, force)
-func defaultlit2(l *ir.Node, r *ir.Node, force bool) (*ir.Node, *ir.Node) {
+func defaultlit2(l ir.Node, r ir.Node, force bool) (ir.Node, ir.Node) {
if l.Type() == nil || r.Type() == nil {
return l, r
}
@@ -747,7 +747,7 @@ func defaultType(t *types.Type) *types.Type {
return nil
}
-func smallintconst(n *ir.Node) bool {
+func smallintconst(n ir.Node) bool {
if n.Op() == ir.OLITERAL {
v, ok := constant.Int64Val(n.Val())
return ok && int64(int32(v)) == v
@@ -760,7 +760,7 @@ func smallintconst(n *ir.Node) bool {
// If n is not a constant expression, not representable as an
// integer, or negative, it returns -1. If n is too large, it
// returns -2.
-func indexconst(n *ir.Node) int64 {
+func indexconst(n ir.Node) int64 {
if n.Op() != ir.OLITERAL {
return -1
}
@@ -783,11 +783,11 @@ func indexconst(n *ir.Node) int64 {
//
// Expressions derived from nil, like string([]byte(nil)), while they
// may be known at compile time, are not Go language constants.
-func isGoConst(n *ir.Node) bool {
+func isGoConst(n ir.Node) bool {
return n.Op() == ir.OLITERAL
}
-func hascallchan(n *ir.Node) bool {
+func hascallchan(n ir.Node) bool {
if n == nil {
return false
}
@@ -851,7 +851,7 @@ type constSetKey struct {
// where are used in the error message.
//
// n must not be an untyped constant.
-func (s *constSet) add(pos src.XPos, n *ir.Node, what, where string) {
+func (s *constSet) add(pos src.XPos, n ir.Node, what, where string) {
if n.Op() == ir.OCONVIFACE && n.Implicit() {
n = n.Left()
}
@@ -908,7 +908,7 @@ func (s *constSet) add(pos src.XPos, n *ir.Node, what, where string) {
// the latter is non-obvious.
//
// TODO(mdempsky): This could probably be a fmt.go flag.
-func nodeAndVal(n *ir.Node) string {
+func nodeAndVal(n ir.Node) string {
show := n.String()
val := ir.ConstValue(n)
if s := fmt.Sprintf("%#v", val); show != s {
diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go
index 8980c47e2c..2a7be137c0 100644
--- a/src/cmd/compile/internal/gc/dcl.go
+++ b/src/cmd/compile/internal/gc/dcl.go
@@ -18,7 +18,7 @@ import (
// Declaration stack & operations
-var externdcl []*ir.Node
+var externdcl []ir.Node
func testdclstack() {
if !types.IsDclstackValid() {
@@ -59,7 +59,7 @@ var declare_typegen int
// declare records that Node n declares symbol n.Sym in the specified
// declaration context.
-func declare(n *ir.Node, ctxt ir.Class) {
+func declare(n ir.Node, ctxt ir.Class) {
if ir.IsBlank(n) {
return
}
@@ -128,7 +128,7 @@ func declare(n *ir.Node, ctxt ir.Class) {
autoexport(n, ctxt)
}
-func addvar(n *ir.Node, t *types.Type, ctxt ir.Class) {
+func addvar(n ir.Node, t *types.Type, ctxt ir.Class) {
if n == nil || n.Sym() == nil || (n.Op() != ir.ONAME && n.Op() != ir.ONONAME) || t == nil {
base.Fatalf("addvar: n=%v t=%v nil", n, t)
}
@@ -140,8 +140,8 @@ func addvar(n *ir.Node, t *types.Type, ctxt ir.Class) {
// declare variables from grammar
// new_name_list (type | [type] = expr_list)
-func variter(vl []*ir.Node, t *ir.Node, el []*ir.Node) []*ir.Node {
- var init []*ir.Node
+func variter(vl []ir.Node, t ir.Node, el []ir.Node) []ir.Node {
+ var init []ir.Node
doexpr := len(el) > 0
if len(el) == 1 && len(vl) > 1 {
@@ -164,7 +164,7 @@ func variter(vl []*ir.Node, t *ir.Node, el []*ir.Node) []*ir.Node {
nel := len(el)
for _, v := range vl {
- var e *ir.Node
+ var e ir.Node
if doexpr {
if len(el) == 0 {
base.Errorf("assignment mismatch: %d variables but %d values", len(vl), nel)
@@ -197,7 +197,7 @@ func variter(vl []*ir.Node, t *ir.Node, el []*ir.Node) []*ir.Node {
}
// newnoname returns a new ONONAME Node associated with symbol s.
-func newnoname(s *types.Sym) *ir.Node {
+func newnoname(s *types.Sym) ir.Node {
if s == nil {
base.Fatalf("newnoname nil")
}
@@ -208,7 +208,7 @@ func newnoname(s *types.Sym) *ir.Node {
}
// newfuncnamel generates a new name node for a function or method.
-func newfuncnamel(pos src.XPos, s *types.Sym, fn *ir.Func) *ir.Node {
+func newfuncnamel(pos src.XPos, s *types.Sym, fn *ir.Func) ir.Node {
if fn.Nname != nil {
base.Fatalf("newfuncnamel - already have name")
}
@@ -220,17 +220,17 @@ func newfuncnamel(pos src.XPos, s *types.Sym, fn *ir.Func) *ir.Node {
// this generates a new name node for a name
// being declared.
-func dclname(s *types.Sym) *ir.Node {
+func dclname(s *types.Sym) ir.Node {
n := NewName(s)
n.SetOp(ir.ONONAME) // caller will correct it
return n
}
-func typenod(t *types.Type) *ir.Node {
+func typenod(t *types.Type) ir.Node {
return typenodl(src.NoXPos, t)
}
-func typenodl(pos src.XPos, t *types.Type) *ir.Node {
+func typenodl(pos src.XPos, t *types.Type) ir.Node {
// if we copied another type with *t = *u
// then t->nod might be out of date, so
// check t->nod->type too
@@ -243,15 +243,15 @@ func typenodl(pos src.XPos, t *types.Type) *ir.Node {
return ir.AsNode(t.Nod)
}
-func anonfield(typ *types.Type) *ir.Node {
+func anonfield(typ *types.Type) ir.Node {
return symfield(nil, typ)
}
-func namedfield(s string, typ *types.Type) *ir.Node {
+func namedfield(s string, typ *types.Type) ir.Node {
return symfield(lookup(s), typ)
}
-func symfield(s *types.Sym, typ *types.Type) *ir.Node {
+func symfield(s *types.Sym, typ *types.Type) ir.Node {
n := nodSym(ir.ODCLFIELD, nil, s)
n.SetType(typ)
return n
@@ -261,7 +261,7 @@ func symfield(s *types.Sym, typ *types.Type) *ir.Node {
// If no such Node currently exists, an ONONAME Node is returned instead.
// Automatically creates a new closure variable if the referenced symbol was
// declared in a different (containing) function.
-func oldname(s *types.Sym) *ir.Node {
+func oldname(s *types.Sym) ir.Node {
n := ir.AsNode(s.Def)
if n == nil {
// Maybe a top-level declaration will come along later to
@@ -302,7 +302,7 @@ func oldname(s *types.Sym) *ir.Node {
}
// importName is like oldname, but it reports an error if sym is from another package and not exported.
-func importName(sym *types.Sym) *ir.Node {
+func importName(sym *types.Sym) ir.Node {
n := oldname(sym)
if !types.IsExported(sym.Name) && sym.Pkg != ir.LocalPkg {
n.SetDiag(true)
@@ -312,7 +312,7 @@ func importName(sym *types.Sym) *ir.Node {
}
// := declarations
-func colasname(n *ir.Node) bool {
+func colasname(n ir.Node) bool {
switch n.Op() {
case ir.ONAME,
ir.ONONAME,
@@ -325,7 +325,7 @@ func colasname(n *ir.Node) bool {
return false
}
-func colasdefn(left []*ir.Node, defn *ir.Node) {
+func colasdefn(left []ir.Node, defn ir.Node) {
for _, n := range left {
if n.Sym() != nil {
n.Sym().SetUniq(true)
@@ -370,7 +370,7 @@ func colasdefn(left []*ir.Node, defn *ir.Node) {
// declare the arguments in an
// interface field declaration.
-func ifacedcl(n *ir.Node) {
+func ifacedcl(n ir.Node) {
if n.Op() != ir.ODCLFIELD || n.Left() == nil {
base.Fatalf("ifacedcl")
}
@@ -384,7 +384,7 @@ func ifacedcl(n *ir.Node) {
// and declare the arguments.
// called in extern-declaration context
// returns in auto-declaration context.
-func funchdr(n *ir.Node) {
+func funchdr(n ir.Node) {
// change the declaration context from extern to auto
funcStack = append(funcStack, funcStackEnt{Curfn, dclcontext})
Curfn = n
@@ -399,7 +399,7 @@ func funchdr(n *ir.Node) {
}
}
-func funcargs(nt *ir.Node) {
+func funcargs(nt ir.Node) {
if nt.Op() != ir.OTFUNC {
base.Fatalf("funcargs %v", nt.Op())
}
@@ -449,7 +449,7 @@ func funcargs(nt *ir.Node) {
vargen = oldvargen
}
-func funcarg(n *ir.Node, ctxt ir.Class) {
+func funcarg(n ir.Node, ctxt ir.Class) {
if n.Op() != ir.ODCLFIELD {
base.Fatalf("funcarg %v", n.Op())
}
@@ -499,7 +499,7 @@ func funcarg2(f *types.Field, ctxt ir.Class) {
var funcStack []funcStackEnt // stack of previous values of Curfn/dclcontext
type funcStackEnt struct {
- curfn *ir.Node
+ curfn ir.Node
dclcontext ir.Class
}
@@ -535,7 +535,7 @@ func checkembeddedtype(t *types.Type) {
}
}
-func structfield(n *ir.Node) *types.Field {
+func structfield(n ir.Node) *types.Field {
lno := base.Pos
base.Pos = n.Pos()
@@ -582,7 +582,7 @@ func checkdupfields(what string, fss ...[]*types.Field) {
// convert a parsed id/type list into
// a type for struct/interface/arglist
-func tostruct(l []*ir.Node) *types.Type {
+func tostruct(l []ir.Node) *types.Type {
t := types.New(types.TSTRUCT)
fields := make([]*types.Field, len(l))
@@ -604,7 +604,7 @@ func tostruct(l []*ir.Node) *types.Type {
return t
}
-func tofunargs(l []*ir.Node, funarg types.Funarg) *types.Type {
+func tofunargs(l []ir.Node, funarg types.Funarg) *types.Type {
t := types.New(types.TSTRUCT)
t.StructType().Funarg = funarg
@@ -632,7 +632,7 @@ func tofunargsfield(fields []*types.Field, funarg types.Funarg) *types.Type {
return t
}
-func interfacefield(n *ir.Node) *types.Field {
+func interfacefield(n ir.Node) *types.Field {
lno := base.Pos
base.Pos = n.Pos()
@@ -661,7 +661,7 @@ func interfacefield(n *ir.Node) *types.Field {
return f
}
-func tointerface(l []*ir.Node) *types.Type {
+func tointerface(l []ir.Node) *types.Type {
if len(l) == 0 {
return types.Types[types.TINTER]
}
@@ -678,7 +678,7 @@ func tointerface(l []*ir.Node) *types.Type {
return t
}
-func fakeRecv() *ir.Node {
+func fakeRecv() ir.Node {
return anonfield(types.FakeRecvType())
}
@@ -694,12 +694,12 @@ func isifacemethod(f *types.Type) bool {
}
// turn a parsed function declaration into a type
-func functype(this *ir.Node, in, out []*ir.Node) *types.Type {
+func functype(this ir.Node, in, out []ir.Node) *types.Type {
t := types.New(types.TFUNC)
- var rcvr []*ir.Node
+ var rcvr []ir.Node
if this != nil {
- rcvr = []*ir.Node{this}
+ rcvr = []ir.Node{this}
}
t.FuncType().Receiver = tofunargs(rcvr, types.FunargRcvr)
t.FuncType().Params = tofunargs(in, types.FunargParams)
@@ -799,7 +799,7 @@ func methodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sy
// - msym is the method symbol
// - t is function type (with receiver)
// Returns a pointer to the existing or added Field; or nil if there's an error.
-func addmethod(n *ir.Node, msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field {
+func addmethod(n ir.Node, msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field {
if msym == nil {
base.Fatalf("no method symbol")
}
@@ -935,7 +935,7 @@ func makefuncsym(s *types.Sym) {
}
// setNodeNameFunc marks a node as a function.
-func setNodeNameFunc(n *ir.Node) {
+func setNodeNameFunc(n ir.Node) {
if n.Op() != ir.ONAME || n.Class() != ir.Pxxx {
base.Fatalf("expected ONAME/Pxxx node, got %v", n)
}
@@ -944,7 +944,7 @@ func setNodeNameFunc(n *ir.Node) {
n.Sym().SetFunc(true)
}
-func dclfunc(sym *types.Sym, tfn *ir.Node) *ir.Node {
+func dclfunc(sym *types.Sym, tfn ir.Node) ir.Node {
if tfn.Op() != ir.OTFUNC {
base.Fatalf("expected OTFUNC node, got %v", tfn)
}
@@ -963,14 +963,14 @@ type nowritebarrierrecChecker struct {
// extraCalls contains extra function calls that may not be
// visible during later analysis. It maps from the ODCLFUNC of
// the caller to a list of callees.
- extraCalls map[*ir.Node][]nowritebarrierrecCall
+ extraCalls map[ir.Node][]nowritebarrierrecCall
// curfn is the current function during AST walks.
- curfn *ir.Node
+ curfn ir.Node
}
type nowritebarrierrecCall struct {
- target *ir.Node // ODCLFUNC of caller or callee
+ target ir.Node // ODCLFUNC of caller or callee
lineno src.XPos // line of call
}
@@ -978,7 +978,7 @@ type nowritebarrierrecCall struct {
// must be called before transformclosure and walk.
func newNowritebarrierrecChecker() *nowritebarrierrecChecker {
c := &nowritebarrierrecChecker{
- extraCalls: make(map[*ir.Node][]nowritebarrierrecCall),
+ extraCalls: make(map[ir.Node][]nowritebarrierrecCall),
}
// Find all systemstack calls and record their targets. In
@@ -997,7 +997,7 @@ func newNowritebarrierrecChecker() *nowritebarrierrecChecker {
return c
}
-func (c *nowritebarrierrecChecker) findExtraCalls(n *ir.Node) bool {
+func (c *nowritebarrierrecChecker) findExtraCalls(n ir.Node) bool {
if n.Op() != ir.OCALLFUNC {
return true
}
@@ -1009,7 +1009,7 @@ func (c *nowritebarrierrecChecker) findExtraCalls(n *ir.Node) bool {
return true
}
- var callee *ir.Node
+ var callee ir.Node
arg := n.List().First()
switch arg.Op() {
case ir.ONAME:
@@ -1034,7 +1034,7 @@ func (c *nowritebarrierrecChecker) findExtraCalls(n *ir.Node) bool {
// because that's all we know after we start SSA.
//
// This can be called concurrently for different "from" Nodes.
-func (c *nowritebarrierrecChecker) recordCall(from *ir.Node, to *obj.LSym, pos src.XPos) {
+func (c *nowritebarrierrecChecker) recordCall(from ir.Node, to *obj.LSym, pos src.XPos) {
if from.Op() != ir.ODCLFUNC {
base.Fatalf("expected ODCLFUNC, got %v", from)
}
@@ -1052,14 +1052,14 @@ func (c *nowritebarrierrecChecker) check() {
// capture all calls created by lowering, but this means we
// only get to see the obj.LSyms of calls. symToFunc lets us
// get back to the ODCLFUNCs.
- symToFunc := make(map[*obj.LSym]*ir.Node)
+ symToFunc := make(map[*obj.LSym]ir.Node)
// funcs records the back-edges of the BFS call graph walk. It
// maps from the ODCLFUNC of each function that must not have
// write barriers to the call that inhibits them. Functions
// that are directly marked go:nowritebarrierrec are in this
// map with a zero-valued nowritebarrierrecCall. This also
// acts as the set of marks for the BFS of the call graph.
- funcs := make(map[*ir.Node]nowritebarrierrecCall)
+ funcs := make(map[ir.Node]nowritebarrierrecCall)
// q is the queue of ODCLFUNC Nodes to visit in BFS order.
var q ir.NodeQueue
@@ -1083,7 +1083,7 @@ func (c *nowritebarrierrecChecker) check() {
// Perform a BFS of the call graph from all
// go:nowritebarrierrec functions.
- enqueue := func(src, target *ir.Node, pos src.XPos) {
+ enqueue := func(src, target ir.Node, pos src.XPos) {
if target.Func().Pragma&ir.Yeswritebarrierrec != 0 {
// Don't flow into this function.
return
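The extraCalls and funcs retypings above work because interface values are valid, well-behaved map keys. A minimal standalone sketch of that property, not compiler code; Node and node stand in for ir.Node and the unexported ir.node:

	package main

	import "fmt"

	type Node interface{ Op() int }

	type node struct{ op int }

	func (n *node) Op() int { return n.op }

	func main() {
		// Interface keys compare by dynamic type and value, so when
		// every key stored is a *node, map[Node]T behaves exactly
		// like the old map[*node]T.
		calls := make(map[Node][]string)
		fn := &node{op: 1}
		calls[fn] = append(calls[fn], "systemstack")
		fmt.Println(len(calls[fn])) // 1
	}
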
diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/gc/embed.go
index 03703f68d5..33b05a5bf0 100644
--- a/src/cmd/compile/internal/gc/embed.go
+++ b/src/cmd/compile/internal/gc/embed.go
@@ -17,7 +17,7 @@ import (
"strings"
)
-var embedlist []*ir.Node
+var embedlist []ir.Node
const (
embedUnknown = iota
@@ -28,7 +28,7 @@ const (
var numLocalEmbed int
-func varEmbed(p *noder, names []*ir.Node, typ *ir.Node, exprs []*ir.Node, embeds []PragmaEmbed) (newExprs []*ir.Node) {
+func varEmbed(p *noder, names []ir.Node, typ ir.Node, exprs []ir.Node, embeds []PragmaEmbed) (newExprs []ir.Node) {
haveEmbed := false
for _, decl := range p.file.DeclList {
imp, ok := decl.(*syntax.ImportDecl)
@@ -118,7 +118,7 @@ func varEmbed(p *noder, names []*ir.Node, typ *ir.Node, exprs []*ir.Node, embeds
v.Name().Param.Ntype = typ
v.SetClass(ir.PEXTERN)
externdcl = append(externdcl, v)
- exprs = []*ir.Node{v}
+ exprs = []ir.Node{v}
}
v.Name().Param.SetEmbedFiles(list)
@@ -130,7 +130,7 @@ func varEmbed(p *noder, names []*ir.Node, typ *ir.Node, exprs []*ir.Node, embeds
// The match is approximate because we haven't done scope resolution yet and
// can't tell whether "string" and "byte" really mean "string" and "byte".
// The result must be confirmed later, after type checking, using embedKind.
-func embedKindApprox(typ *ir.Node) int {
+func embedKindApprox(typ ir.Node) int {
if typ.Sym() != nil && typ.Sym().Name == "FS" && (typ.Sym().Pkg.Path == "embed" || (typ.Sym().Pkg == ir.LocalPkg && base.Ctxt.Pkgpath == "embed")) {
return embedFiles
}
@@ -192,7 +192,7 @@ func dumpembeds() {
// initEmbed emits the init data for a //go:embed variable,
// which is either a string, a []byte, or an embed.FS.
-func initEmbed(v *ir.Node) {
+func initEmbed(v ir.Node) {
files := v.Name().Param.EmbedFiles()
switch kind := embedKind(v.Type()); kind {
case embedUnknown:
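The exprs = []ir.Node{v} rewrite above is not just a respelling: Go has no implicit conversion between []*T and []I, so each slice literal must be edited. A minimal sketch with simplified Node/node types:

	package main

	type Node interface{ Op() int }

	type node struct{ op int }

	func (n *node) Op() int { return n.op }

	func main() {
		v := &node{}
		// old: exprs := []*node{v}
		// []*node and []Node have different element layouts, so the
		// literal itself is rewritten; each element is boxed into an
		// interface value.
		exprs := []Node{v}
		_ = exprs
	}
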
diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go
index f1786e74dc..783bc8c41d 100644
--- a/src/cmd/compile/internal/gc/escape.go
+++ b/src/cmd/compile/internal/gc/escape.go
@@ -86,7 +86,7 @@ import (
type Escape struct {
allLocs []*EscLocation
- curfn *ir.Node
+ curfn ir.Node
// loopDepth counts the current loop nesting depth within
// curfn. It increments within each "for" loop and at each
@@ -101,8 +101,8 @@ type Escape struct {
// An EscLocation represents an abstract location that stores a Go
// variable.
type EscLocation struct {
- n *ir.Node // represented variable or expression, if any
- curfn *ir.Node // enclosing function
+ n ir.Node // represented variable or expression, if any
+ curfn ir.Node // enclosing function
edges []EscEdge // incoming edges
loopDepth int // loopDepth at declaration
@@ -147,7 +147,7 @@ func init() {
}
// escFmt is called from node printing to print information about escape analysis results.
-func escFmt(n *ir.Node, short bool) string {
+func escFmt(n ir.Node, short bool) string {
text := ""
switch n.Esc() {
case EscUnknown:
@@ -179,7 +179,7 @@ func escFmt(n *ir.Node, short bool) string {
// escapeFuncs performs escape analysis on a minimal batch of
// functions.
-func escapeFuncs(fns []*ir.Node, recursive bool) {
+func escapeFuncs(fns []ir.Node, recursive bool) {
for _, fn := range fns {
if fn.Op() != ir.ODCLFUNC {
base.Fatalf("unexpected node: %v", fn)
@@ -202,7 +202,7 @@ func escapeFuncs(fns []*ir.Node, recursive bool) {
e.finish(fns)
}
-func (e *Escape) initFunc(fn *ir.Node) {
+func (e *Escape) initFunc(fn ir.Node) {
if fn.Op() != ir.ODCLFUNC || fn.Esc() != EscFuncUnknown {
base.Fatalf("unexpected node: %v", fn)
}
@@ -222,11 +222,11 @@ func (e *Escape) initFunc(fn *ir.Node) {
}
}
-func (e *Escape) walkFunc(fn *ir.Node) {
+func (e *Escape) walkFunc(fn ir.Node) {
fn.SetEsc(EscFuncStarted)
// Identify labels that mark the head of an unstructured loop.
- ir.InspectList(fn.Body(), func(n *ir.Node) bool {
+ ir.InspectList(fn.Body(), func(n ir.Node) bool {
switch n.Op() {
case ir.OLABEL:
n.Sym().Label = nonlooping
@@ -274,7 +274,7 @@ func (e *Escape) walkFunc(fn *ir.Node) {
// }
// stmt evaluates a single Go statement.
-func (e *Escape) stmt(n *ir.Node) {
+func (e *Escape) stmt(n ir.Node) {
if n == nil {
return
}
@@ -447,7 +447,7 @@ func (e *Escape) block(l ir.Nodes) {
// expr models evaluating an expression n and flowing the result into
// hole k.
-func (e *Escape) expr(k EscHole, n *ir.Node) {
+func (e *Escape) expr(k EscHole, n ir.Node) {
if n == nil {
return
}
@@ -455,7 +455,7 @@ func (e *Escape) expr(k EscHole, n *ir.Node) {
e.exprSkipInit(k, n)
}
-func (e *Escape) exprSkipInit(k EscHole, n *ir.Node) {
+func (e *Escape) exprSkipInit(k EscHole, n ir.Node) {
if n == nil {
return
}
@@ -653,7 +653,7 @@ func (e *Escape) exprSkipInit(k EscHole, n *ir.Node) {
// unsafeValue evaluates a uintptr-typed arithmetic expression looking
// for conversions from an unsafe.Pointer.
-func (e *Escape) unsafeValue(k EscHole, n *ir.Node) {
+func (e *Escape) unsafeValue(k EscHole, n ir.Node) {
if n.Type().Etype != types.TUINTPTR {
base.Fatalf("unexpected type %v for %v", n.Type(), n)
}
@@ -690,7 +690,7 @@ func (e *Escape) unsafeValue(k EscHole, n *ir.Node) {
// discard evaluates an expression n for side-effects, but discards
// its value.
-func (e *Escape) discard(n *ir.Node) {
+func (e *Escape) discard(n ir.Node) {
e.expr(e.discardHole(), n)
}
@@ -702,7 +702,7 @@ func (e *Escape) discards(l ir.Nodes) {
// addr evaluates an addressable expression n and returns an EscHole
// that represents storing into the represented location.
-func (e *Escape) addr(n *ir.Node) EscHole {
+func (e *Escape) addr(n ir.Node) EscHole {
if n == nil || ir.IsBlank(n) {
// Can happen at least in OSELRECV.
// TODO(mdempsky): Anywhere else?
@@ -751,7 +751,7 @@ func (e *Escape) addrs(l ir.Nodes) []EscHole {
}
// assign evaluates the assignment dst = src.
-func (e *Escape) assign(dst, src *ir.Node, why string, where *ir.Node) {
+func (e *Escape) assign(dst, src ir.Node, why string, where ir.Node) {
// Filter out some no-op assignments for escape analysis.
ignore := dst != nil && src != nil && isSelfAssign(dst, src)
if ignore && base.Flag.LowerM != 0 {
@@ -769,14 +769,14 @@ func (e *Escape) assign(dst, src *ir.Node, why string, where *ir.Node) {
}
}
-func (e *Escape) assignHeap(src *ir.Node, why string, where *ir.Node) {
+func (e *Escape) assignHeap(src ir.Node, why string, where ir.Node) {
e.expr(e.heapHole().note(where, why), src)
}
// call evaluates a call expression, including builtin calls. ks
// should contain the holes representing where the function callee's
// results flow; where is the OGO/ODEFER context of the call, if any.
-func (e *Escape) call(ks []EscHole, call, where *ir.Node) {
+func (e *Escape) call(ks []EscHole, call, where ir.Node) {
topLevelDefer := where != nil && where.Op() == ir.ODEFER && e.loopDepth == 1
if topLevelDefer {
// force stack allocation of defer record, unless
@@ -784,7 +784,7 @@ func (e *Escape) call(ks []EscHole, call, where *ir.Node) {
where.SetEsc(EscNever)
}
- argument := func(k EscHole, arg *ir.Node) {
+ argument := func(k EscHole, arg ir.Node) {
if topLevelDefer {
// Top-level defer arguments don't escape to
// heap, but they do need to last until end of
@@ -805,7 +805,7 @@ func (e *Escape) call(ks []EscHole, call, where *ir.Node) {
fixVariadicCall(call)
// Pick out the function callee, if statically known.
- var fn *ir.Node
+ var fn ir.Node
switch call.Op() {
case ir.OCALLFUNC:
switch v := staticValue(call.Left()); {
@@ -894,7 +894,7 @@ func (e *Escape) call(ks []EscHole, call, where *ir.Node) {
// ks should contain the holes representing where the function
// callee's results flow. fn is the statically-known callee function,
// if any.
-func (e *Escape) tagHole(ks []EscHole, fn *ir.Node, param *types.Field) EscHole {
+func (e *Escape) tagHole(ks []EscHole, fn ir.Node, param *types.Field) EscHole {
// If this is a dynamic call, we can't rely on param.Note.
if fn == nil {
return e.heapHole()
@@ -935,7 +935,7 @@ func (e *Escape) tagHole(ks []EscHole, fn *ir.Node, param *types.Field) EscHole
// fn has not yet been analyzed, so its parameters and results
// should be incorporated directly into the flow graph instead of
// relying on its escape analysis tagging.
-func (e *Escape) inMutualBatch(fn *ir.Node) bool {
+func (e *Escape) inMutualBatch(fn ir.Node) bool {
if fn.Name().Defn != nil && fn.Name().Defn.Esc() < EscFuncTagged {
if fn.Name().Defn.Esc() == EscFuncUnknown {
base.Fatalf("graph inconsistency")
@@ -960,11 +960,11 @@ type EscHole struct {
type EscNote struct {
next *EscNote
- where *ir.Node
+ where ir.Node
why string
}
-func (k EscHole) note(where *ir.Node, why string) EscHole {
+func (k EscHole) note(where ir.Node, why string) EscHole {
if where == nil || why == "" {
base.Fatalf("note: missing where/why")
}
@@ -986,10 +986,10 @@ func (k EscHole) shift(delta int) EscHole {
return k
}
-func (k EscHole) deref(where *ir.Node, why string) EscHole { return k.shift(1).note(where, why) }
-func (k EscHole) addr(where *ir.Node, why string) EscHole { return k.shift(-1).note(where, why) }
+func (k EscHole) deref(where ir.Node, why string) EscHole { return k.shift(1).note(where, why) }
+func (k EscHole) addr(where ir.Node, why string) EscHole { return k.shift(-1).note(where, why) }
-func (k EscHole) dotType(t *types.Type, where *ir.Node, why string) EscHole {
+func (k EscHole) dotType(t *types.Type, where ir.Node, why string) EscHole {
if !t.IsInterface() && !isdirectiface(t) {
k = k.shift(1)
}
@@ -1026,7 +1026,7 @@ func (e *Escape) teeHole(ks ...EscHole) EscHole {
return loc.asHole()
}
-func (e *Escape) dcl(n *ir.Node) EscHole {
+func (e *Escape) dcl(n ir.Node) EscHole {
loc := e.oldLoc(n)
loc.loopDepth = e.loopDepth
return loc.asHole()
@@ -1035,7 +1035,7 @@ func (e *Escape) dcl(n *ir.Node) EscHole {
// spill allocates a new location associated with expression n, flows
// its address to k, and returns a hole that flows values to it. It's
// intended for use with most expressions that allocate storage.
-func (e *Escape) spill(k EscHole, n *ir.Node) EscHole {
+func (e *Escape) spill(k EscHole, n ir.Node) EscHole {
loc := e.newLoc(n, true)
e.flow(k.addr(n, "spill"), loc)
return loc.asHole()
@@ -1052,7 +1052,7 @@ func (e *Escape) later(k EscHole) EscHole {
// canonicalNode returns the canonical *Node that n logically
// represents.
-func canonicalNode(n *ir.Node) *ir.Node {
+func canonicalNode(n ir.Node) ir.Node {
if n != nil && n.Op() == ir.ONAME && n.Name().IsClosureVar() {
n = n.Name().Defn
if n.Name().IsClosureVar() {
@@ -1063,7 +1063,7 @@ func canonicalNode(n *ir.Node) *ir.Node {
return n
}
-func (e *Escape) newLoc(n *ir.Node, transient bool) *EscLocation {
+func (e *Escape) newLoc(n ir.Node, transient bool) *EscLocation {
if e.curfn == nil {
base.Fatalf("e.curfn isn't set")
}
@@ -1096,7 +1096,7 @@ func (e *Escape) newLoc(n *ir.Node, transient bool) *EscLocation {
return loc
}
-func (e *Escape) oldLoc(n *ir.Node) *EscLocation {
+func (e *Escape) oldLoc(n ir.Node) *EscLocation {
n = canonicalNode(n)
return n.Opt().(*EscLocation)
}
@@ -1394,7 +1394,7 @@ func (e *Escape) outlives(l, other *EscLocation) bool {
}
// containsClosure reports whether c is a closure contained within f.
-func containsClosure(f, c *ir.Node) bool {
+func containsClosure(f, c ir.Node) bool {
if f.Op() != ir.ODCLFUNC || c.Op() != ir.ODCLFUNC {
base.Fatalf("bad containsClosure: %v, %v", f, c)
}
@@ -1429,7 +1429,7 @@ func (l *EscLocation) leakTo(sink *EscLocation, derefs int) {
l.paramEsc.AddHeap(derefs)
}
-func (e *Escape) finish(fns []*ir.Node) {
+func (e *Escape) finish(fns []ir.Node) {
// Record parameter tags for package export data.
for _, fn := range fns {
fn.SetEsc(EscFuncTagged)
@@ -1574,7 +1574,7 @@ func ParseLeaks(s string) EscLeaks {
return l
}
-func escapes(all []*ir.Node) {
+func escapes(all []ir.Node) {
visitBottomUp(all, escapeFuncs)
}
@@ -1607,7 +1607,7 @@ const (
)
// funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way.
-func funcSym(fn *ir.Node) *types.Sym {
+func funcSym(fn ir.Node) *types.Sym {
if fn == nil || fn.Func().Nname == nil {
return nil
}
@@ -1622,7 +1622,7 @@ var (
nonlooping = ir.Nod(ir.OXXX, nil, nil)
)
-func isSliceSelfAssign(dst, src *ir.Node) bool {
+func isSliceSelfAssign(dst, src ir.Node) bool {
// Detect the following special case.
//
// func (b *Buffer) Foo() {
@@ -1672,7 +1672,7 @@ func isSliceSelfAssign(dst, src *ir.Node) bool {
// isSelfAssign reports whether assignment from src to dst can
// be ignored by the escape analysis as it's effectively a self-assignment.
-func isSelfAssign(dst, src *ir.Node) bool {
+func isSelfAssign(dst, src ir.Node) bool {
if isSliceSelfAssign(dst, src) {
return true
}
@@ -1709,7 +1709,7 @@ func isSelfAssign(dst, src *ir.Node) bool {
// mayAffectMemory reports whether evaluation of n may affect the program's
// memory state. If the expression can't affect memory state, then it can be
// safely ignored by the escape analysis.
-func mayAffectMemory(n *ir.Node) bool {
+func mayAffectMemory(n ir.Node) bool {
// We may want to use a list of "memory safe" ops instead of generally
// "side-effect free", which would include all calls and other ops that can
// allocate or change global state. For now, it's safer to start with the latter.
@@ -1736,7 +1736,7 @@ func mayAffectMemory(n *ir.Node) bool {
// heapAllocReason returns the reason the given Node must be heap
// allocated, or the empty string if it doesn't.
-func heapAllocReason(n *ir.Node) string {
+func heapAllocReason(n ir.Node) string {
if n.Type() == nil {
return ""
}
@@ -1781,7 +1781,7 @@ func heapAllocReason(n *ir.Node) string {
// by "increasing" the "value" of n.Esc to EscHeap.
// Storage is allocated as necessary to allow the address
// to be taken.
-func addrescapes(n *ir.Node) {
+func addrescapes(n ir.Node) {
switch n.Op() {
default:
// Unexpected Op, probably due to a previous type error. Ignore.
@@ -1847,7 +1847,7 @@ func addrescapes(n *ir.Node) {
}
// moveToHeap records the parameter or local variable n as moved to the heap.
-func moveToHeap(n *ir.Node) {
+func moveToHeap(n ir.Node) {
if base.Flag.LowerR != 0 {
ir.Dump("MOVE", n)
}
@@ -1939,7 +1939,7 @@ const unsafeUintptrTag = "unsafe-uintptr"
// marked go:uintptrescapes.
const uintptrEscapesTag = "uintptr-escapes"
-func (e *Escape) paramTag(fn *ir.Node, narg int, f *types.Field) string {
+func (e *Escape) paramTag(fn ir.Node, narg int, f *types.Field) string {
name := func() string {
if f.Sym != nil {
return f.Sym.Name
diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go
index ace461fc90..10033793bf 100644
--- a/src/cmd/compile/internal/gc/export.go
+++ b/src/cmd/compile/internal/gc/export.go
@@ -21,10 +21,10 @@ func exportf(bout *bio.Writer, format string, args ...interface{}) {
}
}
-var asmlist []*ir.Node
+var asmlist []ir.Node
// exportsym marks n for export (or reexport).
-func exportsym(n *ir.Node) {
+func exportsym(n ir.Node) {
if n.Sym().OnExportList() {
return
}
@@ -41,7 +41,7 @@ func initname(s string) bool {
return s == "init"
}
-func autoexport(n *ir.Node, ctxt ir.Class) {
+func autoexport(n ir.Node, ctxt ir.Class) {
if n.Sym().Pkg != ir.LocalPkg {
return
}
@@ -74,7 +74,7 @@ func dumpexport(bout *bio.Writer) {
}
}
-func importsym(ipkg *types.Pkg, s *types.Sym, op ir.Op) *ir.Node {
+func importsym(ipkg *types.Pkg, s *types.Sym, op ir.Op) ir.Node {
n := ir.AsNode(s.PkgDef())
if n == nil {
// iimport should have created a stub ONONAME
@@ -120,7 +120,7 @@ func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type {
// importobj declares symbol s as an imported object representable by op.
// ipkg is the package being imported
-func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) *ir.Node {
+func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) ir.Node {
n := importsym(ipkg, s, op)
if n.Op() != ir.ONONAME {
if n.Op() == op && (n.Class() != ctxt || !types.Identical(n.Type(), t)) {
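Signature changes like exportsym and importobj above compile without touching call sites because the methods of ir.node are declared on the pointer receiver, so *node (and only *node) satisfies the interface. A minimal sketch under those assumptions:

	package main

	import "fmt"

	type Node interface {
		Op() int
		Sym() string
	}

	type node struct {
		op  int
		sym string
	}

	func (n *node) Op() int     { return n.op }
	func (n *node) Sym() string { return n.sym }

	// exportsym formerly took *node; only the parameter type changes,
	// since every *node argument already implements Node.
	func exportsym(n Node) string { return n.Sym() }

	func main() {
		fmt.Println(exportsym(&node{op: 1, sym: "init"}))
	}
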
diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go
index a89ff528e5..44e918f2c1 100644
--- a/src/cmd/compile/internal/gc/gen.go
+++ b/src/cmd/compile/internal/gc/gen.go
@@ -30,13 +30,13 @@ func sysvar(name string) *obj.LSym {
// isParamStackCopy reports whether this is the on-stack copy of a
// function parameter that moved to the heap.
-func isParamStackCopy(n *ir.Node) bool {
+func isParamStackCopy(n ir.Node) bool {
return n.Op() == ir.ONAME && (n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Name().Param.Heapaddr != nil
}
// isParamHeapCopy reports whether this is the on-heap copy of
// a function parameter that moved to the heap.
-func isParamHeapCopy(n *ir.Node) bool {
+func isParamHeapCopy(n ir.Node) bool {
return n.Op() == ir.ONAME && n.Class() == ir.PAUTOHEAP && n.Name().Param.Stackcopy != nil
}
@@ -52,7 +52,7 @@ func autotmpname(n int) string {
}
// make a new Node off the books
-func tempAt(pos src.XPos, curfn *ir.Node, t *types.Type) *ir.Node {
+func tempAt(pos src.XPos, curfn ir.Node, t *types.Type) ir.Node {
if curfn == nil {
base.Fatalf("no curfn for tempAt")
}
@@ -83,6 +83,6 @@ func tempAt(pos src.XPos, curfn *ir.Node, t *types.Type) *ir.Node {
return n.Orig()
}
-func temp(t *types.Type) *ir.Node {
+func temp(t *types.Type) ir.Node {
return tempAt(base.Pos, Curfn, t)
}
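tempAt now hands callers an ir.Node, and code throughout the compiler compares such values (fn != Curfn, n == nodfp). Interface equality on two values wrapping the same *node compares the pointers, so identity checks survive the conversion. A minimal sketch:

	package main

	import "fmt"

	type Node interface{ Op() int }

	type node struct{ op int }

	func (n *node) Op() int { return n.op }

	func main() {
		a := &node{op: 1}
		var x Node = a
		var y Node = a
		fmt.Println(x == y) // true: same dynamic type, same pointer
		fmt.Println(x == Node(&node{op: 1})) // false: different pointer
	}
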
diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go
index 8642cc4a30..84e6bc5faf 100644
--- a/src/cmd/compile/internal/gc/go.go
+++ b/src/cmd/compile/internal/gc/go.go
@@ -128,11 +128,11 @@ var (
iscmp [ir.OEND]bool
)
-var xtop []*ir.Node
+var xtop []ir.Node
-var exportlist []*ir.Node
+var exportlist []ir.Node
-var importlist []*ir.Node // imported functions and methods with inlinable bodies
+var importlist []ir.Node // imported functions and methods with inlinable bodies
var (
funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym)
@@ -141,7 +141,7 @@ var (
var dclcontext ir.Class // PEXTERN/PAUTO
-var Curfn *ir.Node
+var Curfn ir.Node
var Widthptr int
@@ -156,7 +156,7 @@ var instrumenting bool
// Whether we are tracking lexical scopes for DWARF.
var trackScopes bool
-var nodfp *ir.Node
+var nodfp ir.Node
var autogeneratedPos src.XPos
@@ -193,7 +193,7 @@ var thearch Arch
var (
staticuint64s,
- zerobase *ir.Node
+ zerobase ir.Node
assertE2I,
assertE2I2,
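For package-level variables like Curfn and nodfp above, the zero value of an interface is nil just as it was for the pointer, so initialization behavior does not change; what changes is the representation, two words instead of one. A minimal sketch (sizes assume a 64-bit target):

	package main

	import (
		"fmt"
		"unsafe"
	)

	type Node interface{ Op() int }

	type node struct{ op int }

	func (n *node) Op() int { return n.op }

	var Curfn Node // was *node; still zero-initializes to nil

	func main() {
		fmt.Println(Curfn == nil) // true
		var p *node
		// Interface: (type, data) = 16 bytes; pointer: 8 bytes.
		fmt.Println(unsafe.Sizeof(Curfn), unsafe.Sizeof(p))
	}
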
diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go
index 3416a00cd1..950033a8a3 100644
--- a/src/cmd/compile/internal/gc/gsubr.go
+++ b/src/cmd/compile/internal/gc/gsubr.go
@@ -47,7 +47,7 @@ type Progs struct {
next *obj.Prog // next Prog
pc int64 // virtual PC; count of Progs
pos src.XPos // position to use for new Progs
- curfn *ir.Node // fn these Progs are for
+ curfn ir.Node // fn these Progs are for
progcache []obj.Prog // local progcache
cacheidx int // first free element of progcache
@@ -57,7 +57,7 @@ type Progs struct {
// newProgs returns a new Progs for fn.
// worker indicates which of the backend workers will use the Progs.
-func newProgs(fn *ir.Node, worker int) *Progs {
+func newProgs(fn ir.Node, worker int) *Progs {
pp := new(Progs)
if base.Ctxt.CanReuseProgs() {
sz := len(sharedProgArray) / base.Flag.LowerC
@@ -174,7 +174,7 @@ func (pp *Progs) Appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16
return q
}
-func (pp *Progs) settext(fn *ir.Node) {
+func (pp *Progs) settext(fn ir.Node) {
if pp.Text != nil {
base.Fatalf("Progs.settext called twice")
}
@@ -290,7 +290,7 @@ func initLSym(f *ir.Func, hasBody bool) {
base.Ctxt.InitTextSym(f.LSym, flag)
}
-func ggloblnod(nam *ir.Node) {
+func ggloblnod(nam ir.Node) {
s := nam.Sym().Linksym()
s.Gotype = ngotype(nam).Linksym()
flags := 0
diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go
index 281e2de43d..ef52e40f21 100644
--- a/src/cmd/compile/internal/gc/iexport.go
+++ b/src/cmd/compile/internal/gc/iexport.go
@@ -259,8 +259,8 @@ func iexport(out *bufio.Writer) {
p := iexporter{
allPkgs: map[*types.Pkg]bool{},
stringIndex: map[string]uint64{},
- declIndex: map[*ir.Node]uint64{},
- inlineIndex: map[*ir.Node]uint64{},
+ declIndex: map[ir.Node]uint64{},
+ inlineIndex: map[ir.Node]uint64{},
typIndex: map[*types.Type]uint64{},
}
@@ -314,9 +314,9 @@ func iexport(out *bufio.Writer) {
// we're writing out the main index, which is also read by
// non-compiler tools and includes a complete package description
// (i.e., name and height).
-func (w *exportWriter) writeIndex(index map[*ir.Node]uint64, mainIndex bool) {
+func (w *exportWriter) writeIndex(index map[ir.Node]uint64, mainIndex bool) {
// Build a map from packages to objects from that package.
- pkgObjs := map[*types.Pkg][]*ir.Node{}
+ pkgObjs := map[*types.Pkg][]ir.Node{}
// For the main index, make sure to include every package that
// we reference, even if we're not exporting (or reexporting)
@@ -374,8 +374,8 @@ type iexporter struct {
stringIndex map[string]uint64
data0 intWriter
- declIndex map[*ir.Node]uint64
- inlineIndex map[*ir.Node]uint64
+ declIndex map[ir.Node]uint64
+ inlineIndex map[ir.Node]uint64
typIndex map[*types.Type]uint64
}
@@ -394,7 +394,7 @@ func (p *iexporter) stringOff(s string) uint64 {
}
// pushDecl adds n to the declaration work queue, if not already present.
-func (p *iexporter) pushDecl(n *ir.Node) {
+func (p *iexporter) pushDecl(n ir.Node) {
if n.Sym() == nil || ir.AsNode(n.Sym().Def) != n && n.Op() != ir.OTYPE {
base.Fatalf("weird Sym: %v, %v", n, n.Sym())
}
@@ -423,7 +423,7 @@ type exportWriter struct {
prevColumn int64
}
-func (p *iexporter) doDecl(n *ir.Node) {
+func (p *iexporter) doDecl(n ir.Node) {
w := p.newWriter()
w.setPkg(n.Sym().Pkg, false)
@@ -515,7 +515,7 @@ func (w *exportWriter) tag(tag byte) {
w.data.WriteByte(tag)
}
-func (p *iexporter) doInline(f *ir.Node) {
+func (p *iexporter) doInline(f ir.Node) {
w := p.newWriter()
w.setPkg(fnpkg(f), false)
@@ -570,7 +570,7 @@ func (w *exportWriter) pkg(pkg *types.Pkg) {
w.string(pkg.Path)
}
-func (w *exportWriter) qualifiedIdent(n *ir.Node) {
+func (w *exportWriter) qualifiedIdent(n ir.Node) {
// Ensure any referenced declarations are written out too.
w.p.pushDecl(n)
@@ -955,12 +955,12 @@ func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) }
// Compiler-specific extensions.
-func (w *exportWriter) varExt(n *ir.Node) {
+func (w *exportWriter) varExt(n ir.Node) {
w.linkname(n.Sym())
w.symIdx(n.Sym())
}
-func (w *exportWriter) funcExt(n *ir.Node) {
+func (w *exportWriter) funcExt(n ir.Node) {
w.linkname(n.Sym())
w.symIdx(n.Sym())
@@ -1037,7 +1037,7 @@ func (w *exportWriter) stmtList(list ir.Nodes) {
w.op(ir.OEND)
}
-func (w *exportWriter) node(n *ir.Node) {
+func (w *exportWriter) node(n ir.Node) {
if ir.OpPrec[n.Op()] < 0 {
w.stmt(n)
} else {
@@ -1047,7 +1047,7 @@ func (w *exportWriter) node(n *ir.Node) {
// Caution: stmt will emit more than one node for statement nodes n that have a non-empty
// n.Ninit and where n cannot have a natural init section (such as in "if", "for", etc.).
-func (w *exportWriter) stmt(n *ir.Node) {
+func (w *exportWriter) stmt(n ir.Node) {
if n.Init().Len() > 0 && !ir.StmtWithInit(n.Op()) {
// can't use stmtList here since we don't want the final OEND
for _, n := range n.Init().Slice() {
@@ -1095,7 +1095,7 @@ func (w *exportWriter) stmt(n *ir.Node) {
w.op(ir.OAS2)
w.pos(n.Pos())
w.exprList(n.List())
- w.exprList(ir.AsNodes([]*ir.Node{n.Right()}))
+ w.exprList(ir.AsNodes([]ir.Node{n.Right()}))
case ir.ORETURN:
w.op(ir.ORETURN)
@@ -1164,7 +1164,7 @@ func (w *exportWriter) stmt(n *ir.Node) {
}
}
-func (w *exportWriter) caseList(sw *ir.Node) {
+func (w *exportWriter) caseList(sw ir.Node) {
namedTypeSwitch := sw.Op() == ir.OSWITCH && sw.Left() != nil && sw.Left().Op() == ir.OTYPESW && sw.Left().Left() != nil
cases := sw.List().Slice()
@@ -1189,7 +1189,7 @@ func (w *exportWriter) exprList(list ir.Nodes) {
w.op(ir.OEND)
}
-func (w *exportWriter) expr(n *ir.Node) {
+func (w *exportWriter) expr(n ir.Node) {
// from nodefmt (fmt.go)
//
// nodefmt reverts nodes back to their original - we don't need to do
@@ -1430,7 +1430,7 @@ func (w *exportWriter) op(op ir.Op) {
w.uint64(uint64(op))
}
-func (w *exportWriter) exprsOrNil(a, b *ir.Node) {
+func (w *exportWriter) exprsOrNil(a, b ir.Node) {
ab := 0
if a != nil {
ab |= 1
@@ -1455,7 +1455,7 @@ func (w *exportWriter) elemList(list ir.Nodes) {
}
}
-func (w *exportWriter) localName(n *ir.Node) {
+func (w *exportWriter) localName(n ir.Node) {
// Escape analysis happens after inline bodies are saved, but
// we're using the same ONAME nodes, so we might still see
// PAUTOHEAP here.
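The declIndex/inlineIndex changes above follow the same key-retyping pattern, and pushDecl's dedup logic is untouched. A minimal sketch of that work-queue shape with simplified types (the ^uint64(0) "in progress" sentinel is modeled on the real writer):

	package main

	import "fmt"

	type Node interface{ Name() string }

	type node struct{ name string }

	func (n *node) Name() string { return n.name }

	type iexporter struct {
		declIndex map[Node]uint64 // was map[*node]uint64
		declTodo  []Node
	}

	func (p *iexporter) pushDecl(n Node) {
		if _, done := p.declIndex[n]; done {
			return // already queued or written
		}
		p.declIndex[n] = ^uint64(0) // replaced by a real offset later
		p.declTodo = append(p.declTodo, n)
	}

	func main() {
		p := &iexporter{declIndex: map[Node]uint64{}}
		n := &node{name: "F"}
		p.pushDecl(n)
		p.pushDecl(n) // deduplicated by the interface-keyed map
		fmt.Println(len(p.declTodo)) // 1
	}
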
diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go
index 5d845d90e8..77078c118a 100644
--- a/src/cmd/compile/internal/gc/iimport.go
+++ b/src/cmd/compile/internal/gc/iimport.go
@@ -41,7 +41,7 @@ var (
inlineImporter = map[*types.Sym]iimporterAndOffset{}
)
-func expandDecl(n *ir.Node) {
+func expandDecl(n ir.Node) {
if n.Op() != ir.ONONAME {
return
}
@@ -55,7 +55,7 @@ func expandDecl(n *ir.Node) {
r.doDecl(n)
}
-func expandInline(fn *ir.Node) {
+func expandInline(fn ir.Node) {
if fn.Func().Inl.Body != nil {
return
}
@@ -68,7 +68,7 @@ func expandInline(fn *ir.Node) {
r.doInline(fn)
}
-func importReaderFor(n *ir.Node, importers map[*types.Sym]iimporterAndOffset) *importReader {
+func importReaderFor(n ir.Node, importers map[*types.Sym]iimporterAndOffset) *importReader {
x, ok := importers[n.Sym()]
if !ok {
return nil
@@ -281,7 +281,7 @@ func (r *importReader) setPkg() {
r.currPkg = r.pkg()
}
-func (r *importReader) doDecl(n *ir.Node) {
+func (r *importReader) doDecl(n ir.Node) {
if n.Op() != ir.ONONAME {
base.Fatalf("doDecl: unexpected Op for %v: %v", n.Sym(), n.Op())
}
@@ -635,12 +635,12 @@ func (r *importReader) byte() byte {
// Compiler-specific extensions.
-func (r *importReader) varExt(n *ir.Node) {
+func (r *importReader) varExt(n ir.Node) {
r.linkname(n.Sym())
r.symIdx(n.Sym())
}
-func (r *importReader) funcExt(n *ir.Node) {
+func (r *importReader) funcExt(n ir.Node) {
r.linkname(n.Sym())
r.symIdx(n.Sym())
@@ -695,7 +695,7 @@ func (r *importReader) typeExt(t *types.Type) {
// so we can use index to reference the symbol.
var typeSymIdx = make(map[*types.Type][2]int64)
-func (r *importReader) doInline(n *ir.Node) {
+func (r *importReader) doInline(n ir.Node) {
if len(n.Func().Inl.Body) != 0 {
base.Fatalf("%v already has inline body", n)
}
@@ -710,7 +710,7 @@ func (r *importReader) doInline(n *ir.Node) {
// (not doing so can cause significant performance
// degradation due to unnecessary calls to empty
// functions).
- body = []*ir.Node{}
+ body = []ir.Node{}
}
n.Func().Inl.Body = body
@@ -740,8 +740,8 @@ func (r *importReader) doInline(n *ir.Node) {
// unrefined nodes (since this is what the importer uses). The respective case
// entries are unreachable in the importer.
-func (r *importReader) stmtList() []*ir.Node {
- var list []*ir.Node
+func (r *importReader) stmtList() []ir.Node {
+ var list []ir.Node
for {
n := r.node()
if n == nil {
@@ -758,10 +758,10 @@ func (r *importReader) stmtList() []*ir.Node {
return list
}
-func (r *importReader) caseList(sw *ir.Node) []*ir.Node {
+func (r *importReader) caseList(sw ir.Node) []ir.Node {
namedTypeSwitch := sw.Op() == ir.OSWITCH && sw.Left() != nil && sw.Left().Op() == ir.OTYPESW && sw.Left().Left() != nil
- cases := make([]*ir.Node, r.uint64())
+ cases := make([]ir.Node, r.uint64())
for i := range cases {
cas := ir.NodAt(r.pos(), ir.OCASE, nil, nil)
cas.PtrList().Set(r.stmtList())
@@ -780,8 +780,8 @@ func (r *importReader) caseList(sw *ir.Node) []*ir.Node {
return cases
}
-func (r *importReader) exprList() []*ir.Node {
- var list []*ir.Node
+func (r *importReader) exprList() []ir.Node {
+ var list []ir.Node
for {
n := r.expr()
if n == nil {
@@ -792,7 +792,7 @@ func (r *importReader) exprList() []*ir.Node {
return list
}
-func (r *importReader) expr() *ir.Node {
+func (r *importReader) expr() ir.Node {
n := r.node()
if n != nil && n.Op() == ir.OBLOCK {
base.Fatalf("unexpected block node: %v", n)
@@ -801,7 +801,7 @@ func (r *importReader) expr() *ir.Node {
}
// TODO(gri) split into expr and stmt
-func (r *importReader) node() *ir.Node {
+func (r *importReader) node() ir.Node {
switch op := r.op(); op {
// expressions
// case OPAREN:
@@ -814,7 +814,7 @@ func (r *importReader) node() *ir.Node {
pos := r.pos()
typ := r.typ()
- var n *ir.Node
+ var n ir.Node
if typ.HasNil() {
n = nodnil()
} else {
@@ -906,7 +906,7 @@ func (r *importReader) node() *ir.Node {
case ir.OSLICE, ir.OSLICE3:
n := ir.NodAt(r.pos(), op, r.expr(), nil)
low, high := r.exprsOrNil()
- var max *ir.Node
+ var max ir.Node
if n.Op().IsSlice3() {
max = r.expr()
}
@@ -970,7 +970,7 @@ func (r *importReader) node() *ir.Node {
pos := r.pos()
lhs := npos(pos, dclname(r.ident()))
typ := typenod(r.typ())
- return npos(pos, liststmt(variter([]*ir.Node{lhs}, typ, nil))) // TODO(gri) avoid list creation
+ return npos(pos, liststmt(variter([]ir.Node{lhs}, typ, nil))) // TODO(gri) avoid list creation
// case ODCLFIELD:
// unimplemented
@@ -1082,9 +1082,9 @@ func (r *importReader) op() ir.Op {
return ir.Op(r.uint64())
}
-func (r *importReader) elemList() []*ir.Node {
+func (r *importReader) elemList() []ir.Node {
c := r.uint64()
- list := make([]*ir.Node, c)
+ list := make([]ir.Node, c)
for i := range list {
s := r.ident()
list[i] = nodSym(ir.OSTRUCTKEY, r.expr(), s)
@@ -1092,7 +1092,7 @@ func (r *importReader) elemList() []*ir.Node {
return list
}
-func (r *importReader) exprsOrNil() (a, b *ir.Node) {
+func (r *importReader) exprsOrNil() (a, b ir.Node) {
ab := r.uint64()
if ab&1 != 0 {
a = r.expr()
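The body = []ir.Node{} hunk above preserves a distinction the retyping must not lose: a non-nil empty slice means "inlinable function with an empty body", while nil means "no inline body saved". A minimal sketch:

	package main

	import "fmt"

	type Node interface{ Op() int }

	func main() {
		var none []Node   // no inline body saved
		empty := []Node{} // inlinable, body is empty
		fmt.Println(none == nil, empty == nil) // true false
		fmt.Println(len(none), len(empty))     // 0 0
	}
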
diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go
index 02a6175c6b..2b7ecd1d05 100644
--- a/src/cmd/compile/internal/gc/init.go
+++ b/src/cmd/compile/internal/gc/init.go
@@ -33,7 +33,7 @@ func renameinit() *types.Sym {
// 1) Initialize all of the packages the current package depends on.
// 2) Initialize all the variables that have initializers.
// 3) Run any init functions.
-func fninit(n []*ir.Node) {
+func fninit(n []ir.Node) {
nf := initOrder(n)
var deps []*obj.LSym // initTask records for packages the current package depends on
diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go
index 71da72f0cf..1003f131b8 100644
--- a/src/cmd/compile/internal/gc/initorder.go
+++ b/src/cmd/compile/internal/gc/initorder.go
@@ -64,7 +64,7 @@ const (
type InitOrder struct {
// blocking maps initialization assignments to the assignments
// that depend on it.
- blocking map[*ir.Node][]*ir.Node
+ blocking map[ir.Node][]ir.Node
// ready is the queue of Pending initialization assignments
// that are ready for initialization.
@@ -75,13 +75,13 @@ type InitOrder struct {
// package-level declarations (in declaration order) and outputs the
// corresponding list of statements to include in the init() function
// body.
-func initOrder(l []*ir.Node) []*ir.Node {
+func initOrder(l []ir.Node) []ir.Node {
s := InitSchedule{
- initplans: make(map[*ir.Node]*InitPlan),
- inittemps: make(map[*ir.Node]*ir.Node),
+ initplans: make(map[ir.Node]*InitPlan),
+ inittemps: make(map[ir.Node]ir.Node),
}
o := InitOrder{
- blocking: make(map[*ir.Node][]*ir.Node),
+ blocking: make(map[ir.Node][]ir.Node),
}
// Process all package-level assignment in declaration order.
@@ -110,7 +110,7 @@ func initOrder(l []*ir.Node) []*ir.Node {
// first.
base.ExitIfErrors()
- findInitLoopAndExit(firstLHS(n), new([]*ir.Node))
+ findInitLoopAndExit(firstLHS(n), new([]ir.Node))
base.Fatalf("initialization unfinished, but failed to identify loop")
}
}
@@ -125,7 +125,7 @@ func initOrder(l []*ir.Node) []*ir.Node {
return s.out
}
-func (o *InitOrder) processAssign(n *ir.Node) {
+func (o *InitOrder) processAssign(n ir.Node) {
if n.Initorder() != InitNotStarted || n.Offset() != types.BADWIDTH {
base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Offset())
}
@@ -154,9 +154,9 @@ func (o *InitOrder) processAssign(n *ir.Node) {
// flushReady repeatedly applies initialize to the earliest (in
// declaration order) assignment ready for initialization and updates
// the inverse dependency ("blocking") graph.
-func (o *InitOrder) flushReady(initialize func(*ir.Node)) {
+func (o *InitOrder) flushReady(initialize func(ir.Node)) {
for o.ready.Len() != 0 {
- n := heap.Pop(&o.ready).(*ir.Node)
+ n := heap.Pop(&o.ready).(ir.Node)
if n.Initorder() != InitPending || n.Offset() != 0 {
base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Offset())
}
@@ -183,7 +183,7 @@ func (o *InitOrder) flushReady(initialize func(*ir.Node)) {
// path points to a slice used for tracking the sequence of
// variables/functions visited. Using a pointer to a slice allows the
// slice capacity to grow and limit reallocations.
-func findInitLoopAndExit(n *ir.Node, path *[]*ir.Node) {
+func findInitLoopAndExit(n ir.Node, path *[]ir.Node) {
// We implement a simple DFS loop-finding algorithm. This
// could be faster, but initialization cycles are rare.
@@ -196,7 +196,7 @@ func findInitLoopAndExit(n *ir.Node, path *[]*ir.Node) {
// There might be multiple loops involving n; by sorting
// references, we deterministically pick the one reported.
- refers := collectDeps(n.Name().Defn, false).Sorted(func(ni, nj *ir.Node) bool {
+ refers := collectDeps(n.Name().Defn, false).Sorted(func(ni, nj ir.Node) bool {
return ni.Pos().Before(nj.Pos())
})
@@ -215,7 +215,7 @@ func findInitLoopAndExit(n *ir.Node, path *[]*ir.Node) {
// reportInitLoopAndExit reports an initialization loop as an error
// and exits. However, if l is not actually an initialization loop, it
// simply returns instead.
-func reportInitLoopAndExit(l []*ir.Node) {
+func reportInitLoopAndExit(l []ir.Node) {
// Rotate loop so that the earliest variable declaration is at
// the start.
i := -1
@@ -250,7 +250,7 @@ func reportInitLoopAndExit(l []*ir.Node) {
// variables that declaration n depends on. If transitive is true,
// then it also includes the transitive dependencies of any depended
// upon functions (but not variables).
-func collectDeps(n *ir.Node, transitive bool) ir.NodeSet {
+func collectDeps(n ir.Node, transitive bool) ir.NodeSet {
d := initDeps{transitive: transitive}
switch n.Op() {
case ir.OAS:
@@ -270,12 +270,12 @@ type initDeps struct {
seen ir.NodeSet
}
-func (d *initDeps) inspect(n *ir.Node) { ir.Inspect(n, d.visit) }
+func (d *initDeps) inspect(n ir.Node) { ir.Inspect(n, d.visit) }
func (d *initDeps) inspectList(l ir.Nodes) { ir.InspectList(l, d.visit) }
// visit calls foundDep on any package-level functions or variables
// referenced by n, if any.
-func (d *initDeps) visit(n *ir.Node) bool {
+func (d *initDeps) visit(n ir.Node) bool {
switch n.Op() {
case ir.OMETHEXPR:
d.foundDep(methodExprName(n))
@@ -299,7 +299,7 @@ func (d *initDeps) visit(n *ir.Node) bool {
// foundDep records that we've found a dependency on n by adding it to
// seen.
-func (d *initDeps) foundDep(n *ir.Node) {
+func (d *initDeps) foundDep(n ir.Node) {
// Can happen with method expressions involving interface
// types; e.g., fixedbugs/issue4495.go.
if n == nil {
@@ -328,7 +328,7 @@ func (d *initDeps) foundDep(n *ir.Node) {
// an OAS node's Pos may not be unique. For example, given the
// declaration "var a, b = f(), g()", "a" must be ordered before "b",
// but both OAS nodes use the "=" token's position as their Pos.
-type declOrder []*ir.Node
+type declOrder []ir.Node
func (s declOrder) Len() int { return len(s) }
func (s declOrder) Less(i, j int) bool {
@@ -336,7 +336,7 @@ func (s declOrder) Less(i, j int) bool {
}
func (s declOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s *declOrder) Push(x interface{}) { *s = append(*s, x.(*ir.Node)) }
+func (s *declOrder) Push(x interface{}) { *s = append(*s, x.(ir.Node)) }
func (s *declOrder) Pop() interface{} {
n := (*s)[len(*s)-1]
*s = (*s)[:len(*s)-1]
@@ -345,7 +345,7 @@ func (s *declOrder) Pop() interface{} {
// firstLHS returns the first expression on the left-hand side of
// assignment n.
-func firstLHS(n *ir.Node) *ir.Node {
+func firstLHS(n ir.Node) ir.Node {
switch n.Op() {
case ir.OAS:
return n.Left()
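declOrder above shows how container/heap interacts with the change: heap traffics in interface{}, so the only edits are the element type and the assertion on Pop. A minimal runnable sketch with simplified types:

	package main

	import (
		"container/heap"
		"fmt"
	)

	type Node interface{ Pos() int }

	type node struct{ pos int }

	func (n *node) Pos() int { return n.pos }

	type declOrder []Node

	func (s declOrder) Len() int            { return len(s) }
	func (s declOrder) Less(i, j int) bool  { return s[i].Pos() < s[j].Pos() }
	func (s declOrder) Swap(i, j int)       { s[i], s[j] = s[j], s[i] }
	func (s *declOrder) Push(x interface{}) { *s = append(*s, x.(Node)) }
	func (s *declOrder) Pop() interface{} {
		n := (*s)[len(*s)-1]
		*s = (*s)[:len(*s)-1]
		return n
	}

	func main() {
		var q declOrder
		heap.Push(&q, &node{pos: 2})
		heap.Push(&q, &node{pos: 1})
		n := heap.Pop(&q).(Node) // was .(*node), as in flushReady
		fmt.Println(n.Pos())     // 1
	}
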
diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go
index f82c128265..6310762c1f 100644
--- a/src/cmd/compile/internal/gc/inl.go
+++ b/src/cmd/compile/internal/gc/inl.go
@@ -53,7 +53,7 @@ const (
// Get the function's package. For ordinary functions it's on the ->sym, but for imported methods
// the ->sym can be re-used in the local package, so peel it off the receiver's type.
-func fnpkg(fn *ir.Node) *types.Pkg {
+func fnpkg(fn ir.Node) *types.Pkg {
if ir.IsMethod(fn) {
// method
rcvr := fn.Type().Recv().Type
@@ -73,7 +73,7 @@ func fnpkg(fn *ir.Node) *types.Pkg {
// Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck
// because they're a copy of an already checked body.
-func typecheckinl(fn *ir.Node) {
+func typecheckinl(fn ir.Node) {
lno := setlineno(fn)
expandInline(fn)
@@ -111,7 +111,7 @@ func typecheckinl(fn *ir.Node) {
// Caninl determines whether fn is inlineable.
// If so, caninl saves fn->nbody in fn->inl and substitutes it with a copy.
// fn and ->nbody will already have been typechecked.
-func caninl(fn *ir.Node) {
+func caninl(fn ir.Node) {
if fn.Op() != ir.ODCLFUNC {
base.Fatalf("caninl %v", fn)
}
@@ -207,7 +207,7 @@ func caninl(fn *ir.Node) {
visitor := hairyVisitor{
budget: inlineMaxBudget,
extraCallCost: cc,
- usedLocals: make(map[*ir.Node]bool),
+ usedLocals: make(map[ir.Node]bool),
}
if visitor.visitList(fn.Body()) {
reason = visitor.reason
@@ -236,7 +236,7 @@ func caninl(fn *ir.Node) {
// inlFlood marks n's inline body for export and recursively ensures
// all called functions are marked too.
-func inlFlood(n *ir.Node) {
+func inlFlood(n ir.Node) {
if n == nil {
return
}
@@ -260,7 +260,7 @@ func inlFlood(n *ir.Node) {
// Recursively identify all referenced functions for
// reexport. We want to include even non-called functions,
// because after inlining they might be callable.
- ir.InspectList(ir.AsNodes(n.Func().Inl.Body), func(n *ir.Node) bool {
+ ir.InspectList(ir.AsNodes(n.Func().Inl.Body), func(n ir.Node) bool {
switch n.Op() {
case ir.OMETHEXPR:
inlFlood(methodExprName(n))
@@ -300,7 +300,7 @@ type hairyVisitor struct {
budget int32
reason string
extraCallCost int32
- usedLocals map[*ir.Node]bool
+ usedLocals map[ir.Node]bool
}
// Look for anything we want to punt on.
@@ -313,7 +313,7 @@ func (v *hairyVisitor) visitList(ll ir.Nodes) bool {
return false
}
-func (v *hairyVisitor) visit(n *ir.Node) bool {
+func (v *hairyVisitor) visit(n ir.Node) bool {
if n == nil {
return false
}
@@ -447,15 +447,15 @@ func (v *hairyVisitor) visit(n *ir.Node) bool {
// inlcopylist (together with inlcopy) recursively copies a list of nodes, except
// that it keeps the same ONAME, OTYPE, and OLITERAL nodes. It is used for copying
// the body and dcls of an inlineable function.
-func inlcopylist(ll []*ir.Node) []*ir.Node {
- s := make([]*ir.Node, 0, len(ll))
+func inlcopylist(ll []ir.Node) []ir.Node {
+ s := make([]ir.Node, 0, len(ll))
for _, n := range ll {
s = append(s, inlcopy(n))
}
return s
}
-func inlcopy(n *ir.Node) *ir.Node {
+func inlcopy(n ir.Node) ir.Node {
if n == nil {
return nil
}
@@ -479,7 +479,7 @@ func inlcopy(n *ir.Node) *ir.Node {
return m
}
-func countNodes(n *ir.Node) int {
+func countNodes(n ir.Node) int {
if n == nil {
return 0
}
@@ -503,7 +503,7 @@ func countNodes(n *ir.Node) int {
// Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any
// calls made to inlineable functions. This is the external entry point.
-func inlcalls(fn *ir.Node) {
+func inlcalls(fn ir.Node) {
savefn := Curfn
Curfn = fn
maxCost := int32(inlineMaxBudget)
@@ -516,7 +516,7 @@ func inlcalls(fn *ir.Node) {
// but allow inlining if there is a recursion cycle of many functions.
// Most likely, the inlining will stop before we even hit the beginning of
// the cycle again, but the map catches the unusual case.
- inlMap := make(map[*ir.Node]bool)
+ inlMap := make(map[ir.Node]bool)
fn = inlnode(fn, maxCost, inlMap)
if fn != Curfn {
base.Fatalf("inlnode replaced curfn")
@@ -525,7 +525,7 @@ func inlcalls(fn *ir.Node) {
}
// Turn an OINLCALL into a statement.
-func inlconv2stmt(n *ir.Node) {
+func inlconv2stmt(n ir.Node) {
n.SetOp(ir.OBLOCK)
// n->ninit stays
@@ -538,7 +538,7 @@ func inlconv2stmt(n *ir.Node) {
// Turn an OINLCALL into a single valued expression.
// The result of inlconv2expr MUST be assigned back to n, e.g.
// n.Left = inlconv2expr(n.Left)
-func inlconv2expr(n *ir.Node) *ir.Node {
+func inlconv2expr(n ir.Node) ir.Node {
r := n.Rlist().First()
return addinit(r, append(n.Init().Slice(), n.Body().Slice()...))
}
@@ -548,7 +548,7 @@ func inlconv2expr(n *ir.Node) *ir.Node {
// containing the inlined statements on the first list element so
// order will be preserved. Used in return, oas2func and call
// statements.
-func inlconv2list(n *ir.Node) []*ir.Node {
+func inlconv2list(n ir.Node) []ir.Node {
if n.Op() != ir.OINLCALL || n.Rlist().Len() == 0 {
base.Fatalf("inlconv2list %+v\n", n)
}
@@ -558,7 +558,7 @@ func inlconv2list(n *ir.Node) []*ir.Node {
return s
}
-func inlnodelist(l ir.Nodes, maxCost int32, inlMap map[*ir.Node]bool) {
+func inlnodelist(l ir.Nodes, maxCost int32, inlMap map[ir.Node]bool) {
s := l.Slice()
for i := range s {
s[i] = inlnode(s[i], maxCost, inlMap)
@@ -578,7 +578,7 @@ func inlnodelist(l ir.Nodes, maxCost int32, inlMap map[*ir.Node]bool) {
// shorter and less complicated.
// The result of inlnode MUST be assigned back to n, e.g.
// n.Left = inlnode(n.Left)
-func inlnode(n *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node {
+func inlnode(n ir.Node, maxCost int32, inlMap map[ir.Node]bool) ir.Node {
if n == nil {
return n
}
@@ -707,7 +707,7 @@ func inlnode(n *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node {
// inlCallee takes a function-typed expression and returns the underlying function ONAME
// that it refers to if statically known. Otherwise, it returns nil.
-func inlCallee(fn *ir.Node) *ir.Node {
+func inlCallee(fn ir.Node) ir.Node {
fn = staticValue(fn)
switch {
case fn.Op() == ir.OMETHEXPR:
@@ -729,7 +729,7 @@ func inlCallee(fn *ir.Node) *ir.Node {
return nil
}
-func staticValue(n *ir.Node) *ir.Node {
+func staticValue(n ir.Node) ir.Node {
for {
if n.Op() == ir.OCONVNOP {
n = n.Left()
@@ -747,7 +747,7 @@ func staticValue(n *ir.Node) *ir.Node {
// staticValue1 implements a simple SSA-like optimization. If n is a local variable
// that is initialized and never reassigned, staticValue1 returns the initializer
// expression. Otherwise, it returns nil.
-func staticValue1(n *ir.Node) *ir.Node {
+func staticValue1(n ir.Node) ir.Node {
if n.Op() != ir.ONAME || n.Class() != ir.PAUTO || n.Name().Addrtaken() {
return nil
}
@@ -757,7 +757,7 @@ func staticValue1(n *ir.Node) *ir.Node {
return nil
}
- var rhs *ir.Node
+ var rhs ir.Node
FindRHS:
switch defn.Op() {
case ir.OAS:
@@ -791,7 +791,7 @@ FindRHS:
// useful for -m output documenting the reason for inhibited optimizations.
// NB: global variables are always considered to be re-assigned.
// TODO: handle initial declaration not including an assignment and followed by a single assignment?
-func reassigned(n *ir.Node) (bool, *ir.Node) {
+func reassigned(n ir.Node) (bool, ir.Node) {
if n.Op() != ir.ONAME {
base.Fatalf("reassigned %v", n)
}
@@ -814,10 +814,10 @@ func reassigned(n *ir.Node) (bool, *ir.Node) {
}
type reassignVisitor struct {
- name *ir.Node
+ name ir.Node
}
-func (v *reassignVisitor) visit(n *ir.Node) *ir.Node {
+func (v *reassignVisitor) visit(n ir.Node) ir.Node {
if n == nil {
return nil
}
@@ -854,7 +854,7 @@ func (v *reassignVisitor) visit(n *ir.Node) *ir.Node {
return nil
}
-func (v *reassignVisitor) visitList(l ir.Nodes) *ir.Node {
+func (v *reassignVisitor) visitList(l ir.Nodes) ir.Node {
for _, n := range l.Slice() {
if a := v.visit(n); a != nil {
return a
@@ -863,7 +863,7 @@ func (v *reassignVisitor) visitList(l ir.Nodes) *ir.Node {
return nil
}
-func inlParam(t *types.Field, as *ir.Node, inlvars map[*ir.Node]*ir.Node) *ir.Node {
+func inlParam(t *types.Field, as ir.Node, inlvars map[ir.Node]ir.Node) ir.Node {
n := ir.AsNode(t.Nname)
if n == nil || ir.IsBlank(n) {
return ir.BlankNode
@@ -887,7 +887,7 @@ var inlgen int
// parameters.
// The result of mkinlcall MUST be assigned back to n, e.g.
// n.Left = mkinlcall(n.Left, fn, isddd)
-func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node {
+func mkinlcall(n, fn ir.Node, maxCost int32, inlMap map[ir.Node]bool) ir.Node {
if fn.Func().Inl == nil {
if logopt.Enabled() {
logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(Curfn),
@@ -969,10 +969,10 @@ func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node
}
// Make temp names to use instead of the originals.
- inlvars := make(map[*ir.Node]*ir.Node)
+ inlvars := make(map[ir.Node]ir.Node)
// record formals/locals for later post-processing
- var inlfvars []*ir.Node
+ var inlfvars []ir.Node
// Handle captured variables when inlining closures.
if fn.Name().Defn != nil {
@@ -1040,7 +1040,7 @@ func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node
}
nreturns := 0
- ir.InspectList(ir.AsNodes(fn.Func().Inl.Body), func(n *ir.Node) bool {
+ ir.InspectList(ir.AsNodes(fn.Func().Inl.Body), func(n ir.Node) bool {
if n != nil && n.Op() == ir.ORETURN {
nreturns++
}
@@ -1053,9 +1053,9 @@ func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node
delayretvars := nreturns == 1
// temporaries for return values.
- var retvars []*ir.Node
+ var retvars []ir.Node
for i, t := range fn.Type().Results().Fields().Slice() {
- var m *ir.Node
+ var m ir.Node
if n := ir.AsNode(t.Nname); n != nil && !ir.IsBlank(n) && !strings.HasPrefix(n.Sym().Name, "~r") {
m = inlvar(n)
m = typecheck(m, ctxExpr)
@@ -1093,7 +1093,7 @@ func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node
// For non-dotted calls to variadic functions, we assign the
// variadic parameter's temp name separately.
- var vas *ir.Node
+ var vas ir.Node
if recv := fn.Type().Recv(); recv != nil {
as.PtrList().Append(inlParam(recv, as, inlvars))
@@ -1228,7 +1228,7 @@ func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node
// Every time we expand a function we generate a new set of tmpnames,
// PAUTO's in the calling functions, and link them off of the
// PPARAM's, PAUTOS and PPARAMOUTs of the called function.
-func inlvar(var_ *ir.Node) *ir.Node {
+func inlvar(var_ ir.Node) ir.Node {
if base.Flag.LowerM > 3 {
fmt.Printf("inlvar %+v\n", var_)
}
@@ -1245,7 +1245,7 @@ func inlvar(var_ *ir.Node) *ir.Node {
}
// Synthesize a variable to store the inlined function's results in.
-func retvar(t *types.Field, i int) *ir.Node {
+func retvar(t *types.Field, i int) ir.Node {
n := NewName(lookupN("~R", i))
n.SetType(t.Type)
n.SetClass(ir.PAUTO)
@@ -1257,7 +1257,7 @@ func retvar(t *types.Field, i int) *ir.Node {
// Synthesize a variable to store the inlined function's arguments
// when they come from a multiple return call.
-func argvar(t *types.Type, i int) *ir.Node {
+func argvar(t *types.Type, i int) ir.Node {
n := NewName(lookupN("~arg", i))
n.SetType(t.Elem())
n.SetClass(ir.PAUTO)
@@ -1274,13 +1274,13 @@ type inlsubst struct {
retlabel *types.Sym
// Temporary result variables.
- retvars []*ir.Node
+ retvars []ir.Node
// Whether result variables should be initialized at the
// "return" statement.
delayretvars bool
- inlvars map[*ir.Node]*ir.Node
+ inlvars map[ir.Node]ir.Node
// bases maps from original PosBase to PosBase with an extra
// inlined call frame.
@@ -1292,8 +1292,8 @@ type inlsubst struct {
}
// list inlines a list of nodes.
-func (subst *inlsubst) list(ll ir.Nodes) []*ir.Node {
- s := make([]*ir.Node, 0, ll.Len())
+func (subst *inlsubst) list(ll ir.Nodes) []ir.Node {
+ s := make([]ir.Node, 0, ll.Len())
for _, n := range ll.Slice() {
s = append(s, subst.node(n))
}
@@ -1304,7 +1304,7 @@ func (subst *inlsubst) list(ll ir.Nodes) []*ir.Node {
// inlined function, substituting references to input/output
// parameters with ones to the tmpnames, and substituting returns with
// assignments to the output.
-func (subst *inlsubst) node(n *ir.Node) *ir.Node {
+func (subst *inlsubst) node(n ir.Node) ir.Node {
if n == nil {
return nil
}
@@ -1409,8 +1409,8 @@ func (subst *inlsubst) updatedPos(xpos src.XPos) src.XPos {
return base.Ctxt.PosTable.XPos(pos)
}
-func pruneUnusedAutos(ll []*ir.Node, vis *hairyVisitor) []*ir.Node {
- s := make([]*ir.Node, 0, len(ll))
+func pruneUnusedAutos(ll []ir.Node, vis *hairyVisitor) []ir.Node {
+ s := make([]ir.Node, 0, len(ll))
for _, n := range ll {
if n.Class() == ir.PAUTO {
if _, found := vis.usedLocals[n]; !found {
@@ -1424,9 +1424,9 @@ func pruneUnusedAutos(ll []*ir.Node, vis *hairyVisitor) []*ir.Node {
// devirtualize replaces interface method calls within fn with direct
// concrete-type method calls where applicable.
-func devirtualize(fn *ir.Node) {
+func devirtualize(fn ir.Node) {
Curfn = fn
- ir.InspectList(fn.Body(), func(n *ir.Node) bool {
+ ir.InspectList(fn.Body(), func(n ir.Node) bool {
if n.Op() == ir.OCALLINTER {
devirtualizeCall(n)
}
@@ -1434,7 +1434,7 @@ func devirtualize(fn *ir.Node) {
})
}
-func devirtualizeCall(call *ir.Node) {
+func devirtualizeCall(call ir.Node) {
recv := staticValue(call.Left().Left())
if recv.Op() != ir.OCONVIFACE {
return
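Many hunks in inl.go rewrite closure literals passed to ir.InspectList from func(n *ir.Node) bool to func(n ir.Node) bool; the function type is part of the walker's signature, so the compiler forces every callback to change in lockstep. A minimal sketch of the pattern with a toy walker:

	package main

	import "fmt"

	type Node interface {
		Op() string
		Kids() []Node
	}

	type node struct {
		op   string
		kids []Node
	}

	func (n *node) Op() string   { return n.op }
	func (n *node) Kids() []Node { return n.kids }

	// Inspect visits n and, while f returns true, its children.
	func Inspect(n Node, f func(Node) bool) {
		if n == nil || !f(n) {
			return
		}
		for _, k := range n.Kids() {
			Inspect(k, f)
		}
	}

	func main() {
		fn := &node{op: "ODCLFUNC", kids: []Node{&node{op: "ORETURN"}}}
		nreturns := 0
		Inspect(fn, func(n Node) bool { // was func(n *node) bool
			if n.Op() == "ORETURN" {
				nreturns++
			}
			return true
		})
		fmt.Println(nreturns) // 1
	}
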
diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
index a7d605f3ba..30ee57c02d 100644
--- a/src/cmd/compile/internal/gc/main.go
+++ b/src/cmd/compile/internal/gc/main.go
@@ -330,7 +330,7 @@ func Main(archInit func(*Arch)) {
if base.Flag.LowerL != 0 {
// Find functions that can be inlined and clone them before walk expands them.
- visitBottomUp(xtop, func(list []*ir.Node, recursive bool) {
+ visitBottomUp(xtop, func(list []ir.Node, recursive bool) {
numfns := numNonClosures(list)
for _, n := range list {
if !recursive || numfns > 1 {
@@ -481,7 +481,7 @@ func Main(archInit func(*Arch)) {
}
// numNonClosures returns the number of functions in list which are not closures.
-func numNonClosures(list []*ir.Node) int {
+func numNonClosures(list []ir.Node) int {
count := 0
for _, n := range list {
if n.Func().OClosure == nil {
diff --git a/src/cmd/compile/internal/gc/mkbuiltin.go b/src/cmd/compile/internal/gc/mkbuiltin.go
index 8fa6d02f2c..d763f1ebee 100644
--- a/src/cmd/compile/internal/gc/mkbuiltin.go
+++ b/src/cmd/compile/internal/gc/mkbuiltin.go
@@ -207,7 +207,7 @@ func (i *typeInterner) fields(fl *ast.FieldList, keepNames bool) string {
}
}
}
- return fmt.Sprintf("[]*ir.Node{%s}", strings.Join(res, ", "))
+ return fmt.Sprintf("[]ir.Node{%s}", strings.Join(res, ", "))
}
func intconst(e ast.Expr) int64 {
diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go
index d9642f4b67..950d509047 100644
--- a/src/cmd/compile/internal/gc/noder.go
+++ b/src/cmd/compile/internal/gc/noder.go
@@ -152,7 +152,7 @@ type noder struct {
lastCloseScopePos syntax.Pos
}
-func (p *noder) funcBody(fn *ir.Node, block *syntax.BlockStmt) {
+func (p *noder) funcBody(fn ir.Node, block *syntax.BlockStmt) {
oldScope := p.scope
p.scope = 0
funchdr(fn)
@@ -160,7 +160,7 @@ func (p *noder) funcBody(fn *ir.Node, block *syntax.BlockStmt) {
if block != nil {
body := p.stmts(block.List)
if body == nil {
- body = []*ir.Node{ir.Nod(ir.OEMPTY, nil, nil)}
+ body = []ir.Node{ir.Nod(ir.OEMPTY, nil, nil)}
}
fn.PtrBody().Set(body)
@@ -294,7 +294,7 @@ func (p *noder) node() {
clearImports()
}
-func (p *noder) decls(decls []syntax.Decl) (l []*ir.Node) {
+func (p *noder) decls(decls []syntax.Decl) (l []ir.Node) {
var cs constState
for _, decl := range decls {
@@ -378,11 +378,11 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) {
my.Block = 1 // at top level
}
-func (p *noder) varDecl(decl *syntax.VarDecl) []*ir.Node {
+func (p *noder) varDecl(decl *syntax.VarDecl) []ir.Node {
names := p.declNames(decl.NameList)
typ := p.typeExprOrNil(decl.Type)
- var exprs []*ir.Node
+ var exprs []ir.Node
if decl.Values != nil {
exprs = p.exprList(decl.Values)
}
@@ -414,12 +414,12 @@ func (p *noder) varDecl(decl *syntax.VarDecl) []*ir.Node {
// constant declarations are handled correctly (e.g., issue 15550).
type constState struct {
group *syntax.Group
- typ *ir.Node
- values []*ir.Node
+ typ ir.Node
+ values []ir.Node
iota int64
}
-func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*ir.Node {
+func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node {
if decl.Group == nil || decl.Group != cs.group {
*cs = constState{
group: decl.Group,
@@ -433,7 +433,7 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*ir.Node {
names := p.declNames(decl.NameList)
typ := p.typeExprOrNil(decl.Type)
- var values []*ir.Node
+ var values []ir.Node
if decl.Values != nil {
values = p.exprList(decl.Values)
cs.typ, cs.values = typ, values
@@ -444,7 +444,7 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*ir.Node {
typ, values = cs.typ, cs.values
}
- nn := make([]*ir.Node, 0, len(names))
+ nn := make([]ir.Node, 0, len(names))
for i, n := range names {
if i >= len(values) {
base.Errorf("missing value in const declaration")
@@ -474,7 +474,7 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*ir.Node {
return nn
}
-func (p *noder) typeDecl(decl *syntax.TypeDecl) *ir.Node {
+func (p *noder) typeDecl(decl *syntax.TypeDecl) ir.Node {
n := p.declName(decl.Name)
n.SetOp(ir.OTYPE)
declare(n, dclcontext)
@@ -500,21 +500,21 @@ func (p *noder) typeDecl(decl *syntax.TypeDecl) *ir.Node {
return nod
}
-func (p *noder) declNames(names []*syntax.Name) []*ir.Node {
- nodes := make([]*ir.Node, 0, len(names))
+func (p *noder) declNames(names []*syntax.Name) []ir.Node {
+ nodes := make([]ir.Node, 0, len(names))
for _, name := range names {
nodes = append(nodes, p.declName(name))
}
return nodes
}
-func (p *noder) declName(name *syntax.Name) *ir.Node {
+func (p *noder) declName(name *syntax.Name) ir.Node {
n := dclname(p.name(name))
n.SetPos(p.pos(name))
return n
}
-func (p *noder) funcDecl(fun *syntax.FuncDecl) *ir.Node {
+func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node {
name := p.name(fun.Name)
t := p.signature(fun.Recv, fun.Type)
f := p.nod(fun, ir.ODCLFUNC, nil, nil)
@@ -580,7 +580,7 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *ir.Node {
return f
}
-func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) *ir.Node {
+func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) ir.Node {
n := p.nod(typ, ir.OTFUNC, nil, nil)
if recv != nil {
n.SetLeft(p.param(recv, false, false))
@@ -590,8 +590,8 @@ func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) *ir.Node {
return n
}
-func (p *noder) params(params []*syntax.Field, dddOk bool) []*ir.Node {
- nodes := make([]*ir.Node, 0, len(params))
+func (p *noder) params(params []*syntax.Field, dddOk bool) []ir.Node {
+ nodes := make([]ir.Node, 0, len(params))
for i, param := range params {
p.setlineno(param)
nodes = append(nodes, p.param(param, dddOk, i+1 == len(params)))
@@ -599,7 +599,7 @@ func (p *noder) params(params []*syntax.Field, dddOk bool) []*ir.Node {
return nodes
}
-func (p *noder) param(param *syntax.Field, dddOk, final bool) *ir.Node {
+func (p *noder) param(param *syntax.Field, dddOk, final bool) ir.Node {
var name *types.Sym
if param.Name != nil {
name = p.name(param.Name)
@@ -633,22 +633,22 @@ func (p *noder) param(param *syntax.Field, dddOk, final bool) *ir.Node {
return n
}
-func (p *noder) exprList(expr syntax.Expr) []*ir.Node {
+func (p *noder) exprList(expr syntax.Expr) []ir.Node {
if list, ok := expr.(*syntax.ListExpr); ok {
return p.exprs(list.ElemList)
}
- return []*ir.Node{p.expr(expr)}
+ return []ir.Node{p.expr(expr)}
}
-func (p *noder) exprs(exprs []syntax.Expr) []*ir.Node {
- nodes := make([]*ir.Node, 0, len(exprs))
+func (p *noder) exprs(exprs []syntax.Expr) []ir.Node {
+ nodes := make([]ir.Node, 0, len(exprs))
for _, expr := range exprs {
nodes = append(nodes, p.expr(expr))
}
return nodes
}
-func (p *noder) expr(expr syntax.Expr) *ir.Node {
+func (p *noder) expr(expr syntax.Expr) ir.Node {
p.setlineno(expr)
switch expr := expr.(type) {
case nil, *syntax.BadExpr:
@@ -699,7 +699,7 @@ func (p *noder) expr(expr syntax.Expr) *ir.Node {
op = ir.OSLICE3
}
n := p.nod(expr, op, p.expr(expr.X), nil)
- var index [3]*ir.Node
+ var index [3]ir.Node
for i, x := range &expr.Index {
if x != nil {
index[i] = p.expr(x)
@@ -725,7 +725,7 @@ func (p *noder) expr(expr syntax.Expr) *ir.Node {
return n
case *syntax.ArrayType:
- var len *ir.Node
+ var len ir.Node
if expr.Len != nil {
len = p.expr(expr.Len)
} else {
@@ -765,7 +765,7 @@ func (p *noder) expr(expr syntax.Expr) *ir.Node {
// sum efficiently handles very large summation expressions (such as
// in issue #16394). In particular, it avoids left recursion and
// collapses string literals.
-func (p *noder) sum(x syntax.Expr) *ir.Node {
+func (p *noder) sum(x syntax.Expr) ir.Node {
// While we need to handle long sums with asymptotic
// efficiency, the vast majority of sums are very small: ~95%
// have only 2 or 3 operands, and ~99% of string literals are
@@ -800,7 +800,7 @@ func (p *noder) sum(x syntax.Expr) *ir.Node {
// handle correctly. For now, we avoid these problems by
// treating named string constants the same as non-constant
// operands.
- var nstr *ir.Node
+ var nstr ir.Node
chunks := make([]string, 0, 1)
n := p.expr(x)
@@ -838,12 +838,12 @@ func (p *noder) sum(x syntax.Expr) *ir.Node {
return n
}
-func (p *noder) typeExpr(typ syntax.Expr) *ir.Node {
+func (p *noder) typeExpr(typ syntax.Expr) ir.Node {
// TODO(mdempsky): Be stricter? typecheck should handle errors anyway.
return p.expr(typ)
}
-func (p *noder) typeExprOrNil(typ syntax.Expr) *ir.Node {
+func (p *noder) typeExprOrNil(typ syntax.Expr) ir.Node {
if typ != nil {
return p.expr(typ)
}
@@ -862,11 +862,11 @@ func (p *noder) chanDir(dir syntax.ChanDir) types.ChanDir {
panic("unhandled ChanDir")
}
-func (p *noder) structType(expr *syntax.StructType) *ir.Node {
- l := make([]*ir.Node, 0, len(expr.FieldList))
+func (p *noder) structType(expr *syntax.StructType) ir.Node {
+ l := make([]ir.Node, 0, len(expr.FieldList))
for i, field := range expr.FieldList {
p.setlineno(field)
- var n *ir.Node
+ var n ir.Node
if field.Name == nil {
n = p.embedded(field.Type)
} else {
@@ -884,11 +884,11 @@ func (p *noder) structType(expr *syntax.StructType) *ir.Node {
return n
}
-func (p *noder) interfaceType(expr *syntax.InterfaceType) *ir.Node {
- l := make([]*ir.Node, 0, len(expr.MethodList))
+func (p *noder) interfaceType(expr *syntax.InterfaceType) ir.Node {
+ l := make([]ir.Node, 0, len(expr.MethodList))
for _, method := range expr.MethodList {
p.setlineno(method)
- var n *ir.Node
+ var n ir.Node
if method.Name == nil {
n = p.nodSym(method, ir.ODCLFIELD, importName(p.packname(method.Type)), nil)
} else {
@@ -934,7 +934,7 @@ func (p *noder) packname(expr syntax.Expr) *types.Sym {
panic(fmt.Sprintf("unexpected packname: %#v", expr))
}
-func (p *noder) embedded(typ syntax.Expr) *ir.Node {
+func (p *noder) embedded(typ syntax.Expr) ir.Node {
op, isStar := typ.(*syntax.Operation)
if isStar {
if op.Op != syntax.Mul || op.Y != nil {
@@ -953,12 +953,12 @@ func (p *noder) embedded(typ syntax.Expr) *ir.Node {
return n
}
-func (p *noder) stmts(stmts []syntax.Stmt) []*ir.Node {
+func (p *noder) stmts(stmts []syntax.Stmt) []ir.Node {
return p.stmtsFall(stmts, false)
}
-func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []*ir.Node {
- var nodes []*ir.Node
+func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []ir.Node {
+ var nodes []ir.Node
for i, stmt := range stmts {
s := p.stmtFall(stmt, fallOK && i+1 == len(stmts))
if s == nil {
@@ -971,11 +971,11 @@ func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []*ir.Node {
return nodes
}
-func (p *noder) stmt(stmt syntax.Stmt) *ir.Node {
+func (p *noder) stmt(stmt syntax.Stmt) ir.Node {
return p.stmtFall(stmt, false)
}
-func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *ir.Node {
+func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node {
p.setlineno(stmt)
switch stmt := stmt.(type) {
case *syntax.EmptyStmt:
@@ -1053,7 +1053,7 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *ir.Node {
}
return p.nod(stmt, op, p.expr(stmt.Call), nil)
case *syntax.ReturnStmt:
- var results []*ir.Node
+ var results []ir.Node
if stmt.Results != nil {
results = p.exprList(stmt.Results)
}
@@ -1085,7 +1085,7 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *ir.Node {
panic("unhandled Stmt")
}
-func (p *noder) assignList(expr syntax.Expr, defn *ir.Node, colas bool) []*ir.Node {
+func (p *noder) assignList(expr syntax.Expr, defn ir.Node, colas bool) []ir.Node {
if !colas {
return p.exprList(expr)
}
@@ -1099,7 +1099,7 @@ func (p *noder) assignList(expr syntax.Expr, defn *ir.Node, colas bool) []*ir.No
exprs = []syntax.Expr{expr}
}
- res := make([]*ir.Node, len(exprs))
+ res := make([]ir.Node, len(exprs))
seen := make(map[*types.Sym]bool, len(exprs))
newOrErr := false
@@ -1145,14 +1145,14 @@ func (p *noder) assignList(expr syntax.Expr, defn *ir.Node, colas bool) []*ir.No
return res
}
-func (p *noder) blockStmt(stmt *syntax.BlockStmt) []*ir.Node {
+func (p *noder) blockStmt(stmt *syntax.BlockStmt) []ir.Node {
p.openScope(stmt.Pos())
nodes := p.stmts(stmt.List)
p.closeScope(stmt.Rbrace)
return nodes
}
-func (p *noder) ifStmt(stmt *syntax.IfStmt) *ir.Node {
+func (p *noder) ifStmt(stmt *syntax.IfStmt) ir.Node {
p.openScope(stmt.Pos())
n := p.nod(stmt, ir.OIF, nil, nil)
if stmt.Init != nil {
@@ -1174,9 +1174,9 @@ func (p *noder) ifStmt(stmt *syntax.IfStmt) *ir.Node {
return n
}
-func (p *noder) forStmt(stmt *syntax.ForStmt) *ir.Node {
+func (p *noder) forStmt(stmt *syntax.ForStmt) ir.Node {
p.openScope(stmt.Pos())
- var n *ir.Node
+ var n ir.Node
if r, ok := stmt.Init.(*syntax.RangeClause); ok {
if stmt.Cond != nil || stmt.Post != nil {
panic("unexpected RangeClause")
@@ -1203,7 +1203,7 @@ func (p *noder) forStmt(stmt *syntax.ForStmt) *ir.Node {
return n
}
-func (p *noder) switchStmt(stmt *syntax.SwitchStmt) *ir.Node {
+func (p *noder) switchStmt(stmt *syntax.SwitchStmt) ir.Node {
p.openScope(stmt.Pos())
n := p.nod(stmt, ir.OSWITCH, nil, nil)
if stmt.Init != nil {
@@ -1223,8 +1223,8 @@ func (p *noder) switchStmt(stmt *syntax.SwitchStmt) *ir.Node {
return n
}
-func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.Node, rbrace syntax.Pos) []*ir.Node {
- nodes := make([]*ir.Node, 0, len(clauses))
+func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch ir.Node, rbrace syntax.Pos) []ir.Node {
+ nodes := make([]ir.Node, 0, len(clauses))
for i, clause := range clauses {
p.setlineno(clause)
if i > 0 {
@@ -1273,14 +1273,14 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.Node, rbra
return nodes
}
-func (p *noder) selectStmt(stmt *syntax.SelectStmt) *ir.Node {
+func (p *noder) selectStmt(stmt *syntax.SelectStmt) ir.Node {
n := p.nod(stmt, ir.OSELECT, nil, nil)
n.PtrList().Set(p.commClauses(stmt.Body, stmt.Rbrace))
return n
}
-func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []*ir.Node {
- nodes := make([]*ir.Node, 0, len(clauses))
+func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []ir.Node {
+ nodes := make([]ir.Node, 0, len(clauses))
for i, clause := range clauses {
p.setlineno(clause)
if i > 0 {
@@ -1301,16 +1301,16 @@ func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []*
return nodes
}
-func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) *ir.Node {
+func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) ir.Node {
lhs := p.nodSym(label, ir.OLABEL, nil, p.name(label.Label))
- var ls *ir.Node
+ var ls ir.Node
if label.Stmt != nil { // TODO(mdempsky): Should always be present.
ls = p.stmtFall(label.Stmt, fallOK)
}
lhs.Name().Defn = ls
- l := []*ir.Node{lhs}
+ l := []ir.Node{lhs}
if ls != nil {
if ls.Op() == ir.OBLOCK && ls.Init().Len() == 0 {
l = append(l, ls.List().Slice()...)
@@ -1443,12 +1443,12 @@ func (p *noder) name(name *syntax.Name) *types.Sym {
return lookup(name.Value)
}
-func (p *noder) mkname(name *syntax.Name) *ir.Node {
+func (p *noder) mkname(name *syntax.Name) ir.Node {
// TODO(mdempsky): Set line number?
return mkname(p.name(name))
}
-func (p *noder) wrapname(n syntax.Node, x *ir.Node) *ir.Node {
+func (p *noder) wrapname(n syntax.Node, x ir.Node) ir.Node {
// These nodes do not carry line numbers.
// Introduce a wrapper node to give them the correct line.
switch x.Op() {
@@ -1464,11 +1464,11 @@ func (p *noder) wrapname(n syntax.Node, x *ir.Node) *ir.Node {
return x
}
-func (p *noder) nod(orig syntax.Node, op ir.Op, left, right *ir.Node) *ir.Node {
+func (p *noder) nod(orig syntax.Node, op ir.Op, left, right ir.Node) ir.Node {
return ir.NodAt(p.pos(orig), op, left, right)
}
-func (p *noder) nodSym(orig syntax.Node, op ir.Op, left *ir.Node, sym *types.Sym) *ir.Node {
+func (p *noder) nodSym(orig syntax.Node, op ir.Op, left ir.Node, sym *types.Sym) ir.Node {
n := nodSym(op, left, sym)
n.SetPos(p.pos(orig))
return n
@@ -1668,7 +1668,7 @@ func safeArg(name string) bool {
return '0' <= c && c <= '9' || 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || c == '.' || c == '_' || c == '/' || c >= utf8.RuneSelf
}
-func mkname(sym *types.Sym) *ir.Node {
+func mkname(sym *types.Sym) ir.Node {
n := oldname(sym)
if n.Name() != nil && n.Name().Pack != nil {
n.Name().Pack.Name().SetUsed(true)
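Among the noder hunks above is (*noder).sum, whose comment explains that it handles very large summation chains without left recursion and collapses adjacent string literals so concatenation stays linear. A rough standalone sketch of the collapsing step, under invented operand types (this is not the noder's real representation):

```go
// Sketch: collapse runs of adjacent string constants in a summation,
// in the spirit of (*noder).sum. The operand type is invented.
package main

import (
	"fmt"
	"strings"
)

type operand struct {
	isConst bool
	str     string // valid when isConst
	expr    string // placeholder for a non-constant operand
}

func collapse(ops []operand) []operand {
	var out []operand
	var run []string // pending adjacent string constants
	flush := func() {
		if len(run) > 0 {
			out = append(out, operand{isConst: true, str: strings.Join(run, "")})
			run = run[:0]
		}
	}
	for _, op := range ops {
		if op.isConst {
			run = append(run, op.str) // defer concatenation: O(n) total
			continue
		}
		flush()
		out = append(out, op)
	}
	flush()
	return out
}

func main() {
	ops := []operand{
		{isConst: true, str: "a"},
		{isConst: true, str: "b"},
		{expr: "x"},
		{isConst: true, str: "c"},
	}
	fmt.Printf("%+v\n", collapse(ops)) // "ab", x, "c"
}
```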
diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go
index 05f8358fdf..d566959d9e 100644
--- a/src/cmd/compile/internal/gc/obj.go
+++ b/src/cmd/compile/internal/gc/obj.go
@@ -228,7 +228,7 @@ func addptabs() {
}
}
-func dumpGlobal(n *ir.Node) {
+func dumpGlobal(n ir.Node) {
if n.Type() == nil {
base.Fatalf("external %v nil type\n", n)
}
@@ -242,7 +242,7 @@ func dumpGlobal(n *ir.Node) {
ggloblnod(n)
}
-func dumpGlobalConst(n *ir.Node) {
+func dumpGlobalConst(n ir.Node) {
// only export typed constants
t := n.Type()
if t == nil {
@@ -475,7 +475,7 @@ func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.
var slicedataGen int
-func slicedata(pos src.XPos, s string) *ir.Node {
+func slicedata(pos src.XPos, s string) ir.Node {
slicedataGen++
symname := fmt.Sprintf(".gobytes.%d", slicedataGen)
sym := ir.LocalPkg.Lookup(symname)
@@ -489,7 +489,7 @@ func slicedata(pos src.XPos, s string) *ir.Node {
return symnode
}
-func slicebytes(nam *ir.Node, s string) {
+func slicebytes(nam ir.Node, s string) {
if nam.Op() != ir.ONAME {
base.Fatalf("slicebytes %v", nam)
}
@@ -530,7 +530,7 @@ func dsymptrWeakOff(s *obj.LSym, off int, x *obj.LSym) int {
// slicesym writes a static slice symbol {&arr, lencap, lencap} to n.
// arr must be an ONAME. slicesym does not modify n.
-func slicesym(n, arr *ir.Node, lencap int64) {
+func slicesym(n, arr ir.Node, lencap int64) {
s := n.Sym().Linksym()
off := n.Offset()
if arr.Op() != ir.ONAME {
@@ -543,7 +543,7 @@ func slicesym(n, arr *ir.Node, lencap int64) {
// addrsym writes the static address of a to n. a must be an ONAME.
// Neither n nor a is modified.
-func addrsym(n, a *ir.Node) {
+func addrsym(n, a ir.Node) {
if n.Op() != ir.ONAME {
base.Fatalf("addrsym n op %v", n.Op())
}
@@ -559,7 +559,7 @@ func addrsym(n, a *ir.Node) {
// pfuncsym writes the static address of f to n. f must be a global function.
// Neither n nor f is modified.
-func pfuncsym(n, f *ir.Node) {
+func pfuncsym(n, f ir.Node) {
if n.Op() != ir.ONAME {
base.Fatalf("pfuncsym n op %v", n.Op())
}
@@ -575,7 +575,7 @@ func pfuncsym(n, f *ir.Node) {
// litsym writes the static literal c to n.
// Neither n nor c is modified.
-func litsym(n, c *ir.Node, wid int) {
+func litsym(n, c ir.Node, wid int) {
if n.Op() != ir.ONAME {
base.Fatalf("litsym n op %v", n.Op())
}
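slicesym above emits a static slice symbol as three words, {&arr, lencap, lencap}. As a reminder of that layout, here is a hedged illustration of the (data, len, cap) header using a hand-rolled struct; the header type is an assumption mirroring the runtime layout, for illustration only:

```go
// Sketch of the slice-header layout that slicesym writes statically:
// a slice value is (data pointer, len, cap).
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	var arr [4]byte
	s := arr[:] // header points at arr, len = cap = 4
	type header struct { // assumed layout, matching the runtime's
		data unsafe.Pointer
		len  int
		cap  int
	}
	h := (*header)(unsafe.Pointer(&s))
	fmt.Println(h.data == unsafe.Pointer(&arr), h.len, h.cap) // true 4 4
}
```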
diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go
index 36a4095640..b7d713439b 100644
--- a/src/cmd/compile/internal/gc/order.go
+++ b/src/cmd/compile/internal/gc/order.go
@@ -44,27 +44,27 @@ import (
// Order holds state during the ordering process.
type Order struct {
- out []*ir.Node // list of generated statements
- temp []*ir.Node // stack of temporary variables
- free map[string][]*ir.Node // free list of unused temporaries, by type.LongString().
+ out []ir.Node // list of generated statements
+ temp []ir.Node // stack of temporary variables
+ free map[string][]ir.Node // free list of unused temporaries, by type.LongString().
}
// Order rewrites fn.Nbody to apply the ordering constraints
// described in the comment at the top of the file.
-func order(fn *ir.Node) {
+func order(fn ir.Node) {
if base.Flag.W > 1 {
s := fmt.Sprintf("\nbefore order %v", fn.Func().Nname.Sym())
ir.DumpList(s, fn.Body())
}
- orderBlock(fn.PtrBody(), map[string][]*ir.Node{})
+ orderBlock(fn.PtrBody(), map[string][]ir.Node{})
}
// newTemp allocates a new temporary with the given type,
// pushes it onto the temp stack, and returns it.
// If clear is true, newTemp emits code to zero the temporary.
-func (o *Order) newTemp(t *types.Type, clear bool) *ir.Node {
- var v *ir.Node
+func (o *Order) newTemp(t *types.Type, clear bool) ir.Node {
+ var v ir.Node
// Note: LongString is close to the type equality we want,
// but not exactly. We still need to double-check with types.Identical.
key := t.LongString()
@@ -103,7 +103,7 @@ func (o *Order) newTemp(t *types.Type, clear bool) *ir.Node {
// (The other candidate would be map access, but map access
// returns a pointer to the result data instead of taking a pointer
// to be filled in.)
-func (o *Order) copyExpr(n *ir.Node, t *types.Type, clear bool) *ir.Node {
+func (o *Order) copyExpr(n ir.Node, t *types.Type, clear bool) ir.Node {
v := o.newTemp(t, clear)
a := ir.Nod(ir.OAS, v, n)
a = typecheck(a, ctxStmt)
@@ -115,7 +115,7 @@ func (o *Order) copyExpr(n *ir.Node, t *types.Type, clear bool) *ir.Node {
// The definition of cheap is that n is a variable or constant.
// If not, cheapExpr allocates a new tmp, emits tmp = n,
// and then returns tmp.
-func (o *Order) cheapExpr(n *ir.Node) *ir.Node {
+func (o *Order) cheapExpr(n ir.Node) ir.Node {
if n == nil {
return nil
}
@@ -143,7 +143,7 @@ func (o *Order) cheapExpr(n *ir.Node) *ir.Node {
// as assigning to the original n.
//
// The intended use is to apply to x when rewriting x += y into x = x + y.
-func (o *Order) safeExpr(n *ir.Node) *ir.Node {
+func (o *Order) safeExpr(n ir.Node) ir.Node {
switch n.Op() {
case ir.ONAME, ir.OLITERAL, ir.ONIL:
return n
@@ -167,7 +167,7 @@ func (o *Order) safeExpr(n *ir.Node) *ir.Node {
return typecheck(a, ctxExpr)
case ir.OINDEX, ir.OINDEXMAP:
- var l *ir.Node
+ var l ir.Node
if n.Left().Type().IsArray() {
l = o.safeExpr(n.Left())
} else {
@@ -194,7 +194,7 @@ func (o *Order) safeExpr(n *ir.Node) *ir.Node {
// of ordinary stack variables, those are not 'isaddrokay'. Temporaries are okay,
// because we emit explicit VARKILL instructions marking the end of those
// temporaries' lifetimes.
-func isaddrokay(n *ir.Node) bool {
+func isaddrokay(n ir.Node) bool {
return islvalue(n) && (n.Op() != ir.ONAME || n.Class() == ir.PEXTERN || ir.IsAutoTmp(n))
}
@@ -203,7 +203,7 @@ func isaddrokay(n *ir.Node) bool {
// tmp = n, and then returns tmp.
// The result of addrTemp MUST be assigned back to n, e.g.
// n.Left = o.addrTemp(n.Left)
-func (o *Order) addrTemp(n *ir.Node) *ir.Node {
+func (o *Order) addrTemp(n ir.Node) ir.Node {
if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL {
// TODO: expand this to all static composite literal nodes?
n = defaultlit(n, nil)
@@ -225,7 +225,7 @@ func (o *Order) addrTemp(n *ir.Node) *ir.Node {
// mapKeyTemp prepares n to be a key in a map runtime call and returns n.
// It should only be used for map runtime calls which have *_fast* versions.
-func (o *Order) mapKeyTemp(t *types.Type, n *ir.Node) *ir.Node {
+func (o *Order) mapKeyTemp(t *types.Type, n ir.Node) ir.Node {
// Most map calls need to take the address of the key.
// Exception: map*_fast* calls. See golang.org/issue/19015.
if mapfast(t) == mapslow {
@@ -248,7 +248,7 @@ func (o *Order) mapKeyTemp(t *types.Type, n *ir.Node) *ir.Node {
// It would be nice to handle these generally, but because
// []byte keys are not allowed in maps, the use of string(k)
// comes up in important cases in practice. See issue 3512.
-func mapKeyReplaceStrConv(n *ir.Node) bool {
+func mapKeyReplaceStrConv(n ir.Node) bool {
var replaced bool
switch n.Op() {
case ir.OBYTES2STR:
@@ -293,8 +293,8 @@ func (o *Order) popTemp(mark ordermarker) {
// cleanTempNoPop emits VARKILL instructions to *out
// for each temporary above the mark on the temporary stack.
// It does not pop the temporaries from the stack.
-func (o *Order) cleanTempNoPop(mark ordermarker) []*ir.Node {
- var out []*ir.Node
+func (o *Order) cleanTempNoPop(mark ordermarker) []ir.Node {
+ var out []ir.Node
for i := len(o.temp) - 1; i >= int(mark); i-- {
n := o.temp[i]
kill := ir.Nod(ir.OVARKILL, n, nil)
@@ -324,7 +324,7 @@ func (o *Order) stmtList(l ir.Nodes) {
// m = OMAKESLICE([]T, x); OCOPY(m, s)
// and rewrites it to:
// m = OMAKESLICECOPY([]T, x, s); nil
-func orderMakeSliceCopy(s []*ir.Node) {
+func orderMakeSliceCopy(s []ir.Node) {
if base.Flag.N != 0 || instrumenting {
return
}
@@ -406,7 +406,7 @@ func (o *Order) edge() {
// orderBlock orders the block of statements in n into a new slice,
// and then replaces the old slice in n with the new slice.
// free is a map that can be used to obtain temporary variables by type.
-func orderBlock(n *ir.Nodes, free map[string][]*ir.Node) {
+func orderBlock(n *ir.Nodes, free map[string][]ir.Node) {
var order Order
order.free = free
mark := order.markTemp()
@@ -420,7 +420,7 @@ func orderBlock(n *ir.Nodes, free map[string][]*ir.Node) {
// leaves them as the init list of the final *np.
// The result of exprInPlace MUST be assigned back to n, e.g.
// n.Left = o.exprInPlace(n.Left)
-func (o *Order) exprInPlace(n *ir.Node) *ir.Node {
+func (o *Order) exprInPlace(n ir.Node) ir.Node {
var order Order
order.free = o.free
n = order.expr(n, nil)
@@ -437,7 +437,7 @@ func (o *Order) exprInPlace(n *ir.Node) *ir.Node {
// The result of orderStmtInPlace MUST be assigned back to n, e.g.
// n.Left = orderStmtInPlace(n.Left)
// free is a map that can be used to obtain temporary variables by type.
-func orderStmtInPlace(n *ir.Node, free map[string][]*ir.Node) *ir.Node {
+func orderStmtInPlace(n ir.Node, free map[string][]ir.Node) ir.Node {
var order Order
order.free = free
mark := order.markTemp()
@@ -447,7 +447,7 @@ func orderStmtInPlace(n *ir.Node, free map[string][]*ir.Node) *ir.Node {
}
// init moves n's init list to o.out.
-func (o *Order) init(n *ir.Node) {
+func (o *Order) init(n ir.Node) {
if ir.MayBeShared(n) {
// For concurrency safety, don't mutate potentially shared nodes.
// First, ensure that no work is required here.
@@ -462,7 +462,7 @@ func (o *Order) init(n *ir.Node) {
// call orders the call expression n.
// n.Op is OCALLMETH/OCALLFUNC/OCALLINTER or a builtin like OCOPY.
-func (o *Order) call(n *ir.Node) {
+func (o *Order) call(n ir.Node) {
if n.Init().Len() > 0 {
// Caller should have already called o.init(n).
base.Fatalf("%v with unexpected ninit", n.Op())
@@ -483,7 +483,7 @@ func (o *Order) call(n *ir.Node) {
if n.Op() == ir.OCALLINTER {
return
}
- keepAlive := func(arg *ir.Node) {
+ keepAlive := func(arg ir.Node) {
// If the argument is really a pointer being converted to uintptr,
// arrange for the pointer to be kept alive until the call returns,
// by copying it into a temp and marking that temp
@@ -525,7 +525,7 @@ func (o *Order) call(n *ir.Node) {
// cases they are also typically registerizable, so not much harm done.
// And this only applies to the multiple-assignment form.
// We could do a more precise analysis if needed, like in walk.go.
-func (o *Order) mapAssign(n *ir.Node) {
+func (o *Order) mapAssign(n ir.Node) {
switch n.Op() {
default:
base.Fatalf("order.mapAssign %v", n.Op())
@@ -546,7 +546,7 @@ func (o *Order) mapAssign(n *ir.Node) {
o.out = append(o.out, n)
case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2MAPR, ir.OAS2FUNC:
- var post []*ir.Node
+ var post []ir.Node
for i, m := range n.List().Slice() {
switch {
case m.Op() == ir.OINDEXMAP:
@@ -574,7 +574,7 @@ func (o *Order) mapAssign(n *ir.Node) {
// stmt orders the statement n, appending to o.out.
// Temporaries created during the statement are cleaned
// up using VARKILL instructions as possible.
-func (o *Order) stmt(n *ir.Node) {
+func (o *Order) stmt(n ir.Node) {
if n == nil {
return
}
@@ -1022,7 +1022,7 @@ func (o *Order) stmt(n *ir.Node) {
base.Pos = lno
}
-func hasDefaultCase(n *ir.Node) bool {
+func hasDefaultCase(n ir.Node) bool {
for _, ncas := range n.List().Slice() {
if ncas.Op() != ir.OCASE {
base.Fatalf("expected case, found %v", ncas.Op())
@@ -1052,7 +1052,7 @@ func (o *Order) exprListInPlace(l ir.Nodes) {
}
// prealloc[x] records the allocation to use for x.
-var prealloc = map[*ir.Node]*ir.Node{}
+var prealloc = map[ir.Node]ir.Node{}
// expr orders a single expression, appending side
// effects to o.out as needed.
@@ -1061,7 +1061,7 @@ var prealloc = map[*ir.Node]*ir.Node{}
// to avoid copying the result of the expression to a temporary.)
// The result of expr MUST be assigned back to n, e.g.
// n.Left = o.expr(n.Left, lhs)
-func (o *Order) expr(n, lhs *ir.Node) *ir.Node {
+func (o *Order) expr(n, lhs ir.Node) ir.Node {
if n == nil {
return n
}
@@ -1329,7 +1329,7 @@ func (o *Order) expr(n, lhs *ir.Node) *ir.Node {
// See issue 26552.
entries := n.List().Slice()
statics := entries[:0]
- var dynamics []*ir.Node
+ var dynamics []ir.Node
for _, r := range entries {
if r.Op() != ir.OKEY {
base.Fatalf("OMAPLIT entry not OKEY: %v\n", r)
@@ -1377,7 +1377,7 @@ func (o *Order) expr(n, lhs *ir.Node) *ir.Node {
// okas creates and returns an assignment of val to ok,
// including an explicit conversion if necessary.
-func okas(ok, val *ir.Node) *ir.Node {
+func okas(ok, val ir.Node) ir.Node {
if !ir.IsBlank(ok) {
val = conv(val, ok.Type())
}
@@ -1392,9 +1392,9 @@ func okas(ok, val *ir.Node) *ir.Node {
// tmp1, tmp2, tmp3 = ...
// a, b, a = tmp1, tmp2, tmp3
// This is necessary to ensure left to right assignment order.
-func (o *Order) as2(n *ir.Node) {
- tmplist := []*ir.Node{}
- left := []*ir.Node{}
+func (o *Order) as2(n ir.Node) {
+ tmplist := []ir.Node{}
+ left := []ir.Node{}
for ni, l := range n.List().Slice() {
if !ir.IsBlank(l) {
tmp := o.newTemp(l.Type(), l.Type().HasPointers())
@@ -1415,8 +1415,8 @@ func (o *Order) as2(n *ir.Node) {
// okAs2 orders OAS2XXX with ok.
// Just like as2, this also adds temporaries to ensure left-to-right assignment.
-func (o *Order) okAs2(n *ir.Node) {
- var tmp1, tmp2 *ir.Node
+func (o *Order) okAs2(n ir.Node) {
+ var tmp1, tmp2 ir.Node
if !ir.IsBlank(n.List().First()) {
typ := n.Right().Type()
tmp1 = o.newTemp(typ, typ.HasPointers())
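The as2/okAs2 hunks implement the rewrite described in the comment above them: a, b, a = ... becomes tmp1, tmp2, tmp3 = ...; a, b, a = tmp1, tmp2, tmp3, preserving left-to-right assignment order. A small program showing the semantics those temporaries protect:

```go
// Sketch of why (*Order).as2 introduces temporaries: in a tuple
// assignment all right-hand sides are evaluated first, then the
// assignments happen left to right, so a repeated left-hand variable
// ends up with the later value.
package main

import "fmt"

func vals() (int, int, int) { return 1, 2, 3 }

func main() {
	var a, b int
	a, b, a = vals()  // a is assigned 1, then 3: the later write wins
	fmt.Println(a, b) // 3 2

	// The ordered form the compiler effectively produces:
	t1, t2, t3 := vals()
	a, b, a = t1, t2, t3
	fmt.Println(a, b) // 3 2
}
```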
diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go
index 5827b5a7a6..221b733a07 100644
--- a/src/cmd/compile/internal/gc/pgen.go
+++ b/src/cmd/compile/internal/gc/pgen.go
@@ -24,10 +24,10 @@ import (
// "Portable" code generation.
var (
- compilequeue []*ir.Node // functions waiting to be compiled
+ compilequeue []ir.Node // functions waiting to be compiled
)
-func emitptrargsmap(fn *ir.Node) {
+func emitptrargsmap(fn ir.Node) {
if ir.FuncName(fn) == "_" || fn.Func().Nname.Sym().Linkname != "" {
return
}
@@ -68,7 +68,7 @@ func emitptrargsmap(fn *ir.Node) {
// really means, in memory, things with pointers needing zeroing at
// the top of the stack and increasing in size.
// Non-autos sort on offset.
-func cmpstackvarlt(a, b *ir.Node) bool {
+func cmpstackvarlt(a, b ir.Node) bool {
if (a.Class() == ir.PAUTO) != (b.Class() == ir.PAUTO) {
return b.Class() == ir.PAUTO
}
@@ -101,7 +101,7 @@ func cmpstackvarlt(a, b *ir.Node) bool {
}
// byStackvar implements sort.Interface for []*Node using cmpstackvarlt.
-type byStackVar []*ir.Node
+type byStackVar []ir.Node
func (s byStackVar) Len() int { return len(s) }
func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
@@ -128,7 +128,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) {
scratchUsed := false
for _, b := range f.Blocks {
for _, v := range b.Values {
- if n, ok := v.Aux.(*ir.Node); ok {
+ if n, ok := v.Aux.(ir.Node); ok {
switch n.Class() {
case ir.PPARAM, ir.PPARAMOUT:
// Don't modify nodfp; it is a global.
@@ -193,7 +193,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) {
s.stkptrsize = Rnd(s.stkptrsize, int64(Widthreg))
}
-func funccompile(fn *ir.Node) {
+func funccompile(fn ir.Node) {
if Curfn != nil {
base.Fatalf("funccompile %v inside %v", fn.Func().Nname.Sym(), Curfn.Func().Nname.Sym())
}
@@ -224,7 +224,7 @@ func funccompile(fn *ir.Node) {
dclcontext = ir.PEXTERN
}
-func compile(fn *ir.Node) {
+func compile(fn ir.Node) {
errorsBefore := base.Errors()
order(fn)
if base.Errors() > errorsBefore {
@@ -284,7 +284,7 @@ func compile(fn *ir.Node) {
// If functions are not compiled immediately,
// they are enqueued in compilequeue,
// which is drained by compileFunctions.
-func compilenow(fn *ir.Node) bool {
+func compilenow(fn ir.Node) bool {
// Issue 38068: if this function is a method AND an inline
// candidate AND was not inlined (yet), put it onto the compile
// queue instead of compiling it immediately. This is in case we
@@ -299,7 +299,7 @@ func compilenow(fn *ir.Node) bool {
// isInlinableButNotInlined returns true if 'fn' was marked as an
// inline candidate but then never inlined (presumably because we
// found no call sites).
-func isInlinableButNotInlined(fn *ir.Node) bool {
+func isInlinableButNotInlined(fn ir.Node) bool {
if fn.Func().Nname.Func().Inl == nil {
return false
}
@@ -315,7 +315,7 @@ const maxStackSize = 1 << 30
// uses it to generate a plist,
// and flushes that plist to machine code.
// worker indicates which of the backend workers is doing the processing.
-func compileSSA(fn *ir.Node, worker int) {
+func compileSSA(fn ir.Node, worker int) {
f := buildssa(fn, worker)
// Note: check arg size to fix issue 25507.
if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type().ArgWidth() >= maxStackSize {
@@ -360,7 +360,7 @@ func compileFunctions() {
sizeCalculationDisabled = true // not safe to calculate sizes concurrently
if race.Enabled {
// Randomize compilation order to try to shake out races.
- tmp := make([]*ir.Node, len(compilequeue))
+ tmp := make([]ir.Node, len(compilequeue))
perm := rand.Perm(len(compilequeue))
for i, v := range perm {
tmp[v] = compilequeue[i]
@@ -376,7 +376,7 @@ func compileFunctions() {
}
var wg sync.WaitGroup
base.Ctxt.InParallel = true
- c := make(chan *ir.Node, base.Flag.LowerC)
+ c := make(chan ir.Node, base.Flag.LowerC)
for i := 0; i < base.Flag.LowerC; i++ {
wg.Add(1)
go func(worker int) {
@@ -398,7 +398,7 @@ func compileFunctions() {
}
func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
- fn := curfn.(*ir.Node)
+ fn := curfn.(ir.Node)
if fn.Func().Nname != nil {
if expect := fn.Func().Nname.Sym().Linksym(); fnsym != expect {
base.Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
@@ -432,7 +432,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S
// Deciding the right answer is, as they say, future work.
isODCLFUNC := fn.Op() == ir.ODCLFUNC
- var apdecls []*ir.Node
+ var apdecls []ir.Node
// Populate decls for fn.
if isODCLFUNC {
for _, n := range fn.Func().Dcl {
@@ -489,7 +489,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S
return scopes, inlcalls
}
-func declPos(decl *ir.Node) src.XPos {
+func declPos(decl ir.Node) src.XPos {
if decl.Name().Defn != nil && (decl.Name().Captured() || decl.Name().Byval()) {
// It's not clear which position is correct for captured variables here:
// * decl.Pos is the wrong position for captured variables, in the inner
@@ -512,10 +512,10 @@ func declPos(decl *ir.Node) src.XPos {
// createSimpleVars creates a DWARF entry for every variable declared in the
// function, claiming that they are permanently on the stack.
-func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Node) ([]*ir.Node, []*dwarf.Var, map[*ir.Node]bool) {
+func createSimpleVars(fnsym *obj.LSym, apDecls []ir.Node) ([]ir.Node, []*dwarf.Var, map[ir.Node]bool) {
var vars []*dwarf.Var
- var decls []*ir.Node
- selected := make(map[*ir.Node]bool)
+ var decls []ir.Node
+ selected := make(map[ir.Node]bool)
for _, n := range apDecls {
if ir.IsAutoTmp(n) {
continue
@@ -528,7 +528,7 @@ func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Node) ([]*ir.Node, []*dwarf
return decls, vars, selected
}
-func createSimpleVar(fnsym *obj.LSym, n *ir.Node) *dwarf.Var {
+func createSimpleVar(fnsym *obj.LSym, n ir.Node) *dwarf.Var {
var abbrev int
offs := n.Offset()
@@ -579,13 +579,13 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Node) *dwarf.Var {
// createComplexVars creates recomposed DWARF vars with location lists,
// suitable for describing optimized code.
-func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Node, []*dwarf.Var, map[*ir.Node]bool) {
+func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]ir.Node, []*dwarf.Var, map[ir.Node]bool) {
debugInfo := fn.DebugInfo.(*ssa.FuncDebug)
// Produce a DWARF variable entry for each user variable.
- var decls []*ir.Node
+ var decls []ir.Node
var vars []*dwarf.Var
- ssaVars := make(map[*ir.Node]bool)
+ ssaVars := make(map[ir.Node]bool)
for varID, dvar := range debugInfo.Vars {
n := dvar
@@ -605,11 +605,11 @@ func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Node, []*dwarf.Var,
// createDwarfVars process fn, returning a list of DWARF variables and the
// Nodes they represent.
-func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir.Node) ([]*ir.Node, []*dwarf.Var) {
+func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []ir.Node) ([]ir.Node, []*dwarf.Var) {
// Collect a raw list of DWARF vars.
var vars []*dwarf.Var
- var decls []*ir.Node
- var selected map[*ir.Node]bool
+ var decls []ir.Node
+ var selected map[ir.Node]bool
if base.Ctxt.Flag_locationlists && base.Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK {
decls, vars, selected = createComplexVars(fnsym, fn)
} else {
@@ -708,9 +708,9 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir
// function that is not local to the package being compiled, then the
// names of the variables may have been "versioned" to avoid conflicts
// with local vars; disregard this versioning when sorting.
-func preInliningDcls(fnsym *obj.LSym) []*ir.Node {
- fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*ir.Node)
- var rdcl []*ir.Node
+func preInliningDcls(fnsym *obj.LSym) []ir.Node {
+ fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(ir.Node)
+ var rdcl []ir.Node
for _, n := range fn.Func().Inl.Dcl {
c := n.Sym().Name[0]
// Avoid reporting "_" parameters, since if there are more than
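compileFunctions above drains compilequeue with base.Flag.LowerC worker goroutines reading from a channel, synchronized with a WaitGroup. A generic sketch of that fan-out pattern (work items are plain strings here, not ir.Node):

```go
// Sketch of the worker pattern in compileFunctions: a fixed number of
// goroutines drain a channel of work items, then the main goroutine
// waits for all of them to finish.
package main

import (
	"fmt"
	"sync"
)

func main() {
	work := []string{"f", "g", "h"}
	c := make(chan string, len(work))
	var wg sync.WaitGroup
	const workers = 2 // stands in for base.Flag.LowerC
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func(worker int) {
			defer wg.Done()
			for fn := range c { // loop exits once c is closed and drained
				fmt.Printf("worker %d compiles %s\n", worker, fn)
			}
		}(i)
	}
	for _, fn := range work {
		c <- fn
	}
	close(c)
	wg.Wait()
}
```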
diff --git a/src/cmd/compile/internal/gc/pgen_test.go b/src/cmd/compile/internal/gc/pgen_test.go
index efdffe0256..1984f9aa08 100644
--- a/src/cmd/compile/internal/gc/pgen_test.go
+++ b/src/cmd/compile/internal/gc/pgen_test.go
@@ -26,19 +26,19 @@ func typeWithPointers() *types.Type {
return t
}
-func markUsed(n *ir.Node) *ir.Node {
+func markUsed(n ir.Node) ir.Node {
n.Name().SetUsed(true)
return n
}
-func markNeedZero(n *ir.Node) *ir.Node {
+func markNeedZero(n ir.Node) ir.Node {
n.Name().SetNeedzero(true)
return n
}
// Test all code paths for cmpstackvarlt.
func TestCmpstackvar(t *testing.T) {
- nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) *ir.Node {
+ nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) ir.Node {
if s == nil {
s = &types.Sym{Name: "."}
}
@@ -49,7 +49,7 @@ func TestCmpstackvar(t *testing.T) {
return n
}
testdata := []struct {
- a, b *ir.Node
+ a, b ir.Node
lt bool
}{
{
@@ -156,14 +156,14 @@ func TestCmpstackvar(t *testing.T) {
}
func TestStackvarSort(t *testing.T) {
- nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) *ir.Node {
+ nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) ir.Node {
n := NewName(s)
n.SetType(t)
n.SetOffset(xoffset)
n.SetClass(cl)
return n
}
- inp := []*ir.Node{
+ inp := []ir.Node{
nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO),
nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
@@ -178,7 +178,7 @@ func TestStackvarSort(t *testing.T) {
nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, ir.PAUTO),
}
- want := []*ir.Node{
+ want := []ir.Node{
nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
nod(10, &types.Type{}, &types.Sym{}, ir.PFUNC),
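pgen.go defines byStackVar to adapt cmpstackvarlt to sort.Interface, and the tests above exercise that ordering. A sketch of the same adapter pattern with an invented element type and a deliberately simplified ordering (not the compiler's actual cmpstackvarlt rules):

```go
// Sketch: adapt a two-argument comparison function to sort.Interface,
// as byStackVar does for cmpstackvarlt. Types and ordering are invented.
package main

import (
	"fmt"
	"sort"
)

type stackVar struct {
	auto bool  // PAUTO-like: lives in the local frame
	size int64
}

func lt(a, b stackVar) bool {
	if a.auto != b.auto {
		return b.auto // non-autos sort before autos (illustrative)
	}
	return a.size > b.size // larger autos first (illustrative)
}

type byFrame []stackVar

func (s byFrame) Len() int           { return len(s) }
func (s byFrame) Less(i, j int) bool { return lt(s[i], s[j]) }
func (s byFrame) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

func main() {
	vars := []stackVar{{true, 8}, {false, 4}, {true, 16}}
	sort.Sort(byFrame(vars))
	fmt.Println(vars) // [{false 4} {true 16} {true 8}]
}
```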
diff --git a/src/cmd/compile/internal/gc/phi.go b/src/cmd/compile/internal/gc/phi.go
index 2a88d4a5b4..677bfc92df 100644
--- a/src/cmd/compile/internal/gc/phi.go
+++ b/src/cmd/compile/internal/gc/phi.go
@@ -41,11 +41,11 @@ func (s *state) insertPhis() {
}
type phiState struct {
- s *state // SSA state
- f *ssa.Func // function to work on
- defvars []map[*ir.Node]*ssa.Value // defined variables at end of each block
+ s *state // SSA state
+ f *ssa.Func // function to work on
+ defvars []map[ir.Node]*ssa.Value // defined variables at end of each block
- varnum map[*ir.Node]int32 // variable numbering
+ varnum map[ir.Node]int32 // variable numbering
// properties of the dominator tree
idom []*ssa.Block // dominator parents
@@ -71,15 +71,15 @@ func (s *phiState) insertPhis() {
// Find all the variables for which we need to match up reads & writes.
// This step prunes any basic-block-only variables from consideration.
// Generate a numbering for these variables.
- s.varnum = map[*ir.Node]int32{}
- var vars []*ir.Node
+ s.varnum = map[ir.Node]int32{}
+ var vars []ir.Node
var vartypes []*types.Type
for _, b := range s.f.Blocks {
for _, v := range b.Values {
if v.Op != ssa.OpFwdRef {
continue
}
- var_ := v.Aux.(*ir.Node)
+ var_ := v.Aux.(ir.Node)
// Optimization: look back 1 block for the definition.
if len(b.Preds) == 1 {
@@ -184,7 +184,7 @@ levels:
}
}
-func (s *phiState) insertVarPhis(n int, var_ *ir.Node, defs []*ssa.Block, typ *types.Type) {
+func (s *phiState) insertVarPhis(n int, var_ ir.Node, defs []*ssa.Block, typ *types.Type) {
priq := &s.priq
q := s.q
queued := s.queued
@@ -319,7 +319,7 @@ func (s *phiState) resolveFwdRefs() {
if v.Op != ssa.OpFwdRef {
continue
}
- n := s.varnum[v.Aux.(*ir.Node)]
+ n := s.varnum[v.Aux.(ir.Node)]
v.Op = ssa.OpCopy
v.Aux = nil
v.AddArg(values[n])
@@ -433,11 +433,11 @@ func (s *sparseSet) clear() {
// Variant to use for small functions.
type simplePhiState struct {
- s *state // SSA state
- f *ssa.Func // function to work on
- fwdrefs []*ssa.Value // list of FwdRefs to be processed
- defvars []map[*ir.Node]*ssa.Value // defined variables at end of each block
- reachable []bool // which blocks are reachable
+ s *state // SSA state
+ f *ssa.Func // function to work on
+ fwdrefs []*ssa.Value // list of FwdRefs to be processed
+ defvars []map[ir.Node]*ssa.Value // defined variables at end of each block
+ reachable []bool // which blocks are reachable
}
func (s *simplePhiState) insertPhis() {
@@ -450,7 +450,7 @@ func (s *simplePhiState) insertPhis() {
continue
}
s.fwdrefs = append(s.fwdrefs, v)
- var_ := v.Aux.(*ir.Node)
+ var_ := v.Aux.(ir.Node)
if _, ok := s.defvars[b.ID][var_]; !ok {
s.defvars[b.ID][var_] = v // treat FwdDefs as definitions.
}
@@ -464,7 +464,7 @@ loop:
v := s.fwdrefs[len(s.fwdrefs)-1]
s.fwdrefs = s.fwdrefs[:len(s.fwdrefs)-1]
b := v.Block
- var_ := v.Aux.(*ir.Node)
+ var_ := v.Aux.(ir.Node)
if b == s.f.Entry {
// No variable should be live at entry.
s.s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, var_, v)
@@ -512,7 +512,7 @@ loop:
}
// lookupVarOutgoing finds the variable's value at the end of block b.
-func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ *ir.Node, line src.XPos) *ssa.Value {
+func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ ir.Node, line src.XPos) *ssa.Value {
for {
if v := s.defvars[b.ID][var_]; v != nil {
return v
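lookupVarOutgoing above finds a variable's value at the end of a block by hopping through unique predecessors until a definition appears. A toy version with invented block and value types; where this sketch gives up at a join point, the real code materializes a FwdRef to be resolved as a phi:

```go
// Sketch of lookupVarOutgoing's single-predecessor walk.
package main

import "fmt"

type block struct {
	id    int
	preds []*block
}

// defvars[b.id][v] is v's value at the end of block b, when known.
func lookup(defvars map[int]map[string]string, b *block, v string) (string, *block) {
	for {
		if val, ok := defvars[b.id][v]; ok {
			return val, b
		}
		if len(b.preds) != 1 {
			// Join point: the real code inserts a FwdRef/phi here.
			return "", b
		}
		b = b.preds[0] // unique predecessor: keep walking back
	}
}

func main() {
	b1 := &block{id: 1}
	b2 := &block{id: 2, preds: []*block{b1}}
	b3 := &block{id: 3, preds: []*block{b2}}
	defvars := map[int]map[string]string{1: {"x": "v7"}}
	val, at := lookup(defvars, b3, "x")
	fmt.Println(val, at.id) // v7 1
}
```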
diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go
index c1e523f7a0..bd7696d859 100644
--- a/src/cmd/compile/internal/gc/plive.go
+++ b/src/cmd/compile/internal/gc/plive.go
@@ -101,10 +101,10 @@ type BlockEffects struct {
// A collection of global state used by liveness analysis.
type Liveness struct {
- fn *ir.Node
+ fn ir.Node
f *ssa.Func
- vars []*ir.Node
- idx map[*ir.Node]int32
+ vars []ir.Node
+ idx map[ir.Node]int32
stkptrsize int64
be []BlockEffects
@@ -206,20 +206,20 @@ type progeffectscache struct {
// nor do we care about non-local variables,
// nor do we care about empty structs (handled by the pointer check),
// nor do we care about the fake PAUTOHEAP variables.
-func livenessShouldTrack(n *ir.Node) bool {
+func livenessShouldTrack(n ir.Node) bool {
return n.Op() == ir.ONAME && (n.Class() == ir.PAUTO || n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Type().HasPointers()
}
// getvariables returns the list of on-stack variables that we need to track
// and a map for looking up indices by *Node.
-func getvariables(fn *ir.Node) ([]*ir.Node, map[*ir.Node]int32) {
- var vars []*ir.Node
+func getvariables(fn ir.Node) ([]ir.Node, map[ir.Node]int32) {
+ var vars []ir.Node
for _, n := range fn.Func().Dcl {
if livenessShouldTrack(n) {
vars = append(vars, n)
}
}
- idx := make(map[*ir.Node]int32, len(vars))
+ idx := make(map[ir.Node]int32, len(vars))
for i, n := range vars {
idx[n] = int32(i)
}
@@ -312,7 +312,7 @@ func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
}
// affectedNode returns the *Node affected by v
-func affectedNode(v *ssa.Value) (*ir.Node, ssa.SymEffect) {
+func affectedNode(v *ssa.Value) (ir.Node, ssa.SymEffect) {
// Special cases.
switch v.Op {
case ssa.OpLoadReg:
@@ -323,9 +323,9 @@ func affectedNode(v *ssa.Value) (*ir.Node, ssa.SymEffect) {
return n, ssa.SymWrite
case ssa.OpVarLive:
- return v.Aux.(*ir.Node), ssa.SymRead
+ return v.Aux.(ir.Node), ssa.SymRead
case ssa.OpVarDef, ssa.OpVarKill:
- return v.Aux.(*ir.Node), ssa.SymWrite
+ return v.Aux.(ir.Node), ssa.SymWrite
case ssa.OpKeepAlive:
n, _ := AutoVar(v.Args[0])
return n, ssa.SymRead
@@ -340,7 +340,7 @@ func affectedNode(v *ssa.Value) (*ir.Node, ssa.SymEffect) {
case nil, *obj.LSym:
// ok, but no node
return nil, e
- case *ir.Node:
+ case ir.Node:
return a, e
default:
base.Fatalf("weird aux: %s", v.LongString())
@@ -356,7 +356,7 @@ type livenessFuncCache struct {
// Constructs a new liveness structure used to hold the global state of the
// liveness computation. The cfg argument is a slice of *BasicBlocks and the
// vars argument is a slice of *Nodes.
-func newliveness(fn *ir.Node, f *ssa.Func, vars []*ir.Node, idx map[*ir.Node]int32, stkptrsize int64) *Liveness {
+func newliveness(fn ir.Node, f *ssa.Func, vars []ir.Node, idx map[ir.Node]int32, stkptrsize int64) *Liveness {
lv := &Liveness{
fn: fn,
f: f,
@@ -482,7 +482,7 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) {
// Generates live pointer value maps for arguments and local variables. The
// this argument and the in arguments are always assumed live. The vars
// argument is a slice of *Nodes.
-func (lv *Liveness) pointerMap(liveout bvec, vars []*ir.Node, args, locals bvec) {
+func (lv *Liveness) pointerMap(liveout bvec, vars []ir.Node, args, locals bvec) {
for i := int32(0); ; i++ {
i = liveout.Next(i)
if i < 0 {
@@ -1164,7 +1164,7 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) {
// Size args bitmaps to be just large enough to hold the largest pointer.
// First, find the largest Xoffset node we care about.
// (Nodes without pointers aren't in lv.vars; see livenessShouldTrack.)
- var maxArgNode *ir.Node
+ var maxArgNode ir.Node
for _, n := range lv.vars {
switch n.Class() {
case ir.PPARAM, ir.PPARAMOUT:
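getvariables above pairs a dense slice of tracked variables with a map from node back to slice index, so the liveness bitvectors can be indexed by position. The same two-structure pattern in miniature, with strings standing in for nodes:

```go
// Sketch of the getvariables pattern: a slice for dense bitvector
// indexing plus a map for reverse lookup. Strings stand in for ir.Node.
package main

import "fmt"

func index(items []string, track func(string) bool) ([]string, map[string]int32) {
	var vars []string
	for _, n := range items {
		if track(n) { // livenessShouldTrack-style filter
			vars = append(vars, n)
		}
	}
	idx := make(map[string]int32, len(vars))
	for i, n := range vars {
		idx[n] = int32(i)
	}
	return vars, idx
}

func main() {
	vars, idx := index([]string{"a", "tmp", "b"},
		func(s string) bool { return s != "tmp" })
	fmt.Println(vars, idx["b"]) // [a b] 1
}
```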
diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go
index 5ab2821187..c41d923f78 100644
--- a/src/cmd/compile/internal/gc/racewalk.go
+++ b/src/cmd/compile/internal/gc/racewalk.go
@@ -60,7 +60,7 @@ func ispkgin(pkgs []string) bool {
return false
}
-func instrument(fn *ir.Node) {
+func instrument(fn ir.Node) {
if fn.Func().Pragma&ir.Norace != 0 {
return
}
diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go
index 6a2a65c2df..0ff00cca44 100644
--- a/src/cmd/compile/internal/gc/range.go
+++ b/src/cmd/compile/internal/gc/range.go
@@ -13,7 +13,7 @@ import (
)
// range
-func typecheckrange(n *ir.Node) {
+func typecheckrange(n ir.Node) {
// Typechecking order is important here:
// 0. first typecheck range expression (slice/map/chan),
// it is evaluated only once and so logically it is not part of the loop.
@@ -39,7 +39,7 @@ func typecheckrange(n *ir.Node) {
decldepth--
}
-func typecheckrangeExpr(n *ir.Node) {
+func typecheckrangeExpr(n ir.Node) {
n.SetRight(typecheck(n.Right(), ctxExpr))
t := n.Right().Type()
@@ -95,7 +95,7 @@ func typecheckrangeExpr(n *ir.Node) {
base.ErrorfAt(n.Pos(), "too many variables in range")
}
- var v1, v2 *ir.Node
+ var v1, v2 ir.Node
if n.List().Len() != 0 {
v1 = n.List().First()
}
@@ -157,7 +157,7 @@ func cheapComputableIndex(width int64) bool {
// simpler forms. The result must be assigned back to n.
// Node n may also be modified in place, and may also be
// the returned node.
-func walkrange(n *ir.Node) *ir.Node {
+func walkrange(n ir.Node) ir.Node {
if isMapClear(n) {
m := n.Right()
lno := setlineno(m)
@@ -179,7 +179,7 @@ func walkrange(n *ir.Node) *ir.Node {
lno := setlineno(a)
n.SetRight(nil)
- var v1, v2 *ir.Node
+ var v1, v2 ir.Node
l := n.List().Len()
if l > 0 {
v1 = n.List().First()
@@ -205,12 +205,12 @@ func walkrange(n *ir.Node) *ir.Node {
// to avoid erroneous processing by racewalk.
n.PtrList().Set(nil)
- var ifGuard *ir.Node
+ var ifGuard ir.Node
translatedLoopOp := ir.OFOR
- var body []*ir.Node
- var init []*ir.Node
+ var body []ir.Node
+ var init []ir.Node
switch t.Etype {
default:
base.Fatalf("walkrange")
@@ -240,7 +240,7 @@ func walkrange(n *ir.Node) *ir.Node {
// for v1 := range ha { body }
if v2 == nil {
- body = []*ir.Node{ir.Nod(ir.OAS, v1, hv1)}
+ body = []ir.Node{ir.Nod(ir.OAS, v1, hv1)}
break
}
@@ -254,7 +254,7 @@ func walkrange(n *ir.Node) *ir.Node {
a := ir.Nod(ir.OAS2, nil, nil)
a.PtrList().Set2(v1, v2)
a.PtrRlist().Set2(hv1, tmp)
- body = []*ir.Node{a}
+ body = []ir.Node{a}
break
}
@@ -321,14 +321,14 @@ func walkrange(n *ir.Node) *ir.Node {
if v1 == nil {
body = nil
} else if v2 == nil {
- body = []*ir.Node{ir.Nod(ir.OAS, v1, key)}
+ body = []ir.Node{ir.Nod(ir.OAS, v1, key)}
} else {
elem := nodSym(ir.ODOT, hit, elemsym)
elem = ir.Nod(ir.ODEREF, elem, nil)
a := ir.Nod(ir.OAS2, nil, nil)
a.PtrList().Set2(v1, v2)
a.PtrRlist().Set2(key, elem)
- body = []*ir.Node{a}
+ body = []ir.Node{a}
}
case types.TCHAN:
@@ -353,7 +353,7 @@ func walkrange(n *ir.Node) *ir.Node {
if v1 == nil {
body = nil
} else {
- body = []*ir.Node{ir.Nod(ir.OAS, v1, hv1)}
+ body = []ir.Node{ir.Nod(ir.OAS, v1, hv1)}
}
// Zero hv1. This prevents hv1 from being the sole, inaccessible
// reference to an otherwise GC-able value during the next channel receive.
@@ -467,7 +467,7 @@ func walkrange(n *ir.Node) *ir.Node {
// }
//
// where == for keys of map m is reflexive.
-func isMapClear(n *ir.Node) bool {
+func isMapClear(n ir.Node) bool {
if base.Flag.N != 0 || instrumenting {
return false
}
@@ -509,7 +509,7 @@ func isMapClear(n *ir.Node) bool {
}
// mapClear constructs a call to runtime.mapclear for the map m.
-func mapClear(m *ir.Node) *ir.Node {
+func mapClear(m ir.Node) ir.Node {
t := m.Type()
// instantiate mapclear(typ *type, hmap map[any]any)
@@ -534,7 +534,7 @@ func mapClear(m *ir.Node) *ir.Node {
// in which the evaluation of a is side-effect-free.
//
// Parameters are as in walkrange: "for v1, v2 = range a".
-func arrayClear(n, v1, v2, a *ir.Node) bool {
+func arrayClear(n, v1, v2, a ir.Node) bool {
if base.Flag.N != 0 || instrumenting {
return false
}
@@ -590,7 +590,7 @@ func arrayClear(n, v1, v2, a *ir.Node) bool {
tmp = conv(tmp, types.Types[types.TUINTPTR])
n.PtrBody().Append(ir.Nod(ir.OAS, hn, tmp))
- var fn *ir.Node
+ var fn ir.Node
if a.Type().Elem().HasPointers() {
// memclrHasPointers(hp, hn)
Curfn.Func().SetWBPos(stmt.Pos())
@@ -615,7 +615,7 @@ func arrayClear(n, v1, v2, a *ir.Node) bool {
}
// addptr returns (*T)(uintptr(p) + n).
-func addptr(p *ir.Node, n int64) *ir.Node {
+func addptr(p ir.Node, n int64) ir.Node {
t := p.Type()
p = ir.Nod(ir.OCONVNOP, p, nil)
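isMapClear above pattern-matches the loop form shown below, and mapClear lowers the whole loop to a single runtime map-clear call instead of iterating key by key (the exact runtime entry point is an internal detail). The recognized source-level form:

```go
// Sketch of the idiom isMapClear recognizes: a range loop whose body
// only deletes the current key from the ranged-over map.
package main

import "fmt"

func main() {
	m := map[string]int{"a": 1, "b": 2}
	for k := range m { // recognized form: delete every key of m
		delete(m, k)
	}
	fmt.Println(len(m)) // 0
}
```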
diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go
index 664b3cc942..dc9efc07fe 100644
--- a/src/cmd/compile/internal/gc/reflect.go
+++ b/src/cmd/compile/internal/gc/reflect.go
@@ -347,7 +347,7 @@ func methodfunc(f *types.Type, receiver *types.Type) *types.Type {
if receiver != nil {
inLen++
}
- in := make([]*ir.Node, 0, inLen)
+ in := make([]ir.Node, 0, inLen)
if receiver != nil {
d := anonfield(receiver)
@@ -361,7 +361,7 @@ func methodfunc(f *types.Type, receiver *types.Type) *types.Type {
}
outLen := f.Results().Fields().Len()
- out := make([]*ir.Node, 0, outLen)
+ out := make([]ir.Node, 0, outLen)
for _, t := range f.Results().Fields().Slice() {
d := anonfield(t.Type)
out = append(out, d)
@@ -990,7 +990,7 @@ func typenamesym(t *types.Type) *types.Sym {
return s
}
-func typename(t *types.Type) *ir.Node {
+func typename(t *types.Type) ir.Node {
s := typenamesym(t)
if s.Def == nil {
n := ir.NewNameAt(src.NoXPos, s)
@@ -1006,7 +1006,7 @@ func typename(t *types.Type) *ir.Node {
return n
}
-func itabname(t, itype *types.Type) *ir.Node {
+func itabname(t, itype *types.Type) ir.Node {
if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
base.Fatalf("itabname(%v, %v)", t, itype)
}
@@ -1516,7 +1516,7 @@ func addsignat(t *types.Type) {
}
}
-func addsignats(dcls []*ir.Node) {
+func addsignats(dcls []ir.Node) {
// copy types from dcl list to signatset
for _, n := range dcls {
if n.Op() == ir.OTYPE {
@@ -1626,7 +1626,7 @@ func dumpbasictypes() {
// The latter is the type of an auto-generated wrapper.
dtypesym(types.NewPtr(types.Errortype))
- dtypesym(functype(nil, []*ir.Node{anonfield(types.Errortype)}, []*ir.Node{anonfield(types.Types[types.TSTRING])}))
+ dtypesym(functype(nil, []ir.Node{anonfield(types.Errortype)}, []ir.Node{anonfield(types.Types[types.TSTRING])}))
// add paths for runtime and main, which 6l imports implicitly.
dimportpath(Runtimepkg)
@@ -1869,7 +1869,7 @@ func (p *GCProg) emit(t *types.Type, offset int64) {
// zeroaddr returns the address of a symbol with at least
// size bytes of zeros.
-func zeroaddr(size int64) *ir.Node {
+func zeroaddr(size int64) ir.Node {
if size >= 1<<31 {
base.Fatalf("map elem too big %d", size)
}
diff --git a/src/cmd/compile/internal/gc/scc.go b/src/cmd/compile/internal/gc/scc.go
index 880eff7595..fe7956d5d5 100644
--- a/src/cmd/compile/internal/gc/scc.go
+++ b/src/cmd/compile/internal/gc/scc.go
@@ -32,10 +32,10 @@ import "cmd/compile/internal/ir"
// when analyzing a set of mutually recursive functions.
type bottomUpVisitor struct {
- analyze func([]*ir.Node, bool)
+ analyze func([]ir.Node, bool)
visitgen uint32
- nodeID map[*ir.Node]uint32
- stack []*ir.Node
+ nodeID map[ir.Node]uint32
+ stack []ir.Node
}
// visitBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
@@ -51,10 +51,10 @@ type bottomUpVisitor struct {
// If recursive is false, the list consists of only a single function and its closures.
// If recursive is true, the list may still contain only a single function,
// if that function is itself recursive.
-func visitBottomUp(list []*ir.Node, analyze func(list []*ir.Node, recursive bool)) {
+func visitBottomUp(list []ir.Node, analyze func(list []ir.Node, recursive bool)) {
var v bottomUpVisitor
v.analyze = analyze
- v.nodeID = make(map[*ir.Node]uint32)
+ v.nodeID = make(map[ir.Node]uint32)
for _, n := range list {
if n.Op() == ir.ODCLFUNC && !n.Func().IsHiddenClosure() {
v.visit(n)
@@ -62,7 +62,7 @@ func visitBottomUp(list []*ir.Node, analyze func(list []*ir.Node, recursive bool
}
}
-func (v *bottomUpVisitor) visit(n *ir.Node) uint32 {
+func (v *bottomUpVisitor) visit(n ir.Node) uint32 {
if id := v.nodeID[n]; id > 0 {
// already visited
return id
@@ -75,7 +75,7 @@ func (v *bottomUpVisitor) visit(n *ir.Node) uint32 {
min := v.visitgen
v.stack = append(v.stack, n)
- ir.InspectList(n.Body(), func(n *ir.Node) bool {
+ ir.InspectList(n.Body(), func(n ir.Node) bool {
switch n.Op() {
case ir.ONAME:
if n.Class() == ir.PFUNC {
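The bottomUpVisitor above is a Tarjan-style strongly-connected-components walk: each function gets an id and sits on a stack, and when a node's id equals the minimum id reachable from it, everything above it on the stack forms one component and is analyzed as a unit. A self-contained sketch over an invented string-keyed call graph (the real visitor discovers edges by walking function bodies with ir.InspectList):

```go
// Sketch of the SCC numbering scheme behind bottomUpVisitor.
package main

import "fmt"

type visitor struct {
	graph    map[string][]string // invented explicit call graph
	id       map[string]uint32
	visitgen uint32
	stack    []string
	analyze  func([]string)
}

func (v *visitor) visit(n string) uint32 {
	if id := v.id[n]; id > 0 {
		return id // already visited (or already in a finished component)
	}
	v.visitgen++
	id := v.visitgen
	v.id[n] = id
	min := id
	v.stack = append(v.stack, n)
	for _, m := range v.graph[n] {
		if w := v.visit(m); w < min {
			min = w
		}
	}
	if min == id {
		// n is the root of a component: pop it and everything above it.
		var comp []string
		for {
			last := v.stack[len(v.stack)-1]
			v.stack = v.stack[:len(v.stack)-1]
			v.id[last] = ^uint32(0) // mark done with a sentinel id
			comp = append(comp, last)
			if last == n {
				break
			}
		}
		v.analyze(comp)
	}
	return min
}

func main() {
	v := &visitor{
		graph:   map[string][]string{"f": {"g"}, "g": {"f"}, "h": {"f"}},
		id:      map[string]uint32{},
		analyze: func(c []string) { fmt.Println("component:", c) },
	}
	v.visit("h") // prints the mutually recursive pair, then [h]
}
```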
diff --git a/src/cmd/compile/internal/gc/scope.go b/src/cmd/compile/internal/gc/scope.go
index 16e66dee6c..fe4e1d185a 100644
--- a/src/cmd/compile/internal/gc/scope.go
+++ b/src/cmd/compile/internal/gc/scope.go
@@ -28,7 +28,7 @@ func findScope(marks []ir.Mark, pos src.XPos) ir.ScopeID {
return marks[i-1].Scope
}
-func assembleScopes(fnsym *obj.LSym, fn *ir.Node, dwarfVars []*dwarf.Var, varScopes []ir.ScopeID) []dwarf.Scope {
+func assembleScopes(fnsym *obj.LSym, fn ir.Node, dwarfVars []*dwarf.Var, varScopes []ir.ScopeID) []dwarf.Scope {
// Initialize the DWARF scope tree based on lexical scopes.
dwarfScopes := make([]dwarf.Scope, 1+len(fn.Func().Parents))
for i, parent := range fn.Func().Parents {
diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go
index 73b808b815..116b6f5b6e 100644
--- a/src/cmd/compile/internal/gc/select.go
+++ b/src/cmd/compile/internal/gc/select.go
@@ -11,8 +11,8 @@ import (
)
// select
-func typecheckselect(sel *ir.Node) {
- var def *ir.Node
+func typecheckselect(sel ir.Node) {
+ var def ir.Node
lno := setlineno(sel)
typecheckslice(sel.Init().Slice(), ctxStmt)
for _, ncase := range sel.List().Slice() {
@@ -91,7 +91,7 @@ func typecheckselect(sel *ir.Node) {
base.Pos = lno
}
-func walkselect(sel *ir.Node) {
+func walkselect(sel ir.Node) {
lno := setlineno(sel)
if sel.Body().Len() != 0 {
base.Fatalf("double walkselect")
@@ -109,13 +109,13 @@ func walkselect(sel *ir.Node) {
base.Pos = lno
}
-func walkselectcases(cases *ir.Nodes) []*ir.Node {
+func walkselectcases(cases *ir.Nodes) []ir.Node {
ncas := cases.Len()
sellineno := base.Pos
// optimization: zero-case select
if ncas == 0 {
- return []*ir.Node{mkcall("block", nil, nil)}
+ return []ir.Node{mkcall("block", nil, nil)}
}
// optimization: one-case select: single op.
@@ -168,7 +168,7 @@ func walkselectcases(cases *ir.Nodes) []*ir.Node {
// convert case value arguments to addresses.
// this rewrite is used by both the general code and the next optimization.
- var dflt *ir.Node
+ var dflt ir.Node
for _, cas := range cases.Slice() {
setlineno(cas)
n := cas.Left()
@@ -237,16 +237,16 @@ func walkselectcases(cases *ir.Nodes) []*ir.Node {
r.SetLeft(typecheck(r.Left(), ctxExpr))
r.PtrBody().Set(cas.Body().Slice())
r.PtrRlist().Set(append(dflt.Init().Slice(), dflt.Body().Slice()...))
- return []*ir.Node{r, ir.Nod(ir.OBREAK, nil, nil)}
+ return []ir.Node{r, ir.Nod(ir.OBREAK, nil, nil)}
}
if dflt != nil {
ncas--
}
- casorder := make([]*ir.Node, ncas)
+ casorder := make([]ir.Node, ncas)
nsends, nrecvs := 0, 0
- var init []*ir.Node
+ var init []ir.Node
// generate sel-struct
base.Pos = sellineno
@@ -258,7 +258,7 @@ func walkselectcases(cases *ir.Nodes) []*ir.Node {
// No initialization for order; runtime.selectgo is responsible for that.
order := temp(types.NewArray(types.Types[types.TUINT16], 2*int64(ncas)))
- var pc0, pcs *ir.Node
+ var pc0, pcs ir.Node
if base.Flag.Race {
pcs = temp(types.NewArray(types.Types[types.TUINTPTR], int64(ncas)))
pc0 = typecheck(ir.Nod(ir.OADDR, ir.Nod(ir.OINDEX, pcs, nodintconst(0)), nil), ctxExpr)
@@ -279,7 +279,7 @@ func walkselectcases(cases *ir.Nodes) []*ir.Node {
}
var i int
- var c, elem *ir.Node
+ var c, elem ir.Node
switch n.Op() {
default:
base.Fatalf("select %v", n.Op())
@@ -297,7 +297,7 @@ func walkselectcases(cases *ir.Nodes) []*ir.Node {
casorder[i] = cas
- setField := func(f string, val *ir.Node) {
+ setField := func(f string, val ir.Node) {
r := ir.Nod(ir.OAS, nodSym(ir.ODOT, ir.Nod(ir.OINDEX, selv, nodintconst(int64(i))), lookup(f)), val)
r = typecheck(r, ctxStmt)
init = append(init, r)
@@ -340,7 +340,7 @@ func walkselectcases(cases *ir.Nodes) []*ir.Node {
}
// dispatch cases
- dispatch := func(cond, cas *ir.Node) {
+ dispatch := func(cond, cas ir.Node) {
cond = typecheck(cond, ctxExpr)
cond = defaultlit(cond, nil)
@@ -370,7 +370,7 @@ func walkselectcases(cases *ir.Nodes) []*ir.Node {
}
// bytePtrToIndex returns a Node representing "(*byte)(&n[i])".
-func bytePtrToIndex(n *ir.Node, i int64) *ir.Node {
+func bytePtrToIndex(n ir.Node, i int64) ir.Node {
s := ir.Nod(ir.OADDR, ir.Nod(ir.OINDEX, n, nodintconst(i)), nil)
t := types.NewPtr(types.Types[types.TUINT8])
return convnop(s, t)
@@ -381,7 +381,7 @@ var scase *types.Type
// Keep in sync with src/runtime/select.go.
func scasetype() *types.Type {
if scase == nil {
- scase = tostruct([]*ir.Node{
+ scase = tostruct([]ir.Node{
namedfield("c", types.Types[types.TUNSAFEPTR]),
namedfield("elem", types.Types[types.TUNSAFEPTR]),
})
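
The struct built by scasetype above is the compiler's mirror of the runtime's per-case record; the "Keep in sync" comment points at (shape only, field types abridged from the runtime of this era):

	// src/runtime/select.go
	type scase struct {
		c    *hchan         // channel operated on
		elem unsafe.Pointer // data element (send source or receive destination)
	}
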
diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go
index c0f85a1e33..e30663cfbb 100644
--- a/src/cmd/compile/internal/gc/sinit.go
+++ b/src/cmd/compile/internal/gc/sinit.go
@@ -14,8 +14,8 @@ import (
)
type InitEntry struct {
- Xoffset int64 // struct, array only
- Expr *ir.Node // bytes of run-time computed expressions
+ Xoffset int64 // struct, array only
+ Expr ir.Node // bytes of run-time computed expressions
}
type InitPlan struct {
@@ -29,18 +29,18 @@ type InitPlan struct {
type InitSchedule struct {
// out is the ordered list of dynamic initialization
// statements.
- out []*ir.Node
+ out []ir.Node
- initplans map[*ir.Node]*InitPlan
- inittemps map[*ir.Node]*ir.Node
+ initplans map[ir.Node]*InitPlan
+ inittemps map[ir.Node]ir.Node
}
-func (s *InitSchedule) append(n *ir.Node) {
+func (s *InitSchedule) append(n ir.Node) {
s.out = append(s.out, n)
}
// staticInit adds an initialization statement n to the schedule.
-func (s *InitSchedule) staticInit(n *ir.Node) {
+func (s *InitSchedule) staticInit(n ir.Node) {
if !s.tryStaticInit(n) {
if base.Flag.Percent != 0 {
ir.Dump("nonstatic", n)
@@ -51,7 +51,7 @@ func (s *InitSchedule) staticInit(n *ir.Node) {
// tryStaticInit attempts to statically execute an initialization
// statement and reports whether it succeeded.
-func (s *InitSchedule) tryStaticInit(n *ir.Node) bool {
+func (s *InitSchedule) tryStaticInit(n ir.Node) bool {
// Only worry about simple "l = r" assignments. Multiple
// variable/expression OAS2 assignments have already been
// replaced by multiple simple OAS assignments, and the other
@@ -70,7 +70,7 @@ func (s *InitSchedule) tryStaticInit(n *ir.Node) bool {
// like staticassign but we are copying an already
// initialized value r.
-func (s *InitSchedule) staticcopy(l *ir.Node, r *ir.Node) bool {
+func (s *InitSchedule) staticcopy(l ir.Node, r ir.Node) bool {
if r.Op() != ir.ONAME && r.Op() != ir.OMETHEXPR {
return false
}
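
For context: staticcopy, tryStaticInit, and staticassign try to evaluate an initializer entirely at compile time; anything they cannot prove static falls through to s.append and is emitted as ordinary code in the generated package init function. An invented source-level illustration of the split:

	var a = [4]int{1, 2, 3, 4} // static: laid out directly in the data section, no init code
	var b = time.Now().Unix()  // dynamic: becomes a statement in the package's init()
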
@@ -168,7 +168,7 @@ func (s *InitSchedule) staticcopy(l *ir.Node, r *ir.Node) bool {
return false
}
-func (s *InitSchedule) staticassign(l *ir.Node, r *ir.Node) bool {
+func (s *InitSchedule) staticassign(l ir.Node, r ir.Node) bool {
for r.Op() == ir.OCONVNOP {
r = r.Left()
}
@@ -289,7 +289,7 @@ func (s *InitSchedule) staticassign(l *ir.Node, r *ir.Node) bool {
markTypeUsedInInterface(val.Type(), l.Sym().Linksym())
- var itab *ir.Node
+ var itab ir.Node
if l.Type().IsEmptyInterface() {
itab = typename(val.Type())
} else {
@@ -367,7 +367,7 @@ var statuniqgen int // name generator for static temps
// staticname returns a name backed by a (writable) static data symbol.
// Use readonlystaticname for a read-only node.
-func staticname(t *types.Type) *ir.Node {
+func staticname(t *types.Type) ir.Node {
// Don't use lookupN; it interns the resulting string, but these are all unique.
n := NewName(lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen)))
statuniqgen++
@@ -377,18 +377,18 @@ func staticname(t *types.Type) *ir.Node {
}
// readonlystaticname returns a name backed by a read-only static data symbol.
-func readonlystaticname(t *types.Type) *ir.Node {
+func readonlystaticname(t *types.Type) ir.Node {
n := staticname(t)
n.MarkReadonly()
n.Sym().Linksym().Set(obj.AttrContentAddressable, true)
return n
}
-func isSimpleName(n *ir.Node) bool {
+func isSimpleName(n ir.Node) bool {
return (n.Op() == ir.ONAME || n.Op() == ir.OMETHEXPR) && n.Class() != ir.PAUTOHEAP && n.Class() != ir.PEXTERN
}
-func litas(l *ir.Node, r *ir.Node, init *ir.Nodes) {
+func litas(l ir.Node, r ir.Node, init *ir.Nodes) {
a := ir.Nod(ir.OAS, l, r)
a = typecheck(a, ctxStmt)
a = walkexpr(a, init)
@@ -405,7 +405,7 @@ const (
// getdyn calculates the initGenType for n.
// If top is false, getdyn is recursing.
-func getdyn(n *ir.Node, top bool) initGenType {
+func getdyn(n ir.Node, top bool) initGenType {
switch n.Op() {
default:
if isGoConst(n) {
@@ -447,7 +447,7 @@ func getdyn(n *ir.Node, top bool) initGenType {
}
// isStaticCompositeLiteral reports whether n is a compile-time constant.
-func isStaticCompositeLiteral(n *ir.Node) bool {
+func isStaticCompositeLiteral(n ir.Node) bool {
switch n.Op() {
case ir.OSLICELIT:
return false
@@ -509,13 +509,13 @@ const (
// fixedlit handles struct, array, and slice literals.
// TODO: expand documentation.
-func fixedlit(ctxt initContext, kind initKind, n *ir.Node, var_ *ir.Node, init *ir.Nodes) {
+func fixedlit(ctxt initContext, kind initKind, n ir.Node, var_ ir.Node, init *ir.Nodes) {
isBlank := var_ == ir.BlankNode
- var splitnode func(*ir.Node) (a *ir.Node, value *ir.Node)
+ var splitnode func(ir.Node) (a ir.Node, value ir.Node)
switch n.Op() {
case ir.OARRAYLIT, ir.OSLICELIT:
var k int64
- splitnode = func(r *ir.Node) (*ir.Node, *ir.Node) {
+ splitnode = func(r ir.Node) (ir.Node, ir.Node) {
if r.Op() == ir.OKEY {
k = indexconst(r.Left())
if k < 0 {
@@ -531,7 +531,7 @@ func fixedlit(ctxt initContext, kind initKind, n *ir.Node, var_ *ir.Node, init *
return a, r
}
case ir.OSTRUCTLIT:
- splitnode = func(r *ir.Node) (*ir.Node, *ir.Node) {
+ splitnode = func(r ir.Node) (ir.Node, ir.Node) {
if r.Op() != ir.OSTRUCTKEY {
base.Fatalf("fixedlit: rhs not OSTRUCTKEY: %v", r)
}
@@ -576,7 +576,7 @@ func fixedlit(ctxt initContext, kind initKind, n *ir.Node, var_ *ir.Node, init *
case initKindStatic:
genAsStatic(a)
case initKindDynamic, initKindLocalCode:
- a = orderStmtInPlace(a, map[string][]*ir.Node{})
+ a = orderStmtInPlace(a, map[string][]ir.Node{})
a = walkstmt(a)
init.Append(a)
default:
@@ -586,7 +586,7 @@ func fixedlit(ctxt initContext, kind initKind, n *ir.Node, var_ *ir.Node, init *
}
}
-func isSmallSliceLit(n *ir.Node) bool {
+func isSmallSliceLit(n ir.Node) bool {
if n.Op() != ir.OSLICELIT {
return false
}
@@ -596,7 +596,7 @@ func isSmallSliceLit(n *ir.Node) bool {
return smallintconst(r) && (n.Type().Elem().Width == 0 || r.Int64Val() <= smallArrayBytes/n.Type().Elem().Width)
}
-func slicelit(ctxt initContext, n *ir.Node, var_ *ir.Node, init *ir.Nodes) {
+func slicelit(ctxt initContext, n ir.Node, var_ ir.Node, init *ir.Nodes) {
// make an array type corresponding to the number of elements we have
t := types.NewArray(n.Type().Elem(), n.Right().Int64Val())
dowidth(t)
@@ -639,7 +639,7 @@ func slicelit(ctxt initContext, n *ir.Node, var_ *ir.Node, init *ir.Nodes) {
// if the literal contains constants,
// make static initialized array (1),(2)
- var vstat *ir.Node
+ var vstat ir.Node
mode := getdyn(n, true)
if mode&initConst != 0 && !isSmallSliceLit(n) {
@@ -655,7 +655,7 @@ func slicelit(ctxt initContext, n *ir.Node, var_ *ir.Node, init *ir.Nodes) {
vauto := temp(types.NewPtr(t))
// set auto to point at new temp or heap (3 assign)
- var a *ir.Node
+ var a ir.Node
if x := prealloc[n]; x != nil {
// temp allocated during order.go for dddarg
if !types.Identical(t, x.Type()) {
@@ -745,7 +745,7 @@ func slicelit(ctxt initContext, n *ir.Node, var_ *ir.Node, init *ir.Nodes) {
a = ir.Nod(ir.OAS, a, value)
a = typecheck(a, ctxStmt)
- a = orderStmtInPlace(a, map[string][]*ir.Node{})
+ a = orderStmtInPlace(a, map[string][]ir.Node{})
a = walkstmt(a)
init.Append(a)
}
@@ -754,12 +754,12 @@ func slicelit(ctxt initContext, n *ir.Node, var_ *ir.Node, init *ir.Nodes) {
a = ir.Nod(ir.OAS, var_, ir.Nod(ir.OSLICE, vauto, nil))
a = typecheck(a, ctxStmt)
- a = orderStmtInPlace(a, map[string][]*ir.Node{})
+ a = orderStmtInPlace(a, map[string][]ir.Node{})
a = walkstmt(a)
init.Append(a)
}
-func maplit(n *ir.Node, m *ir.Node, init *ir.Nodes) {
+func maplit(n ir.Node, m ir.Node, init *ir.Nodes) {
// make the map var
a := ir.Nod(ir.OMAKE, nil, nil)
a.SetEsc(n.Esc())
@@ -866,7 +866,7 @@ func maplit(n *ir.Node, m *ir.Node, init *ir.Nodes) {
init.Append(a)
}
-func anylit(n *ir.Node, var_ *ir.Node, init *ir.Nodes) {
+func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) {
t := n.Type()
switch n.Op() {
default:
@@ -882,7 +882,7 @@ func anylit(n *ir.Node, var_ *ir.Node, init *ir.Nodes) {
base.Fatalf("anylit: not ptr")
}
- var r *ir.Node
+ var r ir.Node
if n.Right() != nil {
// n.Right is a stack temporary used as the backing store.
init.Append(ir.Nod(ir.OAS, n.Right(), nil)) // zero backing store, just in case (#18410)
@@ -959,7 +959,7 @@ func anylit(n *ir.Node, var_ *ir.Node, init *ir.Nodes) {
}
}
-func oaslit(n *ir.Node, init *ir.Nodes) bool {
+func oaslit(n ir.Node, init *ir.Nodes) bool {
if n.Left() == nil || n.Right() == nil {
// not a special composite literal assignment
return false
@@ -995,7 +995,7 @@ func oaslit(n *ir.Node, init *ir.Nodes) bool {
return true
}
-func getlit(lit *ir.Node) int {
+func getlit(lit ir.Node) int {
if smallintconst(lit) {
return int(lit.Int64Val())
}
@@ -1003,7 +1003,7 @@ func getlit(lit *ir.Node) int {
}
// stataddr returns the static address of n, if n has one, or else nil.
-func stataddr(n *ir.Node) *ir.Node {
+func stataddr(n ir.Node) ir.Node {
if n == nil {
return nil
}
@@ -1046,7 +1046,7 @@ func stataddr(n *ir.Node) *ir.Node {
return nil
}
-func (s *InitSchedule) initplan(n *ir.Node) {
+func (s *InitSchedule) initplan(n ir.Node) {
if s.initplans[n] != nil {
return
}
@@ -1091,7 +1091,7 @@ func (s *InitSchedule) initplan(n *ir.Node) {
}
}
-func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n *ir.Node) {
+func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n ir.Node) {
// special case: zero can be dropped entirely
if isZero(n) {
return
@@ -1113,7 +1113,7 @@ func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n *ir.Node) {
p.E = append(p.E, InitEntry{Xoffset: xoffset, Expr: n})
}
-func isZero(n *ir.Node) bool {
+func isZero(n ir.Node) bool {
switch n.Op() {
case ir.ONIL:
return true
@@ -1151,11 +1151,11 @@ func isZero(n *ir.Node) bool {
return false
}
-func isvaluelit(n *ir.Node) bool {
+func isvaluelit(n ir.Node) bool {
return n.Op() == ir.OARRAYLIT || n.Op() == ir.OSTRUCTLIT
}
-func genAsStatic(as *ir.Node) {
+func genAsStatic(as ir.Node) {
if as.Left().Type() == nil {
base.Fatalf("genAsStatic as.Left not typechecked")
}
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index 262aa0e95c..cb73532b48 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -40,7 +40,7 @@ const ssaDumpFile = "ssa.html"
const maxOpenDefers = 8
// ssaDumpInlined holds all inlined functions when ssaDump contains a function name.
-var ssaDumpInlined []*ir.Node
+var ssaDumpInlined []ir.Node
func initssaconfig() {
types_ := ssa.NewTypes()
@@ -186,7 +186,7 @@ func initssaconfig() {
// function/method/interface call), where the receiver of a method call is
// considered as the 0th parameter. This does not include the receiver of an
// interface call.
-func getParam(n *ir.Node, i int) *types.Field {
+func getParam(n ir.Node, i int) *types.Field {
t := n.Left().Type()
if n.Op() == ir.OCALLMETH {
if i == 0 {
@@ -289,7 +289,7 @@ func (s *state) emitOpenDeferInfo() {
// buildssa builds an SSA function for fn.
// worker indicates which of the backend workers is doing the processing.
-func buildssa(fn *ir.Node, worker int) *ssa.Func {
+func buildssa(fn ir.Node, worker int) *ssa.Func {
name := ir.FuncName(fn)
printssa := false
if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", or a package.name e.g. "compress/gzip.(*Reader).Reset"
@@ -356,8 +356,8 @@ func buildssa(fn *ir.Node, worker int) *ssa.Func {
// Allocate starting values
s.labels = map[string]*ssaLabel{}
- s.labeledNodes = map[*ir.Node]*ssaLabel{}
- s.fwdVars = map[*ir.Node]*ssa.Value{}
+ s.labeledNodes = map[ir.Node]*ssaLabel{}
+ s.fwdVars = map[ir.Node]*ssa.Value{}
s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.Func().OpenCodedDeferDisallowed()
@@ -411,7 +411,7 @@ func buildssa(fn *ir.Node, worker int) *ssa.Func {
}
// Generate addresses of local declarations
- s.decladdrs = map[*ir.Node]*ssa.Value{}
+ s.decladdrs = map[ir.Node]*ssa.Value{}
var args []ssa.Param
var results []ssa.Param
for _, n := range fn.Func().Dcl {
@@ -478,7 +478,7 @@ func buildssa(fn *ir.Node, worker int) *ssa.Func {
return s.f
}
-func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *ir.Node) {
+func dumpSourcesColumn(writer *ssa.HTMLWriter, fn ir.Node) {
// Read sources of target function fn.
fname := base.Ctxt.PosTable.Pos(fn.Pos()).Filename()
targetFn, err := readFuncLines(fname, fn.Pos().Line(), fn.Func().Endlineno.Line())
@@ -566,24 +566,24 @@ func (s *state) updateUnsetPredPos(b *ssa.Block) {
// Information about each open-coded defer.
type openDeferInfo struct {
// The ODEFER node representing the function call of the defer
- n *ir.Node
+ n ir.Node
// If defer call is closure call, the address of the argtmp where the
// closure is stored.
closure *ssa.Value
// The node representing the argtmp where the closure is stored - used for
// function, method, or interface call, to store a closure that panic
// processing can use for this defer.
- closureNode *ir.Node
+ closureNode ir.Node
// If defer call is interface call, the address of the argtmp where the
// receiver is stored
rcvr *ssa.Value
// The node representing the argtmp where the receiver is stored
- rcvrNode *ir.Node
+ rcvrNode ir.Node
// The addresses of the argtmps where the evaluated arguments of the defer
// function call are stored.
argVals []*ssa.Value
// The nodes representing the argtmps where the args of the defer are stored
- argNodes []*ir.Node
+ argNodes []ir.Node
}
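
The fields above carry everything the exit path and the panic path need to replay a defer without allocating a _defer record at run time. The scheme they support, in rough pseudocode:

	// At each open-coded defer site i:
	//	save the closure/receiver/args into dedicated argtmps (the Node fields above)
	//	deferBits |= 1 << i
	// At function exit (and, via the recorded funcdata, during panic processing):
	//	if deferBits&(1<<i) != 0 {
	//		deferBits &^= 1 << i
	//		call the saved closure with the saved args
	//	}
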
type state struct {
@@ -594,11 +594,11 @@ type state struct {
f *ssa.Func
// Node for function
- curfn *ir.Node
+ curfn ir.Node
// labels and labeled control flow nodes (OFOR, OFORUNTIL, OSWITCH, OSELECT) in f
labels map[string]*ssaLabel
- labeledNodes map[*ir.Node]*ssaLabel
+ labeledNodes map[ir.Node]*ssaLabel
// unlabeled break and continue statement tracking
breakTo *ssa.Block // current target for plain break statement
@@ -610,18 +610,18 @@ type state struct {
// variable assignments in the current block (map from variable symbol to ssa value)
// The Node is the unique identifier (an ONAME Node) for the variable.
// TODO: keep a single varnum map, then make all of these maps slices instead?
- vars map[*ir.Node]*ssa.Value
+ vars map[ir.Node]*ssa.Value
// fwdVars are variables that are used before they are defined in the current block.
// This map exists just to coalesce multiple references into a single FwdRef op.
// The Node is the unique identifier (an ONAME Node) for the variable.
- fwdVars map[*ir.Node]*ssa.Value
+ fwdVars map[ir.Node]*ssa.Value
// all defined variables at the end of each block. Indexed by block ID.
- defvars []map[*ir.Node]*ssa.Value
+ defvars []map[ir.Node]*ssa.Value
// addresses of PPARAM and PPARAMOUT variables.
- decladdrs map[*ir.Node]*ssa.Value
+ decladdrs map[ir.Node]*ssa.Value
// starting values. Memory, stack pointer, and globals pointer
startmem *ssa.Value
@@ -629,7 +629,7 @@ type state struct {
sb *ssa.Value
// value representing address of where deferBits autotmp is stored
deferBitsAddr *ssa.Value
- deferBitsTemp *ir.Node
+ deferBitsTemp ir.Node
// line number stack. The current line number is top of stack
line []src.XPos
@@ -641,7 +641,7 @@ type state struct {
panics map[funcLine]*ssa.Block
// list of PPARAMOUT (return) variables.
- returns []*ir.Node
+ returns []ir.Node
cgoUnsafeArgs bool
hasdefer bool // whether the function contains a defer statement
@@ -693,7 +693,7 @@ func (s *state) Fatalf(msg string, args ...interface{}) {
func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() }
-func ssaMarker(name string) *ir.Node {
+func ssaMarker(name string) ir.Node {
return NewName(&types.Sym{Name: name})
}
@@ -717,7 +717,7 @@ func (s *state) startBlock(b *ssa.Block) {
s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
}
s.curBlock = b
- s.vars = map[*ir.Node]*ssa.Value{}
+ s.vars = map[ir.Node]*ssa.Value{}
for n := range s.fwdVars {
delete(s.fwdVars, n)
}
@@ -1059,7 +1059,7 @@ func (s *state) stmtList(l ir.Nodes) {
}
// stmt converts the statement n to SSA and adds it to s.
-func (s *state) stmt(n *ir.Node) {
+func (s *state) stmt(n ir.Node) {
if !(n.Op() == ir.OVARKILL || n.Op() == ir.OVARLIVE || n.Op() == ir.OVARDEF) {
// OVARKILL, OVARLIVE, and OVARDEF are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging.
s.pushLine(n.Pos())
@@ -1999,7 +1999,7 @@ func (s *state) ssaShiftOp(op ir.Op, t *types.Type, u *types.Type) ssa.Op {
}
// expr converts the expression n to ssa, adds it to s and returns the ssa result.
-func (s *state) expr(n *ir.Node) *ssa.Value {
+func (s *state) expr(n ir.Node) *ssa.Value {
if hasUniquePos(n) {
// ONAMEs and named OLITERALs have the line number
// of the decl, not the use. See issue 14742.
@@ -2790,7 +2790,7 @@ func (s *state) expr(n *ir.Node) *ssa.Value {
// If inplace is true, it writes the result of the OAPPEND expression n
// back to the slice being appended to, and returns nil.
// inplace MUST be set to false if the slice can be SSA'd.
-func (s *state) append(n *ir.Node, inplace bool) *ssa.Value {
+func (s *state) append(n ir.Node, inplace bool) *ssa.Value {
// If inplace is false, process as expression "append(s, e1, e2, e3)":
//
// ptr, len, cap := s
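
The expansion comment is cut off by the hunk; abridged from the function's own doc comment, it continues roughly:

	// newlen := len + 3
	// if newlen > cap {
	//	ptr, len, cap = growslice(s, newlen)
	//	newlen = len + 3 // recalculated to avoid a spill
	// }
	// *(ptr+len) = e1; *(ptr+len+1) = e2; *(ptr+len+2) = e3
	// return makeslice(ptr, newlen, cap)
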
@@ -2948,7 +2948,7 @@ func (s *state) append(n *ir.Node, inplace bool) *ssa.Value {
// if cond is true and no if cond is false.
// This function is intended to handle && and || better than just calling
// s.expr(cond) and branching on the result.
-func (s *state) condBranch(cond *ir.Node, yes, no *ssa.Block, likely int8) {
+func (s *state) condBranch(cond ir.Node, yes, no *ssa.Block, likely int8) {
switch cond.Op() {
case ir.OANDAND:
mid := s.f.NewBlock(ssa.BlockPlain)
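
The OANDAND arm beginning above implements standard short-circuit lowering: neither operand is materialized as a boolean unless needed. Sketch of the emitted control flow:

	// condBranch(a && b, yes, no, likely):
	//	branch on a: true -> mid, false -> no
	//	mid: branch on b: true -> yes, false -> no
	// (OOROR is symmetric: a's true edge goes directly to yes.)
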
@@ -3000,7 +3000,7 @@ const (
// If deref is true, then we do left = *right instead (and right has already been nil-checked).
// If deref is true and right == nil, just do left = 0.
// skip indicates assignments (at the top level) that can be avoided.
-func (s *state) assign(left *ir.Node, right *ssa.Value, deref bool, skip skipMask) {
+func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask) {
if left.Op() == ir.ONAME && ir.IsBlank(left) {
return
}
@@ -3254,7 +3254,7 @@ var intrinsics map[intrinsicKey]intrinsicBuilder
// An intrinsicBuilder converts a call node n into an ssa value that
// implements that call as an intrinsic. args is a list of arguments to the func.
-type intrinsicBuilder func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value
+type intrinsicBuilder func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value
type intrinsicKey struct {
arch *sys.Arch
@@ -3319,7 +3319,7 @@ func init() {
/******** runtime ********/
if !instrumenting {
add("runtime", "slicebytetostringtmp",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
// Compiler frontend optimizations emit OBYTES2STRTMP nodes
// for the backend instead of slicebytetostringtmp calls
// when not instrumenting.
@@ -3328,7 +3328,7 @@ func init() {
all...)
}
addF("runtime/internal/math", "MulUintptr",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
}
@@ -3336,90 +3336,90 @@ func init() {
},
sys.AMD64, sys.I386, sys.MIPS64)
add("runtime", "KeepAlive",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
s.vars[memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
return nil
},
all...)
add("runtime", "getclosureptr",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr)
},
all...)
add("runtime", "getcallerpc",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr)
},
all...)
add("runtime", "getcallersp",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr)
},
all...)
/******** runtime/internal/sys ********/
addF("runtime/internal/sys", "Ctz32",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/sys", "Ctz64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/sys", "Bswap32",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBswap32, types.Types[types.TUINT32], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
addF("runtime/internal/sys", "Bswap64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBswap64, types.Types[types.TUINT64], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
/******** runtime/internal/atomic ********/
addF("runtime/internal/atomic", "Load",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Load8",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[types.TUINT8], types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT8], v)
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Load64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "LoadAcq",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "LoadAcq64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.PPC64)
addF("runtime/internal/atomic", "Loadp",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
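
The registration helpers used throughout these hunks are declared earlier in the same init function; their shapes, paraphrased from the call sites rather than quoted:

	// add(pkg, fn, builder, archs ...*sys.Arch)        register for specific architectures
	// addF(pkg, fn, builder, fams ...sys.ArchFamily)   register for whole families (sys.AMD64, ...)
	// alias(pkg, fn, pkg2, fn2, archs ...*sys.Arch)    point one name at another's builder
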
@@ -3427,62 +3427,62 @@ func init() {
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store8",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "StorepNoWB",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "StoreRel",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "StoreRel64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.PPC64)
addF("runtime/internal/atomic", "Xchg",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Xchg64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
- type atomicOpEmitter func(s *state, n *ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType)
+ type atomicOpEmitter func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType)
makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.EType, emit atomicOpEmitter) intrinsicBuilder {
- return func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ return func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
// Target Atomic feature is identified by dynamic detection
addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), arm64HasATOMICS, s.sb)
v := s.load(types.Types[types.TBOOL], addr)
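
makeAtomicGuardedIntrinsicARM64, whose body begins above, selects between two instruction sequences at run time. A sketch of the branch it emits:

	// if arm64HasATOMICS {	// CPU feature loaded via OpAddr + s.load above
	//	emit(op1)	// single LSE atomic instruction
	// } else {
	//	emit(op0)	// original LL/SC-style sequence
	// }
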
@@ -3516,7 +3516,7 @@ func init() {
}
}
- atomicXchgXaddEmitterARM64 := func(s *state, n *ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
+ atomicXchgXaddEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
@@ -3529,14 +3529,14 @@ func init() {
sys.ARM64)
addF("runtime/internal/atomic", "Xadd",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Xadd64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
@@ -3551,28 +3551,28 @@ func init() {
sys.ARM64)
addF("runtime/internal/atomic", "Cas",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
},
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Cas64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
},
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "CasRel",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
},
sys.PPC64)
- atomicCasEmitterARM64 := func(s *state, n *ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
+ atomicCasEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
v := s.newValue4(op, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
@@ -3586,31 +3586,31 @@ func init() {
sys.ARM64)
addF("runtime/internal/atomic", "And8",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "And",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "Or8",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "Or",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
- atomicAndOrEmitterARM64 := func(s *state, n *ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
+ atomicAndOrEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem())
}
@@ -3659,52 +3659,52 @@ func init() {
/******** math ********/
addF("math", "Sqrt",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpSqrt, types.Types[types.TFLOAT64], args[0])
},
sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm)
addF("math", "Trunc",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpTrunc, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
addF("math", "Ceil",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCeil, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
addF("math", "Floor",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpFloor, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
addF("math", "Round",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpRound, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X)
addF("math", "RoundToEven",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpRoundToEven, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.S390X, sys.Wasm)
addF("math", "Abs",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpAbs, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.ARM, sys.PPC64, sys.Wasm)
addF("math", "Copysign",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpCopysign, types.Types[types.TFLOAT64], args[0], args[1])
},
sys.PPC64, sys.Wasm)
addF("math", "FMA",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
},
sys.ARM64, sys.PPC64, sys.S390X)
addF("math", "FMA",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if !s.config.UseFMA {
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
return s.variable(n, types.Types[types.TFLOAT64])
@@ -3736,7 +3736,7 @@ func init() {
},
sys.AMD64)
addF("math", "FMA",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if !s.config.UseFMA {
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
return s.variable(n, types.Types[types.TFLOAT64])
@@ -3769,8 +3769,8 @@ func init() {
},
sys.ARM)
- makeRoundAMD64 := func(op ssa.Op) func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
- return func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ makeRoundAMD64 := func(op ssa.Op) func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasSSE41)
b := s.endBlock()
b.Kind = ssa.BlockIf
@@ -3812,17 +3812,17 @@ func init() {
/******** math/bits ********/
addF("math/bits", "TrailingZeros64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "TrailingZeros32",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "TrailingZeros16",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
c := s.constInt32(types.Types[types.TUINT32], 1<<16)
y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
@@ -3830,12 +3830,12 @@ func init() {
},
sys.MIPS)
addF("math/bits", "TrailingZeros16",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz16, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.I386, sys.ARM, sys.ARM64, sys.Wasm)
addF("math/bits", "TrailingZeros16",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
c := s.constInt64(types.Types[types.TUINT64], 1<<16)
y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
@@ -3843,7 +3843,7 @@ func init() {
},
sys.S390X, sys.PPC64)
addF("math/bits", "TrailingZeros8",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
c := s.constInt32(types.Types[types.TUINT32], 1<<8)
y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
@@ -3851,12 +3851,12 @@ func init() {
},
sys.MIPS)
addF("math/bits", "TrailingZeros8",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz8, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM, sys.ARM64, sys.Wasm)
addF("math/bits", "TrailingZeros8",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
c := s.constInt64(types.Types[types.TUINT64], 1<<8)
y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
@@ -3868,17 +3868,17 @@ func init() {
// ReverseBytes inlines correctly, no need to intrinsify it.
// ReverseBytes16 lowers to a rotate, no need for anything special here.
addF("math/bits", "Len64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len32",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64)
addF("math/bits", "Len32",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
}
@@ -3887,7 +3887,7 @@ func init() {
},
sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len16",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
@@ -3897,12 +3897,12 @@ func init() {
},
sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len16",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen16, types.Types[types.TINT], args[0])
},
sys.AMD64)
addF("math/bits", "Len8",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
@@ -3912,12 +3912,12 @@ func init() {
},
sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len8",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen8, types.Types[types.TINT], args[0])
},
sys.AMD64)
addF("math/bits", "Len",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
}
@@ -3926,27 +3926,27 @@ func init() {
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
// LeadingZeros is handled because it trivially calls Len.
addF("math/bits", "Reverse64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse32",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse16",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev16, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse8",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev8, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
}
@@ -3954,29 +3954,29 @@ func init() {
},
sys.ARM64)
addF("math/bits", "RotateLeft8",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpRotateLeft8, types.Types[types.TUINT8], args[0], args[1])
},
sys.AMD64)
addF("math/bits", "RotateLeft16",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpRotateLeft16, types.Types[types.TUINT16], args[0], args[1])
},
sys.AMD64)
addF("math/bits", "RotateLeft32",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpRotateLeft32, types.Types[types.TUINT32], args[0], args[1])
},
sys.AMD64, sys.ARM, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
addF("math/bits", "RotateLeft64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpRotateLeft64, types.Types[types.TUINT64], args[0], args[1])
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...)
- makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
- return func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasPOPCNT)
b := s.endBlock()
b.Kind = ssa.BlockIf
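
makeOnesCountAMD64 uses the same guarded pattern for POPCNT: a run-time branch on x86HasPOPCNT picks the hardware instruction, with the generic math/bits routine called on the other edge. Sketch:

	// if x86HasPOPCNT {	// ssa.OpHasCPUFeature, tested per call site
	//	result = POPCNT(arg)	// op64 or op32, matching the operand width
	// } else {
	//	result = OnesCountN(arg)	// ordinary call to the pure-Go fallback
	// }
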
@@ -4011,7 +4011,7 @@ func init() {
makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount64),
sys.AMD64)
addF("math/bits", "OnesCount64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount64, types.Types[types.TINT], args[0])
},
sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
@@ -4019,7 +4019,7 @@ func init() {
makeOnesCountAMD64(ssa.OpPopCount32, ssa.OpPopCount32),
sys.AMD64)
addF("math/bits", "OnesCount32",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount32, types.Types[types.TINT], args[0])
},
sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
@@ -4027,12 +4027,12 @@ func init() {
makeOnesCountAMD64(ssa.OpPopCount16, ssa.OpPopCount16),
sys.AMD64)
addF("math/bits", "OnesCount16",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount16, types.Types[types.TINT], args[0])
},
sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
addF("math/bits", "OnesCount8",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount8, types.Types[types.TINT], args[0])
},
sys.S390X, sys.PPC64, sys.Wasm)
@@ -4040,25 +4040,25 @@ func init() {
makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32),
sys.AMD64)
addF("math/bits", "Mul64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
},
sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64)
alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE)
addF("math/bits", "Add64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
},
sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X)
alias("math/bits", "Add", "math/bits", "Add64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X)
addF("math/bits", "Sub64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
},
sys.AMD64, sys.ARM64, sys.S390X)
alias("math/bits", "Sub", "math/bits", "Sub64", sys.ArchAMD64, sys.ArchARM64, sys.ArchS390X)
addF("math/bits", "Div64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
// check for divide-by-zero/overflow and panic with appropriate message
cmpZero := s.newValue2(s.ssaOp(ir.ONE, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[2], s.zeroVal(types.Types[types.TUINT64]))
s.check(cmpZero, panicdivide)
@@ -4118,7 +4118,7 @@ func init() {
/******** math/big ********/
add("math/big", "mulWW",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
},
sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64LE, sys.ArchPPC64, sys.ArchS390X)
@@ -4156,7 +4156,7 @@ func findIntrinsic(sym *types.Sym) intrinsicBuilder {
return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}]
}
-func isIntrinsicCall(n *ir.Node) bool {
+func isIntrinsicCall(n ir.Node) bool {
if n == nil || n.Left() == nil {
return false
}
@@ -4164,7 +4164,7 @@ func isIntrinsicCall(n *ir.Node) bool {
}
// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
-func (s *state) intrinsicCall(n *ir.Node) *ssa.Value {
+func (s *state) intrinsicCall(n ir.Node) *ssa.Value {
v := findIntrinsic(n.Left().Sym())(s, n, s.intrinsicArgs(n))
if ssa.IntrinsicsDebug > 0 {
x := v
@@ -4180,9 +4180,9 @@ func (s *state) intrinsicCall(n *ir.Node) *ssa.Value {
}
// intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
-func (s *state) intrinsicArgs(n *ir.Node) []*ssa.Value {
+func (s *state) intrinsicArgs(n ir.Node) []*ssa.Value {
// Construct map of temps; see comments in s.call about the structure of n.
- temps := map[*ir.Node]*ssa.Value{}
+ temps := map[ir.Node]*ssa.Value{}
for _, a := range n.List().Slice() {
if a.Op() != ir.OAS {
s.Fatalf("non-assignment as a temp function argument %v", a.Op())
@@ -4215,7 +4215,7 @@ func (s *state) intrinsicArgs(n *ir.Node) []*ssa.Value {
// call. We will also record funcdata information on where the args are stored
// (as well as the deferBits variable), and this will enable us to run the proper
// defer calls during panics.
-func (s *state) openDeferRecord(n *ir.Node) {
+func (s *state) openDeferRecord(n ir.Node) {
// Do any needed expression evaluation for the args (including the
// receiver, if any). This may be evaluating something like 'autotmp_3 =
// once.mutex'. Such a statement will create a mapping in s.vars[] from
@@ -4224,7 +4224,7 @@ func (s *state) openDeferRecord(n *ir.Node) {
s.stmtList(n.List())
var args []*ssa.Value
- var argNodes []*ir.Node
+ var argNodes []ir.Node
opendefer := &openDeferInfo{
n: n,
@@ -4236,7 +4236,7 @@ func (s *state) openDeferRecord(n *ir.Node) {
// call the function directly if it is a static function.
closureVal := s.expr(fn)
closure := s.openDeferSave(nil, fn.Type(), closureVal)
- opendefer.closureNode = closure.Aux.(*ir.Node)
+ opendefer.closureNode = closure.Aux.(ir.Node)
if !(fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC) {
opendefer.closure = closure
}
@@ -4249,7 +4249,7 @@ func (s *state) openDeferRecord(n *ir.Node) {
// runtime panic code to use. But in the defer exit code, we will
// call the method directly.
closure := s.openDeferSave(nil, fn.Type(), closureVal)
- opendefer.closureNode = closure.Aux.(*ir.Node)
+ opendefer.closureNode = closure.Aux.(ir.Node)
} else {
if fn.Op() != ir.ODOTINTER {
base.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
@@ -4259,8 +4259,8 @@ func (s *state) openDeferRecord(n *ir.Node) {
// Important to get the receiver type correct, so it is recognized
// as a pointer for GC purposes.
opendefer.rcvr = s.openDeferSave(nil, fn.Type().Recv().Type, rcvr)
- opendefer.closureNode = opendefer.closure.Aux.(*ir.Node)
- opendefer.rcvrNode = opendefer.rcvr.Aux.(*ir.Node)
+ opendefer.closureNode = opendefer.closure.Aux.(ir.Node)
+ opendefer.rcvrNode = opendefer.rcvr.Aux.(ir.Node)
}
for _, argn := range n.Rlist().Slice() {
var v *ssa.Value
@@ -4270,7 +4270,7 @@ func (s *state) openDeferRecord(n *ir.Node) {
v = s.openDeferSave(argn, argn.Type(), nil)
}
args = append(args, v)
- argNodes = append(argNodes, v.Aux.(*ir.Node))
+ argNodes = append(argNodes, v.Aux.(ir.Node))
}
opendefer.argVals = args
opendefer.argNodes = argNodes
@@ -4292,7 +4292,7 @@ func (s *state) openDeferRecord(n *ir.Node) {
// type t is non-SSAable, then n must be non-nil (and val should be nil) and n is
// evaluated (via s.addr() below) to get the value that is to be stored. The
// function returns an SSA value representing a pointer to the autotmp location.
-func (s *state) openDeferSave(n *ir.Node, t *types.Type, val *ssa.Value) *ssa.Value {
+func (s *state) openDeferSave(n ir.Node, t *types.Type, val *ssa.Value) *ssa.Value {
canSSA := canSSAType(t)
var pos src.XPos
if canSSA {
@@ -4476,17 +4476,17 @@ func (s *state) openDeferExit() {
}
}
-func (s *state) callResult(n *ir.Node, k callKind) *ssa.Value {
+func (s *state) callResult(n ir.Node, k callKind) *ssa.Value {
return s.call(n, k, false)
}
-func (s *state) callAddr(n *ir.Node, k callKind) *ssa.Value {
+func (s *state) callAddr(n ir.Node, k callKind) *ssa.Value {
return s.call(n, k, true)
}
// Calls the function n using the specified call type.
// Returns the address of the return value (or nil if none).
-func (s *state) call(n *ir.Node, k callKind, returnResultAddr bool) *ssa.Value {
+func (s *state) call(n ir.Node, k callKind, returnResultAddr bool) *ssa.Value {
s.prevCall = nil
var sym *types.Sym // target symbol (if static)
var closure *ssa.Value // ptr to closure to run (if dynamic)
@@ -4788,7 +4788,7 @@ func (s *state) maybeNilCheckClosure(closure *ssa.Value, k callKind) {
}
// getMethodClosure returns a value representing the closure for a method call
-func (s *state) getMethodClosure(fn *ir.Node) *ssa.Value {
+func (s *state) getMethodClosure(fn ir.Node) *ssa.Value {
// Make a name n2 for the function.
// fn.Sym might be sync.(*Mutex).Unlock.
// Make a PFUNC node out of that, then evaluate it.
@@ -4805,7 +4805,7 @@ func (s *state) getMethodClosure(fn *ir.Node) *ssa.Value {
// getClosureAndRcvr returns values for the appropriate closure and receiver of an
// interface call
-func (s *state) getClosureAndRcvr(fn *ir.Node) (*ssa.Value, *ssa.Value) {
+func (s *state) getClosureAndRcvr(fn ir.Node) (*ssa.Value, *ssa.Value) {
i := s.expr(fn.Left())
itab := s.newValue1(ssa.OpITab, types.Types[types.TUINTPTR], i)
s.nilCheck(itab)
@@ -4829,7 +4829,7 @@ func etypesign(e types.EType) int8 {
// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
// The value that the returned Value represents is guaranteed to be non-nil.
-func (s *state) addr(n *ir.Node) *ssa.Value {
+func (s *state) addr(n ir.Node) *ssa.Value {
if n.Op() != ir.ONAME {
s.pushLine(n.Pos())
defer s.popLine()
@@ -4931,7 +4931,7 @@ func (s *state) addr(n *ir.Node) *ssa.Value {
// canSSA reports whether n is SSA-able.
// n must be an ONAME (or an ODOT sequence with an ONAME base).
-func (s *state) canSSA(n *ir.Node) bool {
+func (s *state) canSSA(n ir.Node) bool {
if base.Flag.N != 0 {
return false
}
@@ -5012,7 +5012,7 @@ func canSSAType(t *types.Type) bool {
}
// exprPtr evaluates n to a pointer and nil-checks it.
-func (s *state) exprPtr(n *ir.Node, bounded bool, lineno src.XPos) *ssa.Value {
+func (s *state) exprPtr(n ir.Node, bounded bool, lineno src.XPos) *ssa.Value {
p := s.expr(n)
if bounded || n.NonNil() {
if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 {
@@ -5151,7 +5151,7 @@ func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
s.startBlock(bNext)
}
-func (s *state) intDivide(n *ir.Node, a, b *ssa.Value) *ssa.Value {
+func (s *state) intDivide(n ir.Node, a, b *ssa.Value) *ssa.Value {
needcheck := true
switch b.Op {
case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
@@ -5370,7 +5370,7 @@ func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
// putArg evaluates n for the purpose of passing it as an argument to a function and returns the corresponding Param for the call.
// If forLateExpandedCall is true, it returns the argument value to pass to the call operation.
// If forLateExpandedCall is false, then the value is stored at the specified stack offset, and the returned value is nil.
-func (s *state) putArg(n *ir.Node, t *types.Type, off int64, forLateExpandedCall bool) (ssa.Param, *ssa.Value) {
+func (s *state) putArg(n ir.Node, t *types.Type, off int64, forLateExpandedCall bool) (ssa.Param, *ssa.Value) {
var a *ssa.Value
if forLateExpandedCall {
if !canSSAType(t) {
@@ -5384,7 +5384,7 @@ func (s *state) putArg(n *ir.Node, t *types.Type, off int64, forLateExpandedCall
return ssa.Param{Type: t, Offset: int32(off)}, a
}
-func (s *state) storeArgWithBase(n *ir.Node, t *types.Type, base *ssa.Value, off int64) {
+func (s *state) storeArgWithBase(n ir.Node, t *types.Type, base *ssa.Value, off int64) {
pt := types.NewPtr(t)
var addr *ssa.Value
if base == s.sp {
@@ -5545,15 +5545,15 @@ var u64_f32 = u642fcvtTab{
one: (*state).constInt64,
}
-func (s *state) uint64Tofloat64(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint64Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
}
-func (s *state) uint64Tofloat32(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint64Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
}
-func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// if x >= 0 {
// result = (floatY) x
// } else {
@@ -5626,15 +5626,15 @@ var u32_f32 = u322fcvtTab{
cvtF2F: ssa.OpCvt64Fto32F,
}
-func (s *state) uint32Tofloat64(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint32Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
}
-func (s *state) uint32Tofloat32(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint32Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
}
-func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// if x >= 0 {
// result = floatY(x)
// } else {
@@ -5673,7 +5673,7 @@ func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *ir.Node, x *ssa.Value, ft,
}
// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
-func (s *state) referenceTypeBuiltin(n *ir.Node, x *ssa.Value) *ssa.Value {
+func (s *state) referenceTypeBuiltin(n ir.Node, x *ssa.Value) *ssa.Value {
if !n.Left().Type().IsMap() && !n.Left().Type().IsChan() {
s.Fatalf("node must be a map or a channel")
}
@@ -5771,22 +5771,22 @@ var f64_u32 = f2uCvtTab{
cutoff: 1 << 31,
}
-func (s *state) float32ToUint64(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) float32ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f32_u64, n, x, ft, tt)
}
-func (s *state) float64ToUint64(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) float64ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f64_u64, n, x, ft, tt)
}
-func (s *state) float32ToUint32(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) float32ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f32_u32, n, x, ft, tt)
}
-func (s *state) float64ToUint32(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) float64ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f64_u32, n, x, ft, tt)
}
-func (s *state) floatToUint(cvttab *f2uCvtTab, n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// cutoff:=1<<(intY_Size-1)
// if x < floatX(cutoff) {
// result = uintY(x)
@@ -5830,7 +5830,7 @@ func (s *state) floatToUint(cvttab *f2uCvtTab, n *ir.Node, x *ssa.Value, ft, tt
// dottype generates SSA for a type assertion node.
// commaok indicates whether to panic or return a bool.
// If commaok is false, resok will be nil.
-func (s *state) dottype(n *ir.Node, commaok bool) (res, resok *ssa.Value) {
+func (s *state) dottype(n ir.Node, commaok bool) (res, resok *ssa.Value) {
iface := s.expr(n.Left()) // input interface
target := s.expr(n.Right()) // target type
byteptr := s.f.Config.Types.BytePtr
@@ -5942,7 +5942,7 @@ func (s *state) dottype(n *ir.Node, commaok bool) (res, resok *ssa.Value) {
targetITab = s.expr(n.List().First())
}
- var tmp *ir.Node // temporary for use with large types
+ var tmp ir.Node // temporary for use with large types
var addr *ssa.Value // address of tmp
if commaok && !canSSAType(n.Type()) {
// unSSAable type, use temporary.
@@ -6032,7 +6032,7 @@ func (s *state) dottype(n *ir.Node, commaok bool) (res, resok *ssa.Value) {
}
// variable returns the value of a variable at the current location.
-func (s *state) variable(name *ir.Node, t *types.Type) *ssa.Value {
+func (s *state) variable(name ir.Node, t *types.Type) *ssa.Value {
v := s.vars[name]
if v != nil {
return v
@@ -6058,7 +6058,7 @@ func (s *state) mem() *ssa.Value {
return s.variable(memVar, types.TypeMem)
}
-func (s *state) addNamedValue(n *ir.Node, v *ssa.Value) {
+func (s *state) addNamedValue(n ir.Node, v *ssa.Value) {
if n.Class() == ir.Pxxx {
// Don't track our marker nodes (memVar etc.).
return
@@ -6111,7 +6111,7 @@ type SSAGenState struct {
bstart []*obj.Prog
// Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include PPC and Sparc V8.
- ScratchFpMem *ir.Node
+ ScratchFpMem ir.Node
maxarg int64 // largest frame size for arguments to calls made by the function
@@ -6194,14 +6194,14 @@ func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) {
}
// byXoffset implements sort.Interface for []ir.Node using Xoffset as the ordering.
-type byXoffset []*ir.Node
+type byXoffset []ir.Node
func (s byXoffset) Len() int { return len(s) }
func (s byXoffset) Less(i, j int) bool { return s[i].Offset() < s[j].Offset() }
func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func emitStackObjects(e *ssafn, pp *Progs) {
- var vars []*ir.Node
+ var vars []ir.Node
for _, n := range e.curfn.Func().Dcl {
if livenessShouldTrack(n) && n.Name().Addrtaken() {
vars = append(vars, n)
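byXoffset needs no further changes to keep satisfying sort.Interface over the interface slice, since Len/Less/Swap only call methods on the elements. A self-contained sketch of the same pattern, with Node and Offset as stand-ins for the ir.Node method set:

package main

import (
	"fmt"
	"sort"
)

// Node is a stand-in exposing just the method the sort needs.
type Node interface{ Offset() int64 }

type stackVar struct{ off int64 }

func (v *stackVar) Offset() int64 { return v.off }

// byXoffset mirrors the compiler's sort.Interface over []Node.
type byXoffset []Node

func (s byXoffset) Len() int           { return len(s) }
func (s byXoffset) Less(i, j int) bool { return s[i].Offset() < s[j].Offset() }
func (s byXoffset) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

func main() {
	vars := byXoffset{&stackVar{16}, &stackVar{0}, &stackVar{8}}
	sort.Sort(vars)
	for _, v := range vars {
		fmt.Println(v.Offset()) // 0, then 8, then 16
	}
}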
@@ -6677,7 +6677,7 @@ func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
case *obj.LSym:
a.Name = obj.NAME_EXTERN
a.Sym = n
- case *ir.Node:
+ case ir.Node:
if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
a.Name = obj.NAME_PARAM
a.Sym = n.Orig().Sym().Linksym()
@@ -6816,7 +6816,7 @@ func CheckLoweredGetClosurePtr(v *ssa.Value) {
// AutoVar returns an ir.Node and int64 representing the auto variable and offset within it
// where v should be spilled.
-func AutoVar(v *ssa.Value) (*ir.Node, int64) {
+func AutoVar(v *ssa.Value) (ir.Node, int64) {
loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot)
if v.Type.Size() > loc.Type.Size() {
v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
@@ -6927,7 +6927,7 @@ func (s *SSAGenState) UseArgs(n int64) {
}
// fieldIdx finds the index of the field referred to by the ODOT node n.
-func fieldIdx(n *ir.Node) int {
+func fieldIdx(n ir.Node) int {
t := n.Left().Type()
f := n.Sym()
if !t.IsStruct() {
@@ -6954,9 +6954,9 @@ func fieldIdx(n *ir.Node) int {
// ssafn holds frontend information about a function that the backend is processing.
// It also exports a bunch of compiler services for the ssa backend.
type ssafn struct {
- curfn *ir.Node
+ curfn ir.Node
strings map[string]*obj.LSym // map from constant string to data symbols
- scratchFpMem *ir.Node // temp for floating point register / memory moves on some architectures
+ scratchFpMem ir.Node // temp for floating point register / memory moves on some architectures
stksize int64 // stack size for current frame
stkptrsize int64 // prefix of stack containing pointers
log bool // print ssa debug to the stdout
@@ -6976,7 +6976,7 @@ func (e *ssafn) StringData(s string) *obj.LSym {
return data
}
-func (e *ssafn) Auto(pos src.XPos, t *types.Type) *ir.Node {
+func (e *ssafn) Auto(pos src.XPos, t *types.Type) ir.Node {
n := tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
return n
}
@@ -7148,7 +7148,7 @@ func (e *ssafn) MyImportPath() string {
return base.Ctxt.Pkgpath
}
-func clobberBase(n *ir.Node) *ir.Node {
+func clobberBase(n ir.Node) ir.Node {
if n.Op() == ir.ODOT && n.Left().Type().NumFields() == 1 {
return clobberBase(n.Left())
}
diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go
index 542dc49bb0..fcda219737 100644
--- a/src/cmd/compile/internal/gc/subr.go
+++ b/src/cmd/compile/internal/gc/subr.go
@@ -40,7 +40,7 @@ var (
// It's primarily used to distinguish references to named objects,
// whose Pos will point back to their declaration position rather than
// their usage position.
-func hasUniquePos(n *ir.Node) bool {
+func hasUniquePos(n ir.Node) bool {
switch n.Op() {
case ir.ONAME, ir.OPACK:
return false
@@ -60,7 +60,7 @@ func hasUniquePos(n *ir.Node) bool {
return true
}
-func setlineno(n *ir.Node) src.XPos {
+func setlineno(n ir.Node) src.XPos {
lno := base.Pos
if n != nil && hasUniquePos(n) {
base.Pos = n.Pos()
@@ -102,7 +102,7 @@ func autolabel(prefix string) *types.Sym {
// find all the exported symbols in package opkg
// and make them available in the current package
-func importdot(opkg *types.Pkg, pack *ir.Node) {
+func importdot(opkg *types.Pkg, pack ir.Node) {
n := 0
for _, s := range opkg.Syms {
if s.Def == nil {
@@ -136,7 +136,7 @@ func importdot(opkg *types.Pkg, pack *ir.Node) {
}
// NewName returns a new ONAME Node associated with symbol s.
-func NewName(s *types.Sym) *ir.Node {
+func NewName(s *types.Sym) ir.Node {
n := ir.NewNameAt(base.Pos, s)
n.Name().Curfn = Curfn
return n
@@ -144,13 +144,13 @@ func NewName(s *types.Sym) *ir.Node {
// nodSym makes a Node with Op op and with the Left field set to left
// and the Sym field set to sym. This is for ODOT and friends.
-func nodSym(op ir.Op, left *ir.Node, sym *types.Sym) *ir.Node {
+func nodSym(op ir.Op, left ir.Node, sym *types.Sym) ir.Node {
return nodlSym(base.Pos, op, left, sym)
}
// nodlSym makes a Node with position Pos, with Op op, and with the Left field set to left
// and the Sym field set to sym. This is for ODOT and friends.
-func nodlSym(pos src.XPos, op ir.Op, left *ir.Node, sym *types.Sym) *ir.Node {
+func nodlSym(pos src.XPos, op ir.Op, left ir.Node, sym *types.Sym) ir.Node {
n := ir.NodAt(pos, op, left, nil)
n.SetSym(sym)
return n
@@ -163,21 +163,21 @@ func (x methcmp) Len() int { return len(x) }
func (x methcmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x methcmp) Less(i, j int) bool { return x[i].Sym.Less(x[j].Sym) }
-func nodintconst(v int64) *ir.Node {
+func nodintconst(v int64) ir.Node {
return ir.NewLiteral(constant.MakeInt64(v))
}
-func nodnil() *ir.Node {
+func nodnil() ir.Node {
n := ir.Nod(ir.ONIL, nil, nil)
n.SetType(types.Types[types.TNIL])
return n
}
-func nodbool(b bool) *ir.Node {
+func nodbool(b bool) ir.Node {
return ir.NewLiteral(constant.MakeBool(b))
}
-func nodstr(s string) *ir.Node {
+func nodstr(s string) ir.Node {
return ir.NewLiteral(constant.MakeString(s))
}
@@ -185,7 +185,7 @@ func nodstr(s string) *ir.Node {
// ONAME, OLITERAL, OTYPE, and ONONAME leaves.
// If pos.IsKnown(), it sets the source position of newly
// allocated nodes to pos.
-func treecopy(n *ir.Node, pos src.XPos) *ir.Node {
+func treecopy(n ir.Node, pos src.XPos) ir.Node {
if n == nil {
return nil
}
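Nil checks such as the one above keep their meaning only because the conversion stores untyped nils: wrapping a nil *ir.node in an ir.Node would produce a non-nil interface. A hedged illustration of that general Go pitfall (the names are stand-ins, not the compiler's):

package main

import "fmt"

type Node interface{ Op() string }

type node struct{}

func (n *node) Op() string { return "ONAME" }

func main() {
	var n Node            // zero value: nil interface
	fmt.Println(n == nil) // true

	var p *node           // nil pointer
	n = p                 // interface with type *node but nil value
	fmt.Println(n == nil) // false: a typed nil is not a nil interface
}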
@@ -511,12 +511,12 @@ func convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) {
return ir.OXXX, ""
}
-func assignconv(n *ir.Node, t *types.Type, context string) *ir.Node {
+func assignconv(n ir.Node, t *types.Type, context string) ir.Node {
return assignconvfn(n, t, func() string { return context })
}
// Convert node n for assignment to type t.
-func assignconvfn(n *ir.Node, t *types.Type, context func() string) *ir.Node {
+func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node {
if n == nil || n.Type() == nil || n.Type().Broke() {
return n
}
@@ -565,7 +565,7 @@ func assignconvfn(n *ir.Node, t *types.Type, context func() string) *ir.Node {
// backingArrayPtrLen extracts the pointer and length from a slice or string.
// This constructs two nodes referring to n, so n must be a cheapexpr.
-func backingArrayPtrLen(n *ir.Node) (ptr, len *ir.Node) {
+func backingArrayPtrLen(n ir.Node) (ptr, len ir.Node) {
var init ir.Nodes
c := cheapexpr(n, &init)
if c != n || init.Len() != 0 {
@@ -584,7 +584,7 @@ func backingArrayPtrLen(n *ir.Node) (ptr, len *ir.Node) {
// labeledControl returns the control flow Node (for, switch, select)
// associated with the label n, if any.
-func labeledControl(n *ir.Node) *ir.Node {
+func labeledControl(n ir.Node) ir.Node {
if n.Op() != ir.OLABEL {
base.Fatalf("labeledControl %v", n.Op())
}
@@ -599,7 +599,7 @@ func labeledControl(n *ir.Node) *ir.Node {
return nil
}
-func syslook(name string) *ir.Node {
+func syslook(name string) ir.Node {
s := Runtimepkg.Lookup(name)
if s == nil || s.Def == nil {
base.Fatalf("syslook: can't find runtime.%s", name)
@@ -618,14 +618,14 @@ func typehash(t *types.Type) uint32 {
// updateHasCall checks whether expression n contains any function
// calls and sets the n.HasCall flag if so.
-func updateHasCall(n *ir.Node) {
+func updateHasCall(n ir.Node) {
if n == nil {
return
}
n.SetHasCall(calcHasCall(n))
}
-func calcHasCall(n *ir.Node) bool {
+func calcHasCall(n ir.Node) bool {
if n.Init().Len() != 0 {
// TODO(mdempsky): This seems overly conservative.
return true
@@ -740,7 +740,7 @@ func brrev(op ir.Op) ir.Op {
// return side effect-free n, appending side effects to init.
// result is assignable if n is.
-func safeexpr(n *ir.Node, init *ir.Nodes) *ir.Node {
+func safeexpr(n ir.Node, init *ir.Nodes) ir.Node {
if n == nil {
return nil
}
@@ -800,7 +800,7 @@ func safeexpr(n *ir.Node, init *ir.Nodes) *ir.Node {
return cheapexpr(n, init)
}
-func copyexpr(n *ir.Node, t *types.Type, init *ir.Nodes) *ir.Node {
+func copyexpr(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
l := temp(t)
a := ir.Nod(ir.OAS, l, n)
a = typecheck(a, ctxStmt)
@@ -811,7 +811,7 @@ func copyexpr(n *ir.Node, t *types.Type, init *ir.Nodes) *ir.Node {
// return side-effect free and cheap n, appending side effects to init.
// result may not be assignable.
-func cheapexpr(n *ir.Node, init *ir.Nodes) *ir.Node {
+func cheapexpr(n ir.Node, init *ir.Nodes) ir.Node {
switch n.Op() {
case ir.ONAME, ir.OLITERAL, ir.ONIL:
return n
@@ -957,7 +957,7 @@ func dotpath(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) (
// find missing fields that
// will give shortest unique addressing.
// modify the tree with missing type names.
-func adddot(n *ir.Node) *ir.Node {
+func adddot(n ir.Node) ir.Node {
n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr))
if n.Left().Diag() {
n.SetDiag(true)
@@ -1116,8 +1116,8 @@ func expandmeth(t *types.Type) {
}
// structargs, given a funarg struct list, returns the list of ODCLFIELD Node fn args.
-func structargs(tl *types.Type, mustname bool) []*ir.Node {
- var args []*ir.Node
+func structargs(tl *types.Type, mustname bool) []ir.Node {
+ var args []ir.Node
gen := 0
for _, t := range tl.Fields().Slice() {
s := t.Sym
@@ -1250,30 +1250,30 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym != nil {
inlcalls(fn)
}
- escapeFuncs([]*ir.Node{fn}, false)
+ escapeFuncs([]ir.Node{fn}, false)
Curfn = nil
xtop = append(xtop, fn)
}
-func paramNnames(ft *types.Type) []*ir.Node {
- args := make([]*ir.Node, ft.NumParams())
+func paramNnames(ft *types.Type) []ir.Node {
+ args := make([]ir.Node, ft.NumParams())
for i, f := range ft.Params().FieldSlice() {
args[i] = ir.AsNode(f.Nname)
}
return args
}
-func hashmem(t *types.Type) *ir.Node {
+func hashmem(t *types.Type) ir.Node {
sym := Runtimepkg.Lookup("memhash")
n := NewName(sym)
setNodeNameFunc(n)
- n.SetType(functype(nil, []*ir.Node{
+ n.SetType(functype(nil, []ir.Node{
anonfield(types.NewPtr(t)),
anonfield(types.Types[types.TUINTPTR]),
anonfield(types.Types[types.TUINTPTR]),
- }, []*ir.Node{
+ }, []ir.Node{
anonfield(types.Types[types.TUINTPTR]),
}))
return n
@@ -1393,15 +1393,15 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool
return true
}
-func listtreecopy(l []*ir.Node, pos src.XPos) []*ir.Node {
- var out []*ir.Node
+func listtreecopy(l []ir.Node, pos src.XPos) []ir.Node {
+ var out []ir.Node
for _, n := range l {
out = append(out, treecopy(n, pos))
}
return out
}
-func liststmt(l []*ir.Node) *ir.Node {
+func liststmt(l []ir.Node) ir.Node {
n := ir.Nod(ir.OBLOCK, nil, nil)
n.PtrList().Set(l)
if len(l) != 0 {
@@ -1410,7 +1410,7 @@ func liststmt(l []*ir.Node) *ir.Node {
return n
}
-func ngotype(n *ir.Node) *types.Sym {
+func ngotype(n ir.Node) *types.Sym {
if n.Type() != nil {
return typenamesym(n.Type())
}
@@ -1419,7 +1419,7 @@ func ngotype(n *ir.Node) *types.Sym {
// The result of addinit MUST be assigned back to n, e.g.
// n.Left = addinit(n.Left, init)
-func addinit(n *ir.Node, init []*ir.Node) *ir.Node {
+func addinit(n ir.Node, init []ir.Node) ir.Node {
if len(init) == 0 {
return n
}
@@ -1518,7 +1518,7 @@ func isdirectiface(t *types.Type) bool {
}
// itabType loads the _type field from a runtime.itab struct.
-func itabType(itab *ir.Node) *ir.Node {
+func itabType(itab ir.Node) ir.Node {
typ := nodSym(ir.ODOTPTR, itab, nil)
typ.SetType(types.NewPtr(types.Types[types.TUINT8]))
typ.SetTypecheck(1)
@@ -1530,7 +1530,7 @@ func itabType(itab *ir.Node) *ir.Node {
// ifaceData loads the data field from an interface.
// The concrete type must be known to have type t.
// It follows the pointer if !isdirectiface(t).
-func ifaceData(pos src.XPos, n *ir.Node, t *types.Type) *ir.Node {
+func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node {
if t.IsInterface() {
base.Fatalf("ifaceData interface: %v", t)
}
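itabType and ifaceData rely on the two-word runtime layout of interface values: a type word followed by a data word. For a non-empty interface the type word points to a runtime.itab whose _type field itabType loads; ifaceData fetches the data word, following the pointer when !isdirectiface(t). A hedged, unsafe illustration for the empty-interface case only:

package main

import (
	"fmt"
	"unsafe"
)

// eface mirrors the runtime's empty-interface layout: two words.
type eface struct {
	typ  unsafe.Pointer // type word; an *itab for non-empty interfaces
	data unsafe.Pointer // data word, the field ifaceData loads
}

func main() {
	var i interface{} = 42
	e := (*eface)(unsafe.Pointer(&i))
	fmt.Println(e.typ != nil, e.data != nil) // true true
}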
diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go
index c85483fafa..02d38ac4b1 100644
--- a/src/cmd/compile/internal/gc/swt.go
+++ b/src/cmd/compile/internal/gc/swt.go
@@ -15,7 +15,7 @@ import (
)
// typecheckswitch typechecks a switch statement.
-func typecheckswitch(n *ir.Node) {
+func typecheckswitch(n ir.Node) {
typecheckslice(n.Init().Slice(), ctxStmt)
if n.Left() != nil && n.Left().Op() == ir.OTYPESW {
typecheckTypeSwitch(n)
@@ -24,7 +24,7 @@ func typecheckswitch(n *ir.Node) {
}
}
-func typecheckTypeSwitch(n *ir.Node) {
+func typecheckTypeSwitch(n ir.Node) {
n.Left().SetRight(typecheck(n.Left().Right(), ctxExpr))
t := n.Left().Right().Type()
if t != nil && !t.IsInterface() {
@@ -39,7 +39,7 @@ func typecheckTypeSwitch(n *ir.Node) {
base.ErrorfAt(v.Pos(), "%v declared but not used", v.Sym())
}
- var defCase, nilCase *ir.Node
+ var defCase, nilCase ir.Node
var ts typeSet
for _, ncase := range n.List().Slice() {
ls := ncase.List().Slice()
@@ -144,7 +144,7 @@ func (s *typeSet) add(pos src.XPos, typ *types.Type) {
s.m[ls] = append(prevs, typeSetEntry{pos, typ})
}
-func typecheckExprSwitch(n *ir.Node) {
+func typecheckExprSwitch(n ir.Node) {
t := types.Types[types.TBOOL]
if n.Left() != nil {
n.SetLeft(typecheck(n.Left(), ctxExpr))
@@ -172,7 +172,7 @@ func typecheckExprSwitch(n *ir.Node) {
}
}
- var defCase *ir.Node
+ var defCase ir.Node
var cs constSet
for _, ncase := range n.List().Slice() {
ls := ncase.List().Slice()
@@ -225,7 +225,7 @@ func typecheckExprSwitch(n *ir.Node) {
}
// walkswitch walks a switch statement.
-func walkswitch(sw *ir.Node) {
+func walkswitch(sw ir.Node) {
// Guard against double walk, see #25776.
if sw.List().Len() == 0 && sw.Body().Len() > 0 {
return // Was fatal, but eliminating every possible source of double-walking is hard
@@ -240,7 +240,7 @@ func walkswitch(sw *ir.Node) {
// walkExprSwitch generates an AST implementing sw. sw is an
// expression switch.
-func walkExprSwitch(sw *ir.Node) {
+func walkExprSwitch(sw ir.Node) {
lno := setlineno(sw)
cond := sw.Left()
@@ -275,7 +275,7 @@ func walkExprSwitch(sw *ir.Node) {
exprname: cond,
}
- var defaultGoto *ir.Node
+ var defaultGoto ir.Node
var body ir.Nodes
for _, ncase := range sw.List().Slice() {
label := autolabel(".s")
@@ -318,7 +318,7 @@ func walkExprSwitch(sw *ir.Node) {
// An exprSwitch walks an expression switch.
type exprSwitch struct {
- exprname *ir.Node // value being switched on
+ exprname ir.Node // value being switched on
done ir.Nodes
clauses []exprClause
@@ -326,11 +326,11 @@ type exprSwitch struct {
type exprClause struct {
pos src.XPos
- lo, hi *ir.Node
- jmp *ir.Node
+ lo, hi ir.Node
+ jmp ir.Node
}
-func (s *exprSwitch) Add(pos src.XPos, expr, jmp *ir.Node) {
+func (s *exprSwitch) Add(pos src.XPos, expr, jmp ir.Node) {
c := exprClause{pos: pos, lo: expr, hi: expr, jmp: jmp}
if okforcmp[s.exprname.Type().Etype] && expr.Op() == ir.OLITERAL {
s.clauses = append(s.clauses, c)
@@ -390,10 +390,10 @@ func (s *exprSwitch) flush() {
// Perform two-level binary search.
binarySearch(len(runs), &s.done,
- func(i int) *ir.Node {
+ func(i int) ir.Node {
return ir.Nod(ir.OLE, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(runs[i-1])))
},
- func(i int, nif *ir.Node) {
+ func(i int, nif ir.Node) {
run := runs[i]
nif.SetLeft(ir.Nod(ir.OEQ, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(run))))
s.search(run, nif.PtrBody())
@@ -425,10 +425,10 @@ func (s *exprSwitch) flush() {
func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) {
binarySearch(len(cc), out,
- func(i int) *ir.Node {
+ func(i int) ir.Node {
return ir.Nod(ir.OLE, s.exprname, cc[i-1].hi)
},
- func(i int, nif *ir.Node) {
+ func(i int, nif ir.Node) {
c := &cc[i]
nif.SetLeft(c.test(s.exprname))
nif.PtrBody().Set1(c.jmp)
@@ -436,7 +436,7 @@ func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) {
)
}
-func (c *exprClause) test(exprname *ir.Node) *ir.Node {
+func (c *exprClause) test(exprname ir.Node) ir.Node {
// Integer range.
if c.hi != c.lo {
low := ir.NodAt(c.pos, ir.OGE, exprname, c.lo)
@@ -456,7 +456,7 @@ func (c *exprClause) test(exprname *ir.Node) *ir.Node {
return ir.NodAt(c.pos, ir.OEQ, exprname, c.lo)
}
-func allCaseExprsAreSideEffectFree(sw *ir.Node) bool {
+func allCaseExprsAreSideEffectFree(sw ir.Node) bool {
// In theory, we could be more aggressive, allowing any
// side-effect-free expressions in cases, but it's a bit
// tricky because some of that information is unavailable due
@@ -478,7 +478,7 @@ func allCaseExprsAreSideEffectFree(sw *ir.Node) bool {
}
// hasFall reports whether stmts ends with a "fallthrough" statement.
-func hasFall(stmts []*ir.Node) (bool, src.XPos) {
+func hasFall(stmts []ir.Node) (bool, src.XPos) {
// Search backwards for the index of the fallthrough
// statement. Do not assume it'll be in the last
// position, since in some cases (e.g. when the statement
@@ -497,7 +497,7 @@ func hasFall(stmts []*ir.Node) (bool, src.XPos) {
// walkTypeSwitch generates an AST that implements sw, where sw is a
// type switch.
-func walkTypeSwitch(sw *ir.Node) {
+func walkTypeSwitch(sw ir.Node) {
var s typeSwitch
s.facename = sw.Left().Right()
sw.SetLeft(nil)
@@ -538,10 +538,10 @@ func walkTypeSwitch(sw *ir.Node) {
s.hashname = copyexpr(dotHash, dotHash.Type(), sw.PtrBody())
br := ir.Nod(ir.OBREAK, nil, nil)
- var defaultGoto, nilGoto *ir.Node
+ var defaultGoto, nilGoto ir.Node
var body ir.Nodes
for _, ncase := range sw.List().Slice() {
- var caseVar *ir.Node
+ var caseVar ir.Node
if ncase.Rlist().Len() != 0 {
caseVar = ncase.Rlist().First()
}
@@ -592,7 +592,7 @@ func walkTypeSwitch(sw *ir.Node) {
}
val = ifaceData(ncase.Pos(), s.facename, singleType)
}
- l := []*ir.Node{
+ l := []ir.Node{
ir.NodAt(ncase.Pos(), ir.ODCL, caseVar, nil),
ir.NodAt(ncase.Pos(), ir.OAS, caseVar, val),
}
@@ -622,9 +622,9 @@ func walkTypeSwitch(sw *ir.Node) {
// A typeSwitch walks a type switch.
type typeSwitch struct {
// Temporary variables (i.e., ONAMEs) used by type switch dispatch logic:
- facename *ir.Node // value being type-switched on
- hashname *ir.Node // type hash of the value being type-switched on
- okname *ir.Node // boolean used for comma-ok type assertions
+ facename ir.Node // value being type-switched on
+ hashname ir.Node // type hash of the value being type-switched on
+ okname ir.Node // boolean used for comma-ok type assertions
done ir.Nodes
clauses []typeClause
@@ -635,10 +635,10 @@ type typeClause struct {
body ir.Nodes
}
-func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp *ir.Node) {
+func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp ir.Node) {
var body ir.Nodes
if caseVar != nil {
- l := []*ir.Node{
+ l := []ir.Node{
ir.NodAt(pos, ir.ODCL, caseVar, nil),
ir.NodAt(pos, ir.OAS, caseVar, nil),
}
@@ -703,10 +703,10 @@ func (s *typeSwitch) flush() {
cc = merged
binarySearch(len(cc), &s.done,
- func(i int) *ir.Node {
+ func(i int) ir.Node {
return ir.Nod(ir.OLE, s.hashname, nodintconst(int64(cc[i-1].hash)))
},
- func(i int, nif *ir.Node) {
+ func(i int, nif ir.Node) {
// TODO(mdempsky): Omit hash equality check if
// there's only one type.
c := cc[i]
@@ -725,7 +725,7 @@ func (s *typeSwitch) flush() {
//
// leaf(i, nif) should set up nif (an OIF node) to test case i. In
// particular, it should set nif.Left and nif.Nbody.
-func binarySearch(n int, out *ir.Nodes, less func(i int) *ir.Node, leaf func(i int, nif *ir.Node)) {
+func binarySearch(n int, out *ir.Nodes, less func(i int) ir.Node, leaf func(i int, nif ir.Node)) {
const binarySearchMin = 4 // minimum number of cases for binary search
var do func(lo, hi int, out *ir.Nodes)
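binarySearch emits a tree of OIF nodes rather than executing one, but its control shape is the usual recursive split with a linear scan below binarySearchMin. A runtime analogue of that shape, assuming less and leaf act on values instead of building IR:

// binaryDispatch mirrors binarySearch's structure: linear scan of
// leaves below a small cutoff, otherwise split guarded by less(mid).
func binaryDispatch(lo, hi int, less func(i int) bool, leaf func(i int)) {
	const min = 4 // plays the role of binarySearchMin
	if hi-lo < min {
		for i := lo; i < hi; i++ {
			leaf(i) // in the compiler: emit the test for case i
		}
		return
	}
	mid := lo + (hi-lo)/2
	if less(mid) {
		binaryDispatch(lo, mid, less, leaf)
	} else {
		binaryDispatch(mid, hi, less, leaf)
	}
}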
diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go
index 0559dabe32..4e2f205312 100644
--- a/src/cmd/compile/internal/gc/typecheck.go
+++ b/src/cmd/compile/internal/gc/typecheck.go
@@ -20,7 +20,7 @@ const enableTrace = false
var traceIndent []byte
var skipDowidthForTracing bool
-func tracePrint(title string, n *ir.Node) func(np **ir.Node) {
+func tracePrint(title string, n ir.Node) func(np *ir.Node) {
indent := traceIndent
// guard against nil
@@ -37,7 +37,7 @@ func tracePrint(title string, n *ir.Node) func(np **ir.Node) {
fmt.Printf("%s: %s%s %p %s %v tc=%d\n", pos, indent, title, n, op, n, tc)
traceIndent = append(traceIndent, ". "...)
- return func(np **ir.Node) {
+ return func(np *ir.Node) {
traceIndent = traceIndent[:len(traceIndent)-2]
// if we have a result, use that
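The np parameter loses one level of indirection here: updating the caller's variable used to take **Node (a pointer to the *Node variable), but with Node an interface a plain *Node is enough. A small sketch, with Node again standing in for ir.Node:

type Node interface{ Op() string }

// setResult overwrites the caller's Node variable through *Node,
// where the pointer-to-struct code needed **Node for the same effect.
func setResult(np *Node, n Node) {
	*np = n
}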
@@ -77,10 +77,10 @@ const (
// marks variables that escape the local frame.
// rewrites n.Op to be more specific in some cases.
-var typecheckdefstack []*ir.Node
+var typecheckdefstack []ir.Node
// resolve ONONAME to definition, if any.
-func resolve(n *ir.Node) (res *ir.Node) {
+func resolve(n ir.Node) (res ir.Node) {
if n == nil || n.Op() != ir.ONONAME {
return n
}
@@ -115,7 +115,7 @@ func resolve(n *ir.Node) (res *ir.Node) {
return r
}
-func typecheckslice(l []*ir.Node, top int) {
+func typecheckslice(l []ir.Node, top int) {
for i := range l {
l[i] = typecheck(l[i], top)
}
@@ -166,7 +166,7 @@ func typekind(t *types.Type) string {
return fmt.Sprintf("etype=%d", et)
}
-func cycleFor(start *ir.Node) []*ir.Node {
+func cycleFor(start ir.Node) []ir.Node {
// Find the start node in typecheck_tcstack.
// We know that it must exist because each time we mark
// a node with n.SetTypecheck(2) we push it on the stack,
@@ -179,7 +179,7 @@ func cycleFor(start *ir.Node) []*ir.Node {
}
// collect all nodes with same Op
- var cycle []*ir.Node
+ var cycle []ir.Node
for _, n := range typecheck_tcstack[i:] {
if n.Op() == start.Op() {
cycle = append(cycle, n)
@@ -189,7 +189,7 @@ func cycleFor(start *ir.Node) []*ir.Node {
return cycle
}
-func cycleTrace(cycle []*ir.Node) string {
+func cycleTrace(cycle []ir.Node) string {
var s string
for i, n := range cycle {
s += fmt.Sprintf("\n\t%v: %v uses %v", ir.Line(n), n, cycle[(i+1)%len(cycle)])
@@ -197,12 +197,12 @@ func cycleTrace(cycle []*ir.Node) string {
return s
}
-var typecheck_tcstack []*ir.Node
+var typecheck_tcstack []ir.Node
// typecheck type checks node n.
// The result of typecheck MUST be assigned back to n, e.g.
// n.Left = typecheck(n.Left, top)
-func typecheck(n *ir.Node, top int) (res *ir.Node) {
+func typecheck(n ir.Node, top int) (res ir.Node) {
// cannot type check until all the source has been parsed
if !typecheckok {
base.Fatalf("early typecheck")
@@ -317,7 +317,7 @@ func typecheck(n *ir.Node, top int) (res *ir.Node) {
// value of type int (see also checkmake for comparison).
// The result of indexlit MUST be assigned back to n, e.g.
// n.Left = indexlit(n.Left)
-func indexlit(n *ir.Node) *ir.Node {
+func indexlit(n ir.Node) ir.Node {
if n != nil && n.Type() != nil && n.Type().Etype == types.TIDEAL {
return defaultlit(n, types.Types[types.TINT])
}
@@ -326,7 +326,7 @@ func indexlit(n *ir.Node) *ir.Node {
// The result of typecheck1 MUST be assigned back to n, e.g.
// n.Left = typecheck1(n.Left, top)
-func typecheck1(n *ir.Node, top int) (res *ir.Node) {
+func typecheck1(n ir.Node, top int) (res ir.Node) {
if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheck1", n)(&res)
}
@@ -569,9 +569,9 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) {
ir.OOROR,
ir.OSUB,
ir.OXOR:
- var l *ir.Node
+ var l ir.Node
var op ir.Op
- var r *ir.Node
+ var r ir.Node
if n.Op() == ir.OASOP {
ok |= ctxStmt
n.SetLeft(typecheck(n.Left(), ctxExpr))
@@ -1762,7 +1762,7 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) {
l = args[i]
i++
l = typecheck(l, ctxExpr)
- var r *ir.Node
+ var r ir.Node
if i < len(args) {
r = args[i]
i++
@@ -2129,7 +2129,7 @@ func typecheck1(n *ir.Node, top int) (res *ir.Node) {
return n
}
-func typecheckargs(n *ir.Node) {
+func typecheckargs(n ir.Node) {
if n.List().Len() != 1 || n.IsDDD() {
typecheckslice(n.List().Slice(), ctxExpr)
return
@@ -2174,7 +2174,7 @@ func typecheckargs(n *ir.Node) {
n.PtrInit().Append(as)
}
-func checksliceindex(l *ir.Node, r *ir.Node, tp *types.Type) bool {
+func checksliceindex(l ir.Node, r ir.Node, tp *types.Type) bool {
t := r.Type()
if t == nil {
return false
@@ -2204,7 +2204,7 @@ func checksliceindex(l *ir.Node, r *ir.Node, tp *types.Type) bool {
return true
}
-func checksliceconst(lo *ir.Node, hi *ir.Node) bool {
+func checksliceconst(lo ir.Node, hi ir.Node) bool {
if lo != nil && hi != nil && lo.Op() == ir.OLITERAL && hi.Op() == ir.OLITERAL && constant.Compare(lo.Val(), token.GTR, hi.Val()) {
base.Errorf("invalid slice index: %v > %v", lo, hi)
return false
@@ -2213,7 +2213,7 @@ func checksliceconst(lo *ir.Node, hi *ir.Node) bool {
return true
}
-func checkdefergo(n *ir.Node) {
+func checkdefergo(n ir.Node) {
what := "defer"
if n.Op() == ir.OGO {
what = "go"
@@ -2268,7 +2268,7 @@ func checkdefergo(n *ir.Node) {
// The result of implicitstar MUST be assigned back to n, e.g.
// n.Left = implicitstar(n.Left)
-func implicitstar(n *ir.Node) *ir.Node {
+func implicitstar(n ir.Node) ir.Node {
// insert implicit * if needed for fixed array
t := n.Type()
if t == nil || !t.IsPtr() {
@@ -2287,7 +2287,7 @@ func implicitstar(n *ir.Node) *ir.Node {
return n
}
-func onearg(n *ir.Node, f string, args ...interface{}) bool {
+func onearg(n ir.Node, f string, args ...interface{}) bool {
if n.Left() != nil {
return true
}
@@ -2310,7 +2310,7 @@ func onearg(n *ir.Node, f string, args ...interface{}) bool {
return true
}
-func twoarg(n *ir.Node) bool {
+func twoarg(n ir.Node) bool {
if n.Left() != nil {
return true
}
@@ -2328,7 +2328,7 @@ func twoarg(n *ir.Node) bool {
return true
}
-func lookdot1(errnode *ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field {
+func lookdot1(errnode ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field {
var r *types.Field
for _, f := range fs.Slice() {
if dostrcmp != 0 && f.Sym.Name == s.Name {
@@ -2359,7 +2359,7 @@ func lookdot1(errnode *ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, d
// typecheckMethodExpr checks selector expressions (ODOT) where the
// base expression is a type expression (OTYPE).
-func typecheckMethodExpr(n *ir.Node) (res *ir.Node) {
+func typecheckMethodExpr(n ir.Node) (res ir.Node) {
if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckMethodExpr", n)(&res)
}
@@ -2447,7 +2447,7 @@ func derefall(t *types.Type) *types.Type {
return t
}
-func lookdot(n *ir.Node, t *types.Type, dostrcmp int) *types.Field {
+func lookdot(n ir.Node, t *types.Type, dostrcmp int) *types.Field {
s := n.Sym()
dowidth(t)
@@ -2572,7 +2572,7 @@ func hasddd(t *types.Type) bool {
}
// typecheck assignment: type list = expression list
-func typecheckaste(op ir.Op, call *ir.Node, isddd bool, tstruct *types.Type, nl ir.Nodes, desc func() string) {
+func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl ir.Nodes, desc func() string) {
var t *types.Type
var i int
@@ -2583,7 +2583,7 @@ func typecheckaste(op ir.Op, call *ir.Node, isddd bool, tstruct *types.Type, nl
return
}
- var n *ir.Node
+ var n ir.Node
if nl.Len() == 1 {
n = nl.First()
}
@@ -2774,7 +2774,7 @@ func iscomptype(t *types.Type) bool {
// pushtype adds elided type information for composite literals if
// appropriate, and returns the resulting expression.
-func pushtype(n *ir.Node, t *types.Type) *ir.Node {
+func pushtype(n ir.Node, t *types.Type) ir.Node {
if n == nil || n.Op() != ir.OCOMPLIT || n.Right() != nil {
return n
}
@@ -2797,7 +2797,7 @@ func pushtype(n *ir.Node, t *types.Type) *ir.Node {
// The result of typecheckcomplit MUST be assigned back to n, e.g.
// n.Left = typecheckcomplit(n.Left)
-func typecheckcomplit(n *ir.Node) (res *ir.Node) {
+func typecheckcomplit(n ir.Node) (res ir.Node) {
if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckcomplit", n)(&res)
}
@@ -3008,7 +3008,7 @@ func typecheckcomplit(n *ir.Node) (res *ir.Node) {
}
// typecheckarraylit type-checks a sequence of slice/array literal elements.
-func typecheckarraylit(elemType *types.Type, bound int64, elts []*ir.Node, ctx string) int64 {
+func typecheckarraylit(elemType *types.Type, bound int64, elts []ir.Node, ctx string) int64 {
// If there are key/value pairs, create a map to keep seen
// keys so we can check for duplicate indices.
var indices map[int64]bool
@@ -3023,7 +3023,7 @@ func typecheckarraylit(elemType *types.Type, bound int64, elts []*ir.Node, ctx s
for i, elt := range elts {
setlineno(elt)
r := elts[i]
- var kv *ir.Node
+ var kv ir.Node
if elt.Op() == ir.OKEY {
elt.SetLeft(typecheck(elt.Left(), ctxExpr))
key = indexconst(elt.Left())
@@ -3086,7 +3086,7 @@ func nonexported(sym *types.Sym) bool {
}
// lvalue etc
-func islvalue(n *ir.Node) bool {
+func islvalue(n ir.Node) bool {
switch n.Op() {
case ir.OINDEX:
if n.Left().Type() != nil && n.Left().Type().IsArray() {
@@ -3112,13 +3112,13 @@ func islvalue(n *ir.Node) bool {
return false
}
-func checklvalue(n *ir.Node, verb string) {
+func checklvalue(n ir.Node, verb string) {
if !islvalue(n) {
base.Errorf("cannot %s %v", verb, n)
}
}
-func checkassign(stmt *ir.Node, n *ir.Node) {
+func checkassign(stmt ir.Node, n ir.Node) {
// Variables declared in ORANGE are assigned on every iteration.
if n.Name() == nil || n.Name().Defn != stmt || stmt.Op() == ir.ORANGE {
r := outervalue(n)
@@ -3156,7 +3156,7 @@ func checkassign(stmt *ir.Node, n *ir.Node) {
n.SetType(nil)
}
-func checkassignlist(stmt *ir.Node, l ir.Nodes) {
+func checkassignlist(stmt ir.Node, l ir.Nodes) {
for _, n := range l.Slice() {
checkassign(stmt, n)
}
@@ -3177,7 +3177,7 @@ func checkassignlist(stmt *ir.Node, l ir.Nodes) {
// currently OK, since the only place samesafeexpr gets used on an
// lvalue expression is for OSLICE and OAPPEND optimizations, and it
// is correct in those settings.
-func samesafeexpr(l *ir.Node, r *ir.Node) bool {
+func samesafeexpr(l ir.Node, r ir.Node) bool {
if l.Op() != r.Op() || !types.Identical(l.Type(), r.Type()) {
return false
}
@@ -3215,7 +3215,7 @@ func samesafeexpr(l *ir.Node, r *ir.Node) bool {
// type check assignment.
// if this assignment is the definition of a var on the left side,
// fill in the var's type.
-func typecheckas(n *ir.Node) {
+func typecheckas(n ir.Node) {
if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckas", n)(nil)
}
@@ -3266,14 +3266,14 @@ func typecheckas(n *ir.Node) {
}
}
-func checkassignto(src *types.Type, dst *ir.Node) {
+func checkassignto(src *types.Type, dst ir.Node) {
if op, why := assignop(src, dst.Type()); op == ir.OXXX {
base.Errorf("cannot assign %v to %L in multiple assignment%s", src, dst, why)
return
}
}
-func typecheckas2(n *ir.Node) {
+func typecheckas2(n ir.Node) {
if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckas2", n)(nil)
}
@@ -3298,8 +3298,8 @@ func typecheckas2(n *ir.Node) {
}
checkassignlist(n, n.List())
- var l *ir.Node
- var r *ir.Node
+ var l ir.Node
+ var r ir.Node
if cl == cr {
// easy
ls := n.List().Slice()
@@ -3406,7 +3406,7 @@ out:
}
// type check function definition
-func typecheckfunc(n *ir.Node) {
+func typecheckfunc(n ir.Node) {
if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckfunc", n)(nil)
}
@@ -3441,12 +3441,12 @@ func typecheckfunc(n *ir.Node) {
// The result of stringtoruneslit MUST be assigned back to n, e.g.
// n.Left = stringtoruneslit(n.Left)
-func stringtoruneslit(n *ir.Node) *ir.Node {
+func stringtoruneslit(n ir.Node) ir.Node {
if n.Left().Op() != ir.OLITERAL || n.Left().Val().Kind() != constant.String {
base.Fatalf("stringtoarraylit %v", n)
}
- var l []*ir.Node
+ var l []ir.Node
i := 0
for _, r := range n.Left().StringVal() {
l = append(l, ir.Nod(ir.OKEY, nodintconst(int64(i)), nodintconst(int64(r))))
@@ -3459,7 +3459,7 @@ func stringtoruneslit(n *ir.Node) *ir.Node {
return nn
}
-var mapqueue []*ir.Node
+var mapqueue []ir.Node
func checkMapKeys() {
for _, n := range mapqueue {
@@ -3520,7 +3520,7 @@ func setUnderlying(t, underlying *types.Type) {
}
}
-func typecheckdeftype(n *ir.Node) {
+func typecheckdeftype(n ir.Node) {
if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckdeftype", n)(nil)
}
@@ -3540,7 +3540,7 @@ func typecheckdeftype(n *ir.Node) {
}
}
-func typecheckdef(n *ir.Node) {
+func typecheckdef(n ir.Node) {
if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckdef", n)(nil)
}
@@ -3727,7 +3727,7 @@ ret:
n.SetWalkdef(1)
}
-func checkmake(t *types.Type, arg string, np **ir.Node) bool {
+func checkmake(t *types.Type, arg string, np *ir.Node) bool {
n := *np
if !n.Type().IsInteger() && n.Type().Etype != types.TIDEAL {
base.Errorf("non-integer %s argument in make(%v) - %v", arg, t, n.Type())
@@ -3759,7 +3759,7 @@ func checkmake(t *types.Type, arg string, np **ir.Node) bool {
return true
}
-func markbreak(n *ir.Node, implicit *ir.Node) {
+func markbreak(n ir.Node, implicit ir.Node) {
if n == nil {
return
}
@@ -3789,7 +3789,7 @@ func markbreak(n *ir.Node, implicit *ir.Node) {
}
}
-func markbreaklist(l ir.Nodes, implicit *ir.Node) {
+func markbreaklist(l ir.Nodes, implicit ir.Node) {
s := l.Slice()
for i := 0; i < len(s); i++ {
n := s[i]
@@ -3823,7 +3823,7 @@ func isTermNodes(l ir.Nodes) bool {
// isTermNode reports whether the node n, the last one in a
// statement list, is a terminating statement.
-func isTermNode(n *ir.Node) bool {
+func isTermNode(n ir.Node) bool {
switch n.Op() {
// NOTE: OLABEL is treated as a separate statement,
// not a separate prefix, so skipping to the last statement
@@ -3872,7 +3872,7 @@ func isTermNode(n *ir.Node) bool {
}
// checkreturn makes sure that fn terminates appropriately.
-func checkreturn(fn *ir.Node) {
+func checkreturn(fn ir.Node) {
if fn.Type().NumResults() != 0 && fn.Body().Len() != 0 {
markbreaklist(fn.Body(), nil)
if !isTermNodes(fn.Body()) {
@@ -3881,12 +3881,12 @@ func checkreturn(fn *ir.Node) {
}
}
-func deadcode(fn *ir.Node) {
+func deadcode(fn ir.Node) {
deadcodeslice(fn.PtrBody())
deadcodefn(fn)
}
-func deadcodefn(fn *ir.Node) {
+func deadcodefn(fn ir.Node) {
if fn.Body().Len() == 0 {
return
}
@@ -3909,7 +3909,7 @@ func deadcodefn(fn *ir.Node) {
}
}
- fn.PtrBody().Set([]*ir.Node{ir.Nod(ir.OEMPTY, nil, nil)})
+ fn.PtrBody().Set([]ir.Node{ir.Nod(ir.OEMPTY, nil, nil)})
}
func deadcodeslice(nn *ir.Nodes) {
@@ -3965,7 +3965,7 @@ func deadcodeslice(nn *ir.Nodes) {
}
}
-func deadcodeexpr(n *ir.Node) *ir.Node {
+func deadcodeexpr(n ir.Node) ir.Node {
// Perform dead-code elimination on short-circuited boolean
// expressions involving constants with the intent of
// producing a constant 'if' condition.
@@ -3995,7 +3995,7 @@ func deadcodeexpr(n *ir.Node) *ir.Node {
}
// setTypeNode sets n to an OTYPE node representing t.
-func setTypeNode(n *ir.Node, t *types.Type) {
+func setTypeNode(n ir.Node, t *types.Type) {
n.SetOp(ir.OTYPE)
n.SetType(t)
n.Type().Nod = n
@@ -4037,12 +4037,12 @@ func curpkg() *types.Pkg {
// methodExprName returns the ONAME representing the method
// referenced by expression n, which must be a method selector,
// method expression, or method value.
-func methodExprName(n *ir.Node) *ir.Node {
+func methodExprName(n ir.Node) ir.Node {
return ir.AsNode(methodExprFunc(n).Nname)
}
// methodExprFunc is like methodExprName, but returns the types.Field instead.
-func methodExprFunc(n *ir.Node) *types.Field {
+func methodExprFunc(n ir.Node) *types.Field {
switch n.Op() {
case ir.ODOTMETH, ir.OMETHEXPR:
return n.Opt().(*types.Field)
diff --git a/src/cmd/compile/internal/gc/unsafe.go b/src/cmd/compile/internal/gc/unsafe.go
index c9b0dbcf2f..678924b229 100644
--- a/src/cmd/compile/internal/gc/unsafe.go
+++ b/src/cmd/compile/internal/gc/unsafe.go
@@ -10,7 +10,7 @@ import (
)
// evalunsafe evaluates a package unsafe operation and returns the result.
-func evalunsafe(n *ir.Node) int64 {
+func evalunsafe(n ir.Node) int64 {
switch n.Op() {
case ir.OALIGNOF, ir.OSIZEOF:
n.SetLeft(typecheck(n.Left(), ctxExpr))
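evalunsafe folds these operations to integer constants during typechecking, in line with the language rule that unsafe.Sizeof, Alignof, and Offsetof are constant expressions:

package main

import (
	"fmt"
	"unsafe"
)

// Legal as constants: the compiler evaluates them at compile time.
const (
	size  = unsafe.Sizeof(int64(0))  // 8
	align = unsafe.Alignof(int64(0)) // commonly 8 on 64-bit targets
)

func main() {
	fmt.Println(size, align)
}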
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
index 77cf59bde8..db8791ee05 100644
--- a/src/cmd/compile/internal/gc/walk.go
+++ b/src/cmd/compile/internal/gc/walk.go
@@ -22,7 +22,7 @@ import (
const tmpstringbufsize = 32
const zeroValSize = 1024 // must match value of runtime/map.go:maxZero
-func walk(fn *ir.Node) {
+func walk(fn ir.Node) {
Curfn = fn
errorsBefore := base.Errors()
@@ -81,13 +81,13 @@ func walk(fn *ir.Node) {
}
}
-func walkstmtlist(s []*ir.Node) {
+func walkstmtlist(s []ir.Node) {
for i := range s {
s[i] = walkstmt(s[i])
}
}
-func paramoutheap(fn *ir.Node) bool {
+func paramoutheap(fn ir.Node) bool {
for _, ln := range fn.Func().Dcl {
switch ln.Class() {
case ir.PPARAMOUT:
@@ -106,7 +106,7 @@ func paramoutheap(fn *ir.Node) bool {
// The result of walkstmt MUST be assigned back to n, e.g.
// n.Left = walkstmt(n.Left)
-func walkstmt(n *ir.Node) *ir.Node {
+func walkstmt(n ir.Node) ir.Node {
if n == nil {
return n
}
@@ -275,7 +275,7 @@ func walkstmt(n *ir.Node) *ir.Node {
if (Curfn.Type().FuncType().Outnamed && n.List().Len() > 1) || paramoutheap(Curfn) {
// assign to the function out parameters,
// so that reorder3 can fix up conflicts
- var rl []*ir.Node
+ var rl []ir.Node
for _, ln := range Curfn.Func().Dcl {
cl := ln.Class()
@@ -308,7 +308,7 @@ func walkstmt(n *ir.Node) *ir.Node {
// For each return parameter (lhs), assign the corresponding result (rhs).
lhs := Curfn.Type().Results()
rhs := n.List().Slice()
- res := make([]*ir.Node, lhs.NumFields())
+ res := make([]ir.Node, lhs.NumFields())
for i, nl := range lhs.FieldSlice() {
nname := ir.AsNode(nl.Nname)
if isParamHeapCopy(nname) {
@@ -346,20 +346,20 @@ func walkstmt(n *ir.Node) *ir.Node {
// the types expressions are calculated.
// compile-time constants are evaluated.
// complex side effects like statements are appended to init
-func walkexprlist(s []*ir.Node, init *ir.Nodes) {
+func walkexprlist(s []ir.Node, init *ir.Nodes) {
for i := range s {
s[i] = walkexpr(s[i], init)
}
}
-func walkexprlistsafe(s []*ir.Node, init *ir.Nodes) {
+func walkexprlistsafe(s []ir.Node, init *ir.Nodes) {
for i, n := range s {
s[i] = safeexpr(n, init)
s[i] = walkexpr(s[i], init)
}
}
-func walkexprlistcheap(s []*ir.Node, init *ir.Nodes) {
+func walkexprlistcheap(s []ir.Node, init *ir.Nodes) {
for i, n := range s {
s[i] = cheapexpr(n, init)
s[i] = walkexpr(s[i], init)
@@ -413,7 +413,7 @@ func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) {
// The result of walkexpr MUST be assigned back to n, e.g.
// n.Left = walkexpr(n.Left, init)
-func walkexpr(n *ir.Node, init *ir.Nodes) *ir.Node {
+func walkexpr(n ir.Node, init *ir.Nodes) ir.Node {
if n == nil {
return n
}
@@ -700,7 +700,7 @@ opswitch:
r := n.Right()
walkexprlistsafe(n.List().Slice(), init)
r.SetLeft(walkexpr(r.Left(), init))
- var n1 *ir.Node
+ var n1 ir.Node
if ir.IsBlank(n.List().First()) {
n1 = nodnil()
} else {
@@ -723,7 +723,7 @@ opswitch:
t := r.Left().Type()
fast := mapfast(t)
- var key *ir.Node
+ var key ir.Node
if fast != mapslow {
// fast versions take key by value
key = r.Right()
@@ -802,7 +802,7 @@ opswitch:
}
// typeword generates the type word of the interface value.
- typeword := func() *ir.Node {
+ typeword := func() ir.Node {
if toType.IsEmptyInterface() {
return typename(fromType)
}
@@ -832,7 +832,7 @@ opswitch:
// Optimize convT2{E,I} for many cases in which T is not pointer-shaped,
// by using an existing addressable value identical to n.Left
// or creating one on the stack.
- var value *ir.Node
+ var value ir.Node
switch {
case fromType.Size() == 0:
// n.Left is zero-sized. Use zerobase.
@@ -918,7 +918,7 @@ opswitch:
break
}
- var tab *ir.Node
+ var tab ir.Node
if fromType.IsInterface() {
// convI2I
tab = typename(toType)
@@ -1208,7 +1208,7 @@ opswitch:
hint := n.Left()
// var h *hmap
- var h *ir.Node
+ var h ir.Node
if n.Esc() == EscNone {
// Allocate hmap on stack.
@@ -1494,7 +1494,7 @@ opswitch:
// Allocate a [n]byte of the right size.
t := types.NewArray(types.Types[types.TUINT8], int64(len(sc)))
- var a *ir.Node
+ var a ir.Node
if n.Esc() == EscNone && len(sc) <= int(maxImplicitStackVarSize) {
a = ir.Nod(ir.OADDR, temp(t), nil)
} else {
@@ -1619,7 +1619,7 @@ func markTypeUsedInInterface(t *types.Type, from *obj.LSym) {
// markUsedIfaceMethod marks that an interface method is used in the current
// function. n is OCALLINTER node.
-func markUsedIfaceMethod(n *ir.Node) {
+func markUsedIfaceMethod(n ir.Node) {
ityp := n.Left().Left().Type()
tsym := typenamesym(ityp).Linksym()
r := obj.Addrel(Curfn.Func().LSym)
@@ -1678,7 +1678,7 @@ func rtconvfn(src, dst *types.Type) (param, result types.EType) {
}
// TODO(josharian): combine this with its caller and simplify
-func reduceSlice(n *ir.Node) *ir.Node {
+func reduceSlice(n ir.Node) ir.Node {
low, high, max := n.SliceBounds()
if high != nil && high.Op() == ir.OLEN && samesafeexpr(n.Left(), high.Left()) {
// Reduce x[i:len(x)] to x[i:].
@@ -1695,7 +1695,7 @@ func reduceSlice(n *ir.Node) *ir.Node {
return n
}
-func ascompatee1(l *ir.Node, r *ir.Node, init *ir.Nodes) *ir.Node {
+func ascompatee1(l ir.Node, r ir.Node, init *ir.Nodes) ir.Node {
// convas will turn map assigns into function calls,
// making it impossible for reorder3 to work.
n := ir.Nod(ir.OAS, l, r)
@@ -1707,7 +1707,7 @@ func ascompatee1(l *ir.Node, r *ir.Node, init *ir.Nodes) *ir.Node {
return convas(n, init)
}
-func ascompatee(op ir.Op, nl, nr []*ir.Node, init *ir.Nodes) []*ir.Node {
+func ascompatee(op ir.Op, nl, nr []ir.Node, init *ir.Nodes) []ir.Node {
// check assign expression list to
// an expression list. called in
// expr-list = expr-list
@@ -1720,7 +1720,7 @@ func ascompatee(op ir.Op, nl, nr []*ir.Node, init *ir.Nodes) []*ir.Node {
nr[i1] = safeexpr(nr[i1], init)
}
- var nn []*ir.Node
+ var nn []ir.Node
i := 0
for ; i < len(nl); i++ {
if i >= len(nr) {
@@ -1744,7 +1744,7 @@ func ascompatee(op ir.Op, nl, nr []*ir.Node, init *ir.Nodes) []*ir.Node {
}
// fncall reports whether assigning an rvalue of type rt to an lvalue l might involve a function call.
-func fncall(l *ir.Node, rt *types.Type) bool {
+func fncall(l ir.Node, rt *types.Type) bool {
if l.HasCall() || l.Op() == ir.OINDEXMAP {
return true
}
@@ -1758,7 +1758,7 @@ func fncall(l *ir.Node, rt *types.Type) bool {
// check assign type list to
// an expression list. called in
// expr-list = func()
-func ascompatet(nl ir.Nodes, nr *types.Type) []*ir.Node {
+func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node {
if nl.Len() != nr.NumFields() {
base.Fatalf("ascompatet: assignment count mismatch: %d = %d", nl.Len(), nr.NumFields())
}
@@ -1800,8 +1800,8 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []*ir.Node {
}
// package all the arguments that match a ... T parameter into a []T.
-func mkdotargslice(typ *types.Type, args []*ir.Node) *ir.Node {
- var n *ir.Node
+func mkdotargslice(typ *types.Type, args []ir.Node) ir.Node {
+ var n ir.Node
if len(args) == 0 {
n = nodnil()
n.SetType(typ)
@@ -1820,7 +1820,7 @@ func mkdotargslice(typ *types.Type, args []*ir.Node) *ir.Node {
// fixVariadicCall rewrites calls to variadic functions to use an
// explicit ... argument if one is not already present.
-func fixVariadicCall(call *ir.Node) {
+func fixVariadicCall(call ir.Node) {
fntype := call.Left().Type()
if !fntype.IsVariadic() || call.IsDDD() {
return
@@ -1840,7 +1840,7 @@ func fixVariadicCall(call *ir.Node) {
call.SetIsDDD(true)
}
-func walkCall(n *ir.Node, init *ir.Nodes) {
+func walkCall(n ir.Node, init *ir.Nodes) {
if n.Rlist().Len() != 0 {
return // already walked
}
@@ -1853,7 +1853,7 @@ func walkCall(n *ir.Node, init *ir.Nodes) {
// If this is a method call, add the receiver at the beginning of the args.
if n.Op() == ir.OCALLMETH {
- withRecv := make([]*ir.Node, len(args)+1)
+ withRecv := make([]ir.Node, len(args)+1)
withRecv[0] = n.Left().Left()
n.Left().SetLeft(nil)
copy(withRecv[1:], args)
@@ -1864,7 +1864,7 @@ func walkCall(n *ir.Node, init *ir.Nodes) {
// store that argument into a temporary variable,
// to prevent later calls from clobbering arguments already on the stack.
// When instrumenting, all arguments might require function calls.
- var tempAssigns []*ir.Node
+ var tempAssigns []ir.Node
for i, arg := range args {
updateHasCall(arg)
// Determine param type.
@@ -1894,14 +1894,14 @@ func walkCall(n *ir.Node, init *ir.Nodes) {
}
// generate code for print
-func walkprint(nn *ir.Node, init *ir.Nodes) *ir.Node {
+func walkprint(nn ir.Node, init *ir.Nodes) ir.Node {
// Hoist all the argument evaluation up before the lock.
walkexprlistcheap(nn.List().Slice(), init)
// For println, add " " between elements and "\n" at the end.
if nn.Op() == ir.OPRINTN {
s := nn.List().Slice()
- t := make([]*ir.Node, 0, len(s)*2)
+ t := make([]ir.Node, 0, len(s)*2)
for i, n := range s {
if i != 0 {
t = append(t, nodstr(" "))
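The loop above turns println(a, b, c) into print(a, " ", b, " ", c, "\n"), which is why t is sized len(s)*2. The same slice-building pattern over plain strings, as a sketch:

// interleave mirrors walkprint's println lowering: a space between
// operands and a newline at the end.
func interleave(s []string) []string {
	t := make([]string, 0, len(s)*2)
	for i, str := range s {
		if i != 0 {
			t = append(t, " ")
		}
		t = append(t, str)
	}
	return append(t, "\n")
}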
@@ -1914,7 +1914,7 @@ func walkprint(nn *ir.Node, init *ir.Nodes) *ir.Node {
// Collapse runs of constant strings.
s := nn.List().Slice()
- t := make([]*ir.Node, 0, len(s))
+ t := make([]ir.Node, 0, len(s))
for i := 0; i < len(s); {
var strs []string
for i < len(s) && ir.IsConst(s[i], constant.String) {
@@ -1931,7 +1931,7 @@ func walkprint(nn *ir.Node, init *ir.Nodes) *ir.Node {
}
nn.PtrList().Set(t)
- calls := []*ir.Node{mkcall("printlock", nil, init)}
+ calls := []ir.Node{mkcall("printlock", nil, init)}
for i, n := range nn.List().Slice() {
if n.Op() == ir.OLITERAL {
if n.Type() == types.UntypedRune {
@@ -1956,7 +1956,7 @@ func walkprint(nn *ir.Node, init *ir.Nodes) *ir.Node {
continue
}
- var on *ir.Node
+ var on ir.Node
switch n.Type().Etype {
case types.TINTER:
if n.Type().IsEmptyInterface() {
@@ -2037,7 +2037,7 @@ func walkprint(nn *ir.Node, init *ir.Nodes) *ir.Node {
return r
}
-func callnew(t *types.Type) *ir.Node {
+func callnew(t *types.Type) ir.Node {
dowidth(t)
n := ir.Nod(ir.ONEWOBJ, typename(t), nil)
n.SetType(types.NewPtr(t))
@@ -2048,7 +2048,7 @@ func callnew(t *types.Type) *ir.Node {
// isReflectHeaderDataField reports whether l is an expression p.Data
// where p has type reflect.SliceHeader or reflect.StringHeader.
-func isReflectHeaderDataField(l *ir.Node) bool {
+func isReflectHeaderDataField(l ir.Node) bool {
if l.Type() != types.Types[types.TUINTPTR] {
return false
}
@@ -2069,7 +2069,7 @@ func isReflectHeaderDataField(l *ir.Node) bool {
return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader"
}
-func convas(n *ir.Node, init *ir.Nodes) *ir.Node {
+func convas(n ir.Node, init *ir.Nodes) ir.Node {
if n.Op() != ir.OAS {
base.Fatalf("convas: not OAS %v", n.Op())
}
@@ -2107,11 +2107,11 @@ func convas(n *ir.Node, init *ir.Nodes) *ir.Node {
// be later use of an earlier lvalue.
//
// function calls have been removed.
-func reorder3(all []*ir.Node) []*ir.Node {
+func reorder3(all []ir.Node) []ir.Node {
// If a needed expression may be affected by an
// earlier assignment, make an early copy of that
// expression and use the copy instead.
- var early []*ir.Node
+ var early []ir.Node
var mapinit ir.Nodes
for i, n := range all {
@@ -2166,7 +2166,7 @@ func reorder3(all []*ir.Node) []*ir.Node {
// replace *np with that temp.
// The result of reorder3save MUST be assigned back to n, e.g.
// n.Left = reorder3save(n.Left, all, i, early)
-func reorder3save(n *ir.Node, all []*ir.Node, i int, early *[]*ir.Node) *ir.Node {
+func reorder3save(n ir.Node, all []ir.Node, i int, early *[]ir.Node) ir.Node {
if !aliased(n, all[:i]) {
return n
}
@@ -2180,7 +2180,7 @@ func reorder3save(n *ir.Node, all []*ir.Node, i int, early *[]*ir.Node) *ir.Node
// what's the outer value that a write to n affects?
// outer value means containing struct or array.
-func outervalue(n *ir.Node) *ir.Node {
+func outervalue(n ir.Node) ir.Node {
for {
switch n.Op() {
case ir.OXDOT:
@@ -2201,7 +2201,7 @@ func outervalue(n *ir.Node) *ir.Node {
// Is it possible that the computation of r might be
// affected by assignments in all?
-func aliased(r *ir.Node, all []*ir.Node) bool {
+func aliased(r ir.Node, all []ir.Node) bool {
if r == nil {
return false
}
@@ -2275,7 +2275,7 @@ func aliased(r *ir.Node, all []*ir.Node) bool {
// does the evaluation of n only refer to variables
// whose addresses have not been taken?
// (and no other memory)
-func varexpr(n *ir.Node) bool {
+func varexpr(n ir.Node) bool {
if n == nil {
return true
}
@@ -2327,7 +2327,7 @@ func varexpr(n *ir.Node) bool {
}
// is the name l mentioned in r?
-func vmatch2(l *ir.Node, r *ir.Node) bool {
+func vmatch2(l ir.Node, r ir.Node) bool {
if r == nil {
return false
}
@@ -2356,7 +2356,7 @@ func vmatch2(l *ir.Node, r *ir.Node) bool {
// is any name mentioned in l also mentioned in r?
// called by sinit.go
-func vmatch1(l *ir.Node, r *ir.Node) bool {
+func vmatch1(l ir.Node, r ir.Node) bool {
// isolate all left sides
if l == nil || r == nil {
return false
@@ -2397,8 +2397,8 @@ func vmatch1(l *ir.Node, r *ir.Node) bool {
// paramstoheap returns code to allocate memory for heap-escaped parameters
// and to copy non-result parameters' values from the stack.
-func paramstoheap(params *types.Type) []*ir.Node {
- var nn []*ir.Node
+func paramstoheap(params *types.Type) []ir.Node {
+ var nn []ir.Node
for _, t := range params.Fields().Slice() {
v := ir.AsNode(t.Nname)
if v != nil && v.Sym() != nil && strings.HasPrefix(v.Sym().Name, "~r") { // unnamed result
@@ -2451,8 +2451,8 @@ func zeroResults() {
// returnsfromheap returns code to copy values for heap-escaped parameters
// back to the stack.
-func returnsfromheap(params *types.Type) []*ir.Node {
- var nn []*ir.Node
+func returnsfromheap(params *types.Type) []ir.Node {
+ var nn []ir.Node
for _, t := range params.Fields().Slice() {
v := ir.AsNode(t.Nname)
if v == nil {
@@ -2481,7 +2481,7 @@ func heapmoves() {
base.Pos = lno
}
-func vmkcall(fn *ir.Node, t *types.Type, init *ir.Nodes, va []*ir.Node) *ir.Node {
+func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) ir.Node {
if fn.Type() == nil || fn.Type().Etype != types.TFUNC {
base.Fatalf("mkcall %v %v", fn, fn.Type())
}
@@ -2503,15 +2503,15 @@ func vmkcall(fn *ir.Node, t *types.Type, init *ir.Nodes, va []*ir.Node) *ir.Node
return r
}
-func mkcall(name string, t *types.Type, init *ir.Nodes, args ...*ir.Node) *ir.Node {
+func mkcall(name string, t *types.Type, init *ir.Nodes, args ...ir.Node) ir.Node {
return vmkcall(syslook(name), t, init, args)
}
-func mkcall1(fn *ir.Node, t *types.Type, init *ir.Nodes, args ...*ir.Node) *ir.Node {
+func mkcall1(fn ir.Node, t *types.Type, init *ir.Nodes, args ...ir.Node) ir.Node {
return vmkcall(fn, t, init, args)
}
-func conv(n *ir.Node, t *types.Type) *ir.Node {
+func conv(n ir.Node, t *types.Type) ir.Node {
if types.Identical(n.Type(), t) {
return n
}
@@ -2523,7 +2523,7 @@ func conv(n *ir.Node, t *types.Type) *ir.Node {
// convnop converts node n to type t using the OCONVNOP op
// and typechecks the result with ctxExpr.
-func convnop(n *ir.Node, t *types.Type) *ir.Node {
+func convnop(n ir.Node, t *types.Type) ir.Node {
if types.Identical(n.Type(), t) {
return n
}
@@ -2536,7 +2536,7 @@ func convnop(n *ir.Node, t *types.Type) *ir.Node {
// byteindex converts n, which is byte-sized, to an int used to index into an array.
// We cannot use conv, because we allow converting bool to int here,
// which is forbidden in user code.
-func byteindex(n *ir.Node) *ir.Node {
+func byteindex(n ir.Node) ir.Node {
// We cannot convert from bool to int directly.
// While converting from int8 to int is possible, it would yield
// the wrong result for negative values.
@@ -2552,7 +2552,7 @@ func byteindex(n *ir.Node) *ir.Node {
return n
}
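
The restriction byteindex works around is visible in ordinary Go, where a bool-to-int conversion is rejected outright. A minimal standalone illustration (toy example, not compiler code):

    package main

    func main() {
        b := true
        // int(b) does not compile: bool is not convertible to int in
        // user code, which is why the compiler needs its own
        // OCONVNOP-based helper here.
        var n int
        if b {
            n = 1
        }
        _ = n
    }
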
-func chanfn(name string, n int, t *types.Type) *ir.Node {
+func chanfn(name string, n int, t *types.Type) ir.Node {
if !t.IsChan() {
base.Fatalf("chanfn %v", t)
}
@@ -2568,7 +2568,7 @@ func chanfn(name string, n int, t *types.Type) *ir.Node {
return fn
}
-func mapfn(name string, t *types.Type) *ir.Node {
+func mapfn(name string, t *types.Type) ir.Node {
if !t.IsMap() {
base.Fatalf("mapfn %v", t)
}
@@ -2577,7 +2577,7 @@ func mapfn(name string, t *types.Type) *ir.Node {
return fn
}
-func mapfndel(name string, t *types.Type) *ir.Node {
+func mapfndel(name string, t *types.Type) ir.Node {
if !t.IsMap() {
base.Fatalf("mapfn %v", t)
}
@@ -2636,13 +2636,13 @@ func mapfast(t *types.Type) int {
return mapslow
}
-func writebarrierfn(name string, l *types.Type, r *types.Type) *ir.Node {
+func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node {
fn := syslook(name)
fn = substArgTypes(fn, l, r)
return fn
}
-func addstr(n *ir.Node, init *ir.Nodes) *ir.Node {
+func addstr(n ir.Node, init *ir.Nodes) ir.Node {
// order.expr rewrote OADDSTR to have a list of strings.
c := n.List().Len()
@@ -2668,7 +2668,7 @@ func addstr(n *ir.Node, init *ir.Nodes) *ir.Node {
}
// build list of string arguments
- args := []*ir.Node{buf}
+ args := []ir.Node{buf}
for _, n2 := range n.List().Slice() {
args = append(args, conv(n2, types.Types[types.TSTRING]))
}
@@ -2688,7 +2688,7 @@ func addstr(n *ir.Node, init *ir.Nodes) *ir.Node {
prealloc[slice] = prealloc[n]
}
slice.PtrList().Set(args[1:]) // skip buf arg
- args = []*ir.Node{buf, slice}
+ args = []ir.Node{buf, slice}
slice.SetEsc(EscNone)
}
@@ -2702,7 +2702,7 @@ func addstr(n *ir.Node, init *ir.Nodes) *ir.Node {
return r
}
-func walkAppendArgs(n *ir.Node, init *ir.Nodes) {
+func walkAppendArgs(n ir.Node, init *ir.Nodes) {
walkexprlistsafe(n.List().Slice(), init)
// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
@@ -2728,7 +2728,7 @@ func walkAppendArgs(n *ir.Node, init *ir.Nodes) {
// s
//
// l2 is allowed to be a string.
-func appendslice(n *ir.Node, init *ir.Nodes) *ir.Node {
+func appendslice(n ir.Node, init *ir.Nodes) ir.Node {
walkAppendArgs(n, init)
l1 := n.List().First()
@@ -2768,7 +2768,7 @@ func appendslice(n *ir.Node, init *ir.Nodes) *ir.Node {
nt.SetBounded(true)
nodes.Append(ir.Nod(ir.OAS, s, nt))
- var ncopy *ir.Node
+ var ncopy ir.Node
if elemtype.HasPointers() {
// copy(s[len(l1):], l2)
nptr1 := ir.Nod(ir.OSLICE, s, nil)
@@ -2828,7 +2828,7 @@ func appendslice(n *ir.Node, init *ir.Nodes) *ir.Node {
// isAppendOfMake reports whether n is of the form append(x, make([]T, y)...).
// isAppendOfMake assumes n has already been typechecked.
-func isAppendOfMake(n *ir.Node) bool {
+func isAppendOfMake(n ir.Node) bool {
if base.Flag.N != 0 || instrumenting {
return false
}
@@ -2887,7 +2887,7 @@ func isAppendOfMake(n *ir.Node) bool {
// }
// }
// s
-func extendslice(n *ir.Node, init *ir.Nodes) *ir.Node {
+func extendslice(n ir.Node, init *ir.Nodes) ir.Node {
// isAppendOfMake made sure all possible positive values of l2 fit into a uint.
// The case of l2 overflow when converting from e.g. uint to int is handled by an explicit
// check of l2 < 0 at runtime which is generated below.
@@ -2900,7 +2900,7 @@ func extendslice(n *ir.Node, init *ir.Nodes) *ir.Node {
l1 := n.List().First()
l2 = n.List().Second() // re-read l2, as it may have been updated by walkAppendArgs
- var nodes []*ir.Node
+ var nodes []ir.Node
// if l2 >= 0 (likely happens), do nothing
nifneg := ir.Nod(ir.OIF, ir.Nod(ir.OGE, l2, nodintconst(0)), nil)
@@ -3006,7 +3006,7 @@ func extendslice(n *ir.Node, init *ir.Nodes) *ir.Node {
// ...
// }
// s
-func walkappend(n *ir.Node, init *ir.Nodes, dst *ir.Node) *ir.Node {
+func walkappend(n ir.Node, init *ir.Nodes, dst ir.Node) ir.Node {
if !samesafeexpr(dst, n.List().First()) {
n.List().SetFirst(safeexpr(n.List().First(), init))
n.List().SetFirst(walkexpr(n.List().First(), init))
@@ -3042,7 +3042,7 @@ func walkappend(n *ir.Node, init *ir.Nodes, dst *ir.Node) *ir.Node {
return n
}
- var l []*ir.Node
+ var l []ir.Node
ns := temp(nsrc.Type())
l = append(l, ir.Nod(ir.OAS, ns, nsrc)) // s = src
@@ -3095,7 +3095,7 @@ func walkappend(n *ir.Node, init *ir.Nodes, dst *ir.Node) *ir.Node {
//
// Also works if b is a string.
//
-func copyany(n *ir.Node, init *ir.Nodes, runtimecall bool) *ir.Node {
+func copyany(n ir.Node, init *ir.Nodes, runtimecall bool) ir.Node {
if n.Left().Type().Elem().HasPointers() {
Curfn.Func().SetWBPos(n.Pos())
fn := writebarrierfn("typedslicecopy", n.Left().Type().Elem(), n.Right().Type().Elem())
@@ -3126,7 +3126,7 @@ func copyany(n *ir.Node, init *ir.Nodes, runtimecall bool) *ir.Node {
n.SetRight(walkexpr(n.Right(), init))
nl := temp(n.Left().Type())
nr := temp(n.Right().Type())
- var l []*ir.Node
+ var l []ir.Node
l = append(l, ir.Nod(ir.OAS, nl, n.Left()))
l = append(l, ir.Nod(ir.OAS, nr, n.Right()))
@@ -3165,7 +3165,7 @@ func copyany(n *ir.Node, init *ir.Nodes, runtimecall bool) *ir.Node {
return nlen
}
-func eqfor(t *types.Type) (n *ir.Node, needsize bool) {
+func eqfor(t *types.Type) (n ir.Node, needsize bool) {
// Should only arrive here with large memory or
// a struct/array containing a non-memory field/element.
// Small memory is handled inline, and single non-memory
@@ -3179,10 +3179,10 @@ func eqfor(t *types.Type) (n *ir.Node, needsize bool) {
sym := typesymprefix(".eq", t)
n := NewName(sym)
setNodeNameFunc(n)
- n.SetType(functype(nil, []*ir.Node{
+ n.SetType(functype(nil, []ir.Node{
anonfield(types.NewPtr(t)),
anonfield(types.NewPtr(t)),
- }, []*ir.Node{
+ }, []ir.Node{
anonfield(types.Types[types.TBOOL]),
}))
return n, false
@@ -3193,7 +3193,7 @@ func eqfor(t *types.Type) (n *ir.Node, needsize bool) {
// The result of walkcompare MUST be assigned back to n, e.g.
// n.Left = walkcompare(n.Left, init)
-func walkcompare(n *ir.Node, init *ir.Nodes) *ir.Node {
+func walkcompare(n ir.Node, init *ir.Nodes) ir.Node {
if n.Left().Type().IsInterface() && n.Right().Type().IsInterface() && n.Left().Op() != ir.ONIL && n.Right().Op() != ir.ONIL {
return walkcompareInterface(n, init)
}
@@ -3228,7 +3228,7 @@ func walkcompare(n *ir.Node, init *ir.Nodes) *ir.Node {
// l.tab == type(r)
// For non-empty interface, this is:
// l.tab != nil && l.tab._type == type(r)
- var eqtype *ir.Node
+ var eqtype ir.Node
tab := ir.Nod(ir.OITAB, l, nil)
rtyp := typename(r.Type())
if l.Type().IsEmptyInterface() {
@@ -3354,8 +3354,8 @@ func walkcompare(n *ir.Node, init *ir.Nodes) *ir.Node {
if n.Op() == ir.ONE {
andor = ir.OOROR
}
- var expr *ir.Node
- compare := func(el, er *ir.Node) {
+ var expr ir.Node
+ compare := func(el, er ir.Node) {
a := ir.Nod(n.Op(), el, er)
if expr == nil {
expr = a
@@ -3447,7 +3447,7 @@ func walkcompare(n *ir.Node, init *ir.Nodes) *ir.Node {
return n
}
-func tracecmpArg(n *ir.Node, t *types.Type, init *ir.Nodes) *ir.Node {
+func tracecmpArg(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
// Ugly hack to avoid "constant -1 overflows uintptr" errors, etc.
if n.Op() == ir.OLITERAL && n.Type().IsSigned() && n.Int64Val() < 0 {
n = copyexpr(n, n.Type(), init)
@@ -3456,11 +3456,11 @@ func tracecmpArg(n *ir.Node, t *types.Type, init *ir.Nodes) *ir.Node {
return conv(n, t)
}
-func walkcompareInterface(n *ir.Node, init *ir.Nodes) *ir.Node {
+func walkcompareInterface(n ir.Node, init *ir.Nodes) ir.Node {
n.SetRight(cheapexpr(n.Right(), init))
n.SetLeft(cheapexpr(n.Left(), init))
eqtab, eqdata := eqinterface(n.Left(), n.Right())
- var cmp *ir.Node
+ var cmp ir.Node
if n.Op() == ir.OEQ {
cmp = ir.Nod(ir.OANDAND, eqtab, eqdata)
} else {
@@ -3470,9 +3470,9 @@ func walkcompareInterface(n *ir.Node, init *ir.Nodes) *ir.Node {
return finishcompare(n, cmp, init)
}
-func walkcompareString(n *ir.Node, init *ir.Nodes) *ir.Node {
+func walkcompareString(n ir.Node, init *ir.Nodes) ir.Node {
// Rewrite comparisons to short constant strings as length+byte-wise comparisons.
- var cs, ncs *ir.Node // const string, non-const string
+ var cs, ncs ir.Node // const string, non-const string
switch {
case ir.IsConst(n.Left(), constant.String) && ir.IsConst(n.Right(), constant.String):
// ignore; will be constant evaluated
@@ -3570,7 +3570,7 @@ func walkcompareString(n *ir.Node, init *ir.Nodes) *ir.Node {
}
}
- var r *ir.Node
+ var r ir.Node
if n.Op() == ir.OEQ || n.Op() == ir.ONE {
// prepare for rewrite below
n.SetLeft(cheapexpr(n.Left(), init))
@@ -3597,7 +3597,7 @@ func walkcompareString(n *ir.Node, init *ir.Nodes) *ir.Node {
// The result of finishcompare MUST be assigned back to n, e.g.
// n.Left = finishcompare(n.Left, x, r, init)
-func finishcompare(n, r *ir.Node, init *ir.Nodes) *ir.Node {
+func finishcompare(n, r ir.Node, init *ir.Nodes) ir.Node {
r = typecheck(r, ctxExpr)
r = conv(r, n.Type())
r = walkexpr(r, init)
@@ -3605,7 +3605,7 @@ func finishcompare(n, r *ir.Node, init *ir.Nodes) *ir.Node {
}
// bounded reports whether the integer n must be in range [0, max).
-func bounded(n *ir.Node, max int64) bool {
+func bounded(n ir.Node, max int64) bool {
if n.Type() == nil || !n.Type().IsInteger() {
return false
}
@@ -3672,7 +3672,7 @@ func bounded(n *ir.Node, max int64) bool {
}
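
For intuition, this is the kind of proof the analysis provides; a hedged user-level sketch, since the compiler's own test cases are the authority here:

    package main

    import "fmt"

    func f(x int) byte {
        var s [16]byte
        i := x & 15 // provably in [0, 16) for any x, by the mask
        return s[i] // so the compiler can elide this bounds check
    }

    func main() {
        fmt.Println(f(1234))
    }
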
// usemethod checks interface method calls for uses of reflect.Type.Method.
-func usemethod(n *ir.Node) {
+func usemethod(n ir.Node) {
t := n.Left().Type()
// Looking for either of:
@@ -3717,7 +3717,7 @@ func usemethod(n *ir.Node) {
}
}
-func usefield(n *ir.Node) {
+func usefield(n ir.Node) {
if objabi.Fieldtrack_enabled == 0 {
return
}
@@ -3777,7 +3777,7 @@ func candiscardlist(l ir.Nodes) bool {
return true
}
-func candiscard(n *ir.Node) bool {
+func candiscard(n ir.Node) bool {
if n == nil {
return true
}
@@ -3891,7 +3891,7 @@ var wrapCall_prgen int
// The result of wrapCall MUST be assigned back to n, e.g.
// n.Left = wrapCall(n.Left, init)
-func wrapCall(n *ir.Node, init *ir.Nodes) *ir.Node {
+func wrapCall(n ir.Node, init *ir.Nodes) ir.Node {
if n.Init().Len() != 0 {
walkstmtlist(n.Init().Slice())
init.AppendNodes(n.PtrInit())
@@ -3909,7 +3909,7 @@ func wrapCall(n *ir.Node, init *ir.Nodes) *ir.Node {
}
// origArgs keeps track of which arguments are uintptr-unsafe/unsafe-uintptr conversions.
- origArgs := make([]*ir.Node, n.List().Len())
+ origArgs := make([]ir.Node, n.List().Len())
t := ir.Nod(ir.OTFUNC, nil, nil)
for i, arg := range n.List().Slice() {
s := lookupN("a", i)
@@ -3962,7 +3962,7 @@ func wrapCall(n *ir.Node, init *ir.Nodes) *ir.Node {
// type syntax expression n.Type.
// The result of substArgTypes MUST be assigned back to old, e.g.
// n.Left = substArgTypes(n.Left, t1, t2)
-func substArgTypes(old *ir.Node, types_ ...*types.Type) *ir.Node {
+func substArgTypes(old ir.Node, types_ ...*types.Type) ir.Node {
n := ir.Copy(old)
for _, t := range types_ {
@@ -3992,11 +3992,11 @@ func canMergeLoads() bool {
// isRuneCount reports whether n is of the form len([]rune(string)).
// These are optimized into a call to runtime.countrunes.
-func isRuneCount(n *ir.Node) bool {
+func isRuneCount(n ir.Node) bool {
return base.Flag.N == 0 && !instrumenting && n.Op() == ir.OLEN && n.Left().Op() == ir.OSTR2RUNES
}
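
The recognized pattern, at the user level (illustrative only):

    package main

    import "fmt"

    func main() {
        s := "héllo, 世界"
        // With optimizations enabled, the compiler rewrites this
        // len([]rune(string)) form into a runtime call that counts
        // runes directly, without materializing the []rune slice.
        n := len([]rune(s))
        fmt.Println(n)
    }
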
-func walkCheckPtrAlignment(n *ir.Node, init *ir.Nodes, count *ir.Node) *ir.Node {
+func walkCheckPtrAlignment(n ir.Node, init *ir.Nodes, count ir.Node) ir.Node {
if !n.Type().IsPtr() {
base.Fatalf("expected pointer type: %v", n.Type())
}
@@ -4024,7 +4024,7 @@ func walkCheckPtrAlignment(n *ir.Node, init *ir.Nodes, count *ir.Node) *ir.Node
var walkCheckPtrArithmeticMarker byte
-func walkCheckPtrArithmetic(n *ir.Node, init *ir.Nodes) *ir.Node {
+func walkCheckPtrArithmetic(n ir.Node, init *ir.Nodes) ir.Node {
// Calling cheapexpr(n, init) below leads to a recursive call
// to walkexpr, which leads us back here again. Use n.Opt to
// prevent infinite loops.
@@ -4055,9 +4055,9 @@ func walkCheckPtrArithmetic(n *ir.Node, init *ir.Nodes) *ir.Node {
// "It is valid both to add and to subtract offsets from a
// pointer in this way. It is also valid to use &^ to round
// pointers, usually for alignment."
- var originals []*ir.Node
- var walk func(n *ir.Node)
- walk = func(n *ir.Node) {
+ var originals []ir.Node
+ var walk func(n ir.Node)
+ walk = func(n ir.Node) {
switch n.Op() {
case ir.OADD:
walk(n.Left())
@@ -4088,6 +4088,6 @@ func walkCheckPtrArithmetic(n *ir.Node, init *ir.Nodes) *ir.Node {
// checkPtr reports whether pointer checking should be enabled for
// function fn at a given level. See debugHelpFooter for defined
// levels.
-func checkPtr(fn *ir.Node, level int) bool {
+func checkPtr(fn ir.Node, level int) bool {
return base.Debug.Checkptr >= level && fn.Func().Pragma&ir.NoCheckPtr == 0
}
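
As a rough sketch of what the collected pointer-arithmetic operands guard against at runtime under -d=checkptr (not part of this patch):

    package main

    import "unsafe"

    func main() {
        buf := make([]byte, 8)
        p := unsafe.Pointer(&buf[0])
        // Pointer arithmetic via uintptr: the instrumentation inserted
        // by walkCheckPtrArithmetic verifies that the result still
        // points into one of the original allocations (here, buf).
        p = unsafe.Pointer(uintptr(p) + 4)
        _ = p
    }
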
diff --git a/src/cmd/compile/internal/ir/dump.go b/src/cmd/compile/internal/ir/dump.go
index c4ea5af3d1..fe1410969f 100644
--- a/src/cmd/compile/internal/ir/dump.go
+++ b/src/cmd/compile/internal/ir/dump.go
@@ -200,7 +200,7 @@ func (p *dumper) dump(x reflect.Value, depth int) {
typ := x.Type()
isNode := false
- if n, ok := x.Interface().(Node); ok {
+ if n, ok := x.Interface().(node); ok {
isNode = true
p.printf("%s %s {", n.op.String(), p.addr(x))
} else {
diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go
index 9682bae39b..f394219c05 100644
--- a/src/cmd/compile/internal/ir/fmt.go
+++ b/src/cmd/compile/internal/ir/fmt.go
@@ -243,7 +243,7 @@ func (o Op) oconv(s fmt.State, flag FmtFlag, mode FmtMode) {
type FmtMode int
type fmtNode struct {
- x *Node
+ x Node
m FmtMode
}
@@ -277,11 +277,11 @@ type fmtNodes struct {
func (f *fmtNodes) Format(s fmt.State, verb rune) { f.x.format(s, verb, f.m) }
-func (n *Node) Format(s fmt.State, verb rune) {
+func (n *node) Format(s fmt.State, verb rune) {
FmtNode(n, s, verb)
}
-func FmtNode(n *Node, s fmt.State, verb rune) {
+func FmtNode(n Node, s fmt.State, verb rune) {
nodeFormat(n, s, verb, FErr)
}
@@ -311,7 +311,7 @@ func (m FmtMode) prepareArgs(args []interface{}) {
switch arg := arg.(type) {
case Op:
args[i] = &fmtOp{arg, m}
- case *Node:
+ case Node:
args[i] = &fmtNode{arg, m}
case nil:
args[i] = &fmtNode{nil, m} // assume this was a node interface
@@ -329,7 +329,7 @@ func (m FmtMode) prepareArgs(args []interface{}) {
}
}
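
One subtlety behind the separate nil case: with Node now an interface, a nil interface value in a type switch matches case nil rather than the interface case. A standalone illustration with toy types (not the compiler's):

    package main

    import "fmt"

    type node interface{ Op() int }

    func describe(arg interface{}) string {
        switch arg.(type) {
        case nil:
            return "nil interface" // a nil node lands here...
        case node:
            return "node" // ...not here
        default:
            return "other"
        }
    }

    func main() {
        var n node
        fmt.Println(describe(n)) // "nil interface"
    }
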
-func nodeFormat(n *Node, s fmt.State, verb rune, mode FmtMode) {
+func nodeFormat(n Node, s fmt.State, verb rune, mode FmtMode) {
switch verb {
case 'v', 'S', 'L':
nconvFmt(n, s, fmtFlag(s, verb), mode)
@@ -343,10 +343,10 @@ func nodeFormat(n *Node, s fmt.State, verb rune, mode FmtMode) {
}
// EscFmt is set by the escape analysis code to add escape analysis details to the node print.
-var EscFmt func(n *Node, short bool) string
+var EscFmt func(n Node, short bool) string
// *Node details
-func jconvFmt(n *Node, s fmt.State, flag FmtFlag) {
+func jconvFmt(n Node, s fmt.State, flag FmtFlag) {
short := flag&FmtShort != 0
// Useful to see which nodes in an AST printout are actually identical
@@ -894,7 +894,7 @@ func StmtWithInit(op Op) bool {
return false
}
-func stmtFmt(n *Node, s fmt.State, mode FmtMode) {
+func stmtFmt(n Node, s fmt.State, mode FmtMode) {
// some statements allow for an init, but at most one,
// but we may have an arbitrary number added, eg by typecheck
// and inlining. If it doesn't fit the syntax, emit an enclosing
@@ -1194,7 +1194,7 @@ var OpPrec = []int{
OEND: 0,
}
-func exprFmt(n *Node, s fmt.State, prec int, mode FmtMode) {
+func exprFmt(n Node, s fmt.State, prec int, mode FmtMode) {
for n != nil && n.Implicit() && (n.Op() == ODEREF || n.Op() == OADDR) {
n = n.Left()
}
@@ -1556,7 +1556,7 @@ func exprFmt(n *Node, s fmt.State, prec int, mode FmtMode) {
}
}
-func nodeFmt(n *Node, s fmt.State, flag FmtFlag, mode FmtMode) {
+func nodeFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) {
t := n.Type()
// We almost always want the original.
@@ -1586,7 +1586,7 @@ func nodeFmt(n *Node, s fmt.State, flag FmtFlag, mode FmtMode) {
exprFmt(n, s, 0, mode)
}
-func nodeDumpFmt(n *Node, s fmt.State, flag FmtFlag, mode FmtMode) {
+func nodeDumpFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) {
recur := flag&FmtShort == 0
if recur {
@@ -1794,12 +1794,12 @@ func typeFormat(t *types.Type, s fmt.State, verb rune, mode FmtMode) {
}
}
-func (n *Node) String() string { return fmt.Sprint(n) }
-func modeString(n *Node, mode FmtMode) string { return mode.Sprint(n) }
+func (n *node) String() string { return fmt.Sprint(n) }
+func modeString(n Node, mode FmtMode) string { return mode.Sprint(n) }
// "%L" suffix with "(type %T)" where possible
// "%+S" in debug mode, don't recurse, no multiline output
-func nconvFmt(n *Node, s fmt.State, flag FmtFlag, mode FmtMode) {
+func nconvFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) {
if n == nil {
fmt.Fprint(s, "<N>")
return
@@ -1866,7 +1866,7 @@ func FDumpList(w io.Writer, s string, l Nodes) {
fmt.Fprintf(w, "%s%+v\n", s, l)
}
-func Dump(s string, n *Node) {
+func Dump(s string, n Node) {
fmt.Printf("%s [%p]%+v\n", s, n, n)
}
@@ -1911,6 +1911,6 @@ func InstallTypeFormats() {
// Line returns n's position as a string. If n has been inlined,
// it uses the outermost position where n has been inlined.
-func Line(n *Node) string {
+func Line(n Node) string {
return base.FmtPos(n.Pos())
}
diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go
index d700c59390..477d07f502 100644
--- a/src/cmd/compile/internal/ir/node.go
+++ b/src/cmd/compile/internal/ir/node.go
@@ -20,7 +20,7 @@ import (
)
// A Node is the abstract interface to an IR node.
-type INode interface {
+type Node interface {
// Formatting
Format(s fmt.State, verb rune)
String() string
@@ -30,19 +30,19 @@ type INode interface {
SetPos(x src.XPos)
// For making copies. Mainly used by Copy and SepCopy.
- RawCopy() *Node
+ RawCopy() Node
// Abstract graph structure, for generic traversals.
Op() Op
SetOp(x Op)
- Orig() *Node
- SetOrig(x *Node)
+ Orig() Node
+ SetOrig(x Node)
SubOp() Op
SetSubOp(x Op)
- Left() *Node
- SetLeft(x *Node)
- Right() *Node
- SetRight(x *Node)
+ Left() Node
+ SetLeft(x Node)
+ Right() Node
+ SetRight(x Node)
Init() Nodes
PtrInit() *Nodes
SetInit(x Nodes)
@@ -71,8 +71,8 @@ type INode interface {
SetClass(x Class)
Likely() bool
SetLikely(x bool)
- SliceBounds() (low, high, max *Node)
- SetSliceBounds(low, high, max *Node)
+ SliceBounds() (low, high, max Node)
+ SetSliceBounds(low, high, max Node)
Iota() int64
SetIota(x int64)
Colas() bool
@@ -130,17 +130,17 @@ type INode interface {
CanBeAnSSASym()
}
-var _ INode = (*Node)(nil)
+var _ Node = (*node)(nil)
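
This is the standard compile-time assertion idiom: the conversion fails to build unless *node implements Node, and it costs nothing at runtime. A generic sketch of the idiom:

    package main

    type Stringer interface{ String() string }

    type impl struct{}

    func (impl) String() string { return "impl" }

    // Compile-time check that *impl satisfies Stringer; the build
    // breaks here if a method is missing or has the wrong signature.
    var _ Stringer = (*impl)(nil)

    func main() {}
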
// A Node is a single node in the syntax tree.
// Actually the syntax tree is a syntax DAG, because there is only one
// node with Op=ONAME for a given instance of a variable x.
// The same is true for Op=OTYPE and Op=OLITERAL. See Node.mayBeShared.
-type Node struct {
+type node struct {
// Tree structure.
// Generic recursive walks should follow these fields.
- left *Node
- right *Node
+ left Node
+ right Node
init Nodes
body Nodes
list Nodes
@@ -148,7 +148,7 @@ type Node struct {
// most nodes
typ *types.Type
- orig *Node // original form, for printing, and tracking copies of ONAMEs
+ orig Node // original form, for printing, and tracking copies of ONAMEs
// func
fn *Func
@@ -179,46 +179,46 @@ type Node struct {
aux uint8
}
-func (n *Node) Left() *Node { return n.left }
-func (n *Node) SetLeft(x *Node) { n.left = x }
-func (n *Node) Right() *Node { return n.right }
-func (n *Node) SetRight(x *Node) { n.right = x }
-func (n *Node) Orig() *Node { return n.orig }
-func (n *Node) SetOrig(x *Node) { n.orig = x }
-func (n *Node) Type() *types.Type { return n.typ }
-func (n *Node) SetType(x *types.Type) { n.typ = x }
-func (n *Node) Func() *Func { return n.fn }
-func (n *Node) SetFunc(x *Func) { n.fn = x }
-func (n *Node) Name() *Name { return n.name }
-func (n *Node) SetName(x *Name) { n.name = x }
-func (n *Node) Sym() *types.Sym { return n.sym }
-func (n *Node) SetSym(x *types.Sym) { n.sym = x }
-func (n *Node) Pos() src.XPos { return n.pos }
-func (n *Node) SetPos(x src.XPos) { n.pos = x }
-func (n *Node) Offset() int64 { return n.offset }
-func (n *Node) SetOffset(x int64) { n.offset = x }
-func (n *Node) Esc() uint16 { return n.esc }
-func (n *Node) SetEsc(x uint16) { n.esc = x }
-func (n *Node) Op() Op { return n.op }
-func (n *Node) SetOp(x Op) { n.op = x }
-func (n *Node) Init() Nodes { return n.init }
-func (n *Node) SetInit(x Nodes) { n.init = x }
-func (n *Node) PtrInit() *Nodes { return &n.init }
-func (n *Node) Body() Nodes { return n.body }
-func (n *Node) SetBody(x Nodes) { n.body = x }
-func (n *Node) PtrBody() *Nodes { return &n.body }
-func (n *Node) List() Nodes { return n.list }
-func (n *Node) SetList(x Nodes) { n.list = x }
-func (n *Node) PtrList() *Nodes { return &n.list }
-func (n *Node) Rlist() Nodes { return n.rlist }
-func (n *Node) SetRlist(x Nodes) { n.rlist = x }
-func (n *Node) PtrRlist() *Nodes { return &n.rlist }
-
-func (n *Node) ResetAux() {
+func (n *node) Left() Node { return n.left }
+func (n *node) SetLeft(x Node) { n.left = x }
+func (n *node) Right() Node { return n.right }
+func (n *node) SetRight(x Node) { n.right = x }
+func (n *node) Orig() Node { return n.orig }
+func (n *node) SetOrig(x Node) { n.orig = x }
+func (n *node) Type() *types.Type { return n.typ }
+func (n *node) SetType(x *types.Type) { n.typ = x }
+func (n *node) Func() *Func { return n.fn }
+func (n *node) SetFunc(x *Func) { n.fn = x }
+func (n *node) Name() *Name { return n.name }
+func (n *node) SetName(x *Name) { n.name = x }
+func (n *node) Sym() *types.Sym { return n.sym }
+func (n *node) SetSym(x *types.Sym) { n.sym = x }
+func (n *node) Pos() src.XPos { return n.pos }
+func (n *node) SetPos(x src.XPos) { n.pos = x }
+func (n *node) Offset() int64 { return n.offset }
+func (n *node) SetOffset(x int64) { n.offset = x }
+func (n *node) Esc() uint16 { return n.esc }
+func (n *node) SetEsc(x uint16) { n.esc = x }
+func (n *node) Op() Op { return n.op }
+func (n *node) SetOp(x Op) { n.op = x }
+func (n *node) Init() Nodes { return n.init }
+func (n *node) SetInit(x Nodes) { n.init = x }
+func (n *node) PtrInit() *Nodes { return &n.init }
+func (n *node) Body() Nodes { return n.body }
+func (n *node) SetBody(x Nodes) { n.body = x }
+func (n *node) PtrBody() *Nodes { return &n.body }
+func (n *node) List() Nodes { return n.list }
+func (n *node) SetList(x Nodes) { n.list = x }
+func (n *node) PtrList() *Nodes { return &n.list }
+func (n *node) Rlist() Nodes { return n.rlist }
+func (n *node) SetRlist(x Nodes) { n.rlist = x }
+func (n *node) PtrRlist() *Nodes { return &n.rlist }
+
+func (n *node) ResetAux() {
n.aux = 0
}
-func (n *Node) SubOp() Op {
+func (n *node) SubOp() Op {
switch n.Op() {
case OASOP, ONAME:
default:
@@ -227,7 +227,7 @@ func (n *Node) SubOp() Op {
return Op(n.aux)
}
-func (n *Node) SetSubOp(op Op) {
+func (n *node) SetSubOp(op Op) {
switch n.Op() {
case OASOP, ONAME:
default:
@@ -236,14 +236,14 @@ func (n *Node) SetSubOp(op Op) {
n.aux = uint8(op)
}
-func (n *Node) IndexMapLValue() bool {
+func (n *node) IndexMapLValue() bool {
if n.Op() != OINDEXMAP {
base.Fatalf("unexpected op: %v", n.Op())
}
return n.aux != 0
}
-func (n *Node) SetIndexMapLValue(b bool) {
+func (n *node) SetIndexMapLValue(b bool) {
if n.Op() != OINDEXMAP {
base.Fatalf("unexpected op: %v", n.Op())
}
@@ -254,28 +254,28 @@ func (n *Node) SetIndexMapLValue(b bool) {
}
}
-func (n *Node) TChanDir() types.ChanDir {
+func (n *node) TChanDir() types.ChanDir {
if n.Op() != OTCHAN {
base.Fatalf("unexpected op: %v", n.Op())
}
return types.ChanDir(n.aux)
}
-func (n *Node) SetTChanDir(dir types.ChanDir) {
+func (n *node) SetTChanDir(dir types.ChanDir) {
if n.Op() != OTCHAN {
base.Fatalf("unexpected op: %v", n.Op())
}
n.aux = uint8(dir)
}
-func IsSynthetic(n *Node) bool {
+func IsSynthetic(n Node) bool {
name := n.Sym().Name
return name[0] == '.' || name[0] == '~'
}
// IsAutoTmp indicates if n was created by the compiler as a temporary,
// based on the setting of the .AutoTemp flag in n's Name.
-func IsAutoTmp(n *Node) bool {
+func IsAutoTmp(n Node) bool {
if n == nil || n.Op() != ONAME {
return false
}
@@ -308,49 +308,49 @@ const (
_, nodeEmbedded // ODCLFIELD embedded type
)
-func (n *Node) Class() Class { return Class(n.flags.get3(nodeClass)) }
-func (n *Node) Walkdef() uint8 { return n.flags.get2(nodeWalkdef) }
-func (n *Node) Typecheck() uint8 { return n.flags.get2(nodeTypecheck) }
-func (n *Node) Initorder() uint8 { return n.flags.get2(nodeInitorder) }
-
-func (n *Node) HasBreak() bool { return n.flags&nodeHasBreak != 0 }
-func (n *Node) NoInline() bool { return n.flags&nodeNoInline != 0 }
-func (n *Node) Implicit() bool { return n.flags&nodeImplicit != 0 }
-func (n *Node) IsDDD() bool { return n.flags&nodeIsDDD != 0 }
-func (n *Node) Diag() bool { return n.flags&nodeDiag != 0 }
-func (n *Node) Colas() bool { return n.flags&nodeColas != 0 }
-func (n *Node) NonNil() bool { return n.flags&nodeNonNil != 0 }
-func (n *Node) Transient() bool { return n.flags&nodeTransient != 0 }
-func (n *Node) Bounded() bool { return n.flags&nodeBounded != 0 }
-func (n *Node) HasCall() bool { return n.flags&nodeHasCall != 0 }
-func (n *Node) Likely() bool { return n.flags&nodeLikely != 0 }
-func (n *Node) HasVal() bool { return n.flags&nodeHasVal != 0 }
-func (n *Node) HasOpt() bool { return n.flags&nodeHasOpt != 0 }
-func (n *Node) Embedded() bool { return n.flags&nodeEmbedded != 0 }
-
-func (n *Node) SetClass(b Class) { n.flags.set3(nodeClass, uint8(b)) }
-func (n *Node) SetWalkdef(b uint8) { n.flags.set2(nodeWalkdef, b) }
-func (n *Node) SetTypecheck(b uint8) { n.flags.set2(nodeTypecheck, b) }
-func (n *Node) SetInitorder(b uint8) { n.flags.set2(nodeInitorder, b) }
-
-func (n *Node) SetHasBreak(b bool) { n.flags.set(nodeHasBreak, b) }
-func (n *Node) SetNoInline(b bool) { n.flags.set(nodeNoInline, b) }
-func (n *Node) SetImplicit(b bool) { n.flags.set(nodeImplicit, b) }
-func (n *Node) SetIsDDD(b bool) { n.flags.set(nodeIsDDD, b) }
-func (n *Node) SetDiag(b bool) { n.flags.set(nodeDiag, b) }
-func (n *Node) SetColas(b bool) { n.flags.set(nodeColas, b) }
-func (n *Node) SetTransient(b bool) { n.flags.set(nodeTransient, b) }
-func (n *Node) SetHasCall(b bool) { n.flags.set(nodeHasCall, b) }
-func (n *Node) SetLikely(b bool) { n.flags.set(nodeLikely, b) }
-func (n *Node) setHasVal(b bool) { n.flags.set(nodeHasVal, b) }
-func (n *Node) setHasOpt(b bool) { n.flags.set(nodeHasOpt, b) }
-func (n *Node) SetEmbedded(b bool) { n.flags.set(nodeEmbedded, b) }
+func (n *node) Class() Class { return Class(n.flags.get3(nodeClass)) }
+func (n *node) Walkdef() uint8 { return n.flags.get2(nodeWalkdef) }
+func (n *node) Typecheck() uint8 { return n.flags.get2(nodeTypecheck) }
+func (n *node) Initorder() uint8 { return n.flags.get2(nodeInitorder) }
+
+func (n *node) HasBreak() bool { return n.flags&nodeHasBreak != 0 }
+func (n *node) NoInline() bool { return n.flags&nodeNoInline != 0 }
+func (n *node) Implicit() bool { return n.flags&nodeImplicit != 0 }
+func (n *node) IsDDD() bool { return n.flags&nodeIsDDD != 0 }
+func (n *node) Diag() bool { return n.flags&nodeDiag != 0 }
+func (n *node) Colas() bool { return n.flags&nodeColas != 0 }
+func (n *node) NonNil() bool { return n.flags&nodeNonNil != 0 }
+func (n *node) Transient() bool { return n.flags&nodeTransient != 0 }
+func (n *node) Bounded() bool { return n.flags&nodeBounded != 0 }
+func (n *node) HasCall() bool { return n.flags&nodeHasCall != 0 }
+func (n *node) Likely() bool { return n.flags&nodeLikely != 0 }
+func (n *node) HasVal() bool { return n.flags&nodeHasVal != 0 }
+func (n *node) HasOpt() bool { return n.flags&nodeHasOpt != 0 }
+func (n *node) Embedded() bool { return n.flags&nodeEmbedded != 0 }
+
+func (n *node) SetClass(b Class) { n.flags.set3(nodeClass, uint8(b)) }
+func (n *node) SetWalkdef(b uint8) { n.flags.set2(nodeWalkdef, b) }
+func (n *node) SetTypecheck(b uint8) { n.flags.set2(nodeTypecheck, b) }
+func (n *node) SetInitorder(b uint8) { n.flags.set2(nodeInitorder, b) }
+
+func (n *node) SetHasBreak(b bool) { n.flags.set(nodeHasBreak, b) }
+func (n *node) SetNoInline(b bool) { n.flags.set(nodeNoInline, b) }
+func (n *node) SetImplicit(b bool) { n.flags.set(nodeImplicit, b) }
+func (n *node) SetIsDDD(b bool) { n.flags.set(nodeIsDDD, b) }
+func (n *node) SetDiag(b bool) { n.flags.set(nodeDiag, b) }
+func (n *node) SetColas(b bool) { n.flags.set(nodeColas, b) }
+func (n *node) SetTransient(b bool) { n.flags.set(nodeTransient, b) }
+func (n *node) SetHasCall(b bool) { n.flags.set(nodeHasCall, b) }
+func (n *node) SetLikely(b bool) { n.flags.set(nodeLikely, b) }
+func (n *node) setHasVal(b bool) { n.flags.set(nodeHasVal, b) }
+func (n *node) setHasOpt(b bool) { n.flags.set(nodeHasOpt, b) }
+func (n *node) SetEmbedded(b bool) { n.flags.set(nodeEmbedded, b) }
// MarkNonNil marks a pointer n as being guaranteed non-nil,
// on all code paths, at all times.
// During conversion to SSA, non-nil pointers won't have nil checks
// inserted before dereferencing. See state.exprPtr.
-func (n *Node) MarkNonNil() {
+func (n *node) MarkNonNil() {
if !n.Type().IsPtr() && !n.Type().IsUnsafePtr() {
base.Fatalf("MarkNonNil(%v), type %v", n, n.Type())
}
@@ -361,7 +361,7 @@ func (n *Node) MarkNonNil() {
// When n is an index or slice operation, n does not need bounds checks.
// When n is a dereferencing operation, n does not need nil checks.
// When n is a makeslice+copy operation, n does not need length and cap checks.
-func (n *Node) SetBounded(b bool) {
+func (n *node) SetBounded(b bool) {
switch n.Op() {
case OINDEX, OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR:
// No bounds checks needed.
@@ -377,7 +377,7 @@ func (n *Node) SetBounded(b bool) {
}
// MarkReadonly indicates that n is an ONAME with readonly contents.
-func (n *Node) MarkReadonly() {
+func (n *node) MarkReadonly() {
if n.Op() != ONAME {
base.Fatalf("Node.MarkReadonly %v", n.Op())
}
@@ -389,7 +389,7 @@ func (n *Node) MarkReadonly() {
}
// Val returns the constant.Value for the node.
-func (n *Node) Val() constant.Value {
+func (n *node) Val() constant.Value {
if !n.HasVal() {
return constant.MakeUnknown()
}
@@ -398,7 +398,7 @@ func (n *Node) Val() constant.Value {
// SetVal sets the constant.Value for the node,
// which must not have been used with SetOpt.
-func (n *Node) SetVal(v constant.Value) {
+func (n *node) SetVal(v constant.Value) {
if n.HasOpt() {
base.Flag.LowerH = 1
Dump("have Opt", n)
@@ -412,7 +412,7 @@ func (n *Node) SetVal(v constant.Value) {
}
// Opt returns the optimizer data for the node.
-func (n *Node) Opt() interface{} {
+func (n *node) Opt() interface{} {
if !n.HasOpt() {
return nil
}
@@ -421,7 +421,7 @@ func (n *Node) Opt() interface{} {
// SetOpt sets the optimizer data for the node, which must not have been used with SetVal.
// SetOpt(nil) is ignored for Vals to simplify call sites that are clearing Opts.
-func (n *Node) SetOpt(x interface{}) {
+func (n *node) SetOpt(x interface{}) {
if x == nil {
if n.HasOpt() {
n.setHasOpt(false)
@@ -438,17 +438,17 @@ func (n *Node) SetOpt(x interface{}) {
n.e = x
}
-func (n *Node) Iota() int64 {
+func (n *node) Iota() int64 {
return n.Offset()
}
-func (n *Node) SetIota(x int64) {
+func (n *node) SetIota(x int64) {
n.SetOffset(x)
}
// MayBeShared reports whether n may occur in multiple places in the AST.
// Extra care must be taken when mutating such a node.
-func MayBeShared(n *Node) bool {
+func MayBeShared(n Node) bool {
switch n.Op() {
case ONAME, OLITERAL, ONIL, OTYPE:
return true
@@ -457,7 +457,7 @@ func MayBeShared(n *Node) bool {
}
// FuncName returns the name (without the package) of the function n.
-func FuncName(n *Node) string {
+func FuncName(n Node) string {
if n == nil || n.Func() == nil || n.Func().Nname == nil {
return "<nil>"
}
@@ -468,7 +468,7 @@ func FuncName(n *Node) string {
// This differs from the compiler's internal convention where local functions lack a package
// because the ultimate consumer of this is a human looking at an IDE; package is only empty
// if the compilation package is actually the empty string.
-func PkgFuncName(n *Node) string {
+func PkgFuncName(n Node) string {
var s *types.Sym
if n == nil {
return "<nil>"
@@ -494,19 +494,19 @@ func PkgFuncName(n *Node) string {
}
// The compiler needs *Node to be assignable to cmd/compile/internal/ssa.Sym.
-func (n *Node) CanBeAnSSASym() {
+func (n *node) CanBeAnSSASym() {
}
// Name holds Node fields used only by named nodes (ONAME, OTYPE, OPACK, OLABEL, some OLITERAL).
type Name struct {
- Pack *Node // real package for import . names
+ Pack Node // real package for import . names
Pkg *types.Pkg // pkg for OPACK nodes
// For a local variable (not param) or extern, the initializing assignment (OAS or OAS2).
// For a closure var, the ONAME node of the outer captured variable
- Defn *Node
+ Defn Node
// The ODCLFUNC node (for a static function/method or a closure) in which
// local variable or param is declared.
- Curfn *Node
+ Curfn Node
Param *Param // additional fields for ONAME, OTYPE
Decldepth int32 // declaration loop depth, increased for every loop or label
// Unique number for ONAME nodes within a function. Function outputs
@@ -565,11 +565,11 @@ func (n *Name) SetOpenDeferSlot(b bool) { n.flags.set(nameOpenDeferSlot,
func (n *Name) SetLibfuzzerExtraCounter(b bool) { n.flags.set(nameLibfuzzerExtraCounter, b) }
type Param struct {
- Ntype *Node
- Heapaddr *Node // temp holding heap address of param
+ Ntype Node
+ Heapaddr Node // temp holding heap address of param
// ONAME PAUTOHEAP
- Stackcopy *Node // the PPARAM/PPARAMOUT on-stack slot (moved func params only)
+ Stackcopy Node // the PPARAM/PPARAMOUT on-stack slot (moved func params only)
// ONAME closure linkage
// Consider:
@@ -640,8 +640,8 @@ type Param struct {
//
// Because of the sharding of pieces of the node, x.Defn means x.Name.Defn
// and x.Innermost/Outer means x.Name.Param.Innermost/Outer.
- Innermost *Node
- Outer *Node
+ Innermost Node
+ Outer Node
// OTYPE & ONAME //go:embed info,
// sharing storage to reduce gc.Param size.
@@ -762,9 +762,9 @@ func (p *Param) SetEmbedFiles(list []string) {
// the generated ODCLFUNC (as n.Func.Decl), but there is no
// pointer from the Func back to the OCALLPART.
type Func struct {
- Nname *Node // ONAME node
- Decl *Node // ODCLFUNC node
- OClosure *Node // OCLOSURE node
+ Nname Node // ONAME node
+ Decl Node // ODCLFUNC node
+ OClosure Node // OCLOSURE node
Shortname *types.Sym
@@ -774,10 +774,10 @@ type Func struct {
Exit Nodes
// ONAME nodes for all params/locals for this func/closure, does NOT
// include closurevars until transformclosure runs.
- Dcl []*Node
+ Dcl []Node
ClosureEnter Nodes // list of ONAME nodes of captured variables
- ClosureType *Node // closure representation type
+ ClosureType Node // closure representation type
ClosureCalled bool // closure is only immediately called
ClosureVars Nodes // closure params; each has closurevar set
@@ -822,8 +822,8 @@ type Inline struct {
Cost int32 // heuristic cost of inlining this function
// Copies of Func.Dcl and Nbody for use during inlining.
- Dcl []*Node
- Body []*Node
+ Dcl []Node
+ Body []Node
}
// A Mark represents a scope boundary.
@@ -1108,17 +1108,17 @@ const (
// Nodes is a pointer to a slice of *Node.
// For fields that are not used in most nodes, this is used instead of
// a slice to save space.
-type Nodes struct{ slice *[]*Node }
+type Nodes struct{ slice *[]Node }
// AsNodes returns a slice of Node as a Nodes value.
-func AsNodes(s []*Node) Nodes {
+func AsNodes(s []Node) Nodes {
return Nodes{&s}
}
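
The space saving comes from header sizes: a slice header is three words even when empty, while a pointer to a slice is a single word that can be nil. A quick demonstration (64-bit sizes in comments):

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        var s []int  // slice header: pointer, len, cap
        var p *[]int // one pointer, nil when unused
        fmt.Println(unsafe.Sizeof(s)) // 24 on 64-bit
        fmt.Println(unsafe.Sizeof(p)) // 8 on 64-bit
    }
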
// Slice returns the entries in Nodes as a slice.
// Changes to the slice entries (as in s[i] = n) will be reflected in
// the Nodes.
-func (n Nodes) Slice() []*Node {
+func (n Nodes) Slice() []Node {
if n.slice == nil {
return nil
}
@@ -1135,25 +1135,25 @@ func (n Nodes) Len() int {
// Index returns the i'th element of Nodes.
// It panics if n does not have at least i+1 elements.
-func (n Nodes) Index(i int) *Node {
+func (n Nodes) Index(i int) Node {
return (*n.slice)[i]
}
// First returns the first element of Nodes (same as n.Index(0)).
// It panics if n has no elements.
-func (n Nodes) First() *Node {
+func (n Nodes) First() Node {
return (*n.slice)[0]
}
// Second returns the second element of Nodes (same as n.Index(1)).
// It panics if n has fewer than two elements.
-func (n Nodes) Second() *Node {
+func (n Nodes) Second() Node {
return (*n.slice)[1]
}
// Set sets n to a slice.
// This takes ownership of the slice.
-func (n *Nodes) Set(s []*Node) {
+func (n *Nodes) Set(s []Node) {
if len(s) == 0 {
n.slice = nil
} else {
@@ -1166,18 +1166,18 @@ func (n *Nodes) Set(s []*Node) {
}
// Set1 sets n to a slice containing a single node.
-func (n *Nodes) Set1(n1 *Node) {
- n.slice = &[]*Node{n1}
+func (n *Nodes) Set1(n1 Node) {
+ n.slice = &[]Node{n1}
}
// Set2 sets n to a slice containing two nodes.
-func (n *Nodes) Set2(n1, n2 *Node) {
- n.slice = &[]*Node{n1, n2}
+func (n *Nodes) Set2(n1, n2 Node) {
+ n.slice = &[]Node{n1, n2}
}
// Set3 sets n to a slice containing three nodes.
-func (n *Nodes) Set3(n1, n2, n3 *Node) {
- n.slice = &[]*Node{n1, n2, n3}
+func (n *Nodes) Set3(n1, n2, n3 Node) {
+ n.slice = &[]Node{n1, n2, n3}
}
// MoveNodes sets n to the contents of n2, then clears n2.
@@ -1188,35 +1188,35 @@ func (n *Nodes) MoveNodes(n2 *Nodes) {
// SetIndex sets the i'th element of Nodes to node.
// It panics if n does not have at least i+1 elements.
-func (n Nodes) SetIndex(i int, node *Node) {
+func (n Nodes) SetIndex(i int, node Node) {
(*n.slice)[i] = node
}
// SetFirst sets the first element of Nodes to node.
// It panics if n does not have at least one element.
-func (n Nodes) SetFirst(node *Node) {
+func (n Nodes) SetFirst(node Node) {
(*n.slice)[0] = node
}
// SetSecond sets the second element of Nodes to node.
// It panics if n does not have at least two elements.
-func (n Nodes) SetSecond(node *Node) {
+func (n Nodes) SetSecond(node Node) {
(*n.slice)[1] = node
}
// Addr returns the address of the i'th element of Nodes.
// It panics if n does not have at least i+1 elements.
-func (n Nodes) Addr(i int) **Node {
+func (n Nodes) Addr(i int) *Node {
return &(*n.slice)[i]
}
// Append appends entries to Nodes.
-func (n *Nodes) Append(a ...*Node) {
+func (n *Nodes) Append(a ...Node) {
if len(a) == 0 {
return
}
if n.slice == nil {
- s := make([]*Node, len(a))
+ s := make([]Node, len(a))
copy(s, a)
n.slice = &s
return
@@ -1226,7 +1226,7 @@ func (n *Nodes) Append(a ...*Node) {
// Prepend prepends entries to Nodes.
// If a slice is passed in, this will take ownership of it.
-func (n *Nodes) Prepend(a ...*Node) {
+func (n *Nodes) Prepend(a ...Node) {
if len(a) == 0 {
return
}
@@ -1251,7 +1251,7 @@ func (n *Nodes) AppendNodes(n2 *Nodes) {
// Inspect invokes f on each node in an AST in depth-first order.
// If f(n) returns false, inspect skips visiting n's children.
-func Inspect(n *Node, f func(*Node) bool) {
+func Inspect(n Node, f func(Node) bool) {
if n == nil || !f(n) {
return
}
@@ -1263,7 +1263,7 @@ func Inspect(n *Node, f func(*Node) bool) {
InspectList(n.Rlist(), f)
}
-func InspectList(l Nodes, f func(*Node) bool) {
+func InspectList(l Nodes, f func(Node) bool) {
for _, n := range l.Slice() {
Inspect(n, f)
}
@@ -1272,7 +1272,7 @@ func InspectList(l Nodes, f func(*Node) bool) {
// NodeQueue is a FIFO queue of Node. The zero value of NodeQueue is
// a ready-to-use empty queue.
type NodeQueue struct {
- ring []*Node
+ ring []Node
head, tail int
}
@@ -1282,12 +1282,12 @@ func (q *NodeQueue) Empty() bool {
}
// PushRight appends n to the right of the queue.
-func (q *NodeQueue) PushRight(n *Node) {
+func (q *NodeQueue) PushRight(n Node) {
if len(q.ring) == 0 {
- q.ring = make([]*Node, 16)
+ q.ring = make([]Node, 16)
} else if q.head+len(q.ring) == q.tail {
// Grow the ring.
- nring := make([]*Node, len(q.ring)*2)
+ nring := make([]Node, len(q.ring)*2)
// Copy the old elements.
part := q.ring[q.head%len(q.ring):]
if q.tail-q.head <= len(part) {
@@ -1306,7 +1306,7 @@ func (q *NodeQueue) PushRight(n *Node) {
// PopLeft pops a node from the left of the queue. It panics if q is
// empty.
-func (q *NodeQueue) PopLeft() *Node {
+func (q *NodeQueue) PopLeft() Node {
if q.Empty() {
panic("dequeue empty")
}
@@ -1316,25 +1316,25 @@ func (q *NodeQueue) PopLeft() *Node {
}
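
The queue is a grow-by-doubling ring buffer indexed modulo its length, with head and tail as ever-increasing counters. A self-contained sketch of the same mechanism using int elements (a simplified mirror, not the compiler's code):

    package main

    import "fmt"

    type queue struct {
        ring       []int
        head, tail int // element i lives at ring[i%len(ring)]
    }

    func (q *queue) empty() bool { return q.head == q.tail }

    func (q *queue) pushRight(v int) {
        if len(q.ring) == 0 {
            q.ring = make([]int, 16)
        } else if q.tail-q.head == len(q.ring) {
            // Full: double the ring, unwrapping the elements so that
            // head starts at index 0 again.
            nring := make([]int, len(q.ring)*2)
            n := 0
            for i := q.head; i < q.tail; i++ {
                nring[n] = q.ring[i%len(q.ring)]
                n++
            }
            q.ring, q.head, q.tail = nring, 0, n
        }
        q.ring[q.tail%len(q.ring)] = v
        q.tail++
    }

    func (q *queue) popLeft() int {
        if q.empty() {
            panic("dequeue empty")
        }
        v := q.ring[q.head%len(q.ring)]
        q.head++
        return v
    }

    func main() {
        var q queue
        for i := 0; i < 20; i++ { // forces one grow past the initial 16
            q.pushRight(i)
        }
        fmt.Println(q.popLeft(), q.popLeft()) // 0 1
    }
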
// NodeSet is a set of Nodes.
-type NodeSet map[*Node]struct{}
+type NodeSet map[Node]struct{}
// Has reports whether s contains n.
-func (s NodeSet) Has(n *Node) bool {
+func (s NodeSet) Has(n Node) bool {
_, isPresent := s[n]
return isPresent
}
// Add adds n to s.
-func (s *NodeSet) Add(n *Node) {
+func (s *NodeSet) Add(n Node) {
if *s == nil {
- *s = make(map[*Node]struct{})
+ *s = make(map[Node]struct{})
}
(*s)[n] = struct{}{}
}
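
Keying a map by the Node interface works because interface values are comparable when their dynamic types are, and every key stored here is a *node pointer compared by identity. Toy illustration:

    package main

    import "fmt"

    type node interface{ Op() int }

    type lit struct{ op int }

    func (l *lit) Op() int { return l.op }

    func main() {
        set := map[node]struct{}{}
        n := &lit{op: 1}
        set[n] = struct{}{}
        _, ok := set[n] // same *lit pointer: found
        fmt.Println(ok) // true
    }
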
// Sorted returns s sorted according to less.
-func (s NodeSet) Sorted(less func(*Node, *Node) bool) []*Node {
- var res []*Node
+func (s NodeSet) Sorted(less func(Node, Node) bool) []Node {
+ var res []Node
for n := range s {
res = append(res, n)
}
@@ -1342,16 +1342,16 @@ func (s NodeSet) Sorted(less func(*Node, *Node) bool) []*Node {
return res
}
-func Nod(op Op, nleft, nright *Node) *Node {
+func Nod(op Op, nleft, nright Node) Node {
return NodAt(base.Pos, op, nleft, nright)
}
-func NodAt(pos src.XPos, op Op, nleft, nright *Node) *Node {
- var n *Node
+func NodAt(pos src.XPos, op Op, nleft, nright Node) Node {
+ var n Node
switch op {
case ODCLFUNC:
var x struct {
- n Node
+ n node
f Func
}
n = &x.n
@@ -1361,13 +1361,13 @@ func NodAt(pos src.XPos, op Op, nleft, nright *Node) *Node {
base.Fatalf("use newname instead")
case OLABEL, OPACK:
var x struct {
- n Node
+ n node
m Name
}
n = &x.n
n.SetName(&x.m)
default:
- n = new(Node)
+ n = new(node)
}
n.SetOp(op)
n.SetLeft(nleft)
@@ -1380,13 +1380,13 @@ func NodAt(pos src.XPos, op Op, nleft, nright *Node) *Node {
// NewNameAt returns a new ONAME Node associated with symbol s at position pos.
// The caller is responsible for setting n.Name.Curfn.
-func NewNameAt(pos src.XPos, s *types.Sym) *Node {
+func NewNameAt(pos src.XPos, s *types.Sym) Node {
if s == nil {
base.Fatalf("newnamel nil")
}
var x struct {
- n Node
+ n node
m Name
p Param
}
@@ -1453,14 +1453,14 @@ type SymAndPos struct {
Pos src.XPos // line of call
}
-func AsNode(n types.IRNode) *Node {
+func AsNode(n types.IRNode) Node {
if n == nil {
return nil
}
- return n.(*Node)
+ return n.(Node)
}
-var BlankNode *Node
+var BlankNode Node
// OrigSym returns the original symbol written by the user.
func OrigSym(s *types.Sym) *types.Sym {
@@ -1489,7 +1489,7 @@ func OrigSym(s *types.Sym) *types.Sym {
// SliceBounds returns n's slice bounds: low, high, and max in expr[low:high:max].
// n must be a slice expression. max is nil if n is a simple slice expression.
-func (n *Node) SliceBounds() (low, high, max *Node) {
+func (n *node) SliceBounds() (low, high, max Node) {
if n.List().Len() == 0 {
return nil, nil, nil
}
@@ -1508,7 +1508,7 @@ func (n *Node) SliceBounds() (low, high, max *Node) {
// SetSliceBounds sets n's slice bounds, where n is a slice expression.
// n must be a slice expression. If max is non-nil, n must be a full slice expression.
-func (n *Node) SetSliceBounds(low, high, max *Node) {
+func (n *node) SetSliceBounds(low, high, max Node) {
switch n.Op() {
case OSLICE, OSLICEARR, OSLICESTR:
if max != nil {
@@ -1555,13 +1555,13 @@ func (o Op) IsSlice3() bool {
return false
}
-func IsConst(n *Node, ct constant.Kind) bool {
+func IsConst(n Node, ct constant.Kind) bool {
return ConstType(n) == ct
}
// Int64Val returns n as an int64.
// n must be an integer or rune constant.
-func (n *Node) Int64Val() int64 {
+func (n *node) Int64Val() int64 {
if !IsConst(n, constant.Int) {
base.Fatalf("Int64Val(%v)", n)
}
@@ -1573,7 +1573,7 @@ func (n *Node) Int64Val() int64 {
}
// CanInt64 reports whether it is safe to call Int64Val() on n.
-func (n *Node) CanInt64() bool {
+func (n *node) CanInt64() bool {
if !IsConst(n, constant.Int) {
return false
}
@@ -1586,7 +1586,7 @@ func (n *Node) CanInt64() bool {
// Uint64Val returns n as a uint64.
// n must be an integer or rune constant.
-func (n *Node) Uint64Val() uint64 {
+func (n *node) Uint64Val() uint64 {
if !IsConst(n, constant.Int) {
base.Fatalf("Uint64Val(%v)", n)
}
@@ -1599,7 +1599,7 @@ func (n *Node) Uint64Val() uint64 {
// BoolVal returns n as a bool.
// n must be a boolean constant.
-func (n *Node) BoolVal() bool {
+func (n *node) BoolVal() bool {
if !IsConst(n, constant.Bool) {
base.Fatalf("BoolVal(%v)", n)
}
@@ -1608,7 +1608,7 @@ func (n *Node) BoolVal() bool {
// StringVal returns the value of a literal string Node as a string.
// n must be a string constant.
-func (n *Node) StringVal() string {
+func (n *node) StringVal() string {
if !IsConst(n, constant.String) {
base.Fatalf("StringVal(%v)", n)
}
@@ -1618,14 +1618,14 @@ func (n *Node) StringVal() string {
// RawCopy returns a shallow copy of n.
// Note: Copy or SepCopy (rather than RawCopy) is usually the
// correct choice (see the comment with Copy, below).
-func (n *Node) RawCopy() *Node {
+func (n *node) RawCopy() Node {
copy := *n
return &copy
}
// SepCopy returns a separate shallow copy of n, with the copy's
// Orig pointing to itself.
-func SepCopy(n *Node) *Node {
+func SepCopy(n Node) Node {
n = n.RawCopy()
n.SetOrig(n)
return n
@@ -1638,7 +1638,7 @@ func SepCopy(n *Node) *Node {
// represent the original node anymore.
// (This caused the wrong complit Op to be used when printing error
// messages; see issues #26855, #27765).
-func Copy(n *Node) *Node {
+func Copy(n Node) Node {
copy := n.RawCopy()
if n.Orig() == n {
copy.SetOrig(copy)
@@ -1647,13 +1647,13 @@ func Copy(n *Node) *Node {
}
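
A sketch of the Orig-preserving copy semantics with a one-field toy struct (simplified assumption: orig is the only shared state):

    package main

    import "fmt"

    type n struct {
        orig *n
        op   string
    }

    func rawCopy(x *n) *n { c := *x; return &c }

    // sepCopy: the copy's orig points at the copy itself.
    func sepCopy(x *n) *n { c := rawCopy(x); c.orig = c; return c }

    // cpy: keep pointing at the original form, unless x was its own
    // orig, in which case the copy becomes its own orig too.
    func cpy(x *n) *n {
        c := rawCopy(x)
        if x.orig == x {
            c.orig = c
        }
        return c
    }

    func main() {
        a := &n{op: "add"}
        a.orig = a
        b := cpy(a)
        fmt.Println(b.orig == b) // true: b is its own original form
    }
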
// IsNil reports whether n represents the universal untyped zero value "nil".
-func IsNil(n *Node) bool {
+func IsNil(n Node) bool {
// Check n.Orig because constant propagation may produce typed nil constants,
// which don't exist in the Go spec.
return n.Orig().Op() == ONIL
}
-func IsBlank(n *Node) bool {
+func IsBlank(n Node) bool {
if n == nil {
return false
}
@@ -1662,6 +1662,6 @@ func IsBlank(n *Node) bool {
// IsMethod reports whether n is a method.
// n must be a function or a method.
-func IsMethod(n *Node) bool {
+func IsMethod(n Node) bool {
return n.Type().Recv() != nil
}
diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go
index 1ec89c338d..0a9542fa44 100644
--- a/src/cmd/compile/internal/ir/sizeof_test.go
+++ b/src/cmd/compile/internal/ir/sizeof_test.go
@@ -20,10 +20,10 @@ func TestSizeof(t *testing.T) {
_32bit uintptr // size on 32bit platforms
_64bit uintptr // size on 64bit platforms
}{
- {Func{}, 136, 248},
- {Name{}, 32, 56},
- {Param{}, 24, 48},
- {Node{}, 76, 128},
+ {Func{}, 152, 280},
+ {Name{}, 44, 80},
+ {Param{}, 44, 88},
+ {node{}, 88, 152},
}
for _, tt := range tests {
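
The size growth in this table is the direct cost of the interface conversion: on 64-bit, each former *Node field was one word, while a Node interface value is two (type word plus data word). Quick check:

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        var p *int
        var i interface{} = p
        fmt.Println(unsafe.Sizeof(p)) // 8 on 64-bit: one word
        fmt.Println(unsafe.Sizeof(i)) // 16 on 64-bit: two words
    }
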
diff --git a/src/cmd/compile/internal/ir/val.go b/src/cmd/compile/internal/ir/val.go
index 6bcee7c01c..9035e90084 100644
--- a/src/cmd/compile/internal/ir/val.go
+++ b/src/cmd/compile/internal/ir/val.go
@@ -12,7 +12,7 @@ import (
"cmd/compile/internal/types"
)
-func ConstType(n *Node) constant.Kind {
+func ConstType(n Node) constant.Kind {
if n == nil || n.Op() != OLITERAL {
return constant.Unknown
}
@@ -22,7 +22,7 @@ func ConstType(n *Node) constant.Kind {
// ConstValue returns the constant value stored in n as an interface{}.
// It returns int64s for ints and runes, float64s for floats,
// and complex128s for complex values.
-func ConstValue(n *Node) interface{} {
+func ConstValue(n Node) interface{} {
switch v := n.Val(); v.Kind() {
default:
base.Fatalf("unexpected constant: %v", v)
@@ -91,7 +91,7 @@ func ValidTypeForConst(t *types.Type, v constant.Value) bool {
}
// NewLiteral returns a new untyped constant with value v.
-func NewLiteral(v constant.Value) *Node {
+func NewLiteral(v constant.Value) Node {
n := Nod(OLITERAL, nil, nil)
if k := v.Kind(); k != constant.Unknown {
n.SetType(idealType(k))
diff --git a/src/cmd/compile/internal/mips/ssa.go b/src/cmd/compile/internal/mips/ssa.go
index 87e6f5b0c7..bd71b2fcd8 100644
--- a/src/cmd/compile/internal/mips/ssa.go
+++ b/src/cmd/compile/internal/mips/ssa.go
@@ -289,7 +289,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
- case *ir.Node:
+ case ir.Node:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
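
The backends switch on the dynamic type of v.Aux, so the case changes from the concrete *ir.Node to the ir.Node interface; an interface case matches any value whose dynamic type implements it. Toy sketch:

    package main

    import "fmt"

    type node interface{ Op() int }

    type lit struct{}

    func (*lit) Op() int { return 0 }

    func kind(aux interface{}) string {
        switch aux.(type) {
        case *string:
            return "symbol-ish"
        case node: // matches *lit, and any other implementation
            return "node"
        default:
            return "other"
        }
    }

    func main() {
        fmt.Println(kind(&lit{})) // "node"
    }
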
diff --git a/src/cmd/compile/internal/mips64/ssa.go b/src/cmd/compile/internal/mips64/ssa.go
index ea22c488aa..bcadebde4e 100644
--- a/src/cmd/compile/internal/mips64/ssa.go
+++ b/src/cmd/compile/internal/mips64/ssa.go
@@ -263,7 +263,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
- case *ir.Node:
+ case ir.Node:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go
index 848f27af84..32e9be8417 100644
--- a/src/cmd/compile/internal/ppc64/ssa.go
+++ b/src/cmd/compile/internal/ppc64/ssa.go
@@ -752,7 +752,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg()
}
- case *obj.LSym, *ir.Node:
+ case *obj.LSym, ir.Node:
p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_ADDR
p.From.Reg = v.Args[0].Reg()
diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go
index a3dc07fe03..c81b6897a6 100644
--- a/src/cmd/compile/internal/riscv64/ssa.go
+++ b/src/cmd/compile/internal/riscv64/ssa.go
@@ -324,7 +324,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
- case *ir.Node:
+ case ir.Node:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go
index 62abbdc223..eeabd81d03 100644
--- a/src/cmd/compile/internal/ssa/config.go
+++ b/src/cmd/compile/internal/ssa/config.go
@@ -139,7 +139,7 @@ type Frontend interface {
// Auto returns a Node for an auto variable of the given type.
// The SSA compiler uses this function to allocate space for spills.
- Auto(src.XPos, *types.Type) *ir.Node
+ Auto(src.XPos, *types.Type) ir.Node
// Given the name for a compound type, returns the name we should use
// for the parts of that compound type.
diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go
index 0f1cd4bc9f..f3ef33d670 100644
--- a/src/cmd/compile/internal/ssa/deadstore.go
+++ b/src/cmd/compile/internal/ssa/deadstore.go
@@ -137,9 +137,9 @@ func dse(f *Func) {
// reaches stores then we delete all the stores. The other operations will then
// be eliminated by the dead code elimination pass.
func elimDeadAutosGeneric(f *Func) {
- addr := make(map[*Value]*ir.Node) // values that the address of the auto reaches
- elim := make(map[*Value]*ir.Node) // values that could be eliminated if the auto is
- used := make(map[*ir.Node]bool) // used autos that must be kept
+ addr := make(map[*Value]ir.Node) // values that the address of the auto reaches
+ elim := make(map[*Value]ir.Node) // values that could be eliminated if the auto is
+ used := make(map[ir.Node]bool) // used autos that must be kept
// visit the value and report whether any of the maps are updated
visit := func(v *Value) (changed bool) {
@@ -147,7 +147,7 @@ func elimDeadAutosGeneric(f *Func) {
switch v.Op {
case OpAddr, OpLocalAddr:
// Propagate the address if it points to an auto.
- n, ok := v.Aux.(*ir.Node)
+ n, ok := v.Aux.(ir.Node)
if !ok || n.Class() != ir.PAUTO {
return
}
@@ -158,7 +158,7 @@ func elimDeadAutosGeneric(f *Func) {
return
case OpVarDef, OpVarKill:
// v should be eliminated if we eliminate the auto.
- n, ok := v.Aux.(*ir.Node)
+ n, ok := v.Aux.(ir.Node)
if !ok || n.Class() != ir.PAUTO {
return
}
@@ -174,7 +174,7 @@ func elimDeadAutosGeneric(f *Func) {
// for open-coded defers from being removed (since they
// may not be used by the inline code, but will be used by
// panic processing).
- n, ok := v.Aux.(*ir.Node)
+ n, ok := v.Aux.(ir.Node)
if !ok || n.Class() != ir.PAUTO {
return
}
@@ -222,7 +222,7 @@ func elimDeadAutosGeneric(f *Func) {
}
// Propagate any auto addresses through v.
- var node *ir.Node
+ var node ir.Node
for _, a := range args {
if n, ok := addr[a]; ok && !used[n] {
if node == nil {
@@ -299,11 +299,11 @@ func elimUnreadAutos(f *Func) {
// Loop over all ops that affect autos taking note of which
// autos we need and also stores that we might be able to
// eliminate.
- seen := make(map[*ir.Node]bool)
+ seen := make(map[ir.Node]bool)
var stores []*Value
for _, b := range f.Blocks {
for _, v := range b.Values {
- n, ok := v.Aux.(*ir.Node)
+ n, ok := v.Aux.(ir.Node)
if !ok {
continue
}
@@ -335,7 +335,7 @@ func elimUnreadAutos(f *Func) {
// Eliminate stores to unread autos.
for _, store := range stores {
- n, _ := store.Aux.(*ir.Node)
+ n, _ := store.Aux.(ir.Node)
if seen[n] {
continue
}
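
The comma-ok assertions above change the same way: v.Aux may hold other types, and asserting to the Node interface now succeeds for any implementation rather than only the one struct. A standalone sketch:

    package main

    import "fmt"

    type node interface{ Op() int }

    type lit struct{}

    func (*lit) Op() int { return 0 }

    func main() {
        var aux interface{} = &lit{}
        n, ok := aux.(node) // comma-ok form: no panic on mismatch
        if ok {
            fmt.Println(n.Op()) // 0
        }
        aux = "not a node"
        _, ok = aux.(node)
        fmt.Println(ok) // false
    }
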
diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go
index 9de5f427c0..0d660361b1 100644
--- a/src/cmd/compile/internal/ssa/debug.go
+++ b/src/cmd/compile/internal/ssa/debug.go
@@ -25,7 +25,7 @@ type FuncDebug struct {
// Slots is all the slots used in the debug info, indexed by their SlotID.
Slots []LocalSlot
// The user variables, indexed by VarID.
- Vars []*ir.Node
+ Vars []ir.Node
// The slots that make up each variable, indexed by VarID.
VarSlots [][]SlotID
// The location list data, indexed by VarID. Must be processed by PutLocationList.
@@ -166,7 +166,7 @@ func (s *debugState) logf(msg string, args ...interface{}) {
type debugState struct {
// See FuncDebug.
slots []LocalSlot
- vars []*ir.Node
+ vars []ir.Node
varSlots [][]SlotID
lists [][]byte
@@ -190,7 +190,7 @@ type debugState struct {
// The pending location list entry for each user variable, indexed by VarID.
pendingEntries []pendingEntry
- varParts map[*ir.Node][]SlotID
+ varParts map[ir.Node][]SlotID
blockDebug []BlockDebug
pendingSlotLocs []VarLoc
liveSlots []liveSlot
@@ -347,7 +347,7 @@ func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset fu
}
if state.varParts == nil {
- state.varParts = make(map[*ir.Node][]SlotID)
+ state.varParts = make(map[ir.Node][]SlotID)
} else {
for n := range state.varParts {
delete(state.varParts, n)
@@ -380,7 +380,7 @@ func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset fu
for _, b := range f.Blocks {
for _, v := range b.Values {
if v.Op == OpVarDef || v.Op == OpVarKill {
- n := v.Aux.(*ir.Node)
+ n := v.Aux.(ir.Node)
if ir.IsSynthetic(n) {
continue
}
@@ -718,7 +718,7 @@ func (state *debugState) processValue(v *Value, vSlots []SlotID, vReg *Register)
switch {
case v.Op == OpVarDef, v.Op == OpVarKill:
- n := v.Aux.(*ir.Node)
+ n := v.Aux.(ir.Node)
if ir.IsSynthetic(n) {
break
}
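
varParts (and seen in the previous file) becomes a map keyed by an interface value
rather than a pointer. Interface values are valid map keys whenever the dynamic type
is comparable, and with pointer implementations key equality is pointer identity, so
lookups behave exactly as they did with *ir.Node keys. A self-contained sketch with
stand-in types:

	package main

	import "fmt"

	type Node interface{ Name() string }

	type node struct{ name string }

	func (n *node) Name() string { return n.name }

	func main() {
		// With pointer implementations, interface-keyed maps compare
		// keys by pointer identity, not by contents.
		parts := make(map[Node][]int)

		a := &node{name: "x"}
		b := &node{name: "x"} // same contents, different pointer

		parts[a] = append(parts[a], 1)
		parts[b] = append(parts[b], 2)

		fmt.Println(len(parts)) // 2: distinct pointers are distinct keys
	}
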
diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go
index 3d142a2272..df83383308 100644
--- a/src/cmd/compile/internal/ssa/export_test.go
+++ b/src/cmd/compile/internal/ssa/export_test.go
@@ -69,7 +69,7 @@ type TestFrontend struct {
func (TestFrontend) StringData(s string) *obj.LSym {
return nil
}
-func (TestFrontend) Auto(pos src.XPos, t *types.Type) *ir.Node {
+func (TestFrontend) Auto(pos src.XPos, t *types.Type) ir.Node {
n := ir.NewNameAt(pos, &types.Sym{Name: "aFakeAuto"})
n.SetClass(ir.PAUTO)
return n
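
Auto's body compiles unchanged because the value built by ir.NewNameAt satisfies
ir.Node implicitly. One behavioral wrinkle worth keeping in mind whenever a *Node
result becomes an interface result: a typed nil pointer wrapped in an interface does
not compare equal to nil. A generic sketch, not compiler code:

	package main

	import "fmt"

	type Node interface{}

	type node struct{}

	func newNode(ok bool) *node {
		if !ok {
			return nil
		}
		return &node{}
	}

	func main() {
		// The interface holds a (type, nil) pair, so it is not itself nil.
		var n Node = newNode(false)
		fmt.Println(n == nil) // false
	}

Functions that want callers to test the interface result against nil must return a
literal nil, not a nil concrete pointer.
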
diff --git a/src/cmd/compile/internal/ssa/location.go b/src/cmd/compile/internal/ssa/location.go
index 2f456c9f89..3dc3a81703 100644
--- a/src/cmd/compile/internal/ssa/location.go
+++ b/src/cmd/compile/internal/ssa/location.go
@@ -60,7 +60,7 @@ func (r *Register) GCNum() int16 {
// { N: len, Type: int, Off: 0, SplitOf: parent, SplitOffset: 8}
// parent = &{N: s, Type: string}
type LocalSlot struct {
- N *ir.Node // an ONAME *gc.Node representing a stack location.
+ N ir.Node // an ONAME ir.Node representing a stack location.
Type *types.Type // type of slot
Off int64 // offset of slot in N
diff --git a/src/cmd/compile/internal/ssa/nilcheck.go b/src/cmd/compile/internal/ssa/nilcheck.go
index 3c1fa600a3..b36f6b97e1 100644
--- a/src/cmd/compile/internal/ssa/nilcheck.go
+++ b/src/cmd/compile/internal/ssa/nilcheck.go
@@ -236,7 +236,7 @@ func nilcheckelim2(f *Func) {
continue
}
if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
- if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(*ir.Node).Type().HasPointers()) {
+ if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(ir.Node).Type().HasPointers()) {
// These ops don't really change memory.
continue
// Note: OpVarDef requires that the defined variable not have pointers.
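
Unlike the comma-ok sites, this one uses the single-result assertion, which panics if
Aux is nil or holds a non-Node type; that matches the invariant that an OpVarDef is
expected to always carry a node. A sketch of the panic-on-mismatch form, again with
stand-in types:

	package main

	import "fmt"

	type Node interface{ HasPointers() bool }

	type node struct{ ptrs bool }

	func (n *node) HasPointers() bool { return n.ptrs }

	func main() {
		var aux interface{} = &node{ptrs: false}

		// Single-result form: panics if aux is nil or not a Node.
		// Appropriate where the invariant holds by construction.
		if !aux.(Node).HasPointers() {
			fmt.Println("no pointers: the op doesn't really change memory")
		}
	}
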
diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go
index 9841883939..459a9923f7 100644
--- a/src/cmd/compile/internal/ssa/regalloc.go
+++ b/src/cmd/compile/internal/ssa/regalloc.go
@@ -1249,7 +1249,7 @@ func (s *regAllocState) regalloc(f *Func) {
// This forces later liveness analysis to make the
// value live at this point.
v.SetArg(0, s.makeSpill(a, b))
- } else if _, ok := a.Aux.(*ir.Node); ok && vi.rematerializeable {
+ } else if _, ok := a.Aux.(ir.Node); ok && vi.rematerializeable {
// Rematerializeable value with a gc.Node. This is the address of
// a stack object (e.g. an LEAQ). Keep the object live.
// Change it to VarLive, which is what plive expects for locals.
diff --git a/src/cmd/compile/internal/ssa/sizeof_test.go b/src/cmd/compile/internal/ssa/sizeof_test.go
index a27002ee3a..60ada011e3 100644
--- a/src/cmd/compile/internal/ssa/sizeof_test.go
+++ b/src/cmd/compile/internal/ssa/sizeof_test.go
@@ -22,7 +22,7 @@ func TestSizeof(t *testing.T) {
}{
{Value{}, 72, 112},
{Block{}, 164, 304},
- {LocalSlot{}, 28, 40},
+ {LocalSlot{}, 32, 48},
{valState{}, 28, 40},
}
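
The LocalSlot growth is exactly one machine word: a plain pointer is one word, while
an interface header is two (a type word and a data word), so 28+4 = 32 on 32-bit and
40+8 = 48 on 64-bit. A quick check of the word sizes (prints 8 and 16 on a 64-bit
platform):

	package main

	import (
		"fmt"
		"unsafe"
	)

	type Node interface{}

	func main() {
		var p *int
		var i Node

		// A pointer is one word; an interface value is two
		// (type word + data word).
		fmt.Println(unsafe.Sizeof(p), unsafe.Sizeof(i))
	}
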
diff --git a/src/cmd/compile/internal/ssa/stackalloc.go b/src/cmd/compile/internal/ssa/stackalloc.go
index eee0a21a66..5257d44cfe 100644
--- a/src/cmd/compile/internal/ssa/stackalloc.go
+++ b/src/cmd/compile/internal/ssa/stackalloc.go
@@ -157,7 +157,7 @@ func (s *stackAllocState) stackalloc() {
if v.Aux == nil {
f.Fatalf("%s has nil Aux\n", v.LongString())
}
- loc := LocalSlot{N: v.Aux.(*ir.Node), Type: v.Type, Off: v.AuxInt}
+ loc := LocalSlot{N: v.Aux.(ir.Node), Type: v.Type, Off: v.AuxInt}
if f.pass.debug > stackDebug {
fmt.Printf("stackalloc %s to %s\n", v, loc)
}
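
In the wasm lowering below, the switch on v.Aux gains an interface case: case ir.Node
matches any dynamic type implementing the interface, and type-switch cases are tried
in order, so the switch keeps its old shape only because *obj.LSym does not implement
ir.Node and the cases stay disjoint. A standalone sketch with stand-in types:

	package main

	import "fmt"

	type Node interface{ node() }

	type name struct{}

	func (*name) node() {}

	type lsym struct{} // stand-in for *obj.LSym; does not implement Node

	func describe(aux interface{}) string {
		// An interface case matches any implementation; cases are
		// tried top to bottom.
		switch aux.(type) {
		case *lsym:
			return "symbol"
		case Node:
			return "node"
		default:
			return "unknown"
		}
	}

	func main() {
		fmt.Println(describe(&lsym{}), describe(&name{}), describe(42))
	}
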
diff --git a/src/cmd/compile/internal/wasm/ssa.go b/src/cmd/compile/internal/wasm/ssa.go
index 1a8b5691ef..e7451381b4 100644
--- a/src/cmd/compile/internal/wasm/ssa.go
+++ b/src/cmd/compile/internal/wasm/ssa.go
@@ -237,7 +237,7 @@ func ssaGenValueOnStack(s *gc.SSAGenState, v *ssa.Value, extend bool) {
switch v.Aux.(type) {
case *obj.LSym:
gc.AddAux(&p.From, v)
- case *ir.Node:
+ case ir.Node:
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
default: