author    Russ Cox <rsc@golang.org>  2020-11-25 12:44:11 -0500
committer Russ Cox <rsc@golang.org>  2020-11-25 13:15:41 -0500
commit    5c2e14872c70a8ad9fd27033a2451531c6c00c0e
tree      394a86bb4ae567d3e1a19ba67d51049ad504bfa0
parent    2c25cd5ba7772a97ee63787e3986b6ec231e8c3d
parent    41f3af9d04362a56c1af186af134c704a03fa97b
[dev.typeparams] merge dev.regabi 41f3af9d04 into dev.typeparams

This brings in the new ir.Node interface, replacing *gc.Node.

Change-Id: I82c623655eee08d77d623babf22ec4d91f9aa3cd
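
The heart of the change: the compiler's AST node type moves from the concrete *gc.Node pointer to the ir.Node interface, so direct field access becomes accessor calls (n.List becomes n.List()/n.PtrList(), n.Type becomes n.Type()/n.SetType(t), n.Func becomes n.Func(), and so on, as the hunks below show). A minimal sketch of what client code looks like on each side; the function name visit is invented for illustration and this is not code from the CL:

	// Before: gc.Node was a concrete struct, passed around as *gc.Node.
	//     func visit(n *gc.Node) {
	//         for _, x := range n.List.Slice() { visit(x) }
	//     }
	//
	// After: ir.Node is an interface, and fields are reached via accessors.
	func visit(n ir.Node) {
		if n == nil {
			return
		}
		for _, x := range n.List().Slice() { // read side: List()
			visit(x)
		}
		// Mutation goes through the Ptr accessors, e.g. n.PtrList().Append(...),
		// as in the alg.go hunks below.
	}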
-rw-r--r-- doc/diagnostics.html | 2
-rw-r--r-- doc/go1.16.html | 4
-rw-r--r-- src/cmd/cgo/out.go | 23
-rw-r--r-- src/cmd/compile/fmtmap_test.go | 57
-rw-r--r-- src/cmd/compile/internal/amd64/ggen.go | 3
-rw-r--r-- src/cmd/compile/internal/amd64/ssa.go | 9
-rw-r--r-- src/cmd/compile/internal/arm/ssa.go | 10
-rw-r--r-- src/cmd/compile/internal/arm64/ssa.go | 10
-rw-r--r-- src/cmd/compile/internal/base/base.go | 28
-rw-r--r-- src/cmd/compile/internal/base/debug.go | 194
-rw-r--r-- src/cmd/compile/internal/base/flag.go | 454
-rw-r--r-- src/cmd/compile/internal/base/print.go | 260
-rw-r--r-- src/cmd/compile/internal/gc/alg.go | 384
-rw-r--r-- src/cmd/compile/internal/gc/align.go | 123
-rw-r--r-- src/cmd/compile/internal/gc/bexport.go | 73
-rw-r--r-- src/cmd/compile/internal/gc/bimport.go | 15
-rw-r--r-- src/cmd/compile/internal/gc/bootstrap.go | 7
-rw-r--r-- src/cmd/compile/internal/gc/builtin.go | 219
-rw-r--r-- src/cmd/compile/internal/gc/bv.go | 12
-rw-r--r-- src/cmd/compile/internal/gc/closure.go | 429
-rw-r--r-- src/cmd/compile/internal/gc/const.go | 1349
-rw-r--r-- src/cmd/compile/internal/gc/dcl.go | 662
-rw-r--r-- src/cmd/compile/internal/gc/dep_test.go | 2
-rw-r--r-- src/cmd/compile/internal/gc/dwinl.go | 67
-rw-r--r-- src/cmd/compile/internal/gc/embed.go | 107
-rw-r--r-- src/cmd/compile/internal/gc/esc.go | 472
-rw-r--r-- src/cmd/compile/internal/gc/escape.go | 1260
-rw-r--r-- src/cmd/compile/internal/gc/export.go | 136
-rw-r--r-- src/cmd/compile/internal/gc/gen.go | 50
-rw-r--r-- src/cmd/compile/internal/gc/go.go | 154
-rw-r--r-- src/cmd/compile/internal/gc/gsubr.go | 90
-rw-r--r-- src/cmd/compile/internal/gc/iexport.go | 753
-rw-r--r-- src/cmd/compile/internal/gc/iimport.go | 543
-rw-r--r-- src/cmd/compile/internal/gc/init.go | 45
-rw-r--r-- src/cmd/compile/internal/gc/initorder.go | 167
-rw-r--r-- src/cmd/compile/internal/gc/inl.go | 949
-rw-r--r-- src/cmd/compile/internal/gc/lex.go | 95
-rw-r--r-- src/cmd/compile/internal/gc/main.go | 862
-rw-r--r-- src/cmd/compile/internal/gc/mkbuiltin.go | 13
-rw-r--r-- src/cmd/compile/internal/gc/mpfloat.go | 357
-rw-r--r-- src/cmd/compile/internal/gc/mpint.go | 304
-rw-r--r-- src/cmd/compile/internal/gc/noder.go | 790
-rw-r--r-- src/cmd/compile/internal/gc/obj.go | 285
-rw-r--r-- src/cmd/compile/internal/gc/op_string.go | 175
-rw-r--r-- src/cmd/compile/internal/gc/order.go | 928
-rw-r--r-- src/cmd/compile/internal/gc/pgen.go | 425
-rw-r--r-- src/cmd/compile/internal/gc/pgen_test.go | 170
-rw-r--r-- src/cmd/compile/internal/gc/phi.go | 37
-rw-r--r-- src/cmd/compile/internal/gc/plive.go | 154
-rw-r--r-- src/cmd/compile/internal/gc/racewalk.go | 38
-rw-r--r-- src/cmd/compile/internal/gc/range.go | 378
-rw-r--r-- src/cmd/compile/internal/gc/reflect.go | 458
-rw-r--r-- src/cmd/compile/internal/gc/scc.go | 60
-rw-r--r-- src/cmd/compile/internal/gc/scope.go | 20
-rw-r--r-- src/cmd/compile/internal/gc/select.go | 326
-rw-r--r-- src/cmd/compile/internal/gc/sinit.go | 738
-rw-r--r-- src/cmd/compile/internal/gc/ssa.go | 3267
-rw-r--r-- src/cmd/compile/internal/gc/subr.go | 1028
-rw-r--r-- src/cmd/compile/internal/gc/swt.go | 465
-rw-r--r-- src/cmd/compile/internal/gc/trace.go | 8
-rw-r--r-- src/cmd/compile/internal/gc/typecheck.go | 3029
-rw-r--r-- src/cmd/compile/internal/gc/types.go | 53
-rw-r--r-- src/cmd/compile/internal/gc/types_acc.go | 8
-rw-r--r-- src/cmd/compile/internal/gc/universe.go | 425
-rw-r--r-- src/cmd/compile/internal/gc/unsafe.go | 65
-rw-r--r-- src/cmd/compile/internal/gc/util.go | 68
-rw-r--r-- src/cmd/compile/internal/gc/walk.go | 2745
-rw-r--r-- src/cmd/compile/internal/ir/bitset.go (renamed from src/cmd/compile/internal/gc/bitset.go) | 2
-rw-r--r-- src/cmd/compile/internal/ir/class_string.go (renamed from src/cmd/compile/internal/gc/class_string.go) | 2
-rw-r--r-- src/cmd/compile/internal/ir/dump.go (renamed from src/cmd/compile/internal/gc/dump.go) | 23
-rw-r--r-- src/cmd/compile/internal/ir/fmt.go (renamed from src/cmd/compile/internal/gc/fmt.go) | 974
-rw-r--r-- src/cmd/compile/internal/ir/ir.go | 12
-rw-r--r-- src/cmd/compile/internal/ir/node.go (renamed from src/cmd/compile/internal/gc/syntax.go) | 950
-rw-r--r-- src/cmd/compile/internal/ir/op_string.go | 177
-rw-r--r-- src/cmd/compile/internal/ir/sizeof_test.go (renamed from src/cmd/compile/internal/gc/sizeof_test.go) | 10
-rw-r--r-- src/cmd/compile/internal/ir/val.go | 120
-rw-r--r-- src/cmd/compile/internal/mips/ggen.go | 5
-rw-r--r-- src/cmd/compile/internal/mips/ssa.go | 10
-rw-r--r-- src/cmd/compile/internal/mips64/ssa.go | 10
-rw-r--r-- src/cmd/compile/internal/ppc64/ggen.go | 9
-rw-r--r-- src/cmd/compile/internal/ppc64/ssa.go | 12
-rw-r--r-- src/cmd/compile/internal/riscv64/ggen.go | 3
-rw-r--r-- src/cmd/compile/internal/riscv64/ssa.go | 18
-rw-r--r-- src/cmd/compile/internal/s390x/ggen.go | 3
-rw-r--r-- src/cmd/compile/internal/s390x/ssa.go | 7
-rw-r--r-- src/cmd/compile/internal/ssa/config.go | 21
-rw-r--r-- src/cmd/compile/internal/ssa/deadstore.go | 29
-rw-r--r-- src/cmd/compile/internal/ssa/debug.go | 21
-rw-r--r-- src/cmd/compile/internal/ssa/expand_calls.go | 2
-rw-r--r-- src/cmd/compile/internal/ssa/export_test.go | 108
-rw-r--r-- src/cmd/compile/internal/ssa/location.go | 3
-rw-r--r-- src/cmd/compile/internal/ssa/nilcheck.go | 3
-rw-r--r-- src/cmd/compile/internal/ssa/poset.go | 54
-rw-r--r-- src/cmd/compile/internal/ssa/regalloc.go | 5
-rw-r--r-- src/cmd/compile/internal/ssa/stackalloc.go | 3
-rw-r--r-- src/cmd/compile/internal/syntax/dumper_test.go | 2
-rw-r--r-- src/cmd/compile/internal/syntax/nodes.go | 2
-rw-r--r-- src/cmd/compile/internal/syntax/printer_test.go | 2
-rw-r--r-- src/cmd/compile/internal/types/scope.go | 8
-rw-r--r-- src/cmd/compile/internal/types/sizeof_test.go | 6
-rw-r--r-- src/cmd/compile/internal/types/sym.go | 4
-rw-r--r-- src/cmd/compile/internal/types/type.go | 64
-rw-r--r-- src/cmd/compile/internal/wasm/ssa.go | 10
-rw-r--r-- src/cmd/compile/internal/x86/galign.go | 5
-rw-r--r-- src/cmd/compile/internal/x86/ssa.go | 13
-rw-r--r-- src/cmd/compile/main.go | 3
-rw-r--r-- src/cmd/dist/buildtool.go | 4
-rw-r--r-- src/cmd/go/internal/base/flag.go | 2
-rw-r--r-- src/cmd/go/internal/fsys/fsys.go | 16
-rw-r--r-- src/cmd/go/internal/lockedfile/lockedfile_filelock.go | 3
-rw-r--r-- src/cmd/go/internal/lockedfile/lockedfile_plan9.go | 6
-rw-r--r-- src/cmd/go/internal/modcmd/vendor.go | 3
-rw-r--r-- src/cmd/go/internal/modget/get.go | 147
-rw-r--r-- src/cmd/go/internal/modload/build.go | 2
-rw-r--r-- src/cmd/go/internal/modload/buildlist.go | 12
-rw-r--r-- src/cmd/go/internal/modload/import.go | 4
-rw-r--r-- src/cmd/go/internal/modload/init.go | 14
-rw-r--r-- src/cmd/go/internal/modload/load.go | 4
-rw-r--r-- src/cmd/go/internal/modload/modfile.go | 6
-rw-r--r-- src/cmd/go/internal/modload/mvs.go | 11
-rw-r--r-- src/cmd/go/internal/modload/mvs_test.go | 8
-rw-r--r-- src/cmd/go/internal/search/search.go | 2
-rw-r--r-- src/cmd/go/internal/work/build.go | 12
-rw-r--r-- src/cmd/go/testdata/mod/example.com_retract_incompatible_v1.0.0.txt | 19
-rw-r--r-- src/cmd/go/testdata/mod/example.com_retract_incompatible_v2.0.0+incompatible.txt | 9
-rw-r--r-- src/cmd/go/testdata/script/mod_get_changes.txt | 70
-rw-r--r-- src/cmd/go/testdata/script/mod_get_retract.txt | 1
-rw-r--r-- src/cmd/go/testdata/script/mod_gonoproxy.txt | 6
-rw-r--r-- src/cmd/go/testdata/script/mod_overlay.txt | 254
-rw-r--r-- src/cmd/go/testdata/script/mod_retract_incompatible.txt | 15
-rw-r--r-- src/cmd/internal/pkgpath/pkgpath.go | 68
-rw-r--r-- src/cmd/internal/pkgpath/pkgpath_test.go | 22
-rw-r--r-- src/go/constant/value.go | 40
-rw-r--r-- src/go/constant/value_test.go | 21
-rw-r--r-- src/internal/poll/copy_file_range_linux.go | 1
-rw-r--r-- src/net/sock_linux.go | 1
-rw-r--r-- src/runtime/metrics/description.go | 9
-rw-r--r-- src/runtime/metrics/doc.go | 7
-rw-r--r-- src/strconv/bytealg.go | 14
-rw-r--r-- src/strconv/bytealg_bootstrap.go | 17
-rw-r--r-- src/strconv/eisel_lemire.go | 16
-rw-r--r-- src/strconv/quote.go | 6
-rw-r--r-- src/sync/atomic/doc.go | 11
-rw-r--r-- src/sync/once.go | 2
-rw-r--r-- src/syscall/dll_windows.go | 2
-rw-r--r-- test/const2.go | 11
-rw-r--r-- test/fixedbugs/bug340.go | 3
-rw-r--r-- test/fixedbugs/issue20232.go | 5
-rw-r--r-- test/fixedbugs/issue42727.go | 23
-rw-r--r-- test/fixedbugs/issue42753.go | 13
-rw-r--r-- test/fixedbugs/issue42790.go | 9
151 files changed, 15225 insertions, 15317 deletions
diff --git a/doc/diagnostics.html b/doc/diagnostics.html
index f9368886c4..438cdce45f 100644
--- a/doc/diagnostics.html
+++ b/doc/diagnostics.html
@@ -455,7 +455,7 @@ environmental variable is set accordingly.</p>
each collection, summarizing the amount of memory collected
and the length of the pause.</li>
<li>GODEBUG=inittrace=1 prints a summary of execution time and memory allocation
-information for completed package initilization work.</li>
+information for completed package initialization work.</li>
<li>GODEBUG=schedtrace=X prints scheduling events every X milliseconds.</li>
</ul>
diff --git a/doc/go1.16.html b/doc/go1.16.html
index a2f39893be..92cadff713 100644
--- a/doc/go1.16.html
+++ b/doc/go1.16.html
@@ -501,6 +501,10 @@ Do not send CLs removing the interior tags from such phrases.
<p><!-- CL 261917 -->
<a href="/pkg/syscall/#SysProcAttr"><code>SysProcAttr</code></a> on Windows has a new NoInheritHandles field that disables inheriting handles when creating a new process.
</p>
+
+ <p><!-- CL 269761, golang.org/issue/42584 -->
+ <a href="/pkg/syscall/#DLLError"><code>DLLError</code></a> on Windows now has an Unwrap function for unwrapping its underlying error.
+ </p>
</dd>
</dl><!-- syscall -->
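
Since DLLError now implements Unwrap, it composes with the errors package. A hedged, Windows-only sketch of what that enables (the missing-DLL name is invented):

	package main

	import (
		"errors"
		"fmt"
		"syscall"
	)

	func main() {
		_, err := syscall.LoadDLL("no-such-library.dll")
		var dllErr *syscall.DLLError
		if errors.As(err, &dllErr) {
			// New in this CL: Unwrap exposes the underlying error,
			// so errors.Is/errors.Unwrap can see through DLLError.
			fmt.Println("underlying:", errors.Unwrap(err))
		}
	}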
diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go
index bb963799f6..11c53facf8 100644
--- a/src/cmd/cgo/out.go
+++ b/src/cmd/cgo/out.go
@@ -186,7 +186,7 @@ func (p *Package) writeDefs() {
panic(fmt.Errorf("invalid var kind %q", n.Kind))
}
if *gccgo {
- fmt.Fprintf(fc, `extern void *%s __asm__("%s.%s");`, n.Mangle, gccgoSymbolPrefix, n.Mangle)
+ fmt.Fprintf(fc, `extern void *%s __asm__("%s.%s");`, n.Mangle, gccgoSymbolPrefix, gccgoToSymbol(n.Mangle))
fmt.Fprintf(&gccgoInit, "\t%s = &%s;\n", n.Mangle, n.C)
fmt.Fprintf(fc, "\n")
}
@@ -1148,7 +1148,7 @@ func (p *Package) writeGccgoExports(fgo2, fm, fgcc, fgcch io.Writer) {
// will not be able to link against it from the C
// code.
goName := "Cgoexp_" + exp.ExpName
- fmt.Fprintf(fgcc, `extern %s %s %s __asm__("%s.%s");`, cRet, goName, cParams, gccgoSymbolPrefix, goName)
+ fmt.Fprintf(fgcc, `extern %s %s %s __asm__("%s.%s");`, cRet, goName, cParams, gccgoSymbolPrefix, gccgoToSymbol(goName))
fmt.Fprint(fgcc, "\n")
fmt.Fprint(fgcc, "\nCGO_NO_SANITIZE_THREAD\n")
@@ -1182,7 +1182,7 @@ func (p *Package) writeGccgoExports(fgo2, fm, fgcc, fgcch io.Writer) {
fmt.Fprint(fgcc, "}\n")
// Dummy declaration for _cgo_main.c
- fmt.Fprintf(fm, `char %s[1] __asm__("%s.%s");`, goName, gccgoSymbolPrefix, goName)
+ fmt.Fprintf(fm, `char %s[1] __asm__("%s.%s");`, goName, gccgoSymbolPrefix, gccgoToSymbol(goName))
fmt.Fprint(fm, "\n")
// For gccgo we use a wrapper function in Go, in order
@@ -1266,9 +1266,8 @@ func (p *Package) writeExportHeader(fgcch io.Writer) {
fmt.Fprintf(fgcch, "%s\n", p.gccExportHeaderProlog())
}
-// gccgoPkgpathToSymbol converts a package path to a mangled packagepath
-// symbol.
-func gccgoPkgpathToSymbol(ppath string) string {
+// gccgoToSymbol converts a name to a mangled symbol for gccgo.
+func gccgoToSymbol(ppath string) string {
if gccgoMangler == nil {
var err error
cmd := os.Getenv("GCCGO")
@@ -1293,12 +1292,12 @@ func (p *Package) gccgoSymbolPrefix() string {
}
if *gccgopkgpath != "" {
- return gccgoPkgpathToSymbol(*gccgopkgpath)
+ return gccgoToSymbol(*gccgopkgpath)
}
if *gccgoprefix == "" && p.PackageName == "main" {
return "main"
}
- prefix := gccgoPkgpathToSymbol(*gccgoprefix)
+ prefix := gccgoToSymbol(*gccgoprefix)
if prefix == "" {
prefix = "go"
}
@@ -1687,8 +1686,12 @@ void _cgoPREFIX_Cfunc__Cmalloc(void *v) {
`
func (p *Package) cPrologGccgo() string {
- return strings.Replace(strings.Replace(cPrologGccgo, "PREFIX", cPrefix, -1),
- "GCCGOSYMBOLPREF", p.gccgoSymbolPrefix(), -1)
+ r := strings.NewReplacer(
+ "PREFIX", cPrefix,
+ "GCCGOSYMBOLPREF", p.gccgoSymbolPrefix(),
+ "_cgoCheckPointer", gccgoToSymbol("_cgoCheckPointer"),
+ "_cgoCheckResult", gccgoToSymbol("_cgoCheckResult"))
+ return r.Replace(cPrologGccgo)
}
const cPrologGccgo = `
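
A note on the cPrologGccgo rewrite above: strings.NewReplacer performs all substitutions in a single pass over the input, so one replacement's output can never be re-matched by another pattern, and the pattern list scales more cleanly than chained strings.Replace calls now that _cgoCheckPointer and _cgoCheckResult must be mangled too. A self-contained illustration with invented values:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		// One pass, non-overlapping matches, left to right.
		r := strings.NewReplacer(
			"PREFIX", "p1",
			"GCCGOSYMBOLPREF", "go_mypkg",
		)
		fmt.Println(r.Replace("void PREFIX_Cfunc_f() { GCCGOSYMBOLPREF.f(); }"))
		// Output: void p1_Cfunc_f() { go_mypkg.f(); }
	}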
diff --git a/src/cmd/compile/fmtmap_test.go b/src/cmd/compile/fmtmap_test.go
index a7c1eca9db..3b94b05bfb 100644
--- a/src/cmd/compile/fmtmap_test.go
+++ b/src/cmd/compile/fmtmap_test.go
@@ -22,16 +22,7 @@ package main_test
var knownFormats = map[string]string{
"*bytes.Buffer %s": "",
"*cmd/compile/internal/gc.EscLocation %v": "",
- "*cmd/compile/internal/gc.Mpflt %v": "",
- "*cmd/compile/internal/gc.Mpint %v": "",
- "*cmd/compile/internal/gc.Node %#v": "",
- "*cmd/compile/internal/gc.Node %+S": "",
- "*cmd/compile/internal/gc.Node %+v": "",
- "*cmd/compile/internal/gc.Node %L": "",
- "*cmd/compile/internal/gc.Node %S": "",
- "*cmd/compile/internal/gc.Node %j": "",
- "*cmd/compile/internal/gc.Node %p": "",
- "*cmd/compile/internal/gc.Node %v": "",
+ "*cmd/compile/internal/ir.node %v": "",
"*cmd/compile/internal/ssa.Block %s": "",
"*cmd/compile/internal/ssa.Block %v": "",
"*cmd/compile/internal/ssa.Func %s": "",
@@ -54,7 +45,6 @@ var knownFormats = map[string]string{
"*cmd/compile/internal/types.Sym %v": "",
"*cmd/compile/internal/types.Type %#L": "",
"*cmd/compile/internal/types.Type %#v": "",
- "*cmd/compile/internal/types.Type %+v": "",
"*cmd/compile/internal/types.Type %-S": "",
"*cmd/compile/internal/types.Type %0S": "",
"*cmd/compile/internal/types.Type %L": "",
@@ -84,9 +74,7 @@ var knownFormats = map[string]string{
"*cmd/internal/obj.Addr %v": "",
"*cmd/internal/obj.LSym %v": "",
"*math/big.Float %f": "",
- "*math/big.Int %#x": "",
"*math/big.Int %s": "",
- "*math/big.Int %v": "",
"[16]byte %x": "",
"[]*cmd/compile/internal/ssa.Block %v": "",
"[]*cmd/compile/internal/ssa.Value %v": "",
@@ -110,27 +98,28 @@ var knownFormats = map[string]string{
"byte %q": "",
"byte %v": "",
"cmd/compile/internal/arm.shift %d": "",
- "cmd/compile/internal/gc.Class %d": "",
- "cmd/compile/internal/gc.Class %s": "",
- "cmd/compile/internal/gc.Class %v": "",
- "cmd/compile/internal/gc.Ctype %d": "",
- "cmd/compile/internal/gc.Ctype %v": "",
- "cmd/compile/internal/gc.Nodes %#v": "",
- "cmd/compile/internal/gc.Nodes %+v": "",
- "cmd/compile/internal/gc.Nodes %.v": "",
- "cmd/compile/internal/gc.Nodes %v": "",
- "cmd/compile/internal/gc.Op %#v": "",
- "cmd/compile/internal/gc.Op %v": "",
- "cmd/compile/internal/gc.Val %#v": "",
- "cmd/compile/internal/gc.Val %T": "",
- "cmd/compile/internal/gc.Val %v": "",
- "cmd/compile/internal/gc.fmtMode %d": "",
"cmd/compile/internal/gc.initKind %d": "",
"cmd/compile/internal/gc.itag %v": "",
"cmd/compile/internal/importer.itag %v": "",
+ "cmd/compile/internal/ir.Class %d": "",
+ "cmd/compile/internal/ir.Class %v": "",
+ "cmd/compile/internal/ir.FmtMode %d": "",
+ "cmd/compile/internal/ir.Node %#v": "",
+ "cmd/compile/internal/ir.Node %+S": "",
+ "cmd/compile/internal/ir.Node %+v": "",
+ "cmd/compile/internal/ir.Node %L": "",
+ "cmd/compile/internal/ir.Node %S": "",
+ "cmd/compile/internal/ir.Node %j": "",
+ "cmd/compile/internal/ir.Node %p": "",
+ "cmd/compile/internal/ir.Node %v": "",
+ "cmd/compile/internal/ir.Nodes %#v": "",
+ "cmd/compile/internal/ir.Nodes %+v": "",
+ "cmd/compile/internal/ir.Nodes %.v": "",
+ "cmd/compile/internal/ir.Nodes %v": "",
+ "cmd/compile/internal/ir.Op %#v": "",
+ "cmd/compile/internal/ir.Op %v": "",
"cmd/compile/internal/ssa.BranchPrediction %d": "",
"cmd/compile/internal/ssa.Edge %v": "",
- "cmd/compile/internal/ssa.GCNode %v": "",
"cmd/compile/internal/ssa.ID %d": "",
"cmd/compile/internal/ssa.ID %v": "",
"cmd/compile/internal/ssa.LocalSlot %s": "",
@@ -179,9 +168,11 @@ var knownFormats = map[string]string{
"error %v": "",
"float64 %.2f": "",
"float64 %.3f": "",
- "float64 %.6g": "",
"float64 %g": "",
+ "go/constant.Kind %v": "",
+ "go/constant.Value %#v": "",
"go/constant.Value %s": "",
+ "go/constant.Value %v": "",
"int %#x": "",
"int %-12d": "",
"int %-6d": "",
@@ -199,7 +190,6 @@ var knownFormats = map[string]string{
"int32 %v": "",
"int32 %x": "",
"int64 %#x": "",
- "int64 %+d": "",
"int64 %-10d": "",
"int64 %.5d": "",
"int64 %d": "",
@@ -214,13 +204,14 @@ var knownFormats = map[string]string{
"interface{} %q": "",
"interface{} %s": "",
"interface{} %v": "",
- "map[*cmd/compile/internal/gc.Node]*cmd/compile/internal/ssa.Value %v": "",
- "map[*cmd/compile/internal/gc.Node][]*cmd/compile/internal/gc.Node %v": "",
"map[*cmd/compile/internal/types2.TypeParam]cmd/compile/internal/types2.Type %s": "",
+ "map[cmd/compile/internal/ir.Node]*cmd/compile/internal/ssa.Value %v": "",
+ "map[cmd/compile/internal/ir.Node][]cmd/compile/internal/ir.Node %v": "",
"map[cmd/compile/internal/ssa.ID]uint32 %v": "",
"map[int64]uint32 %v": "",
"math/big.Accuracy %s": "",
"reflect.Type %s": "",
+ "reflect.Type %v": "",
"rune %#U": "",
"rune %c": "",
"rune %q": "",
diff --git a/src/cmd/compile/internal/amd64/ggen.go b/src/cmd/compile/internal/amd64/ggen.go
index 0c1456f4d0..ec98b8cca1 100644
--- a/src/cmd/compile/internal/amd64/ggen.go
+++ b/src/cmd/compile/internal/amd64/ggen.go
@@ -5,6 +5,7 @@
package amd64
import (
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/internal/obj"
"cmd/internal/obj/x86"
@@ -64,7 +65,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Pr
if cnt%int64(gc.Widthreg) != 0 {
// should only happen with nacl
if cnt%int64(gc.Widthptr) != 0 {
- gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
+ base.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
}
if *state&ax == 0 {
p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index 5ff05a0edd..5e3b962076 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -8,6 +8,7 @@ import (
"fmt"
"math"
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
@@ -975,7 +976,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r := v.Reg()
// See the comments in cmd/internal/obj/x86/obj6.go
// near CanUse1InsnTLS for a detailed explanation of these instructions.
- if x86.CanUse1InsnTLS(gc.Ctxt) {
+ if x86.CanUse1InsnTLS(base.Ctxt) {
// MOVQ (TLS), r
p := s.Prog(x86.AMOVQ)
p.From.Type = obj.TYPE_MEM
@@ -1017,7 +1018,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
p := s.Prog(mov)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -gc.Ctxt.FixedFrameSize() // 0 on amd64, just to be consistent with other architectures
+ p.From.Offset = -base.Ctxt.FixedFrameSize() // 0 on amd64, just to be consistent with other architectures
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -1164,8 +1165,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
- if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
- gc.Warnl(v.Pos, "generated nil check")
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpAMD64MOVBatomicload, ssa.OpAMD64MOVLatomicload, ssa.OpAMD64MOVQatomicload:
p := s.Prog(v.Op.Asm())
diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go
index 765a771546..b34e2973b2 100644
--- a/src/cmd/compile/internal/arm/ssa.go
+++ b/src/cmd/compile/internal/arm/ssa.go
@@ -9,7 +9,9 @@ import (
"math"
"math/bits"
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
@@ -544,7 +546,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
- case *gc.Node:
+ case ir.Node:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
@@ -741,8 +743,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
- if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
- gc.Warnl(v.Pos, "generated nil check")
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpARMLoweredZero:
// MOVW.P Rarg2, 4(R1)
@@ -849,7 +851,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(arm.AMOVW)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
index 22b28a9308..d5bd9687cf 100644
--- a/src/cmd/compile/internal/arm64/ssa.go
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -7,7 +7,9 @@ package arm64
import (
"math"
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
@@ -394,7 +396,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
- case *gc.Node:
+ case ir.Node:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
@@ -1038,8 +1040,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
- if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Line==1 in generated wrappers
- gc.Warnl(v.Pos, "generated nil check")
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Line==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpARM64Equal,
ssa.OpARM64NotEqual,
@@ -1068,7 +1070,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(arm64.AMOVD)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
diff --git a/src/cmd/compile/internal/base/base.go b/src/cmd/compile/internal/base/base.go
new file mode 100644
index 0000000000..e26b378472
--- /dev/null
+++ b/src/cmd/compile/internal/base/base.go
@@ -0,0 +1,28 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "os"
+
+ "cmd/internal/obj"
+)
+
+var Ctxt *obj.Link
+
+var atExitFuncs []func()
+
+func AtExit(f func()) {
+ atExitFuncs = append(atExitFuncs, f)
+}
+
+func Exit(code int) {
+ for i := len(atExitFuncs) - 1; i >= 0; i-- {
+ f := atExitFuncs[i]
+ atExitFuncs = atExitFuncs[:i]
+ f()
+ }
+ os.Exit(code)
+}
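
Worth noting about Exit above: the registered functions run in LIFO order, and each is removed from atExitFuncs before being called, so a cleanup that itself triggers Exit cannot re-run earlier entries. A runnable model of just the ordering (names invented; os.Exit omitted so the output stays visible):

	package main

	import "fmt"

	var atExitFuncs []func()

	func atExit(f func()) { atExitFuncs = append(atExitFuncs, f) }

	func runAtExit() {
		for i := len(atExitFuncs) - 1; i >= 0; i-- {
			f := atExitFuncs[i]
			atExitFuncs = atExitFuncs[:i] // drop before calling: safe if f re-enters
			f()
		}
	}

	func main() {
		atExit(func() { fmt.Println("registered first, runs last") })
		atExit(func() { fmt.Println("registered last, runs first") })
		runAtExit()
	}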
diff --git a/src/cmd/compile/internal/base/debug.go b/src/cmd/compile/internal/base/debug.go
new file mode 100644
index 0000000000..45a552a4d9
--- /dev/null
+++ b/src/cmd/compile/internal/base/debug.go
@@ -0,0 +1,194 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Debug arguments, set by -d flag.
+
+package base
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "cmd/internal/objabi"
+)
+
+// Debug holds the parsed debugging configuration values.
+var Debug = DebugFlags{
+ Fieldtrack: &objabi.Fieldtrack_enabled,
+}
+
+// DebugFlags defines the debugging configuration values (see var Debug).
+// Each struct field is a different value, named for the lower-case of the field name.
+// Each field must be an int or string and must have a `help` struct tag.
+//
+// The -d option takes a comma-separated list of settings.
+// Each setting is name=value; for ints, name is short for name=1.
+type DebugFlags struct {
+ Append int `help:"print information about append compilation"`
+ Checkptr int `help:"instrument unsafe pointer conversions"`
+ Closure int `help:"print information about closure compilation"`
+ CompileLater int `help:"compile functions as late as possible"`
+ DclStack int `help:"run internal dclstack check"`
+ Defer int `help:"print information about defer compilation"`
+ DisableNil int `help:"disable nil checks"`
+ DumpPtrs int `help:"show Node pointer values in dump output"`
+ DwarfInl int `help:"print information about DWARF inlined function creation"`
+ Export int `help:"print export data"`
+ Fieldtrack *int `help:"enable field tracking"`
+ GCProg int `help:"print dump of GC programs"`
+ Libfuzzer int `help:"enable coverage instrumentation for libfuzzer"`
+ LocationLists int `help:"print information about DWARF location list creation"`
+ Nil int `help:"print information about nil checks"`
+ PCTab string `help:"print named pc-value table"`
+ Panic int `help:"show all compiler panics"`
+ Slice int `help:"print information about slice compilation"`
+ SoftFloat int `help:"force compiler to emit soft-float code"`
+ TypeAssert int `help:"print information about type assertion inlining"`
+ TypecheckInl int `help:"eager typechecking of inline function bodies"`
+ WB int `help:"print information about write barriers"`
+
+ any bool // set when any of the values have been set
+}
+
+// Any reports whether any of the debug flags have been set.
+func (d *DebugFlags) Any() bool { return d.any }
+
+type debugField struct {
+ name string
+ help string
+ val interface{} // *int or *string
+}
+
+var debugTab []debugField
+
+func init() {
+ v := reflect.ValueOf(&Debug).Elem()
+ t := v.Type()
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Name == "any" {
+ continue
+ }
+ name := strings.ToLower(f.Name)
+ help := f.Tag.Get("help")
+ if help == "" {
+ panic(fmt.Sprintf("base.Debug.%s is missing help text", f.Name))
+ }
+ ptr := v.Field(i).Addr().Interface()
+ switch ptr.(type) {
+ default:
+ panic(fmt.Sprintf("base.Debug.%s has invalid type %v (must be int or string)", f.Name, f.Type))
+ case *int, *string:
+ // ok
+ case **int:
+ ptr = *ptr.(**int) // record the *int itself
+ }
+ debugTab = append(debugTab, debugField{name, help, ptr})
+ }
+}
+
+// DebugSSA is called to set a -d ssa/... option.
+// If nil, those options are reported as invalid options.
+// If DebugSSA returns a non-empty string, that text is reported as a compiler error.
+var DebugSSA func(phase, flag string, val int, valString string) string
+
+// parseDebug parses the -d debug string argument.
+func parseDebug(debugstr string) {
+ // parse -d argument
+ if debugstr == "" {
+ return
+ }
+ Debug.any = true
+Split:
+ for _, name := range strings.Split(debugstr, ",") {
+ if name == "" {
+ continue
+ }
+ // display help about the -d option itself and quit
+ if name == "help" {
+ fmt.Print(debugHelpHeader)
+ maxLen := len("ssa/help")
+ for _, t := range debugTab {
+ if len(t.name) > maxLen {
+ maxLen = len(t.name)
+ }
+ }
+ for _, t := range debugTab {
+ fmt.Printf("\t%-*s\t%s\n", maxLen, t.name, t.help)
+ }
+ // ssa options have their own help
+ fmt.Printf("\t%-*s\t%s\n", maxLen, "ssa/help", "print help about SSA debugging")
+ fmt.Print(debugHelpFooter)
+ os.Exit(0)
+ }
+ val, valstring, haveInt := 1, "", true
+ if i := strings.IndexAny(name, "=:"); i >= 0 {
+ var err error
+ name, valstring = name[:i], name[i+1:]
+ val, err = strconv.Atoi(valstring)
+ if err != nil {
+ val, haveInt = 1, false
+ }
+ }
+ for _, t := range debugTab {
+ if t.name != name {
+ continue
+ }
+ switch vp := t.val.(type) {
+ case nil:
+ // Ignore
+ case *string:
+ *vp = valstring
+ case *int:
+ if !haveInt {
+ log.Fatalf("invalid debug value %v", name)
+ }
+ *vp = val
+ default:
+ panic("bad debugtab type")
+ }
+ continue Split
+ }
+ // special case for ssa for now
+ if DebugSSA != nil && strings.HasPrefix(name, "ssa/") {
+ // expect form ssa/phase/flag
+ // e.g. -d=ssa/generic_cse/time
+ // _ in phase name also matches space
+ phase := name[4:]
+ flag := "debug" // default flag is debug
+ if i := strings.Index(phase, "/"); i >= 0 {
+ flag = phase[i+1:]
+ phase = phase[:i]
+ }
+ err := DebugSSA(phase, flag, val, valstring)
+ if err != "" {
+ log.Fatalf(err)
+ }
+ continue Split
+ }
+ log.Fatalf("unknown debug key -d %s\n", name)
+ }
+}
+
+const debugHelpHeader = `usage: -d arg[,arg]* and arg is <key>[=<value>]
+
+<key> is one of:
+
+`
+
+const debugHelpFooter = `
+<value> is key-specific.
+
+Key "checkptr" supports values:
+ "0": instrumentation disabled
+ "1": conversions involving unsafe.Pointer are instrumented
+ "2": conversions to unsafe.Pointer force heap allocation
+
+Key "pctab" supports values:
+ "pctospadj", "pctofile", "pctoline", "pctoinline", "pctopcdata"
+`
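
Putting parseDebug in concrete terms, the accepted forms include the following (the ssa/generic_cse/time example is taken from the comment in the code; the rest simply mirror the parsing rules above):

	go tool compile -d=checkptr somefile.go               # int key alone means checkptr=1
	go tool compile -d=checkptr=2,wb=0 somefile.go        # comma-separated name=value list
	go tool compile -d=pctab=pctoline somefile.go         # string-valued key
	go tool compile -d=ssa/generic_cse/time somefile.go   # routed to DebugSSA
	go tool compile -d=help                               # print the help text and exit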
diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go
new file mode 100644
index 0000000000..240258d6b8
--- /dev/null
+++ b/src/cmd/compile/internal/base/flag.go
@@ -0,0 +1,454 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "encoding/json"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "reflect"
+ "runtime"
+ "strings"
+
+ "cmd/internal/objabi"
+ "cmd/internal/sys"
+)
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "usage: compile [options] file.go...\n")
+ objabi.Flagprint(os.Stderr)
+ Exit(2)
+}
+
+// Flag holds the parsed command-line flags.
+// See ParseFlag for non-zero defaults.
+var Flag CmdFlags
+
+// A CountFlag is a counting integer flag.
+// It accepts -name=value to set the value directly,
+// but it also accepts -name with no =value to increment the count.
+type CountFlag int
+
+// CmdFlags defines the command-line flags (see var Flag).
+// Each struct field is a different flag, by default named for the lower-case of the field name.
+// If the flag name is a single letter, the default flag name is left upper-case.
+// If the flag name is "Lower" followed by a single letter, the default flag name is the lower-case of the last letter.
+//
+// If this default flag name can't be made right, the `flag` struct tag can be used to replace it,
+// but this should be done only in exceptional circumstances: it helps everyone if the flag name
+// is obvious from the field name when the flag is used elsewhere in the compiler sources.
+// The `flag:"-"` struct tag makes a field invisible to the flag logic and should also be used sparingly.
+//
+// Each field must have a `help` struct tag giving the flag help message.
+//
+// The allowed field types are bool, int, string, pointers to those (for values stored elsewhere),
+// CountFlag (for a counting flag), and func(string) (for a flag that uses special code for parsing).
+type CmdFlags struct {
+ // Single letters
+ B CountFlag "help:\"disable bounds checking\""
+ C CountFlag "help:\"disable printing of columns in error messages\""
+ D string "help:\"set relative `path` for local imports\""
+ E CountFlag "help:\"debug symbol export\""
+ G CountFlag "help:\"accept generic code\""
+ I func(string) "help:\"add `directory` to import search path\""
+ K CountFlag "help:\"debug missing line numbers\""
+ L CountFlag "help:\"show full file names in error messages\""
+ N CountFlag "help:\"disable optimizations\""
+ S CountFlag "help:\"print assembly listing\""
+ // V is added by objabi.AddVersionFlag
+ W CountFlag "help:\"debug parse tree after type checking\""
+
+ LowerC int "help:\"concurrency during compilation (1 means no concurrency)\""
+ LowerD func(string) "help:\"enable debugging settings; try -d help\""
+ LowerE CountFlag "help:\"no limit on number of errors reported\""
+ LowerH CountFlag "help:\"halt on error\""
+ LowerJ CountFlag "help:\"debug runtime-initialized variables\""
+ LowerL CountFlag "help:\"disable inlining\""
+ LowerM CountFlag "help:\"print optimization decisions\""
+ LowerO string "help:\"write output to `file`\""
+ LowerP *string "help:\"set expected package import `path`\"" // &Ctxt.Pkgpath, set below
+ LowerR CountFlag "help:\"debug generated wrappers\""
+ LowerT bool "help:\"enable tracing for debugging the compiler\""
+ LowerW CountFlag "help:\"debug type checking\""
+ LowerV *bool "help:\"increase debug verbosity\""
+
+ // Special characters
+ Percent int "flag:\"%\" help:\"debug non-static initializers\""
+ CompilingRuntime bool "flag:\"+\" help:\"compiling runtime\""
+
+ // Longer names
+ AsmHdr string "help:\"write assembly header to `file`\""
+ Bench string "help:\"append benchmark times to `file`\""
+ BlockProfile string "help:\"write block profile to `file`\""
+ BuildID string "help:\"record `id` as the build id in the export metadata\""
+ CPUProfile string "help:\"write cpu profile to `file`\""
+ Complete bool "help:\"compiling complete package (no C or assembly)\""
+ Dwarf bool "help:\"generate DWARF symbols\""
+ DwarfBASEntries *bool "help:\"use base address selection entries in DWARF\"" // &Ctxt.UseBASEntries, set below
+ DwarfLocationLists *bool "help:\"add location lists to DWARF in optimized mode\"" // &Ctxt.Flag_locationlists, set below
+ Dynlink *bool "help:\"support references to Go symbols defined in other shared libraries\"" // &Ctxt.Flag_dynlink, set below
+ EmbedCfg func(string) "help:\"read go:embed configuration from `file`\""
+ GenDwarfInl int "help:\"generate DWARF inline info records\"" // 0=disabled, 1=funcs, 2=funcs+formals/locals
+ GoVersion string "help:\"required version of the runtime\""
+ ImportCfg func(string) "help:\"read import configuration from `file`\""
+ ImportMap func(string) "help:\"add `definition` of the form source=actual to import map\""
+ InstallSuffix string "help:\"set pkg directory `suffix`\""
+ JSON string "help:\"version,file for JSON compiler/optimizer detail output\""
+ Lang string "help:\"Go language version source code expects\""
+ LinkObj string "help:\"write linker-specific object to `file`\""
+ LinkShared *bool "help:\"generate code that will be linked against Go shared libraries\"" // &Ctxt.Flag_linkshared, set below
+ Live CountFlag "help:\"debug liveness analysis\""
+ MSan bool "help:\"build code compatible with C/C++ memory sanitizer\""
+ MemProfile string "help:\"write memory profile to `file`\""
+ MemProfileRate int64 "help:\"set runtime.MemProfileRate to `rate`\""
+ MutexProfile string "help:\"write mutex profile to `file`\""
+ NoLocalImports bool "help:\"reject local (relative) imports\""
+ Pack bool "help:\"write to file.a instead of file.o\""
+ Race bool "help:\"enable race detector\""
+ Shared *bool "help:\"generate code that can be linked into a shared library\"" // &Ctxt.Flag_shared, set below
+ SmallFrames bool "help:\"reduce the size limit for stack allocated objects\"" // small stacks, to diagnose GC latency; see golang.org/issue/27732
+ Spectre string "help:\"enable spectre mitigations in `list` (all, index, ret)\""
+ Std bool "help:\"compiling standard library\""
+ SymABIs string "help:\"read symbol ABIs from `file`\""
+ TraceProfile string "help:\"write an execution trace to `file`\""
+ TrimPath string "help:\"remove `prefix` from recorded source file paths\""
+ WB bool "help:\"enable write barrier\"" // TODO: remove
+
+ // Configuration derived from flags; not a flag itself.
+ Cfg struct {
+ Embed struct { // set by -embedcfg
+ Patterns map[string][]string
+ Files map[string]string
+ }
+ ImportDirs []string // appended to by -I
+ ImportMap map[string]string // set by -importmap OR -importcfg
+ PackageFile map[string]string // set by -importcfg; nil means not in use
+ SpectreIndex bool // set by -spectre=index or -spectre=all
+ }
+}
+
+// ParseFlags parses the command-line flags into Flag.
+func ParseFlags() {
+ Flag.I = addImportDir
+
+ Flag.LowerC = 1
+ Flag.LowerD = parseDebug
+ Flag.LowerP = &Ctxt.Pkgpath
+ Flag.LowerV = &Ctxt.Debugvlog
+
+ Flag.Dwarf = objabi.GOARCH != "wasm"
+ Flag.DwarfBASEntries = &Ctxt.UseBASEntries
+ Flag.DwarfLocationLists = &Ctxt.Flag_locationlists
+ *Flag.DwarfLocationLists = true
+ Flag.Dynlink = &Ctxt.Flag_dynlink
+ Flag.EmbedCfg = readEmbedCfg
+ Flag.GenDwarfInl = 2
+ Flag.ImportCfg = readImportCfg
+ Flag.ImportMap = addImportMap
+ Flag.LinkShared = &Ctxt.Flag_linkshared
+ Flag.Shared = &Ctxt.Flag_shared
+ Flag.WB = true
+
+ Flag.Cfg.ImportMap = make(map[string]string)
+
+ objabi.AddVersionFlag() // -V
+ registerFlags()
+ objabi.Flagparse(usage)
+
+ if Flag.MSan && !sys.MSanSupported(objabi.GOOS, objabi.GOARCH) {
+ log.Fatalf("%s/%s does not support -msan", objabi.GOOS, objabi.GOARCH)
+ }
+ if Flag.Race && !sys.RaceDetectorSupported(objabi.GOOS, objabi.GOARCH) {
+ log.Fatalf("%s/%s does not support -race", objabi.GOOS, objabi.GOARCH)
+ }
+ if (*Flag.Shared || *Flag.Dynlink || *Flag.LinkShared) && !Ctxt.Arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X) {
+ log.Fatalf("%s/%s does not support -shared", objabi.GOOS, objabi.GOARCH)
+ }
+ parseSpectre(Flag.Spectre) // left as string for recordFlags
+
+ Ctxt.Flag_shared = Ctxt.Flag_dynlink || Ctxt.Flag_shared
+ Ctxt.Flag_optimize = Flag.N == 0
+ Ctxt.Debugasm = int(Flag.S)
+
+ if flag.NArg() < 1 {
+ usage()
+ }
+
+ if Flag.GoVersion != "" && Flag.GoVersion != runtime.Version() {
+ fmt.Printf("compile: version %q does not match go tool version %q\n", runtime.Version(), Flag.GoVersion)
+ Exit(2)
+ }
+
+ if Flag.LowerO == "" {
+ p := flag.Arg(0)
+ if i := strings.LastIndex(p, "/"); i >= 0 {
+ p = p[i+1:]
+ }
+ if runtime.GOOS == "windows" {
+ if i := strings.LastIndex(p, `\`); i >= 0 {
+ p = p[i+1:]
+ }
+ }
+ if i := strings.LastIndex(p, "."); i >= 0 {
+ p = p[:i]
+ }
+ suffix := ".o"
+ if Flag.Pack {
+ suffix = ".a"
+ }
+ Flag.LowerO = p + suffix
+ }
+
+ if Flag.Race && Flag.MSan {
+ log.Fatal("cannot use both -race and -msan")
+ }
+ if Flag.Race || Flag.MSan {
+ // -race and -msan imply -d=checkptr for now.
+ Debug.Checkptr = 1
+ }
+
+ if Flag.CompilingRuntime && Flag.N != 0 {
+ log.Fatal("cannot disable optimizations while compiling runtime")
+ }
+ if Flag.LowerC < 1 {
+ log.Fatalf("-c must be at least 1, got %d", Flag.LowerC)
+ }
+ if Flag.LowerC > 1 && !concurrentBackendAllowed() {
+ log.Fatalf("cannot use concurrent backend compilation with provided flags; invoked as %v", os.Args)
+ }
+
+ if Flag.CompilingRuntime {
+ // Runtime can't use -d=checkptr, at least not yet.
+ Debug.Checkptr = 0
+
+ // Fuzzing the runtime isn't interesting either.
+ Debug.Libfuzzer = 0
+ }
+
+ // set via a -d flag
+ Ctxt.Debugpcln = Debug.PCTab
+}
+
+// registerFlags adds flag registrations for all the fields in Flag.
+// See the comment on type CmdFlags for the rules.
+func registerFlags() {
+ var (
+ boolType = reflect.TypeOf(bool(false))
+ intType = reflect.TypeOf(int(0))
+ stringType = reflect.TypeOf(string(""))
+ ptrBoolType = reflect.TypeOf(new(bool))
+ ptrIntType = reflect.TypeOf(new(int))
+ ptrStringType = reflect.TypeOf(new(string))
+ countType = reflect.TypeOf(CountFlag(0))
+ funcType = reflect.TypeOf((func(string))(nil))
+ )
+
+ v := reflect.ValueOf(&Flag).Elem()
+ t := v.Type()
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Name == "Cfg" {
+ continue
+ }
+
+ var name string
+ if len(f.Name) == 1 {
+ name = f.Name
+ } else if len(f.Name) == 6 && f.Name[:5] == "Lower" && 'A' <= f.Name[5] && f.Name[5] <= 'Z' {
+ name = string(rune(f.Name[5] + 'a' - 'A'))
+ } else {
+ name = strings.ToLower(f.Name)
+ }
+ if tag := f.Tag.Get("flag"); tag != "" {
+ name = tag
+ }
+
+ help := f.Tag.Get("help")
+ if help == "" {
+ panic(fmt.Sprintf("base.Flag.%s is missing help text", f.Name))
+ }
+
+ if k := f.Type.Kind(); (k == reflect.Ptr || k == reflect.Func) && v.Field(i).IsNil() {
+ panic(fmt.Sprintf("base.Flag.%s is uninitialized %v", f.Name, f.Type))
+ }
+
+ switch f.Type {
+ case boolType:
+ p := v.Field(i).Addr().Interface().(*bool)
+ flag.BoolVar(p, name, *p, help)
+ case intType:
+ p := v.Field(i).Addr().Interface().(*int)
+ flag.IntVar(p, name, *p, help)
+ case stringType:
+ p := v.Field(i).Addr().Interface().(*string)
+ flag.StringVar(p, name, *p, help)
+ case ptrBoolType:
+ p := v.Field(i).Interface().(*bool)
+ flag.BoolVar(p, name, *p, help)
+ case ptrIntType:
+ p := v.Field(i).Interface().(*int)
+ flag.IntVar(p, name, *p, help)
+ case ptrStringType:
+ p := v.Field(i).Interface().(*string)
+ flag.StringVar(p, name, *p, help)
+ case countType:
+ p := (*int)(v.Field(i).Addr().Interface().(*CountFlag))
+ objabi.Flagcount(name, help, p)
+ case funcType:
+ f := v.Field(i).Interface().(func(string))
+ objabi.Flagfn1(name, help, f)
+ }
+ }
+}
+
+// concurrentFlagOk reports whether the current compiler flags
+// are compatible with concurrent compilation.
+func concurrentFlagOk() bool {
+ // TODO(rsc): Many of these are fine. Remove them.
+ return Flag.Percent == 0 &&
+ Flag.E == 0 &&
+ Flag.K == 0 &&
+ Flag.L == 0 &&
+ Flag.LowerH == 0 &&
+ Flag.LowerJ == 0 &&
+ Flag.LowerM == 0 &&
+ Flag.LowerR == 0
+}
+
+func concurrentBackendAllowed() bool {
+ if !concurrentFlagOk() {
+ return false
+ }
+
+ // Debug.S by itself is ok, because all printing occurs
+ // while writing the object file, and that is non-concurrent.
+ // Adding Debug_vlog, however, causes Debug.S to also print
+ // while flushing the plist, which happens concurrently.
+ if Ctxt.Debugvlog || Debug.Any() || Flag.Live > 0 {
+ return false
+ }
+ // TODO: Test and delete this condition.
+ if objabi.Fieldtrack_enabled != 0 {
+ return false
+ }
+ // TODO: fix races and enable the following flags
+ if Ctxt.Flag_shared || Ctxt.Flag_dynlink || Flag.Race {
+ return false
+ }
+ return true
+}
+
+func addImportDir(dir string) {
+ if dir != "" {
+ Flag.Cfg.ImportDirs = append(Flag.Cfg.ImportDirs, dir)
+ }
+}
+
+func addImportMap(s string) {
+ if Flag.Cfg.ImportMap == nil {
+ Flag.Cfg.ImportMap = make(map[string]string)
+ }
+ if strings.Count(s, "=") != 1 {
+ log.Fatal("-importmap argument must be of the form source=actual")
+ }
+ i := strings.Index(s, "=")
+ source, actual := s[:i], s[i+1:]
+ if source == "" || actual == "" {
+ log.Fatal("-importmap argument must be of the form source=actual; source and actual must be non-empty")
+ }
+ Flag.Cfg.ImportMap[source] = actual
+}
+
+func readImportCfg(file string) {
+ if Flag.Cfg.ImportMap == nil {
+ Flag.Cfg.ImportMap = make(map[string]string)
+ }
+ Flag.Cfg.PackageFile = map[string]string{}
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ log.Fatalf("-importcfg: %v", err)
+ }
+
+ for lineNum, line := range strings.Split(string(data), "\n") {
+ lineNum++ // 1-based
+ line = strings.TrimSpace(line)
+ if line == "" || strings.HasPrefix(line, "#") {
+ continue
+ }
+
+ var verb, args string
+ if i := strings.Index(line, " "); i < 0 {
+ verb = line
+ } else {
+ verb, args = line[:i], strings.TrimSpace(line[i+1:])
+ }
+ var before, after string
+ if i := strings.Index(args, "="); i >= 0 {
+ before, after = args[:i], args[i+1:]
+ }
+ switch verb {
+ default:
+ log.Fatalf("%s:%d: unknown directive %q", file, lineNum, verb)
+ case "importmap":
+ if before == "" || after == "" {
+ log.Fatalf(`%s:%d: invalid importmap: syntax is "importmap old=new"`, file, lineNum)
+ }
+ Flag.Cfg.ImportMap[before] = after
+ case "packagefile":
+ if before == "" || after == "" {
+ log.Fatalf(`%s:%d: invalid packagefile: syntax is "packagefile path=filename"`, file, lineNum)
+ }
+ Flag.Cfg.PackageFile[before] = after
+ }
+ }
+}
+
+func readEmbedCfg(file string) {
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ log.Fatalf("-embedcfg: %v", err)
+ }
+ if err := json.Unmarshal(data, &Flag.Cfg.Embed); err != nil {
+ log.Fatalf("%s: %v", file, err)
+ }
+ if Flag.Cfg.Embed.Patterns == nil {
+ log.Fatalf("%s: invalid embedcfg: missing Patterns", file)
+ }
+ if Flag.Cfg.Embed.Files == nil {
+ log.Fatalf("%s: invalid embedcfg: missing Files", file)
+ }
+}
+
+// parseSpectre parses the spectre configuration from the string s.
+func parseSpectre(s string) {
+ for _, f := range strings.Split(s, ",") {
+ f = strings.TrimSpace(f)
+ switch f {
+ default:
+ log.Fatalf("unknown setting -spectre=%s", f)
+ case "":
+ // nothing
+ case "all":
+ Flag.Cfg.SpectreIndex = true
+ Ctxt.Retpoline = true
+ case "index":
+ Flag.Cfg.SpectreIndex = true
+ case "ret":
+ Ctxt.Retpoline = true
+ }
+ }
+
+ if Flag.Cfg.SpectreIndex {
+ switch objabi.GOARCH {
+ case "amd64":
+ // ok
+ default:
+ log.Fatalf("GOARCH=%s does not support -spectre=index", objabi.GOARCH)
+ }
+ }
+}
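
To make registerFlags' naming rules concrete, here is how a few of the CmdFlags fields above come out:

	N       -> -N        (single letters keep their case)
	LowerC  -> -c        ("Lower" plus a letter becomes the lower-case letter)
	AsmHdr  -> -asmhdr   (longer names are simply lower-cased)
	Percent -> -%        (overridden by the flag:"%" struct tag)
	Cfg     ->           (skipped entirely; it is derived state, not a flag)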
diff --git a/src/cmd/compile/internal/base/print.go b/src/cmd/compile/internal/base/print.go
new file mode 100644
index 0000000000..6831b3ada3
--- /dev/null
+++ b/src/cmd/compile/internal/base/print.go
@@ -0,0 +1,260 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "fmt"
+ "os"
+ "runtime/debug"
+ "sort"
+ "strings"
+
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+// An errorMsg is a queued error message, waiting to be printed.
+type errorMsg struct {
+ pos src.XPos
+ msg string
+}
+
+// Pos is the current source position being processed,
+// printed by Errorf, ErrorfLang, Fatalf, and Warnf.
+var Pos src.XPos
+
+var (
+ errorMsgs []errorMsg
+ numErrors int // number of entries in errorMsgs that are errors (as opposed to warnings)
+ numSyntaxErrors int
+)
+
+// Errors returns the number of errors reported.
+func Errors() int {
+ return numErrors
+}
+
+// SyntaxErrors returns the number of syntax errors reported.
+func SyntaxErrors() int {
+ return numSyntaxErrors
+}
+
+// addErrorMsg adds a new errorMsg (which may be a warning) to errorMsgs.
+func addErrorMsg(pos src.XPos, format string, args ...interface{}) {
+ msg := fmt.Sprintf(format, args...)
+ // Only add the position if we know the position.
+ // See issue golang.org/issue/11361.
+ if pos.IsKnown() {
+ msg = fmt.Sprintf("%v: %s", FmtPos(pos), msg)
+ }
+ errorMsgs = append(errorMsgs, errorMsg{
+ pos: pos,
+ msg: msg + "\n",
+ })
+}
+
+// FmtPos formats pos as a file:line string.
+func FmtPos(pos src.XPos) string {
+ if Ctxt == nil {
+ return "???"
+ }
+ return Ctxt.OutermostPos(pos).Format(Flag.C == 0, Flag.L == 1)
+}
+
+// byPos sorts errors by source position.
+type byPos []errorMsg
+
+func (x byPos) Len() int { return len(x) }
+func (x byPos) Less(i, j int) bool { return x[i].pos.Before(x[j].pos) }
+func (x byPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+// FlushErrors sorts errors seen so far by line number, prints them to stdout,
+// and empties the errors array.
+func FlushErrors() {
+ Ctxt.Bso.Flush()
+ if len(errorMsgs) == 0 {
+ return
+ }
+ sort.Stable(byPos(errorMsgs))
+ for i, err := range errorMsgs {
+ if i == 0 || err.msg != errorMsgs[i-1].msg {
+ fmt.Printf("%s", err.msg)
+ }
+ }
+ errorMsgs = errorMsgs[:0]
+}
+
+// lasterror keeps track of the most recently issued error,
+// to avoid printing multiple error messages on the same line.
+var lasterror struct {
+ syntax src.XPos // source position of last syntax error
+ other src.XPos // source position of last non-syntax error
+ msg string // error message of last non-syntax error
+}
+
+// sameline reports whether two positions a, b are on the same line.
+func sameline(a, b src.XPos) bool {
+ p := Ctxt.PosTable.Pos(a)
+ q := Ctxt.PosTable.Pos(b)
+ return p.Base() == q.Base() && p.Line() == q.Line()
+}
+
+// Errorf reports a formatted error at the current line.
+func Errorf(format string, args ...interface{}) {
+ ErrorfAt(Pos, format, args...)
+}
+
+// ErrorfAt reports a formatted error message at pos.
+func ErrorfAt(pos src.XPos, format string, args ...interface{}) {
+ msg := fmt.Sprintf(format, args...)
+
+ if strings.HasPrefix(msg, "syntax error") {
+ numSyntaxErrors++
+ // only one syntax error per line, no matter what error
+ if sameline(lasterror.syntax, pos) {
+ return
+ }
+ lasterror.syntax = pos
+ } else {
+ // only one of multiple equal non-syntax errors per line
+ // (flusherrors shows only one of them, so we filter them
+ // here as best as we can (they may not appear in order)
+ // so that we don't count them here and exit early, and
+ // then have nothing to show for.)
+ if sameline(lasterror.other, pos) && lasterror.msg == msg {
+ return
+ }
+ lasterror.other = pos
+ lasterror.msg = msg
+ }
+
+ addErrorMsg(pos, "%s", msg)
+ numErrors++
+
+ hcrash()
+ if numErrors >= 10 && Flag.LowerE == 0 {
+ FlushErrors()
+ fmt.Printf("%v: too many errors\n", FmtPos(pos))
+ ErrorExit()
+ }
+}
+
+// ErrorfVers reports that a language feature (format, args) requires a later version of Go.
+func ErrorfVers(lang string, format string, args ...interface{}) {
+ Errorf("%s requires %s or later (-lang was set to %s; check go.mod)", fmt.Sprintf(format, args...), lang, Flag.Lang)
+}
+
+// UpdateErrorDot is a clumsy hack that rewrites the last error,
+// if it was "LINE: undefined: NAME", to be "LINE: undefined: NAME in EXPR".
+// It is used to give better error messages for dot (selector) expressions.
+func UpdateErrorDot(line string, name, expr string) {
+ if len(errorMsgs) == 0 {
+ return
+ }
+ e := &errorMsgs[len(errorMsgs)-1]
+ if strings.HasPrefix(e.msg, line) && e.msg == fmt.Sprintf("%v: undefined: %v\n", line, name) {
+ e.msg = fmt.Sprintf("%v: undefined: %v in %v\n", line, name, expr)
+ }
+}
+
+// Warnf reports a formatted warning at the current line.
+// In general the Go compiler does NOT generate warnings,
+// so this should be used only when the user has opted in
+// to additional output by setting a particular flag.
+func Warn(format string, args ...interface{}) {
+ WarnfAt(Pos, format, args...)
+}
+
+// WarnfAt reports a formatted warning at pos.
+// In general the Go compiler does NOT generate warnings,
+// so this should be used only when the user has opted in
+// to additional output by setting a particular flag.
+func WarnfAt(pos src.XPos, format string, args ...interface{}) {
+ addErrorMsg(pos, format, args...)
+ if Flag.LowerM != 0 {
+ FlushErrors()
+ }
+}
+
+// Fatalf reports a fatal error - an internal problem - at the current line and exits.
+// If other errors have already been printed, then Fatalf just quietly exits.
+// (The internal problem may have been caused by incomplete information
+// after the already-reported errors, so best to let users fix those and
+// try again without being bothered about a spurious internal error.)
+//
+// But if no errors have been printed, or if -d panic has been specified,
+// Fatalf prints the error as an "internal compiler error". In a released build,
+// it prints an error asking to file a bug report. In development builds, it
+// prints a stack trace.
+//
+// If -h has been specified, Fatalf panics to force the usual runtime info dump.
+func Fatalf(format string, args ...interface{}) {
+ FatalfAt(Pos, format, args...)
+}
+
+// FatalfAt reports a fatal error - an internal problem - at pos and exits.
+// If other errors have already been printed, then FatalfAt just quietly exits.
+// (The internal problem may have been caused by incomplete information
+// after the already-reported errors, so best to let users fix those and
+// try again without being bothered about a spurious internal error.)
+//
+// But if no errors have been printed, or if -d panic has been specified,
+// FatalfAt prints the error as an "internal compiler error". In a released build,
+// it prints an error asking to file a bug report. In development builds, it
+// prints a stack trace.
+//
+// If -h has been specified, FatalfAt panics to force the usual runtime info dump.
+func FatalfAt(pos src.XPos, format string, args ...interface{}) {
+ FlushErrors()
+
+ if Debug.Panic != 0 || numErrors == 0 {
+ fmt.Printf("%v: internal compiler error: ", FmtPos(pos))
+ fmt.Printf(format, args...)
+ fmt.Printf("\n")
+
+ // If this is a released compiler version, ask for a bug report.
+ if strings.HasPrefix(objabi.Version, "go") {
+ fmt.Printf("\n")
+ fmt.Printf("Please file a bug report including a short program that triggers the error.\n")
+ fmt.Printf("https://golang.org/issue/new\n")
+ } else {
+ // Not a release; dump a stack trace, too.
+ fmt.Println()
+ os.Stdout.Write(debug.Stack())
+ fmt.Println()
+ }
+ }
+
+ hcrash()
+ ErrorExit()
+}
+
+// hcrash crashes the compiler when -h is set, to find out where a message is generated.
+func hcrash() {
+ if Flag.LowerH != 0 {
+ FlushErrors()
+ if Flag.LowerO != "" {
+ os.Remove(Flag.LowerO)
+ }
+ panic("-h")
+ }
+}
+
+// ErrorExit handles an error-status exit.
+// It flushes any pending errors, removes the output file, and exits.
+func ErrorExit() {
+ FlushErrors()
+ if Flag.LowerO != "" {
+ os.Remove(Flag.LowerO)
+ }
+ os.Exit(2)
+}
+
+// ExitIfErrors calls ErrorExit if any errors have been reported.
+func ExitIfErrors() {
+ if Errors() > 0 {
+ ErrorExit()
+ }
+}
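
The flow in print.go is easy to miss on first read: messages queue up in errorMsgs (possibly out of source order, with warnings interleaved among errors), FlushErrors stable-sorts them by position and prints each message once even when duplicates were queued, and ErrorfAt bails out after ten errors unless -e is set. A stripped-down, runnable model of just the sort-and-dedupe step; the types and sample messages are invented stand-ins:

	package main

	import (
		"fmt"
		"sort"
	)

	type errorMsg struct {
		pos int // stand-in for src.XPos
		msg string
	}

	func main() {
		queued := []errorMsg{
			{pos: 9, msg: "f.go:9: undefined: x\n"},
			{pos: 2, msg: "f.go:2: syntax error\n"},
			{pos: 9, msg: "f.go:9: undefined: x\n"}, // duplicate
		}
		// Stable sort by position, then print while skipping adjacent duplicates,
		// mirroring sort.Stable(byPos(errorMsgs)) and the loop in FlushErrors.
		sort.SliceStable(queued, func(i, j int) bool { return queued[i].pos < queued[j].pos })
		for i, e := range queued {
			if i == 0 || e.msg != queued[i-1].msg {
				fmt.Print(e.msg)
			}
		}
	}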
diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go
index 2f7fa27bb9..d2762126ad 100644
--- a/src/cmd/compile/internal/gc/alg.go
+++ b/src/cmd/compile/internal/gc/alg.go
@@ -5,6 +5,8 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
"fmt"
@@ -69,11 +71,11 @@ func EqCanPanic(t *types.Type) bool {
switch t.Etype {
default:
return false
- case TINTER:
+ case types.TINTER:
return true
- case TARRAY:
+ case types.TARRAY:
return EqCanPanic(t.Elem())
- case TSTRUCT:
+ case types.TSTRUCT:
for _, f := range t.FieldSlice() {
if !f.Sym.IsBlank() && EqCanPanic(f.Type) {
return true
@@ -119,45 +121,45 @@ func algtype1(t *types.Type) (AlgKind, *types.Type) {
}
switch t.Etype {
- case TANY, TFORW:
+ case types.TANY, types.TFORW:
// will be defined later.
return ANOEQ, t
- case TINT8, TUINT8, TINT16, TUINT16,
- TINT32, TUINT32, TINT64, TUINT64,
- TINT, TUINT, TUINTPTR,
- TBOOL, TPTR,
- TCHAN, TUNSAFEPTR:
+ case types.TINT8, types.TUINT8, types.TINT16, types.TUINT16,
+ types.TINT32, types.TUINT32, types.TINT64, types.TUINT64,
+ types.TINT, types.TUINT, types.TUINTPTR,
+ types.TBOOL, types.TPTR,
+ types.TCHAN, types.TUNSAFEPTR:
return AMEM, nil
- case TFUNC, TMAP:
+ case types.TFUNC, types.TMAP:
return ANOEQ, t
- case TFLOAT32:
+ case types.TFLOAT32:
return AFLOAT32, nil
- case TFLOAT64:
+ case types.TFLOAT64:
return AFLOAT64, nil
- case TCOMPLEX64:
+ case types.TCOMPLEX64:
return ACPLX64, nil
- case TCOMPLEX128:
+ case types.TCOMPLEX128:
return ACPLX128, nil
- case TSTRING:
+ case types.TSTRING:
return ASTRING, nil
- case TINTER:
+ case types.TINTER:
if t.IsEmptyInterface() {
return ANILINTER, nil
}
return AINTER, nil
- case TSLICE:
+ case types.TSLICE:
return ANOEQ, t
- case TARRAY:
+ case types.TARRAY:
a, bad := algtype1(t.Elem())
switch a {
case AMEM:
@@ -177,7 +179,7 @@ func algtype1(t *types.Type) (AlgKind, *types.Type) {
return ASPECIAL, nil
- case TSTRUCT:
+ case types.TSTRUCT:
fields := t.FieldSlice()
// One-field struct is same as that one field alone.
@@ -203,7 +205,7 @@ func algtype1(t *types.Type) (AlgKind, *types.Type) {
return ret, nil
}
- Fatalf("algtype1: unexpected type %v", t)
+ base.Fatalf("algtype1: unexpected type %v", t)
return 0, nil
}
@@ -214,7 +216,7 @@ func genhash(t *types.Type) *obj.LSym {
switch algtype(t) {
default:
// genhash is only called for types that have equality
- Fatalf("genhash %v", t)
+ base.Fatalf("genhash %v", t)
case AMEM0:
return sysClosure("memhash0")
case AMEM8:
@@ -282,24 +284,24 @@ func genhash(t *types.Type) *obj.LSym {
}
sym := typesymprefix(".hash", t)
- if Debug.r != 0 {
+ if base.Flag.LowerR != 0 {
fmt.Printf("genhash %v %v %v\n", closure, sym, t)
}
- lineno = autogeneratedPos // less confusing than end of input
- dclcontext = PEXTERN
+ base.Pos = autogeneratedPos // less confusing than end of input
+ dclcontext = ir.PEXTERN
// func sym(p *T, h uintptr) uintptr
- tfn := nod(OTFUNC, nil, nil)
- tfn.List.Set2(
+ tfn := ir.Nod(ir.OTFUNC, nil, nil)
+ tfn.PtrList().Set2(
namedfield("p", types.NewPtr(t)),
- namedfield("h", types.Types[TUINTPTR]),
+ namedfield("h", types.Types[types.TUINTPTR]),
)
- tfn.Rlist.Set1(anonfield(types.Types[TUINTPTR]))
+ tfn.PtrRlist().Set1(anonfield(types.Types[types.TUINTPTR]))
fn := dclfunc(sym, tfn)
- np := asNode(tfn.Type.Params().Field(0).Nname)
- nh := asNode(tfn.Type.Params().Field(1).Nname)
+ np := ir.AsNode(tfn.Type().Params().Field(0).Nname)
+ nh := ir.AsNode(tfn.Type().Params().Field(1).Nname)
switch t.Etype {
case types.TARRAY:
@@ -308,25 +310,25 @@ func genhash(t *types.Type) *obj.LSym {
// pure memory.
hashel := hashfor(t.Elem())
- n := nod(ORANGE, nil, nod(ODEREF, np, nil))
- ni := newname(lookup("i"))
- ni.Type = types.Types[TINT]
- n.List.Set1(ni)
+ n := ir.Nod(ir.ORANGE, nil, ir.Nod(ir.ODEREF, np, nil))
+ ni := NewName(lookup("i"))
+ ni.SetType(types.Types[types.TINT])
+ n.PtrList().Set1(ni)
n.SetColas(true)
- colasdefn(n.List.Slice(), n)
- ni = n.List.First()
+ colasdefn(n.List().Slice(), n)
+ ni = n.List().First()
// h = hashel(&p[i], h)
- call := nod(OCALL, hashel, nil)
+ call := ir.Nod(ir.OCALL, hashel, nil)
- nx := nod(OINDEX, np, ni)
+ nx := ir.Nod(ir.OINDEX, np, ni)
nx.SetBounded(true)
- na := nod(OADDR, nx, nil)
- call.List.Append(na)
- call.List.Append(nh)
- n.Nbody.Append(nod(OAS, nh, call))
+ na := ir.Nod(ir.OADDR, nx, nil)
+ call.PtrList().Append(na)
+ call.PtrList().Append(nh)
+ n.PtrBody().Append(ir.Nod(ir.OAS, nh, call))
- fn.Nbody.Append(n)
+ fn.PtrBody().Append(n)
case types.TSTRUCT:
// Walk the struct using memhash for runs of AMEM
@@ -343,12 +345,12 @@ func genhash(t *types.Type) *obj.LSym {
// Hash non-memory fields with appropriate hash function.
if !IsRegularMemory(f.Type) {
hashel := hashfor(f.Type)
- call := nod(OCALL, hashel, nil)
- nx := nodSym(OXDOT, np, f.Sym) // TODO: fields from other packages?
- na := nod(OADDR, nx, nil)
- call.List.Append(na)
- call.List.Append(nh)
- fn.Nbody.Append(nod(OAS, nh, call))
+ call := ir.Nod(ir.OCALL, hashel, nil)
+ nx := nodSym(ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
+ na := ir.Nod(ir.OADDR, nx, nil)
+ call.PtrList().Append(na)
+ call.PtrList().Append(nh)
+ fn.PtrBody().Append(ir.Nod(ir.OAS, nh, call))
i++
continue
}
@@ -358,40 +360,40 @@ func genhash(t *types.Type) *obj.LSym {
// h = hashel(&p.first, size, h)
hashel := hashmem(f.Type)
- call := nod(OCALL, hashel, nil)
- nx := nodSym(OXDOT, np, f.Sym) // TODO: fields from other packages?
- na := nod(OADDR, nx, nil)
- call.List.Append(na)
- call.List.Append(nh)
- call.List.Append(nodintconst(size))
- fn.Nbody.Append(nod(OAS, nh, call))
+ call := ir.Nod(ir.OCALL, hashel, nil)
+ nx := nodSym(ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
+ na := ir.Nod(ir.OADDR, nx, nil)
+ call.PtrList().Append(na)
+ call.PtrList().Append(nh)
+ call.PtrList().Append(nodintconst(size))
+ fn.PtrBody().Append(ir.Nod(ir.OAS, nh, call))
i = next
}
}
- r := nod(ORETURN, nil, nil)
- r.List.Append(nh)
- fn.Nbody.Append(r)
+ r := ir.Nod(ir.ORETURN, nil, nil)
+ r.PtrList().Append(nh)
+ fn.PtrBody().Append(r)
- if Debug.r != 0 {
- dumplist("genhash body", fn.Nbody)
+ if base.Flag.LowerR != 0 {
+ ir.DumpList("genhash body", fn.Body())
}
funcbody()
- fn.Func.SetDupok(true)
+ fn.Func().SetDupok(true)
fn = typecheck(fn, ctxStmt)
Curfn = fn
- typecheckslice(fn.Nbody.Slice(), ctxStmt)
+ typecheckslice(fn.Body().Slice(), ctxStmt)
Curfn = nil
- if debug_dclstack != 0 {
+ if base.Debug.DclStack != 0 {
testdclstack()
}
- fn.Func.SetNilCheckDisabled(true)
+ fn.Func().SetNilCheckDisabled(true)
xtop = append(xtop, fn)
// Build closure. It doesn't close over any variables, so
@@ -402,12 +404,12 @@ func genhash(t *types.Type) *obj.LSym {
return closure
}
-func hashfor(t *types.Type) *Node {
+func hashfor(t *types.Type) ir.Node {
var sym *types.Sym
switch a, _ := algtype1(t); a {
case AMEM:
- Fatalf("hashfor with AMEM type")
+ base.Fatalf("hashfor with AMEM type")
case AINTER:
sym = Runtimepkg.Lookup("interhash")
case ANILINTER:
@@ -428,14 +430,14 @@ func hashfor(t *types.Type) *Node {
sym = typesymprefix(".hash", t)
}
- n := newname(sym)
+ n := NewName(sym)
setNodeNameFunc(n)
- n.Type = functype(nil, []*Node{
+ n.SetType(functype(nil, []ir.Node{
anonfield(types.NewPtr(t)),
- anonfield(types.Types[TUINTPTR]),
- }, []*Node{
- anonfield(types.Types[TUINTPTR]),
- })
+ anonfield(types.Types[types.TUINTPTR]),
+ }, []ir.Node{
+ anonfield(types.Types[types.TUINTPTR]),
+ }))
return n
}
@@ -509,27 +511,27 @@ func geneq(t *types.Type) *obj.LSym {
return closure
}
sym := typesymprefix(".eq", t)
- if Debug.r != 0 {
+ if base.Flag.LowerR != 0 {
fmt.Printf("geneq %v\n", t)
}
// Autogenerate code for equality of structs and arrays.
- lineno = autogeneratedPos // less confusing than end of input
- dclcontext = PEXTERN
+ base.Pos = autogeneratedPos // less confusing than end of input
+ dclcontext = ir.PEXTERN
// func sym(p, q *T) bool
- tfn := nod(OTFUNC, nil, nil)
- tfn.List.Set2(
+ tfn := ir.Nod(ir.OTFUNC, nil, nil)
+ tfn.PtrList().Set2(
namedfield("p", types.NewPtr(t)),
namedfield("q", types.NewPtr(t)),
)
- tfn.Rlist.Set1(namedfield("r", types.Types[TBOOL]))
+ tfn.PtrRlist().Set1(namedfield("r", types.Types[types.TBOOL]))
fn := dclfunc(sym, tfn)
- np := asNode(tfn.Type.Params().Field(0).Nname)
- nq := asNode(tfn.Type.Params().Field(1).Nname)
- nr := asNode(tfn.Type.Results().Field(0).Nname)
+ np := ir.AsNode(tfn.Type().Params().Field(0).Nname)
+ nq := ir.AsNode(tfn.Type().Params().Field(1).Nname)
+ nr := ir.AsNode(tfn.Type().Results().Field(0).Nname)
// Label to jump to if an equality test fails.
neq := autolabel(".neq")
@@ -539,9 +541,9 @@ func geneq(t *types.Type) *obj.LSym {
// so t must be either an array or a struct.
switch t.Etype {
default:
- Fatalf("geneq %v", t)
+ base.Fatalf("geneq %v", t)
- case TARRAY:
+ case types.TARRAY:
nelem := t.NumElem()
// checkAll generates code to check the equality of all array elements.
@@ -565,17 +567,17 @@ func geneq(t *types.Type) *obj.LSym {
//
// TODO(josharian): consider doing some loop unrolling
// for larger nelem as well, processing a few elements at a time in a loop.
- checkAll := func(unroll int64, last bool, eq func(pi, qi *Node) *Node) {
+ checkAll := func(unroll int64, last bool, eq func(pi, qi ir.Node) ir.Node) {
// checkIdx generates a node to check for equality at index i.
- checkIdx := func(i *Node) *Node {
+ checkIdx := func(i ir.Node) ir.Node {
// pi := p[i]
- pi := nod(OINDEX, np, i)
+ pi := ir.Nod(ir.OINDEX, np, i)
pi.SetBounded(true)
- pi.Type = t.Elem()
+ pi.SetType(t.Elem())
// qi := q[i]
- qi := nod(OINDEX, nq, i)
+ qi := ir.Nod(ir.OINDEX, nq, i)
qi.SetBounded(true)
- qi.Type = t.Elem()
+ qi.SetType(t.Elem())
return eq(pi, qi)
}
@@ -587,68 +589,68 @@ func geneq(t *types.Type) *obj.LSym {
// Generate a series of checks.
for i := int64(0); i < nelem; i++ {
// if check {} else { goto neq }
- nif := nod(OIF, checkIdx(nodintconst(i)), nil)
- nif.Rlist.Append(nodSym(OGOTO, nil, neq))
- fn.Nbody.Append(nif)
+ nif := ir.Nod(ir.OIF, checkIdx(nodintconst(i)), nil)
+ nif.PtrRlist().Append(nodSym(ir.OGOTO, nil, neq))
+ fn.PtrBody().Append(nif)
}
if last {
- fn.Nbody.Append(nod(OAS, nr, checkIdx(nodintconst(nelem))))
+ fn.PtrBody().Append(ir.Nod(ir.OAS, nr, checkIdx(nodintconst(nelem))))
}
} else {
// Generate a for loop.
// for i := 0; i < nelem; i++
- i := temp(types.Types[TINT])
- init := nod(OAS, i, nodintconst(0))
- cond := nod(OLT, i, nodintconst(nelem))
- post := nod(OAS, i, nod(OADD, i, nodintconst(1)))
- loop := nod(OFOR, cond, post)
- loop.Ninit.Append(init)
+ i := temp(types.Types[types.TINT])
+ init := ir.Nod(ir.OAS, i, nodintconst(0))
+ cond := ir.Nod(ir.OLT, i, nodintconst(nelem))
+ post := ir.Nod(ir.OAS, i, ir.Nod(ir.OADD, i, nodintconst(1)))
+ loop := ir.Nod(ir.OFOR, cond, post)
+ loop.PtrInit().Append(init)
// if eq(pi, qi) {} else { goto neq }
- nif := nod(OIF, checkIdx(i), nil)
- nif.Rlist.Append(nodSym(OGOTO, nil, neq))
- loop.Nbody.Append(nif)
- fn.Nbody.Append(loop)
+ nif := ir.Nod(ir.OIF, checkIdx(i), nil)
+ nif.PtrRlist().Append(nodSym(ir.OGOTO, nil, neq))
+ loop.PtrBody().Append(nif)
+ fn.PtrBody().Append(loop)
if last {
- fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
+ fn.PtrBody().Append(ir.Nod(ir.OAS, nr, nodbool(true)))
}
}
}
switch t.Elem().Etype {
- case TSTRING:
+ case types.TSTRING:
// Do two loops. First, check that all the lengths match (cheap).
// Second, check that all the contents match (expensive).
// TODO: when the array size is small, unroll the length match checks.
- checkAll(3, false, func(pi, qi *Node) *Node {
+ checkAll(3, false, func(pi, qi ir.Node) ir.Node {
// Compare lengths.
eqlen, _ := eqstring(pi, qi)
return eqlen
})
- checkAll(1, true, func(pi, qi *Node) *Node {
+ checkAll(1, true, func(pi, qi ir.Node) ir.Node {
// Compare contents.
_, eqmem := eqstring(pi, qi)
return eqmem
})
- case TFLOAT32, TFLOAT64:
- checkAll(2, true, func(pi, qi *Node) *Node {
+ case types.TFLOAT32, types.TFLOAT64:
+ checkAll(2, true, func(pi, qi ir.Node) ir.Node {
// p[i] == q[i]
- return nod(OEQ, pi, qi)
+ return ir.Nod(ir.OEQ, pi, qi)
})
// TODO: pick apart structs, do them piecemeal too
default:
- checkAll(1, true, func(pi, qi *Node) *Node {
+ checkAll(1, true, func(pi, qi ir.Node) ir.Node {
// p[i] == q[i]
- return nod(OEQ, pi, qi)
+ return ir.Nod(ir.OEQ, pi, qi)
})
}
- case TSTRUCT:
+ case types.TSTRUCT:
// Build a list of conditions to satisfy.
// The conditions are a list-of-lists. Conditions are reorderable
// within each inner list. The outer lists must be evaluated in order.
- var conds [][]*Node
- conds = append(conds, []*Node{})
- and := func(n *Node) {
+ var conds [][]ir.Node
+ conds = append(conds, []ir.Node{})
+ and := func(n ir.Node) {
i := len(conds) - 1
conds[i] = append(conds[i], n)
}
@@ -668,21 +670,21 @@ func geneq(t *types.Type) *obj.LSym {
if !IsRegularMemory(f.Type) {
if EqCanPanic(f.Type) {
// Enforce ordering by starting a new set of reorderable conditions.
- conds = append(conds, []*Node{})
+ conds = append(conds, []ir.Node{})
}
- p := nodSym(OXDOT, np, f.Sym)
- q := nodSym(OXDOT, nq, f.Sym)
+ p := nodSym(ir.OXDOT, np, f.Sym)
+ q := nodSym(ir.OXDOT, nq, f.Sym)
switch {
case f.Type.IsString():
eqlen, eqmem := eqstring(p, q)
and(eqlen)
and(eqmem)
default:
- and(nod(OEQ, p, q))
+ and(ir.Nod(ir.OEQ, p, q))
}
if EqCanPanic(f.Type) {
// Also enforce ordering after something that can panic.
- conds = append(conds, []*Node{})
+ conds = append(conds, []ir.Node{})
}
i++
continue
@@ -707,10 +709,10 @@ func geneq(t *types.Type) *obj.LSym {
// Sort conditions to put runtime calls last.
// Preserve the rest of the ordering.
- var flatConds []*Node
+ var flatConds []ir.Node
for _, c := range conds {
- isCall := func(n *Node) bool {
- return n.Op == OCALL || n.Op == OCALLFUNC
+ isCall := func(n ir.Node) bool {
+ return n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC
}
sort.SliceStable(c, func(i, j int) bool {
return !isCall(c[i]) && isCall(c[j])
@@ -719,54 +721,54 @@ func geneq(t *types.Type) *obj.LSym {
}
if len(flatConds) == 0 {
- fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
+ fn.PtrBody().Append(ir.Nod(ir.OAS, nr, nodbool(true)))
} else {
for _, c := range flatConds[:len(flatConds)-1] {
// if cond {} else { goto neq }
- n := nod(OIF, c, nil)
- n.Rlist.Append(nodSym(OGOTO, nil, neq))
- fn.Nbody.Append(n)
+ n := ir.Nod(ir.OIF, c, nil)
+ n.PtrRlist().Append(nodSym(ir.OGOTO, nil, neq))
+ fn.PtrBody().Append(n)
}
- fn.Nbody.Append(nod(OAS, nr, flatConds[len(flatConds)-1]))
+ fn.PtrBody().Append(ir.Nod(ir.OAS, nr, flatConds[len(flatConds)-1]))
}
}
// ret:
// return
ret := autolabel(".ret")
- fn.Nbody.Append(nodSym(OLABEL, nil, ret))
- fn.Nbody.Append(nod(ORETURN, nil, nil))
+ fn.PtrBody().Append(nodSym(ir.OLABEL, nil, ret))
+ fn.PtrBody().Append(ir.Nod(ir.ORETURN, nil, nil))
// neq:
// r = false
// return (or goto ret)
- fn.Nbody.Append(nodSym(OLABEL, nil, neq))
- fn.Nbody.Append(nod(OAS, nr, nodbool(false)))
+ fn.PtrBody().Append(nodSym(ir.OLABEL, nil, neq))
+ fn.PtrBody().Append(ir.Nod(ir.OAS, nr, nodbool(false)))
if EqCanPanic(t) || hasCall(fn) {
// Epilogue is large, so share it with the equal case.
- fn.Nbody.Append(nodSym(OGOTO, nil, ret))
+ fn.PtrBody().Append(nodSym(ir.OGOTO, nil, ret))
} else {
// Epilogue is small, so don't bother sharing.
- fn.Nbody.Append(nod(ORETURN, nil, nil))
+ fn.PtrBody().Append(ir.Nod(ir.ORETURN, nil, nil))
}
// TODO(khr): the epilogue size detection condition above isn't perfect.
// We should really do a generic CL that shares epilogues across
// the board. See #24936.
- if Debug.r != 0 {
- dumplist("geneq body", fn.Nbody)
+ if base.Flag.LowerR != 0 {
+ ir.DumpList("geneq body", fn.Body())
}
funcbody()
- fn.Func.SetDupok(true)
+ fn.Func().SetDupok(true)
fn = typecheck(fn, ctxStmt)
Curfn = fn
- typecheckslice(fn.Nbody.Slice(), ctxStmt)
+ typecheckslice(fn.Body().Slice(), ctxStmt)
Curfn = nil
- if debug_dclstack != 0 {
+ if base.Debug.DclStack != 0 {
testdclstack()
}
@@ -774,7 +776,7 @@ func geneq(t *types.Type) *obj.LSym {
// We are comparing a struct or an array,
// neither of which can be nil, and our comparisons
// are shallow.
- fn.Func.SetNilCheckDisabled(true)
+ fn.Func().SetNilCheckDisabled(true)
xtop = append(xtop, fn)
// Generate a closure which points at the function we just generated.
@@ -783,32 +785,32 @@ func geneq(t *types.Type) *obj.LSym {
return closure
}
-func hasCall(n *Node) bool {
- if n.Op == OCALL || n.Op == OCALLFUNC {
+func hasCall(n ir.Node) bool {
+ if n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC {
return true
}
- if n.Left != nil && hasCall(n.Left) {
+ if n.Left() != nil && hasCall(n.Left()) {
return true
}
- if n.Right != nil && hasCall(n.Right) {
+ if n.Right() != nil && hasCall(n.Right()) {
return true
}
- for _, x := range n.Ninit.Slice() {
+ for _, x := range n.Init().Slice() {
if hasCall(x) {
return true
}
}
- for _, x := range n.Nbody.Slice() {
+ for _, x := range n.Body().Slice() {
if hasCall(x) {
return true
}
}
- for _, x := range n.List.Slice() {
+ for _, x := range n.List().Slice() {
if hasCall(x) {
return true
}
}
- for _, x := range n.Rlist.Slice() {
+ for _, x := range n.Rlist().Slice() {
if hasCall(x) {
return true
}
@@ -818,10 +820,10 @@ func hasCall(n *Node) bool {
// eqfield returns the node
// p.field == q.field
-func eqfield(p *Node, q *Node, field *types.Sym) *Node {
- nx := nodSym(OXDOT, p, field)
- ny := nodSym(OXDOT, q, field)
- ne := nod(OEQ, nx, ny)
+func eqfield(p ir.Node, q ir.Node, field *types.Sym) ir.Node {
+ nx := nodSym(ir.OXDOT, p, field)
+ ny := nodSym(ir.OXDOT, q, field)
+ ne := ir.Nod(ir.OEQ, nx, ny)
return ne
}
@@ -831,23 +833,23 @@ func eqfield(p *Node, q *Node, field *types.Sym) *Node {
// memequal(s.ptr, t.ptr, len(s))
// which can be used to construct string equality comparison.
// eqlen must be evaluated before eqmem, and short-circuiting is required.
-func eqstring(s, t *Node) (eqlen, eqmem *Node) {
- s = conv(s, types.Types[TSTRING])
- t = conv(t, types.Types[TSTRING])
- sptr := nod(OSPTR, s, nil)
- tptr := nod(OSPTR, t, nil)
- slen := conv(nod(OLEN, s, nil), types.Types[TUINTPTR])
- tlen := conv(nod(OLEN, t, nil), types.Types[TUINTPTR])
+func eqstring(s, t ir.Node) (eqlen, eqmem ir.Node) {
+ s = conv(s, types.Types[types.TSTRING])
+ t = conv(t, types.Types[types.TSTRING])
+ sptr := ir.Nod(ir.OSPTR, s, nil)
+ tptr := ir.Nod(ir.OSPTR, t, nil)
+ slen := conv(ir.Nod(ir.OLEN, s, nil), types.Types[types.TUINTPTR])
+ tlen := conv(ir.Nod(ir.OLEN, t, nil), types.Types[types.TUINTPTR])
fn := syslook("memequal")
- fn = substArgTypes(fn, types.Types[TUINT8], types.Types[TUINT8])
- call := nod(OCALL, fn, nil)
- call.List.Append(sptr, tptr, slen.copy())
+ fn = substArgTypes(fn, types.Types[types.TUINT8], types.Types[types.TUINT8])
+ call := ir.Nod(ir.OCALL, fn, nil)
+ call.PtrList().Append(sptr, tptr, ir.Copy(slen))
call = typecheck(call, ctxExpr|ctxMultiOK)
- cmp := nod(OEQ, slen, tlen)
+ cmp := ir.Nod(ir.OEQ, slen, tlen)
cmp = typecheck(cmp, ctxExpr)
- cmp.Type = types.Types[TBOOL]
+ cmp.SetType(types.Types[types.TBOOL])
return cmp, call
}
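eqstring above splits string equality into a cheap length check (eqlen) and an expensive byte comparison via memequal (eqmem); the caller must evaluate eqlen first and short-circuit. A minimal standalone sketch of that split in plain Go rather than compiler IR — memequal here is a stand-in that assumes equal lengths:

package main

import "fmt"

// memequal compares byte contents; callers guarantee equal lengths,
// mirroring the runtime helper's contract.
func memequal(a, b []byte) bool {
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}

func eqstring(s, t string) bool {
	// eqlen: cheap length comparison, evaluated first.
	if len(s) != len(t) {
		return false
	}
	// eqmem: content comparison, reached only when lengths match.
	return memequal([]byte(s), []byte(t))
}

func main() {
	fmt.Println(eqstring("go", "go"), eqstring("go", "gc")) // true false
}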
@@ -857,58 +859,58 @@ func eqstring(s, t *Node) (eqlen, eqmem *Node) {
// ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
// which can be used to construct interface equality comparison.
// eqtab must be evaluated before eqdata, and short-circuiting is required.
-func eqinterface(s, t *Node) (eqtab, eqdata *Node) {
- if !types.Identical(s.Type, t.Type) {
- Fatalf("eqinterface %v %v", s.Type, t.Type)
+func eqinterface(s, t ir.Node) (eqtab, eqdata ir.Node) {
+ if !types.Identical(s.Type(), t.Type()) {
+ base.Fatalf("eqinterface %v %v", s.Type(), t.Type())
}
// func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
// func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
- var fn *Node
- if s.Type.IsEmptyInterface() {
+ var fn ir.Node
+ if s.Type().IsEmptyInterface() {
fn = syslook("efaceeq")
} else {
fn = syslook("ifaceeq")
}
- stab := nod(OITAB, s, nil)
- ttab := nod(OITAB, t, nil)
- sdata := nod(OIDATA, s, nil)
- tdata := nod(OIDATA, t, nil)
- sdata.Type = types.Types[TUNSAFEPTR]
- tdata.Type = types.Types[TUNSAFEPTR]
+ stab := ir.Nod(ir.OITAB, s, nil)
+ ttab := ir.Nod(ir.OITAB, t, nil)
+ sdata := ir.Nod(ir.OIDATA, s, nil)
+ tdata := ir.Nod(ir.OIDATA, t, nil)
+ sdata.SetType(types.Types[types.TUNSAFEPTR])
+ tdata.SetType(types.Types[types.TUNSAFEPTR])
sdata.SetTypecheck(1)
tdata.SetTypecheck(1)
- call := nod(OCALL, fn, nil)
- call.List.Append(stab, sdata, tdata)
+ call := ir.Nod(ir.OCALL, fn, nil)
+ call.PtrList().Append(stab, sdata, tdata)
call = typecheck(call, ctxExpr|ctxMultiOK)
- cmp := nod(OEQ, stab, ttab)
+ cmp := ir.Nod(ir.OEQ, stab, ttab)
cmp = typecheck(cmp, ctxExpr)
- cmp.Type = types.Types[TBOOL]
+ cmp.SetType(types.Types[types.TBOOL])
return cmp, call
}
// eqmem returns the node
// memequal(&p.field, &q.field [, size])
-func eqmem(p *Node, q *Node, field *types.Sym, size int64) *Node {
- nx := nod(OADDR, nodSym(OXDOT, p, field), nil)
- ny := nod(OADDR, nodSym(OXDOT, q, field), nil)
+func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node {
+ nx := ir.Nod(ir.OADDR, nodSym(ir.OXDOT, p, field), nil)
+ ny := ir.Nod(ir.OADDR, nodSym(ir.OXDOT, q, field), nil)
nx = typecheck(nx, ctxExpr)
ny = typecheck(ny, ctxExpr)
- fn, needsize := eqmemfunc(size, nx.Type.Elem())
- call := nod(OCALL, fn, nil)
- call.List.Append(nx)
- call.List.Append(ny)
+ fn, needsize := eqmemfunc(size, nx.Type().Elem())
+ call := ir.Nod(ir.OCALL, fn, nil)
+ call.PtrList().Append(nx)
+ call.PtrList().Append(ny)
if needsize {
- call.List.Append(nodintconst(size))
+ call.PtrList().Append(nodintconst(size))
}
return call
}
-func eqmemfunc(size int64, t *types.Type) (fn *Node, needsize bool) {
+func eqmemfunc(size int64, t *types.Type) (fn ir.Node, needsize bool) {
switch size {
default:
fn = syslook("memequal")
@@ -949,7 +951,7 @@ func memrun(t *types.Type, start int) (size int64, next int) {
// by padding.
func ispaddedfield(t *types.Type, i int) bool {
if !t.IsStruct() {
- Fatalf("ispaddedfield called non-struct %v", t)
+ base.Fatalf("ispaddedfield called non-struct %v", t)
}
end := t.Width
if i+1 < t.NumFields() {
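The alg.go changes above are largely mechanical: direct field reads on the old *Node struct (n.Op, n.Left, n.Nbody) become accessor calls on the new ir.Node interface (n.Op(), n.Left(), n.Body()), node construction moves from nod(...) to ir.Nod(...), Fatalf moves to base.Fatalf, and type kinds pick up the types. qualifier. A standalone sketch of the struct-to-interface shape, using illustrative names (Op, Node, node) rather than the real compiler types:

package main

import "fmt"

type Op int

const (
	OCALL Op = iota
	OADD
)

// Before: a concrete struct with exported fields, accessed as n.Op, n.Left.
type oldNode struct {
	Op          Op
	Left, Right *oldNode
}

// After: an interface with accessor methods, accessed as n.Op(), n.Left().
type Node interface {
	Op() Op
	Left() Node
	Right() Node
}

type node struct {
	op          Op
	left, right Node
}

func (n *node) Op() Op      { return n.op }
func (n *node) Left() Node  { return n.left }
func (n *node) Right() Node { return n.right }

// hasCall mirrors the rewritten walker above, minus the Init/Body/List/Rlist
// slices: every field read is now a method call.
func hasCall(n Node) bool {
	if n == nil {
		return false
	}
	return n.Op() == OCALL || hasCall(n.Left()) || hasCall(n.Right())
}

func main() {
	n := &node{op: OADD, left: &node{op: OCALL}}
	fmt.Println(hasCall(n)) // true
}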
diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go
index a3a0c8fce8..edf7d263a3 100644
--- a/src/cmd/compile/internal/gc/align.go
+++ b/src/cmd/compile/internal/gc/align.go
@@ -6,6 +6,8 @@ package gc
import (
"bytes"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"fmt"
"sort"
@@ -21,7 +23,7 @@ var defercalc int
func Rnd(o int64, r int64) int64 {
if r < 1 || r > 8 || r&(r-1) != 0 {
- Fatalf("rnd %d", r)
+ base.Fatalf("rnd %d", r)
}
return (o + r - 1) &^ (r - 1)
}
@@ -39,7 +41,7 @@ func expandiface(t *types.Type) {
case langSupported(1, 14, t.Pkg()) && !explicit && types.Identical(m.Type, prev.Type):
return
default:
- yyerrorl(m.Pos, "duplicate method %s", m.Sym.Name)
+ base.ErrorfAt(m.Pos, "duplicate method %s", m.Sym.Name)
}
methods = append(methods, m)
}
@@ -59,7 +61,7 @@ func expandiface(t *types.Type) {
}
if !m.Type.IsInterface() {
- yyerrorl(m.Pos, "interface contains embedded non-interface %v", m.Type)
+ base.ErrorfAt(m.Pos, "interface contains embedded non-interface %v", m.Type)
m.SetBroke(true)
t.SetBroke(true)
// Add to fields so that error messages
@@ -74,11 +76,8 @@ func expandiface(t *types.Type) {
// (including broken ones, if any) and add to t's
// method set.
for _, t1 := range m.Type.Fields().Slice() {
- f := types.NewField()
- f.Pos = m.Pos // preserve embedding position
- f.Sym = t1.Sym
- f.Type = t1.Type
- f.SetBroke(t1.Broke())
+ // Use m.Pos rather than t1.Pos to preserve embedding position.
+ f := types.NewField(m.Pos, t1.Sym, t1.Type)
addMethod(f, false)
}
}
@@ -86,7 +85,7 @@ func expandiface(t *types.Type) {
sort.Sort(methcmp(methods))
if int64(len(methods)) >= thearch.MAXWIDTH/int64(Widthptr) {
- yyerrorl(typePos(t), "interface too large")
+ base.ErrorfAt(typePos(t), "interface too large")
}
for i, m := range methods {
m.Offset = int64(i) * int64(Widthptr)
@@ -119,7 +118,7 @@ func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 {
o = Rnd(o, int64(f.Type.Align))
}
f.Offset = o
- if n := asNode(f.Nname); n != nil {
+ if n := ir.AsNode(f.Nname); n != nil {
// addrescapes has similar code to update these offsets.
// Usually addrescapes runs after widstruct,
// in which case we could drop this,
@@ -127,17 +126,17 @@ func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 {
// NOTE(rsc): This comment may be stale.
// It's possible the ordering has changed and this is
// now the common case. I'm not sure.
- if n.Name.Param.Stackcopy != nil {
- n.Name.Param.Stackcopy.Xoffset = o
- n.Xoffset = 0
+ if n.Name().Param.Stackcopy != nil {
+ n.Name().Param.Stackcopy.SetOffset(o)
+ n.SetOffset(0)
} else {
- n.Xoffset = o
+ n.SetOffset(o)
}
}
w := f.Type.Width
if w < 0 {
- Fatalf("invalid width %d", f.Type.Width)
+ base.Fatalf("invalid width %d", f.Type.Width)
}
if w == 0 {
lastzero = o
@@ -150,7 +149,7 @@ func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 {
maxwidth = 1<<31 - 1
}
if o >= maxwidth {
- yyerrorl(typePos(errtype), "type %L too large", errtype)
+ base.ErrorfAt(typePos(errtype), "type %L too large", errtype)
o = 8 // small but nonzero
}
}
@@ -199,7 +198,7 @@ func findTypeLoop(t *types.Type, path *[]*types.Type) bool {
}
*path = append(*path, t)
- if p := asNode(t.Nod).Name.Param; p != nil && findTypeLoop(p.Ntype.Type, path) {
+ if p := ir.AsNode(t.Nod).Name().Param; p != nil && findTypeLoop(p.Ntype.Type(), path) {
return true
}
*path = (*path)[:len(*path)-1]
@@ -207,17 +206,17 @@ func findTypeLoop(t *types.Type, path *[]*types.Type) bool {
// Anonymous type. Recurse on contained types.
switch t.Etype {
- case TARRAY:
+ case types.TARRAY:
if findTypeLoop(t.Elem(), path) {
return true
}
- case TSTRUCT:
+ case types.TSTRUCT:
for _, f := range t.Fields().Slice() {
if findTypeLoop(f.Type, path) {
return true
}
}
- case TINTER:
+ case types.TINTER:
for _, m := range t.Methods().Slice() {
if m.Type.IsInterface() { // embedded interface
if findTypeLoop(m.Type, path) {
@@ -238,7 +237,7 @@ func reportTypeLoop(t *types.Type) {
var l []*types.Type
if !findTypeLoop(t, &l) {
- Fatalf("failed to find type loop for: %v", t)
+ base.Fatalf("failed to find type loop for: %v", t)
}
// Rotate loop so that the earliest type declaration is first.
@@ -253,11 +252,11 @@ func reportTypeLoop(t *types.Type) {
var msg bytes.Buffer
fmt.Fprintf(&msg, "invalid recursive type %v\n", l[0])
for _, t := range l {
- fmt.Fprintf(&msg, "\t%v: %v refers to\n", linestr(typePos(t)), t)
+ fmt.Fprintf(&msg, "\t%v: %v refers to\n", base.FmtPos(typePos(t)), t)
t.SetBroke(true)
}
- fmt.Fprintf(&msg, "\t%v: %v", linestr(typePos(l[0])), l[0])
- yyerrorl(typePos(l[0]), msg.String())
+ fmt.Fprintf(&msg, "\t%v: %v", base.FmtPos(typePos(l[0])), l[0])
+ base.ErrorfAt(typePos(l[0]), msg.String())
}
// dowidth calculates and stores the size and alignment for t.
@@ -271,7 +270,7 @@ func dowidth(t *types.Type) {
return
}
if Widthptr == 0 {
- Fatalf("dowidth without betypeinit")
+ base.Fatalf("dowidth without betypeinit")
}
if t == nil {
@@ -295,7 +294,7 @@ func dowidth(t *types.Type) {
return
}
t.SetBroke(true)
- Fatalf("width not calculated: %v", t)
+ base.Fatalf("width not calculated: %v", t)
}
// break infinite recursion if the broken recursive type
@@ -307,9 +306,9 @@ func dowidth(t *types.Type) {
// defer checkwidth calls until after we're done
defercheckwidth()
- lno := lineno
- if asNode(t.Nod) != nil {
- lineno = asNode(t.Nod).Pos
+ lno := base.Pos
+ if ir.AsNode(t.Nod) != nil {
+ base.Pos = ir.AsNode(t.Nod).Pos()
}
t.Width = -2
@@ -317,7 +316,7 @@ func dowidth(t *types.Type) {
et := t.Etype
switch et {
- case TFUNC, TCHAN, TMAP, TSTRING:
+ case types.TFUNC, types.TCHAN, types.TMAP, types.TSTRING:
break
// simtype == 0 during bootstrap
@@ -330,44 +329,44 @@ func dowidth(t *types.Type) {
var w int64
switch et {
default:
- Fatalf("dowidth: unknown type: %v", t)
+ base.Fatalf("dowidth: unknown type: %v", t)
// compiler-specific stuff
- case TINT8, TUINT8, TBOOL:
+ case types.TINT8, types.TUINT8, types.TBOOL:
// bool is int8
w = 1
- case TINT16, TUINT16:
+ case types.TINT16, types.TUINT16:
w = 2
- case TINT32, TUINT32, TFLOAT32:
+ case types.TINT32, types.TUINT32, types.TFLOAT32:
w = 4
- case TINT64, TUINT64, TFLOAT64:
+ case types.TINT64, types.TUINT64, types.TFLOAT64:
w = 8
t.Align = uint8(Widthreg)
- case TCOMPLEX64:
+ case types.TCOMPLEX64:
w = 8
t.Align = 4
- case TCOMPLEX128:
+ case types.TCOMPLEX128:
w = 16
t.Align = uint8(Widthreg)
- case TPTR:
+ case types.TPTR:
w = int64(Widthptr)
checkwidth(t.Elem())
- case TUNSAFEPTR:
+ case types.TUNSAFEPTR:
w = int64(Widthptr)
- case TINTER: // implemented as 2 pointers
+ case types.TINTER: // implemented as 2 pointers
w = 2 * int64(Widthptr)
t.Align = uint8(Widthptr)
expandiface(t)
- case TCHAN: // implemented as pointer
+ case types.TCHAN: // implemented as pointer
w = int64(Widthptr)
checkwidth(t.Elem())
@@ -377,35 +376,35 @@ func dowidth(t *types.Type) {
t1 := types.NewChanArgs(t)
checkwidth(t1)
- case TCHANARGS:
+ case types.TCHANARGS:
t1 := t.ChanArgs()
dowidth(t1) // just in case
if t1.Elem().Width >= 1<<16 {
- yyerrorl(typePos(t1), "channel element type too large (>64kB)")
+ base.ErrorfAt(typePos(t1), "channel element type too large (>64kB)")
}
w = 1 // anything will do
- case TMAP: // implemented as pointer
+ case types.TMAP: // implemented as pointer
w = int64(Widthptr)
checkwidth(t.Elem())
checkwidth(t.Key())
- case TFORW: // should have been filled in
+ case types.TFORW: // should have been filled in
reportTypeLoop(t)
w = 1 // anything will do
- case TANY:
- // dummy type; should be replaced before use.
- Fatalf("dowidth any")
+ case types.TANY:
+ // not a real type; should be replaced before use.
+ base.Fatalf("dowidth any")
- case TSTRING:
+ case types.TSTRING:
if sizeofString == 0 {
- Fatalf("early dowidth string")
+ base.Fatalf("early dowidth string")
}
w = sizeofString
t.Align = uint8(Widthptr)
- case TARRAY:
+ case types.TARRAY:
if t.Elem() == nil {
break
}
@@ -414,13 +413,13 @@ func dowidth(t *types.Type) {
if t.Elem().Width != 0 {
cap := (uint64(thearch.MAXWIDTH) - 1) / uint64(t.Elem().Width)
if uint64(t.NumElem()) > cap {
- yyerrorl(typePos(t), "type %L larger than address space", t)
+ base.ErrorfAt(typePos(t), "type %L larger than address space", t)
}
}
w = t.NumElem() * t.Elem().Width
t.Align = t.Elem().Align
- case TSLICE:
+ case types.TSLICE:
if t.Elem() == nil {
break
}
@@ -428,46 +427,46 @@ func dowidth(t *types.Type) {
checkwidth(t.Elem())
t.Align = uint8(Widthptr)
- case TSTRUCT:
+ case types.TSTRUCT:
if t.IsFuncArgStruct() {
- Fatalf("dowidth fn struct %v", t)
+ base.Fatalf("dowidth fn struct %v", t)
}
w = widstruct(t, t, 0, 1)
// make fake type to check later to
// trigger function argument computation.
- case TFUNC:
+ case types.TFUNC:
t1 := types.NewFuncArgs(t)
checkwidth(t1)
w = int64(Widthptr) // width of func type is pointer
// function is 3 concatenated structures;
// compute their widths as side-effect.
- case TFUNCARGS:
+ case types.TFUNCARGS:
t1 := t.FuncArgs()
w = widstruct(t1, t1.Recvs(), 0, 0)
w = widstruct(t1, t1.Params(), w, Widthreg)
w = widstruct(t1, t1.Results(), w, Widthreg)
t1.Extra.(*types.Func).Argwid = w
if w%int64(Widthreg) != 0 {
- Warn("bad type %v %d\n", t1, w)
+ base.Warn("bad type %v %d\n", t1, w)
}
t.Align = 1
}
if Widthptr == 4 && w != int64(int32(w)) {
- yyerrorl(typePos(t), "type %v too large", t)
+ base.ErrorfAt(typePos(t), "type %v too large", t)
}
t.Width = w
if t.Align == 0 {
if w == 0 || w > 8 || w&(w-1) != 0 {
- Fatalf("invalid alignment for %v", t)
+ base.Fatalf("invalid alignment for %v", t)
}
t.Align = uint8(w)
}
- lineno = lno
+ base.Pos = lno
resumecheckwidth()
}
@@ -498,7 +497,7 @@ func checkwidth(t *types.Type) {
// function arg structs should not be checked
// outside of the enclosing function.
if t.IsFuncArgStruct() {
- Fatalf("checkwidth %v", t)
+ base.Fatalf("checkwidth %v", t)
}
if defercalc == 0 {
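Rnd above rounds an offset o up to a power-of-two alignment r in [1,8] using (o + r - 1) &^ (r - 1). A standalone sketch with worked values:

package main

import "fmt"

// rnd rounds o up to the next multiple of r; r must be a power of two
// in [1,8], matching the guard in Rnd above.
func rnd(o, r int64) int64 {
	if r < 1 || r > 8 || r&(r-1) != 0 {
		panic(fmt.Sprintf("rnd %d", r))
	}
	return (o + r - 1) &^ (r - 1)
}

func main() {
	fmt.Println(rnd(13, 8)) // 16: 13+7 = 20, clearing the low 3 bits gives 16
	fmt.Println(rnd(16, 8)) // 16: already aligned
	fmt.Println(rnd(1, 4))  // 4
}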
diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go
index 10f21f86df..a470b842ff 100644
--- a/src/cmd/compile/internal/gc/bexport.go
+++ b/src/cmd/compile/internal/gc/bexport.go
@@ -5,6 +5,7 @@
package gc
import (
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
)
@@ -12,6 +13,15 @@ type exporter struct {
marked map[*types.Type]bool // types already seen by markType
}
+// markObject visits a reachable object.
+func (p *exporter) markObject(n ir.Node) {
+ if n.Op() == ir.ONAME && n.Class() == ir.PFUNC {
+ inlFlood(n)
+ }
+
+ p.markType(n.Type())
+}
+
// markType recursively visits types reachable from t to identify
// functions whose inline bodies may be needed.
func (p *exporter) markType(t *types.Type) {
@@ -25,10 +35,10 @@ func (p *exporter) markType(t *types.Type) {
// only their unexpanded method set (i.e., exclusive of
// interface embeddings), and the switch statement below
// handles their full method set.
- if t.Sym != nil && t.Etype != TINTER {
+ if t.Sym != nil && t.Etype != types.TINTER {
for _, m := range t.Methods().Slice() {
if types.IsExported(m.Sym.Name) {
- p.markType(m.Type)
+ p.markObject(ir.AsNode(m.Nname))
}
}
}
@@ -43,36 +53,31 @@ func (p *exporter) markType(t *types.Type) {
// the user already needs some way to construct values of
// those types.
switch t.Etype {
- case TPTR, TARRAY, TSLICE:
+ case types.TPTR, types.TARRAY, types.TSLICE:
p.markType(t.Elem())
- case TCHAN:
+ case types.TCHAN:
if t.ChanDir().CanRecv() {
p.markType(t.Elem())
}
- case TMAP:
+ case types.TMAP:
p.markType(t.Key())
p.markType(t.Elem())
- case TSTRUCT:
+ case types.TSTRUCT:
for _, f := range t.FieldSlice() {
if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
p.markType(f.Type)
}
}
- case TFUNC:
- // If t is the type of a function or method, then
- // t.Nname() is its ONAME. Mark its inline body and
- // any recursively called functions for export.
- inlFlood(asNode(t.Nname()))
-
+ case types.TFUNC:
for _, f := range t.Results().FieldSlice() {
p.markType(f.Type)
}
- case TINTER:
+ case types.TINTER:
for _, f := range t.FieldSlice() {
if types.IsExported(f.Sym.Name) {
p.markType(f.Type)
@@ -129,23 +134,23 @@ func predeclared() []*types.Type {
// elements have been initialized before
predecl = []*types.Type{
// basic types
- types.Types[TBOOL],
- types.Types[TINT],
- types.Types[TINT8],
- types.Types[TINT16],
- types.Types[TINT32],
- types.Types[TINT64],
- types.Types[TUINT],
- types.Types[TUINT8],
- types.Types[TUINT16],
- types.Types[TUINT32],
- types.Types[TUINT64],
- types.Types[TUINTPTR],
- types.Types[TFLOAT32],
- types.Types[TFLOAT64],
- types.Types[TCOMPLEX64],
- types.Types[TCOMPLEX128],
- types.Types[TSTRING],
+ types.Types[types.TBOOL],
+ types.Types[types.TINT],
+ types.Types[types.TINT8],
+ types.Types[types.TINT16],
+ types.Types[types.TINT32],
+ types.Types[types.TINT64],
+ types.Types[types.TUINT],
+ types.Types[types.TUINT8],
+ types.Types[types.TUINT16],
+ types.Types[types.TUINT32],
+ types.Types[types.TUINT64],
+ types.Types[types.TUINTPTR],
+ types.Types[types.TFLOAT32],
+ types.Types[types.TFLOAT64],
+ types.Types[types.TCOMPLEX64],
+ types.Types[types.TCOMPLEX128],
+ types.Types[types.TSTRING],
// basic type aliases
types.Bytetype,
@@ -161,16 +166,16 @@ func predeclared() []*types.Type {
types.UntypedFloat,
types.UntypedComplex,
types.UntypedString,
- types.Types[TNIL],
+ types.Types[types.TNIL],
// package unsafe
- types.Types[TUNSAFEPTR],
+ types.Types[types.TUNSAFEPTR],
// invalid type (package contains errors)
- types.Types[Txxx],
+ types.Types[types.Txxx],
// any type, for builtin export data
- types.Types[TANY],
+ types.Types[types.TANY],
}
}
return predecl
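markType (and the new markObject) walk the type graph recursively; the exporter's marked map is what lets recursive types terminate. A standalone sketch of that seen-set pattern, with a simplified Type and Kind standing in for the real types package:

package main

import "fmt"

type Kind int

const (
	Basic Kind = iota
	Ptr
	Struct
)

type Type struct {
	Name   string
	Kind   Kind
	Elem   *Type   // pointee, for Ptr
	Fields []*Type // fields, for Struct
}

type exporter struct {
	marked map[*Type]bool // types already seen, as in the real exporter
}

func (p *exporter) markType(t *Type) {
	if t == nil || p.marked[t] {
		return // already visited: this check breaks cycles
	}
	p.marked[t] = true
	switch t.Kind {
	case Ptr:
		p.markType(t.Elem)
	case Struct:
		for _, f := range t.Fields {
			p.markType(f)
		}
	}
}

func main() {
	node := &Type{Name: "Node", Kind: Struct}
	node.Fields = []*Type{{Name: "*Node", Kind: Ptr, Elem: node}} // recursive type
	p := &exporter{marked: map[*Type]bool{}}
	p.markType(node)
	fmt.Println(len(p.marked)) // 2: the struct and its pointer field, each once
}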
diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go
index 911ac4c0dc..c0c18e728e 100644
--- a/src/cmd/compile/internal/gc/bimport.go
+++ b/src/cmd/compile/internal/gc/bimport.go
@@ -5,20 +5,15 @@
package gc
import (
+ "cmd/compile/internal/ir"
"cmd/internal/src"
)
-// numImport tracks how often a package with a given name is imported.
-// It is used to provide a better error message (by using the package
-// path to disambiguate) if a package that appears multiple times with
-// the same name appears in an error message.
-var numImport = make(map[string]int)
-
-func npos(pos src.XPos, n *Node) *Node {
- n.Pos = pos
+func npos(pos src.XPos, n ir.Node) ir.Node {
+ n.SetPos(pos)
return n
}
-func builtinCall(op Op) *Node {
- return nod(OCALL, mkname(builtinpkg.Lookup(goopnames[op])), nil)
+func builtinCall(op ir.Op) ir.Node {
+ return ir.Nod(ir.OCALL, mkname(ir.BuiltinPkg.Lookup(ir.OpNames[op])), nil)
}
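builtinCall now spells the callee through ir.BuiltinPkg.Lookup(ir.OpNames[op]): an op-indexed table of printable names. A standalone sketch of that lookup with a toy Op set (the real table lives in the ir package):

package main

import "fmt"

type Op int

const (
	OAPPEND Op = iota
	OCOPY
)

// opNames maps an op to its source-level name, playing the role of ir.OpNames.
var opNames = [...]string{
	OAPPEND: "append",
	OCOPY:   "copy",
}

// builtinCall stands in for ir.Nod(ir.OCALL, mkname(...), nil): it resolves
// the builtin's name from the op and would wrap it in a call node.
func builtinCall(op Op) string {
	return opNames[op] + "(...)"
}

func main() {
	fmt.Println(builtinCall(OCOPY)) // copy(...)
}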
diff --git a/src/cmd/compile/internal/gc/bootstrap.go b/src/cmd/compile/internal/gc/bootstrap.go
index 967f75a9ac..2e13d6b57a 100644
--- a/src/cmd/compile/internal/gc/bootstrap.go
+++ b/src/cmd/compile/internal/gc/bootstrap.go
@@ -6,8 +6,11 @@
package gc
-import "runtime"
+import (
+ "cmd/compile/internal/base"
+ "runtime"
+)
func startMutexProfiling() {
- Fatalf("mutex profiling unavailable in version %v", runtime.Version())
+ base.Fatalf("mutex profiling unavailable in version %v", runtime.Version())
}
diff --git a/src/cmd/compile/internal/gc/builtin.go b/src/cmd/compile/internal/gc/builtin.go
index fd95b657b2..a57c611559 100644
--- a/src/cmd/compile/internal/gc/builtin.go
+++ b/src/cmd/compile/internal/gc/builtin.go
@@ -2,7 +2,10 @@
package gc
-import "cmd/compile/internal/types"
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+)
var runtimeDecls = [...]struct {
name string
@@ -205,134 +208,134 @@ func runtimeTypes() []*types.Type {
var typs [131]*types.Type
typs[0] = types.Bytetype
typs[1] = types.NewPtr(typs[0])
- typs[2] = types.Types[TANY]
+ typs[2] = types.Types[types.TANY]
typs[3] = types.NewPtr(typs[2])
- typs[4] = functype(nil, []*Node{anonfield(typs[1])}, []*Node{anonfield(typs[3])})
- typs[5] = types.Types[TUINTPTR]
- typs[6] = types.Types[TBOOL]
- typs[7] = types.Types[TUNSAFEPTR]
- typs[8] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[1]), anonfield(typs[6])}, []*Node{anonfield(typs[7])})
+ typs[4] = functype(nil, []ir.Node{anonfield(typs[1])}, []ir.Node{anonfield(typs[3])})
+ typs[5] = types.Types[types.TUINTPTR]
+ typs[6] = types.Types[types.TBOOL]
+ typs[7] = types.Types[types.TUNSAFEPTR]
+ typs[8] = functype(nil, []ir.Node{anonfield(typs[5]), anonfield(typs[1]), anonfield(typs[6])}, []ir.Node{anonfield(typs[7])})
typs[9] = functype(nil, nil, nil)
- typs[10] = types.Types[TINTER]
- typs[11] = functype(nil, []*Node{anonfield(typs[10])}, nil)
- typs[12] = types.Types[TINT32]
+ typs[10] = types.Types[types.TINTER]
+ typs[11] = functype(nil, []ir.Node{anonfield(typs[10])}, nil)
+ typs[12] = types.Types[types.TINT32]
typs[13] = types.NewPtr(typs[12])
- typs[14] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[10])})
- typs[15] = types.Types[TINT]
- typs[16] = functype(nil, []*Node{anonfield(typs[15]), anonfield(typs[15])}, nil)
- typs[17] = types.Types[TUINT]
- typs[18] = functype(nil, []*Node{anonfield(typs[17]), anonfield(typs[15])}, nil)
- typs[19] = functype(nil, []*Node{anonfield(typs[6])}, nil)
- typs[20] = types.Types[TFLOAT64]
- typs[21] = functype(nil, []*Node{anonfield(typs[20])}, nil)
- typs[22] = types.Types[TINT64]
- typs[23] = functype(nil, []*Node{anonfield(typs[22])}, nil)
- typs[24] = types.Types[TUINT64]
- typs[25] = functype(nil, []*Node{anonfield(typs[24])}, nil)
- typs[26] = types.Types[TCOMPLEX128]
- typs[27] = functype(nil, []*Node{anonfield(typs[26])}, nil)
- typs[28] = types.Types[TSTRING]
- typs[29] = functype(nil, []*Node{anonfield(typs[28])}, nil)
- typs[30] = functype(nil, []*Node{anonfield(typs[2])}, nil)
- typs[31] = functype(nil, []*Node{anonfield(typs[5])}, nil)
+ typs[14] = functype(nil, []ir.Node{anonfield(typs[13])}, []ir.Node{anonfield(typs[10])})
+ typs[15] = types.Types[types.TINT]
+ typs[16] = functype(nil, []ir.Node{anonfield(typs[15]), anonfield(typs[15])}, nil)
+ typs[17] = types.Types[types.TUINT]
+ typs[18] = functype(nil, []ir.Node{anonfield(typs[17]), anonfield(typs[15])}, nil)
+ typs[19] = functype(nil, []ir.Node{anonfield(typs[6])}, nil)
+ typs[20] = types.Types[types.TFLOAT64]
+ typs[21] = functype(nil, []ir.Node{anonfield(typs[20])}, nil)
+ typs[22] = types.Types[types.TINT64]
+ typs[23] = functype(nil, []ir.Node{anonfield(typs[22])}, nil)
+ typs[24] = types.Types[types.TUINT64]
+ typs[25] = functype(nil, []ir.Node{anonfield(typs[24])}, nil)
+ typs[26] = types.Types[types.TCOMPLEX128]
+ typs[27] = functype(nil, []ir.Node{anonfield(typs[26])}, nil)
+ typs[28] = types.Types[types.TSTRING]
+ typs[29] = functype(nil, []ir.Node{anonfield(typs[28])}, nil)
+ typs[30] = functype(nil, []ir.Node{anonfield(typs[2])}, nil)
+ typs[31] = functype(nil, []ir.Node{anonfield(typs[5])}, nil)
typs[32] = types.NewArray(typs[0], 32)
typs[33] = types.NewPtr(typs[32])
- typs[34] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
- typs[35] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
- typs[36] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
- typs[37] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
+ typs[34] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[28])})
+ typs[35] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[28])})
+ typs[36] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[28])})
+ typs[37] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[28])})
typs[38] = types.NewSlice(typs[28])
- typs[39] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[38])}, []*Node{anonfield(typs[28])})
- typs[40] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[15])})
+ typs[39] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[38])}, []ir.Node{anonfield(typs[28])})
+ typs[40] = functype(nil, []ir.Node{anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[15])})
typs[41] = types.NewArray(typs[0], 4)
typs[42] = types.NewPtr(typs[41])
- typs[43] = functype(nil, []*Node{anonfield(typs[42]), anonfield(typs[22])}, []*Node{anonfield(typs[28])})
- typs[44] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
- typs[45] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
+ typs[43] = functype(nil, []ir.Node{anonfield(typs[42]), anonfield(typs[22])}, []ir.Node{anonfield(typs[28])})
+ typs[44] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []ir.Node{anonfield(typs[28])})
+ typs[45] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15])}, []ir.Node{anonfield(typs[28])})
typs[46] = types.Runetype
typs[47] = types.NewSlice(typs[46])
- typs[48] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[47])}, []*Node{anonfield(typs[28])})
+ typs[48] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[47])}, []ir.Node{anonfield(typs[28])})
typs[49] = types.NewSlice(typs[0])
- typs[50] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28])}, []*Node{anonfield(typs[49])})
+ typs[50] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28])}, []ir.Node{anonfield(typs[49])})
typs[51] = types.NewArray(typs[46], 32)
typs[52] = types.NewPtr(typs[51])
- typs[53] = functype(nil, []*Node{anonfield(typs[52]), anonfield(typs[28])}, []*Node{anonfield(typs[47])})
- typs[54] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*Node{anonfield(typs[15])})
- typs[55] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[15])}, []*Node{anonfield(typs[46]), anonfield(typs[15])})
- typs[56] = functype(nil, []*Node{anonfield(typs[28])}, []*Node{anonfield(typs[15])})
- typs[57] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])})
- typs[58] = functype(nil, []*Node{anonfield(typs[2])}, []*Node{anonfield(typs[7])})
- typs[59] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, []*Node{anonfield(typs[2])})
- typs[60] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[6])})
- typs[61] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
- typs[62] = functype(nil, []*Node{anonfield(typs[1])}, nil)
+ typs[53] = functype(nil, []ir.Node{anonfield(typs[52]), anonfield(typs[28])}, []ir.Node{anonfield(typs[47])})
+ typs[54] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []ir.Node{anonfield(typs[15])})
+ typs[55] = functype(nil, []ir.Node{anonfield(typs[28]), anonfield(typs[15])}, []ir.Node{anonfield(typs[46]), anonfield(typs[15])})
+ typs[56] = functype(nil, []ir.Node{anonfield(typs[28])}, []ir.Node{anonfield(typs[15])})
+ typs[57] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[2])}, []ir.Node{anonfield(typs[2])})
+ typs[58] = functype(nil, []ir.Node{anonfield(typs[2])}, []ir.Node{anonfield(typs[7])})
+ typs[59] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[3])}, []ir.Node{anonfield(typs[2])})
+ typs[60] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[2])}, []ir.Node{anonfield(typs[2]), anonfield(typs[6])})
+ typs[61] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
+ typs[62] = functype(nil, []ir.Node{anonfield(typs[1])}, nil)
typs[63] = types.NewPtr(typs[5])
- typs[64] = functype(nil, []*Node{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
- typs[65] = types.Types[TUINT32]
- typs[66] = functype(nil, nil, []*Node{anonfield(typs[65])})
+ typs[64] = functype(nil, []ir.Node{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []ir.Node{anonfield(typs[6])})
+ typs[65] = types.Types[types.TUINT32]
+ typs[66] = functype(nil, nil, []ir.Node{anonfield(typs[65])})
typs[67] = types.NewMap(typs[2], typs[2])
- typs[68] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
- typs[69] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
- typs[70] = functype(nil, nil, []*Node{anonfield(typs[67])})
- typs[71] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3])})
- typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3])})
- typs[73] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3])})
- typs[74] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
- typs[75] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
- typs[76] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
- typs[77] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil)
- typs[78] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil)
- typs[79] = functype(nil, []*Node{anonfield(typs[3])}, nil)
- typs[80] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67])}, nil)
+ typs[68] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []ir.Node{anonfield(typs[67])})
+ typs[69] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []ir.Node{anonfield(typs[67])})
+ typs[70] = functype(nil, nil, []ir.Node{anonfield(typs[67])})
+ typs[71] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []ir.Node{anonfield(typs[3])})
+ typs[72] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []ir.Node{anonfield(typs[3])})
+ typs[73] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []ir.Node{anonfield(typs[3])})
+ typs[74] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []ir.Node{anonfield(typs[3]), anonfield(typs[6])})
+ typs[75] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []ir.Node{anonfield(typs[3]), anonfield(typs[6])})
+ typs[76] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []ir.Node{anonfield(typs[3]), anonfield(typs[6])})
+ typs[77] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil)
+ typs[78] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil)
+ typs[79] = functype(nil, []ir.Node{anonfield(typs[3])}, nil)
+ typs[80] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67])}, nil)
typs[81] = types.NewChan(typs[2], types.Cboth)
- typs[82] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22])}, []*Node{anonfield(typs[81])})
- typs[83] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[81])})
+ typs[82] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[22])}, []ir.Node{anonfield(typs[81])})
+ typs[83] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15])}, []ir.Node{anonfield(typs[81])})
typs[84] = types.NewChan(typs[2], types.Crecv)
- typs[85] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, nil)
- typs[86] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
+ typs[85] = functype(nil, []ir.Node{anonfield(typs[84]), anonfield(typs[3])}, nil)
+ typs[86] = functype(nil, []ir.Node{anonfield(typs[84]), anonfield(typs[3])}, []ir.Node{anonfield(typs[6])})
typs[87] = types.NewChan(typs[2], types.Csend)
- typs[88] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, nil)
+ typs[88] = functype(nil, []ir.Node{anonfield(typs[87]), anonfield(typs[3])}, nil)
typs[89] = types.NewArray(typs[0], 3)
- typs[90] = tostruct([]*Node{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
- typs[91] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
- typs[92] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
- typs[93] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*Node{anonfield(typs[15])})
- typs[94] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
- typs[95] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
+ typs[90] = tostruct([]ir.Node{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
+ typs[91] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
+ typs[92] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
+ typs[93] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []ir.Node{anonfield(typs[15])})
+ typs[94] = functype(nil, []ir.Node{anonfield(typs[87]), anonfield(typs[3])}, []ir.Node{anonfield(typs[6])})
+ typs[95] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[84])}, []ir.Node{anonfield(typs[6])})
typs[96] = types.NewPtr(typs[6])
- typs[97] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
- typs[98] = functype(nil, []*Node{anonfield(typs[63])}, nil)
- typs[99] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*Node{anonfield(typs[15]), anonfield(typs[6])})
- typs[100] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[7])})
- typs[101] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[7])})
- typs[102] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*Node{anonfield(typs[7])})
+ typs[97] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []ir.Node{anonfield(typs[6])})
+ typs[98] = functype(nil, []ir.Node{anonfield(typs[63])}, nil)
+ typs[99] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []ir.Node{anonfield(typs[15]), anonfield(typs[6])})
+ typs[100] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []ir.Node{anonfield(typs[7])})
+ typs[101] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []ir.Node{anonfield(typs[7])})
+ typs[102] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []ir.Node{anonfield(typs[7])})
typs[103] = types.NewSlice(typs[2])
- typs[104] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []*Node{anonfield(typs[103])})
- typs[105] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
- typs[106] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, nil)
- typs[107] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*Node{anonfield(typs[6])})
- typs[108] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
- typs[109] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
- typs[110] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
- typs[111] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
- typs[112] = functype(nil, []*Node{anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[22])})
- typs[113] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, []*Node{anonfield(typs[24])})
- typs[114] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[22])})
- typs[115] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[24])})
- typs[116] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[65])})
- typs[117] = functype(nil, []*Node{anonfield(typs[22])}, []*Node{anonfield(typs[20])})
- typs[118] = functype(nil, []*Node{anonfield(typs[24])}, []*Node{anonfield(typs[20])})
- typs[119] = functype(nil, []*Node{anonfield(typs[65])}, []*Node{anonfield(typs[20])})
- typs[120] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[26])}, []*Node{anonfield(typs[26])})
- typs[121] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[5])}, nil)
- typs[122] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil)
+ typs[104] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []ir.Node{anonfield(typs[103])})
+ typs[105] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
+ typs[106] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[5])}, nil)
+ typs[107] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []ir.Node{anonfield(typs[6])})
+ typs[108] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[3])}, []ir.Node{anonfield(typs[6])})
+ typs[109] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[7])}, []ir.Node{anonfield(typs[6])})
+ typs[110] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []ir.Node{anonfield(typs[5])})
+ typs[111] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[5])}, []ir.Node{anonfield(typs[5])})
+ typs[112] = functype(nil, []ir.Node{anonfield(typs[22]), anonfield(typs[22])}, []ir.Node{anonfield(typs[22])})
+ typs[113] = functype(nil, []ir.Node{anonfield(typs[24]), anonfield(typs[24])}, []ir.Node{anonfield(typs[24])})
+ typs[114] = functype(nil, []ir.Node{anonfield(typs[20])}, []ir.Node{anonfield(typs[22])})
+ typs[115] = functype(nil, []ir.Node{anonfield(typs[20])}, []ir.Node{anonfield(typs[24])})
+ typs[116] = functype(nil, []ir.Node{anonfield(typs[20])}, []ir.Node{anonfield(typs[65])})
+ typs[117] = functype(nil, []ir.Node{anonfield(typs[22])}, []ir.Node{anonfield(typs[20])})
+ typs[118] = functype(nil, []ir.Node{anonfield(typs[24])}, []ir.Node{anonfield(typs[20])})
+ typs[119] = functype(nil, []ir.Node{anonfield(typs[65])}, []ir.Node{anonfield(typs[20])})
+ typs[120] = functype(nil, []ir.Node{anonfield(typs[26]), anonfield(typs[26])}, []ir.Node{anonfield(typs[26])})
+ typs[121] = functype(nil, []ir.Node{anonfield(typs[5]), anonfield(typs[5])}, nil)
+ typs[122] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil)
typs[123] = types.NewSlice(typs[7])
- typs[124] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[123])}, nil)
- typs[125] = types.Types[TUINT8]
- typs[126] = functype(nil, []*Node{anonfield(typs[125]), anonfield(typs[125])}, nil)
- typs[127] = types.Types[TUINT16]
- typs[128] = functype(nil, []*Node{anonfield(typs[127]), anonfield(typs[127])}, nil)
- typs[129] = functype(nil, []*Node{anonfield(typs[65]), anonfield(typs[65])}, nil)
- typs[130] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, nil)
+ typs[124] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[123])}, nil)
+ typs[125] = types.Types[types.TUINT8]
+ typs[126] = functype(nil, []ir.Node{anonfield(typs[125]), anonfield(typs[125])}, nil)
+ typs[127] = types.Types[types.TUINT16]
+ typs[128] = functype(nil, []ir.Node{anonfield(typs[127]), anonfield(typs[127])}, nil)
+ typs[129] = functype(nil, []ir.Node{anonfield(typs[65]), anonfield(typs[65])}, nil)
+ typs[130] = functype(nil, []ir.Node{anonfield(typs[24]), anonfield(typs[24])}, nil)
return typs[:]
}
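runtimeTypes above is generated as a flat table in which later entries are composed from earlier ones by index — typs[1] is a pointer to typs[0], typs[4] is a function type over both — so the array doubles as a small interning scheme. A standalone sketch of the pattern with a toy Type:

package main

import "fmt"

type Type struct{ desc string }

func ptr(t *Type) *Type { return &Type{"*" + t.desc} }

func fn(args, res []*Type) *Type {
	return &Type{fmt.Sprintf("func/%d->%d", len(args), len(res))}
}

func main() {
	var typs [4]*Type
	typs[0] = &Type{"byte"}
	typs[1] = ptr(typs[0]) // *byte, built from an earlier entry
	typs[2] = &Type{"any"}
	typs[3] = fn([]*Type{typs[1]}, []*Type{typs[2]}) // like typs[4] above
	fmt.Println(typs[3].desc) // func/1->1
}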
diff --git a/src/cmd/compile/internal/gc/bv.go b/src/cmd/compile/internal/gc/bv.go
index e32ab97ad5..d82851e7cb 100644
--- a/src/cmd/compile/internal/gc/bv.go
+++ b/src/cmd/compile/internal/gc/bv.go
@@ -6,6 +6,8 @@ package gc
import (
"math/bits"
+
+ "cmd/compile/internal/base"
)
const (
@@ -35,7 +37,7 @@ func bvbulkalloc(nbit int32, count int32) bulkBvec {
nword := (nbit + wordBits - 1) / wordBits
size := int64(nword) * int64(count)
if int64(int32(size*4)) != size*4 {
- Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
+ base.Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
}
return bulkBvec{
words: make([]uint32, size),
@@ -52,7 +54,7 @@ func (b *bulkBvec) next() bvec {
func (bv1 bvec) Eq(bv2 bvec) bool {
if bv1.n != bv2.n {
- Fatalf("bvequal: lengths %d and %d are not equal", bv1.n, bv2.n)
+ base.Fatalf("bvequal: lengths %d and %d are not equal", bv1.n, bv2.n)
}
for i, x := range bv1.b {
if x != bv2.b[i] {
@@ -68,7 +70,7 @@ func (dst bvec) Copy(src bvec) {
func (bv bvec) Get(i int32) bool {
if i < 0 || i >= bv.n {
- Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.n)
+ base.Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.n)
}
mask := uint32(1 << uint(i%wordBits))
return bv.b[i>>wordShift]&mask != 0
@@ -76,7 +78,7 @@ func (bv bvec) Get(i int32) bool {
func (bv bvec) Set(i int32) {
if i < 0 || i >= bv.n {
- Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.n)
+ base.Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.n)
}
mask := uint32(1 << uint(i%wordBits))
bv.b[i/wordBits] |= mask
@@ -84,7 +86,7 @@ func (bv bvec) Set(i int32) {
func (bv bvec) Unset(i int32) {
if i < 0 || i >= bv.n {
- Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.n)
+ base.Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.n)
}
mask := uint32(1 << uint(i%wordBits))
bv.b[i/wordBits] &^= mask
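The bvec accessors above all share the same word/mask arithmetic (the Get above uses i>>wordShift, which is equivalent to i/wordBits). A minimal standalone sketch of that indexing scheme, with hypothetical names and without the Fatalf bounds checks the real code performs:

	package main

	import "fmt"

	const wordBits = 32

	type bvec struct {
		n int32    // number of bits
		b []uint32 // words holding the bits, wordBits per entry
	}

	// set and get locate bit i in word i/wordBits at position i%wordBits.
	func (bv bvec) set(i int32)      { bv.b[i/wordBits] |= 1 << uint(i%wordBits) }
	func (bv bvec) get(i int32) bool { return bv.b[i/wordBits]&(1<<uint(i%wordBits)) != 0 }

	func main() {
		bv := bvec{n: 64, b: make([]uint32, 2)}
		bv.set(33)
		fmt.Println(bv.get(33), bv.get(32)) // true false
	}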
diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go
index bd350f696e..2901ae41d6 100644
--- a/src/cmd/compile/internal/gc/closure.go
+++ b/src/cmd/compile/internal/gc/closure.go
@@ -5,37 +5,40 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
"cmd/compile/internal/types"
+ "cmd/internal/src"
"fmt"
)
-func (p *noder) funcLit(expr *syntax.FuncLit) *Node {
+func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node {
xtype := p.typeExpr(expr.Type)
ntype := p.typeExpr(expr.Type)
- xfunc := p.nod(expr, ODCLFUNC, nil, nil)
- xfunc.Func.SetIsHiddenClosure(Curfn != nil)
- xfunc.Func.Nname = newfuncnamel(p.pos(expr), nblank.Sym) // filled in by typecheckclosure
- xfunc.Func.Nname.Name.Param.Ntype = xtype
- xfunc.Func.Nname.Name.Defn = xfunc
+ dcl := p.nod(expr, ir.ODCLFUNC, nil, nil)
+ fn := dcl.Func()
+ fn.SetIsHiddenClosure(Curfn != nil)
+ fn.Nname = newfuncnamel(p.pos(expr), ir.BlankNode.Sym(), fn) // filled in by typecheckclosure
+ fn.Nname.Name().Param.Ntype = xtype
+ fn.Nname.Name().Defn = dcl
- clo := p.nod(expr, OCLOSURE, nil, nil)
- clo.Func.Ntype = ntype
+ clo := p.nod(expr, ir.OCLOSURE, nil, nil)
+ clo.SetFunc(fn)
+ fn.ClosureType = ntype
+ fn.OClosure = clo
- xfunc.Func.Closure = clo
- clo.Func.Closure = xfunc
-
- p.funcBody(xfunc, expr.Body)
+ p.funcBody(dcl, expr.Body)
// closure-specific variables are hanging off the
// ordinary ones in the symbol table; see oldname.
// unhook them.
// make the list of pointers for the closure call.
- for _, v := range xfunc.Func.Cvars.Slice() {
+ for _, v := range fn.ClosureVars.Slice() {
// Unlink from v1; see comment in syntax.go type Param for these fields.
- v1 := v.Name.Defn
- v1.Name.Param.Innermost = v.Name.Param.Outer
+ v1 := v.Name().Defn
+ v1.Name().Param.Innermost = v.Name().Param.Outer
// If the closure usage of v is not dense,
// we need to make it dense; now that we're out
@@ -65,7 +68,7 @@ func (p *noder) funcLit(expr *syntax.FuncLit) *Node {
// obtains f3's v, creating it if necessary (as it is in the example).
//
// capturevars will decide whether to use v directly or &v.
- v.Name.Param.Outer = oldname(v.Sym)
+ v.Name().Param.Outer = oldname(v.Sym())
}
return clo
@@ -75,60 +78,61 @@ func (p *noder) funcLit(expr *syntax.FuncLit) *Node {
// function associated with the closure.
// TODO: This creation of the named function should probably really be done in a
// separate pass from type-checking.
-func typecheckclosure(clo *Node, top int) {
- xfunc := clo.Func.Closure
+func typecheckclosure(clo ir.Node, top int) {
+ fn := clo.Func()
+ dcl := fn.Decl
// Set current associated iota value, so iota can be used inside
// function in ConstSpec, see issue #22344
if x := getIotaValue(); x >= 0 {
- xfunc.SetIota(x)
+ dcl.SetIota(x)
}
- clo.Func.Ntype = typecheck(clo.Func.Ntype, ctxType)
- clo.Type = clo.Func.Ntype.Type
- clo.Func.Top = top
+ fn.ClosureType = typecheck(fn.ClosureType, ctxType)
+ clo.SetType(fn.ClosureType.Type())
+ fn.ClosureCalled = top&ctxCallee != 0
- // Do not typecheck xfunc twice, otherwise, we will end up pushing
- // xfunc to xtop multiple times, causing initLSym called twice.
+	// Do not typecheck dcl twice; otherwise, we will end up pushing
+	// dcl to xtop multiple times, causing initLSym to be called twice.
// See #30709
- if xfunc.Typecheck() == 1 {
+ if dcl.Typecheck() == 1 {
return
}
- for _, ln := range xfunc.Func.Cvars.Slice() {
- n := ln.Name.Defn
- if !n.Name.Captured() {
- n.Name.SetCaptured(true)
- if n.Name.Decldepth == 0 {
- Fatalf("typecheckclosure: var %S does not have decldepth assigned", n)
+ for _, ln := range fn.ClosureVars.Slice() {
+ n := ln.Name().Defn
+ if !n.Name().Captured() {
+ n.Name().SetCaptured(true)
+ if n.Name().Decldepth == 0 {
+ base.Fatalf("typecheckclosure: var %S does not have decldepth assigned", n)
}
// Ignore assignments to the variable in straightline code
// preceding the first capturing by a closure.
- if n.Name.Decldepth == decldepth {
- n.Name.SetAssigned(false)
+ if n.Name().Decldepth == decldepth {
+ n.Name().SetAssigned(false)
}
}
}
- xfunc.Func.Nname.Sym = closurename(Curfn)
- setNodeNameFunc(xfunc.Func.Nname)
- xfunc = typecheck(xfunc, ctxStmt)
+ fn.Nname.SetSym(closurename(Curfn))
+ setNodeNameFunc(fn.Nname)
+ dcl = typecheck(dcl, ctxStmt)
// Type check the body now, but only if we're inside a function.
// At top level (in a variable initialization: curfn==nil) we're not
// ready to type check code yet; we'll check it later, because the
// underlying closure function we create is added to xtop.
- if Curfn != nil && clo.Type != nil {
+ if Curfn != nil && clo.Type() != nil {
oldfn := Curfn
- Curfn = xfunc
+ Curfn = dcl
olddd := decldepth
decldepth = 1
- typecheckslice(xfunc.Nbody.Slice(), ctxStmt)
+ typecheckslice(dcl.Body().Slice(), ctxStmt)
decldepth = olddd
Curfn = oldfn
}
- xtop = append(xtop, xfunc)
+ xtop = append(xtop, dcl)
}
// globClosgen is like Func.Closgen, but for the global scope.
@@ -136,23 +140,23 @@ var globClosgen int
// closurename generates a new unique name for a closure within
// outerfunc.
-func closurename(outerfunc *Node) *types.Sym {
+func closurename(outerfunc ir.Node) *types.Sym {
outer := "glob."
prefix := "func"
gen := &globClosgen
if outerfunc != nil {
- if outerfunc.Func.Closure != nil {
+ if outerfunc.Func().OClosure != nil {
prefix = ""
}
- outer = outerfunc.funcname()
+ outer = ir.FuncName(outerfunc)
// There may be multiple functions named "_". In those
// cases, we can't use their individual Closgens as it
// would lead to name clashes.
- if !outerfunc.Func.Nname.isBlank() {
- gen = &outerfunc.Func.Closgen
+ if !ir.IsBlank(outerfunc.Func().Nname) {
+ gen = &outerfunc.Func().Closgen
}
}
@@ -168,15 +172,14 @@ var capturevarscomplete bool
// by value or by reference.
// We use value capturing for values <= 128 bytes that are never reassigned
// after capturing (effectively constant).
-func capturevars(xfunc *Node) {
- lno := lineno
- lineno = xfunc.Pos
-
- clo := xfunc.Func.Closure
- cvars := xfunc.Func.Cvars.Slice()
+func capturevars(dcl ir.Node) {
+ lno := base.Pos
+ base.Pos = dcl.Pos()
+ fn := dcl.Func()
+ cvars := fn.ClosureVars.Slice()
out := cvars[:0]
for _, v := range cvars {
- if v.Type == nil {
+ if v.Type() == nil {
// If v.Type is nil, it means v looked like it
// was going to be used in the closure, but
// isn't. This happens in struct literals like
@@ -189,47 +192,47 @@ func capturevars(xfunc *Node) {
// type check the & of closed variables outside the closure,
// so that the outer frame also grabs them and knows they escape.
- dowidth(v.Type)
+ dowidth(v.Type())
- outer := v.Name.Param.Outer
- outermost := v.Name.Defn
+ outer := v.Name().Param.Outer
+ outermost := v.Name().Defn
// out parameters will be assigned to implicitly upon return.
- if outermost.Class() != PPARAMOUT && !outermost.Name.Addrtaken() && !outermost.Name.Assigned() && v.Type.Width <= 128 {
- v.Name.SetByval(true)
+ if outermost.Class() != ir.PPARAMOUT && !outermost.Name().Addrtaken() && !outermost.Name().Assigned() && v.Type().Width <= 128 {
+ v.Name().SetByval(true)
} else {
- outermost.Name.SetAddrtaken(true)
- outer = nod(OADDR, outer, nil)
+ outermost.Name().SetAddrtaken(true)
+ outer = ir.Nod(ir.OADDR, outer, nil)
}
- if Debug.m > 1 {
+ if base.Flag.LowerM > 1 {
var name *types.Sym
- if v.Name.Curfn != nil && v.Name.Curfn.Func.Nname != nil {
- name = v.Name.Curfn.Func.Nname.Sym
+ if v.Name().Curfn != nil && v.Name().Curfn.Func().Nname != nil {
+ name = v.Name().Curfn.Func().Nname.Sym()
}
how := "ref"
- if v.Name.Byval() {
+ if v.Name().Byval() {
how = "value"
}
- Warnl(v.Pos, "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym, outermost.Name.Addrtaken(), outermost.Name.Assigned(), int32(v.Type.Width))
+ base.WarnfAt(v.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym(), outermost.Name().Addrtaken(), outermost.Name().Assigned(), int32(v.Type().Width))
}
outer = typecheck(outer, ctxExpr)
- clo.Func.Enter.Append(outer)
+ fn.ClosureEnter.Append(outer)
}
- xfunc.Func.Cvars.Set(out)
- lineno = lno
+ fn.ClosureVars.Set(out)
+ base.Pos = lno
}
// transformclosure is called in a separate phase after escape analysis.
// It transforms closure bodies to properly reference captured variables.
-func transformclosure(xfunc *Node) {
- lno := lineno
- lineno = xfunc.Pos
- clo := xfunc.Func.Closure
+func transformclosure(dcl ir.Node) {
+ lno := base.Pos
+ base.Pos = dcl.Pos()
+ fn := dcl.Func()
- if clo.Func.Top&ctxCallee != 0 {
+ if fn.ClosureCalled {
// If the closure is directly called, we transform it to a plain function call
// with variables passed as args. This avoids allocation of a closure object.
// Here we do only a part of the transformation. Walk of OCALLFUNC(OCLOSURE)
@@ -246,116 +249,112 @@ func transformclosure(xfunc *Node) {
// }(byval, &byref, 42)
// f is ONAME of the actual function.
- f := xfunc.Func.Nname
+ f := fn.Nname
// We are going to insert captured variables before input args.
var params []*types.Field
- var decls []*Node
- for _, v := range xfunc.Func.Cvars.Slice() {
- if !v.Name.Byval() {
+ var decls []ir.Node
+ for _, v := range fn.ClosureVars.Slice() {
+ if !v.Name().Byval() {
// If v of type T is captured by reference,
// we introduce function param &v *T
// and v remains PAUTOHEAP with &v heapaddr
// (accesses will implicitly deref &v).
- addr := newname(lookup("&" + v.Sym.Name))
- addr.Type = types.NewPtr(v.Type)
- v.Name.Param.Heapaddr = addr
+ addr := NewName(lookup("&" + v.Sym().Name))
+ addr.SetType(types.NewPtr(v.Type()))
+ v.Name().Param.Heapaddr = addr
v = addr
}
- v.SetClass(PPARAM)
+ v.SetClass(ir.PPARAM)
decls = append(decls, v)
- fld := types.NewField()
- fld.Nname = asTypesNode(v)
- fld.Type = v.Type
- fld.Sym = v.Sym
+ fld := types.NewField(src.NoXPos, v.Sym(), v.Type())
+ fld.Nname = v
params = append(params, fld)
}
if len(params) > 0 {
// Prepend params and decls.
- f.Type.Params().SetFields(append(params, f.Type.Params().FieldSlice()...))
- xfunc.Func.Dcl = append(decls, xfunc.Func.Dcl...)
+ f.Type().Params().SetFields(append(params, f.Type().Params().FieldSlice()...))
+ fn.Dcl = append(decls, fn.Dcl...)
}
- dowidth(f.Type)
- xfunc.Type = f.Type // update type of ODCLFUNC
+ dowidth(f.Type())
+ dcl.SetType(f.Type()) // update type of ODCLFUNC
} else {
// The closure is not called, so it is going to stay as a closure.
- var body []*Node
+ var body []ir.Node
offset := int64(Widthptr)
- for _, v := range xfunc.Func.Cvars.Slice() {
+ for _, v := range fn.ClosureVars.Slice() {
// cv refers to the field inside of closure OSTRUCTLIT.
- cv := nod(OCLOSUREVAR, nil, nil)
+ cv := ir.Nod(ir.OCLOSUREVAR, nil, nil)
- cv.Type = v.Type
- if !v.Name.Byval() {
- cv.Type = types.NewPtr(v.Type)
+ cv.SetType(v.Type())
+ if !v.Name().Byval() {
+ cv.SetType(types.NewPtr(v.Type()))
}
- offset = Rnd(offset, int64(cv.Type.Align))
- cv.Xoffset = offset
- offset += cv.Type.Width
+ offset = Rnd(offset, int64(cv.Type().Align))
+ cv.SetOffset(offset)
+ offset += cv.Type().Width
- if v.Name.Byval() && v.Type.Width <= int64(2*Widthptr) {
+ if v.Name().Byval() && v.Type().Width <= int64(2*Widthptr) {
// If it is a small variable captured by value, downgrade it to PAUTO.
- v.SetClass(PAUTO)
- xfunc.Func.Dcl = append(xfunc.Func.Dcl, v)
- body = append(body, nod(OAS, v, cv))
+ v.SetClass(ir.PAUTO)
+ fn.Dcl = append(fn.Dcl, v)
+ body = append(body, ir.Nod(ir.OAS, v, cv))
} else {
// Declare variable holding addresses taken from closure
// and initialize in entry prologue.
- addr := newname(lookup("&" + v.Sym.Name))
- addr.Type = types.NewPtr(v.Type)
- addr.SetClass(PAUTO)
- addr.Name.SetUsed(true)
- addr.Name.Curfn = xfunc
- xfunc.Func.Dcl = append(xfunc.Func.Dcl, addr)
- v.Name.Param.Heapaddr = addr
- if v.Name.Byval() {
- cv = nod(OADDR, cv, nil)
+ addr := NewName(lookup("&" + v.Sym().Name))
+ addr.SetType(types.NewPtr(v.Type()))
+ addr.SetClass(ir.PAUTO)
+ addr.Name().SetUsed(true)
+ addr.Name().Curfn = dcl
+ fn.Dcl = append(fn.Dcl, addr)
+ v.Name().Param.Heapaddr = addr
+ if v.Name().Byval() {
+ cv = ir.Nod(ir.OADDR, cv, nil)
}
- body = append(body, nod(OAS, addr, cv))
+ body = append(body, ir.Nod(ir.OAS, addr, cv))
}
}
if len(body) > 0 {
typecheckslice(body, ctxStmt)
- xfunc.Func.Enter.Set(body)
- xfunc.Func.SetNeedctxt(true)
+ fn.Enter.Set(body)
+ fn.SetNeedctxt(true)
}
}
- lineno = lno
+ base.Pos = lno
}
// hasemptycvars reports whether closure clo has an
// empty list of captured vars.
-func hasemptycvars(clo *Node) bool {
- xfunc := clo.Func.Closure
- return xfunc.Func.Cvars.Len() == 0
+func hasemptycvars(clo ir.Node) bool {
+ return clo.Func().ClosureVars.Len() == 0
}
// closuredebugruntimecheck applies boilerplate checks for debug flags
// and compiling runtime
-func closuredebugruntimecheck(clo *Node) {
- if Debug_closure > 0 {
- xfunc := clo.Func.Closure
- if clo.Esc == EscHeap {
- Warnl(clo.Pos, "heap closure, captured vars = %v", xfunc.Func.Cvars)
+func closuredebugruntimecheck(clo ir.Node) {
+ if base.Debug.Closure > 0 {
+ if clo.Esc() == EscHeap {
+ base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func().ClosureVars)
} else {
- Warnl(clo.Pos, "stack closure, captured vars = %v", xfunc.Func.Cvars)
+ base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func().ClosureVars)
}
}
- if compiling_runtime && clo.Esc == EscHeap {
- yyerrorl(clo.Pos, "heap-allocated closure, not allowed in runtime")
+ if base.Flag.CompilingRuntime && clo.Esc() == EscHeap {
+ base.ErrorfAt(clo.Pos(), "heap-allocated closure, not allowed in runtime")
}
}
// closureType returns the struct type used to hold all the information
// needed in the closure for clo (clo must be an OCLOSURE node).
// The address of a variable of the returned type can be cast to a func.
-func closureType(clo *Node) *types.Type {
+func closureType(clo ir.Node) *types.Type {
// Create closure in the form of a composite literal.
// supposing the closure captures an int i and a string s
// and has one float64 argument and no results,
@@ -369,94 +368,95 @@ func closureType(clo *Node) *types.Type {
// The information appears in the binary in the form of type descriptors;
// the struct is unnamed so that closures in multiple packages with the
// same struct type can share the descriptor.
- fields := []*Node{
- namedfield(".F", types.Types[TUINTPTR]),
+ fields := []ir.Node{
+ namedfield(".F", types.Types[types.TUINTPTR]),
}
- for _, v := range clo.Func.Closure.Func.Cvars.Slice() {
- typ := v.Type
- if !v.Name.Byval() {
+ for _, v := range clo.Func().ClosureVars.Slice() {
+ typ := v.Type()
+ if !v.Name().Byval() {
typ = types.NewPtr(typ)
}
- fields = append(fields, symfield(v.Sym, typ))
+ fields = append(fields, symfield(v.Sym(), typ))
}
typ := tostruct(fields)
typ.SetNoalg(true)
return typ
}
-func walkclosure(clo *Node, init *Nodes) *Node {
- xfunc := clo.Func.Closure
+func walkclosure(clo ir.Node, init *ir.Nodes) ir.Node {
+ fn := clo.Func()
// If no closure vars, don't bother wrapping.
if hasemptycvars(clo) {
- if Debug_closure > 0 {
- Warnl(clo.Pos, "closure converted to global")
+ if base.Debug.Closure > 0 {
+ base.WarnfAt(clo.Pos(), "closure converted to global")
}
- return xfunc.Func.Nname
+ return fn.Nname
}
closuredebugruntimecheck(clo)
typ := closureType(clo)
- clos := nod(OCOMPLIT, nil, typenod(typ))
- clos.Esc = clo.Esc
- clos.List.Set(append([]*Node{nod(OCFUNC, xfunc.Func.Nname, nil)}, clo.Func.Enter.Slice()...))
+ clos := ir.Nod(ir.OCOMPLIT, nil, typenod(typ))
+ clos.SetEsc(clo.Esc())
+ clos.PtrList().Set(append([]ir.Node{ir.Nod(ir.OCFUNC, fn.Nname, nil)}, fn.ClosureEnter.Slice()...))
- clos = nod(OADDR, clos, nil)
- clos.Esc = clo.Esc
+ clos = ir.Nod(ir.OADDR, clos, nil)
+ clos.SetEsc(clo.Esc())
// Force type conversion from *struct to the func type.
- clos = convnop(clos, clo.Type)
+ clos = convnop(clos, clo.Type())
// non-escaping temp to use, if any.
if x := prealloc[clo]; x != nil {
- if !types.Identical(typ, x.Type) {
+ if !types.Identical(typ, x.Type()) {
panic("closure type does not match order's assigned type")
}
- clos.Left.Right = x
+ clos.Left().SetRight(x)
delete(prealloc, clo)
}
return walkexpr(clos, init)
}
-func typecheckpartialcall(fn *Node, sym *types.Sym) {
- switch fn.Op {
- case ODOTINTER, ODOTMETH:
+func typecheckpartialcall(dot ir.Node, sym *types.Sym) {
+ switch dot.Op() {
+ case ir.ODOTINTER, ir.ODOTMETH:
break
default:
- Fatalf("invalid typecheckpartialcall")
+ base.Fatalf("invalid typecheckpartialcall")
}
// Create top-level function.
- xfunc := makepartialcall(fn, fn.Type, sym)
- fn.Func = xfunc.Func
- fn.Func.SetWrapper(true)
- fn.Right = newname(sym)
- fn.Op = OCALLPART
- fn.Type = xfunc.Type
+ dcl := makepartialcall(dot, dot.Type(), sym)
+ dcl.Func().SetWrapper(true)
+ dot.SetOp(ir.OCALLPART)
+ dot.SetRight(NewName(sym))
+ dot.SetType(dcl.Type())
+ dot.SetFunc(dcl.Func())
+ dot.SetOpt(nil) // clear types.Field from ODOTMETH
}
// makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed
// for partial calls.
-func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node {
- rcvrtype := fn.Left.Type
+func makepartialcall(dot ir.Node, t0 *types.Type, meth *types.Sym) ir.Node {
+ rcvrtype := dot.Left().Type()
sym := methodSymSuffix(rcvrtype, meth, "-fm")
if sym.Uniq() {
- return asNode(sym.Def)
+ return ir.AsNode(sym.Def)
}
sym.SetUniq(true)
savecurfn := Curfn
- saveLineNo := lineno
+ saveLineNo := base.Pos
Curfn = nil
// Set line number equal to the line number where the method is declared.
var m *types.Field
if lookdot0(meth, rcvrtype, &m, false) == 1 && m.Pos.IsKnown() {
- lineno = m.Pos
+ base.Pos = m.Pos
}
// Note: !m.Pos.IsKnown() happens for method expressions where
// the method is implicitly declared. The Error method of the
@@ -464,73 +464,74 @@ func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node {
// number at the use of the method expression in this
// case. See issue 29389.
- tfn := nod(OTFUNC, nil, nil)
- tfn.List.Set(structargs(t0.Params(), true))
- tfn.Rlist.Set(structargs(t0.Results(), false))
+ tfn := ir.Nod(ir.OTFUNC, nil, nil)
+ tfn.PtrList().Set(structargs(t0.Params(), true))
+ tfn.PtrRlist().Set(structargs(t0.Results(), false))
- xfunc := dclfunc(sym, tfn)
- xfunc.Func.SetDupok(true)
- xfunc.Func.SetNeedctxt(true)
+ dcl := dclfunc(sym, tfn)
+ fn := dcl.Func()
+ fn.SetDupok(true)
+ fn.SetNeedctxt(true)
- tfn.Type.SetPkg(t0.Pkg())
+ tfn.Type().SetPkg(t0.Pkg())
// Declare and initialize variable holding receiver.
- cv := nod(OCLOSUREVAR, nil, nil)
- cv.Type = rcvrtype
- cv.Xoffset = Rnd(int64(Widthptr), int64(cv.Type.Align))
+ cv := ir.Nod(ir.OCLOSUREVAR, nil, nil)
+ cv.SetType(rcvrtype)
+ cv.SetOffset(Rnd(int64(Widthptr), int64(cv.Type().Align)))
- ptr := newname(lookup(".this"))
- declare(ptr, PAUTO)
- ptr.Name.SetUsed(true)
- var body []*Node
+ ptr := NewName(lookup(".this"))
+ declare(ptr, ir.PAUTO)
+ ptr.Name().SetUsed(true)
+ var body []ir.Node
if rcvrtype.IsPtr() || rcvrtype.IsInterface() {
- ptr.Type = rcvrtype
- body = append(body, nod(OAS, ptr, cv))
+ ptr.SetType(rcvrtype)
+ body = append(body, ir.Nod(ir.OAS, ptr, cv))
} else {
- ptr.Type = types.NewPtr(rcvrtype)
- body = append(body, nod(OAS, ptr, nod(OADDR, cv, nil)))
+ ptr.SetType(types.NewPtr(rcvrtype))
+ body = append(body, ir.Nod(ir.OAS, ptr, ir.Nod(ir.OADDR, cv, nil)))
}
- call := nod(OCALL, nodSym(OXDOT, ptr, meth), nil)
- call.List.Set(paramNnames(tfn.Type))
- call.SetIsDDD(tfn.Type.IsVariadic())
+ call := ir.Nod(ir.OCALL, nodSym(ir.OXDOT, ptr, meth), nil)
+ call.PtrList().Set(paramNnames(tfn.Type()))
+ call.SetIsDDD(tfn.Type().IsVariadic())
if t0.NumResults() != 0 {
- n := nod(ORETURN, nil, nil)
- n.List.Set1(call)
+ n := ir.Nod(ir.ORETURN, nil, nil)
+ n.PtrList().Set1(call)
call = n
}
body = append(body, call)
- xfunc.Nbody.Set(body)
+ dcl.PtrBody().Set(body)
funcbody()
- xfunc = typecheck(xfunc, ctxStmt)
+ dcl = typecheck(dcl, ctxStmt)
// Need to typecheck the body of the just-generated wrapper.
// typecheckslice() requires that Curfn is set when processing an ORETURN.
- Curfn = xfunc
- typecheckslice(xfunc.Nbody.Slice(), ctxStmt)
- sym.Def = asTypesNode(xfunc)
- xtop = append(xtop, xfunc)
+ Curfn = dcl
+ typecheckslice(dcl.Body().Slice(), ctxStmt)
+ sym.Def = dcl
+ xtop = append(xtop, dcl)
Curfn = savecurfn
- lineno = saveLineNo
+ base.Pos = saveLineNo
- return xfunc
+ return dcl
}
// partialCallType returns the struct type used to hold all the information
// needed in the closure for n (n must be an OCALLPART node).
// The address of a variable of the returned type can be cast to a func.
-func partialCallType(n *Node) *types.Type {
- t := tostruct([]*Node{
- namedfield("F", types.Types[TUINTPTR]),
- namedfield("R", n.Left.Type),
+func partialCallType(n ir.Node) *types.Type {
+ t := tostruct([]ir.Node{
+ namedfield("F", types.Types[types.TUINTPTR]),
+ namedfield("R", n.Left().Type()),
})
t.SetNoalg(true)
return t
}
-func walkpartialcall(n *Node, init *Nodes) *Node {
+func walkpartialcall(n ir.Node, init *ir.Nodes) ir.Node {
// Create closure in the form of a composite literal.
// For x.M with receiver (x) type T, the generated code looks like:
//
@@ -538,38 +539,38 @@ func walkpartialcall(n *Node, init *Nodes) *Node {
//
// Like walkclosure above.
- if n.Left.Type.IsInterface() {
+ if n.Left().Type().IsInterface() {
// Trigger panic for method on nil interface now.
// Otherwise it happens in the wrapper and is confusing.
- n.Left = cheapexpr(n.Left, init)
- n.Left = walkexpr(n.Left, nil)
+ n.SetLeft(cheapexpr(n.Left(), init))
+ n.SetLeft(walkexpr(n.Left(), nil))
- tab := nod(OITAB, n.Left, nil)
+ tab := ir.Nod(ir.OITAB, n.Left(), nil)
tab = typecheck(tab, ctxExpr)
- c := nod(OCHECKNIL, tab, nil)
+ c := ir.Nod(ir.OCHECKNIL, tab, nil)
c.SetTypecheck(1)
init.Append(c)
}
typ := partialCallType(n)
- clos := nod(OCOMPLIT, nil, typenod(typ))
- clos.Esc = n.Esc
- clos.List.Set2(nod(OCFUNC, n.Func.Nname, nil), n.Left)
+ clos := ir.Nod(ir.OCOMPLIT, nil, typenod(typ))
+ clos.SetEsc(n.Esc())
+ clos.PtrList().Set2(ir.Nod(ir.OCFUNC, n.Func().Nname, nil), n.Left())
- clos = nod(OADDR, clos, nil)
- clos.Esc = n.Esc
+ clos = ir.Nod(ir.OADDR, clos, nil)
+ clos.SetEsc(n.Esc())
// Force type conversion from *struct to the func type.
- clos = convnop(clos, n.Type)
+ clos = convnop(clos, n.Type())
// non-escaping temp to use, if any.
if x := prealloc[n]; x != nil {
- if !types.Identical(typ, x.Type) {
+ if !types.Identical(typ, x.Type()) {
panic("partial call type does not match order's assigned type")
}
- clos.Left.Right = x
+ clos.Left().SetRight(x)
delete(prealloc, n)
}
@@ -578,16 +579,16 @@ func walkpartialcall(n *Node, init *Nodes) *Node {
// callpartMethod returns the *types.Field representing the method
// referenced by method value n.
-func callpartMethod(n *Node) *types.Field {
- if n.Op != OCALLPART {
- Fatalf("expected OCALLPART, got %v", n)
+func callpartMethod(n ir.Node) *types.Field {
+ if n.Op() != ir.OCALLPART {
+ base.Fatalf("expected OCALLPART, got %v", n)
}
// TODO(mdempsky): Optimize this. If necessary,
// makepartialcall could save m for us somewhere.
var m *types.Field
- if lookdot0(n.Right.Sym, n.Left.Type, &m, false) != 1 {
- Fatalf("failed to find field for OCALLPART")
+ if lookdot0(n.Right().Sym(), n.Left().Type(), &m, false) != 1 {
+ base.Fatalf("failed to find field for OCALLPART")
}
return m
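The capturevars/transformclosure changes above must preserve Go's observable capture semantics: a variable reassigned after capture has to behave as if captured by reference, while a small variable (<= 128 bytes) that is never reassigned may be copied into the closure struct. A minimal sketch of the semantics the analysis preserves, not part of the CL:

	package main

	import "fmt"

	func main() {
		x := 1
		f := func() int { return x } // x is reassigned below, so it must
		x = 2                        // behave as captured by reference
		fmt.Println(f()) // 2

		// y is never reassigned after capture and is small, so the
		// compiler may copy it into the closure ("by value") without
		// changing behavior.
		y := 3
		g := func() int { return y }
		fmt.Println(g()) // 3
	}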
diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go
index b92c8d66b5..4beb85245f 100644
--- a/src/cmd/compile/internal/gc/const.go
+++ b/src/cmd/compile/internal/gc/const.go
@@ -5,217 +5,87 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
+ "go/constant"
+ "go/token"
+ "math"
"math/big"
"strings"
+ "unicode"
)
-// Ctype describes the constant kind of an "ideal" (untyped) constant.
-type Ctype uint8
-
const (
- CTxxx Ctype = iota
-
- CTINT
- CTRUNE
- CTFLT
- CTCPLX
- CTSTR
- CTBOOL
- CTNIL
+ // Maximum size in bits for big.Ints before signalling
+ // overflow and also mantissa precision for big.Floats.
+ Mpprec = 512
)
-type Val struct {
- // U contains one of:
- // bool bool when Ctype() == CTBOOL
- // *Mpint int when Ctype() == CTINT, rune when Ctype() == CTRUNE
- // *Mpflt float when Ctype() == CTFLT
- // *Mpcplx pair of floats when Ctype() == CTCPLX
- // string string when Ctype() == CTSTR
- // *Nilval when Ctype() == CTNIL
- U interface{}
-}
-
-func (v Val) Ctype() Ctype {
- switch x := v.U.(type) {
- default:
- Fatalf("unexpected Ctype for %T", v.U)
- panic("unreachable")
- case nil:
- return CTxxx
- case *NilVal:
- return CTNIL
- case bool:
- return CTBOOL
- case *Mpint:
- if x.Rune {
- return CTRUNE
- }
- return CTINT
- case *Mpflt:
- return CTFLT
- case *Mpcplx:
- return CTCPLX
- case string:
- return CTSTR
- }
-}
-
-func eqval(a, b Val) bool {
- if a.Ctype() != b.Ctype() {
- return false
- }
- switch x := a.U.(type) {
- default:
- Fatalf("unexpected Ctype for %T", a.U)
- panic("unreachable")
- case *NilVal:
- return true
- case bool:
- y := b.U.(bool)
- return x == y
- case *Mpint:
- y := b.U.(*Mpint)
- return x.Cmp(y) == 0
- case *Mpflt:
- y := b.U.(*Mpflt)
- return x.Cmp(y) == 0
- case *Mpcplx:
- y := b.U.(*Mpcplx)
- return x.Real.Cmp(&y.Real) == 0 && x.Imag.Cmp(&y.Imag) == 0
- case string:
- y := b.U.(string)
- return x == y
- }
-}
-
-// Interface returns the constant value stored in v as an interface{}.
-// It returns int64s for ints and runes, float64s for floats,
-// complex128s for complex values, and nil for constant nils.
-func (v Val) Interface() interface{} {
- switch x := v.U.(type) {
+func bigFloatVal(v constant.Value) *big.Float {
+ f := new(big.Float)
+ f.SetPrec(Mpprec)
+ switch u := constant.Val(v).(type) {
+ case int64:
+ f.SetInt64(u)
+ case *big.Int:
+ f.SetInt(u)
+ case *big.Float:
+ f.Set(u)
+ case *big.Rat:
+ f.SetRat(u)
default:
- Fatalf("unexpected Interface for %T", v.U)
- panic("unreachable")
- case *NilVal:
- return nil
- case bool, string:
- return x
- case *Mpint:
- return x.Int64()
- case *Mpflt:
- return x.Float64()
- case *Mpcplx:
- return complex(x.Real.Float64(), x.Imag.Float64())
+ base.Fatalf("unexpected: %v", u)
}
+ return f
}
-type NilVal struct{}
-
-// Int64Val returns n as an int64.
-// n must be an integer or rune constant.
-func (n *Node) Int64Val() int64 {
- if !Isconst(n, CTINT) {
- Fatalf("Int64Val(%v)", n)
- }
- return n.Val().U.(*Mpint).Int64()
-}
-
-// CanInt64 reports whether it is safe to call Int64Val() on n.
-func (n *Node) CanInt64() bool {
- if !Isconst(n, CTINT) {
- return false
+func roundFloat(v constant.Value, sz int64) constant.Value {
+ switch sz {
+ case 4:
+ f, _ := constant.Float32Val(v)
+ return makeFloat64(float64(f))
+ case 8:
+ f, _ := constant.Float64Val(v)
+ return makeFloat64(f)
}
-
- // if the value inside n cannot be represented as an int64, the
- // return value of Int64 is undefined
- return n.Val().U.(*Mpint).CmpInt64(n.Int64Val()) == 0
-}
-
-// BoolVal returns n as a bool.
-// n must be a boolean constant.
-func (n *Node) BoolVal() bool {
- if !Isconst(n, CTBOOL) {
- Fatalf("BoolVal(%v)", n)
- }
- return n.Val().U.(bool)
-}
-
-// StringVal returns the value of a literal string Node as a string.
-// n must be a string constant.
-func (n *Node) StringVal() string {
- if !Isconst(n, CTSTR) {
- Fatalf("StringVal(%v)", n)
- }
- return n.Val().U.(string)
+ base.Fatalf("unexpected size: %v", sz)
+ panic("unreachable")
}
// truncate float literal fv to 32-bit or 64-bit precision
// according to type; return truncated value.
-func truncfltlit(oldv *Mpflt, t *types.Type) *Mpflt {
- if t == nil {
- return oldv
- }
-
- if overflow(Val{oldv}, t) {
+func truncfltlit(v constant.Value, t *types.Type) constant.Value {
+ if t.IsUntyped() || overflow(v, t) {
// If there was overflow, simply continuing would set the
// value to Inf which in turn would lead to spurious follow-on
// errors. Avoid this by returning the existing value.
- return oldv
- }
-
- fv := newMpflt()
-
- // convert large precision literal floating
- // into limited precision (float64 or float32)
- switch t.Etype {
- case types.TFLOAT32:
- fv.SetFloat64(oldv.Float32())
- case types.TFLOAT64:
- fv.SetFloat64(oldv.Float64())
- default:
- Fatalf("truncfltlit: unexpected Etype %v", t.Etype)
+ return v
}
- return fv
+ return roundFloat(v, t.Size())
}
// truncate Real and Imag parts of Mpcplx to 32-bit or 64-bit
// precision, according to type; return truncated value. In case of
-// overflow, calls yyerror but does not truncate the input value.
-func trunccmplxlit(oldv *Mpcplx, t *types.Type) *Mpcplx {
- if t == nil {
- return oldv
- }
-
- if overflow(Val{oldv}, t) {
+// overflow, calls Errorf but does not truncate the input value.
+func trunccmplxlit(v constant.Value, t *types.Type) constant.Value {
+ if t.IsUntyped() || overflow(v, t) {
// If there was overflow, simply continuing would set the
// value to Inf which in turn would lead to spurious follow-on
// errors. Avoid this by returning the existing value.
- return oldv
- }
-
- cv := newMpcmplx()
-
- switch t.Etype {
- case types.TCOMPLEX64:
- cv.Real.SetFloat64(oldv.Real.Float32())
- cv.Imag.SetFloat64(oldv.Imag.Float32())
- case types.TCOMPLEX128:
- cv.Real.SetFloat64(oldv.Real.Float64())
- cv.Imag.SetFloat64(oldv.Imag.Float64())
- default:
- Fatalf("trunccplxlit: unexpected Etype %v", t.Etype)
+ return v
}
- return cv
+ fsz := t.Size() / 2
+ return makeComplex(roundFloat(constant.Real(v), fsz), roundFloat(constant.Imag(v), fsz))
}
// TODO(mdempsky): Replace these with better APIs.
-func convlit(n *Node, t *types.Type) *Node { return convlit1(n, t, false, nil) }
-func defaultlit(n *Node, t *types.Type) *Node { return convlit1(n, t, false, nil) }
+func convlit(n ir.Node, t *types.Type) ir.Node { return convlit1(n, t, false, nil) }
+func defaultlit(n ir.Node, t *types.Type) ir.Node { return convlit1(n, t, false, nil) }
// convlit1 converts an untyped expression n to type t. If n already
// has a type, convlit1 has no effect.
@@ -228,35 +98,38 @@ func defaultlit(n *Node, t *types.Type) *Node { return convlit1(n, t, false, nil
//
// If there's an error converting n to t, context is used in the error
// message.
-func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Node {
+func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir.Node {
if explicit && t == nil {
- Fatalf("explicit conversion missing type")
+ base.Fatalf("explicit conversion missing type")
}
if t != nil && t.IsUntyped() {
- Fatalf("bad conversion to untyped: %v", t)
+ base.Fatalf("bad conversion to untyped: %v", t)
}
- if n == nil || n.Type == nil {
+ if n == nil || n.Type() == nil {
// Allow sloppy callers.
return n
}
- if !n.Type.IsUntyped() {
+ if !n.Type().IsUntyped() {
// Already typed; nothing to do.
return n
}
- if n.Op == OLITERAL {
+ if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL {
// Can't always set n.Type directly on OLITERAL nodes.
// See discussion on CL 20813.
- n = n.rawcopy()
+ n = n.RawCopy()
}
// Nil is technically not a constant, so handle it specially.
- if n.Type.Etype == TNIL {
+ if n.Type().Etype == types.TNIL {
+ if n.Op() != ir.ONIL {
+ base.Fatalf("unexpected op: %v (%v)", n, n.Op())
+ }
if t == nil {
- yyerror("use of untyped nil")
+ base.Errorf("use of untyped nil")
n.SetDiag(true)
- n.Type = nil
+ n.SetType(nil)
return n
}
@@ -265,77 +138,77 @@ func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Nod
return n
}
- n.Type = t
+ n.SetType(t)
return n
}
- if t == nil || !okforconst[t.Etype] {
- t = defaultType(n.Type)
+ if t == nil || !ir.OKForConst[t.Etype] {
+ t = defaultType(n.Type())
}
- switch n.Op {
+ switch n.Op() {
default:
- Fatalf("unexpected untyped expression: %v", n)
+ base.Fatalf("unexpected untyped expression: %v", n)
- case OLITERAL:
+ case ir.OLITERAL:
v := convertVal(n.Val(), t, explicit)
- if v.U == nil {
+ if v.Kind() == constant.Unknown {
break
}
+ n.SetType(t)
n.SetVal(v)
- n.Type = t
return n
- case OPLUS, ONEG, OBITNOT, ONOT, OREAL, OIMAG:
- ot := operandType(n.Op, t)
+ case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.OREAL, ir.OIMAG:
+ ot := operandType(n.Op(), t)
if ot == nil {
n = defaultlit(n, nil)
break
}
- n.Left = convlit(n.Left, ot)
- if n.Left.Type == nil {
- n.Type = nil
+ n.SetLeft(convlit(n.Left(), ot))
+ if n.Left().Type() == nil {
+ n.SetType(nil)
return n
}
- n.Type = t
+ n.SetType(t)
return n
- case OADD, OSUB, OMUL, ODIV, OMOD, OOR, OXOR, OAND, OANDNOT, OOROR, OANDAND, OCOMPLEX:
- ot := operandType(n.Op, t)
+ case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT, ir.OOROR, ir.OANDAND, ir.OCOMPLEX:
+ ot := operandType(n.Op(), t)
if ot == nil {
n = defaultlit(n, nil)
break
}
- n.Left = convlit(n.Left, ot)
- n.Right = convlit(n.Right, ot)
- if n.Left.Type == nil || n.Right.Type == nil {
- n.Type = nil
+ n.SetLeft(convlit(n.Left(), ot))
+ n.SetRight(convlit(n.Right(), ot))
+ if n.Left().Type() == nil || n.Right().Type() == nil {
+ n.SetType(nil)
return n
}
- if !types.Identical(n.Left.Type, n.Right.Type) {
- yyerror("invalid operation: %v (mismatched types %v and %v)", n, n.Left.Type, n.Right.Type)
- n.Type = nil
+ if !types.Identical(n.Left().Type(), n.Right().Type()) {
+ base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, n.Left().Type(), n.Right().Type())
+ n.SetType(nil)
return n
}
- n.Type = t
+ n.SetType(t)
return n
- case OEQ, ONE, OLT, OLE, OGT, OGE:
+ case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
if !t.IsBoolean() {
break
}
- n.Type = t
+ n.SetType(t)
return n
- case OLSH, ORSH:
- n.Left = convlit1(n.Left, t, explicit, nil)
- n.Type = n.Left.Type
- if n.Type != nil && !n.Type.IsInteger() {
- yyerror("invalid operation: %v (shift of type %v)", n, n.Type)
- n.Type = nil
+ case ir.OLSH, ir.ORSH:
+ n.SetLeft(convlit1(n.Left(), t, explicit, nil))
+ n.SetType(n.Left().Type())
+ if n.Type() != nil && !n.Type().IsInteger() {
+ base.Errorf("invalid operation: %v (shift of type %v)", n, n.Type())
+ n.SetType(nil)
}
return n
}
@@ -343,26 +216,26 @@ func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Nod
if !n.Diag() {
if !t.Broke() {
if explicit {
- yyerror("cannot convert %L to type %v", n, t)
+ base.Errorf("cannot convert %L to type %v", n, t)
} else if context != nil {
- yyerror("cannot use %L as type %v in %s", n, t, context())
+ base.Errorf("cannot use %L as type %v in %s", n, t, context())
} else {
- yyerror("cannot use %L as type %v", n, t)
+ base.Errorf("cannot use %L as type %v", n, t)
}
}
n.SetDiag(true)
}
- n.Type = nil
+ n.SetType(nil)
return n
}
-func operandType(op Op, t *types.Type) *types.Type {
+func operandType(op ir.Op, t *types.Type) *types.Type {
switch op {
- case OCOMPLEX:
+ case ir.OCOMPLEX:
if t.IsComplex() {
return floatForComplex(t)
}
- case OREAL, OIMAG:
+ case ir.OREAL, ir.OIMAG:
if t.IsFloat() {
return complexForFloat(t)
}
@@ -379,24 +252,24 @@ func operandType(op Op, t *types.Type) *types.Type {
//
// If explicit is true, then conversions from integer to string are
// also allowed.
-func convertVal(v Val, t *types.Type, explicit bool) Val {
- switch ct := v.Ctype(); ct {
- case CTBOOL:
+func convertVal(v constant.Value, t *types.Type, explicit bool) constant.Value {
+ switch ct := v.Kind(); ct {
+ case constant.Bool:
if t.IsBoolean() {
return v
}
- case CTSTR:
+ case constant.String:
if t.IsString() {
return v
}
- case CTINT, CTRUNE:
+ case constant.Int:
if explicit && t.IsString() {
return tostr(v)
}
fallthrough
- case CTFLT, CTCPLX:
+ case constant.Float, constant.Complex:
switch {
case t.IsInteger():
v = toint(v)
@@ -404,646 +277,383 @@ func convertVal(v Val, t *types.Type, explicit bool) Val {
return v
case t.IsFloat():
v = toflt(v)
- v = Val{truncfltlit(v.U.(*Mpflt), t)}
+ v = truncfltlit(v, t)
return v
case t.IsComplex():
v = tocplx(v)
- v = Val{trunccmplxlit(v.U.(*Mpcplx), t)}
+ v = trunccmplxlit(v, t)
return v
}
}
- return Val{}
+ return constant.MakeUnknown()
}
-func tocplx(v Val) Val {
- switch u := v.U.(type) {
- case *Mpint:
- c := newMpcmplx()
- c.Real.SetInt(u)
- c.Imag.SetFloat64(0.0)
- v.U = c
-
- case *Mpflt:
- c := newMpcmplx()
- c.Real.Set(u)
- c.Imag.SetFloat64(0.0)
- v.U = c
- }
-
- return v
+func tocplx(v constant.Value) constant.Value {
+ return constant.ToComplex(v)
}
-func toflt(v Val) Val {
- switch u := v.U.(type) {
- case *Mpint:
- f := newMpflt()
- f.SetInt(u)
- v.U = f
-
- case *Mpcplx:
- f := newMpflt()
- f.Set(&u.Real)
- if u.Imag.CmpFloat64(0) != 0 {
- yyerror("constant %v truncated to real", u.GoString())
+func toflt(v constant.Value) constant.Value {
+ if v.Kind() == constant.Complex {
+ if constant.Sign(constant.Imag(v)) != 0 {
+ base.Errorf("constant %v truncated to real", v)
}
- v.U = f
+ v = constant.Real(v)
}
- return v
+ return constant.ToFloat(v)
}
-func toint(v Val) Val {
- switch u := v.U.(type) {
- case *Mpint:
- if u.Rune {
- i := new(Mpint)
- i.Set(u)
- v.U = i
+func toint(v constant.Value) constant.Value {
+ if v.Kind() == constant.Complex {
+ if constant.Sign(constant.Imag(v)) != 0 {
+ base.Errorf("constant %v truncated to integer", v)
}
+ v = constant.Real(v)
+ }
- case *Mpflt:
- i := new(Mpint)
- if !i.SetFloat(u) {
- if i.checkOverflow(0) {
- yyerror("integer too large")
- } else {
- // The value of u cannot be represented as an integer;
- // so we need to print an error message.
- // Unfortunately some float values cannot be
- // reasonably formatted for inclusion in an error
- // message (example: 1 + 1e-100), so first we try to
- // format the float; if the truncation resulted in
- // something that looks like an integer we omit the
- // value from the error message.
- // (See issue #11371).
- var t big.Float
- t.Parse(u.GoString(), 10)
- if t.IsInt() {
- yyerror("constant truncated to integer")
- } else {
- yyerror("constant %v truncated to integer", u.GoString())
- }
- }
- }
- v.U = i
+ if v := constant.ToInt(v); v.Kind() == constant.Int {
+ return v
+ }
- case *Mpcplx:
- i := new(Mpint)
- if !i.SetFloat(&u.Real) || u.Imag.CmpFloat64(0) != 0 {
- yyerror("constant %v truncated to integer", u.GoString())
+ // The value of v cannot be represented as an integer;
+ // so we need to print an error message.
+ // Unfortunately some float values cannot be
+ // reasonably formatted for inclusion in an error
+ // message (example: 1 + 1e-100), so first we try to
+ // format the float; if the truncation resulted in
+ // something that looks like an integer we omit the
+ // value from the error message.
+ // (See issue #11371).
+ f := bigFloatVal(v)
+ if f.MantExp(nil) > 2*Mpprec {
+ base.Errorf("integer too large")
+ } else {
+ var t big.Float
+ t.Parse(fmt.Sprint(v), 0)
+ if t.IsInt() {
+ base.Errorf("constant truncated to integer")
+ } else {
+ base.Errorf("constant %v truncated to integer", v)
}
-
- v.U = i
}
- return v
+ // Prevent follow-on errors.
+ // TODO(mdempsky): Use constant.MakeUnknown() instead.
+ return constant.MakeInt64(1)
}
-func doesoverflow(v Val, t *types.Type) bool {
- switch u := v.U.(type) {
- case *Mpint:
- if !t.IsInteger() {
- Fatalf("overflow: %v integer constant", t)
- }
- return u.Cmp(minintval[t.Etype]) < 0 || u.Cmp(maxintval[t.Etype]) > 0
-
- case *Mpflt:
- if !t.IsFloat() {
- Fatalf("overflow: %v floating-point constant", t)
- }
- return u.Cmp(minfltval[t.Etype]) <= 0 || u.Cmp(maxfltval[t.Etype]) >= 0
-
- case *Mpcplx:
- if !t.IsComplex() {
- Fatalf("overflow: %v complex constant", t)
- }
- return u.Real.Cmp(minfltval[t.Etype]) <= 0 || u.Real.Cmp(maxfltval[t.Etype]) >= 0 ||
- u.Imag.Cmp(minfltval[t.Etype]) <= 0 || u.Imag.Cmp(maxfltval[t.Etype]) >= 0
- }
-
- return false
+// doesoverflow reports whether constant value v is too large
+// to represent with type t.
+func doesoverflow(v constant.Value, t *types.Type) bool {
+ switch {
+ case t.IsInteger():
+ bits := uint(8 * t.Size())
+ if t.IsUnsigned() {
+ x, ok := constant.Uint64Val(v)
+ return !ok || x>>bits != 0
+ }
+ x, ok := constant.Int64Val(v)
+ if x < 0 {
+ x = ^x
+ }
+ return !ok || x>>(bits-1) != 0
+ case t.IsFloat():
+ switch t.Size() {
+ case 4:
+ f, _ := constant.Float32Val(v)
+ return math.IsInf(float64(f), 0)
+ case 8:
+ f, _ := constant.Float64Val(v)
+ return math.IsInf(f, 0)
+ }
+ case t.IsComplex():
+ ft := floatForComplex(t)
+ return doesoverflow(constant.Real(v), ft) || doesoverflow(constant.Imag(v), ft)
+ }
+ base.Fatalf("doesoverflow: %v, %v", v, t)
+ panic("unreachable")
}
-func overflow(v Val, t *types.Type) bool {
+// overflow reports whether constant value v is too large
+// to represent with type t, and emits an error message if so.
+func overflow(v constant.Value, t *types.Type) bool {
// v has already been converted
// to appropriate form for t.
- if t == nil || t.Etype == TIDEAL {
+ if t.IsUntyped() {
return false
}
-
- // Only uintptrs may be converted to pointers, which cannot overflow.
- if t.IsPtr() || t.IsUnsafePtr() {
- return false
+ if v.Kind() == constant.Int && constant.BitLen(v) > Mpprec {
+ base.Errorf("integer too large")
+ return true
}
-
if doesoverflow(v, t) {
- yyerror("constant %v overflows %v", v, t)
+ base.Errorf("constant %v overflows %v", ir.FmtConst(v, 0), t)
return true
}
-
return false
-
}
-func tostr(v Val) Val {
- switch u := v.U.(type) {
- case *Mpint:
- var r rune = 0xFFFD
- if u.Cmp(minintval[TINT32]) >= 0 && u.Cmp(maxintval[TINT32]) <= 0 {
- r = rune(u.Int64())
+func tostr(v constant.Value) constant.Value {
+ if v.Kind() == constant.Int {
+ r := unicode.ReplacementChar
+ if x, ok := constant.Uint64Val(v); ok && x <= unicode.MaxRune {
+ r = rune(x)
}
- v.U = string(r)
+ v = constant.MakeString(string(r))
}
-
return v
}
-func consttype(n *Node) Ctype {
- if n == nil || n.Op != OLITERAL {
- return CTxxx
- }
- return n.Val().Ctype()
-}
-
-func Isconst(n *Node, ct Ctype) bool {
- t := consttype(n)
-
- // If the caller is asking for CTINT, allow CTRUNE too.
- // Makes life easier for back ends.
- return t == ct || (ct == CTINT && t == CTRUNE)
-}
-
-// evconst rewrites constant expressions into OLITERAL nodes.
-func evconst(n *Node) {
- nl, nr := n.Left, n.Right
+var tokenForOp = [...]token.Token{
+ ir.OPLUS: token.ADD,
+ ir.ONEG: token.SUB,
+ ir.ONOT: token.NOT,
+ ir.OBITNOT: token.XOR,
+
+ ir.OADD: token.ADD,
+ ir.OSUB: token.SUB,
+ ir.OMUL: token.MUL,
+ ir.ODIV: token.QUO,
+ ir.OMOD: token.REM,
+ ir.OOR: token.OR,
+ ir.OXOR: token.XOR,
+ ir.OAND: token.AND,
+ ir.OANDNOT: token.AND_NOT,
+ ir.OOROR: token.LOR,
+ ir.OANDAND: token.LAND,
+
+ ir.OEQ: token.EQL,
+ ir.ONE: token.NEQ,
+ ir.OLT: token.LSS,
+ ir.OLE: token.LEQ,
+ ir.OGT: token.GTR,
+ ir.OGE: token.GEQ,
+
+ ir.OLSH: token.SHL,
+ ir.ORSH: token.SHR,
+}
+
+// evalConst returns a constant-evaluated expression equivalent to n.
+// If n is not a constant, evalConst returns n.
+// Otherwise, evalConst returns a new OLITERAL with the same value as n,
+// and with .Orig pointing back to n.
+func evalConst(n ir.Node) ir.Node {
+ nl, nr := n.Left(), n.Right()
// Pick off just the opcodes that can be constant evaluated.
- switch op := n.Op; op {
- case OPLUS, ONEG, OBITNOT, ONOT:
- if nl.Op == OLITERAL {
- setconst(n, unaryOp(op, nl.Val(), n.Type))
+ switch op := n.Op(); op {
+ case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
+ if nl.Op() == ir.OLITERAL {
+ var prec uint
+ if n.Type().IsUnsigned() {
+ prec = uint(n.Type().Size() * 8)
+ }
+ return origConst(n, constant.UnaryOp(tokenForOp[op], nl.Val(), prec))
}
- case OADD, OSUB, OMUL, ODIV, OMOD, OOR, OXOR, OAND, OANDNOT, OOROR, OANDAND:
- if nl.Op == OLITERAL && nr.Op == OLITERAL {
- setconst(n, binaryOp(nl.Val(), op, nr.Val()))
+ case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT, ir.OOROR, ir.OANDAND:
+ if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
+ rval := nr.Val()
+
+ // check for divisor underflow in complex division (see issue 20227)
+ if op == ir.ODIV && n.Type().IsComplex() && constant.Sign(square(constant.Real(rval))) == 0 && constant.Sign(square(constant.Imag(rval))) == 0 {
+ base.Errorf("complex division by zero")
+ n.SetType(nil)
+ return n
+ }
+ if (op == ir.ODIV || op == ir.OMOD) && constant.Sign(rval) == 0 {
+ base.Errorf("division by zero")
+ n.SetType(nil)
+ return n
+ }
+
+ tok := tokenForOp[op]
+ if op == ir.ODIV && n.Type().IsInteger() {
+ tok = token.QUO_ASSIGN // integer division
+ }
+ return origConst(n, constant.BinaryOp(nl.Val(), tok, rval))
}
- case OEQ, ONE, OLT, OLE, OGT, OGE:
- if nl.Op == OLITERAL && nr.Op == OLITERAL {
- setboolconst(n, compareOp(nl.Val(), op, nr.Val()))
+ case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+ if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
+ return origBoolConst(n, constant.Compare(nl.Val(), tokenForOp[op], nr.Val()))
}
- case OLSH, ORSH:
- if nl.Op == OLITERAL && nr.Op == OLITERAL {
- setconst(n, shiftOp(nl.Val(), op, nr.Val()))
+ case ir.OLSH, ir.ORSH:
+ if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
+ // shiftBound from go/types; "so we can express smallestFloat64"
+ const shiftBound = 1023 - 1 + 52
+ s, ok := constant.Uint64Val(nr.Val())
+ if !ok || s > shiftBound {
+ base.Errorf("invalid shift count %v", nr)
+ n.SetType(nil)
+ break
+ }
+ return origConst(n, constant.Shift(toint(nl.Val()), tokenForOp[op], uint(s)))
}
- case OCONV, ORUNESTR:
- if okforconst[n.Type.Etype] && nl.Op == OLITERAL {
- setconst(n, convertVal(nl.Val(), n.Type, true))
+ case ir.OCONV, ir.ORUNESTR:
+ if ir.OKForConst[n.Type().Etype] && nl.Op() == ir.OLITERAL {
+ return origConst(n, convertVal(nl.Val(), n.Type(), true))
}
- case OCONVNOP:
- if okforconst[n.Type.Etype] && nl.Op == OLITERAL {
+ case ir.OCONVNOP:
+ if ir.OKForConst[n.Type().Etype] && nl.Op() == ir.OLITERAL {
// set so n.Orig gets OCONV instead of OCONVNOP
- n.Op = OCONV
- setconst(n, nl.Val())
+ n.SetOp(ir.OCONV)
+ return origConst(n, nl.Val())
}
- case OADDSTR:
+ case ir.OADDSTR:
// Merge adjacent constants in the argument list.
- s := n.List.Slice()
- for i1 := 0; i1 < len(s); i1++ {
- if Isconst(s[i1], CTSTR) && i1+1 < len(s) && Isconst(s[i1+1], CTSTR) {
- // merge from i1 up to but not including i2
+ s := n.List().Slice()
+ need := 0
+ for i := 0; i < len(s); i++ {
+ if i == 0 || !ir.IsConst(s[i-1], constant.String) || !ir.IsConst(s[i], constant.String) {
+ // Can't merge s[i] into s[i-1]; need a slot in the list.
+ need++
+ }
+ }
+ if need == len(s) {
+ return n
+ }
+ if need == 1 {
+ var strs []string
+ for _, c := range s {
+ strs = append(strs, c.StringVal())
+ }
+ return origConst(n, constant.MakeString(strings.Join(strs, "")))
+ }
+ newList := make([]ir.Node, 0, need)
+ for i := 0; i < len(s); i++ {
+ if ir.IsConst(s[i], constant.String) && i+1 < len(s) && ir.IsConst(s[i+1], constant.String) {
+ // merge from i up to but not including i2
var strs []string
- i2 := i1
- for i2 < len(s) && Isconst(s[i2], CTSTR) {
+ i2 := i
+ for i2 < len(s) && ir.IsConst(s[i2], constant.String) {
strs = append(strs, s[i2].StringVal())
i2++
}
- nl := *s[i1]
- nl.Orig = &nl
- nl.SetVal(Val{strings.Join(strs, "")})
- s[i1] = &nl
- s = append(s[:i1+1], s[i2:]...)
+ nl := origConst(s[i], constant.MakeString(strings.Join(strs, "")))
+ nl.SetOrig(nl) // it's bigger than just s[i]
+ newList = append(newList, nl)
+ i = i2 - 1
+ } else {
+ newList = append(newList, s[i])
}
}
- if len(s) == 1 && Isconst(s[0], CTSTR) {
- n.Op = OLITERAL
- n.SetVal(s[0].Val())
- } else {
- n.List.Set(s)
- }
+ n = ir.Copy(n)
+ n.PtrList().Set(newList)
+ return n
- case OCAP, OLEN:
- switch nl.Type.Etype {
- case TSTRING:
- if Isconst(nl, CTSTR) {
- setintconst(n, int64(len(nl.StringVal())))
+ case ir.OCAP, ir.OLEN:
+ switch nl.Type().Etype {
+ case types.TSTRING:
+ if ir.IsConst(nl, constant.String) {
+ return origIntConst(n, int64(len(nl.StringVal())))
}
- case TARRAY:
+ case types.TARRAY:
if !hascallchan(nl) {
- setintconst(n, nl.Type.NumElem())
- }
- }
-
- case OALIGNOF, OOFFSETOF, OSIZEOF:
- setintconst(n, evalunsafe(n))
-
- case OREAL, OIMAG:
- if nl.Op == OLITERAL {
- var re, im *Mpflt
- switch u := nl.Val().U.(type) {
- case *Mpint:
- re = newMpflt()
- re.SetInt(u)
- // im = 0
- case *Mpflt:
- re = u
- // im = 0
- case *Mpcplx:
- re = &u.Real
- im = &u.Imag
- default:
- Fatalf("impossible")
- }
- if n.Op == OIMAG {
- if im == nil {
- im = newMpflt()
- }
- re = im
+ return origIntConst(n, nl.Type().NumElem())
}
- setconst(n, Val{re})
}
- case OCOMPLEX:
- if nl.Op == OLITERAL && nr.Op == OLITERAL {
- // make it a complex literal
- c := newMpcmplx()
- c.Real.Set(toflt(nl.Val()).U.(*Mpflt))
- c.Imag.Set(toflt(nr.Val()).U.(*Mpflt))
- setconst(n, Val{c})
- }
- }
-}
-
-func match(x, y Val) (Val, Val) {
- switch {
- case x.Ctype() == CTCPLX || y.Ctype() == CTCPLX:
- return tocplx(x), tocplx(y)
- case x.Ctype() == CTFLT || y.Ctype() == CTFLT:
- return toflt(x), toflt(y)
- }
-
- // Mixed int/rune are fine.
- return x, y
-}
+ case ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
+ return origIntConst(n, evalunsafe(n))
-func compareOp(x Val, op Op, y Val) bool {
- x, y = match(x, y)
-
- switch x.Ctype() {
- case CTBOOL:
- x, y := x.U.(bool), y.U.(bool)
- switch op {
- case OEQ:
- return x == y
- case ONE:
- return x != y
+ case ir.OREAL:
+ if nl.Op() == ir.OLITERAL {
+ return origConst(n, constant.Real(nl.Val()))
}
- case CTINT, CTRUNE:
- x, y := x.U.(*Mpint), y.U.(*Mpint)
- return cmpZero(x.Cmp(y), op)
-
- case CTFLT:
- x, y := x.U.(*Mpflt), y.U.(*Mpflt)
- return cmpZero(x.Cmp(y), op)
-
- case CTCPLX:
- x, y := x.U.(*Mpcplx), y.U.(*Mpcplx)
- eq := x.Real.Cmp(&y.Real) == 0 && x.Imag.Cmp(&y.Imag) == 0
- switch op {
- case OEQ:
- return eq
- case ONE:
- return !eq
+ case ir.OIMAG:
+ if nl.Op() == ir.OLITERAL {
+ return origConst(n, constant.Imag(nl.Val()))
}
- case CTSTR:
- x, y := x.U.(string), y.U.(string)
- switch op {
- case OEQ:
- return x == y
- case ONE:
- return x != y
- case OLT:
- return x < y
- case OLE:
- return x <= y
- case OGT:
- return x > y
- case OGE:
- return x >= y
+ case ir.OCOMPLEX:
+ if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
+ return origConst(n, makeComplex(nl.Val(), nr.Val()))
}
}
- Fatalf("compareOp: bad comparison: %v %v %v", x, op, y)
- panic("unreachable")
+ return n
}
-func cmpZero(x int, op Op) bool {
- switch op {
- case OEQ:
- return x == 0
- case ONE:
- return x != 0
- case OLT:
- return x < 0
- case OLE:
- return x <= 0
- case OGT:
- return x > 0
- case OGE:
- return x >= 0
+func makeInt(i *big.Int) constant.Value {
+ if i.IsInt64() {
+ return constant.Make(i.Int64()) // workaround #42640 (Int64Val(Make(big.NewInt(10))) returns (10, false), not (10, true))
}
-
- Fatalf("cmpZero: want comparison operator, got %v", op)
- panic("unreachable")
+ return constant.Make(i)
}
-func binaryOp(x Val, op Op, y Val) Val {
- x, y = match(x, y)
-
-Outer:
- switch x.Ctype() {
- case CTBOOL:
- x, y := x.U.(bool), y.U.(bool)
- switch op {
- case OANDAND:
- return Val{U: x && y}
- case OOROR:
- return Val{U: x || y}
- }
-
- case CTINT, CTRUNE:
- x, y := x.U.(*Mpint), y.U.(*Mpint)
-
- u := new(Mpint)
- u.Rune = x.Rune || y.Rune
- u.Set(x)
- switch op {
- case OADD:
- u.Add(y)
- case OSUB:
- u.Sub(y)
- case OMUL:
- u.Mul(y)
- case ODIV:
- if y.CmpInt64(0) == 0 {
- yyerror("division by zero")
- return Val{}
- }
- u.Quo(y)
- case OMOD:
- if y.CmpInt64(0) == 0 {
- yyerror("division by zero")
- return Val{}
- }
- u.Rem(y)
- case OOR:
- u.Or(y)
- case OAND:
- u.And(y)
- case OANDNOT:
- u.AndNot(y)
- case OXOR:
- u.Xor(y)
- default:
- break Outer
- }
- return Val{U: u}
-
- case CTFLT:
- x, y := x.U.(*Mpflt), y.U.(*Mpflt)
-
- u := newMpflt()
- u.Set(x)
- switch op {
- case OADD:
- u.Add(y)
- case OSUB:
- u.Sub(y)
- case OMUL:
- u.Mul(y)
- case ODIV:
- if y.CmpFloat64(0) == 0 {
- yyerror("division by zero")
- return Val{}
- }
- u.Quo(y)
- default:
- break Outer
- }
- return Val{U: u}
-
- case CTCPLX:
- x, y := x.U.(*Mpcplx), y.U.(*Mpcplx)
-
- u := newMpcmplx()
- u.Real.Set(&x.Real)
- u.Imag.Set(&x.Imag)
- switch op {
- case OADD:
- u.Real.Add(&y.Real)
- u.Imag.Add(&y.Imag)
- case OSUB:
- u.Real.Sub(&y.Real)
- u.Imag.Sub(&y.Imag)
- case OMUL:
- u.Mul(y)
- case ODIV:
- if !u.Div(y) {
- yyerror("complex division by zero")
- return Val{}
- }
- default:
- break Outer
- }
- return Val{U: u}
+func makeFloat64(f float64) constant.Value {
+ if math.IsInf(f, 0) {
+ base.Fatalf("infinity is not a valid constant")
}
-
- Fatalf("binaryOp: bad operation: %v %v %v", x, op, y)
- panic("unreachable")
+ v := constant.MakeFloat64(f)
+ v = constant.ToFloat(v) // workaround #42641 (MakeFloat64(0).Kind() returns Int, not Float)
+ return v
}
-func unaryOp(op Op, x Val, t *types.Type) Val {
- switch op {
- case OPLUS:
- switch x.Ctype() {
- case CTINT, CTRUNE, CTFLT, CTCPLX:
- return x
- }
-
- case ONEG:
- switch x.Ctype() {
- case CTINT, CTRUNE:
- x := x.U.(*Mpint)
- u := new(Mpint)
- u.Rune = x.Rune
- u.Set(x)
- u.Neg()
- return Val{U: u}
-
- case CTFLT:
- x := x.U.(*Mpflt)
- u := newMpflt()
- u.Set(x)
- u.Neg()
- return Val{U: u}
-
- case CTCPLX:
- x := x.U.(*Mpcplx)
- u := newMpcmplx()
- u.Real.Set(&x.Real)
- u.Imag.Set(&x.Imag)
- u.Real.Neg()
- u.Imag.Neg()
- return Val{U: u}
- }
-
- case OBITNOT:
- switch x.Ctype() {
- case CTINT, CTRUNE:
- x := x.U.(*Mpint)
-
- u := new(Mpint)
- u.Rune = x.Rune
- if t.IsSigned() || t.IsUntyped() {
- // Signed values change sign.
- u.SetInt64(-1)
- } else {
- // Unsigned values invert their bits.
- u.Set(maxintval[t.Etype])
- }
- u.Xor(x)
- return Val{U: u}
- }
-
- case ONOT:
- return Val{U: !x.U.(bool)}
- }
-
- Fatalf("unaryOp: bad operation: %v %v", op, x)
- panic("unreachable")
+func makeComplex(real, imag constant.Value) constant.Value {
+ return constant.BinaryOp(constant.ToFloat(real), token.ADD, constant.MakeImag(constant.ToFloat(imag)))
}
-func shiftOp(x Val, op Op, y Val) Val {
- if x.Ctype() != CTRUNE {
- x = toint(x)
- }
- y = toint(y)
-
- u := new(Mpint)
- u.Set(x.U.(*Mpint))
- u.Rune = x.U.(*Mpint).Rune
- switch op {
- case OLSH:
- u.Lsh(y.U.(*Mpint))
- case ORSH:
- u.Rsh(y.U.(*Mpint))
- default:
- Fatalf("shiftOp: bad operator: %v", op)
- panic("unreachable")
- }
- return Val{U: u}
+func square(x constant.Value) constant.Value {
+ return constant.BinaryOp(x, token.MUL, x)
}
-// setconst rewrites n as an OLITERAL with value v.
-func setconst(n *Node, v Val) {
- // If constant folding failed, mark n as broken and give up.
- if v.U == nil {
- n.Type = nil
- return
- }
-
- // Ensure n.Orig still points to a semantically-equivalent
- // expression after we rewrite n into a constant.
- if n.Orig == n {
- n.Orig = n.sepcopy()
- }
-
- *n = Node{
- Op: OLITERAL,
- Pos: n.Pos,
- Orig: n.Orig,
- Type: n.Type,
- Xoffset: BADWIDTH,
- }
- n.SetVal(v)
- if vt := idealType(v.Ctype()); n.Type.IsUntyped() && n.Type != vt {
- Fatalf("untyped type mismatch, have: %v, want: %v", n.Type, vt)
- }
+// For matching historical "constant OP overflow" error messages.
+// TODO(mdempsky): Replace with error messages like go/types uses.
+var overflowNames = [...]string{
+ ir.OADD: "addition",
+ ir.OSUB: "subtraction",
+ ir.OMUL: "multiplication",
+ ir.OLSH: "shift",
+ ir.OXOR: "bitwise XOR",
+ ir.OBITNOT: "bitwise complement",
+}
- // Check range.
+// origConst returns an OLITERAL with orig n and value v.
+func origConst(n ir.Node, v constant.Value) ir.Node {
lno := setlineno(n)
- overflow(v, n.Type)
- lineno = lno
-
- if !n.Type.IsUntyped() {
- switch v.Ctype() {
- // Truncate precision for non-ideal float.
- case CTFLT:
- n.SetVal(Val{truncfltlit(v.U.(*Mpflt), n.Type)})
- // Truncate precision for non-ideal complex.
- case CTCPLX:
- n.SetVal(Val{trunccmplxlit(v.U.(*Mpcplx), n.Type)})
+ v = convertVal(v, n.Type(), false)
+ base.Pos = lno
+
+ switch v.Kind() {
+ case constant.Int:
+ if constant.BitLen(v) <= Mpprec {
+ break
+ }
+ fallthrough
+ case constant.Unknown:
+ what := overflowNames[n.Op()]
+ if what == "" {
+ base.Fatalf("unexpected overflow: %v", n.Op())
}
+ base.ErrorfAt(n.Pos(), "constant %v overflow", what)
+ n.SetType(nil)
+ return n
}
-}
-
-func setboolconst(n *Node, v bool) {
- setconst(n, Val{U: v})
-}
-
-func setintconst(n *Node, v int64) {
- u := new(Mpint)
- u.SetInt64(v)
- setconst(n, Val{u})
-}
-// nodlit returns a new untyped constant with value v.
-func nodlit(v Val) *Node {
- n := nod(OLITERAL, nil, nil)
+ orig := n
+ n = ir.NodAt(orig.Pos(), ir.OLITERAL, nil, nil)
+ n.SetOrig(orig)
+ n.SetType(orig.Type())
n.SetVal(v)
- n.Type = idealType(v.Ctype())
return n
}
-func idealType(ct Ctype) *types.Type {
- switch ct {
- case CTSTR:
- return types.UntypedString
- case CTBOOL:
- return types.UntypedBool
- case CTINT:
- return types.UntypedInt
- case CTRUNE:
- return types.UntypedRune
- case CTFLT:
- return types.UntypedFloat
- case CTCPLX:
- return types.UntypedComplex
- case CTNIL:
- return types.Types[TNIL]
- }
- Fatalf("unexpected Ctype: %v", ct)
- return nil
+func origBoolConst(n ir.Node, v bool) ir.Node {
+ return origConst(n, constant.MakeBool(v))
+}
+
+func origIntConst(n ir.Node, v int64) ir.Node {
+ return origConst(n, constant.MakeInt64(v))
}
// defaultlit on both nodes simultaneously;
@@ -1052,17 +662,17 @@ func idealType(ct Ctype) *types.Type {
// force means must assign concrete (non-ideal) type.
// The results of defaultlit2 MUST be assigned back to l and r, e.g.
// n.Left, n.Right = defaultlit2(n.Left, n.Right, force)
-func defaultlit2(l *Node, r *Node, force bool) (*Node, *Node) {
- if l.Type == nil || r.Type == nil {
+func defaultlit2(l ir.Node, r ir.Node, force bool) (ir.Node, ir.Node) {
+ if l.Type() == nil || r.Type() == nil {
return l, r
}
- if !l.Type.IsUntyped() {
- r = convlit(r, l.Type)
+ if !l.Type().IsUntyped() {
+ r = convlit(r, l.Type())
return l, r
}
- if !r.Type.IsUntyped() {
- l = convlit(l, r.Type)
+ if !r.Type().IsUntyped() {
+ l = convlit(l, r.Type())
return l, r
}
@@ -1071,93 +681,77 @@ func defaultlit2(l *Node, r *Node, force bool) (*Node, *Node) {
}
// Can't mix bool with non-bool, string with non-string, or nil with anything (untyped).
- if l.Type.IsBoolean() != r.Type.IsBoolean() {
+ if l.Type().IsBoolean() != r.Type().IsBoolean() {
return l, r
}
- if l.Type.IsString() != r.Type.IsString() {
+ if l.Type().IsString() != r.Type().IsString() {
return l, r
}
- if l.isNil() || r.isNil() {
+ if ir.IsNil(l) || ir.IsNil(r) {
return l, r
}
- t := defaultType(mixUntyped(l.Type, r.Type))
+ t := defaultType(mixUntyped(l.Type(), r.Type()))
l = convlit(l, t)
r = convlit(r, t)
return l, r
}
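// For example, with l of type float64 and r an untyped int constant,
// defaultlit2 converts r to float64; with both operands untyped and force
// set, both are converted to the default type of the higher-ranked
// operand (see mixUntyped and defaultType below).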
-func ctype(t *types.Type) Ctype {
- switch t {
- case types.UntypedBool:
- return CTBOOL
- case types.UntypedString:
- return CTSTR
- case types.UntypedInt:
- return CTINT
- case types.UntypedRune:
- return CTRUNE
- case types.UntypedFloat:
- return CTFLT
- case types.UntypedComplex:
- return CTCPLX
+func mixUntyped(t1, t2 *types.Type) *types.Type {
+ if t1 == t2 {
+ return t1
+ }
+
+ rank := func(t *types.Type) int {
+ switch t {
+ case types.UntypedInt:
+ return 0
+ case types.UntypedRune:
+ return 1
+ case types.UntypedFloat:
+ return 2
+ case types.UntypedComplex:
+ return 3
+ }
+ base.Fatalf("bad type %v", t)
+ panic("unreachable")
}
- Fatalf("bad type %v", t)
- panic("unreachable")
-}
-func mixUntyped(t1, t2 *types.Type) *types.Type {
- t := t1
- if ctype(t2) > ctype(t1) {
- t = t2
+ if rank(t2) > rank(t1) {
+ return t2
}
- return t
+ return t1
}
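// E.g. mixUntyped(types.UntypedInt, types.UntypedFloat) yields
// types.UntypedFloat, since float's rank (2) beats int's (0).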
func defaultType(t *types.Type) *types.Type {
- if !t.IsUntyped() || t.Etype == TNIL {
+ if !t.IsUntyped() || t.Etype == types.TNIL {
return t
}
switch t {
case types.UntypedBool:
- return types.Types[TBOOL]
+ return types.Types[types.TBOOL]
case types.UntypedString:
- return types.Types[TSTRING]
+ return types.Types[types.TSTRING]
case types.UntypedInt:
- return types.Types[TINT]
+ return types.Types[types.TINT]
case types.UntypedRune:
return types.Runetype
case types.UntypedFloat:
- return types.Types[TFLOAT64]
+ return types.Types[types.TFLOAT64]
case types.UntypedComplex:
- return types.Types[TCOMPLEX128]
+ return types.Types[types.TCOMPLEX128]
}
- Fatalf("bad type %v", t)
+ base.Fatalf("bad type %v", t)
return nil
}
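// E.g. defaultType(types.UntypedInt) is types.Types[types.TINT], and
// defaultType(types.UntypedRune) is types.Runetype.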
-func smallintconst(n *Node) bool {
- if n.Op == OLITERAL && Isconst(n, CTINT) && n.Type != nil {
- switch simtype[n.Type.Etype] {
- case TINT8,
- TUINT8,
- TINT16,
- TUINT16,
- TINT32,
- TUINT32,
- TBOOL:
- return true
-
- case TIDEAL, TINT64, TUINT64, TPTR:
- v, ok := n.Val().U.(*Mpint)
- if ok && v.Cmp(minintval[TINT32]) >= 0 && v.Cmp(maxintval[TINT32]) <= 0 {
- return true
- }
- }
+func smallintconst(n ir.Node) bool {
+ if n.Op() == ir.OLITERAL {
+ v, ok := constant.Int64Val(n.Val())
+ return ok && int64(int32(v)) == v
}
-
return false
}
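// The int64(int32(v)) == v comparison is the usual round-trip test for
// 32-bit representability; a standalone sketch of the same check:
//
//	func fitsInt32(v int64) bool { return int64(int32(v)) == v }
//	// fitsInt32(1<<31-1) == true; fitsInt32(1<<31) == false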
@@ -1166,21 +760,22 @@ func smallintconst(n *Node) bool {
// If n is not a constant expression, not representable as an
// integer, or negative, it returns -1. If n is too large, it
// returns -2.
-func indexconst(n *Node) int64 {
- if n.Op != OLITERAL {
+func indexconst(n ir.Node) int64 {
+ if n.Op() != ir.OLITERAL {
+ return -1
+ }
+ if !n.Type().IsInteger() && n.Type().Etype != types.TIDEAL {
return -1
}
- v := toint(n.Val()) // toint returns argument unchanged if not representable as an *Mpint
- vi, ok := v.U.(*Mpint)
- if !ok || vi.CmpInt64(0) < 0 {
+ v := toint(n.Val())
+ if v.Kind() != constant.Int || constant.Sign(v) < 0 {
return -1
}
- if vi.Cmp(maxintval[TINT]) > 0 {
+ if doesoverflow(v, types.Types[types.TINT]) {
return -2
}
-
- return vi.Int64()
+ return ir.Int64Val(types.Types[types.TINT], v)
}
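// Illustrative return values, where each argument stands for an OLITERAL
// node with that value (assuming a 64-bit types.TINT):
//
//	indexconst(3)     ==  3
//	indexconst(-1)    == -1 // negative
//	indexconst(1<<70) == -2 // overflows int
//	indexconst(x)     == -1 // non-constant expression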
// isGoConst reports whether n is a Go language constant (as opposed to a
@@ -1188,47 +783,47 @@ func indexconst(n *Node) int64 {
//
// Expressions derived from nil, like string([]byte(nil)), while they
// may be known at compile time, are not Go language constants.
-func (n *Node) isGoConst() bool {
- return n.Op == OLITERAL && n.Val().Ctype() != CTNIL
+func isGoConst(n ir.Node) bool {
+ return n.Op() == ir.OLITERAL
}
-func hascallchan(n *Node) bool {
+func hascallchan(n ir.Node) bool {
if n == nil {
return false
}
- switch n.Op {
- case OAPPEND,
- OCALL,
- OCALLFUNC,
- OCALLINTER,
- OCALLMETH,
- OCAP,
- OCLOSE,
- OCOMPLEX,
- OCOPY,
- ODELETE,
- OIMAG,
- OLEN,
- OMAKE,
- ONEW,
- OPANIC,
- OPRINT,
- OPRINTN,
- OREAL,
- ORECOVER,
- ORECV:
+ switch n.Op() {
+ case ir.OAPPEND,
+ ir.OCALL,
+ ir.OCALLFUNC,
+ ir.OCALLINTER,
+ ir.OCALLMETH,
+ ir.OCAP,
+ ir.OCLOSE,
+ ir.OCOMPLEX,
+ ir.OCOPY,
+ ir.ODELETE,
+ ir.OIMAG,
+ ir.OLEN,
+ ir.OMAKE,
+ ir.ONEW,
+ ir.OPANIC,
+ ir.OPRINT,
+ ir.OPRINTN,
+ ir.OREAL,
+ ir.ORECOVER,
+ ir.ORECV:
return true
}
- if hascallchan(n.Left) || hascallchan(n.Right) {
+ if hascallchan(n.Left()) || hascallchan(n.Right()) {
return true
}
- for _, n1 := range n.List.Slice() {
+ for _, n1 := range n.List().Slice() {
if hascallchan(n1) {
return true
}
}
- for _, n2 := range n.Rlist.Slice() {
+ for _, n2 := range n.Rlist().Slice() {
if hascallchan(n2) {
return true
}
@@ -1256,16 +851,16 @@ type constSetKey struct {
// where are used in the error message.
//
// n must not be an untyped constant.
-func (s *constSet) add(pos src.XPos, n *Node, what, where string) {
- if n.Op == OCONVIFACE && n.Implicit() {
- n = n.Left
+func (s *constSet) add(pos src.XPos, n ir.Node, what, where string) {
+ if n.Op() == ir.OCONVIFACE && n.Implicit() {
+ n = n.Left()
}
- if !n.isGoConst() {
+ if !isGoConst(n) {
return
}
- if n.Type.IsUntyped() {
- Fatalf("%v is untyped", n)
+ if n.Type().IsUntyped() {
+ base.Fatalf("%v is untyped", n)
}
// Consts are only duplicates if they have the same value and
@@ -1283,17 +878,17 @@ func (s *constSet) add(pos src.XPos, n *Node, what, where string) {
// #21866 by treating all type aliases like byte/uint8 and
// rune/int32.
- typ := n.Type
+ typ := n.Type()
switch typ {
case types.Bytetype:
- typ = types.Types[TUINT8]
+ typ = types.Types[types.TUINT8]
case types.Runetype:
- typ = types.Types[TINT32]
+ typ = types.Types[types.TINT32]
}
- k := constSetKey{typ, n.Val().Interface()}
+ k := constSetKey{typ, ir.ConstValue(n)}
if hasUniquePos(n) {
- pos = n.Pos
+ pos = n.Pos()
}
if s.m == nil {
@@ -1301,9 +896,9 @@ func (s *constSet) add(pos src.XPos, n *Node, what, where string) {
}
if prevPos, isDup := s.m[k]; isDup {
- yyerrorl(pos, "duplicate %s %s in %s\n\tprevious %s at %v",
+ base.ErrorfAt(pos, "duplicate %s %s in %s\n\tprevious %s at %v",
what, nodeAndVal(n), where,
- what, linestr(prevPos))
+ what, base.FmtPos(prevPos))
} else {
s.m[k] = pos
}
@@ -1313,9 +908,9 @@ func (s *constSet) add(pos src.XPos, n *Node, what, where string) {
// the latter is non-obvious.
//
// TODO(mdempsky): This could probably be a fmt.go flag.
-func nodeAndVal(n *Node) string {
+func nodeAndVal(n ir.Node) string {
show := n.String()
- val := n.Val().Interface()
+ val := ir.ConstValue(n)
if s := fmt.Sprintf("%#v", val); show != s {
show += " (value " + s + ")"
}
diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go
index 6e90eb4d65..2a7be137c0 100644
--- a/src/cmd/compile/internal/gc/dcl.go
+++ b/src/cmd/compile/internal/gc/dcl.go
@@ -6,23 +6,23 @@ package gc
import (
"bytes"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
"fmt"
+ "go/constant"
"strings"
)
// Declaration stack & operations
-var externdcl []*Node
+var externdcl []ir.Node
func testdclstack() {
if !types.IsDclstackValid() {
- if nerrors != 0 {
- errorexit()
- }
- Fatalf("mark left on the dclstack")
+ base.Fatalf("mark left on the dclstack")
}
}
@@ -33,7 +33,7 @@ func redeclare(pos src.XPos, s *types.Sym, where string) {
if pkg == nil {
pkg = s.Pkg
}
- yyerrorl(pos, "%v redeclared %s\n"+
+ base.ErrorfAt(pos, "%v redeclared %s\n"+
"\tprevious declaration during import %q", s, where, pkg.Path)
} else {
prevPos := s.Lastlineno
@@ -46,8 +46,8 @@ func redeclare(pos src.XPos, s *types.Sym, where string) {
pos, prevPos = prevPos, pos
}
- yyerrorl(pos, "%v redeclared %s\n"+
- "\tprevious declaration at %v", s, where, linestr(prevPos))
+ base.ErrorfAt(pos, "%v redeclared %s\n"+
+ "\tprevious declaration at %v", s, where, base.FmtPos(prevPos))
}
}
@@ -59,103 +59,103 @@ var declare_typegen int
// declare records that Node n declares symbol n.Sym in the specified
// declaration context.
-func declare(n *Node, ctxt Class) {
- if n.isBlank() {
+func declare(n ir.Node, ctxt ir.Class) {
+ if ir.IsBlank(n) {
return
}
- if n.Name == nil {
+ if n.Name() == nil {
// named OLITERAL needs Name; most OLITERALs don't.
- n.Name = new(Name)
+ n.SetName(new(ir.Name))
}
- s := n.Sym
+ s := n.Sym()
// kludgy: typecheckok means we're past parsing. E.g., genwrapper may declare out-of-package names later.
- if !inimport && !typecheckok && s.Pkg != localpkg {
- yyerrorl(n.Pos, "cannot declare name %v", s)
+ if !inimport && !typecheckok && s.Pkg != ir.LocalPkg {
+ base.ErrorfAt(n.Pos(), "cannot declare name %v", s)
}
gen := 0
- if ctxt == PEXTERN {
+ if ctxt == ir.PEXTERN {
if s.Name == "init" {
- yyerrorl(n.Pos, "cannot declare init - must be func")
+ base.ErrorfAt(n.Pos(), "cannot declare init - must be func")
}
if s.Name == "main" && s.Pkg.Name == "main" {
- yyerrorl(n.Pos, "cannot declare main - must be func")
+ base.ErrorfAt(n.Pos(), "cannot declare main - must be func")
}
externdcl = append(externdcl, n)
} else {
- if Curfn == nil && ctxt == PAUTO {
- lineno = n.Pos
- Fatalf("automatic outside function")
+ if Curfn == nil && ctxt == ir.PAUTO {
+ base.Pos = n.Pos()
+ base.Fatalf("automatic outside function")
}
- if Curfn != nil && ctxt != PFUNC {
- Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
+ if Curfn != nil && ctxt != ir.PFUNC {
+ Curfn.Func().Dcl = append(Curfn.Func().Dcl, n)
}
- if n.Op == OTYPE {
+ if n.Op() == ir.OTYPE {
declare_typegen++
gen = declare_typegen
- } else if n.Op == ONAME && ctxt == PAUTO && !strings.Contains(s.Name, "·") {
+ } else if n.Op() == ir.ONAME && ctxt == ir.PAUTO && !strings.Contains(s.Name, "·") {
vargen++
gen = vargen
}
types.Pushdcl(s)
- n.Name.Curfn = Curfn
+ n.Name().Curfn = Curfn
}
- if ctxt == PAUTO {
- n.Xoffset = 0
+ if ctxt == ir.PAUTO {
+ n.SetOffset(0)
}
if s.Block == types.Block {
// functype will print errors about duplicate function arguments.
// Don't repeat the error here.
- if ctxt != PPARAM && ctxt != PPARAMOUT {
- redeclare(n.Pos, s, "in this block")
+ if ctxt != ir.PPARAM && ctxt != ir.PPARAMOUT {
+ redeclare(n.Pos(), s, "in this block")
}
}
s.Block = types.Block
- s.Lastlineno = lineno
- s.Def = asTypesNode(n)
- n.Name.Vargen = int32(gen)
+ s.Lastlineno = base.Pos
+ s.Def = n
+ n.Name().Vargen = int32(gen)
n.SetClass(ctxt)
- if ctxt == PFUNC {
- n.Sym.SetFunc(true)
+ if ctxt == ir.PFUNC {
+ n.Sym().SetFunc(true)
}
autoexport(n, ctxt)
}
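// For instance, a package-scope "var init = f()" reaches this check with
// ctxt == ir.PEXTERN and s.Name == "init", producing "cannot declare
// init - must be func"; an ir.PAUTO declaration with Curfn == nil is an
// internal compiler error instead.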
-func addvar(n *Node, t *types.Type, ctxt Class) {
- if n == nil || n.Sym == nil || (n.Op != ONAME && n.Op != ONONAME) || t == nil {
- Fatalf("addvar: n=%v t=%v nil", n, t)
+func addvar(n ir.Node, t *types.Type, ctxt ir.Class) {
+ if n == nil || n.Sym() == nil || (n.Op() != ir.ONAME && n.Op() != ir.ONONAME) || t == nil {
+ base.Fatalf("addvar: n=%v t=%v nil", n, t)
}
- n.Op = ONAME
+ n.SetOp(ir.ONAME)
declare(n, ctxt)
- n.Type = t
+ n.SetType(t)
}
// declare variables from grammar
// new_name_list (type | [type] = expr_list)
-func variter(vl []*Node, t *Node, el []*Node) []*Node {
- var init []*Node
+func variter(vl []ir.Node, t ir.Node, el []ir.Node) []ir.Node {
+ var init []ir.Node
doexpr := len(el) > 0
if len(el) == 1 && len(vl) > 1 {
e := el[0]
- as2 := nod(OAS2, nil, nil)
- as2.List.Set(vl)
- as2.Rlist.Set1(e)
+ as2 := ir.Nod(ir.OAS2, nil, nil)
+ as2.PtrList().Set(vl)
+ as2.PtrRlist().Set1(e)
for _, v := range vl {
- v.Op = ONAME
+ v.SetOp(ir.ONAME)
declare(v, dclcontext)
- v.Name.Param.Ntype = t
- v.Name.Defn = as2
+ v.Name().Param.Ntype = t
+ v.Name().Defn = as2
if Curfn != nil {
- init = append(init, nod(ODCL, v, nil))
+ init = append(init, ir.Nod(ir.ODCL, v, nil))
}
}
@@ -164,94 +164,96 @@ func variter(vl []*Node, t *Node, el []*Node) []*Node {
nel := len(el)
for _, v := range vl {
- var e *Node
+ var e ir.Node
if doexpr {
if len(el) == 0 {
- yyerror("assignment mismatch: %d variables but %d values", len(vl), nel)
+ base.Errorf("assignment mismatch: %d variables but %d values", len(vl), nel)
break
}
e = el[0]
el = el[1:]
}
- v.Op = ONAME
+ v.SetOp(ir.ONAME)
declare(v, dclcontext)
- v.Name.Param.Ntype = t
+ v.Name().Param.Ntype = t
- if e != nil || Curfn != nil || v.isBlank() {
+ if e != nil || Curfn != nil || ir.IsBlank(v) {
if Curfn != nil {
- init = append(init, nod(ODCL, v, nil))
+ init = append(init, ir.Nod(ir.ODCL, v, nil))
}
- e = nod(OAS, v, e)
+ e = ir.Nod(ir.OAS, v, e)
init = append(init, e)
- if e.Right != nil {
- v.Name.Defn = e
+ if e.Right() != nil {
+ v.Name().Defn = e
}
}
}
if len(el) != 0 {
- yyerror("assignment mismatch: %d variables but %d values", len(vl), nel)
+ base.Errorf("assignment mismatch: %d variables but %d values", len(vl), nel)
}
return init
}
// newnoname returns a new ONONAME Node associated with symbol s.
-func newnoname(s *types.Sym) *Node {
+func newnoname(s *types.Sym) ir.Node {
if s == nil {
- Fatalf("newnoname nil")
+ base.Fatalf("newnoname nil")
}
- n := nod(ONONAME, nil, nil)
- n.Sym = s
- n.Xoffset = 0
+ n := ir.Nod(ir.ONONAME, nil, nil)
+ n.SetSym(s)
+ n.SetOffset(0)
return n
}
// newfuncnamel generates a new name node for a function or method.
-// TODO(rsc): Use an ODCLFUNC node instead. See comment in CL 7360.
-func newfuncnamel(pos src.XPos, s *types.Sym) *Node {
- n := newnamel(pos, s)
- n.Func = new(Func)
- n.Func.SetIsHiddenClosure(Curfn != nil)
+func newfuncnamel(pos src.XPos, s *types.Sym, fn *ir.Func) ir.Node {
+ if fn.Nname != nil {
+ base.Fatalf("newfuncnamel - already have name")
+ }
+ n := ir.NewNameAt(pos, s)
+ n.SetFunc(fn)
+ fn.Nname = n
return n
}
// dclname generates a new name node for a name being declared.
-func dclname(s *types.Sym) *Node {
- n := newname(s)
- n.Op = ONONAME // caller will correct it
+func dclname(s *types.Sym) ir.Node {
+ n := NewName(s)
+ n.SetOp(ir.ONONAME) // caller will correct it
return n
}
-func typenod(t *types.Type) *Node {
+func typenod(t *types.Type) ir.Node {
return typenodl(src.NoXPos, t)
}
-func typenodl(pos src.XPos, t *types.Type) *Node {
+func typenodl(pos src.XPos, t *types.Type) ir.Node {
// if we copied another type with *t = *u,
// then t.Nod might be out of date, so
// check that t.Nod's type still points back to t
- if asNode(t.Nod) == nil || asNode(t.Nod).Type != t {
- t.Nod = asTypesNode(nodl(pos, OTYPE, nil, nil))
- asNode(t.Nod).Type = t
- asNode(t.Nod).Sym = t.Sym
+ if ir.AsNode(t.Nod) == nil || ir.AsNode(t.Nod).Type() != t {
+ t.Nod = ir.NodAt(pos, ir.OTYPE, nil, nil)
+ ir.AsNode(t.Nod).SetType(t)
+ ir.AsNode(t.Nod).SetSym(t.Sym)
}
- return asNode(t.Nod)
+ return ir.AsNode(t.Nod)
}
-func anonfield(typ *types.Type) *Node {
+func anonfield(typ *types.Type) ir.Node {
return symfield(nil, typ)
}
-func namedfield(s string, typ *types.Type) *Node {
+func namedfield(s string, typ *types.Type) ir.Node {
return symfield(lookup(s), typ)
}
-func symfield(s *types.Sym, typ *types.Type) *Node {
- n := nodSym(ODCLFIELD, nil, s)
- n.Type = typ
+func symfield(s *types.Sym, typ *types.Type) ir.Node {
+ n := nodSym(ir.ODCLFIELD, nil, s)
+ n.SetType(typ)
return n
}
@@ -259,8 +261,8 @@ func symfield(s *types.Sym, typ *types.Type) *Node {
// If no such Node currently exists, an ONONAME Node is returned instead.
// Automatically creates a new closure variable if the referenced symbol was
// declared in a different (containing) function.
-func oldname(s *types.Sym) *Node {
- n := asNode(s.Def)
+func oldname(s *types.Sym) ir.Node {
+ n := ir.AsNode(s.Def)
if n == nil {
// Maybe a top-level declaration will come along later to
// define s. resolve will check s.Def again once all input
@@ -268,28 +270,28 @@ func oldname(s *types.Sym) *Node {
return newnoname(s)
}
- if Curfn != nil && n.Op == ONAME && n.Name.Curfn != nil && n.Name.Curfn != Curfn {
+ if Curfn != nil && n.Op() == ir.ONAME && n.Name().Curfn != nil && n.Name().Curfn != Curfn {
// Inner func is referring to var in outer func.
//
// TODO(rsc): If there is an outer variable x and we
// are parsing x := 5 inside the closure, until we get to
// the := it looks like a reference to the outer x so we'll
// make x a closure variable unnecessarily.
- c := n.Name.Param.Innermost
- if c == nil || c.Name.Curfn != Curfn {
+ c := n.Name().Param.Innermost
+ if c == nil || c.Name().Curfn != Curfn {
// Do not have a closure var for the active closure yet; make one.
- c = newname(s)
- c.SetClass(PAUTOHEAP)
- c.Name.SetIsClosureVar(true)
+ c = NewName(s)
+ c.SetClass(ir.PAUTOHEAP)
+ c.Name().SetIsClosureVar(true)
c.SetIsDDD(n.IsDDD())
- c.Name.Defn = n
+ c.Name().Defn = n
// Link into list of active closure variables.
// Popped from list in func funcLit.
- c.Name.Param.Outer = n.Name.Param.Innermost
- n.Name.Param.Innermost = c
+ c.Name().Param.Outer = n.Name().Param.Innermost
+ n.Name().Param.Innermost = c
- Curfn.Func.Cvars.Append(c)
+ Curfn.Func().ClosureVars.Append(c)
}
// return ref to closure var, not original
@@ -300,81 +302,81 @@ func oldname(s *types.Sym) *Node {
}
// importName is like oldname, but it reports an error if sym is from another package and not exported.
-func importName(sym *types.Sym) *Node {
+func importName(sym *types.Sym) ir.Node {
n := oldname(sym)
- if !types.IsExported(sym.Name) && sym.Pkg != localpkg {
+ if !types.IsExported(sym.Name) && sym.Pkg != ir.LocalPkg {
n.SetDiag(true)
- yyerror("cannot refer to unexported name %s.%s", sym.Pkg.Name, sym.Name)
+ base.Errorf("cannot refer to unexported name %s.%s", sym.Pkg.Name, sym.Name)
}
return n
}
// := declarations
-func colasname(n *Node) bool {
- switch n.Op {
- case ONAME,
- ONONAME,
- OPACK,
- OTYPE,
- OLITERAL:
- return n.Sym != nil
+func colasname(n ir.Node) bool {
+ switch n.Op() {
+ case ir.ONAME,
+ ir.ONONAME,
+ ir.OPACK,
+ ir.OTYPE,
+ ir.OLITERAL:
+ return n.Sym() != nil
}
return false
}
-func colasdefn(left []*Node, defn *Node) {
+func colasdefn(left []ir.Node, defn ir.Node) {
for _, n := range left {
- if n.Sym != nil {
- n.Sym.SetUniq(true)
+ if n.Sym() != nil {
+ n.Sym().SetUniq(true)
}
}
var nnew, nerr int
for i, n := range left {
- if n.isBlank() {
+ if ir.IsBlank(n) {
continue
}
if !colasname(n) {
- yyerrorl(defn.Pos, "non-name %v on left side of :=", n)
+ base.ErrorfAt(defn.Pos(), "non-name %v on left side of :=", n)
nerr++
continue
}
- if !n.Sym.Uniq() {
- yyerrorl(defn.Pos, "%v repeated on left side of :=", n.Sym)
+ if !n.Sym().Uniq() {
+ base.ErrorfAt(defn.Pos(), "%v repeated on left side of :=", n.Sym())
n.SetDiag(true)
nerr++
continue
}
- n.Sym.SetUniq(false)
- if n.Sym.Block == types.Block {
+ n.Sym().SetUniq(false)
+ if n.Sym().Block == types.Block {
continue
}
nnew++
- n = newname(n.Sym)
+ n = NewName(n.Sym())
declare(n, dclcontext)
- n.Name.Defn = defn
- defn.Ninit.Append(nod(ODCL, n, nil))
+ n.Name().Defn = defn
+ defn.PtrInit().Append(ir.Nod(ir.ODCL, n, nil))
left[i] = n
}
if nnew == 0 && nerr == 0 {
- yyerrorl(defn.Pos, "no new variables on left side of :=")
+ base.ErrorfAt(defn.Pos(), "no new variables on left side of :=")
}
}
// declare the arguments in an
// interface field declaration.
-func ifacedcl(n *Node) {
- if n.Op != ODCLFIELD || n.Left == nil {
- Fatalf("ifacedcl")
+func ifacedcl(n ir.Node) {
+ if n.Op() != ir.ODCLFIELD || n.Left() == nil {
+ base.Fatalf("ifacedcl")
}
- if n.Sym.IsBlank() {
- yyerror("methods must have a unique non-blank name")
+ if n.Sym().IsBlank() {
+ base.Errorf("methods must have a unique non-blank name")
}
}
@@ -382,26 +384,24 @@ func ifacedcl(n *Node) {
// and declare the arguments.
// called in extern-declaration context
// returns in auto-declaration context.
-func funchdr(n *Node) {
+func funchdr(n ir.Node) {
// change the declaration context from extern to auto
funcStack = append(funcStack, funcStackEnt{Curfn, dclcontext})
Curfn = n
- dclcontext = PAUTO
+ dclcontext = ir.PAUTO
types.Markdcl()
- if n.Func.Nname != nil {
- funcargs(n.Func.Nname.Name.Param.Ntype)
- } else if n.Func.Ntype != nil {
- funcargs(n.Func.Ntype)
+ if n.Func().Nname != nil && n.Func().Nname.Name().Param.Ntype != nil {
+ funcargs(n.Func().Nname.Name().Param.Ntype)
} else {
- funcargs2(n.Type)
+ funcargs2(n.Type())
}
}
-func funcargs(nt *Node) {
- if nt.Op != OTFUNC {
- Fatalf("funcargs %v", nt.Op)
+func funcargs(nt ir.Node) {
+ if nt.Op() != ir.OTFUNC {
+ base.Fatalf("funcargs %v", nt.Op())
}
// re-start the variable generation number
@@ -411,87 +411,87 @@ func funcargs(nt *Node) {
// TODO(mdempsky): This is ugly, and only necessary because
// esc.go uses Vargen to figure out result parameters' index
// within the result tuple.
- vargen = nt.Rlist.Len()
+ vargen = nt.Rlist().Len()
// declare the receiver and the input arguments.
- if nt.Left != nil {
- funcarg(nt.Left, PPARAM)
+ if nt.Left() != nil {
+ funcarg(nt.Left(), ir.PPARAM)
}
- for _, n := range nt.List.Slice() {
- funcarg(n, PPARAM)
+ for _, n := range nt.List().Slice() {
+ funcarg(n, ir.PPARAM)
}
oldvargen := vargen
vargen = 0
// declare the output arguments.
- gen := nt.List.Len()
- for _, n := range nt.Rlist.Slice() {
- if n.Sym == nil {
+ gen := nt.List().Len()
+ for _, n := range nt.Rlist().Slice() {
+ if n.Sym() == nil {
// Name so that escape analysis can track it. ~r stands for 'result'.
- n.Sym = lookupN("~r", gen)
+ n.SetSym(lookupN("~r", gen))
gen++
}
- if n.Sym.IsBlank() {
+ if n.Sym().IsBlank() {
// Give it a name so we can assign to it during return. ~b stands for 'blank'.
// The name must be different from ~r above because if you have
// func f() (_ int)
// func g() int
// f is allowed to use a plain 'return' with no arguments, while g is not.
// So the two cases must be distinguished.
- n.Sym = lookupN("~b", gen)
+ n.SetSym(lookupN("~b", gen))
gen++
}
- funcarg(n, PPARAMOUT)
+ funcarg(n, ir.PPARAMOUT)
}
vargen = oldvargen
}
-func funcarg(n *Node, ctxt Class) {
- if n.Op != ODCLFIELD {
- Fatalf("funcarg %v", n.Op)
+func funcarg(n ir.Node, ctxt ir.Class) {
+ if n.Op() != ir.ODCLFIELD {
+ base.Fatalf("funcarg %v", n.Op())
}
- if n.Sym == nil {
+ if n.Sym() == nil {
return
}
- n.Right = newnamel(n.Pos, n.Sym)
- n.Right.Name.Param.Ntype = n.Left
- n.Right.SetIsDDD(n.IsDDD())
- declare(n.Right, ctxt)
+ n.SetRight(ir.NewNameAt(n.Pos(), n.Sym()))
+ n.Right().Name().Param.Ntype = n.Left()
+ n.Right().SetIsDDD(n.IsDDD())
+ declare(n.Right(), ctxt)
vargen++
- n.Right.Name.Vargen = int32(vargen)
+ n.Right().Name().Vargen = int32(vargen)
}
// Same as funcargs, except run over an already constructed TFUNC.
// This happens during import, where the hidden_fndcl rule has
// used functype directly to parse the function's type.
func funcargs2(t *types.Type) {
- if t.Etype != TFUNC {
- Fatalf("funcargs2 %v", t)
+ if t.Etype != types.TFUNC {
+ base.Fatalf("funcargs2 %v", t)
}
for _, f := range t.Recvs().Fields().Slice() {
- funcarg2(f, PPARAM)
+ funcarg2(f, ir.PPARAM)
}
for _, f := range t.Params().Fields().Slice() {
- funcarg2(f, PPARAM)
+ funcarg2(f, ir.PPARAM)
}
for _, f := range t.Results().Fields().Slice() {
- funcarg2(f, PPARAMOUT)
+ funcarg2(f, ir.PPARAMOUT)
}
}
-func funcarg2(f *types.Field, ctxt Class) {
+func funcarg2(f *types.Field, ctxt ir.Class) {
if f.Sym == nil {
return
}
- n := newnamel(f.Pos, f.Sym)
- f.Nname = asTypesNode(n)
- n.Type = f.Type
+ n := ir.NewNameAt(f.Pos, f.Sym)
+ f.Nname = n
+ n.SetType(f.Type)
n.SetIsDDD(f.IsDDD())
declare(n, ctxt)
}
@@ -499,8 +499,8 @@ func funcarg2(f *types.Field, ctxt Class) {
var funcStack []funcStackEnt // stack of previous values of Curfn/dclcontext
type funcStackEnt struct {
- curfn *Node
- dclcontext Class
+ curfn ir.Node
+ dclcontext ir.Class
}
// finish the body.
@@ -524,57 +524,41 @@ func checkembeddedtype(t *types.Type) {
if t.Sym == nil && t.IsPtr() {
t = t.Elem()
if t.IsInterface() {
- yyerror("embedded type cannot be a pointer to interface")
+ base.Errorf("embedded type cannot be a pointer to interface")
}
}
if t.IsPtr() || t.IsUnsafePtr() {
- yyerror("embedded type cannot be a pointer")
- } else if t.Etype == TFORW && !t.ForwardType().Embedlineno.IsKnown() {
- t.ForwardType().Embedlineno = lineno
+ base.Errorf("embedded type cannot be a pointer")
+ } else if t.Etype == types.TFORW && !t.ForwardType().Embedlineno.IsKnown() {
+ t.ForwardType().Embedlineno = base.Pos
}
}
-func structfield(n *Node) *types.Field {
- lno := lineno
- lineno = n.Pos
-
- if n.Op != ODCLFIELD {
- Fatalf("structfield: oops %v\n", n)
- }
-
- f := types.NewField()
- f.Pos = n.Pos
- f.Sym = n.Sym
+func structfield(n ir.Node) *types.Field {
+ lno := base.Pos
+ base.Pos = n.Pos()
- if n.Left != nil {
- n.Left = typecheck(n.Left, ctxType)
- n.Type = n.Left.Type
- n.Left = nil
+ if n.Op() != ir.ODCLFIELD {
+ base.Fatalf("structfield: oops %v\n", n)
}
- f.Type = n.Type
- if f.Type == nil {
- f.SetBroke(true)
+ if n.Left() != nil {
+ n.SetLeft(typecheck(n.Left(), ctxType))
+ n.SetType(n.Left().Type())
+ n.SetLeft(nil)
}
+ f := types.NewField(n.Pos(), n.Sym(), n.Type())
if n.Embedded() {
- checkembeddedtype(n.Type)
+ checkembeddedtype(n.Type())
f.Embedded = 1
- } else {
- f.Embedded = 0
}
-
- switch u := n.Val().U.(type) {
- case string:
- f.Note = u
- default:
- yyerror("field tag must be a string")
- case nil:
- // no-op
+ if n.HasVal() {
+ f.Note = constant.StringVal(n.Val())
}
- lineno = lno
+ base.Pos = lno
return f
}
@@ -588,7 +572,7 @@ func checkdupfields(what string, fss ...[]*types.Field) {
continue
}
if seen[f.Sym] {
- yyerrorl(f.Pos, "duplicate %s %s", what, f.Sym.Name)
+ base.ErrorfAt(f.Pos, "duplicate %s %s", what, f.Sym.Name)
continue
}
seen[f.Sym] = true
@@ -598,8 +582,8 @@ func checkdupfields(what string, fss ...[]*types.Field) {
// convert a parsed id/type list into
// a type for struct/interface/arglist
-func tostruct(l []*Node) *types.Type {
- t := types.New(TSTRUCT)
+func tostruct(l []ir.Node) *types.Type {
+ t := types.New(types.TSTRUCT)
fields := make([]*types.Field, len(l))
for i, n := range l {
@@ -620,17 +604,17 @@ func tostruct(l []*Node) *types.Type {
return t
}
-func tofunargs(l []*Node, funarg types.Funarg) *types.Type {
- t := types.New(TSTRUCT)
+func tofunargs(l []ir.Node, funarg types.Funarg) *types.Type {
+ t := types.New(types.TSTRUCT)
t.StructType().Funarg = funarg
fields := make([]*types.Field, len(l))
for i, n := range l {
f := structfield(n)
f.SetIsDDD(n.IsDDD())
- if n.Right != nil {
- n.Right.Type = f.Type
- f.Nname = asTypesNode(n.Right)
+ if n.Right() != nil {
+ n.Right().SetType(f.Type)
+ f.Nname = n.Right()
}
if f.Broke() {
t.SetBroke(true)
@@ -642,22 +626,22 @@ func tofunargs(l []*Node, funarg types.Funarg) *types.Type {
}
func tofunargsfield(fields []*types.Field, funarg types.Funarg) *types.Type {
- t := types.New(TSTRUCT)
+ t := types.New(types.TSTRUCT)
t.StructType().Funarg = funarg
t.SetFields(fields)
return t
}
-func interfacefield(n *Node) *types.Field {
- lno := lineno
- lineno = n.Pos
+func interfacefield(n ir.Node) *types.Field {
+ lno := base.Pos
+ base.Pos = n.Pos()
- if n.Op != ODCLFIELD {
- Fatalf("interfacefield: oops %v\n", n)
+ if n.Op() != ir.ODCLFIELD {
+ base.Fatalf("interfacefield: oops %v\n", n)
}
- if n.Val().Ctype() != CTxxx {
- yyerror("interface method cannot have annotation")
+ if n.HasVal() {
+ base.Errorf("interface method cannot have annotation")
}
// MethodSpec = MethodName Signature | InterfaceTypeName .
@@ -665,29 +649,23 @@ func interfacefield(n *Node) *types.Field {
// If Sym != nil, then Sym is MethodName and Left is Signature.
// Otherwise, Left is InterfaceTypeName.
- if n.Left != nil {
- n.Left = typecheck(n.Left, ctxType)
- n.Type = n.Left.Type
- n.Left = nil
+ if n.Left() != nil {
+ n.SetLeft(typecheck(n.Left(), ctxType))
+ n.SetType(n.Left().Type())
+ n.SetLeft(nil)
}
- f := types.NewField()
- f.Pos = n.Pos
- f.Sym = n.Sym
- f.Type = n.Type
- if f.Type == nil {
- f.SetBroke(true)
- }
+ f := types.NewField(n.Pos(), n.Sym(), n.Type())
- lineno = lno
+ base.Pos = lno
return f
}
-func tointerface(l []*Node) *types.Type {
+func tointerface(l []ir.Node) *types.Type {
if len(l) == 0 {
- return types.Types[TINTER]
+ return types.Types[types.TINTER]
}
- t := types.New(TINTER)
+ t := types.New(types.TINTER)
var fields []*types.Field
for _, n := range l {
f := interfacefield(n)
@@ -700,14 +678,12 @@ func tointerface(l []*Node) *types.Type {
return t
}
-func fakeRecv() *Node {
+func fakeRecv() ir.Node {
return anonfield(types.FakeRecvType())
}
func fakeRecvField() *types.Field {
- f := types.NewField()
- f.Type = types.FakeRecvType()
- return f
+ return types.NewField(src.NoXPos, nil, types.FakeRecvType())
}
// isifacemethod reports whether (field) m is
@@ -718,12 +694,12 @@ func isifacemethod(f *types.Type) bool {
}
// turn a parsed function declaration into a type
-func functype(this *Node, in, out []*Node) *types.Type {
- t := types.New(TFUNC)
+func functype(this ir.Node, in, out []ir.Node) *types.Type {
+ t := types.New(types.TFUNC)
- var rcvr []*Node
+ var rcvr []ir.Node
if this != nil {
- rcvr = []*Node{this}
+ rcvr = []ir.Node{this}
}
t.FuncType().Receiver = tofunargs(rcvr, types.FunargRcvr)
t.FuncType().Params = tofunargs(in, types.FunargParams)
@@ -735,13 +711,13 @@ func functype(this *Node, in, out []*Node) *types.Type {
t.SetBroke(true)
}
- t.FuncType().Outnamed = t.NumResults() > 0 && origSym(t.Results().Field(0).Sym) != nil
+ t.FuncType().Outnamed = t.NumResults() > 0 && ir.OrigSym(t.Results().Field(0).Sym) != nil
return t
}
func functypefield(this *types.Field, in, out []*types.Field) *types.Type {
- t := types.New(TFUNC)
+ t := types.New(types.TFUNC)
var rcvr []*types.Field
if this != nil {
@@ -751,36 +727,11 @@ func functypefield(this *types.Field, in, out []*types.Field) *types.Type {
t.FuncType().Params = tofunargsfield(in, types.FunargParams)
t.FuncType().Results = tofunargsfield(out, types.FunargResults)
- t.FuncType().Outnamed = t.NumResults() > 0 && origSym(t.Results().Field(0).Sym) != nil
+ t.FuncType().Outnamed = t.NumResults() > 0 && ir.OrigSym(t.Results().Field(0).Sym) != nil
return t
}
-// origSym returns the original symbol written by the user.
-func origSym(s *types.Sym) *types.Sym {
- if s == nil {
- return nil
- }
-
- if len(s.Name) > 1 && s.Name[0] == '~' {
- switch s.Name[1] {
- case 'r': // originally an unnamed result
- return nil
- case 'b': // originally the blank identifier _
- // TODO(mdempsky): Does s.Pkg matter here?
- return nblank.Sym
- }
- return s
- }
-
- if strings.HasPrefix(s.Name, ".anon") {
- // originally an unnamed or _ name (see subr.go: structargs)
- return nil
- }
-
- return s
-}
-
// methodSym returns the method symbol representing a method name
// associated with a specific receiver type.
//
@@ -800,13 +751,13 @@ func methodSym(recv *types.Type, msym *types.Sym) *types.Sym {
// start with a letter, number, or period.
func methodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sym {
if msym.IsBlank() {
- Fatalf("blank method name")
+ base.Fatalf("blank method name")
}
rsym := recv.Sym
if recv.IsPtr() {
if rsym != nil {
- Fatalf("declared pointer receiver type: %v", recv)
+ base.Fatalf("declared pointer receiver type: %v", recv)
}
rsym = recv.Elem().Sym
}
@@ -848,15 +799,15 @@ func methodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sy
// - msym is the method symbol
// - t is function type (with receiver)
// Returns a pointer to the existing or added Field; or nil if there's an error.
-func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field {
+func addmethod(n ir.Node, msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field {
if msym == nil {
- Fatalf("no method symbol")
+ base.Fatalf("no method symbol")
}
// get parent type sym
rf := t.Recv() // ptr to this structure
if rf == nil {
- yyerror("missing receiver")
+ base.Errorf("missing receiver")
return nil
}
@@ -866,7 +817,7 @@ func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) *types.F
t := pa
if t != nil && t.IsPtr() {
if t.Sym != nil {
- yyerror("invalid receiver type %v (%v is a pointer type)", pa, t)
+ base.Errorf("invalid receiver type %v (%v is a pointer type)", pa, t)
return nil
}
t = t.Elem()
@@ -876,21 +827,21 @@ func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) *types.F
case t == nil || t.Broke():
// rely on typecheck having complained before
case t.Sym == nil:
- yyerror("invalid receiver type %v (%v is not a defined type)", pa, t)
+ base.Errorf("invalid receiver type %v (%v is not a defined type)", pa, t)
case t.IsPtr():
- yyerror("invalid receiver type %v (%v is a pointer type)", pa, t)
+ base.Errorf("invalid receiver type %v (%v is a pointer type)", pa, t)
case t.IsInterface():
- yyerror("invalid receiver type %v (%v is an interface type)", pa, t)
+ base.Errorf("invalid receiver type %v (%v is an interface type)", pa, t)
default:
// Should have picked off all the reasons above,
// but just in case, fall back to generic error.
- yyerror("invalid receiver type %v (%L / %L)", pa, pa, t)
+ base.Errorf("invalid receiver type %v (%L / %L)", pa, pa, t)
}
return nil
}
- if local && mt.Sym.Pkg != localpkg {
- yyerror("cannot define new methods on non-local type %v", mt)
+ if local && mt.Sym.Pkg != ir.LocalPkg {
+ base.Errorf("cannot define new methods on non-local type %v", mt)
return nil
}
@@ -901,7 +852,7 @@ func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) *types.F
if mt.IsStruct() {
for _, f := range mt.Fields().Slice() {
if f.Sym == msym {
- yyerror("type %v has both field and method named %v", mt, msym)
+ base.Errorf("type %v has both field and method named %v", mt, msym)
f.SetBroke(true)
return nil
}
@@ -915,15 +866,13 @@ func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) *types.F
// types.Identical only checks that incoming and result parameters match,
// so explicitly check that the receiver parameters match too.
if !types.Identical(t, f.Type) || !types.Identical(t.Recv().Type, f.Type.Recv().Type) {
- yyerror("method redeclared: %v.%v\n\t%v\n\t%v", mt, msym, f.Type, t)
+ base.Errorf("method redeclared: %v.%v\n\t%v\n\t%v", mt, msym, f.Type, t)
}
return f
}
- f := types.NewField()
- f.Pos = lineno
- f.Sym = msym
- f.Type = t
+ f := types.NewField(base.Pos, msym, t)
+ f.Nname = n.Func().Nname
f.SetNointerface(nointerface)
mt.Methods().Append(f)
@@ -951,7 +900,7 @@ func funcsym(s *types.Sym) *types.Sym {
// When dynamically linking, the necessary function
// symbols will be created explicitly with makefuncsym.
// See the makefuncsym comment for details.
- if !Ctxt.Flag_dynlink && !existed {
+ if !base.Ctxt.Flag_dynlink && !existed {
funcsyms = append(funcsyms, s)
}
funcsymsmu.Unlock()
@@ -968,13 +917,13 @@ func funcsym(s *types.Sym) *types.Sym {
// So instead, when dynamic linking, we only create
// the s·f stubs in s's package.
func makefuncsym(s *types.Sym) {
- if !Ctxt.Flag_dynlink {
- Fatalf("makefuncsym dynlink")
+ if !base.Ctxt.Flag_dynlink {
+ base.Fatalf("makefuncsym dynlink")
}
if s.IsBlank() {
return
}
- if compiling_runtime && (s.Name == "getg" || s.Name == "getclosureptr" || s.Name == "getcallerpc" || s.Name == "getcallersp") {
+ if base.Flag.CompilingRuntime && (s.Name == "getg" || s.Name == "getclosureptr" || s.Name == "getcallerpc" || s.Name == "getcallersp") {
// runtime.getg(), getclosureptr(), getcallerpc(), and
// getcallersp() are not real functions and so do not
// get funcsyms.
@@ -986,27 +935,27 @@ func makefuncsym(s *types.Sym) {
}
// setNodeNameFunc marks a node as a function.
-func setNodeNameFunc(n *Node) {
- if n.Op != ONAME || n.Class() != Pxxx {
- Fatalf("expected ONAME/Pxxx node, got %v", n)
+func setNodeNameFunc(n ir.Node) {
+ if n.Op() != ir.ONAME || n.Class() != ir.Pxxx {
+ base.Fatalf("expected ONAME/Pxxx node, got %v", n)
}
- n.SetClass(PFUNC)
- n.Sym.SetFunc(true)
+ n.SetClass(ir.PFUNC)
+ n.Sym().SetFunc(true)
}
-func dclfunc(sym *types.Sym, tfn *Node) *Node {
- if tfn.Op != OTFUNC {
- Fatalf("expected OTFUNC node, got %v", tfn)
+func dclfunc(sym *types.Sym, tfn ir.Node) ir.Node {
+ if tfn.Op() != ir.OTFUNC {
+ base.Fatalf("expected OTFUNC node, got %v", tfn)
}
- fn := nod(ODCLFUNC, nil, nil)
- fn.Func.Nname = newfuncnamel(lineno, sym)
- fn.Func.Nname.Name.Defn = fn
- fn.Func.Nname.Name.Param.Ntype = tfn
- setNodeNameFunc(fn.Func.Nname)
+ fn := ir.Nod(ir.ODCLFUNC, nil, nil)
+ fn.Func().Nname = newfuncnamel(base.Pos, sym, fn.Func())
+ fn.Func().Nname.Name().Defn = fn
+ fn.Func().Nname.Name().Param.Ntype = tfn
+ setNodeNameFunc(fn.Func().Nname)
funchdr(fn)
- fn.Func.Nname.Name.Param.Ntype = typecheck(fn.Func.Nname.Name.Param.Ntype, ctxType)
+ fn.Func().Nname.Name().Param.Ntype = typecheck(fn.Func().Nname.Name().Param.Ntype, ctxType)
return fn
}
@@ -1014,27 +963,22 @@ type nowritebarrierrecChecker struct {
// extraCalls contains extra function calls that may not be
// visible during later analysis. It maps from the ODCLFUNC of
// the caller to a list of callees.
- extraCalls map[*Node][]nowritebarrierrecCall
+ extraCalls map[ir.Node][]nowritebarrierrecCall
// curfn is the current function during AST walks.
- curfn *Node
+ curfn ir.Node
}
type nowritebarrierrecCall struct {
- target *Node // ODCLFUNC of caller or callee
+ target ir.Node // ODCLFUNC of caller or callee
lineno src.XPos // line of call
}
-type nowritebarrierrecCallSym struct {
- target *obj.LSym // LSym of callee
- lineno src.XPos // line of call
-}
-
// newNowritebarrierrecChecker creates a nowritebarrierrecChecker. It
// must be called before transformclosure and walk.
func newNowritebarrierrecChecker() *nowritebarrierrecChecker {
c := &nowritebarrierrecChecker{
- extraCalls: make(map[*Node][]nowritebarrierrecCall),
+ extraCalls: make(map[ir.Node][]nowritebarrierrecCall),
}
// Find all systemstack calls and record their targets. In
@@ -1043,42 +987,42 @@ func newNowritebarrierrecChecker() *nowritebarrierrecChecker {
// directly. This has to happen before transformclosure since
// it's a lot harder to work out the argument after.
for _, n := range xtop {
- if n.Op != ODCLFUNC {
+ if n.Op() != ir.ODCLFUNC {
continue
}
c.curfn = n
- inspect(n, c.findExtraCalls)
+ ir.Inspect(n, c.findExtraCalls)
}
c.curfn = nil
return c
}
-func (c *nowritebarrierrecChecker) findExtraCalls(n *Node) bool {
- if n.Op != OCALLFUNC {
+func (c *nowritebarrierrecChecker) findExtraCalls(n ir.Node) bool {
+ if n.Op() != ir.OCALLFUNC {
return true
}
- fn := n.Left
- if fn == nil || fn.Op != ONAME || fn.Class() != PFUNC || fn.Name.Defn == nil {
+ fn := n.Left()
+ if fn == nil || fn.Op() != ir.ONAME || fn.Class() != ir.PFUNC || fn.Name().Defn == nil {
return true
}
- if !isRuntimePkg(fn.Sym.Pkg) || fn.Sym.Name != "systemstack" {
+ if !isRuntimePkg(fn.Sym().Pkg) || fn.Sym().Name != "systemstack" {
return true
}
- var callee *Node
- arg := n.List.First()
- switch arg.Op {
- case ONAME:
- callee = arg.Name.Defn
- case OCLOSURE:
- callee = arg.Func.Closure
+ var callee ir.Node
+ arg := n.List().First()
+ switch arg.Op() {
+ case ir.ONAME:
+ callee = arg.Name().Defn
+ case ir.OCLOSURE:
+ callee = arg.Func().Decl
default:
- Fatalf("expected ONAME or OCLOSURE node, got %+v", arg)
+ base.Fatalf("expected ONAME or OCLOSURE node, got %+v", arg)
}
- if callee.Op != ODCLFUNC {
- Fatalf("expected ODCLFUNC node, got %+v", callee)
+ if callee.Op() != ir.ODCLFUNC {
+ base.Fatalf("expected ODCLFUNC node, got %+v", callee)
}
- c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos})
+ c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos()})
return true
}
@@ -1090,17 +1034,17 @@ func (c *nowritebarrierrecChecker) findExtraCalls(n *Node) bool {
// because that's all we know after we start SSA.
//
// This can be called concurrently for different from Nodes.
-func (c *nowritebarrierrecChecker) recordCall(from *Node, to *obj.LSym, pos src.XPos) {
- if from.Op != ODCLFUNC {
- Fatalf("expected ODCLFUNC, got %v", from)
+func (c *nowritebarrierrecChecker) recordCall(from ir.Node, to *obj.LSym, pos src.XPos) {
+ if from.Op() != ir.ODCLFUNC {
+ base.Fatalf("expected ODCLFUNC, got %v", from)
}
// We record this information on the *Func so this is
// concurrent-safe.
- fn := from.Func
- if fn.nwbrCalls == nil {
- fn.nwbrCalls = new([]nowritebarrierrecCallSym)
+ fn := from.Func()
+ if fn.NWBRCalls == nil {
+ fn.NWBRCalls = new([]ir.SymAndPos)
}
- *fn.nwbrCalls = append(*fn.nwbrCalls, nowritebarrierrecCallSym{to, pos})
+ *fn.NWBRCalls = append(*fn.NWBRCalls, ir.SymAndPos{Sym: to, Pos: pos})
}
func (c *nowritebarrierrecChecker) check() {
@@ -1108,39 +1052,39 @@ func (c *nowritebarrierrecChecker) check() {
// capture all calls created by lowering, but this means we
// only get to see the obj.LSyms of calls. symToFunc lets us
// get back to the ODCLFUNCs.
- symToFunc := make(map[*obj.LSym]*Node)
+ symToFunc := make(map[*obj.LSym]ir.Node)
// funcs records the back-edges of the BFS call graph walk. It
// maps from the ODCLFUNC of each function that must not have
// write barriers to the call that inhibits them. Functions
// that are directly marked go:nowritebarrierrec are in this
// map with a zero-valued nowritebarrierrecCall. This also
// acts as the set of marks for the BFS of the call graph.
- funcs := make(map[*Node]nowritebarrierrecCall)
+ funcs := make(map[ir.Node]nowritebarrierrecCall)
// q is the queue of ODCLFUNC Nodes to visit in BFS order.
- var q nodeQueue
+ var q ir.NodeQueue
for _, n := range xtop {
- if n.Op != ODCLFUNC {
+ if n.Op() != ir.ODCLFUNC {
continue
}
- symToFunc[n.Func.lsym] = n
+ symToFunc[n.Func().LSym] = n
// Make nowritebarrierrec functions BFS roots.
- if n.Func.Pragma&Nowritebarrierrec != 0 {
+ if n.Func().Pragma&ir.Nowritebarrierrec != 0 {
funcs[n] = nowritebarrierrecCall{}
- q.pushRight(n)
+ q.PushRight(n)
}
// Check go:nowritebarrier functions.
- if n.Func.Pragma&Nowritebarrier != 0 && n.Func.WBPos.IsKnown() {
- yyerrorl(n.Func.WBPos, "write barrier prohibited")
+ if n.Func().Pragma&ir.Nowritebarrier != 0 && n.Func().WBPos.IsKnown() {
+ base.ErrorfAt(n.Func().WBPos, "write barrier prohibited")
}
}
// Perform a BFS of the call graph from all
// go:nowritebarrierrec functions.
- enqueue := func(src, target *Node, pos src.XPos) {
- if target.Func.Pragma&Yeswritebarrierrec != 0 {
+ enqueue := func(src, target ir.Node, pos src.XPos) {
+ if target.Func().Pragma&ir.Yeswritebarrierrec != 0 {
// Don't flow into this function.
return
}
@@ -1151,20 +1095,20 @@ func (c *nowritebarrierrecChecker) check() {
// Record the path.
funcs[target] = nowritebarrierrecCall{target: src, lineno: pos}
- q.pushRight(target)
+ q.PushRight(target)
}
- for !q.empty() {
- fn := q.popLeft()
+ for !q.Empty() {
+ fn := q.PopLeft()
// Check fn.
- if fn.Func.WBPos.IsKnown() {
+ if fn.Func().WBPos.IsKnown() {
var err bytes.Buffer
call := funcs[fn]
for call.target != nil {
- fmt.Fprintf(&err, "\n\t%v: called by %v", linestr(call.lineno), call.target.Func.Nname)
+ fmt.Fprintf(&err, "\n\t%v: called by %v", base.FmtPos(call.lineno), call.target.Func().Nname)
call = funcs[call.target]
}
- yyerrorl(fn.Func.WBPos, "write barrier prohibited by caller; %v%s", fn.Func.Nname, err.String())
+ base.ErrorfAt(fn.Func().WBPos, "write barrier prohibited by caller; %v%s", fn.Func().Nname, err.String())
continue
}
@@ -1172,13 +1116,13 @@ func (c *nowritebarrierrecChecker) check() {
for _, callee := range c.extraCalls[fn] {
enqueue(fn, callee.target, callee.lineno)
}
- if fn.Func.nwbrCalls == nil {
+ if fn.Func().NWBRCalls == nil {
continue
}
- for _, callee := range *fn.Func.nwbrCalls {
- target := symToFunc[callee.target]
+ for _, callee := range *fn.Func().NWBRCalls {
+ target := symToFunc[callee.Sym]
if target != nil {
- enqueue(fn, target, callee.lineno)
+ enqueue(fn, target, callee.Pos)
}
}
}
diff --git a/src/cmd/compile/internal/gc/dep_test.go b/src/cmd/compile/internal/gc/dep_test.go
index ecc9a70ce4..d03683a280 100644
--- a/src/cmd/compile/internal/gc/dep_test.go
+++ b/src/cmd/compile/internal/gc/dep_test.go
@@ -18,7 +18,7 @@ func TestDeps(t *testing.T) {
}
for _, dep := range strings.Fields(strings.Trim(string(out), "[]")) {
switch dep {
- case "go/build", "go/token":
+ case "go/build", "go/scanner":
// cmd/compile/internal/importer introduces a dependency
// on go/build and go/token; cmd/compile/internal/ uses
// go/constant which uses go/token in its API. Once we
diff --git a/src/cmd/compile/internal/gc/dwinl.go b/src/cmd/compile/internal/gc/dwinl.go
index bb5ae61cbb..1e4e43caad 100644
--- a/src/cmd/compile/internal/gc/dwinl.go
+++ b/src/cmd/compile/internal/gc/dwinl.go
@@ -5,6 +5,7 @@
package gc
import (
+ "cmd/compile/internal/base"
"cmd/internal/dwarf"
"cmd/internal/obj"
"cmd/internal/src"
@@ -26,8 +27,8 @@ type varPos struct {
func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
var inlcalls dwarf.InlCalls
- if Debug_gendwarfinl != 0 {
- Ctxt.Logf("assembling DWARF inlined routine info for %v\n", fnsym.Name)
+ if base.Debug.DwarfInl != 0 {
+ base.Ctxt.Logf("assembling DWARF inlined routine info for %v\n", fnsym.Name)
}
// This maps inline index (from Ctxt.InlTree) to index in inlcalls.Calls
@@ -106,7 +107,7 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
}
m = makePreinlineDclMap(fnsym)
} else {
- ifnlsym := Ctxt.InlTree.InlinedFunction(int(ii - 1))
+ ifnlsym := base.Ctxt.InlTree.InlinedFunction(int(ii - 1))
m = makePreinlineDclMap(ifnlsym)
}
@@ -181,7 +182,7 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
}
// Debugging
- if Debug_gendwarfinl != 0 {
+ if base.Debug.DwarfInl != 0 {
dumpInlCalls(inlcalls)
dumpInlVars(dwVars)
}
@@ -205,15 +206,15 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
// abstract function DIE for an inlined routine imported from a
// previously compiled package.
func genAbstractFunc(fn *obj.LSym) {
- ifn := Ctxt.DwFixups.GetPrecursorFunc(fn)
+ ifn := base.Ctxt.DwFixups.GetPrecursorFunc(fn)
if ifn == nil {
- Ctxt.Diag("failed to locate precursor fn for %v", fn)
+ base.Ctxt.Diag("failed to locate precursor fn for %v", fn)
return
}
- if Debug_gendwarfinl != 0 {
- Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name)
+ if base.Debug.DwarfInl != 0 {
+ base.Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name)
}
- Ctxt.DwarfAbstractFunc(ifn, fn, myimportpath)
+ base.Ctxt.DwarfAbstractFunc(ifn, fn, base.Ctxt.Pkgpath)
}
// Undo any versioning performed when a name was written
@@ -235,15 +236,15 @@ func makePreinlineDclMap(fnsym *obj.LSym) map[varPos]int {
dcl := preInliningDcls(fnsym)
m := make(map[varPos]int)
for i, n := range dcl {
- pos := Ctxt.InnermostPos(n.Pos)
+ pos := base.Ctxt.InnermostPos(n.Pos())
vp := varPos{
- DeclName: unversion(n.Sym.Name),
+ DeclName: unversion(n.Sym().Name),
DeclFile: pos.RelFilename(),
DeclLine: pos.RelLine(),
DeclCol: pos.Col(),
}
if _, found := m[vp]; found {
- Fatalf("child dcl collision on symbol %s within %v\n", n.Sym.Name, fnsym.Name)
+ base.Fatalf("child dcl collision on symbol %s within %v\n", n.Sym().Name, fnsym.Name)
}
m[vp] = i
}
@@ -260,17 +261,17 @@ func insertInlCall(dwcalls *dwarf.InlCalls, inlIdx int, imap map[int]int) int {
// is one. We do this first so that parents appear before their
// children in the resulting table.
parCallIdx := -1
- parInlIdx := Ctxt.InlTree.Parent(inlIdx)
+ parInlIdx := base.Ctxt.InlTree.Parent(inlIdx)
if parInlIdx >= 0 {
parCallIdx = insertInlCall(dwcalls, parInlIdx, imap)
}
// Create new entry for this inline
- inlinedFn := Ctxt.InlTree.InlinedFunction(inlIdx)
- callXPos := Ctxt.InlTree.CallPos(inlIdx)
- absFnSym := Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn)
- pb := Ctxt.PosTable.Pos(callXPos).Base()
- callFileSym := Ctxt.Lookup(pb.SymFilename())
+ inlinedFn := base.Ctxt.InlTree.InlinedFunction(inlIdx)
+ callXPos := base.Ctxt.InlTree.CallPos(inlIdx)
+ absFnSym := base.Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn)
+ pb := base.Ctxt.PosTable.Pos(callXPos).Base()
+ callFileSym := base.Ctxt.Lookup(pb.SymFilename())
ic := dwarf.InlCall{
InlIndex: inlIdx,
CallFile: callFileSym,
@@ -298,7 +299,7 @@ func insertInlCall(dwcalls *dwarf.InlCalls, inlIdx int, imap map[int]int) int {
// the index for a node from the inlined body of D will refer to the
// call to D from C. Whew.
func posInlIndex(xpos src.XPos) int {
- pos := Ctxt.PosTable.Pos(xpos)
+ pos := base.Ctxt.PosTable.Pos(xpos)
if b := pos.Base(); b != nil {
ii := b.InliningIndex()
if ii >= 0 {
@@ -324,7 +325,7 @@ func addRange(calls []dwarf.InlCall, start, end int64, ii int, imap map[int]int)
// Append range to correct inlined call
callIdx, found := imap[ii]
if !found {
- Fatalf("can't find inlIndex %d in imap for prog at %d\n", ii, start)
+ base.Fatalf("can't find inlIndex %d in imap for prog at %d\n", ii, start)
}
call := &calls[callIdx]
call.Ranges = append(call.Ranges, dwarf.Range{Start: start, End: end})
@@ -332,23 +333,23 @@ func addRange(calls []dwarf.InlCall, start, end int64, ii int, imap map[int]int)
func dumpInlCall(inlcalls dwarf.InlCalls, idx, ilevel int) {
for i := 0; i < ilevel; i++ {
- Ctxt.Logf(" ")
+ base.Ctxt.Logf(" ")
}
ic := inlcalls.Calls[idx]
- callee := Ctxt.InlTree.InlinedFunction(ic.InlIndex)
- Ctxt.Logf(" %d: II:%d (%s) V: (", idx, ic.InlIndex, callee.Name)
+ callee := base.Ctxt.InlTree.InlinedFunction(ic.InlIndex)
+ base.Ctxt.Logf(" %d: II:%d (%s) V: (", idx, ic.InlIndex, callee.Name)
for _, f := range ic.InlVars {
- Ctxt.Logf(" %v", f.Name)
+ base.Ctxt.Logf(" %v", f.Name)
}
- Ctxt.Logf(" ) C: (")
+ base.Ctxt.Logf(" ) C: (")
for _, k := range ic.Children {
- Ctxt.Logf(" %v", k)
+ base.Ctxt.Logf(" %v", k)
}
- Ctxt.Logf(" ) R:")
+ base.Ctxt.Logf(" ) R:")
for _, r := range ic.Ranges {
- Ctxt.Logf(" [%d,%d)", r.Start, r.End)
+ base.Ctxt.Logf(" [%d,%d)", r.Start, r.End)
}
- Ctxt.Logf("\n")
+ base.Ctxt.Logf("\n")
for _, k := range ic.Children {
dumpInlCall(inlcalls, k, ilevel+1)
}
@@ -373,7 +374,7 @@ func dumpInlVars(dwvars []*dwarf.Var) {
if dwv.IsInAbstract {
ia = 1
}
- Ctxt.Logf("V%d: %s CI:%d II:%d IA:%d %s\n", i, dwv.Name, dwv.ChildIndex, dwv.InlIndex-1, ia, typ)
+ base.Ctxt.Logf("V%d: %s CI:%d II:%d IA:%d %s\n", i, dwv.Name, dwv.ChildIndex, dwv.InlIndex-1, ia, typ)
}
}
@@ -410,7 +411,7 @@ func checkInlCall(funcName string, inlCalls dwarf.InlCalls, funcSize int64, idx,
// Callee
ic := inlCalls.Calls[idx]
- callee := Ctxt.InlTree.InlinedFunction(ic.InlIndex).Name
+ callee := base.Ctxt.InlTree.InlinedFunction(ic.InlIndex).Name
calleeRanges := ic.Ranges
// Caller
@@ -418,14 +419,14 @@ func checkInlCall(funcName string, inlCalls dwarf.InlCalls, funcSize int64, idx,
parentRanges := []dwarf.Range{dwarf.Range{Start: int64(0), End: funcSize}}
if parentIdx != -1 {
pic := inlCalls.Calls[parentIdx]
- caller = Ctxt.InlTree.InlinedFunction(pic.InlIndex).Name
+ caller = base.Ctxt.InlTree.InlinedFunction(pic.InlIndex).Name
parentRanges = pic.Ranges
}
// Callee ranges contained in caller ranges?
c, m := rangesContainsAll(parentRanges, calleeRanges)
if !c {
- Fatalf("** malformed inlined routine range in %s: caller %s callee %s II=%d %s\n", funcName, caller, callee, idx, m)
+ base.Fatalf("** malformed inlined routine range in %s: caller %s callee %s II=%d %s\n", funcName, caller, callee, idx, m)
}
// Now visit kids
diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/gc/embed.go
index 103949c1f9..33b05a5bf0 100644
--- a/src/cmd/compile/internal/gc/embed.go
+++ b/src/cmd/compile/internal/gc/embed.go
@@ -5,40 +5,19 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
"cmd/compile/internal/types"
"cmd/internal/obj"
- "encoding/json"
- "io/ioutil"
- "log"
+
"path"
"sort"
"strconv"
"strings"
)
-var embedlist []*Node
-
-var embedCfg struct {
- Patterns map[string][]string
- Files map[string]string
-}
-
-func readEmbedCfg(file string) {
- data, err := ioutil.ReadFile(file)
- if err != nil {
- log.Fatalf("-embedcfg: %v", err)
- }
- if err := json.Unmarshal(data, &embedCfg); err != nil {
- log.Fatalf("%s: %v", file, err)
- }
- if embedCfg.Patterns == nil {
- log.Fatalf("%s: invalid embedcfg: missing Patterns", file)
- }
- if embedCfg.Files == nil {
- log.Fatalf("%s: invalid embedcfg: missing Files", file)
- }
-}
+var embedlist []ir.Node
const (
embedUnknown = iota
@@ -49,7 +28,7 @@ const (
var numLocalEmbed int
-func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []PragmaEmbed) (newExprs []*Node) {
+func varEmbed(p *noder, names []ir.Node, typ ir.Node, exprs []ir.Node, embeds []PragmaEmbed) (newExprs []ir.Node) {
haveEmbed := false
for _, decl := range p.file.DeclList {
imp, ok := decl.(*syntax.ImportDecl)
@@ -66,30 +45,30 @@ func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []Pragma
pos := embeds[0].Pos
if !haveEmbed {
- p.yyerrorpos(pos, "invalid go:embed: missing import \"embed\"")
+ p.errorAt(pos, "invalid go:embed: missing import \"embed\"")
return exprs
}
- if embedCfg.Patterns == nil {
- p.yyerrorpos(pos, "invalid go:embed: build system did not supply embed configuration")
+ if base.Flag.Cfg.Embed.Patterns == nil {
+ p.errorAt(pos, "invalid go:embed: build system did not supply embed configuration")
return exprs
}
if len(names) > 1 {
- p.yyerrorpos(pos, "go:embed cannot apply to multiple vars")
+ p.errorAt(pos, "go:embed cannot apply to multiple vars")
return exprs
}
if len(exprs) > 0 {
- p.yyerrorpos(pos, "go:embed cannot apply to var with initializer")
+ p.errorAt(pos, "go:embed cannot apply to var with initializer")
return exprs
}
if typ == nil {
// Should not happen, since len(exprs) == 0 now.
- p.yyerrorpos(pos, "go:embed cannot apply to var without type")
+ p.errorAt(pos, "go:embed cannot apply to var without type")
return exprs
}
kind := embedKindApprox(typ)
if kind == embedUnknown {
- p.yyerrorpos(pos, "go:embed cannot apply to var of type %v", typ)
+ p.errorAt(pos, "go:embed cannot apply to var of type %v", typ)
return exprs
}
@@ -98,13 +77,13 @@ func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []Pragma
var list []string
for _, e := range embeds {
for _, pattern := range e.Patterns {
- files, ok := embedCfg.Patterns[pattern]
+ files, ok := base.Flag.Cfg.Embed.Patterns[pattern]
if !ok {
- p.yyerrorpos(e.Pos, "invalid go:embed: build system did not map pattern: %s", pattern)
+ p.errorAt(e.Pos, "invalid go:embed: build system did not map pattern: %s", pattern)
}
for _, file := range files {
- if embedCfg.Files[file] == "" {
- p.yyerrorpos(e.Pos, "invalid go:embed: build system did not map file: %s", file)
+ if base.Flag.Cfg.Embed.Files[file] == "" {
+ p.errorAt(e.Pos, "invalid go:embed: build system did not map file: %s", file)
continue
}
if !have[file] {
@@ -126,23 +105,23 @@ func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []Pragma
if kind == embedString || kind == embedBytes {
if len(list) > 1 {
- p.yyerrorpos(pos, "invalid go:embed: multiple files for type %v", typ)
+ p.errorAt(pos, "invalid go:embed: multiple files for type %v", typ)
return exprs
}
}
v := names[0]
- if dclcontext != PEXTERN {
+ if dclcontext != ir.PEXTERN {
numLocalEmbed++
- v = newnamel(v.Pos, lookupN("embed.", numLocalEmbed))
- v.Sym.Def = asTypesNode(v)
- v.Name.Param.Ntype = typ
- v.SetClass(PEXTERN)
+ v = ir.NewNameAt(v.Pos(), lookupN("embed.", numLocalEmbed))
+ v.Sym().Def = v
+ v.Name().Param.Ntype = typ
+ v.SetClass(ir.PEXTERN)
externdcl = append(externdcl, v)
- exprs = []*Node{v}
+ exprs = []ir.Node{v}
}
- v.Name.Param.SetEmbedFiles(list)
+ v.Name().Param.SetEmbedFiles(list)
embedlist = append(embedlist, v)
return exprs
}
@@ -151,18 +130,18 @@ func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []Pragma
// The match is approximate because we haven't done scope resolution yet and
// can't tell whether "string" and "byte" really mean "string" and "byte".
// The result must be confirmed later, after type checking, using embedKind.
-func embedKindApprox(typ *Node) int {
- if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && myimportpath == "embed")) {
+func embedKindApprox(typ ir.Node) int {
+ if typ.Sym() != nil && typ.Sym().Name == "FS" && (typ.Sym().Pkg.Path == "embed" || (typ.Sym().Pkg == ir.LocalPkg && base.Ctxt.Pkgpath == "embed")) {
return embedFiles
}
// These are not guaranteed to match only string and []byte -
// maybe the local package has redefined one of those words.
// But it's the best we can do now during the noder.
// The stricter check happens later, in initEmbed calling embedKind.
- if typ.Sym != nil && typ.Sym.Name == "string" && typ.Sym.Pkg == localpkg {
+ if typ.Sym() != nil && typ.Sym().Name == "string" && typ.Sym().Pkg == ir.LocalPkg {
return embedString
}
- if typ.Op == OTARRAY && typ.Left == nil && typ.Right.Sym != nil && typ.Right.Sym.Name == "byte" && typ.Right.Sym.Pkg == localpkg {
+ if typ.Op() == ir.OTARRAY && typ.Left() == nil && typ.Right().Sym() != nil && typ.Right().Sym().Name == "byte" && typ.Right().Sym().Pkg == ir.LocalPkg {
return embedBytes
}
return embedUnknown
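// Editor's aside — a minimal sketch of how the approximate classification
// plays out, assuming the predeclared "string" and "byte" are not shadowed
// (declarations hypothetical, not part of this patch):
//
//	//go:embed hello.txt
//	var s string // embedString: Sym "string" in the local package
//
//	//go:embed hello.txt
//	var b []byte // embedBytes: OTARRAY with nil length and element Sym "byte"
//
//	//go:embed static
//	var f embed.FS // embedFiles: Sym "FS" from package path "embed"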
@@ -170,10 +149,10 @@ func embedKindApprox(typ *Node) int {
// embedKind determines the kind of embedding variable.
func embedKind(typ *types.Type) int {
- if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && myimportpath == "embed")) {
+ if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == ir.LocalPkg && base.Ctxt.Pkgpath == "embed")) {
return embedFiles
}
- if typ == types.Types[TSTRING] {
+ if typ == types.Types[types.TSTRING] {
return embedString
}
if typ.Sym == nil && typ.IsSlice() && typ.Elem() == types.Bytetype {
@@ -213,19 +192,19 @@ func dumpembeds() {
// initEmbed emits the init data for a //go:embed variable,
// which is either a string, a []byte, or an embed.FS.
-func initEmbed(v *Node) {
- files := v.Name.Param.EmbedFiles()
- switch kind := embedKind(v.Type); kind {
+func initEmbed(v ir.Node) {
+ files := v.Name().Param.EmbedFiles()
+ switch kind := embedKind(v.Type()); kind {
case embedUnknown:
- yyerrorl(v.Pos, "go:embed cannot apply to var of type %v", v.Type)
+ base.ErrorfAt(v.Pos(), "go:embed cannot apply to var of type %v", v.Type())
case embedString, embedBytes:
file := files[0]
- fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], kind == embedString, nil)
+ fsym, size, err := fileStringSym(v.Pos(), base.Flag.Cfg.Embed.Files[file], kind == embedString, nil)
if err != nil {
- yyerrorl(v.Pos, "embed %s: %v", file, err)
+ base.ErrorfAt(v.Pos(), "embed %s: %v", file, err)
}
- sym := v.Sym.Linksym()
+ sym := v.Sym().Linksym()
off := 0
off = dsymptr(sym, off, fsym, 0) // data string
off = duintptr(sym, off, uint64(size)) // len
@@ -234,7 +213,7 @@ func initEmbed(v *Node) {
}
case embedFiles:
- slicedata := Ctxt.Lookup(`"".` + v.Sym.Name + `.files`)
+ slicedata := base.Ctxt.Lookup(`"".` + v.Sym().Name + `.files`)
off := 0
// []files pointed at by Files
off = dsymptr(slicedata, off, slicedata, 3*Widthptr) // []file, pointing just past slice
@@ -249,7 +228,7 @@ func initEmbed(v *Node) {
const hashSize = 16
hash := make([]byte, hashSize)
for _, file := range files {
- off = dsymptr(slicedata, off, stringsym(v.Pos, file), 0) // file string
+ off = dsymptr(slicedata, off, stringsym(v.Pos(), file), 0) // file string
off = duintptr(slicedata, off, uint64(len(file)))
if strings.HasSuffix(file, "/") {
// entry for directory - no data
@@ -257,17 +236,17 @@ func initEmbed(v *Node) {
off = duintptr(slicedata, off, 0)
off += hashSize
} else {
- fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], true, hash)
+ fsym, size, err := fileStringSym(v.Pos(), base.Flag.Cfg.Embed.Files[file], true, hash)
if err != nil {
- yyerrorl(v.Pos, "embed %s: %v", file, err)
+ base.ErrorfAt(v.Pos(), "embed %s: %v", file, err)
}
off = dsymptr(slicedata, off, fsym, 0) // data string
off = duintptr(slicedata, off, uint64(size))
- off = int(slicedata.WriteBytes(Ctxt, int64(off), hash))
+ off = int(slicedata.WriteBytes(base.Ctxt, int64(off), hash))
}
}
ggloblsym(slicedata, int32(off), obj.RODATA|obj.LOCAL)
- sym := v.Sym.Linksym()
+ sym := v.Sym().Linksym()
dsymptr(sym, 0, slicedata, 0)
}
}
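// Editor's aside — a hedged sketch of the layout initEmbed emits for an
// embed.FS, matching the writes above (the embed package's internal field
// names are assumed here, not confirmed by this patch):
//
//	// embed.FS is roughly struct{ files *[]file }, and
//	// file is roughly struct{ name, data string; hash [16]byte }.
//
// So slicedata holds a slice header pointing just past itself, a count, and
// then one (name ptr/len, data ptr/len, 16-byte hash) record per file; the
// variable's own symbol is a single pointer to that block.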
diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go
deleted file mode 100644
index 6f328ab5ea..0000000000
--- a/src/cmd/compile/internal/gc/esc.go
+++ /dev/null
@@ -1,472 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "cmd/compile/internal/types"
- "fmt"
-)
-
-func escapes(all []*Node) {
- visitBottomUp(all, escapeFuncs)
-}
-
-const (
- EscFuncUnknown = 0 + iota
- EscFuncPlanned
- EscFuncStarted
- EscFuncTagged
-)
-
-func min8(a, b int8) int8 {
- if a < b {
- return a
- }
- return b
-}
-
-func max8(a, b int8) int8 {
- if a > b {
- return a
- }
- return b
-}
-
-const (
- EscUnknown = iota
- EscNone // Does not escape to heap, result, or parameters.
- EscHeap // Reachable from the heap
- EscNever // By construction will not escape.
-)
-
-// funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way.
-func funcSym(fn *Node) *types.Sym {
- if fn == nil || fn.Func.Nname == nil {
- return nil
- }
- return fn.Func.Nname.Sym
-}
-
-// Mark labels that have no backjumps to them as not increasing e.loopdepth.
-// Walk hasn't generated (goto|label).Left.Sym.Label yet, so we'll cheat
-// and set it to one of the following two. Then in esc we'll clear it again.
-var (
- looping Node
- nonlooping Node
-)
-
-func isSliceSelfAssign(dst, src *Node) bool {
- // Detect the following special case.
- //
- // func (b *Buffer) Foo() {
- // n, m := ...
- // b.buf = b.buf[n:m]
- // }
- //
-	// This assignment is a no-op for escape analysis;
- // it does not store any new pointers into b that were not already there.
- // However, without this special case b will escape, because we assign to OIND/ODOTPTR.
- // Here we assume that the statement will not contain calls,
- // that is, that order will move any calls to init.
- // Otherwise base ONAME value could change between the moments
- // when we evaluate it for dst and for src.
-
- // dst is ONAME dereference.
- if dst.Op != ODEREF && dst.Op != ODOTPTR || dst.Left.Op != ONAME {
- return false
- }
- // src is a slice operation.
- switch src.Op {
- case OSLICE, OSLICE3, OSLICESTR:
- // OK.
- case OSLICEARR, OSLICE3ARR:
-		// Since arrays are embedded in their containing object,
-		// slicing a non-pointer array introduces a new pointer into b that was not already there
-		// (a pointer to b itself). After such an assignment, if b's contents escape,
-		// b escapes as well. If we ignored such OSLICEARR, we would conclude
-		// that b does not escape when b's contents do.
- //
- // Pointer to an array is OK since it's not stored inside b directly.
- // For slicing an array (not pointer to array), there is an implicit OADDR.
- // We check that to determine non-pointer array slicing.
- if src.Left.Op == OADDR {
- return false
- }
- default:
- return false
- }
- // slice is applied to ONAME dereference.
- if src.Left.Op != ODEREF && src.Left.Op != ODOTPTR || src.Left.Left.Op != ONAME {
- return false
- }
- // dst and src reference the same base ONAME.
- return dst.Left == src.Left.Left
-}
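// Editor's illustration of the OSLICEARR exclusion (type and method
// hypothetical, not from this patch):
//
//	type Buffer struct {
//		arr [64]byte
//		buf []byte
//	}
//
//	func (b *Buffer) Reset() {
//		b.buf = b.buf[:0] // self-assignment: no new pointer stored into b
//		b.buf = b.arr[:]  // NOT one: stores &b.arr, a pointer into b itself
//	}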
-
-// isSelfAssign reports whether assignment from src to dst can
-// be ignored by the escape analysis as it's effectively a self-assignment.
-func isSelfAssign(dst, src *Node) bool {
- if isSliceSelfAssign(dst, src) {
- return true
- }
-
- // Detect trivial assignments that assign back to the same object.
- //
- // It covers these cases:
- // val.x = val.y
- // val.x[i] = val.y[j]
- // val.x1.x2 = val.x1.y2
- // ... etc
- //
- // These assignments do not change assigned object lifetime.
-
- if dst == nil || src == nil || dst.Op != src.Op {
- return false
- }
-
- switch dst.Op {
- case ODOT, ODOTPTR:
- // Safe trailing accessors that are permitted to differ.
- case OINDEX:
- if mayAffectMemory(dst.Right) || mayAffectMemory(src.Right) {
- return false
- }
- default:
- return false
- }
-
- // The expression prefix must be both "safe" and identical.
- return samesafeexpr(dst.Left, src.Left)
-}
-
-// mayAffectMemory reports whether evaluation of n may affect the program's
-// memory state. If the expression can't affect memory state, then it can be
-// safely ignored by the escape analysis.
-func mayAffectMemory(n *Node) bool {
- // We may want to use a list of "memory safe" ops instead of generally
- // "side-effect free", which would include all calls and other ops that can
- // allocate or change global state. For now, it's safer to start with the latter.
- //
- // We're ignoring things like division by zero, index out of range,
- // and nil pointer dereference here.
- switch n.Op {
- case ONAME, OCLOSUREVAR, OLITERAL:
- return false
-
- // Left+Right group.
- case OINDEX, OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
- return mayAffectMemory(n.Left) || mayAffectMemory(n.Right)
-
- // Left group.
- case ODOT, ODOTPTR, ODEREF, OCONVNOP, OCONV, OLEN, OCAP,
- ONOT, OBITNOT, OPLUS, ONEG, OALIGNOF, OOFFSETOF, OSIZEOF:
- return mayAffectMemory(n.Left)
-
- default:
- return true
- }
-}
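// Editor's illustration for the OINDEX check in isSelfAssign (f is a
// hypothetical function):
//
//	v.x[3] = v.y[3]   // indexes can't affect memory: treated as self-assignment
//	v.x[f()] = v.y[0] // f() may mutate v between dst and src: not ignored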
-
-// heapAllocReason returns the reason the given Node must be heap
-// allocated, or the empty string if it does not need to be.
-func heapAllocReason(n *Node) string {
- if n.Type == nil {
- return ""
- }
-
- // Parameters are always passed via the stack.
- if n.Op == ONAME && (n.Class() == PPARAM || n.Class() == PPARAMOUT) {
- return ""
- }
-
- if n.Type.Width > maxStackVarSize {
- return "too large for stack"
- }
-
- if (n.Op == ONEW || n.Op == OPTRLIT) && n.Type.Elem().Width >= maxImplicitStackVarSize {
- return "too large for stack"
- }
-
- if n.Op == OCLOSURE && closureType(n).Size() >= maxImplicitStackVarSize {
- return "too large for stack"
- }
- if n.Op == OCALLPART && partialCallType(n).Size() >= maxImplicitStackVarSize {
- return "too large for stack"
- }
-
- if n.Op == OMAKESLICE {
- r := n.Right
- if r == nil {
- r = n.Left
- }
- if !smallintconst(r) {
- return "non-constant size"
- }
- if t := n.Type; t.Elem().Width != 0 && r.Int64Val() >= maxImplicitStackVarSize/t.Elem().Width {
- return "too large for stack"
- }
- }
-
- return ""
-}
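// Editor's illustration of the individual reasons (sizes hypothetical; the
// actual cutoffs are maxStackVarSize and maxImplicitStackVarSize):
//
//	var big [64 << 20]byte   // "too large for stack"
//	p := new([64 << 20]byte) // "too large for stack" via the ONEW check
//	s := make([]byte, n)     // "non-constant size" unless n is a small constant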
-
-// addrescapes tags node n as having had its address taken
-// by "increasing" the "value" of n.Esc to EscHeap.
-// Storage is allocated as necessary to allow the address
-// to be taken.
-func addrescapes(n *Node) {
- switch n.Op {
- default:
- // Unexpected Op, probably due to a previous type error. Ignore.
-
- case ODEREF, ODOTPTR:
- // Nothing to do.
-
- case ONAME:
- if n == nodfp {
- break
- }
-
- // if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping.
- // on PPARAM it means something different.
- if n.Class() == PAUTO && n.Esc == EscNever {
- break
- }
-
- // If a closure reference escapes, mark the outer variable as escaping.
- if n.Name.IsClosureVar() {
- addrescapes(n.Name.Defn)
- break
- }
-
- if n.Class() != PPARAM && n.Class() != PPARAMOUT && n.Class() != PAUTO {
- break
- }
-
- // This is a plain parameter or local variable that needs to move to the heap,
- // but possibly for the function outside the one we're compiling.
- // That is, if we have:
- //
- // func f(x int) {
- // func() {
- // global = &x
-	//      }()
- // }
- //
- // then we're analyzing the inner closure but we need to move x to the
- // heap in f, not in the inner closure. Flip over to f before calling moveToHeap.
- oldfn := Curfn
- Curfn = n.Name.Curfn
- if Curfn.Func.Closure != nil && Curfn.Op == OCLOSURE {
- Curfn = Curfn.Func.Closure
- }
- ln := lineno
- lineno = Curfn.Pos
- moveToHeap(n)
- Curfn = oldfn
- lineno = ln
-
- // ODOTPTR has already been introduced,
- // so these are the non-pointer ODOT and OINDEX.
- // In &x[0], if x is a slice, then x does not
- // escape--the pointer inside x does, but that
- // is always a heap pointer anyway.
- case ODOT, OINDEX, OPAREN, OCONVNOP:
- if !n.Left.Type.IsSlice() {
- addrescapes(n.Left)
- }
- }
-}
-
-// moveToHeap records the parameter or local variable n as moved to the heap.
-func moveToHeap(n *Node) {
- if Debug.r != 0 {
- Dump("MOVE", n)
- }
- if compiling_runtime {
- yyerror("%v escapes to heap, not allowed in runtime", n)
- }
- if n.Class() == PAUTOHEAP {
- Dump("n", n)
- Fatalf("double move to heap")
- }
-
- // Allocate a local stack variable to hold the pointer to the heap copy.
- // temp will add it to the function declaration list automatically.
- heapaddr := temp(types.NewPtr(n.Type))
- heapaddr.Sym = lookup("&" + n.Sym.Name)
- heapaddr.Orig.Sym = heapaddr.Sym
- heapaddr.Pos = n.Pos
-
- // Unset AutoTemp to persist the &foo variable name through SSA to
- // liveness analysis.
- // TODO(mdempsky/drchase): Cleaner solution?
- heapaddr.Name.SetAutoTemp(false)
-
- // Parameters have a local stack copy used at function start/end
- // in addition to the copy in the heap that may live longer than
- // the function.
- if n.Class() == PPARAM || n.Class() == PPARAMOUT {
- if n.Xoffset == BADWIDTH {
- Fatalf("addrescapes before param assignment")
- }
-
- // We rewrite n below to be a heap variable (indirection of heapaddr).
- // Preserve a copy so we can still write code referring to the original,
- // and substitute that copy into the function declaration list
- // so that analyses of the local (on-stack) variables use it.
- stackcopy := newname(n.Sym)
- stackcopy.Type = n.Type
- stackcopy.Xoffset = n.Xoffset
- stackcopy.SetClass(n.Class())
- stackcopy.Name.Param.Heapaddr = heapaddr
- if n.Class() == PPARAMOUT {
- // Make sure the pointer to the heap copy is kept live throughout the function.
- // The function could panic at any point, and then a defer could recover.
- // Thus, we need the pointer to the heap copy always available so the
- // post-deferreturn code can copy the return value back to the stack.
- // See issue 16095.
- heapaddr.Name.SetIsOutputParamHeapAddr(true)
- }
- n.Name.Param.Stackcopy = stackcopy
-
- // Substitute the stackcopy into the function variable list so that
- // liveness and other analyses use the underlying stack slot
- // and not the now-pseudo-variable n.
- found := false
- for i, d := range Curfn.Func.Dcl {
- if d == n {
- Curfn.Func.Dcl[i] = stackcopy
- found = true
- break
- }
- // Parameters are before locals, so can stop early.
- // This limits the search even in functions with many local variables.
- if d.Class() == PAUTO {
- break
- }
- }
- if !found {
- Fatalf("cannot find %v in local variable list", n)
- }
- Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
- }
-
- // Modify n in place so that uses of n now mean indirection of the heapaddr.
- n.SetClass(PAUTOHEAP)
- n.Xoffset = 0
- n.Name.Param.Heapaddr = heapaddr
- n.Esc = EscHeap
- if Debug.m != 0 {
- Warnl(n.Pos, "moved to heap: %v", n)
- }
-}
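// Editor's sketch of the rewrite moveToHeap performs, in the spirit of the
// comments above (pseudo-declarations, not emitted source): for an escaping
// parameter x,
//
//	x              // reclassified PAUTOHEAP; uses become indirections of heapaddr
//	&x             // heapaddr: a stack slot holding the heap copy's address
//	x (stackcopy)  // the original stack slot, kept for param liveness
//
// The declaration list is patched to carry the stack copy so liveness sees a
// real slot, while n itself becomes the heap pseudo-variable.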
-
-// This special tag is applied to uintptr variables
-// that we believe may hold unsafe.Pointers for
-// calls into assembly functions.
-const unsafeUintptrTag = "unsafe-uintptr"
-
-// This special tag is applied to uintptr parameters of functions
-// marked go:uintptrescapes.
-const uintptrEscapesTag = "uintptr-escapes"
-
-func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
- name := func() string {
- if f.Sym != nil {
- return f.Sym.Name
- }
- return fmt.Sprintf("arg#%d", narg)
- }
-
- if fn.Nbody.Len() == 0 {
- // Assume that uintptr arguments must be held live across the call.
- // This is most important for syscall.Syscall.
- // See golang.org/issue/13372.
- // This really doesn't have much to do with escape analysis per se,
- // but we are reusing the ability to annotate an individual function
- // argument and pass those annotations along to importing code.
- if f.Type.IsUintptr() {
- if Debug.m != 0 {
- Warnl(f.Pos, "assuming %v is unsafe uintptr", name())
- }
- return unsafeUintptrTag
- }
-
- if !f.Type.HasPointers() { // don't bother tagging for scalars
- return ""
- }
-
- var esc EscLeaks
-
- // External functions are assumed unsafe, unless
- // //go:noescape is given before the declaration.
- if fn.Func.Pragma&Noescape != 0 {
- if Debug.m != 0 && f.Sym != nil {
- Warnl(f.Pos, "%v does not escape", name())
- }
- } else {
- if Debug.m != 0 && f.Sym != nil {
- Warnl(f.Pos, "leaking param: %v", name())
- }
- esc.AddHeap(0)
- }
-
- return esc.Encode()
- }
-
- if fn.Func.Pragma&UintptrEscapes != 0 {
- if f.Type.IsUintptr() {
- if Debug.m != 0 {
- Warnl(f.Pos, "marking %v as escaping uintptr", name())
- }
- return uintptrEscapesTag
- }
- if f.IsDDD() && f.Type.Elem().IsUintptr() {
- // final argument is ...uintptr.
- if Debug.m != 0 {
- Warnl(f.Pos, "marking %v as escaping ...uintptr", name())
- }
- return uintptrEscapesTag
- }
- }
-
- if !f.Type.HasPointers() { // don't bother tagging for scalars
- return ""
- }
-
- // Unnamed parameters are unused and therefore do not escape.
- if f.Sym == nil || f.Sym.IsBlank() {
- var esc EscLeaks
- return esc.Encode()
- }
-
- n := asNode(f.Nname)
- loc := e.oldLoc(n)
- esc := loc.paramEsc
- esc.Optimize()
-
- if Debug.m != 0 && !loc.escapes {
- if esc.Empty() {
- Warnl(f.Pos, "%v does not escape", name())
- }
- if x := esc.Heap(); x >= 0 {
- if x == 0 {
- Warnl(f.Pos, "leaking param: %v", name())
- } else {
- // TODO(mdempsky): Mention level=x like below?
- Warnl(f.Pos, "leaking param content: %v", name())
- }
- }
- for i := 0; i < numEscResults; i++ {
- if x := esc.Result(i); x >= 0 {
- res := fn.Type.Results().Field(i).Sym
- Warnl(f.Pos, "leaking param: %v to result %v level=%d", name(), res, x)
- }
- }
- }
-
- return esc.Encode()
-}
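// Editor's illustration of the declarations these tags correspond to
// (signatures hypothetical, not from this patch):
//
//	//go:noescape
//	func hash(p unsafe.Pointer, n uintptr) uintptr // body-less: params tagged non-escaping
//
//	//go:uintptrescapes
//	func call(fn, arg uintptr) // uintptr params tagged uintptrEscapesTag
//
// A body-less function without //go:noescape instead gets the pessimistic
// "leaking param" encoding, and its uintptr arguments the unsafeUintptrTag.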
diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go
index 618bdf78e2..783bc8c41d 100644
--- a/src/cmd/compile/internal/gc/escape.go
+++ b/src/cmd/compile/internal/gc/escape.go
@@ -5,6 +5,8 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/types"
"cmd/internal/src"
@@ -84,7 +86,7 @@ import (
type Escape struct {
allLocs []*EscLocation
- curfn *Node
+ curfn ir.Node
// loopDepth counts the current loop nesting depth within
// curfn. It increments within each "for" loop and at each
@@ -99,8 +101,8 @@ type Escape struct {
// An EscLocation represents an abstract location that stores a Go
// variable.
type EscLocation struct {
- n *Node // represented variable or expression, if any
- curfn *Node // enclosing function
+ n ir.Node // represented variable or expression, if any
+ curfn ir.Node // enclosing function
edges []EscEdge // incoming edges
loopDepth int // loopDepth at declaration
@@ -140,12 +142,47 @@ type EscEdge struct {
notes *EscNote
}
+func init() {
+ ir.EscFmt = escFmt
+}
+
+// escFmt is called from node printing to print information about escape analysis results.
+func escFmt(n ir.Node, short bool) string {
+ text := ""
+ switch n.Esc() {
+ case EscUnknown:
+ break
+
+ case EscHeap:
+ text = "esc(h)"
+
+ case EscNone:
+ text = "esc(no)"
+
+ case EscNever:
+ if !short {
+ text = "esc(N)"
+ }
+
+ default:
+ text = fmt.Sprintf("esc(%d)", n.Esc())
+ }
+
+ if e, ok := n.Opt().(*EscLocation); ok && e.loopDepth != 0 {
+ if text != "" {
+ text += " "
+ }
+ text += fmt.Sprintf("ld(%d)", e.loopDepth)
+ }
+ return text
+}
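// Editor's note: with ir.EscFmt installed, node dumps such as the
// "escAnalyze" output can annotate each node with its escape result, e.g. a
// suffix like "esc(h)" or "esc(no) ld(2)" (illustrative rendering, not
// verbatim compiler output).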
+
// escapeFuncs performs escape analysis on a minimal batch of
// functions.
-func escapeFuncs(fns []*Node, recursive bool) {
+func escapeFuncs(fns []ir.Node, recursive bool) {
for _, fn := range fns {
- if fn.Op != ODCLFUNC {
- Fatalf("unexpected node: %v", fn)
+ if fn.Op() != ir.ODCLFUNC {
+ base.Fatalf("unexpected node: %v", fn)
}
}
@@ -165,40 +202,40 @@ func escapeFuncs(fns []*Node, recursive bool) {
e.finish(fns)
}
-func (e *Escape) initFunc(fn *Node) {
- if fn.Op != ODCLFUNC || fn.Esc != EscFuncUnknown {
- Fatalf("unexpected node: %v", fn)
+func (e *Escape) initFunc(fn ir.Node) {
+ if fn.Op() != ir.ODCLFUNC || fn.Esc() != EscFuncUnknown {
+ base.Fatalf("unexpected node: %v", fn)
}
- fn.Esc = EscFuncPlanned
- if Debug.m > 3 {
- Dump("escAnalyze", fn)
+ fn.SetEsc(EscFuncPlanned)
+ if base.Flag.LowerM > 3 {
+ ir.Dump("escAnalyze", fn)
}
e.curfn = fn
e.loopDepth = 1
// Allocate locations for local variables.
- for _, dcl := range fn.Func.Dcl {
- if dcl.Op == ONAME {
+ for _, dcl := range fn.Func().Dcl {
+ if dcl.Op() == ir.ONAME {
e.newLoc(dcl, false)
}
}
}
-func (e *Escape) walkFunc(fn *Node) {
- fn.Esc = EscFuncStarted
+func (e *Escape) walkFunc(fn ir.Node) {
+ fn.SetEsc(EscFuncStarted)
// Identify labels that mark the head of an unstructured loop.
- inspectList(fn.Nbody, func(n *Node) bool {
- switch n.Op {
- case OLABEL:
- n.Sym.Label = asTypesNode(&nonlooping)
+ ir.InspectList(fn.Body(), func(n ir.Node) bool {
+ switch n.Op() {
+ case ir.OLABEL:
+ n.Sym().Label = nonlooping
- case OGOTO:
+ case ir.OGOTO:
// If we visited the label before the goto,
// then this is a looping label.
- if n.Sym.Label == asTypesNode(&nonlooping) {
- n.Sym.Label = asTypesNode(&looping)
+ if n.Sym().Label == nonlooping {
+ n.Sym().Label = looping
}
}
@@ -207,7 +244,7 @@ func (e *Escape) walkFunc(fn *Node) {
e.curfn = fn
e.loopDepth = 1
- e.block(fn.Nbody)
+ e.block(fn.Body())
}
// Below we implement the methods for walking the AST and recording
@@ -237,172 +274,172 @@ func (e *Escape) walkFunc(fn *Node) {
// }
// stmt evaluates a single Go statement.
-func (e *Escape) stmt(n *Node) {
+func (e *Escape) stmt(n ir.Node) {
if n == nil {
return
}
lno := setlineno(n)
defer func() {
- lineno = lno
+ base.Pos = lno
}()
- if Debug.m > 2 {
- fmt.Printf("%v:[%d] %v stmt: %v\n", linestr(lineno), e.loopDepth, funcSym(e.curfn), n)
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v:[%d] %v stmt: %v\n", base.FmtPos(base.Pos), e.loopDepth, funcSym(e.curfn), n)
}
- e.stmts(n.Ninit)
+ e.stmts(n.Init())
- switch n.Op {
+ switch n.Op() {
default:
- Fatalf("unexpected stmt: %v", n)
+ base.Fatalf("unexpected stmt: %v", n)
- case ODCLCONST, ODCLTYPE, OEMPTY, OFALL, OINLMARK:
+ case ir.ODCLCONST, ir.ODCLTYPE, ir.OEMPTY, ir.OFALL, ir.OINLMARK:
// nop
- case OBREAK, OCONTINUE, OGOTO:
+ case ir.OBREAK, ir.OCONTINUE, ir.OGOTO:
// TODO(mdempsky): Handle dead code?
- case OBLOCK:
- e.stmts(n.List)
+ case ir.OBLOCK:
+ e.stmts(n.List())
- case ODCL:
+ case ir.ODCL:
// Record loop depth at declaration.
- if !n.Left.isBlank() {
- e.dcl(n.Left)
+ if !ir.IsBlank(n.Left()) {
+ e.dcl(n.Left())
}
- case OLABEL:
- switch asNode(n.Sym.Label) {
- case &nonlooping:
- if Debug.m > 2 {
- fmt.Printf("%v:%v non-looping label\n", linestr(lineno), n)
+ case ir.OLABEL:
+ switch ir.AsNode(n.Sym().Label) {
+ case nonlooping:
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v:%v non-looping label\n", base.FmtPos(base.Pos), n)
}
- case &looping:
- if Debug.m > 2 {
- fmt.Printf("%v: %v looping label\n", linestr(lineno), n)
+ case looping:
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v: %v looping label\n", base.FmtPos(base.Pos), n)
}
e.loopDepth++
default:
- Fatalf("label missing tag")
+ base.Fatalf("label missing tag")
}
- n.Sym.Label = nil
+ n.Sym().Label = nil
- case OIF:
- e.discard(n.Left)
- e.block(n.Nbody)
- e.block(n.Rlist)
+ case ir.OIF:
+ e.discard(n.Left())
+ e.block(n.Body())
+ e.block(n.Rlist())
- case OFOR, OFORUNTIL:
+ case ir.OFOR, ir.OFORUNTIL:
e.loopDepth++
- e.discard(n.Left)
- e.stmt(n.Right)
- e.block(n.Nbody)
+ e.discard(n.Left())
+ e.stmt(n.Right())
+ e.block(n.Body())
e.loopDepth--
- case ORANGE:
+ case ir.ORANGE:
// for List = range Right { Nbody }
e.loopDepth++
- ks := e.addrs(n.List)
- e.block(n.Nbody)
+ ks := e.addrs(n.List())
+ e.block(n.Body())
e.loopDepth--
// Right is evaluated outside the loop.
k := e.discardHole()
if len(ks) >= 2 {
- if n.Right.Type.IsArray() {
+ if n.Right().Type().IsArray() {
k = ks[1].note(n, "range")
} else {
k = ks[1].deref(n, "range-deref")
}
}
- e.expr(e.later(k), n.Right)
+ e.expr(e.later(k), n.Right())
- case OSWITCH:
- typesw := n.Left != nil && n.Left.Op == OTYPESW
+ case ir.OSWITCH:
+ typesw := n.Left() != nil && n.Left().Op() == ir.OTYPESW
var ks []EscHole
- for _, cas := range n.List.Slice() { // cases
- if typesw && n.Left.Left != nil {
- cv := cas.Rlist.First()
+ for _, cas := range n.List().Slice() { // cases
+ if typesw && n.Left().Left() != nil {
+ cv := cas.Rlist().First()
k := e.dcl(cv) // type switch variables have no ODCL.
- if cv.Type.HasPointers() {
- ks = append(ks, k.dotType(cv.Type, cas, "switch case"))
+ if cv.Type().HasPointers() {
+ ks = append(ks, k.dotType(cv.Type(), cas, "switch case"))
}
}
- e.discards(cas.List)
- e.block(cas.Nbody)
+ e.discards(cas.List())
+ e.block(cas.Body())
}
if typesw {
- e.expr(e.teeHole(ks...), n.Left.Right)
+ e.expr(e.teeHole(ks...), n.Left().Right())
} else {
- e.discard(n.Left)
+ e.discard(n.Left())
}
- case OSELECT:
- for _, cas := range n.List.Slice() {
- e.stmt(cas.Left)
- e.block(cas.Nbody)
+ case ir.OSELECT:
+ for _, cas := range n.List().Slice() {
+ e.stmt(cas.Left())
+ e.block(cas.Body())
}
- case OSELRECV:
- e.assign(n.Left, n.Right, "selrecv", n)
- case OSELRECV2:
- e.assign(n.Left, n.Right, "selrecv", n)
- e.assign(n.List.First(), nil, "selrecv", n)
- case ORECV:
+ case ir.OSELRECV:
+ e.assign(n.Left(), n.Right(), "selrecv", n)
+ case ir.OSELRECV2:
+ e.assign(n.Left(), n.Right(), "selrecv", n)
+ e.assign(n.List().First(), nil, "selrecv", n)
+ case ir.ORECV:
// TODO(mdempsky): Consider e.discard(n.Left).
e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit
- case OSEND:
- e.discard(n.Left)
- e.assignHeap(n.Right, "send", n)
-
- case OAS, OASOP:
- e.assign(n.Left, n.Right, "assign", n)
-
- case OAS2:
- for i, nl := range n.List.Slice() {
- e.assign(nl, n.Rlist.Index(i), "assign-pair", n)
- }
-
- case OAS2DOTTYPE: // v, ok = x.(type)
- e.assign(n.List.First(), n.Right, "assign-pair-dot-type", n)
- e.assign(n.List.Second(), nil, "assign-pair-dot-type", n)
- case OAS2MAPR: // v, ok = m[k]
- e.assign(n.List.First(), n.Right, "assign-pair-mapr", n)
- e.assign(n.List.Second(), nil, "assign-pair-mapr", n)
- case OAS2RECV: // v, ok = <-ch
- e.assign(n.List.First(), n.Right, "assign-pair-receive", n)
- e.assign(n.List.Second(), nil, "assign-pair-receive", n)
-
- case OAS2FUNC:
- e.stmts(n.Right.Ninit)
- e.call(e.addrs(n.List), n.Right, nil)
- case ORETURN:
- results := e.curfn.Type.Results().FieldSlice()
- for i, v := range n.List.Slice() {
- e.assign(asNode(results[i].Nname), v, "return", n)
- }
- case OCALLFUNC, OCALLMETH, OCALLINTER, OCLOSE, OCOPY, ODELETE, OPANIC, OPRINT, OPRINTN, ORECOVER:
+ case ir.OSEND:
+ e.discard(n.Left())
+ e.assignHeap(n.Right(), "send", n)
+
+ case ir.OAS, ir.OASOP:
+ e.assign(n.Left(), n.Right(), "assign", n)
+
+ case ir.OAS2:
+ for i, nl := range n.List().Slice() {
+ e.assign(nl, n.Rlist().Index(i), "assign-pair", n)
+ }
+
+ case ir.OAS2DOTTYPE: // v, ok = x.(type)
+ e.assign(n.List().First(), n.Right(), "assign-pair-dot-type", n)
+ e.assign(n.List().Second(), nil, "assign-pair-dot-type", n)
+ case ir.OAS2MAPR: // v, ok = m[k]
+ e.assign(n.List().First(), n.Right(), "assign-pair-mapr", n)
+ e.assign(n.List().Second(), nil, "assign-pair-mapr", n)
+ case ir.OAS2RECV: // v, ok = <-ch
+ e.assign(n.List().First(), n.Right(), "assign-pair-receive", n)
+ e.assign(n.List().Second(), nil, "assign-pair-receive", n)
+
+ case ir.OAS2FUNC:
+ e.stmts(n.Right().Init())
+ e.call(e.addrs(n.List()), n.Right(), nil)
+ case ir.ORETURN:
+ results := e.curfn.Type().Results().FieldSlice()
+ for i, v := range n.List().Slice() {
+ e.assign(ir.AsNode(results[i].Nname), v, "return", n)
+ }
+ case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
e.call(nil, n, nil)
- case OGO, ODEFER:
- e.stmts(n.Left.Ninit)
- e.call(nil, n.Left, n)
+ case ir.OGO, ir.ODEFER:
+ e.stmts(n.Left().Init())
+ e.call(nil, n.Left(), n)
- case ORETJMP:
+ case ir.ORETJMP:
// TODO(mdempsky): What do? esc.go just ignores it.
}
}
-func (e *Escape) stmts(l Nodes) {
+func (e *Escape) stmts(l ir.Nodes) {
for _, n := range l.Slice() {
e.stmt(n)
}
}
// block is like stmts, but preserves loopDepth.
-func (e *Escape) block(l Nodes) {
+func (e *Escape) block(l ir.Nodes) {
old := e.loopDepth
e.stmts(l)
e.loopDepth = old
@@ -410,123 +447,123 @@ func (e *Escape) block(l Nodes) {
// expr models evaluating an expression n and flowing the result into
// hole k.
-func (e *Escape) expr(k EscHole, n *Node) {
+func (e *Escape) expr(k EscHole, n ir.Node) {
if n == nil {
return
}
- e.stmts(n.Ninit)
+ e.stmts(n.Init())
e.exprSkipInit(k, n)
}
-func (e *Escape) exprSkipInit(k EscHole, n *Node) {
+func (e *Escape) exprSkipInit(k EscHole, n ir.Node) {
if n == nil {
return
}
lno := setlineno(n)
defer func() {
- lineno = lno
+ base.Pos = lno
}()
uintptrEscapesHack := k.uintptrEscapesHack
k.uintptrEscapesHack = false
- if uintptrEscapesHack && n.Op == OCONVNOP && n.Left.Type.IsUnsafePtr() {
+ if uintptrEscapesHack && n.Op() == ir.OCONVNOP && n.Left().Type().IsUnsafePtr() {
// nop
- } else if k.derefs >= 0 && !n.Type.HasPointers() {
+ } else if k.derefs >= 0 && !n.Type().HasPointers() {
k = e.discardHole()
}
- switch n.Op {
+ switch n.Op() {
default:
- Fatalf("unexpected expr: %v", n)
+ base.Fatalf("unexpected expr: %v", n)
- case OLITERAL, OGETG, OCLOSUREVAR, OTYPE:
+ case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OCLOSUREVAR, ir.OTYPE, ir.OMETHEXPR:
// nop
- case ONAME:
- if n.Class() == PFUNC || n.Class() == PEXTERN {
+ case ir.ONAME:
+ if n.Class() == ir.PFUNC || n.Class() == ir.PEXTERN {
return
}
e.flow(k, e.oldLoc(n))
- case OPLUS, ONEG, OBITNOT, ONOT:
- e.discard(n.Left)
- case OADD, OSUB, OOR, OXOR, OMUL, ODIV, OMOD, OLSH, ORSH, OAND, OANDNOT, OEQ, ONE, OLT, OLE, OGT, OGE, OANDAND, OOROR:
- e.discard(n.Left)
- e.discard(n.Right)
-
- case OADDR:
- e.expr(k.addr(n, "address-of"), n.Left) // "address-of"
- case ODEREF:
- e.expr(k.deref(n, "indirection"), n.Left) // "indirection"
- case ODOT, ODOTMETH, ODOTINTER:
- e.expr(k.note(n, "dot"), n.Left)
- case ODOTPTR:
- e.expr(k.deref(n, "dot of pointer"), n.Left) // "dot of pointer"
- case ODOTTYPE, ODOTTYPE2:
- e.expr(k.dotType(n.Type, n, "dot"), n.Left)
- case OINDEX:
- if n.Left.Type.IsArray() {
- e.expr(k.note(n, "fixed-array-index-of"), n.Left)
+ case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
+ e.discard(n.Left())
+ case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE, ir.OANDAND, ir.OOROR:
+ e.discard(n.Left())
+ e.discard(n.Right())
+
+ case ir.OADDR:
+ e.expr(k.addr(n, "address-of"), n.Left()) // "address-of"
+ case ir.ODEREF:
+ e.expr(k.deref(n, "indirection"), n.Left()) // "indirection"
+ case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
+ e.expr(k.note(n, "dot"), n.Left())
+ case ir.ODOTPTR:
+ e.expr(k.deref(n, "dot of pointer"), n.Left()) // "dot of pointer"
+ case ir.ODOTTYPE, ir.ODOTTYPE2:
+ e.expr(k.dotType(n.Type(), n, "dot"), n.Left())
+ case ir.OINDEX:
+ if n.Left().Type().IsArray() {
+ e.expr(k.note(n, "fixed-array-index-of"), n.Left())
} else {
// TODO(mdempsky): Fix why reason text.
- e.expr(k.deref(n, "dot of pointer"), n.Left)
- }
- e.discard(n.Right)
- case OINDEXMAP:
- e.discard(n.Left)
- e.discard(n.Right)
- case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR:
- e.expr(k.note(n, "slice"), n.Left)
+ e.expr(k.deref(n, "dot of pointer"), n.Left())
+ }
+ e.discard(n.Right())
+ case ir.OINDEXMAP:
+ e.discard(n.Left())
+ e.discard(n.Right())
+ case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR:
+ e.expr(k.note(n, "slice"), n.Left())
low, high, max := n.SliceBounds()
e.discard(low)
e.discard(high)
e.discard(max)
- case OCONV, OCONVNOP:
- if checkPtr(e.curfn, 2) && n.Type.IsUnsafePtr() && n.Left.Type.IsPtr() {
+ case ir.OCONV, ir.OCONVNOP:
+ if checkPtr(e.curfn, 2) && n.Type().IsUnsafePtr() && n.Left().Type().IsPtr() {
// When -d=checkptr=2 is enabled, treat
// conversions to unsafe.Pointer as an
// escaping operation. This allows better
// runtime instrumentation, since we can more
// easily detect object boundaries on the heap
// than the stack.
- e.assignHeap(n.Left, "conversion to unsafe.Pointer", n)
- } else if n.Type.IsUnsafePtr() && n.Left.Type.IsUintptr() {
- e.unsafeValue(k, n.Left)
+ e.assignHeap(n.Left(), "conversion to unsafe.Pointer", n)
+ } else if n.Type().IsUnsafePtr() && n.Left().Type().IsUintptr() {
+ e.unsafeValue(k, n.Left())
} else {
- e.expr(k, n.Left)
+ e.expr(k, n.Left())
}
- case OCONVIFACE:
- if !n.Left.Type.IsInterface() && !isdirectiface(n.Left.Type) {
+ case ir.OCONVIFACE:
+ if !n.Left().Type().IsInterface() && !isdirectiface(n.Left().Type()) {
k = e.spill(k, n)
}
- e.expr(k.note(n, "interface-converted"), n.Left)
+ e.expr(k.note(n, "interface-converted"), n.Left())
- case ORECV:
- e.discard(n.Left)
+ case ir.ORECV:
+ e.discard(n.Left())
- case OCALLMETH, OCALLFUNC, OCALLINTER, OLEN, OCAP, OCOMPLEX, OREAL, OIMAG, OAPPEND, OCOPY:
+ case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OLEN, ir.OCAP, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY:
e.call([]EscHole{k}, n, nil)
- case ONEW:
+ case ir.ONEW:
e.spill(k, n)
- case OMAKESLICE:
+ case ir.OMAKESLICE:
e.spill(k, n)
- e.discard(n.Left)
- e.discard(n.Right)
- case OMAKECHAN:
- e.discard(n.Left)
- case OMAKEMAP:
+ e.discard(n.Left())
+ e.discard(n.Right())
+ case ir.OMAKECHAN:
+ e.discard(n.Left())
+ case ir.OMAKEMAP:
e.spill(k, n)
- e.discard(n.Left)
+ e.discard(n.Left())
- case ORECOVER:
+ case ir.ORECOVER:
// nop
- case OCALLPART:
+ case ir.OCALLPART:
// Flow the receiver argument to both the closure and
// to the receiver parameter.
@@ -539,113 +576,113 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) {
// parameters all flow to the heap.
//
// TODO(mdempsky): Change ks into a callback, so that
- // we don't have to create this dummy slice?
+ // we don't have to create this slice?
var ks []EscHole
for i := m.Type.NumResults(); i > 0; i-- {
ks = append(ks, e.heapHole())
}
- paramK := e.tagHole(ks, asNode(m.Type.Nname()), m.Type.Recv())
+ paramK := e.tagHole(ks, ir.AsNode(m.Nname), m.Type.Recv())
- e.expr(e.teeHole(paramK, closureK), n.Left)
+ e.expr(e.teeHole(paramK, closureK), n.Left())
- case OPTRLIT:
- e.expr(e.spill(k, n), n.Left)
+ case ir.OPTRLIT:
+ e.expr(e.spill(k, n), n.Left())
- case OARRAYLIT:
- for _, elt := range n.List.Slice() {
- if elt.Op == OKEY {
- elt = elt.Right
+ case ir.OARRAYLIT:
+ for _, elt := range n.List().Slice() {
+ if elt.Op() == ir.OKEY {
+ elt = elt.Right()
}
e.expr(k.note(n, "array literal element"), elt)
}
- case OSLICELIT:
+ case ir.OSLICELIT:
k = e.spill(k, n)
k.uintptrEscapesHack = uintptrEscapesHack // for ...uintptr parameters
- for _, elt := range n.List.Slice() {
- if elt.Op == OKEY {
- elt = elt.Right
+ for _, elt := range n.List().Slice() {
+ if elt.Op() == ir.OKEY {
+ elt = elt.Right()
}
e.expr(k.note(n, "slice-literal-element"), elt)
}
- case OSTRUCTLIT:
- for _, elt := range n.List.Slice() {
- e.expr(k.note(n, "struct literal element"), elt.Left)
+ case ir.OSTRUCTLIT:
+ for _, elt := range n.List().Slice() {
+ e.expr(k.note(n, "struct literal element"), elt.Left())
}
- case OMAPLIT:
+ case ir.OMAPLIT:
e.spill(k, n)
// Map keys and values are always stored in the heap.
- for _, elt := range n.List.Slice() {
- e.assignHeap(elt.Left, "map literal key", n)
- e.assignHeap(elt.Right, "map literal value", n)
+ for _, elt := range n.List().Slice() {
+ e.assignHeap(elt.Left(), "map literal key", n)
+ e.assignHeap(elt.Right(), "map literal value", n)
}
- case OCLOSURE:
+ case ir.OCLOSURE:
k = e.spill(k, n)
// Link addresses of captured variables to closure.
- for _, v := range n.Func.Closure.Func.Cvars.Slice() {
- if v.Op == OXXX { // unnamed out argument; see dcl.go:/^funcargs
+ for _, v := range n.Func().ClosureVars.Slice() {
+ if v.Op() == ir.OXXX { // unnamed out argument; see dcl.go:/^funcargs
continue
}
k := k
- if !v.Name.Byval() {
+ if !v.Name().Byval() {
k = k.addr(v, "reference")
}
- e.expr(k.note(n, "captured by a closure"), v.Name.Defn)
+ e.expr(k.note(n, "captured by a closure"), v.Name().Defn)
}
- case ORUNES2STR, OBYTES2STR, OSTR2RUNES, OSTR2BYTES, ORUNESTR:
+ case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR:
e.spill(k, n)
- e.discard(n.Left)
+ e.discard(n.Left())
- case OADDSTR:
+ case ir.OADDSTR:
e.spill(k, n)
// Arguments of OADDSTR never escape;
// runtime.concatstrings makes sure of that.
- e.discards(n.List)
+ e.discards(n.List())
}
}
// unsafeValue evaluates a uintptr-typed arithmetic expression looking
// for conversions from an unsafe.Pointer.
-func (e *Escape) unsafeValue(k EscHole, n *Node) {
- if n.Type.Etype != TUINTPTR {
- Fatalf("unexpected type %v for %v", n.Type, n)
+func (e *Escape) unsafeValue(k EscHole, n ir.Node) {
+ if n.Type().Etype != types.TUINTPTR {
+ base.Fatalf("unexpected type %v for %v", n.Type(), n)
}
- e.stmts(n.Ninit)
+ e.stmts(n.Init())
- switch n.Op {
- case OCONV, OCONVNOP:
- if n.Left.Type.IsUnsafePtr() {
- e.expr(k, n.Left)
+ switch n.Op() {
+ case ir.OCONV, ir.OCONVNOP:
+ if n.Left().Type().IsUnsafePtr() {
+ e.expr(k, n.Left())
} else {
- e.discard(n.Left)
+ e.discard(n.Left())
}
- case ODOTPTR:
+ case ir.ODOTPTR:
if isReflectHeaderDataField(n) {
- e.expr(k.deref(n, "reflect.Header.Data"), n.Left)
+ e.expr(k.deref(n, "reflect.Header.Data"), n.Left())
} else {
- e.discard(n.Left)
- }
- case OPLUS, ONEG, OBITNOT:
- e.unsafeValue(k, n.Left)
- case OADD, OSUB, OOR, OXOR, OMUL, ODIV, OMOD, OAND, OANDNOT:
- e.unsafeValue(k, n.Left)
- e.unsafeValue(k, n.Right)
- case OLSH, ORSH:
- e.unsafeValue(k, n.Left)
+ e.discard(n.Left())
+ }
+ case ir.OPLUS, ir.ONEG, ir.OBITNOT:
+ e.unsafeValue(k, n.Left())
+ case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OAND, ir.OANDNOT:
+ e.unsafeValue(k, n.Left())
+ e.unsafeValue(k, n.Right())
+ case ir.OLSH, ir.ORSH:
+ e.unsafeValue(k, n.Left())
// RHS need not be uintptr-typed (#32959) and can't meaningfully
// flow pointers anyway.
- e.discard(n.Right)
+ e.discard(n.Right())
default:
e.exprSkipInit(e.discardHole(), n)
}
@@ -653,11 +690,11 @@ func (e *Escape) unsafeValue(k EscHole, n *Node) {
// discard evaluates an expression n for side-effects, but discards
// its value.
-func (e *Escape) discard(n *Node) {
+func (e *Escape) discard(n ir.Node) {
e.expr(e.discardHole(), n)
}
-func (e *Escape) discards(l Nodes) {
+func (e *Escape) discards(l ir.Nodes) {
for _, n := range l.Slice() {
e.discard(n)
}
@@ -665,8 +702,8 @@ func (e *Escape) discards(l Nodes) {
// addr evaluates an addressable expression n and returns an EscHole
// that represents storing into the represented location.
-func (e *Escape) addr(n *Node) EscHole {
- if n == nil || n.isBlank() {
+func (e *Escape) addr(n ir.Node) EscHole {
+ if n == nil || ir.IsBlank(n) {
// Can happen at least in OSELRECV.
// TODO(mdempsky): Anywhere else?
return e.discardHole()
@@ -674,38 +711,38 @@ func (e *Escape) addr(n *Node) EscHole {
k := e.heapHole()
- switch n.Op {
+ switch n.Op() {
default:
- Fatalf("unexpected addr: %v", n)
- case ONAME:
- if n.Class() == PEXTERN {
+ base.Fatalf("unexpected addr: %v", n)
+ case ir.ONAME:
+ if n.Class() == ir.PEXTERN {
break
}
k = e.oldLoc(n).asHole()
- case ODOT:
- k = e.addr(n.Left)
- case OINDEX:
- e.discard(n.Right)
- if n.Left.Type.IsArray() {
- k = e.addr(n.Left)
+ case ir.ODOT:
+ k = e.addr(n.Left())
+ case ir.OINDEX:
+ e.discard(n.Right())
+ if n.Left().Type().IsArray() {
+ k = e.addr(n.Left())
} else {
- e.discard(n.Left)
+ e.discard(n.Left())
}
- case ODEREF, ODOTPTR:
+ case ir.ODEREF, ir.ODOTPTR:
e.discard(n)
- case OINDEXMAP:
- e.discard(n.Left)
- e.assignHeap(n.Right, "key of map put", n)
+ case ir.OINDEXMAP:
+ e.discard(n.Left())
+ e.assignHeap(n.Right(), "key of map put", n)
}
- if !n.Type.HasPointers() {
+ if !n.Type().HasPointers() {
k = e.discardHole()
}
return k
}
-func (e *Escape) addrs(l Nodes) []EscHole {
+func (e *Escape) addrs(l ir.Nodes) []EscHole {
var ks []EscHole
for _, n := range l.Slice() {
ks = append(ks, e.addr(n))
@@ -714,15 +751,15 @@ func (e *Escape) addrs(l Nodes) []EscHole {
}
// assign evaluates the assignment dst = src.
-func (e *Escape) assign(dst, src *Node, why string, where *Node) {
+func (e *Escape) assign(dst, src ir.Node, why string, where ir.Node) {
// Filter out some no-op assignments for escape analysis.
ignore := dst != nil && src != nil && isSelfAssign(dst, src)
- if ignore && Debug.m != 0 {
- Warnl(where.Pos, "%v ignoring self-assignment in %S", funcSym(e.curfn), where)
+ if ignore && base.Flag.LowerM != 0 {
+ base.WarnfAt(where.Pos(), "%v ignoring self-assignment in %S", funcSym(e.curfn), where)
}
k := e.addr(dst)
- if dst != nil && dst.Op == ODOTPTR && isReflectHeaderDataField(dst) {
+ if dst != nil && dst.Op() == ir.ODOTPTR && isReflectHeaderDataField(dst) {
e.unsafeValue(e.heapHole().note(where, why), src)
} else {
if ignore {
@@ -732,22 +769,22 @@ func (e *Escape) assign(dst, src *Node, why string, where *Node) {
}
}
-func (e *Escape) assignHeap(src *Node, why string, where *Node) {
+func (e *Escape) assignHeap(src ir.Node, why string, where ir.Node) {
e.expr(e.heapHole().note(where, why), src)
}
// call evaluates a call expression, including builtin calls. ks
// should contain the holes representing where the function callee's
// results flow; where is the OGO/ODEFER context of the call, if any.
-func (e *Escape) call(ks []EscHole, call, where *Node) {
- topLevelDefer := where != nil && where.Op == ODEFER && e.loopDepth == 1
+func (e *Escape) call(ks []EscHole, call, where ir.Node) {
+ topLevelDefer := where != nil && where.Op() == ir.ODEFER && e.loopDepth == 1
if topLevelDefer {
// force stack allocation of defer record, unless
// open-coded defers are used (see ssa.go)
- where.Esc = EscNever
+ where.SetEsc(EscNever)
}
- argument := func(k EscHole, arg *Node) {
+ argument := func(k EscHole, arg ir.Node) {
if topLevelDefer {
// Top-level defer arguments don't escape to
// heap, but they do need to last until end of
@@ -760,66 +797,66 @@ func (e *Escape) call(ks []EscHole, call, where *Node) {
e.expr(k.note(call, "call parameter"), arg)
}
- switch call.Op {
+ switch call.Op() {
default:
- Fatalf("unexpected call op: %v", call.Op)
+ base.Fatalf("unexpected call op: %v", call.Op())
- case OCALLFUNC, OCALLMETH, OCALLINTER:
+ case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
fixVariadicCall(call)
// Pick out the function callee, if statically known.
- var fn *Node
- switch call.Op {
- case OCALLFUNC:
- switch v := staticValue(call.Left); {
- case v.Op == ONAME && v.Class() == PFUNC:
+ var fn ir.Node
+ switch call.Op() {
+ case ir.OCALLFUNC:
+ switch v := staticValue(call.Left()); {
+ case v.Op() == ir.ONAME && v.Class() == ir.PFUNC:
fn = v
- case v.Op == OCLOSURE:
- fn = v.Func.Closure.Func.Nname
+ case v.Op() == ir.OCLOSURE:
+ fn = v.Func().Nname
}
- case OCALLMETH:
- fn = asNode(call.Left.Type.FuncType().Nname)
+ case ir.OCALLMETH:
+ fn = methodExprName(call.Left())
}
- fntype := call.Left.Type
+ fntype := call.Left().Type()
if fn != nil {
- fntype = fn.Type
+ fntype = fn.Type()
}
if ks != nil && fn != nil && e.inMutualBatch(fn) {
- for i, result := range fn.Type.Results().FieldSlice() {
- e.expr(ks[i], asNode(result.Nname))
+ for i, result := range fn.Type().Results().FieldSlice() {
+ e.expr(ks[i], ir.AsNode(result.Nname))
}
}
if r := fntype.Recv(); r != nil {
- argument(e.tagHole(ks, fn, r), call.Left.Left)
+ argument(e.tagHole(ks, fn, r), call.Left().Left())
} else {
// Evaluate callee function expression.
- argument(e.discardHole(), call.Left)
+ argument(e.discardHole(), call.Left())
}
- args := call.List.Slice()
+ args := call.List().Slice()
for i, param := range fntype.Params().FieldSlice() {
argument(e.tagHole(ks, fn, param), args[i])
}
- case OAPPEND:
- args := call.List.Slice()
+ case ir.OAPPEND:
+ args := call.List().Slice()
// Appendee slice may flow directly to the result, if
// it has enough capacity. Alternatively, a new heap
// slice might be allocated, and all slice elements
// might flow to heap.
appendeeK := ks[0]
- if args[0].Type.Elem().HasPointers() {
+ if args[0].Type().Elem().HasPointers() {
appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
}
argument(appendeeK, args[0])
if call.IsDDD() {
appendedK := e.discardHole()
- if args[1].Type.IsSlice() && args[1].Type.Elem().HasPointers() {
+ if args[1].Type().IsSlice() && args[1].Type().Elem().HasPointers() {
appendedK = e.heapHole().deref(call, "appended slice...")
}
argument(appendedK, args[1])
@@ -829,27 +866,27 @@ func (e *Escape) call(ks []EscHole, call, where *Node) {
}
}
- case OCOPY:
- argument(e.discardHole(), call.Left)
+ case ir.OCOPY:
+ argument(e.discardHole(), call.Left())
copiedK := e.discardHole()
- if call.Right.Type.IsSlice() && call.Right.Type.Elem().HasPointers() {
+ if call.Right().Type().IsSlice() && call.Right().Type().Elem().HasPointers() {
copiedK = e.heapHole().deref(call, "copied slice")
}
- argument(copiedK, call.Right)
+ argument(copiedK, call.Right())
- case OPANIC:
- argument(e.heapHole(), call.Left)
+ case ir.OPANIC:
+ argument(e.heapHole(), call.Left())
- case OCOMPLEX:
- argument(e.discardHole(), call.Left)
- argument(e.discardHole(), call.Right)
- case ODELETE, OPRINT, OPRINTN, ORECOVER:
- for _, arg := range call.List.Slice() {
+ case ir.OCOMPLEX:
+ argument(e.discardHole(), call.Left())
+ argument(e.discardHole(), call.Right())
+ case ir.ODELETE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+ for _, arg := range call.List().Slice() {
argument(e.discardHole(), arg)
}
- case OLEN, OCAP, OREAL, OIMAG, OCLOSE:
- argument(e.discardHole(), call.Left)
+ case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE:
+ argument(e.discardHole(), call.Left())
}
}
@@ -857,14 +894,14 @@ func (e *Escape) call(ks []EscHole, call, where *Node) {
// ks should contain the holes representing where the function
// callee's results flow. fn is the statically-known callee function,
// if any.
-func (e *Escape) tagHole(ks []EscHole, fn *Node, param *types.Field) EscHole {
+func (e *Escape) tagHole(ks []EscHole, fn ir.Node, param *types.Field) EscHole {
// If this is a dynamic call, we can't rely on param.Note.
if fn == nil {
return e.heapHole()
}
if e.inMutualBatch(fn) {
- return e.addr(asNode(param.Nname))
+ return e.addr(ir.AsNode(param.Nname))
}
// Call to previously tagged function.
@@ -898,10 +935,10 @@ func (e *Escape) tagHole(ks []EscHole, fn *Node, param *types.Field) EscHole {
// fn has not yet been analyzed, so its parameters and results
// should be incorporated directly into the flow graph instead of
// relying on its escape analysis tagging.
-func (e *Escape) inMutualBatch(fn *Node) bool {
- if fn.Name.Defn != nil && fn.Name.Defn.Esc < EscFuncTagged {
- if fn.Name.Defn.Esc == EscFuncUnknown {
- Fatalf("graph inconsistency")
+func (e *Escape) inMutualBatch(fn ir.Node) bool {
+ if fn.Name().Defn != nil && fn.Name().Defn.Esc() < EscFuncTagged {
+ if fn.Name().Defn.Esc() == EscFuncUnknown {
+ base.Fatalf("graph inconsistency")
}
return true
}
@@ -923,15 +960,15 @@ type EscHole struct {
type EscNote struct {
next *EscNote
- where *Node
+ where ir.Node
why string
}
-func (k EscHole) note(where *Node, why string) EscHole {
+func (k EscHole) note(where ir.Node, why string) EscHole {
if where == nil || why == "" {
- Fatalf("note: missing where/why")
+ base.Fatalf("note: missing where/why")
}
- if Debug.m >= 2 || logopt.Enabled() {
+ if base.Flag.LowerM >= 2 || logopt.Enabled() {
k.notes = &EscNote{
next: k.notes,
where: where,
@@ -944,15 +981,15 @@ func (k EscHole) note(where *Node, why string) EscHole {
func (k EscHole) shift(delta int) EscHole {
k.derefs += delta
if k.derefs < -1 {
- Fatalf("derefs underflow: %v", k.derefs)
+ base.Fatalf("derefs underflow: %v", k.derefs)
}
return k
}
-func (k EscHole) deref(where *Node, why string) EscHole { return k.shift(1).note(where, why) }
-func (k EscHole) addr(where *Node, why string) EscHole { return k.shift(-1).note(where, why) }
+func (k EscHole) deref(where ir.Node, why string) EscHole { return k.shift(1).note(where, why) }
+func (k EscHole) addr(where ir.Node, why string) EscHole { return k.shift(-1).note(where, why) }
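// Editor's note on the derefs convention used by shift/deref/addr (a model
// description, not new API): derefs counts net indirections along a flow
// edge, and -1 means address-of, so
//
//	p = &x // edge x -> p with derefs == -1 (addr)
//	p = x  // derefs == 0
//	p = *x // derefs == +1 (deref)
//
// shift fatals below -1 because a single edge can carry at most one
// address-of: Go has no &&x.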
-func (k EscHole) dotType(t *types.Type, where *Node, why string) EscHole {
+func (k EscHole) dotType(t *types.Type, where ir.Node, why string) EscHole {
if !t.IsInterface() && !isdirectiface(t) {
k = k.shift(1)
}
@@ -981,7 +1018,7 @@ func (e *Escape) teeHole(ks ...EscHole) EscHole {
// *ltmp" and "l2 = ltmp" and return "ltmp = &_"
// instead.
if k.derefs < 0 {
- Fatalf("teeHole: negative derefs")
+ base.Fatalf("teeHole: negative derefs")
}
e.flow(k, loc)
@@ -989,7 +1026,7 @@ func (e *Escape) teeHole(ks ...EscHole) EscHole {
return loc.asHole()
}
-func (e *Escape) dcl(n *Node) EscHole {
+func (e *Escape) dcl(n ir.Node) EscHole {
loc := e.oldLoc(n)
loc.loopDepth = e.loopDepth
return loc.asHole()
@@ -998,7 +1035,7 @@ func (e *Escape) dcl(n *Node) EscHole {
// spill allocates a new location associated with expression n, flows
// its address to k, and returns a hole that flows values to it. It's
// intended for use with most expressions that allocate storage.
-func (e *Escape) spill(k EscHole, n *Node) EscHole {
+func (e *Escape) spill(k EscHole, n ir.Node) EscHole {
loc := e.newLoc(n, true)
e.flow(k.addr(n, "spill"), loc)
return loc.asHole()
@@ -1015,23 +1052,23 @@ func (e *Escape) later(k EscHole) EscHole {
// canonicalNode returns the canonical *Node that n logically
// represents.
-func canonicalNode(n *Node) *Node {
- if n != nil && n.Op == ONAME && n.Name.IsClosureVar() {
- n = n.Name.Defn
- if n.Name.IsClosureVar() {
- Fatalf("still closure var")
+func canonicalNode(n ir.Node) ir.Node {
+ if n != nil && n.Op() == ir.ONAME && n.Name().IsClosureVar() {
+ n = n.Name().Defn
+ if n.Name().IsClosureVar() {
+ base.Fatalf("still closure var")
}
}
return n
}
-func (e *Escape) newLoc(n *Node, transient bool) *EscLocation {
+func (e *Escape) newLoc(n ir.Node, transient bool) *EscLocation {
if e.curfn == nil {
- Fatalf("e.curfn isn't set")
+ base.Fatalf("e.curfn isn't set")
}
- if n != nil && n.Type != nil && n.Type.NotInHeap() {
- yyerrorl(n.Pos, "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type)
+ if n != nil && n.Type() != nil && n.Type().NotInHeap() {
+ base.ErrorfAt(n.Pos(), "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type())
}
n = canonicalNode(n)
@@ -1043,12 +1080,12 @@ func (e *Escape) newLoc(n *Node, transient bool) *EscLocation {
}
e.allLocs = append(e.allLocs, loc)
if n != nil {
- if n.Op == ONAME && n.Name.Curfn != e.curfn {
- Fatalf("curfn mismatch: %v != %v", n.Name.Curfn, e.curfn)
+ if n.Op() == ir.ONAME && n.Name().Curfn != e.curfn {
+ base.Fatalf("curfn mismatch: %v != %v", n.Name().Curfn, e.curfn)
}
if n.HasOpt() {
- Fatalf("%v already has a location", n)
+ base.Fatalf("%v already has a location", n)
}
n.SetOpt(loc)
@@ -1059,7 +1096,7 @@ func (e *Escape) newLoc(n *Node, transient bool) *EscLocation {
return loc
}
-func (e *Escape) oldLoc(n *Node) *EscLocation {
+func (e *Escape) oldLoc(n ir.Node) *EscLocation {
n = canonicalNode(n)
return n.Opt().(*EscLocation)
}
@@ -1077,14 +1114,14 @@ func (e *Escape) flow(k EscHole, src *EscLocation) {
return
}
if dst.escapes && k.derefs < 0 { // dst = &src
- if Debug.m >= 2 || logopt.Enabled() {
- pos := linestr(src.n.Pos)
- if Debug.m >= 2 {
+ if base.Flag.LowerM >= 2 || logopt.Enabled() {
+ pos := base.FmtPos(src.n.Pos())
+ if base.Flag.LowerM >= 2 {
fmt.Printf("%s: %v escapes to heap:\n", pos, src.n)
}
explanation := e.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{})
if logopt.Enabled() {
- logopt.LogOpt(src.n.Pos, "escapes", "escape", e.curfn.funcname(), fmt.Sprintf("%v escapes to heap", src.n), explanation)
+ logopt.LogOpt(src.n.Pos(), "escapes", "escape", ir.FuncName(e.curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation)
}
}
@@ -1152,16 +1189,16 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc
l := todo[len(todo)-1]
todo = todo[:len(todo)-1]
- base := l.derefs
+ derefs := l.derefs
// If l.derefs < 0, then l's address flows to root.
- addressOf := base < 0
+ addressOf := derefs < 0
if addressOf {
// For a flow path like "root = &l; l = x",
// l's address flows to root, but x's does
// not. We recognize this by lower bounding
- // base at 0.
- base = 0
+ // derefs at 0.
+ derefs = 0
// If l's address flows to a non-transient
// location, then l can't be transiently
@@ -1178,31 +1215,31 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc
// corresponding result parameter, then record
// that value flow for tagging the function
// later.
- if l.isName(PPARAM) {
- if (logopt.Enabled() || Debug.m >= 2) && !l.escapes {
- if Debug.m >= 2 {
- fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", linestr(l.n.Pos), l.n, e.explainLoc(root), base)
+ if l.isName(ir.PPARAM) {
+ if (logopt.Enabled() || base.Flag.LowerM >= 2) && !l.escapes {
+ if base.Flag.LowerM >= 2 {
+ fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, e.explainLoc(root), derefs)
}
explanation := e.explainPath(root, l)
if logopt.Enabled() {
- logopt.LogOpt(l.n.Pos, "leak", "escape", e.curfn.funcname(),
- fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, e.explainLoc(root), base), explanation)
+ logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e.curfn),
+ fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, e.explainLoc(root), derefs), explanation)
}
}
- l.leakTo(root, base)
+ l.leakTo(root, derefs)
}
// If l's address flows somewhere that
// outlives it, then l needs to be heap
// allocated.
if addressOf && !l.escapes {
- if logopt.Enabled() || Debug.m >= 2 {
- if Debug.m >= 2 {
- fmt.Printf("%s: %v escapes to heap:\n", linestr(l.n.Pos), l.n)
+ if logopt.Enabled() || base.Flag.LowerM >= 2 {
+ if base.Flag.LowerM >= 2 {
+ fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos()), l.n)
}
explanation := e.explainPath(root, l)
if logopt.Enabled() {
- logopt.LogOpt(l.n.Pos, "escape", "escape", e.curfn.funcname(), fmt.Sprintf("%v escapes to heap", l.n), explanation)
+ logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e.curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation)
}
}
l.escapes = true
@@ -1215,10 +1252,10 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc
if edge.src.escapes {
continue
}
- derefs := base + edge.derefs
- if edge.src.walkgen != walkgen || edge.src.derefs > derefs {
+ d := derefs + edge.derefs
+ if edge.src.walkgen != walkgen || edge.src.derefs > d {
edge.src.walkgen = walkgen
- edge.src.derefs = derefs
+ edge.src.derefs = d
edge.src.dst = l
edge.src.dstEdgeIdx = i
todo = append(todo, edge.src)
@@ -1230,12 +1267,12 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc
// explainPath prints an explanation of how src flows to the walk root.
func (e *Escape) explainPath(root, src *EscLocation) []*logopt.LoggedOpt {
visited := make(map[*EscLocation]bool)
- pos := linestr(src.n.Pos)
+ pos := base.FmtPos(src.n.Pos())
var explanation []*logopt.LoggedOpt
for {
// Prevent infinite loop.
if visited[src] {
- if Debug.m >= 2 {
+ if base.Flag.LowerM >= 2 {
fmt.Printf("%s: warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos)
}
break
@@ -1244,7 +1281,7 @@ func (e *Escape) explainPath(root, src *EscLocation) []*logopt.LoggedOpt {
dst := src.dst
edge := &dst.edges[src.dstEdgeIdx]
if edge.src != src {
- Fatalf("path inconsistency: %v != %v", edge.src, src)
+ base.Fatalf("path inconsistency: %v != %v", edge.src, src)
}
explanation = e.explainFlow(pos, dst, src, edge.derefs, edge.notes, explanation)
@@ -1263,7 +1300,7 @@ func (e *Escape) explainFlow(pos string, dst, srcloc *EscLocation, derefs int, n
if derefs >= 0 {
ops = strings.Repeat("*", derefs)
}
- print := Debug.m >= 2
+ print := base.Flag.LowerM >= 2
flow := fmt.Sprintf(" flow: %s = %s%v:", e.explainLoc(dst), ops, e.explainLoc(srcloc))
if print {
@@ -1272,19 +1309,19 @@ func (e *Escape) explainFlow(pos string, dst, srcloc *EscLocation, derefs int, n
if logopt.Enabled() {
var epos src.XPos
if notes != nil {
- epos = notes.where.Pos
+ epos = notes.where.Pos()
} else if srcloc != nil && srcloc.n != nil {
- epos = srcloc.n.Pos
+ epos = srcloc.n.Pos()
}
- explanation = append(explanation, logopt.NewLoggedOpt(epos, "escflow", "escape", e.curfn.funcname(), flow))
+ explanation = append(explanation, logopt.NewLoggedOpt(epos, "escflow", "escape", ir.FuncName(e.curfn), flow))
}
for note := notes; note != nil; note = note.next {
if print {
- fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, linestr(note.where.Pos))
+ fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos()))
}
if logopt.Enabled() {
- explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos, "escflow", "escape", e.curfn.funcname(),
+ explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos(), "escflow", "escape", ir.FuncName(e.curfn),
fmt.Sprintf(" from %v (%v)", note.where, note.why)))
}
}
@@ -1299,7 +1336,7 @@ func (e *Escape) explainLoc(l *EscLocation) string {
// TODO(mdempsky): Omit entirely.
return "{temp}"
}
- if l.n.Op == ONAME {
+ if l.n.Op() == ir.ONAME {
return fmt.Sprintf("%v", l.n)
}
return fmt.Sprintf("{storage for %v}", l.n)
@@ -1316,14 +1353,14 @@ func (e *Escape) outlives(l, other *EscLocation) bool {
// We don't know what callers do with returned values, so
// pessimistically we need to assume they flow to the heap and
// outlive everything too.
- if l.isName(PPARAMOUT) {
+ if l.isName(ir.PPARAMOUT) {
// Exception: Directly called closures can return
// locations allocated outside of them without forcing
// them to the heap. For example:
//
// var u int // okay to stack allocate
// *(func() *int { return &u }()) = 42
- if containsClosure(other.curfn, l.curfn) && l.curfn.Func.Closure.Func.Top&ctxCallee != 0 {
+ if containsClosure(other.curfn, l.curfn) && l.curfn.Func().ClosureCalled {
return false
}
@@ -1357,9 +1394,9 @@ func (e *Escape) outlives(l, other *EscLocation) bool {
}
// containsClosure reports whether c is a closure contained within f.
-func containsClosure(f, c *Node) bool {
- if f.Op != ODCLFUNC || c.Op != ODCLFUNC {
- Fatalf("bad containsClosure: %v, %v", f, c)
+func containsClosure(f, c ir.Node) bool {
+ if f.Op() != ir.ODCLFUNC || c.Op() != ir.ODCLFUNC {
+ base.Fatalf("bad containsClosure: %v, %v", f, c)
}
// Common case.
@@ -1369,8 +1406,8 @@ func containsClosure(f, c *Node) bool {
// Closures within function Foo are named like "Foo.funcN..."
// TODO(mdempsky): Better way to recognize this.
- fn := f.Func.Nname.Sym.Name
- cn := c.Func.Nname.Sym.Name
+ fn := f.Func().Nname.Sym().Name
+ cn := c.Func().Nname.Sym().Name
return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
}
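
As a concrete illustration of the name-prefix check, a minimal sketch (package and function names are illustrative): a closure declared inside Foo is compiled under a symbol such as "Foo.func1", so containment reduces to a dotted-prefix comparison.

	package p

	// The closure below is materialized as a function whose symbol name is
	// "Foo.func1"; containsClosure treats it as contained within Foo
	// because "Foo" is a '.'-separated prefix of "Foo.func1".
	func Foo() func() int {
		return func() int { return 1 }
	}
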
@@ -1378,9 +1415,9 @@ func containsClosure(f, c *Node) bool {
func (l *EscLocation) leakTo(sink *EscLocation, derefs int) {
// If sink is a result parameter and we can fit return bits
// into the escape analysis tag, then record a return leak.
- if sink.isName(PPARAMOUT) && sink.curfn == l.curfn {
+ if sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn {
// TODO(mdempsky): Eliminate dependency on Vargen here.
- ri := int(sink.n.Name.Vargen) - 1
+ ri := int(sink.n.Name().Vargen) - 1
if ri < numEscResults {
// Leak to result parameter.
l.paramEsc.AddResult(ri, derefs)
@@ -1392,14 +1429,14 @@ func (l *EscLocation) leakTo(sink *EscLocation, derefs int) {
l.paramEsc.AddHeap(derefs)
}
-func (e *Escape) finish(fns []*Node) {
+func (e *Escape) finish(fns []ir.Node) {
// Record parameter tags for package export data.
for _, fn := range fns {
- fn.Esc = EscFuncTagged
+ fn.SetEsc(EscFuncTagged)
narg := 0
for _, fs := range &types.RecvsParams {
- for _, f := range fs(fn.Type).Fields().Slice() {
+ for _, f := range fs(fn.Type()).Fields().Slice() {
narg++
f.Note = e.paramTag(fn, narg, f)
}
@@ -1416,21 +1453,21 @@ func (e *Escape) finish(fns []*Node) {
// Update n.Esc based on escape analysis results.
if loc.escapes {
- if n.Op != ONAME {
- if Debug.m != 0 {
- Warnl(n.Pos, "%S escapes to heap", n)
+ if n.Op() != ir.ONAME {
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(n.Pos(), "%S escapes to heap", n)
}
if logopt.Enabled() {
- logopt.LogOpt(n.Pos, "escape", "escape", e.curfn.funcname())
+ logopt.LogOpt(n.Pos(), "escape", "escape", ir.FuncName(e.curfn))
}
}
- n.Esc = EscHeap
+ n.SetEsc(EscHeap)
addrescapes(n)
} else {
- if Debug.m != 0 && n.Op != ONAME {
- Warnl(n.Pos, "%S does not escape", n)
+ if base.Flag.LowerM != 0 && n.Op() != ir.ONAME {
+ base.WarnfAt(n.Pos(), "%S does not escape", n)
}
- n.Esc = EscNone
+ n.SetEsc(EscNone)
if loc.transient {
n.SetTransient(true)
}
@@ -1438,8 +1475,8 @@ func (e *Escape) finish(fns []*Node) {
}
}
-func (l *EscLocation) isName(c Class) bool {
- return l.n != nil && l.n.Op == ONAME && l.n.Class() == c
+func (l *EscLocation) isName(c ir.Class) bool {
+ return l.n != nil && l.n.Op() == ir.ONAME && l.n.Class() == c
}
const numEscResults = 7
@@ -1481,7 +1518,7 @@ func (l *EscLeaks) add(i, derefs int) {
func (l *EscLeaks) set(i, derefs int) {
v := derefs + 1
if v < 0 {
- Fatalf("invalid derefs count: %v", derefs)
+ base.Fatalf("invalid derefs count: %v", derefs)
}
if v > math.MaxUint8 {
v = math.MaxUint8
@@ -1536,3 +1573,466 @@ func ParseLeaks(s string) EscLeaks {
copy(l[:], s[4:])
return l
}
+
+func escapes(all []ir.Node) {
+ visitBottomUp(all, escapeFuncs)
+}
+
+const (
+ EscFuncUnknown = 0 + iota
+ EscFuncPlanned
+ EscFuncStarted
+ EscFuncTagged
+)
+
+func min8(a, b int8) int8 {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func max8(a, b int8) int8 {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+const (
+ EscUnknown = iota
+ EscNone // Does not escape to heap, result, or parameters.
+ EscHeap // Reachable from the heap
+ EscNever // By construction will not escape.
+)
+
+// funcSym returns fn.Func().Nname.Sym() if no nils are encountered along the way.
+func funcSym(fn ir.Node) *types.Sym {
+ if fn == nil || fn.Func().Nname == nil {
+ return nil
+ }
+ return fn.Func().Nname.Sym()
+}
+
+// Mark labels that have no backjumps to them as not increasing e.loopdepth.
+// Walk hasn't generated (goto|label).Left.Sym.Label yet, so we'll cheat
+// and set it to one of the following two. Then in esc we'll clear it again.
+var (
+ looping = ir.Nod(ir.OXXX, nil, nil)
+ nonlooping = ir.Nod(ir.OXXX, nil, nil)
+)
+
+func isSliceSelfAssign(dst, src ir.Node) bool {
+ // Detect the following special case.
+ //
+ // func (b *Buffer) Foo() {
+ // n, m := ...
+ // b.buf = b.buf[n:m]
+ // }
+ //
+ // This assignment is a no-op for escape analysis,
+ // it does not store any new pointers into b that were not already there.
+ // However, without this special case b will escape, because we assign to ODEREF/ODOTPTR.
+ // Here we assume that the statement will not contain calls,
+ // that is, that order will move any calls to init.
+ // Otherwise base ONAME value could change between the moments
+ // when we evaluate it for dst and for src.
+
+ // dst is ONAME dereference.
+ if dst.Op() != ir.ODEREF && dst.Op() != ir.ODOTPTR || dst.Left().Op() != ir.ONAME {
+ return false
+ }
+ // src is a slice operation.
+ switch src.Op() {
+ case ir.OSLICE, ir.OSLICE3, ir.OSLICESTR:
+ // OK.
+ case ir.OSLICEARR, ir.OSLICE3ARR:
+ // Since arrays are embedded into containing object,
+ // slice of non-pointer array will introduce a new pointer into b that was not already there
+ // (pointer to b itself). After such assignment, if b contents escape,
+ // b escapes as well. If we ignore such OSLICEARR, we will conclude
+ // that b does not escape when b contents do.
+ //
+ // Pointer to an array is OK since it's not stored inside b directly.
+ // For slicing an array (not pointer to array), there is an implicit OADDR.
+ // We check that to determine non-pointer array slicing.
+ if src.Left().Op() == ir.OADDR {
+ return false
+ }
+ default:
+ return false
+ }
+ // slice is applied to ONAME dereference.
+ if src.Left().Op() != ir.ODEREF && src.Left().Op() != ir.ODOTPTR || src.Left().Left().Op() != ir.ONAME {
+ return false
+ }
+ // dst and src reference the same base ONAME.
+ return dst.Left() == src.Left().Left()
+}
+
+// isSelfAssign reports whether assignment from src to dst can
+// be ignored by the escape analysis as it's effectively a self-assignment.
+func isSelfAssign(dst, src ir.Node) bool {
+ if isSliceSelfAssign(dst, src) {
+ return true
+ }
+
+ // Detect trivial assignments that assign back to the same object.
+ //
+ // It covers these cases:
+ // val.x = val.y
+ // val.x[i] = val.y[j]
+ // val.x1.x2 = val.x1.y2
+ // ... etc
+ //
+ // These assignments do not change assigned object lifetime.
+
+ if dst == nil || src == nil || dst.Op() != src.Op() {
+ return false
+ }
+
+ switch dst.Op() {
+ case ir.ODOT, ir.ODOTPTR:
+ // Safe trailing accessors that are permitted to differ.
+ case ir.OINDEX:
+ if mayAffectMemory(dst.Right()) || mayAffectMemory(src.Right()) {
+ return false
+ }
+ default:
+ return false
+ }
+
+ // The expression prefix must be both "safe" and identical.
+ return samesafeexpr(dst.Left(), src.Left())
+}
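
A minimal sketch of assignments these two checks recognize, using hypothetical types (none of the identifiers come from the source):

	package p

	type hdr struct{ x, y int }

	type buf struct {
		data []byte
		h    hdr
	}

	// Neither assignment stores a pointer into *b that was not already
	// there, so escape analysis may ignore both as self-assignments.
	func (b *buf) trim(n, m int) {
		b.data = b.data[n:m] // isSliceSelfAssign: re-slice of an ONAME dereference
		b.h.x = b.h.y        // isSelfAssign: identical "safe" prefix b.h
	}
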
+
+// mayAffectMemory reports whether evaluation of n may affect the program's
+// memory state. If the expression can't affect memory state, then it can be
+// safely ignored by the escape analysis.
+func mayAffectMemory(n ir.Node) bool {
+ // We may want to use a list of "memory safe" ops instead of generally
+ // "side-effect free", which would include all calls and other ops that can
+ // allocate or change global state. For now, it's safer to start with the latter.
+ //
+ // We're ignoring things like division by zero, index out of range,
+ // and nil pointer dereference here.
+ switch n.Op() {
+ case ir.ONAME, ir.OCLOSUREVAR, ir.OLITERAL, ir.ONIL:
+ return false
+
+ // Left+Right group.
+ case ir.OINDEX, ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
+ return mayAffectMemory(n.Left()) || mayAffectMemory(n.Right())
+
+ // Left group.
+ case ir.ODOT, ir.ODOTPTR, ir.ODEREF, ir.OCONVNOP, ir.OCONV, ir.OLEN, ir.OCAP,
+ ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
+ return mayAffectMemory(n.Left())
+
+ default:
+ return true
+ }
+}
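
For the OINDEX case of isSelfAssign, the index expressions may differ only when evaluating them cannot affect memory state; a hedged sketch (identifiers hypothetical):

	package p

	type t struct{ a [8]int }

	var g func() int

	func (v *t) shuffle(i, j int) {
		v.a[i] = v.a[j]   // i and j are plain names: treated as a self-assignment
		v.a[i] = v.a[g()] // the call may affect memory: not ignored
	}
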
+
+// heapAllocReason returns the reason the given Node must be heap
+// allocated, or the empty string if it does not need to be.
+func heapAllocReason(n ir.Node) string {
+ if n.Type() == nil {
+ return ""
+ }
+
+ // Parameters are always passed via the stack.
+ if n.Op() == ir.ONAME && (n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) {
+ return ""
+ }
+
+ if n.Type().Width > maxStackVarSize {
+ return "too large for stack"
+ }
+
+ if (n.Op() == ir.ONEW || n.Op() == ir.OPTRLIT) && n.Type().Elem().Width >= maxImplicitStackVarSize {
+ return "too large for stack"
+ }
+
+ if n.Op() == ir.OCLOSURE && closureType(n).Size() >= maxImplicitStackVarSize {
+ return "too large for stack"
+ }
+ if n.Op() == ir.OCALLPART && partialCallType(n).Size() >= maxImplicitStackVarSize {
+ return "too large for stack"
+ }
+
+ if n.Op() == ir.OMAKESLICE {
+ r := n.Right()
+ if r == nil {
+ r = n.Left()
+ }
+ if !smallintconst(r) {
+ return "non-constant size"
+ }
+ if t := n.Type(); t.Elem().Width != 0 && r.Int64Val() >= maxImplicitStackVarSize/t.Elem().Width {
+ return "too large for stack"
+ }
+ }
+
+ return ""
+}
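
A hedged sketch of declarations that trip each branch; the sizes are assumed to exceed maxStackVarSize and maxImplicitStackVarSize respectively, and the comments quote the reason strings above:

	package p

	func f(n int) []byte {
		var big [16 << 20]byte  // assumed > maxStackVarSize: "too large for stack"
		p := new([1 << 20]byte) // assumed > maxImplicitStackVarSize: "too large for stack"
		b := make([]byte, n)    // n is not a small constant: "non-constant size"
		_ = big
		_ = p
		return b
	}
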
+
+// addrescapes tags node n as having had its address taken
+// by "increasing" the "value" of n.Esc to EscHeap.
+// Storage is allocated as necessary to allow the address
+// to be taken.
+func addrescapes(n ir.Node) {
+ switch n.Op() {
+ default:
+ // Unexpected Op, probably due to a previous type error. Ignore.
+
+ case ir.ODEREF, ir.ODOTPTR:
+ // Nothing to do.
+
+ case ir.ONAME:
+ if n == nodfp {
+ break
+ }
+
+ // If this is a tmpname (PAUTO), it was tagged by tmpname as not escaping.
+ // On PPARAM, Esc means something different.
+ if n.Class() == ir.PAUTO && n.Esc() == EscNever {
+ break
+ }
+
+ // If a closure reference escapes, mark the outer variable as escaping.
+ if n.Name().IsClosureVar() {
+ addrescapes(n.Name().Defn)
+ break
+ }
+
+ if n.Class() != ir.PPARAM && n.Class() != ir.PPARAMOUT && n.Class() != ir.PAUTO {
+ break
+ }
+
+ // This is a plain parameter or local variable that needs to move to the heap,
+ // but possibly for the function outside the one we're compiling.
+ // That is, if we have:
+ //
+ // func f(x int) {
+ // func() {
+ // global = &x
+ // }
+ // }
+ //
+ // then we're analyzing the inner closure but we need to move x to the
+ // heap in f, not in the inner closure. Flip over to f before calling moveToHeap.
+ oldfn := Curfn
+ Curfn = n.Name().Curfn
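+ // Assumed invariant after this refactor: Name().Curfn is always the
+ // enclosing ODCLFUNC, never the OCLOSURE, so the branch below should be
+ // unreachable (hence the panic).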
+ if Curfn.Op() == ir.OCLOSURE {
+ Curfn = Curfn.Func().Decl
+ panic("can't happen")
+ }
+ ln := base.Pos
+ base.Pos = Curfn.Pos()
+ moveToHeap(n)
+ Curfn = oldfn
+ base.Pos = ln
+
+ // ODOTPTR has already been introduced,
+ // so these are the non-pointer ODOT and OINDEX.
+ // In &x[0], if x is a slice, then x does not
+ // escape--the pointer inside x does, but that
+ // is always a heap pointer anyway.
+ case ir.ODOT, ir.OINDEX, ir.OPAREN, ir.OCONVNOP:
+ if !n.Left().Type().IsSlice() {
+ addrescapes(n.Left())
+ }
+ }
+}
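
The closure case from the comment above, as a compilable sketch; with -m the compiler would report something like "moved to heap: x" against f:

	package p

	var global *int

	func f(x int) {
		func() {
			global = &x // x belongs to f, so the move happens in f
		}()
	}
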
+
+// moveToHeap records the parameter or local variable n as moved to the heap.
+func moveToHeap(n ir.Node) {
+ if base.Flag.LowerR != 0 {
+ ir.Dump("MOVE", n)
+ }
+ if base.Flag.CompilingRuntime {
+ base.Errorf("%v escapes to heap, not allowed in runtime", n)
+ }
+ if n.Class() == ir.PAUTOHEAP {
+ ir.Dump("n", n)
+ base.Fatalf("double move to heap")
+ }
+
+ // Allocate a local stack variable to hold the pointer to the heap copy.
+ // temp will add it to the function declaration list automatically.
+ heapaddr := temp(types.NewPtr(n.Type()))
+ heapaddr.SetSym(lookup("&" + n.Sym().Name))
+ heapaddr.Orig().SetSym(heapaddr.Sym())
+ heapaddr.SetPos(n.Pos())
+
+ // Unset AutoTemp to persist the &foo variable name through SSA to
+ // liveness analysis.
+ // TODO(mdempsky/drchase): Cleaner solution?
+ heapaddr.Name().SetAutoTemp(false)
+
+ // Parameters have a local stack copy used at function start/end
+ // in addition to the copy in the heap that may live longer than
+ // the function.
+ if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
+ if n.Offset() == types.BADWIDTH {
+ base.Fatalf("addrescapes before param assignment")
+ }
+
+ // We rewrite n below to be a heap variable (indirection of heapaddr).
+ // Preserve a copy so we can still write code referring to the original,
+ // and substitute that copy into the function declaration list
+ // so that analyses of the local (on-stack) variables use it.
+ stackcopy := NewName(n.Sym())
+ stackcopy.SetType(n.Type())
+ stackcopy.SetOffset(n.Offset())
+ stackcopy.SetClass(n.Class())
+ stackcopy.Name().Param.Heapaddr = heapaddr
+ if n.Class() == ir.PPARAMOUT {
+ // Make sure the pointer to the heap copy is kept live throughout the function.
+ // The function could panic at any point, and then a defer could recover.
+ // Thus, we need the pointer to the heap copy always available so the
+ // post-deferreturn code can copy the return value back to the stack.
+ // See issue 16095.
+ heapaddr.Name().SetIsOutputParamHeapAddr(true)
+ }
+ n.Name().Param.Stackcopy = stackcopy
+
+ // Substitute the stackcopy into the function variable list so that
+ // liveness and other analyses use the underlying stack slot
+ // and not the now-pseudo-variable n.
+ found := false
+ for i, d := range Curfn.Func().Dcl {
+ if d == n {
+ Curfn.Func().Dcl[i] = stackcopy
+ found = true
+ break
+ }
+ // Parameters are before locals, so can stop early.
+ // This limits the search even in functions with many local variables.
+ if d.Class() == ir.PAUTO {
+ break
+ }
+ }
+ if !found {
+ base.Fatalf("cannot find %v in local variable list", n)
+ }
+ Curfn.Func().Dcl = append(Curfn.Func().Dcl, n)
+ }
+
+ // Modify n in place so that uses of n now mean indirection of the heapaddr.
+ n.SetClass(ir.PAUTOHEAP)
+ n.SetOffset(0)
+ n.Name().Param.Heapaddr = heapaddr
+ n.SetEsc(EscHeap)
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(n.Pos(), "moved to heap: %v", n)
+ }
+}
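
A sketch of the PPARAMOUT case that motivates SetIsOutputParamHeapAddr (see issue 16095): the heap address of the result must stay live so that, after a panic and recover, the post-deferreturn code can still copy the result back to the caller's stack.

	package p

	var sink *int

	func g() (r int) {
		defer func() { recover() }()
		sink = &r // r escapes: it gets a heap copy plus a live &r stack pointer
		return 1
	}
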
+
+// This special tag is applied to uintptr variables
+// that we believe may hold unsafe.Pointers for
+// calls into assembly functions.
+const unsafeUintptrTag = "unsafe-uintptr"
+
+// This special tag is applied to uintptr parameters of functions
+// marked go:uintptrescapes.
+const uintptrEscapesTag = "uintptr-escapes"
+
+func (e *Escape) paramTag(fn ir.Node, narg int, f *types.Field) string {
+ name := func() string {
+ if f.Sym != nil {
+ return f.Sym.Name
+ }
+ return fmt.Sprintf("arg#%d", narg)
+ }
+
+ if fn.Body().Len() == 0 {
+ // Assume that uintptr arguments must be held live across the call.
+ // This is most important for syscall.Syscall.
+ // See golang.org/issue/13372.
+ // This really doesn't have much to do with escape analysis per se,
+ // but we are reusing the ability to annotate an individual function
+ // argument and pass those annotations along to importing code.
+ if f.Type.IsUintptr() {
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(f.Pos, "assuming %v is unsafe uintptr", name())
+ }
+ return unsafeUintptrTag
+ }
+
+ if !f.Type.HasPointers() { // don't bother tagging for scalars
+ return ""
+ }
+
+ var esc EscLeaks
+
+ // External functions are assumed unsafe, unless
+ // //go:noescape is given before the declaration.
+ if fn.Func().Pragma&ir.Noescape != 0 {
+ if base.Flag.LowerM != 0 && f.Sym != nil {
+ base.WarnfAt(f.Pos, "%v does not escape", name())
+ }
+ } else {
+ if base.Flag.LowerM != 0 && f.Sym != nil {
+ base.WarnfAt(f.Pos, "leaking param: %v", name())
+ }
+ esc.AddHeap(0)
+ }
+
+ return esc.Encode()
+ }
+
+ if fn.Func().Pragma&ir.UintptrEscapes != 0 {
+ if f.Type.IsUintptr() {
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(f.Pos, "marking %v as escaping uintptr", name())
+ }
+ return uintptrEscapesTag
+ }
+ if f.IsDDD() && f.Type.Elem().IsUintptr() {
+ // final argument is ...uintptr.
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(f.Pos, "marking %v as escaping ...uintptr", name())
+ }
+ return uintptrEscapesTag
+ }
+ }
+
+ if !f.Type.HasPointers() { // don't bother tagging for scalars
+ return ""
+ }
+
+ // Unnamed parameters are unused and therefore do not escape.
+ if f.Sym == nil || f.Sym.IsBlank() {
+ var esc EscLeaks
+ return esc.Encode()
+ }
+
+ n := ir.AsNode(f.Nname)
+ loc := e.oldLoc(n)
+ esc := loc.paramEsc
+ esc.Optimize()
+
+ if base.Flag.LowerM != 0 && !loc.escapes {
+ if esc.Empty() {
+ base.WarnfAt(f.Pos, "%v does not escape", name())
+ }
+ if x := esc.Heap(); x >= 0 {
+ if x == 0 {
+ base.WarnfAt(f.Pos, "leaking param: %v", name())
+ } else {
+ // TODO(mdempsky): Mention level=x like below?
+ base.WarnfAt(f.Pos, "leaking param content: %v", name())
+ }
+ }
+ for i := 0; i < numEscResults; i++ {
+ if x := esc.Result(i); x >= 0 {
+ res := fn.Type().Results().Field(i).Sym
+ base.WarnfAt(f.Pos, "leaking param: %v to result %v level=%d", name(), res, x)
+ }
+ }
+ }
+
+ return esc.Encode()
+}
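
For reference, a hedged sketch exercising the two special tags (function names are hypothetical; the body-less declaration would normally be backed by assembly):

	package p

	import "unsafe"

	// Body-less: its uintptr parameter is tagged unsafe-uintptr, so callers
	// keep the referenced object alive across the call.
	func rawcall(p uintptr)

	// Tagged uintptr-escapes: pointers converted to uintptr for a call to
	// this function are forced to escape.
	//go:uintptrescapes
	func keepalive(p uintptr) { _ = p }

	func use(v *int) {
		rawcall(uintptr(unsafe.Pointer(v)))
		keepalive(uintptr(unsafe.Pointer(v)))
	}
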
diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go
index c6917e0f81..10033793bf 100644
--- a/src/cmd/compile/internal/gc/export.go
+++ b/src/cmd/compile/internal/gc/export.go
@@ -5,34 +5,33 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/bio"
"cmd/internal/src"
"fmt"
-)
-
-var (
- Debug_export int // if set, print debugging information about export data
+ "go/constant"
)
func exportf(bout *bio.Writer, format string, args ...interface{}) {
fmt.Fprintf(bout, format, args...)
- if Debug_export != 0 {
+ if base.Debug.Export != 0 {
fmt.Printf(format, args...)
}
}
-var asmlist []*Node
+var asmlist []ir.Node
// exportsym marks n for export (or reexport).
-func exportsym(n *Node) {
- if n.Sym.OnExportList() {
+func exportsym(n ir.Node) {
+ if n.Sym().OnExportList() {
return
}
- n.Sym.SetOnExportList(true)
+ n.Sym().SetOnExportList(true)
- if Debug.E != 0 {
- fmt.Printf("export symbol %v\n", n.Sym)
+ if base.Flag.E != 0 {
+ fmt.Printf("export symbol %v\n", n.Sym())
}
exportlist = append(exportlist, n)
@@ -42,22 +41,22 @@ func initname(s string) bool {
return s == "init"
}
-func autoexport(n *Node, ctxt Class) {
- if n.Sym.Pkg != localpkg {
+func autoexport(n ir.Node, ctxt ir.Class) {
+ if n.Sym().Pkg != ir.LocalPkg {
return
}
- if (ctxt != PEXTERN && ctxt != PFUNC) || dclcontext != PEXTERN {
+ if (ctxt != ir.PEXTERN && ctxt != ir.PFUNC) || dclcontext != ir.PEXTERN {
return
}
- if n.Type != nil && n.Type.IsKind(TFUNC) && n.IsMethod() {
+ if n.Type() != nil && n.Type().IsKind(types.TFUNC) && ir.IsMethod(n) {
return
}
- if types.IsExported(n.Sym.Name) || initname(n.Sym.Name) {
+ if types.IsExported(n.Sym().Name) || initname(n.Sym().Name) {
exportsym(n)
}
- if asmhdr != "" && !n.Sym.Asm() {
- n.Sym.SetAsm(true)
+ if base.Flag.AsmHdr != "" && !n.Sym().Asm() {
+ n.Sym().SetAsm(true)
asmlist = append(asmlist, n)
}
}
@@ -70,28 +69,28 @@ func dumpexport(bout *bio.Writer) {
size := bout.Offset() - off
exportf(bout, "\n$$\n")
- if Debug_export != 0 {
- fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", myimportpath, size)
+ if base.Debug.Export != 0 {
+ fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, size)
}
}
-func importsym(ipkg *types.Pkg, s *types.Sym, op Op) *Node {
- n := asNode(s.PkgDef())
+func importsym(ipkg *types.Pkg, s *types.Sym, op ir.Op) ir.Node {
+ n := ir.AsNode(s.PkgDef())
if n == nil {
// iimport should have created a stub ONONAME
// declaration for all imported symbols. The exception
// is declarations for Runtimepkg, which are populated
// by loadsys instead.
if s.Pkg != Runtimepkg {
- Fatalf("missing ONONAME for %v\n", s)
+ base.Fatalf("missing ONONAME for %v\n", s)
}
n = dclname(s)
- s.SetPkgDef(asTypesNode(n))
+ s.SetPkgDef(n)
s.Importdef = ipkg
}
- if n.Op != ONONAME && n.Op != op {
- redeclare(lineno, s, fmt.Sprintf("during import %q", ipkg.Path))
+ if n.Op() != ir.ONONAME && n.Op() != op {
+ redeclare(base.Pos, s, fmt.Sprintf("during import %q", ipkg.Path))
}
return n
}
@@ -100,57 +99,57 @@ func importsym(ipkg *types.Pkg, s *types.Sym, op Op) *Node {
// If no such type has been declared yet, a forward declaration is returned.
// ipkg is the package being imported
func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type {
- n := importsym(ipkg, s, OTYPE)
- if n.Op != OTYPE {
- t := types.New(TFORW)
+ n := importsym(ipkg, s, ir.OTYPE)
+ if n.Op() != ir.OTYPE {
+ t := types.New(types.TFORW)
t.Sym = s
- t.Nod = asTypesNode(n)
+ t.Nod = n
- n.Op = OTYPE
- n.Pos = pos
- n.Type = t
- n.SetClass(PEXTERN)
+ n.SetOp(ir.OTYPE)
+ n.SetPos(pos)
+ n.SetType(t)
+ n.SetClass(ir.PEXTERN)
}
- t := n.Type
+ t := n.Type()
if t == nil {
- Fatalf("importtype %v", s)
+ base.Fatalf("importtype %v", s)
}
return t
}
// importobj declares symbol s as an imported object representable by op.
// ipkg is the package being imported
-func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op Op, ctxt Class, t *types.Type) *Node {
+func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) ir.Node {
n := importsym(ipkg, s, op)
- if n.Op != ONONAME {
- if n.Op == op && (n.Class() != ctxt || !types.Identical(n.Type, t)) {
- redeclare(lineno, s, fmt.Sprintf("during import %q", ipkg.Path))
+ if n.Op() != ir.ONONAME {
+ if n.Op() == op && (n.Class() != ctxt || !types.Identical(n.Type(), t)) {
+ redeclare(base.Pos, s, fmt.Sprintf("during import %q", ipkg.Path))
}
return nil
}
- n.Op = op
- n.Pos = pos
+ n.SetOp(op)
+ n.SetPos(pos)
n.SetClass(ctxt)
- if ctxt == PFUNC {
- n.Sym.SetFunc(true)
+ if ctxt == ir.PFUNC {
+ n.Sym().SetFunc(true)
}
- n.Type = t
+ n.SetType(t)
return n
}
// importconst declares symbol s as an imported constant with type t and value val.
// ipkg is the package being imported
-func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val Val) {
- n := importobj(ipkg, pos, s, OLITERAL, PEXTERN, t)
+func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val constant.Value) {
+ n := importobj(ipkg, pos, s, ir.OLITERAL, ir.PEXTERN, t)
if n == nil { // TODO: Check that value matches.
return
}
n.SetVal(val)
- if Debug.E != 0 {
+ if base.Flag.E != 0 {
fmt.Printf("import const %v %L = %v\n", s, t, val)
}
}
@@ -158,15 +157,14 @@ func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val
// importfunc declares symbol s as an imported function with type t.
// ipkg is the package being imported
func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
- n := importobj(ipkg, pos, s, ONAME, PFUNC, t)
+ n := importobj(ipkg, pos, s, ir.ONAME, ir.PFUNC, t)
if n == nil {
return
}
- n.Func = new(Func)
- t.SetNname(asTypesNode(n))
+ n.SetFunc(new(ir.Func))
- if Debug.E != 0 {
+ if base.Flag.E != 0 {
fmt.Printf("import func %v%S\n", s, t)
}
}
@@ -174,12 +172,12 @@ func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
// importvar declares symbol s as an imported variable with type t.
// ipkg is the package being imported
func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
- n := importobj(ipkg, pos, s, ONAME, PEXTERN, t)
+ n := importobj(ipkg, pos, s, ir.ONAME, ir.PEXTERN, t)
if n == nil {
return
}
- if Debug.E != 0 {
+ if base.Flag.E != 0 {
fmt.Printf("import var %v %L\n", s, t)
}
}
@@ -187,43 +185,43 @@ func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
// importalias declares symbol s as an imported type alias with type t.
// ipkg is the package being imported
func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
- n := importobj(ipkg, pos, s, OTYPE, PEXTERN, t)
+ n := importobj(ipkg, pos, s, ir.OTYPE, ir.PEXTERN, t)
if n == nil {
return
}
- if Debug.E != 0 {
+ if base.Flag.E != 0 {
fmt.Printf("import type %v = %L\n", s, t)
}
}
func dumpasmhdr() {
- b, err := bio.Create(asmhdr)
+ b, err := bio.Create(base.Flag.AsmHdr)
if err != nil {
- Fatalf("%v", err)
+ base.Fatalf("%v", err)
}
- fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", localpkg.Name)
+ fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", ir.LocalPkg.Name)
for _, n := range asmlist {
- if n.Sym.IsBlank() {
+ if n.Sym().IsBlank() {
continue
}
- switch n.Op {
- case OLITERAL:
- t := n.Val().Ctype()
- if t == CTFLT || t == CTCPLX {
+ switch n.Op() {
+ case ir.OLITERAL:
+ t := n.Val().Kind()
+ if t == constant.Float || t == constant.Complex {
break
}
- fmt.Fprintf(b, "#define const_%s %#v\n", n.Sym.Name, n.Val())
+ fmt.Fprintf(b, "#define const_%s %#v\n", n.Sym().Name, n.Val())
- case OTYPE:
- t := n.Type
+ case ir.OTYPE:
+ t := n.Type()
if !t.IsStruct() || t.StructType().Map != nil || t.IsFuncArgStruct() {
break
}
- fmt.Fprintf(b, "#define %s__size %d\n", n.Sym.Name, int(t.Width))
+ fmt.Fprintf(b, "#define %s__size %d\n", n.Sym().Name, int(t.Width))
for _, f := range t.Fields().Slice() {
if !f.Sym.IsBlank() {
- fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym.Name, f.Sym.Name, int(f.Offset))
+ fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym().Name, f.Sym.Name, int(f.Offset))
}
}
}
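
A sketch of the -asmhdr output this produces for a small package (offsets assume a 64-bit layout; the exact #define lines follow the Fprintf formats above):

	package p

	const bufSize = 64 // emitted as: #define const_bufSize 64

	type point struct { // emitted as: #define point__size 16
		x int64 // #define point_x 0
		y int64 // #define point_y 8
	}
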
diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go
index 929653ebbd..44e918f2c1 100644
--- a/src/cmd/compile/internal/gc/gen.go
+++ b/src/cmd/compile/internal/gc/gen.go
@@ -5,6 +5,8 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
@@ -28,14 +30,14 @@ func sysvar(name string) *obj.LSym {
// isParamStackCopy reports whether this is the on-stack copy of a
// function parameter that moved to the heap.
-func (n *Node) isParamStackCopy() bool {
- return n.Op == ONAME && (n.Class() == PPARAM || n.Class() == PPARAMOUT) && n.Name.Param.Heapaddr != nil
+func isParamStackCopy(n ir.Node) bool {
+ return n.Op() == ir.ONAME && (n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Name().Param.Heapaddr != nil
}
// isParamHeapCopy reports whether this is the on-heap copy of
// a function parameter that moved to the heap.
-func (n *Node) isParamHeapCopy() bool {
- return n.Op == ONAME && n.Class() == PAUTOHEAP && n.Name.Param.Stackcopy != nil
+func isParamHeapCopy(n ir.Node) bool {
+ return n.Op() == ir.ONAME && n.Class() == ir.PAUTOHEAP && n.Name().Param.Stackcopy != nil
}
// autotmpname returns the name for an autotmp variable numbered n.
@@ -50,37 +52,37 @@ func autotmpname(n int) string {
}
// make a new Node off the books
-func tempAt(pos src.XPos, curfn *Node, t *types.Type) *Node {
+func tempAt(pos src.XPos, curfn ir.Node, t *types.Type) ir.Node {
if curfn == nil {
- Fatalf("no curfn for tempAt")
+ base.Fatalf("no curfn for tempAt")
}
- if curfn.Func.Closure != nil && curfn.Op == OCLOSURE {
- Dump("tempAt", curfn)
- Fatalf("adding tempAt to wrong closure function")
+ if curfn.Op() == ir.OCLOSURE {
+ ir.Dump("tempAt", curfn)
+ base.Fatalf("adding tempAt to wrong closure function")
}
if t == nil {
- Fatalf("tempAt called with nil type")
+ base.Fatalf("tempAt called with nil type")
}
s := &types.Sym{
- Name: autotmpname(len(curfn.Func.Dcl)),
- Pkg: localpkg,
+ Name: autotmpname(len(curfn.Func().Dcl)),
+ Pkg: ir.LocalPkg,
}
- n := newnamel(pos, s)
- s.Def = asTypesNode(n)
- n.Type = t
- n.SetClass(PAUTO)
- n.Esc = EscNever
- n.Name.Curfn = curfn
- n.Name.SetUsed(true)
- n.Name.SetAutoTemp(true)
- curfn.Func.Dcl = append(curfn.Func.Dcl, n)
+ n := ir.NewNameAt(pos, s)
+ s.Def = n
+ n.SetType(t)
+ n.SetClass(ir.PAUTO)
+ n.SetEsc(EscNever)
+ n.Name().Curfn = curfn
+ n.Name().SetUsed(true)
+ n.Name().SetAutoTemp(true)
+ curfn.Func().Dcl = append(curfn.Func().Dcl, n)
dowidth(t)
- return n.Orig
+ return n.Orig()
}
-func temp(t *types.Type) *Node {
- return tempAt(lineno, Curfn, t)
+func temp(t *types.Type) ir.Node {
+ return tempAt(base.Pos, Curfn, t)
}
diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go
index c7627bddcf..0c6b81ffb7 100644
--- a/src/cmd/compile/internal/gc/go.go
+++ b/src/cmd/compile/internal/gc/go.go
@@ -5,6 +5,8 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
@@ -12,10 +14,6 @@ import (
"sync"
)
-const (
- BADWIDTH = types.BADWIDTH
-)
-
var (
// maximum size variable which we will allocate on the stack.
// This limit is for explicit variable declarations like "var x T" or "x := ...".
@@ -39,7 +37,7 @@ var (
// isRuntimePkg reports whether p is package runtime.
func isRuntimePkg(p *types.Pkg) bool {
- if compiling_runtime && p == localpkg {
+ if base.Flag.CompilingRuntime && p == ir.LocalPkg {
return true
}
return p.Path == "runtime"
@@ -47,31 +45,12 @@ func isRuntimePkg(p *types.Pkg) bool {
// isReflectPkg reports whether p is package reflect.
func isReflectPkg(p *types.Pkg) bool {
- if p == localpkg {
- return myimportpath == "reflect"
+ if p == ir.LocalPkg {
+ return base.Ctxt.Pkgpath == "reflect"
}
return p.Path == "reflect"
}
-// The Class of a variable/function describes the "storage class"
-// of a variable or function. During parsing, storage classes are
-// called declaration contexts.
-type Class uint8
-
-//go:generate stringer -type=Class
-const (
- Pxxx Class = iota // no class; used during ssa conversion to indicate pseudo-variables
- PEXTERN // global variables
- PAUTO // local variables
- PAUTOHEAP // local variables or parameters moved to heap
- PPARAM // input arguments
- PPARAMOUT // output results
- PFUNC // global functions
-
- // Careful: Class is stored in three bits in Node.flags.
- _ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3)
-)
-
// Slices in the runtime are represented by three components:
//
// type slice struct {
@@ -99,40 +78,10 @@ var (
var pragcgobuf [][]string
-var outfile string
-var linkobj string
-
-// nerrors is the number of compiler errors reported
-// since the last call to saveerrors.
-var nerrors int
-
-// nsavederrors is the total number of compiler errors
-// reported before the last call to saveerrors.
-var nsavederrors int
-
-var nsyntaxerrors int
-
var decldepth int32
var nolocalimports bool
-// gc debug flags
-type DebugFlags struct {
- P, B, C, E, G,
- K, L, N, S,
- W, e, h, j,
- l, m, r, w int
-}
-
-var Debug DebugFlags
-
-var debugstr string
-
-var Debug_checknil int
-var Debug_typeassert int
-
-var localpkg *types.Pkg // package being compiled
-
var inimport bool // set during import
var itabpkg *types.Pkg // fake pkg for itab entries
@@ -155,87 +104,53 @@ var gopkg *types.Pkg // pseudo-package for method symbols on anonymous receiver
var zerosize int64
-var myimportpath string
-
-var localimport string
-
-var asmhdr string
-
-var simtype [NTYPE]types.EType
+var simtype [types.NTYPE]types.EType
var (
- isInt [NTYPE]bool
- isFloat [NTYPE]bool
- isComplex [NTYPE]bool
- issimple [NTYPE]bool
+ isInt [types.NTYPE]bool
+ isFloat [types.NTYPE]bool
+ isComplex [types.NTYPE]bool
+ issimple [types.NTYPE]bool
)
var (
- okforeq [NTYPE]bool
- okforadd [NTYPE]bool
- okforand [NTYPE]bool
- okfornone [NTYPE]bool
- okforcmp [NTYPE]bool
- okforbool [NTYPE]bool
- okforcap [NTYPE]bool
- okforlen [NTYPE]bool
- okforarith [NTYPE]bool
- okforconst [NTYPE]bool
+ okforeq [types.NTYPE]bool
+ okforadd [types.NTYPE]bool
+ okforand [types.NTYPE]bool
+ okfornone [types.NTYPE]bool
+ okforcmp [types.NTYPE]bool
+ okforbool [types.NTYPE]bool
+ okforcap [types.NTYPE]bool
+ okforlen [types.NTYPE]bool
+ okforarith [types.NTYPE]bool
)
var (
- okfor [OEND][]bool
- iscmp [OEND]bool
+ okfor [ir.OEND][]bool
+ iscmp [ir.OEND]bool
)
-var minintval [NTYPE]*Mpint
-
-var maxintval [NTYPE]*Mpint
+var xtop []ir.Node
-var minfltval [NTYPE]*Mpflt
+var exportlist []ir.Node
-var maxfltval [NTYPE]*Mpflt
-
-var xtop []*Node
-
-var exportlist []*Node
-
-var importlist []*Node // imported functions and methods with inlinable bodies
+var importlist []ir.Node // imported functions and methods with inlinable bodies
var (
funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym)
funcsyms []*types.Sym
)
-var dclcontext Class // PEXTERN/PAUTO
+var dclcontext ir.Class // PEXTERN/PAUTO
-var Curfn *Node
+var Curfn ir.Node
var Widthptr int
var Widthreg int
-var nblank *Node
-
var typecheckok bool
-var compiling_runtime bool
-
-// Compiling the standard library
-var compiling_std bool
-
-var use_writebarrier bool
-
-var pure_go bool
-
-var flag_installsuffix string
-
-var flag_race bool
-
-var flag_msan bool
-
-var flagDWARF bool
-
// Whether we are adding any sort of code instrumentation, such as
// when the race detector is enabled.
var instrumenting bool
@@ -243,20 +158,7 @@ var instrumenting bool
// Whether we are tracking lexical scopes for DWARF.
var trackScopes bool
-// Controls generation of DWARF inlined instance records. Zero
-// disables, 1 emits inlined routines but suppresses var info,
-// and 2 emits inlined routines with tracking of formals/locals.
-var genDwarfInline int
-
-var debuglive int
-
-var Ctxt *obj.Link
-
-var writearchive bool
-
-var nodfp *Node
-
-var disable_checknil int
+var nodfp ir.Node
var autogeneratedPos src.XPos
@@ -293,7 +195,7 @@ var thearch Arch
var (
staticuint64s,
- zerobase *Node
+ zerobase ir.Node
assertE2I,
assertE2I2,
diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go
index d599a383e7..950033a8a3 100644
--- a/src/cmd/compile/internal/gc/gsubr.go
+++ b/src/cmd/compile/internal/gc/gsubr.go
@@ -31,6 +31,8 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/internal/obj"
"cmd/internal/objabi"
@@ -45,7 +47,7 @@ type Progs struct {
next *obj.Prog // next Prog
pc int64 // virtual PC; count of Progs
pos src.XPos // position to use for new Progs
- curfn *Node // fn these Progs are for
+ curfn ir.Node // fn these Progs are for
progcache []obj.Prog // local progcache
cacheidx int // first free element of progcache
@@ -55,10 +57,10 @@ type Progs struct {
// newProgs returns a new Progs for fn.
// worker indicates which of the backend workers will use the Progs.
-func newProgs(fn *Node, worker int) *Progs {
+func newProgs(fn ir.Node, worker int) *Progs {
pp := new(Progs)
- if Ctxt.CanReuseProgs() {
- sz := len(sharedProgArray) / nBackendWorkers
+ if base.Ctxt.CanReuseProgs() {
+ sz := len(sharedProgArray) / base.Flag.LowerC
pp.progcache = sharedProgArray[sz*worker : sz*(worker+1)]
}
pp.curfn = fn
@@ -67,7 +69,7 @@ func newProgs(fn *Node, worker int) *Progs {
pp.next = pp.NewProg()
pp.clearp(pp.next)
- pp.pos = fn.Pos
+ pp.pos = fn.Pos()
pp.settext(fn)
// PCDATA tables implicitly start with index -1.
pp.prevLive = LivenessIndex{-1, false}
@@ -83,19 +85,19 @@ func (pp *Progs) NewProg() *obj.Prog {
} else {
p = new(obj.Prog)
}
- p.Ctxt = Ctxt
+ p.Ctxt = base.Ctxt
return p
}
// Flush converts from pp to machine code.
func (pp *Progs) Flush() {
plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.curfn}
- obj.Flushplist(Ctxt, plist, pp.NewProg, myimportpath)
+ obj.Flushplist(base.Ctxt, plist, pp.NewProg, base.Ctxt.Pkgpath)
}
// Free clears pp and any associated resources.
func (pp *Progs) Free() {
- if Ctxt.CanReuseProgs() {
+ if base.Ctxt.CanReuseProgs() {
// Clear progs to enable GC and avoid abuse.
s := pp.progcache[:pp.cacheidx]
for i := range s {
@@ -133,8 +135,8 @@ func (pp *Progs) Prog(as obj.As) *obj.Prog {
pp.clearp(pp.next)
p.Link = pp.next
- if !pp.pos.IsKnown() && Debug.K != 0 {
- Warn("prog: unknown position (line 0)")
+ if !pp.pos.IsKnown() && base.Flag.K != 0 {
+ base.Warn("prog: unknown position (line 0)")
}
p.As = as
@@ -172,17 +174,17 @@ func (pp *Progs) Appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16
return q
}
-func (pp *Progs) settext(fn *Node) {
+func (pp *Progs) settext(fn ir.Node) {
if pp.Text != nil {
- Fatalf("Progs.settext called twice")
+ base.Fatalf("Progs.settext called twice")
}
ptxt := pp.Prog(obj.ATEXT)
pp.Text = ptxt
- fn.Func.lsym.Func().Text = ptxt
+ fn.Func().LSym.Func().Text = ptxt
ptxt.From.Type = obj.TYPE_MEM
ptxt.From.Name = obj.NAME_EXTERN
- ptxt.From.Sym = fn.Func.lsym
+ ptxt.From.Sym = fn.Func().LSym
}
// initLSym defines f's obj.LSym and initializes it based on the
@@ -191,36 +193,36 @@ func (pp *Progs) settext(fn *Node) {
//
// initLSym must be called exactly once per function and must be
// called for both functions with bodies and functions without bodies.
-func (f *Func) initLSym(hasBody bool) {
- if f.lsym != nil {
- Fatalf("Func.initLSym called twice")
+func initLSym(f *ir.Func, hasBody bool) {
+ if f.LSym != nil {
+ base.Fatalf("Func.initLSym called twice")
}
- if nam := f.Nname; !nam.isBlank() {
- f.lsym = nam.Sym.Linksym()
- if f.Pragma&Systemstack != 0 {
- f.lsym.Set(obj.AttrCFunc, true)
+ if nam := f.Nname; !ir.IsBlank(nam) {
+ f.LSym = nam.Sym().Linksym()
+ if f.Pragma&ir.Systemstack != 0 {
+ f.LSym.Set(obj.AttrCFunc, true)
}
var aliasABI obj.ABI
needABIAlias := false
- defABI, hasDefABI := symabiDefs[f.lsym.Name]
+ defABI, hasDefABI := symabiDefs[f.LSym.Name]
if hasDefABI && defABI == obj.ABI0 {
// Symbol is defined as ABI0. Create an
// Internal -> ABI0 wrapper.
- f.lsym.SetABI(obj.ABI0)
+ f.LSym.SetABI(obj.ABI0)
needABIAlias, aliasABI = true, obj.ABIInternal
} else {
// No ABI override. Check that the symbol is
// using the expected ABI.
want := obj.ABIInternal
- if f.lsym.ABI() != want {
- Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.lsym.Name, f.lsym.ABI(), want)
+ if f.LSym.ABI() != want {
+ base.Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.LSym.Name, f.LSym.ABI(), want)
}
}
- isLinknameExported := nam.Sym.Linkname != "" && (hasBody || hasDefABI)
- if abi, ok := symabiRefs[f.lsym.Name]; (ok && abi == obj.ABI0) || isLinknameExported {
+ isLinknameExported := nam.Sym().Linkname != "" && (hasBody || hasDefABI)
+ if abi, ok := symabiRefs[f.LSym.Name]; (ok && abi == obj.ABI0) || isLinknameExported {
// Either 1) this symbol is definitely
// referenced as ABI0 from this package; or 2)
// this symbol is defined in this package but
@@ -232,7 +234,7 @@ func (f *Func) initLSym(hasBody bool) {
// since other packages may "pull" symbols
// using linkname and we don't want to create
// duplicate ABI wrappers.
- if f.lsym.ABI() != obj.ABI0 {
+ if f.LSym.ABI() != obj.ABI0 {
needABIAlias, aliasABI = true, obj.ABI0
}
}
@@ -243,13 +245,13 @@ func (f *Func) initLSym(hasBody bool) {
// rather than looking them up. The uniqueness
// of f.lsym ensures uniqueness of asym.
asym := &obj.LSym{
- Name: f.lsym.Name,
+ Name: f.LSym.Name,
Type: objabi.SABIALIAS,
- R: []obj.Reloc{{Sym: f.lsym}}, // 0 size, so "informational"
+ R: []obj.Reloc{{Sym: f.LSym}}, // 0 size, so "informational"
}
asym.SetABI(aliasABI)
asym.Set(obj.AttrDuplicateOK, true)
- Ctxt.ABIAliases = append(Ctxt.ABIAliases, asym)
+ base.Ctxt.ABIAliases = append(base.Ctxt.ABIAliases, asym)
}
}
@@ -268,7 +270,7 @@ func (f *Func) initLSym(hasBody bool) {
if f.Needctxt() {
flag |= obj.NEEDCTXT
}
- if f.Pragma&Nosplit != 0 {
+ if f.Pragma&ir.Nosplit != 0 {
flag |= obj.NOSPLIT
}
if f.ReflectMethod() {
@@ -278,31 +280,31 @@ func (f *Func) initLSym(hasBody bool) {
// Clumsy but important.
// See test/recover.go for test cases and src/reflect/value.go
// for the actual functions being considered.
- if myimportpath == "reflect" {
- switch f.Nname.Sym.Name {
+ if base.Ctxt.Pkgpath == "reflect" {
+ switch f.Nname.Sym().Name {
case "callReflect", "callMethod":
flag |= obj.WRAPPER
}
}
- Ctxt.InitTextSym(f.lsym, flag)
+ base.Ctxt.InitTextSym(f.LSym, flag)
}
-func ggloblnod(nam *Node) {
- s := nam.Sym.Linksym()
+func ggloblnod(nam ir.Node) {
+ s := nam.Sym().Linksym()
s.Gotype = ngotype(nam).Linksym()
flags := 0
- if nam.Name.Readonly() {
+ if nam.Name().Readonly() {
flags = obj.RODATA
}
- if nam.Type != nil && !nam.Type.HasPointers() {
+ if nam.Type() != nil && !nam.Type().HasPointers() {
flags |= obj.NOPTR
}
- Ctxt.Globl(s, nam.Type.Width, flags)
- if nam.Name.LibfuzzerExtraCounter() {
+ base.Ctxt.Globl(s, nam.Type().Width, flags)
+ if nam.Name().LibfuzzerExtraCounter() {
s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER
}
- if nam.Sym.Linkname != "" {
+ if nam.Sym().Linkname != "" {
// Make sure linkname'd symbol is non-package. When a symbol is
// both imported and linkname'd, s.Pkg may not be set to "_" in
// types.Sym.Linksym because LSym already exists. Set it here.
@@ -315,7 +317,7 @@ func ggloblsym(s *obj.LSym, width int32, flags int16) {
s.Set(obj.AttrLocal, true)
flags &^= obj.LOCAL
}
- Ctxt.Globl(s, int64(width), int(flags))
+ base.Ctxt.Globl(s, int64(width), int(flags))
}
func Addrconst(a *obj.Addr, v int64) {
@@ -326,7 +328,7 @@ func Addrconst(a *obj.Addr, v int64) {
func Patch(p *obj.Prog, to *obj.Prog) {
if p.To.Type != obj.TYPE_BRANCH {
- Fatalf("patch: not a branch")
+ base.Fatalf("patch: not a branch")
}
p.To.SetTarget(to)
p.To.Offset = to.Pc
diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go
index 1f53d8ca7d..ef52e40f21 100644
--- a/src/cmd/compile/internal/gc/iexport.go
+++ b/src/cmd/compile/internal/gc/iexport.go
@@ -204,12 +204,15 @@ package gc
import (
"bufio"
"bytes"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/goobj"
"cmd/internal/src"
"crypto/md5"
"encoding/binary"
"fmt"
+ "go/constant"
"io"
"math/big"
"sort"
@@ -243,22 +246,21 @@ const (
)
func iexport(out *bufio.Writer) {
- // Mark inline bodies that are reachable through exported types.
+ // Mark inline bodies that are reachable through exported objects.
// (Phase 0 of bexport.go.)
{
// TODO(mdempsky): Separate from bexport logic.
p := &exporter{marked: make(map[*types.Type]bool)}
for _, n := range exportlist {
- sym := n.Sym
- p.markType(asNode(sym.Def).Type)
+ p.markObject(n)
}
}
p := iexporter{
allPkgs: map[*types.Pkg]bool{},
stringIndex: map[string]uint64{},
- declIndex: map[*Node]uint64{},
- inlineIndex: map[*Node]uint64{},
+ declIndex: map[ir.Node]uint64{},
+ inlineIndex: map[ir.Node]uint64{},
typIndex: map[*types.Type]uint64{},
}
@@ -266,7 +268,7 @@ func iexport(out *bufio.Writer) {
p.typIndex[pt] = uint64(i)
}
if len(p.typIndex) > predeclReserved {
- Fatalf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)
+ base.Fatalf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)
}
// Initialize work queue with exported declarations.
@@ -277,8 +279,8 @@ func iexport(out *bufio.Writer) {
// Loop until no more work. We use a queue because while
// writing out inline bodies, we may discover additional
// declarations that are needed.
- for !p.declTodo.empty() {
- p.doDecl(p.declTodo.popLeft())
+ for !p.declTodo.Empty() {
+ p.doDecl(p.declTodo.PopLeft())
}
// Append indices to data0 section.
@@ -304,30 +306,30 @@ func iexport(out *bufio.Writer) {
// Add fingerprint (used by linker object file).
// Attach this to the end, so tools (e.g. gcimporter) don't care.
- copy(Ctxt.Fingerprint[:], h.Sum(nil)[:])
- out.Write(Ctxt.Fingerprint[:])
+ copy(base.Ctxt.Fingerprint[:], h.Sum(nil)[:])
+ out.Write(base.Ctxt.Fingerprint[:])
}
// writeIndex writes out an object index. mainIndex indicates whether
// we're writing out the main index, which is also read by
// non-compiler tools and includes a complete package description
// (i.e., name and height).
-func (w *exportWriter) writeIndex(index map[*Node]uint64, mainIndex bool) {
+func (w *exportWriter) writeIndex(index map[ir.Node]uint64, mainIndex bool) {
// Build a map from packages to objects from that package.
- pkgObjs := map[*types.Pkg][]*Node{}
+ pkgObjs := map[*types.Pkg][]ir.Node{}
// For the main index, make sure to include every package that
// we reference, even if we're not exporting (or reexporting)
// any symbols from it.
if mainIndex {
- pkgObjs[localpkg] = nil
+ pkgObjs[ir.LocalPkg] = nil
for pkg := range w.p.allPkgs {
pkgObjs[pkg] = nil
}
}
for n := range index {
- pkgObjs[n.Sym.Pkg] = append(pkgObjs[n.Sym.Pkg], n)
+ pkgObjs[n.Sym().Pkg] = append(pkgObjs[n.Sym().Pkg], n)
}
var pkgs []*types.Pkg
@@ -335,7 +337,7 @@ func (w *exportWriter) writeIndex(index map[*Node]uint64, mainIndex bool) {
pkgs = append(pkgs, pkg)
sort.Slice(objs, func(i, j int) bool {
- return objs[i].Sym.Name < objs[j].Sym.Name
+ return objs[i].Sym().Name < objs[j].Sym().Name
})
}
@@ -354,7 +356,7 @@ func (w *exportWriter) writeIndex(index map[*Node]uint64, mainIndex bool) {
objs := pkgObjs[pkg]
w.uint64(uint64(len(objs)))
for _, n := range objs {
- w.string(n.Sym.Name)
+ w.string(n.Sym().Name)
w.uint64(index[n])
}
}
@@ -366,14 +368,14 @@ type iexporter struct {
// main index.
allPkgs map[*types.Pkg]bool
- declTodo nodeQueue
+ declTodo ir.NodeQueue
strings intWriter
stringIndex map[string]uint64
data0 intWriter
- declIndex map[*Node]uint64
- inlineIndex map[*Node]uint64
+ declIndex map[ir.Node]uint64
+ inlineIndex map[ir.Node]uint64
typIndex map[*types.Type]uint64
}
@@ -392,13 +394,13 @@ func (p *iexporter) stringOff(s string) uint64 {
}
// pushDecl adds n to the declaration work queue, if not already present.
-func (p *iexporter) pushDecl(n *Node) {
- if n.Sym == nil || asNode(n.Sym.Def) != n && n.Op != OTYPE {
- Fatalf("weird Sym: %v, %v", n, n.Sym)
+func (p *iexporter) pushDecl(n ir.Node) {
+ if n.Sym() == nil || ir.AsNode(n.Sym().Def) != n && n.Op() != ir.OTYPE {
+ base.Fatalf("weird Sym: %v, %v", n, n.Sym())
}
// Don't export predeclared declarations.
- if n.Sym.Pkg == builtinpkg || n.Sym.Pkg == unsafepkg {
+ if n.Sym().Pkg == ir.BuiltinPkg || n.Sym().Pkg == unsafepkg {
return
}
@@ -407,7 +409,7 @@ func (p *iexporter) pushDecl(n *Node) {
}
p.declIndex[n] = ^uint64(0) // mark n present in work queue
- p.declTodo.pushRight(n)
+ p.declTodo.PushRight(n)
}
// exportWriter handles writing out individual data section chunks.
@@ -421,56 +423,56 @@ type exportWriter struct {
prevColumn int64
}
-func (p *iexporter) doDecl(n *Node) {
+func (p *iexporter) doDecl(n ir.Node) {
w := p.newWriter()
- w.setPkg(n.Sym.Pkg, false)
+ w.setPkg(n.Sym().Pkg, false)
- switch n.Op {
- case ONAME:
+ switch n.Op() {
+ case ir.ONAME:
switch n.Class() {
- case PEXTERN:
+ case ir.PEXTERN:
// Variable.
w.tag('V')
- w.pos(n.Pos)
- w.typ(n.Type)
+ w.pos(n.Pos())
+ w.typ(n.Type())
w.varExt(n)
- case PFUNC:
- if n.IsMethod() {
- Fatalf("unexpected method: %v", n)
+ case ir.PFUNC:
+ if ir.IsMethod(n) {
+ base.Fatalf("unexpected method: %v", n)
}
// Function.
w.tag('F')
- w.pos(n.Pos)
- w.signature(n.Type)
+ w.pos(n.Pos())
+ w.signature(n.Type())
w.funcExt(n)
default:
- Fatalf("unexpected class: %v, %v", n, n.Class())
+ base.Fatalf("unexpected class: %v, %v", n, n.Class())
}
- case OLITERAL:
+ case ir.OLITERAL:
// Constant.
n = typecheck(n, ctxExpr)
w.tag('C')
- w.pos(n.Pos)
- w.value(n.Type, n.Val())
+ w.pos(n.Pos())
+ w.value(n.Type(), n.Val())
- case OTYPE:
- if IsAlias(n.Sym) {
+ case ir.OTYPE:
+ if IsAlias(n.Sym()) {
// Alias.
w.tag('A')
- w.pos(n.Pos)
- w.typ(n.Type)
+ w.pos(n.Pos())
+ w.typ(n.Type())
break
}
// Defined type.
w.tag('T')
- w.pos(n.Pos)
+ w.pos(n.Pos())
- underlying := n.Type.Orig
+ underlying := n.Type().Orig
if underlying == types.Errortype.Orig {
// For "type T error", use error as the
// underlying type instead of error's own
@@ -482,7 +484,7 @@ func (p *iexporter) doDecl(n *Node) {
}
w.typ(underlying)
- t := n.Type
+ t := n.Type()
if t.IsInterface() {
w.typeExt(t)
break
@@ -503,7 +505,7 @@ func (p *iexporter) doDecl(n *Node) {
}
default:
- Fatalf("unexpected node: %v", n)
+ base.Fatalf("unexpected node: %v", n)
}
p.declIndex[n] = w.flush()
@@ -513,17 +515,17 @@ func (w *exportWriter) tag(tag byte) {
w.data.WriteByte(tag)
}
-func (p *iexporter) doInline(f *Node) {
+func (p *iexporter) doInline(f ir.Node) {
w := p.newWriter()
w.setPkg(fnpkg(f), false)
- w.stmtList(asNodes(f.Func.Inl.Body))
+ w.stmtList(ir.AsNodes(f.Func().Inl.Body))
p.inlineIndex[f] = w.flush()
}
func (w *exportWriter) pos(pos src.XPos) {
- p := Ctxt.PosTable.Pos(pos)
+ p := base.Ctxt.PosTable.Pos(pos)
file := p.Base().AbsFilename()
line := int64(p.RelLine())
column := int64(p.RelCol())
@@ -568,18 +570,18 @@ func (w *exportWriter) pkg(pkg *types.Pkg) {
w.string(pkg.Path)
}
-func (w *exportWriter) qualifiedIdent(n *Node) {
+func (w *exportWriter) qualifiedIdent(n ir.Node) {
// Ensure any referenced declarations are written out too.
w.p.pushDecl(n)
- s := n.Sym
+ s := n.Sym()
w.string(s.Name)
w.pkg(s.Pkg)
}
func (w *exportWriter) selector(s *types.Sym) {
if w.currPkg == nil {
- Fatalf("missing currPkg")
+ base.Fatalf("missing currPkg")
}
// Method selectors are rewritten into method symbols (of the
@@ -591,10 +593,10 @@ func (w *exportWriter) selector(s *types.Sym) {
} else {
pkg := w.currPkg
if types.IsExported(name) {
- pkg = localpkg
+ pkg = ir.LocalPkg
}
if s.Pkg != pkg {
- Fatalf("package mismatch in selector: %v in package %q, but want %q", s, s.Pkg.Path, pkg.Path)
+ base.Fatalf("package mismatch in selector: %v in package %q, but want %q", s, s.Pkg.Path, pkg.Path)
}
}
@@ -632,8 +634,8 @@ func (w *exportWriter) startType(k itag) {
func (w *exportWriter) doTyp(t *types.Type) {
if t.Sym != nil {
- if t.Sym.Pkg == builtinpkg || t.Sym.Pkg == unsafepkg {
- Fatalf("builtin type missing from typIndex: %v", t)
+ if t.Sym.Pkg == ir.BuiltinPkg || t.Sym.Pkg == unsafepkg {
+ base.Fatalf("builtin type missing from typIndex: %v", t)
}
w.startType(definedType)
@@ -642,35 +644,35 @@ func (w *exportWriter) doTyp(t *types.Type) {
}
switch t.Etype {
- case TPTR:
+ case types.TPTR:
w.startType(pointerType)
w.typ(t.Elem())
- case TSLICE:
+ case types.TSLICE:
w.startType(sliceType)
w.typ(t.Elem())
- case TARRAY:
+ case types.TARRAY:
w.startType(arrayType)
w.uint64(uint64(t.NumElem()))
w.typ(t.Elem())
- case TCHAN:
+ case types.TCHAN:
w.startType(chanType)
w.uint64(uint64(t.ChanDir()))
w.typ(t.Elem())
- case TMAP:
+ case types.TMAP:
w.startType(mapType)
w.typ(t.Key())
w.typ(t.Elem())
- case TFUNC:
+ case types.TFUNC:
w.startType(signatureType)
w.setPkg(t.Pkg(), true)
w.signature(t)
- case TSTRUCT:
+ case types.TSTRUCT:
w.startType(structType)
w.setPkg(t.Pkg(), true)
@@ -683,7 +685,7 @@ func (w *exportWriter) doTyp(t *types.Type) {
w.string(f.Note)
}
- case TINTER:
+ case types.TINTER:
var embeddeds, methods []*types.Field
for _, m := range t.Methods().Slice() {
if m.Sym != nil {
@@ -710,7 +712,7 @@ func (w *exportWriter) doTyp(t *types.Type) {
}
default:
- Fatalf("unexpected type: %v", t)
+ base.Fatalf("unexpected type: %v", t)
}
}
@@ -718,7 +720,7 @@ func (w *exportWriter) setPkg(pkg *types.Pkg, write bool) {
if pkg == nil {
// TODO(mdempsky): Proactively set Pkg for types and
// remove this fallback logic.
- pkg = localpkg
+ pkg = ir.LocalPkg
}
if write {
@@ -745,44 +747,40 @@ func (w *exportWriter) paramList(fs []*types.Field) {
func (w *exportWriter) param(f *types.Field) {
w.pos(f.Pos)
- w.localIdent(origSym(f.Sym), 0)
+ w.localIdent(ir.OrigSym(f.Sym), 0)
w.typ(f.Type)
}
-func constTypeOf(typ *types.Type) Ctype {
+func constTypeOf(typ *types.Type) constant.Kind {
switch typ {
case types.UntypedInt, types.UntypedRune:
- return CTINT
+ return constant.Int
case types.UntypedFloat:
- return CTFLT
+ return constant.Float
case types.UntypedComplex:
- return CTCPLX
+ return constant.Complex
}
switch typ.Etype {
- case TCHAN, TFUNC, TMAP, TNIL, TINTER, TPTR, TSLICE, TUNSAFEPTR:
- return CTNIL
- case TBOOL:
- return CTBOOL
- case TSTRING:
- return CTSTR
- case TINT, TINT8, TINT16, TINT32, TINT64,
- TUINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINTPTR:
- return CTINT
- case TFLOAT32, TFLOAT64:
- return CTFLT
- case TCOMPLEX64, TCOMPLEX128:
- return CTCPLX
- }
-
- Fatalf("unexpected constant type: %v", typ)
+ case types.TBOOL:
+ return constant.Bool
+ case types.TSTRING:
+ return constant.String
+ case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64,
+ types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR:
+ return constant.Int
+ case types.TFLOAT32, types.TFLOAT64:
+ return constant.Float
+ case types.TCOMPLEX64, types.TCOMPLEX128:
+ return constant.Complex
+ }
+
+ base.Fatalf("unexpected constant type: %v", typ)
return 0
}
-func (w *exportWriter) value(typ *types.Type, v Val) {
- if vt := idealType(v.Ctype()); typ.IsUntyped() && typ != vt {
- Fatalf("exporter: untyped type mismatch, have: %v, want: %v", typ, vt)
- }
+func (w *exportWriter) value(typ *types.Type, v constant.Value) {
+ ir.AssertValidTypeForConst(typ, v)
w.typ(typ)
// Each type has only one admissible constant representation,
@@ -791,21 +789,17 @@ func (w *exportWriter) value(typ *types.Type, v Val) {
// and provides a useful consistency check.
switch constTypeOf(typ) {
- case CTNIL:
- // Only one value; nothing to encode.
- _ = v.U.(*NilVal)
- case CTBOOL:
- w.bool(v.U.(bool))
- case CTSTR:
- w.string(v.U.(string))
- case CTINT:
- w.mpint(&v.U.(*Mpint).Val, typ)
- case CTFLT:
- w.mpfloat(&v.U.(*Mpflt).Val, typ)
- case CTCPLX:
- x := v.U.(*Mpcplx)
- w.mpfloat(&x.Real.Val, typ)
- w.mpfloat(&x.Imag.Val, typ)
+ case constant.Bool:
+ w.bool(constant.BoolVal(v))
+ case constant.String:
+ w.string(constant.StringVal(v))
+ case constant.Int:
+ w.mpint(v, typ)
+ case constant.Float:
+ w.mpfloat(v, typ)
+ case constant.Complex:
+ w.mpfloat(constant.Real(v), typ)
+ w.mpfloat(constant.Imag(v), typ)
}
}
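
A quick sketch of the go/constant API that replaces the old Val/Ctype representation here:

	package main

	import (
		"fmt"
		"go/constant"
	)

	func main() {
		v := constant.MakeInt64(42)
		fmt.Println(v.Kind() == constant.Int) // true
		c := constant.ToComplex(v)
		fmt.Println(constant.Real(c), constant.Imag(c)) // 42 0
	}
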
@@ -815,9 +809,9 @@ func intSize(typ *types.Type) (signed bool, maxBytes uint) {
}
switch typ.Etype {
- case TFLOAT32, TCOMPLEX64:
+ case types.TFLOAT32, types.TCOMPLEX64:
return true, 3
- case TFLOAT64, TCOMPLEX128:
+ case types.TFLOAT64, types.TCOMPLEX128:
return true, 7
}
@@ -827,7 +821,7 @@ func intSize(typ *types.Type) (signed bool, maxBytes uint) {
// The go/types API doesn't expose sizes to importers, so they
// don't know how big these types are.
switch typ.Etype {
- case TINT, TUINT, TUINTPTR:
+ case types.TINT, types.TUINT, types.TUINTPTR:
maxBytes = 8
}
@@ -854,20 +848,24 @@ func intSize(typ *types.Type) (signed bool, maxBytes uint) {
// single byte.
//
// TODO(mdempsky): Is this level of complexity really worthwhile?
-func (w *exportWriter) mpint(x *big.Int, typ *types.Type) {
+func (w *exportWriter) mpint(x constant.Value, typ *types.Type) {
signed, maxBytes := intSize(typ)
- negative := x.Sign() < 0
+ negative := constant.Sign(x) < 0
if !signed && negative {
- Fatalf("negative unsigned integer; type %v, value %v", typ, x)
+ base.Fatalf("negative unsigned integer; type %v, value %v", typ, x)
+ }
+
+ b := constant.Bytes(x) // little endian
+ for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
+ b[i], b[j] = b[j], b[i]
}
- b := x.Bytes()
if len(b) > 0 && b[0] == 0 {
- Fatalf("leading zeros")
+ base.Fatalf("leading zeros")
}
if uint(len(b)) > maxBytes {
- Fatalf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)
+ base.Fatalf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)
}
maxSmall := 256 - maxBytes
@@ -904,7 +902,7 @@ func (w *exportWriter) mpint(x *big.Int, typ *types.Type) {
}
}
if n < maxSmall || n >= 256 {
- Fatalf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)
+ base.Fatalf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)
}
w.data.WriteByte(byte(n))
@@ -917,9 +915,10 @@ func (w *exportWriter) mpint(x *big.Int, typ *types.Type) {
// mantissa is an integer. The value is written out as mantissa (as a
// multi-precision integer) and then the exponent, except exponent is
// omitted if mantissa is zero.
-func (w *exportWriter) mpfloat(f *big.Float, typ *types.Type) {
+func (w *exportWriter) mpfloat(v constant.Value, typ *types.Type) {
+ f := bigFloatVal(v)
if f.IsInf() {
- Fatalf("infinite constant")
+ base.Fatalf("infinite constant")
}
// Break into f = mant × 2**exp, with 0.5 <= mant < 1.
@@ -933,9 +932,9 @@ func (w *exportWriter) mpfloat(f *big.Float, typ *types.Type) {
manti, acc := mant.Int(nil)
if acc != big.Exact {
- Fatalf("mantissa scaling failed for %f (%s)", f, acc)
+ base.Fatalf("mantissa scaling failed for %f (%s)", f, acc)
}
- w.mpint(manti, typ)
+ w.mpint(makeInt(manti), typ)
if manti.Sign() != 0 {
w.int64(exp)
}
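
A standalone sketch of the mantissa/exponent split performed above, using only math/big (bigFloatVal and makeInt are compiler-internal helpers): the float is decomposed as mant × 2**exp, then mant is scaled up by its own precision so it becomes an exact integer.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	f := big.NewFloat(6.25)

	// Break into f = mant × 2**exp, with 0.5 <= mant < 1.
	var mant big.Float
	exp := int64(f.MantExp(&mant)) // mant = 0.78125, exp = 3

	// Scale mant to an integer, adjusting exp to compensate.
	prec := mant.MinPrec()
	mant.SetMantExp(&mant, int(prec))
	exp -= int64(prec)

	manti, acc := mant.Int(nil)
	fmt.Println(manti, exp, acc) // 25 -2 Exact (6.25 == 25 × 2**-2)
}
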
@@ -956,37 +955,37 @@ func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) }
// Compiler-specific extensions.
-func (w *exportWriter) varExt(n *Node) {
- w.linkname(n.Sym)
- w.symIdx(n.Sym)
+func (w *exportWriter) varExt(n ir.Node) {
+ w.linkname(n.Sym())
+ w.symIdx(n.Sym())
}
-func (w *exportWriter) funcExt(n *Node) {
- w.linkname(n.Sym)
- w.symIdx(n.Sym)
+func (w *exportWriter) funcExt(n ir.Node) {
+ w.linkname(n.Sym())
+ w.symIdx(n.Sym())
// Escape analysis.
for _, fs := range &types.RecvsParams {
- for _, f := range fs(n.Type).FieldSlice() {
+ for _, f := range fs(n.Type()).FieldSlice() {
w.string(f.Note)
}
}
// Inline body.
- if n.Func.Inl != nil {
- w.uint64(1 + uint64(n.Func.Inl.Cost))
- if n.Func.ExportInline() {
+ if n.Func().Inl != nil {
+ w.uint64(1 + uint64(n.Func().Inl.Cost))
+ if n.Func().ExportInline() {
w.p.doInline(n)
}
// Endlineno for inlined function.
- if n.Name.Defn != nil {
- w.pos(n.Name.Defn.Func.Endlineno)
+ if n.Name().Defn != nil {
+ w.pos(n.Name().Defn.Func().Endlineno)
} else {
// When the exported node was defined externally,
// e.g. io exports atomic.(*Value).Load or bytes exports errors.New.
// Keep it as we don't distinguish this case in iimport.go.
- w.pos(n.Func.Endlineno)
+ w.pos(n.Func().Endlineno)
}
} else {
w.uint64(0)
@@ -995,7 +994,7 @@ func (w *exportWriter) funcExt(n *Node) {
func (w *exportWriter) methExt(m *types.Field) {
w.bool(m.Nointerface())
- w.funcExt(asNode(m.Type.Nname()))
+ w.funcExt(ir.AsNode(m.Nname))
}
func (w *exportWriter) linkname(s *types.Sym) {
@@ -1031,15 +1030,15 @@ func (w *exportWriter) typeExt(t *types.Type) {
// Inline bodies.
-func (w *exportWriter) stmtList(list Nodes) {
+func (w *exportWriter) stmtList(list ir.Nodes) {
for _, n := range list.Slice() {
w.node(n)
}
- w.op(OEND)
+ w.op(ir.OEND)
}
-func (w *exportWriter) node(n *Node) {
- if opprec[n.Op] < 0 {
+func (w *exportWriter) node(n ir.Node) {
+ if ir.OpPrec[n.Op()] < 0 {
w.stmt(n)
} else {
w.expr(n)
@@ -1048,149 +1047,149 @@ func (w *exportWriter) node(n *Node) {
// Caution: stmt will emit more than one node for statement nodes n that have a non-empty
// n.Ninit and that cannot have a natural init section (of the kind "if", "for", etc. have).
-func (w *exportWriter) stmt(n *Node) {
- if n.Ninit.Len() > 0 && !stmtwithinit(n.Op) {
+func (w *exportWriter) stmt(n ir.Node) {
+ if n.Init().Len() > 0 && !ir.StmtWithInit(n.Op()) {
// can't use stmtList here since we don't want the final OEND
- for _, n := range n.Ninit.Slice() {
+ for _, n := range n.Init().Slice() {
w.stmt(n)
}
}
- switch op := n.Op; op {
- case ODCL:
- w.op(ODCL)
- w.pos(n.Left.Pos)
- w.localName(n.Left)
- w.typ(n.Left.Type)
+ switch op := n.Op(); op {
+ case ir.ODCL:
+ w.op(ir.ODCL)
+ w.pos(n.Left().Pos())
+ w.localName(n.Left())
+ w.typ(n.Left().Type())
// case ODCLFIELD:
// unimplemented - handled by default case
- case OAS:
+ case ir.OAS:
	// Don't export "v = <N>" initializing statements; we expect they're always
	// preceded by the DCL, which will be re-parsed and typechecked to reproduce
	// the "v = <N>" again.
- if n.Right != nil {
- w.op(OAS)
- w.pos(n.Pos)
- w.expr(n.Left)
- w.expr(n.Right)
+ if n.Right() != nil {
+ w.op(ir.OAS)
+ w.pos(n.Pos())
+ w.expr(n.Left())
+ w.expr(n.Right())
}
- case OASOP:
- w.op(OASOP)
- w.pos(n.Pos)
+ case ir.OASOP:
+ w.op(ir.OASOP)
+ w.pos(n.Pos())
w.op(n.SubOp())
- w.expr(n.Left)
+ w.expr(n.Left())
if w.bool(!n.Implicit()) {
- w.expr(n.Right)
+ w.expr(n.Right())
}
- case OAS2:
- w.op(OAS2)
- w.pos(n.Pos)
- w.exprList(n.List)
- w.exprList(n.Rlist)
+ case ir.OAS2:
+ w.op(ir.OAS2)
+ w.pos(n.Pos())
+ w.exprList(n.List())
+ w.exprList(n.Rlist())
- case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
- w.op(OAS2)
- w.pos(n.Pos)
- w.exprList(n.List)
- w.exprList(asNodes([]*Node{n.Right}))
+ case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
+ w.op(ir.OAS2)
+ w.pos(n.Pos())
+ w.exprList(n.List())
+ w.exprList(ir.AsNodes([]ir.Node{n.Right()}))
- case ORETURN:
- w.op(ORETURN)
- w.pos(n.Pos)
- w.exprList(n.List)
+ case ir.ORETURN:
+ w.op(ir.ORETURN)
+ w.pos(n.Pos())
+ w.exprList(n.List())
// case ORETJMP:
	//	unreachable - generated by compiler for trampoline routines
- case OGO, ODEFER:
+ case ir.OGO, ir.ODEFER:
w.op(op)
- w.pos(n.Pos)
- w.expr(n.Left)
-
- case OIF:
- w.op(OIF)
- w.pos(n.Pos)
- w.stmtList(n.Ninit)
- w.expr(n.Left)
- w.stmtList(n.Nbody)
- w.stmtList(n.Rlist)
-
- case OFOR:
- w.op(OFOR)
- w.pos(n.Pos)
- w.stmtList(n.Ninit)
- w.exprsOrNil(n.Left, n.Right)
- w.stmtList(n.Nbody)
-
- case ORANGE:
- w.op(ORANGE)
- w.pos(n.Pos)
- w.stmtList(n.List)
- w.expr(n.Right)
- w.stmtList(n.Nbody)
-
- case OSELECT, OSWITCH:
+ w.pos(n.Pos())
+ w.expr(n.Left())
+
+ case ir.OIF:
+ w.op(ir.OIF)
+ w.pos(n.Pos())
+ w.stmtList(n.Init())
+ w.expr(n.Left())
+ w.stmtList(n.Body())
+ w.stmtList(n.Rlist())
+
+ case ir.OFOR:
+ w.op(ir.OFOR)
+ w.pos(n.Pos())
+ w.stmtList(n.Init())
+ w.exprsOrNil(n.Left(), n.Right())
+ w.stmtList(n.Body())
+
+ case ir.ORANGE:
+ w.op(ir.ORANGE)
+ w.pos(n.Pos())
+ w.stmtList(n.List())
+ w.expr(n.Right())
+ w.stmtList(n.Body())
+
+ case ir.OSELECT, ir.OSWITCH:
w.op(op)
- w.pos(n.Pos)
- w.stmtList(n.Ninit)
- w.exprsOrNil(n.Left, nil)
+ w.pos(n.Pos())
+ w.stmtList(n.Init())
+ w.exprsOrNil(n.Left(), nil)
w.caseList(n)
// case OCASE:
// handled by caseList
- case OFALL:
- w.op(OFALL)
- w.pos(n.Pos)
+ case ir.OFALL:
+ w.op(ir.OFALL)
+ w.pos(n.Pos())
- case OBREAK, OCONTINUE:
+ case ir.OBREAK, ir.OCONTINUE:
w.op(op)
- w.pos(n.Pos)
- w.exprsOrNil(n.Left, nil)
+ w.pos(n.Pos())
+ w.exprsOrNil(n.Left(), nil)
- case OEMPTY:
+ case ir.OEMPTY:
// nothing to emit
- case OGOTO, OLABEL:
+ case ir.OGOTO, ir.OLABEL:
w.op(op)
- w.pos(n.Pos)
- w.string(n.Sym.Name)
+ w.pos(n.Pos())
+ w.string(n.Sym().Name)
default:
- Fatalf("exporter: CANNOT EXPORT: %v\nPlease notify gri@\n", n.Op)
+ base.Fatalf("exporter: CANNOT EXPORT: %v\nPlease notify gri@\n", n.Op())
}
}
-func (w *exportWriter) caseList(sw *Node) {
- namedTypeSwitch := sw.Op == OSWITCH && sw.Left != nil && sw.Left.Op == OTYPESW && sw.Left.Left != nil
+func (w *exportWriter) caseList(sw ir.Node) {
+ namedTypeSwitch := sw.Op() == ir.OSWITCH && sw.Left() != nil && sw.Left().Op() == ir.OTYPESW && sw.Left().Left() != nil
- cases := sw.List.Slice()
+ cases := sw.List().Slice()
w.uint64(uint64(len(cases)))
for _, cas := range cases {
- if cas.Op != OCASE {
- Fatalf("expected OCASE, got %v", cas)
+ if cas.Op() != ir.OCASE {
+ base.Fatalf("expected OCASE, got %v", cas)
}
- w.pos(cas.Pos)
- w.stmtList(cas.List)
+ w.pos(cas.Pos())
+ w.stmtList(cas.List())
if namedTypeSwitch {
- w.localName(cas.Rlist.First())
+ w.localName(cas.Rlist().First())
}
- w.stmtList(cas.Nbody)
+ w.stmtList(cas.Body())
}
}
-func (w *exportWriter) exprList(list Nodes) {
+func (w *exportWriter) exprList(list ir.Nodes) {
for _, n := range list.Slice() {
w.expr(n)
}
- w.op(OEND)
+ w.op(ir.OEND)
}
-func (w *exportWriter) expr(n *Node) {
+func (w *exportWriter) expr(n ir.Node) {
// from nodefmt (fmt.go)
//
// nodefmt reverts nodes back to their original - we don't need to do
@@ -1201,64 +1200,70 @@ func (w *exportWriter) expr(n *Node) {
// }
// from exprfmt (fmt.go)
- for n.Op == OPAREN || n.Implicit() && (n.Op == ODEREF || n.Op == OADDR || n.Op == ODOT || n.Op == ODOTPTR) {
- n = n.Left
+ for n.Op() == ir.OPAREN || n.Implicit() && (n.Op() == ir.ODEREF || n.Op() == ir.OADDR || n.Op() == ir.ODOT || n.Op() == ir.ODOTPTR) {
+ n = n.Left()
}
- switch op := n.Op; op {
+ switch op := n.Op(); op {
// expressions
// (somewhat closely following the structure of exprfmt in fmt.go)
- case OLITERAL:
- if n.Val().Ctype() == CTNIL && n.Orig != nil && n.Orig != n {
- w.expr(n.Orig)
+ case ir.ONIL:
+ if !n.Type().HasNil() {
+ base.Fatalf("unexpected type for nil: %v", n.Type())
+ }
+ if n.Orig() != nil && n.Orig() != n {
+ w.expr(n.Orig())
break
}
- w.op(OLITERAL)
- w.pos(n.Pos)
- w.value(n.Type, n.Val())
+ w.op(ir.OLITERAL)
+ w.pos(n.Pos())
+ w.typ(n.Type())
- case ONAME:
+ case ir.OLITERAL:
+ w.op(ir.OLITERAL)
+ w.pos(n.Pos())
+ w.value(n.Type(), n.Val())
+
+ case ir.OMETHEXPR:
// Special case: explicit name of func (*T) method(...) is turned into pkg.(*T).method,
	// but for export, this should be rendered as (*pkg.T).method.
// These nodes have the special property that they are names with a left OTYPE and a right ONAME.
- if n.isMethodExpression() {
- w.op(OXDOT)
- w.pos(n.Pos)
- w.expr(n.Left) // n.Left.Op == OTYPE
- w.selector(n.Right.Sym)
- break
- }
+ w.op(ir.OXDOT)
+ w.pos(n.Pos())
+ w.expr(n.Left()) // n.Left.Op == OTYPE
+ w.selector(n.Right().Sym())
+ case ir.ONAME:
// Package scope name.
- if (n.Class() == PEXTERN || n.Class() == PFUNC) && !n.isBlank() {
- w.op(ONONAME)
+ if (n.Class() == ir.PEXTERN || n.Class() == ir.PFUNC) && !ir.IsBlank(n) {
+ w.op(ir.ONONAME)
w.qualifiedIdent(n)
break
}
// Function scope name.
- w.op(ONAME)
+ w.op(ir.ONAME)
w.localName(n)
// case OPACK, ONONAME:
// should have been resolved by typechecking - handled by default case
- case OTYPE:
- w.op(OTYPE)
- w.typ(n.Type)
+ case ir.OTYPE:
+ w.op(ir.OTYPE)
+ w.typ(n.Type())
- case OTYPESW:
- w.op(OTYPESW)
- w.pos(n.Pos)
+ case ir.OTYPESW:
+ w.op(ir.OTYPESW)
+ w.pos(n.Pos())
var s *types.Sym
- if n.Left != nil {
- if n.Left.Op != ONONAME {
- Fatalf("expected ONONAME, got %v", n.Left)
+ if n.Left() != nil {
+ if n.Left().Op() != ir.ONONAME {
+ base.Fatalf("expected ONONAME, got %v", n.Left())
}
- s = n.Left.Sym
+ s = n.Left().Sym()
}
w.localIdent(s, 0) // declared pseudo-variable, if any
- w.exprsOrNil(n.Right, nil)
+ w.exprsOrNil(n.Right(), nil)
// case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
// should have been resolved by typechecking - handled by default case
@@ -1269,163 +1274,163 @@ func (w *exportWriter) expr(n *Node) {
// case OCOMPLIT:
// should have been resolved by typechecking - handled by default case
- case OPTRLIT:
- w.op(OADDR)
- w.pos(n.Pos)
- w.expr(n.Left)
+ case ir.OPTRLIT:
+ w.op(ir.OADDR)
+ w.pos(n.Pos())
+ w.expr(n.Left())
- case OSTRUCTLIT:
- w.op(OSTRUCTLIT)
- w.pos(n.Pos)
- w.typ(n.Type)
- w.elemList(n.List) // special handling of field names
+ case ir.OSTRUCTLIT:
+ w.op(ir.OSTRUCTLIT)
+ w.pos(n.Pos())
+ w.typ(n.Type())
+ w.elemList(n.List()) // special handling of field names
- case OARRAYLIT, OSLICELIT, OMAPLIT:
- w.op(OCOMPLIT)
- w.pos(n.Pos)
- w.typ(n.Type)
- w.exprList(n.List)
+ case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT:
+ w.op(ir.OCOMPLIT)
+ w.pos(n.Pos())
+ w.typ(n.Type())
+ w.exprList(n.List())
- case OKEY:
- w.op(OKEY)
- w.pos(n.Pos)
- w.exprsOrNil(n.Left, n.Right)
+ case ir.OKEY:
+ w.op(ir.OKEY)
+ w.pos(n.Pos())
+ w.exprsOrNil(n.Left(), n.Right())
// case OSTRUCTKEY:
// unreachable - handled in case OSTRUCTLIT by elemList
- case OCALLPART:
+ case ir.OCALLPART:
// An OCALLPART is an OXDOT before type checking.
- w.op(OXDOT)
- w.pos(n.Pos)
- w.expr(n.Left)
+ w.op(ir.OXDOT)
+ w.pos(n.Pos())
+ w.expr(n.Left())
// Right node should be ONAME
- w.selector(n.Right.Sym)
-
- case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
- w.op(OXDOT)
- w.pos(n.Pos)
- w.expr(n.Left)
- w.selector(n.Sym)
-
- case ODOTTYPE, ODOTTYPE2:
- w.op(ODOTTYPE)
- w.pos(n.Pos)
- w.expr(n.Left)
- w.typ(n.Type)
-
- case OINDEX, OINDEXMAP:
- w.op(OINDEX)
- w.pos(n.Pos)
- w.expr(n.Left)
- w.expr(n.Right)
-
- case OSLICE, OSLICESTR, OSLICEARR:
- w.op(OSLICE)
- w.pos(n.Pos)
- w.expr(n.Left)
+ w.selector(n.Right().Sym())
+
+ case ir.OXDOT, ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH:
+ w.op(ir.OXDOT)
+ w.pos(n.Pos())
+ w.expr(n.Left())
+ w.selector(n.Sym())
+
+ case ir.ODOTTYPE, ir.ODOTTYPE2:
+ w.op(ir.ODOTTYPE)
+ w.pos(n.Pos())
+ w.expr(n.Left())
+ w.typ(n.Type())
+
+ case ir.OINDEX, ir.OINDEXMAP:
+ w.op(ir.OINDEX)
+ w.pos(n.Pos())
+ w.expr(n.Left())
+ w.expr(n.Right())
+
+ case ir.OSLICE, ir.OSLICESTR, ir.OSLICEARR:
+ w.op(ir.OSLICE)
+ w.pos(n.Pos())
+ w.expr(n.Left())
low, high, _ := n.SliceBounds()
w.exprsOrNil(low, high)
- case OSLICE3, OSLICE3ARR:
- w.op(OSLICE3)
- w.pos(n.Pos)
- w.expr(n.Left)
+ case ir.OSLICE3, ir.OSLICE3ARR:
+ w.op(ir.OSLICE3)
+ w.pos(n.Pos())
+ w.expr(n.Left())
low, high, max := n.SliceBounds()
w.exprsOrNil(low, high)
w.expr(max)
- case OCOPY, OCOMPLEX:
+ case ir.OCOPY, ir.OCOMPLEX:
// treated like other builtin calls (see e.g., OREAL)
w.op(op)
- w.pos(n.Pos)
- w.expr(n.Left)
- w.expr(n.Right)
- w.op(OEND)
-
- case OCONV, OCONVIFACE, OCONVNOP, OBYTES2STR, ORUNES2STR, OSTR2BYTES, OSTR2RUNES, ORUNESTR:
- w.op(OCONV)
- w.pos(n.Pos)
- w.expr(n.Left)
- w.typ(n.Type)
-
- case OREAL, OIMAG, OAPPEND, OCAP, OCLOSE, ODELETE, OLEN, OMAKE, ONEW, OPANIC, ORECOVER, OPRINT, OPRINTN:
+ w.pos(n.Pos())
+ w.expr(n.Left())
+ w.expr(n.Right())
+ w.op(ir.OEND)
+
+ case ir.OCONV, ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2RUNES, ir.ORUNESTR:
+ w.op(ir.OCONV)
+ w.pos(n.Pos())
+ w.expr(n.Left())
+ w.typ(n.Type())
+
+ case ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCAP, ir.OCLOSE, ir.ODELETE, ir.OLEN, ir.OMAKE, ir.ONEW, ir.OPANIC, ir.ORECOVER, ir.OPRINT, ir.OPRINTN:
w.op(op)
- w.pos(n.Pos)
- if n.Left != nil {
- w.expr(n.Left)
- w.op(OEND)
+ w.pos(n.Pos())
+ if n.Left() != nil {
+ w.expr(n.Left())
+ w.op(ir.OEND)
} else {
- w.exprList(n.List) // emits terminating OEND
+ w.exprList(n.List()) // emits terminating OEND
}
// only append() calls may contain '...' arguments
- if op == OAPPEND {
+ if op == ir.OAPPEND {
w.bool(n.IsDDD())
} else if n.IsDDD() {
- Fatalf("exporter: unexpected '...' with %v call", op)
+ base.Fatalf("exporter: unexpected '...' with %v call", op)
}
- case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER, OGETG:
- w.op(OCALL)
- w.pos(n.Pos)
- w.stmtList(n.Ninit)
- w.expr(n.Left)
- w.exprList(n.List)
+ case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OGETG:
+ w.op(ir.OCALL)
+ w.pos(n.Pos())
+ w.stmtList(n.Init())
+ w.expr(n.Left())
+ w.exprList(n.List())
w.bool(n.IsDDD())
- case OMAKEMAP, OMAKECHAN, OMAKESLICE:
+ case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE:
w.op(op) // must keep separate from OMAKE for importer
- w.pos(n.Pos)
- w.typ(n.Type)
+ w.pos(n.Pos())
+ w.typ(n.Type())
switch {
default:
// empty list
- w.op(OEND)
- case n.List.Len() != 0: // pre-typecheck
- w.exprList(n.List) // emits terminating OEND
- case n.Right != nil:
- w.expr(n.Left)
- w.expr(n.Right)
- w.op(OEND)
- case n.Left != nil && (n.Op == OMAKESLICE || !n.Left.Type.IsUntyped()):
- w.expr(n.Left)
- w.op(OEND)
+ w.op(ir.OEND)
+ case n.List().Len() != 0: // pre-typecheck
+ w.exprList(n.List()) // emits terminating OEND
+ case n.Right() != nil:
+ w.expr(n.Left())
+ w.expr(n.Right())
+ w.op(ir.OEND)
+ case n.Left() != nil && (n.Op() == ir.OMAKESLICE || !n.Left().Type().IsUntyped()):
+ w.expr(n.Left())
+ w.op(ir.OEND)
}
// unary expressions
- case OPLUS, ONEG, OADDR, OBITNOT, ODEREF, ONOT, ORECV:
+ case ir.OPLUS, ir.ONEG, ir.OADDR, ir.OBITNOT, ir.ODEREF, ir.ONOT, ir.ORECV:
w.op(op)
- w.pos(n.Pos)
- w.expr(n.Left)
+ w.pos(n.Pos())
+ w.expr(n.Left())
// binary expressions
- case OADD, OAND, OANDAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, OLT,
- OLSH, OMOD, OMUL, ONE, OOR, OOROR, ORSH, OSEND, OSUB, OXOR:
+ case ir.OADD, ir.OAND, ir.OANDAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT,
+ ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.OOROR, ir.ORSH, ir.OSEND, ir.OSUB, ir.OXOR:
w.op(op)
- w.pos(n.Pos)
- w.expr(n.Left)
- w.expr(n.Right)
+ w.pos(n.Pos())
+ w.expr(n.Left())
+ w.expr(n.Right())
- case OADDSTR:
- w.op(OADDSTR)
- w.pos(n.Pos)
- w.exprList(n.List)
+ case ir.OADDSTR:
+ w.op(ir.OADDSTR)
+ w.pos(n.Pos())
+ w.exprList(n.List())
- case ODCLCONST:
+ case ir.ODCLCONST:
// if exporting, DCLCONST should just be removed as its usage
// has already been replaced with literals
default:
- Fatalf("cannot export %v (%d) node\n"+
- "\t==> please file an issue and assign to gri@", n.Op, int(n.Op))
+ base.Fatalf("cannot export %v (%d) node\n"+
+ "\t==> please file an issue and assign to gri@", n.Op(), int(n.Op()))
}
}
-func (w *exportWriter) op(op Op) {
+func (w *exportWriter) op(op ir.Op) {
w.uint64(uint64(op))
}
-func (w *exportWriter) exprsOrNil(a, b *Node) {
+func (w *exportWriter) exprsOrNil(a, b ir.Node) {
ab := 0
if a != nil {
ab |= 1
@@ -1442,15 +1447,15 @@ func (w *exportWriter) exprsOrNil(a, b *Node) {
}
}
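
exprsOrNil packs the presence of its two optional operands into a two-bit mask, so nil operands cost one bit each rather than a sentinel node; the matching reader appears in iimport.go further down. A toy round-trip over ints illustrating the scheme:

package main

import "fmt"

// encode writes a two-bit presence mask, then only the present operands.
func encode(a, b *int) (mask uint64, payload []int) {
	if a != nil {
		mask |= 1
		payload = append(payload, *a)
	}
	if b != nil {
		mask |= 2
		payload = append(payload, *b)
	}
	return
}

// decode reverses encode, leaving absent operands nil.
func decode(mask uint64, payload []int) (a, b *int) {
	if mask&1 != 0 {
		a, payload = &payload[0], payload[1:]
	}
	if mask&2 != 0 {
		b = &payload[0]
	}
	return
}

func main() {
	x := 7
	mask, payload := encode(&x, nil)
	a, b := decode(mask, payload)
	fmt.Println(mask, *a, b) // 1 7 <nil>
}
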
-func (w *exportWriter) elemList(list Nodes) {
+func (w *exportWriter) elemList(list ir.Nodes) {
w.uint64(uint64(list.Len()))
for _, n := range list.Slice() {
- w.selector(n.Sym)
- w.expr(n.Left)
+ w.selector(n.Sym())
+ w.expr(n.Left())
}
}
-func (w *exportWriter) localName(n *Node) {
+func (w *exportWriter) localName(n ir.Node) {
// Escape analysis happens after inline bodies are saved, but
// we're using the same ONAME nodes, so we might still see
// PAUTOHEAP here.
@@ -1459,11 +1464,11 @@ func (w *exportWriter) localName(n *Node) {
// PPARAM/PPARAMOUT, because we only want to include vargen in
// non-param names.
var v int32
- if n.Class() == PAUTO || (n.Class() == PAUTOHEAP && n.Name.Param.Stackcopy == nil) {
- v = n.Name.Vargen
+ if n.Class() == ir.PAUTO || (n.Class() == ir.PAUTOHEAP && n.Name().Param.Stackcopy == nil) {
+ v = n.Name().Vargen
}
- w.localIdent(n.Sym, v)
+ w.localIdent(n.Sym(), v)
}
func (w *exportWriter) localIdent(s *types.Sym, v int32) {
@@ -1481,18 +1486,18 @@ func (w *exportWriter) localIdent(s *types.Sym, v int32) {
// TODO(mdempsky): Fix autotmp hack.
if i := strings.LastIndex(name, "."); i >= 0 && !strings.HasPrefix(name, ".autotmp_") {
- Fatalf("unexpected dot in identifier: %v", name)
+ base.Fatalf("unexpected dot in identifier: %v", name)
}
if v > 0 {
if strings.Contains(name, "·") {
- Fatalf("exporter: unexpected · in symbol name")
+ base.Fatalf("exporter: unexpected · in symbol name")
}
name = fmt.Sprintf("%s·%d", name, v)
}
if !types.IsExported(name) && s.Pkg != w.currPkg {
- Fatalf("weird package in name: %v => %v, not %q", s, name, w.currPkg.Path)
+ base.Fatalf("weird package in name: %v => %v, not %q", s, name, w.currPkg.Path)
}
w.string(name)
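
The middle-dot suffix above qualifies non-parameter locals by their vargen, so that two same-named variables in one function stay distinct in the export data; since `·` cannot appear in ordinary Go identifiers, the mangled names cannot collide with source-level ones. A tiny illustration (the names here are hypothetical):

package main

import "fmt"

func main() {
	// Two shadowed locals both named "x", with vargen 1 and 2.
	for v := int32(1); v <= 2; v++ {
		fmt.Printf("%s·%d\n", "x", v) // x·1, then x·2
	}
}
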
diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go
index c0114d0e53..77078c118a 100644
--- a/src/cmd/compile/internal/gc/iimport.go
+++ b/src/cmd/compile/internal/gc/iimport.go
@@ -8,6 +8,8 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/bio"
"cmd/internal/goobj"
@@ -15,6 +17,7 @@ import (
"cmd/internal/src"
"encoding/binary"
"fmt"
+ "go/constant"
"io"
"math/big"
"os"
@@ -38,8 +41,8 @@ var (
inlineImporter = map[*types.Sym]iimporterAndOffset{}
)
-func expandDecl(n *Node) {
- if n.Op != ONONAME {
+func expandDecl(n ir.Node) {
+ if n.Op() != ir.ONONAME {
return
}
@@ -52,26 +55,26 @@ func expandDecl(n *Node) {
r.doDecl(n)
}
-func expandInline(fn *Node) {
- if fn.Func.Inl.Body != nil {
+func expandInline(fn ir.Node) {
+ if fn.Func().Inl.Body != nil {
return
}
r := importReaderFor(fn, inlineImporter)
if r == nil {
- Fatalf("missing import reader for %v", fn)
+ base.Fatalf("missing import reader for %v", fn)
}
r.doInline(fn)
}
-func importReaderFor(n *Node, importers map[*types.Sym]iimporterAndOffset) *importReader {
- x, ok := importers[n.Sym]
+func importReaderFor(n ir.Node, importers map[*types.Sym]iimporterAndOffset) *importReader {
+ x, ok := importers[n.Sym()]
if !ok {
return nil
}
- return x.p.newReader(x.off, n.Sym.Pkg)
+ return x.p.newReader(x.off, n.Sym().Pkg)
}
type intReader struct {
@@ -82,8 +85,8 @@ type intReader struct {
func (r *intReader) int64() int64 {
i, err := binary.ReadVarint(r.Reader)
if err != nil {
- yyerror("import %q: read error: %v", r.pkg.Path, err)
- errorexit()
+ base.Errorf("import %q: read error: %v", r.pkg.Path, err)
+ base.ErrorExit()
}
return i
}
@@ -91,31 +94,31 @@ func (r *intReader) int64() int64 {
func (r *intReader) uint64() uint64 {
i, err := binary.ReadUvarint(r.Reader)
if err != nil {
- yyerror("import %q: read error: %v", r.pkg.Path, err)
- errorexit()
+ base.Errorf("import %q: read error: %v", r.pkg.Path, err)
+ base.ErrorExit()
}
return i
}
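
Both intReader methods are thin wrappers around encoding/binary's varint routines. A round-trip sketch (bytes.Buffer supplies the io.ByteReader these functions require):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	var buf bytes.Buffer

	// Write one signed and one unsigned varint.
	tmp := make([]byte, binary.MaxVarintLen64)
	buf.Write(tmp[:binary.PutVarint(tmp, -300)])
	buf.Write(tmp[:binary.PutUvarint(tmp, 300)])

	// Read them back in the same order.
	i, _ := binary.ReadVarint(&buf)
	u, _ := binary.ReadUvarint(&buf)
	fmt.Println(i, u) // -300 300
}
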
func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) {
- ir := &intReader{in, pkg}
+ ird := &intReader{in, pkg}
- version := ir.uint64()
+ version := ird.uint64()
if version != iexportVersion {
- yyerror("import %q: unknown export format version %d", pkg.Path, version)
- errorexit()
+ base.Errorf("import %q: unknown export format version %d", pkg.Path, version)
+ base.ErrorExit()
}
- sLen := ir.uint64()
- dLen := ir.uint64()
+ sLen := ird.uint64()
+ dLen := ird.uint64()
// Map string (and data) section into memory as a single large
// string. This reduces heap fragmentation and allows
// returning individual substrings very efficiently.
data, err := mapFile(in.File(), in.Offset(), int64(sLen+dLen))
if err != nil {
- yyerror("import %q: mapping input: %v", pkg.Path, err)
- errorexit()
+ base.Errorf("import %q: mapping input: %v", pkg.Path, err)
+ base.ErrorExit()
}
stringData := data[:sLen]
declData := data[sLen:]
@@ -138,29 +141,29 @@ func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType)
}
// Declaration index.
- for nPkgs := ir.uint64(); nPkgs > 0; nPkgs-- {
- pkg := p.pkgAt(ir.uint64())
- pkgName := p.stringAt(ir.uint64())
- pkgHeight := int(ir.uint64())
+ for nPkgs := ird.uint64(); nPkgs > 0; nPkgs-- {
+ pkg := p.pkgAt(ird.uint64())
+ pkgName := p.stringAt(ird.uint64())
+ pkgHeight := int(ird.uint64())
if pkg.Name == "" {
pkg.Name = pkgName
pkg.Height = pkgHeight
- numImport[pkgName]++
+ ir.NumImport[pkgName]++
// TODO(mdempsky): This belongs somewhere else.
- pkg.Lookup("_").Def = asTypesNode(nblank)
+ pkg.Lookup("_").Def = ir.BlankNode
} else {
if pkg.Name != pkgName {
- Fatalf("conflicting package names %v and %v for path %q", pkg.Name, pkgName, pkg.Path)
+ base.Fatalf("conflicting package names %v and %v for path %q", pkg.Name, pkgName, pkg.Path)
}
if pkg.Height != pkgHeight {
- Fatalf("conflicting package heights %v and %v for path %q", pkg.Height, pkgHeight, pkg.Path)
+ base.Fatalf("conflicting package heights %v and %v for path %q", pkg.Height, pkgHeight, pkg.Path)
}
}
- for nSyms := ir.uint64(); nSyms > 0; nSyms-- {
- s := pkg.Lookup(p.stringAt(ir.uint64()))
- off := ir.uint64()
+ for nSyms := ird.uint64(); nSyms > 0; nSyms-- {
+ s := pkg.Lookup(p.stringAt(ird.uint64()))
+ off := ird.uint64()
if _, ok := declImporter[s]; ok {
continue
@@ -170,19 +173,19 @@ func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType)
// Create stub declaration. If used, this will
// be overwritten by expandDecl.
if s.Def != nil {
- Fatalf("unexpected definition for %v: %v", s, asNode(s.Def))
+ base.Fatalf("unexpected definition for %v: %v", s, ir.AsNode(s.Def))
}
- s.Def = asTypesNode(npos(src.NoXPos, dclname(s)))
+ s.Def = npos(src.NoXPos, dclname(s))
}
}
// Inline body index.
- for nPkgs := ir.uint64(); nPkgs > 0; nPkgs-- {
- pkg := p.pkgAt(ir.uint64())
+ for nPkgs := ird.uint64(); nPkgs > 0; nPkgs-- {
+ pkg := p.pkgAt(ird.uint64())
- for nSyms := ir.uint64(); nSyms > 0; nSyms-- {
- s := pkg.Lookup(p.stringAt(ir.uint64()))
- off := ir.uint64()
+ for nSyms := ird.uint64(); nSyms > 0; nSyms-- {
+ s := pkg.Lookup(p.stringAt(ird.uint64()))
+ off := ird.uint64()
if _, ok := inlineImporter[s]; ok {
continue
@@ -194,8 +197,8 @@ func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType)
// Fingerprint.
_, err = io.ReadFull(in, fingerprint[:])
if err != nil {
- yyerror("import %s: error reading fingerprint", pkg.Path)
- errorexit()
+ base.Errorf("import %s: error reading fingerprint", pkg.Path)
+ base.ErrorExit()
}
return fingerprint
}
@@ -217,7 +220,7 @@ func (p *iimporter) stringAt(off uint64) string {
slen, n := binary.Uvarint(x[:n])
if n <= 0 {
- Fatalf("varint failed")
+ base.Fatalf("varint failed")
}
spos := off + uint64(n)
return p.stringData[spos : spos+slen]
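
stringAt reads a uvarint length and then re-slices the mapped region, so a substring shares the mapping rather than being copied. A standalone sketch of the same layout:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// A region holding the uvarint length 5 followed by string bytes.
	buf := make([]byte, binary.MaxVarintLen64)
	region := append(buf[:binary.PutUvarint(buf, 5)], "hello world"...)

	// stringAt-style decode: read the length, then re-slice the region.
	slen, n := binary.Uvarint(region)
	end := n + int(slen)
	fmt.Println(string(region[n:end])) // "hello"
}
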
@@ -278,9 +281,9 @@ func (r *importReader) setPkg() {
r.currPkg = r.pkg()
}
-func (r *importReader) doDecl(n *Node) {
- if n.Op != ONONAME {
- Fatalf("doDecl: unexpected Op for %v: %v", n.Sym, n.Op)
+func (r *importReader) doDecl(n ir.Node) {
+ if n.Op() != ir.ONONAME {
+ base.Fatalf("doDecl: unexpected Op for %v: %v", n.Sym(), n.Op())
}
tag := r.byte()
@@ -290,23 +293,24 @@ func (r *importReader) doDecl(n *Node) {
case 'A':
typ := r.typ()
- importalias(r.p.ipkg, pos, n.Sym, typ)
+ importalias(r.p.ipkg, pos, n.Sym(), typ)
case 'C':
- typ, val := r.value()
+ typ := r.typ()
+ val := r.value(typ)
- importconst(r.p.ipkg, pos, n.Sym, typ, val)
+ importconst(r.p.ipkg, pos, n.Sym(), typ, val)
case 'F':
typ := r.signature(nil)
- importfunc(r.p.ipkg, pos, n.Sym, typ)
+ importfunc(r.p.ipkg, pos, n.Sym(), typ)
r.funcExt(n)
case 'T':
// Types can be recursive. We need to setup a stub
// declaration before recursing.
- t := importtype(r.p.ipkg, pos, n.Sym)
+ t := importtype(r.p.ipkg, pos, n.Sym())
// We also need to defer width calculations until
// after the underlying type has been assigned.
@@ -327,23 +331,14 @@ func (r *importReader) doDecl(n *Node) {
recv := r.param()
mtyp := r.signature(recv)
- f := types.NewField()
- f.Pos = mpos
- f.Sym = msym
- f.Type = mtyp
- ms[i] = f
-
- m := newfuncnamel(mpos, methodSym(recv.Type, msym))
- m.Type = mtyp
- m.SetClass(PFUNC)
+ m := newfuncnamel(mpos, methodSym(recv.Type, msym), new(ir.Func))
+ m.SetType(mtyp)
+ m.SetClass(ir.PFUNC)
// methodSym already marked m.Sym as a function.
- // (comment from parser.go)
- // inl.C's inlnode in on a dotmeth node expects to find the inlineable body as
- // (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled
- // out by typecheck's lookdot as this $$.ttype. So by providing
- // this back link here we avoid special casing there.
- mtyp.SetNname(asTypesNode(m))
+ f := types.NewField(mpos, msym, mtyp)
+ f.Nname = m
+ ms[i] = f
}
t.Methods().Set(ms)
@@ -355,40 +350,32 @@ func (r *importReader) doDecl(n *Node) {
case 'V':
typ := r.typ()
- importvar(r.p.ipkg, pos, n.Sym, typ)
+ importvar(r.p.ipkg, pos, n.Sym(), typ)
r.varExt(n)
default:
- Fatalf("unexpected tag: %v", tag)
+ base.Fatalf("unexpected tag: %v", tag)
}
}
-func (p *importReader) value() (typ *types.Type, v Val) {
- typ = p.typ()
-
+func (p *importReader) value(typ *types.Type) constant.Value {
switch constTypeOf(typ) {
- case CTNIL:
- v.U = &NilVal{}
- case CTBOOL:
- v.U = p.bool()
- case CTSTR:
- v.U = p.string()
- case CTINT:
- x := new(Mpint)
- x.Rune = typ == types.UntypedRune
- p.mpint(&x.Val, typ)
- v.U = x
- case CTFLT:
- x := newMpflt()
- p.float(x, typ)
- v.U = x
- case CTCPLX:
- x := newMpcmplx()
- p.float(&x.Real, typ)
- p.float(&x.Imag, typ)
- v.U = x
+ case constant.Bool:
+ return constant.MakeBool(p.bool())
+ case constant.String:
+ return constant.MakeString(p.string())
+ case constant.Int:
+ var i big.Int
+ p.mpint(&i, typ)
+ return makeInt(&i)
+ case constant.Float:
+ return p.float(typ)
+ case constant.Complex:
+ return makeComplex(p.float(typ), p.float(typ))
}
- return
+
+ base.Fatalf("unexpected value type: %v", typ)
+ panic("unreachable")
}
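
makeInt and makeComplex above are compiler-internal helpers; go/constant's public API has close analogues. A hedged sketch of building the same values directly:

package main

import (
	"fmt"
	"go/constant"
	"go/token"
	"math/big"
)

func main() {
	// An Int value from a big.Int, as makeInt does.
	i := constant.Make(new(big.Int).SetInt64(1 << 40))
	fmt.Println(i) // 1099511627776

	// A Complex value assembled from two real parts, as makeComplex does.
	re := constant.MakeFloat64(1.5)
	im := constant.MakeFloat64(-2)
	c := constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
	fmt.Println(c) // prints something like (1.5 + -2i)
}
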
func (p *importReader) mpint(x *big.Int, typ *types.Type) {
@@ -420,7 +407,7 @@ func (p *importReader) mpint(x *big.Int, typ *types.Type) {
v = -(n &^ 1) >> 1
}
if v < 1 || uint(v) > maxBytes {
- Fatalf("weird decoding: %v, %v => %v", n, signed, v)
+ base.Fatalf("weird decoding: %v, %v => %v", n, signed, v)
}
b := make([]byte, v)
p.Read(b)
@@ -430,14 +417,15 @@ func (p *importReader) mpint(x *big.Int, typ *types.Type) {
}
}
-func (p *importReader) float(x *Mpflt, typ *types.Type) {
+func (p *importReader) float(typ *types.Type) constant.Value {
var mant big.Int
p.mpint(&mant, typ)
- m := x.Val.SetInt(&mant)
- if m.Sign() == 0 {
- return
+ var f big.Float
+ f.SetInt(&mant)
+ if f.Sign() != 0 {
+ f.SetMantExp(&f, int(p.int64()))
}
- m.SetMantExp(m, int(p.int64()))
+ return constant.Make(&f)
}
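
This is the inverse of the exporter's mpfloat: read the mantissa back as an integer, then scale it by the stored exponent. Continuing the export-side example (mantissa 25, exponent -2):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	var mant big.Int
	mant.SetInt64(25)

	var f big.Float
	f.SetInt(&mant)      // f = 25
	f.SetMantExp(&f, -2) // f = 25 × 2**-2
	fmt.Println(&f)      // 6.25
}
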
func (r *importReader) ident() *types.Sym {
@@ -447,7 +435,7 @@ func (r *importReader) ident() *types.Sym {
}
pkg := r.currPkg
if types.IsExported(name) {
- pkg = localpkg
+ pkg = ir.LocalPkg
}
return pkg.Lookup(name)
}
@@ -476,10 +464,10 @@ func (r *importReader) pos() src.XPos {
}
if r.prevBase == nil {
- Fatalf("missing posbase")
+ base.Fatalf("missing posbase")
}
pos := src.MakePos(r.prevBase, uint(r.prevLine), uint(r.prevColumn))
- return Ctxt.PosTable.XPos(pos)
+ return base.Ctxt.PosTable.XPos(pos)
}
func (r *importReader) typ() *types.Type {
@@ -490,7 +478,7 @@ func (p *iimporter) typAt(off uint64) *types.Type {
t, ok := p.typCache[off]
if !ok {
if off < predeclReserved {
- Fatalf("predeclared type missing from cache: %d", off)
+ base.Fatalf("predeclared type missing from cache: %d", off)
}
t = p.newReader(off-predeclReserved, nil).typ1()
p.typCache[off] = t
@@ -501,7 +489,7 @@ func (p *iimporter) typAt(off uint64) *types.Type {
func (r *importReader) typ1() *types.Type {
switch k := r.kind(); k {
default:
- Fatalf("unexpected kind tag in %q: %v", r.p.ipkg.Path, k)
+ base.Fatalf("unexpected kind tag in %q: %v", r.p.ipkg.Path, k)
return nil
case definedType:
@@ -511,14 +499,14 @@ func (r *importReader) typ1() *types.Type {
// support inlining functions with local defined
// types. Therefore, this must be a package-scope
// type.
- n := asNode(r.qualifiedIdent().PkgDef())
- if n.Op == ONONAME {
+ n := ir.AsNode(r.qualifiedIdent().PkgDef())
+ if n.Op() == ir.ONONAME {
expandDecl(n)
}
- if n.Op != OTYPE {
- Fatalf("expected OTYPE, got %v: %v, %v", n.Op, n.Sym, n)
+ if n.Op() != ir.OTYPE {
+ base.Fatalf("expected OTYPE, got %v: %v, %v", n.Op(), n.Sym(), n)
}
- return n.Type
+ return n.Type()
case pointerType:
return types.NewPtr(r.typ())
case sliceType:
@@ -547,10 +535,7 @@ func (r *importReader) typ1() *types.Type {
emb := r.bool()
note := r.string()
- f := types.NewField()
- f.Pos = pos
- f.Sym = sym
- f.Type = typ
+ f := types.NewField(pos, sym, typ)
if emb {
f.Embedded = 1
}
@@ -558,7 +543,7 @@ func (r *importReader) typ1() *types.Type {
fs[i] = f
}
- t := types.New(TSTRUCT)
+ t := types.New(types.TSTRUCT)
t.SetPkg(r.currPkg)
t.SetFields(fs)
return t
@@ -571,10 +556,7 @@ func (r *importReader) typ1() *types.Type {
pos := r.pos()
typ := r.typ()
- f := types.NewField()
- f.Pos = pos
- f.Type = typ
- embeddeds[i] = f
+ embeddeds[i] = types.NewField(pos, nil, typ)
}
methods := make([]*types.Field, r.uint64())
@@ -583,14 +565,10 @@ func (r *importReader) typ1() *types.Type {
sym := r.ident()
typ := r.signature(fakeRecvField())
- f := types.NewField()
- f.Pos = pos
- f.Sym = sym
- f.Type = typ
- methods[i] = f
+ methods[i] = types.NewField(pos, sym, typ)
}
- t := types.New(TINTER)
+ t := types.New(types.TINTER)
t.SetPkg(r.currPkg)
t.SetInterface(append(embeddeds, methods...))
@@ -624,11 +602,7 @@ func (r *importReader) paramList() []*types.Field {
}
func (r *importReader) param() *types.Field {
- f := types.NewField()
- f.Pos = r.pos()
- f.Sym = r.ident()
- f.Type = r.typ()
- return f
+ return types.NewField(r.pos(), r.ident(), r.typ())
}
func (r *importReader) bool() bool {
@@ -638,7 +612,7 @@ func (r *importReader) bool() bool {
func (r *importReader) int64() int64 {
n, err := binary.ReadVarint(r)
if err != nil {
- Fatalf("readVarint: %v", err)
+ base.Fatalf("readVarint: %v", err)
}
return n
}
@@ -646,7 +620,7 @@ func (r *importReader) int64() int64 {
func (r *importReader) uint64() uint64 {
n, err := binary.ReadUvarint(r)
if err != nil {
- Fatalf("readVarint: %v", err)
+ base.Fatalf("readVarint: %v", err)
}
return n
}
@@ -654,35 +628,35 @@ func (r *importReader) uint64() uint64 {
func (r *importReader) byte() byte {
x, err := r.ReadByte()
if err != nil {
- Fatalf("declReader.ReadByte: %v", err)
+ base.Fatalf("declReader.ReadByte: %v", err)
}
return x
}
// Compiler-specific extensions.
-func (r *importReader) varExt(n *Node) {
- r.linkname(n.Sym)
- r.symIdx(n.Sym)
+func (r *importReader) varExt(n ir.Node) {
+ r.linkname(n.Sym())
+ r.symIdx(n.Sym())
}
-func (r *importReader) funcExt(n *Node) {
- r.linkname(n.Sym)
- r.symIdx(n.Sym)
+func (r *importReader) funcExt(n ir.Node) {
+ r.linkname(n.Sym())
+ r.symIdx(n.Sym())
// Escape analysis.
for _, fs := range &types.RecvsParams {
- for _, f := range fs(n.Type).FieldSlice() {
+ for _, f := range fs(n.Type()).FieldSlice() {
f.Note = r.string()
}
}
// Inline body.
if u := r.uint64(); u > 0 {
- n.Func.Inl = &Inline{
+ n.Func().Inl = &ir.Inline{
Cost: int32(u - 1),
}
- n.Func.Endlineno = r.pos()
+ n.Func().Endlineno = r.pos()
}
}
@@ -690,7 +664,7 @@ func (r *importReader) methExt(m *types.Field) {
if r.bool() {
m.SetNointerface(true)
}
- r.funcExt(asNode(m.Type.Nname()))
+ r.funcExt(ir.AsNode(m.Nname))
}
func (r *importReader) linkname(s *types.Sym) {
@@ -702,7 +676,7 @@ func (r *importReader) symIdx(s *types.Sym) {
idx := int32(r.int64())
if idx != -1 {
if s.Linkname != "" {
- Fatalf("bad index for linknamed symbol: %v %d\n", lsym, idx)
+ base.Fatalf("bad index for linknamed symbol: %v %d\n", lsym, idx)
}
lsym.SymIdx = idx
lsym.Set(obj.AttrIndexed, true)
@@ -721,9 +695,9 @@ func (r *importReader) typeExt(t *types.Type) {
// so we can use index to reference the symbol.
var typeSymIdx = make(map[*types.Type][2]int64)
-func (r *importReader) doInline(n *Node) {
- if len(n.Func.Inl.Body) != 0 {
- Fatalf("%v already has inline body", n)
+func (r *importReader) doInline(n ir.Node) {
+ if len(n.Func().Inl.Body) != 0 {
+ base.Fatalf("%v already has inline body", n)
}
funchdr(n)
@@ -736,17 +710,17 @@ func (r *importReader) doInline(n *Node) {
// (not doing so can cause significant performance
// degradation due to unnecessary calls to empty
// functions).
- body = []*Node{}
+ body = []ir.Node{}
}
- n.Func.Inl.Body = body
+ n.Func().Inl.Body = body
importlist = append(importlist, n)
- if Debug.E > 0 && Debug.m > 2 {
- if Debug.m > 3 {
- fmt.Printf("inl body for %v %#v: %+v\n", n, n.Type, asNodes(n.Func.Inl.Body))
+ if base.Flag.E > 0 && base.Flag.LowerM > 2 {
+ if base.Flag.LowerM > 3 {
+ fmt.Printf("inl body for %v %#v: %+v\n", n, n.Type(), ir.AsNodes(n.Func().Inl.Body))
} else {
- fmt.Printf("inl body for %v %#v: %v\n", n, n.Type, asNodes(n.Func.Inl.Body))
+ fmt.Printf("inl body for %v %#v: %v\n", n, n.Type(), ir.AsNodes(n.Func().Inl.Body))
}
}
}
@@ -766,16 +740,16 @@ func (r *importReader) doInline(n *Node) {
// unrefined nodes (since this is what the importer uses). The respective case
// entries are unreachable in the importer.
-func (r *importReader) stmtList() []*Node {
- var list []*Node
+func (r *importReader) stmtList() []ir.Node {
+ var list []ir.Node
for {
n := r.node()
if n == nil {
break
}
// OBLOCK nodes may be created when importing ODCL nodes - unpack them
- if n.Op == OBLOCK {
- list = append(list, n.List.Slice()...)
+ if n.Op() == ir.OBLOCK {
+ list = append(list, n.List().Slice()...)
} else {
list = append(list, n)
}
@@ -784,30 +758,30 @@ func (r *importReader) stmtList() []*Node {
return list
}
-func (r *importReader) caseList(sw *Node) []*Node {
- namedTypeSwitch := sw.Op == OSWITCH && sw.Left != nil && sw.Left.Op == OTYPESW && sw.Left.Left != nil
+func (r *importReader) caseList(sw ir.Node) []ir.Node {
+ namedTypeSwitch := sw.Op() == ir.OSWITCH && sw.Left() != nil && sw.Left().Op() == ir.OTYPESW && sw.Left().Left() != nil
- cases := make([]*Node, r.uint64())
+ cases := make([]ir.Node, r.uint64())
for i := range cases {
- cas := nodl(r.pos(), OCASE, nil, nil)
- cas.List.Set(r.stmtList())
+ cas := ir.NodAt(r.pos(), ir.OCASE, nil, nil)
+ cas.PtrList().Set(r.stmtList())
if namedTypeSwitch {
// Note: per-case variables will have distinct, dotted
// names after import. That's okay: swt.go only needs
// Sym for diagnostics anyway.
- caseVar := newnamel(cas.Pos, r.ident())
+ caseVar := ir.NewNameAt(cas.Pos(), r.ident())
declare(caseVar, dclcontext)
- cas.Rlist.Set1(caseVar)
- caseVar.Name.Defn = sw.Left
+ cas.PtrRlist().Set1(caseVar)
+ caseVar.Name().Defn = sw.Left()
}
- cas.Nbody.Set(r.stmtList())
+ cas.PtrBody().Set(r.stmtList())
cases[i] = cas
}
return cases
}
-func (r *importReader) exprList() []*Node {
- var list []*Node
+func (r *importReader) exprList() []ir.Node {
+ var list []ir.Node
for {
n := r.expr()
if n == nil {
@@ -818,47 +792,57 @@ func (r *importReader) exprList() []*Node {
return list
}
-func (r *importReader) expr() *Node {
+func (r *importReader) expr() ir.Node {
n := r.node()
- if n != nil && n.Op == OBLOCK {
- Fatalf("unexpected block node: %v", n)
+ if n != nil && n.Op() == ir.OBLOCK {
+ base.Fatalf("unexpected block node: %v", n)
}
return n
}
// TODO(gri) split into expr and stmt
-func (r *importReader) node() *Node {
+func (r *importReader) node() ir.Node {
switch op := r.op(); op {
// expressions
// case OPAREN:
// unreachable - unpacked by exporter
- case OLITERAL:
+ // case ONIL:
+ // unreachable - mapped to OLITERAL
+
+ case ir.OLITERAL:
pos := r.pos()
- typ, val := r.value()
+ typ := r.typ()
- n := npos(pos, nodlit(val))
- n.Type = typ
+ var n ir.Node
+ if typ.HasNil() {
+ n = nodnil()
+ } else {
+ n = ir.NewLiteral(r.value(typ))
+ }
+ n = npos(pos, n)
+ n.SetType(typ)
return n
- case ONONAME:
+ case ir.ONONAME:
return mkname(r.qualifiedIdent())
- case ONAME:
+ case ir.ONAME:
return mkname(r.ident())
// case OPACK, ONONAME:
// unreachable - should have been resolved by typechecking
- case OTYPE:
+ case ir.OTYPE:
return typenod(r.typ())
- case OTYPESW:
- n := nodl(r.pos(), OTYPESW, nil, nil)
+ case ir.OTYPESW:
+ n := ir.NodAt(r.pos(), ir.OTYPESW, nil, nil)
if s := r.ident(); s != nil {
- n.Left = npos(n.Pos, newnoname(s))
+ n.SetLeft(npos(n.Pos(), newnoname(s)))
}
- n.Right, _ = r.exprsOrNil()
+ right, _ := r.exprsOrNil()
+ n.SetRight(right)
return n
// case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
@@ -870,27 +854,27 @@ func (r *importReader) node() *Node {
// case OPTRLIT:
// unreachable - mapped to case OADDR below by exporter
- case OSTRUCTLIT:
+ case ir.OSTRUCTLIT:
// TODO(mdempsky): Export position information for OSTRUCTKEY nodes.
- savedlineno := lineno
- lineno = r.pos()
- n := nodl(lineno, OCOMPLIT, nil, typenod(r.typ()))
- n.List.Set(r.elemList()) // special handling of field names
- lineno = savedlineno
+ savedlineno := base.Pos
+ base.Pos = r.pos()
+ n := ir.NodAt(base.Pos, ir.OCOMPLIT, nil, typenod(r.typ()))
+ n.PtrList().Set(r.elemList()) // special handling of field names
+ base.Pos = savedlineno
return n
// case OARRAYLIT, OSLICELIT, OMAPLIT:
// unreachable - mapped to case OCOMPLIT below by exporter
- case OCOMPLIT:
- n := nodl(r.pos(), OCOMPLIT, nil, typenod(r.typ()))
- n.List.Set(r.exprList())
+ case ir.OCOMPLIT:
+ n := ir.NodAt(r.pos(), ir.OCOMPLIT, nil, typenod(r.typ()))
+ n.PtrList().Set(r.exprList())
return n
- case OKEY:
+ case ir.OKEY:
pos := r.pos()
left, right := r.exprsOrNil()
- return nodl(pos, OKEY, left, right)
+ return ir.NodAt(pos, ir.OKEY, left, right)
// case OSTRUCTKEY:
// unreachable - handled in case OSTRUCTLIT by elemList
@@ -901,29 +885,29 @@ func (r *importReader) node() *Node {
// case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
// unreachable - mapped to case OXDOT below by exporter
- case OXDOT:
+ case ir.OXDOT:
// see parser.new_dotname
- return npos(r.pos(), nodSym(OXDOT, r.expr(), r.ident()))
+ return npos(r.pos(), nodSym(ir.OXDOT, r.expr(), r.ident()))
// case ODOTTYPE, ODOTTYPE2:
// unreachable - mapped to case ODOTTYPE below by exporter
- case ODOTTYPE:
- n := nodl(r.pos(), ODOTTYPE, r.expr(), nil)
- n.Type = r.typ()
+ case ir.ODOTTYPE:
+ n := ir.NodAt(r.pos(), ir.ODOTTYPE, r.expr(), nil)
+ n.SetType(r.typ())
return n
// case OINDEX, OINDEXMAP, OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR:
// unreachable - mapped to cases below by exporter
- case OINDEX:
- return nodl(r.pos(), op, r.expr(), r.expr())
+ case ir.OINDEX:
+ return ir.NodAt(r.pos(), op, r.expr(), r.expr())
- case OSLICE, OSLICE3:
- n := nodl(r.pos(), op, r.expr(), nil)
+ case ir.OSLICE, ir.OSLICE3:
+ n := ir.NodAt(r.pos(), op, r.expr(), nil)
low, high := r.exprsOrNil()
- var max *Node
- if n.Op.IsSlice3() {
+ var max ir.Node
+ if n.Op().IsSlice3() {
max = r.expr()
}
n.SetSliceBounds(low, high, max)
@@ -932,15 +916,15 @@ func (r *importReader) node() *Node {
// case OCONV, OCONVIFACE, OCONVNOP, OBYTES2STR, ORUNES2STR, OSTR2BYTES, OSTR2RUNES, ORUNESTR:
// unreachable - mapped to OCONV case below by exporter
- case OCONV:
- n := nodl(r.pos(), OCONV, r.expr(), nil)
- n.Type = r.typ()
+ case ir.OCONV:
+ n := ir.NodAt(r.pos(), ir.OCONV, r.expr(), nil)
+ n.SetType(r.typ())
return n
- case OCOPY, OCOMPLEX, OREAL, OIMAG, OAPPEND, OCAP, OCLOSE, ODELETE, OLEN, OMAKE, ONEW, OPANIC, ORECOVER, OPRINT, OPRINTN:
+ case ir.OCOPY, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCAP, ir.OCLOSE, ir.ODELETE, ir.OLEN, ir.OMAKE, ir.ONEW, ir.OPANIC, ir.ORECOVER, ir.OPRINT, ir.OPRINTN:
n := npos(r.pos(), builtinCall(op))
- n.List.Set(r.exprList())
- if op == OAPPEND {
+ n.PtrList().Set(r.exprList())
+ if op == ir.OAPPEND {
n.SetIsDDD(r.bool())
}
return n
@@ -948,45 +932,45 @@ func (r *importReader) node() *Node {
// case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER, OGETG:
// unreachable - mapped to OCALL case below by exporter
- case OCALL:
- n := nodl(r.pos(), OCALL, nil, nil)
- n.Ninit.Set(r.stmtList())
- n.Left = r.expr()
- n.List.Set(r.exprList())
+ case ir.OCALL:
+ n := ir.NodAt(r.pos(), ir.OCALL, nil, nil)
+ n.PtrInit().Set(r.stmtList())
+ n.SetLeft(r.expr())
+ n.PtrList().Set(r.exprList())
n.SetIsDDD(r.bool())
return n
- case OMAKEMAP, OMAKECHAN, OMAKESLICE:
- n := npos(r.pos(), builtinCall(OMAKE))
- n.List.Append(typenod(r.typ()))
- n.List.Append(r.exprList()...)
+ case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE:
+ n := npos(r.pos(), builtinCall(ir.OMAKE))
+ n.PtrList().Append(typenod(r.typ()))
+ n.PtrList().Append(r.exprList()...)
return n
// unary expressions
- case OPLUS, ONEG, OADDR, OBITNOT, ODEREF, ONOT, ORECV:
- return nodl(r.pos(), op, r.expr(), nil)
+ case ir.OPLUS, ir.ONEG, ir.OADDR, ir.OBITNOT, ir.ODEREF, ir.ONOT, ir.ORECV:
+ return ir.NodAt(r.pos(), op, r.expr(), nil)
// binary expressions
- case OADD, OAND, OANDAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, OLT,
- OLSH, OMOD, OMUL, ONE, OOR, OOROR, ORSH, OSEND, OSUB, OXOR:
- return nodl(r.pos(), op, r.expr(), r.expr())
+ case ir.OADD, ir.OAND, ir.OANDAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT,
+ ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.OOROR, ir.ORSH, ir.OSEND, ir.OSUB, ir.OXOR:
+ return ir.NodAt(r.pos(), op, r.expr(), r.expr())
- case OADDSTR:
+ case ir.OADDSTR:
pos := r.pos()
list := r.exprList()
x := npos(pos, list[0])
for _, y := range list[1:] {
- x = nodl(pos, OADD, x, y)
+ x = ir.NodAt(pos, ir.OADD, x, y)
}
return x
// --------------------------------------------------------------------
// statements
- case ODCL:
+ case ir.ODCL:
pos := r.pos()
lhs := npos(pos, dclname(r.ident()))
typ := typenod(r.typ())
- return npos(pos, liststmt(variter([]*Node{lhs}, typ, nil))) // TODO(gri) avoid list creation
+ return npos(pos, liststmt(variter([]ir.Node{lhs}, typ, nil))) // TODO(gri) avoid list creation
// case ODCLFIELD:
// unimplemented
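
Note how the OADDSTR case above rebuilds the exporter's flattened operand list into a left-associated chain of OADD nodes. The same fold over plain strings, with parentheses marking the nesting:

package main

import "fmt"

func main() {
	list := []string{"a", "b", "c"}
	x := list[0]
	for _, y := range list[1:] {
		x = "(" + x + " + " + y + ")"
	}
	fmt.Println(x) // ((a + b) + c)
}
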
@@ -994,118 +978,121 @@ func (r *importReader) node() *Node {
// case OAS, OASWB:
// unreachable - mapped to OAS case below by exporter
- case OAS:
- return nodl(r.pos(), OAS, r.expr(), r.expr())
+ case ir.OAS:
+ return ir.NodAt(r.pos(), ir.OAS, r.expr(), r.expr())
- case OASOP:
- n := nodl(r.pos(), OASOP, nil, nil)
+ case ir.OASOP:
+ n := ir.NodAt(r.pos(), ir.OASOP, nil, nil)
n.SetSubOp(r.op())
- n.Left = r.expr()
+ n.SetLeft(r.expr())
if !r.bool() {
- n.Right = nodintconst(1)
+ n.SetRight(nodintconst(1))
n.SetImplicit(true)
} else {
- n.Right = r.expr()
+ n.SetRight(r.expr())
}
return n
// case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
// unreachable - mapped to OAS2 case below by exporter
- case OAS2:
- n := nodl(r.pos(), OAS2, nil, nil)
- n.List.Set(r.exprList())
- n.Rlist.Set(r.exprList())
+ case ir.OAS2:
+ n := ir.NodAt(r.pos(), ir.OAS2, nil, nil)
+ n.PtrList().Set(r.exprList())
+ n.PtrRlist().Set(r.exprList())
return n
- case ORETURN:
- n := nodl(r.pos(), ORETURN, nil, nil)
- n.List.Set(r.exprList())
+ case ir.ORETURN:
+ n := ir.NodAt(r.pos(), ir.ORETURN, nil, nil)
+ n.PtrList().Set(r.exprList())
return n
// case ORETJMP:
	//	unreachable - generated by compiler for trampoline routines (not exported)
- case OGO, ODEFER:
- return nodl(r.pos(), op, r.expr(), nil)
+ case ir.OGO, ir.ODEFER:
+ return ir.NodAt(r.pos(), op, r.expr(), nil)
- case OIF:
- n := nodl(r.pos(), OIF, nil, nil)
- n.Ninit.Set(r.stmtList())
- n.Left = r.expr()
- n.Nbody.Set(r.stmtList())
- n.Rlist.Set(r.stmtList())
+ case ir.OIF:
+ n := ir.NodAt(r.pos(), ir.OIF, nil, nil)
+ n.PtrInit().Set(r.stmtList())
+ n.SetLeft(r.expr())
+ n.PtrBody().Set(r.stmtList())
+ n.PtrRlist().Set(r.stmtList())
return n
- case OFOR:
- n := nodl(r.pos(), OFOR, nil, nil)
- n.Ninit.Set(r.stmtList())
- n.Left, n.Right = r.exprsOrNil()
- n.Nbody.Set(r.stmtList())
+ case ir.OFOR:
+ n := ir.NodAt(r.pos(), ir.OFOR, nil, nil)
+ n.PtrInit().Set(r.stmtList())
+ left, right := r.exprsOrNil()
+ n.SetLeft(left)
+ n.SetRight(right)
+ n.PtrBody().Set(r.stmtList())
return n
- case ORANGE:
- n := nodl(r.pos(), ORANGE, nil, nil)
- n.List.Set(r.stmtList())
- n.Right = r.expr()
- n.Nbody.Set(r.stmtList())
+ case ir.ORANGE:
+ n := ir.NodAt(r.pos(), ir.ORANGE, nil, nil)
+ n.PtrList().Set(r.stmtList())
+ n.SetRight(r.expr())
+ n.PtrBody().Set(r.stmtList())
return n
- case OSELECT, OSWITCH:
- n := nodl(r.pos(), op, nil, nil)
- n.Ninit.Set(r.stmtList())
- n.Left, _ = r.exprsOrNil()
- n.List.Set(r.caseList(n))
+ case ir.OSELECT, ir.OSWITCH:
+ n := ir.NodAt(r.pos(), op, nil, nil)
+ n.PtrInit().Set(r.stmtList())
+ left, _ := r.exprsOrNil()
+ n.SetLeft(left)
+ n.PtrList().Set(r.caseList(n))
return n
// case OCASE:
// handled by caseList
- case OFALL:
- n := nodl(r.pos(), OFALL, nil, nil)
+ case ir.OFALL:
+ n := ir.NodAt(r.pos(), ir.OFALL, nil, nil)
return n
- case OBREAK, OCONTINUE:
+ case ir.OBREAK, ir.OCONTINUE:
pos := r.pos()
left, _ := r.exprsOrNil()
if left != nil {
- left = newname(left.Sym)
+ left = NewName(left.Sym())
}
- return nodl(pos, op, left, nil)
+ return ir.NodAt(pos, op, left, nil)
// case OEMPTY:
// unreachable - not emitted by exporter
- case OGOTO, OLABEL:
- n := nodl(r.pos(), op, nil, nil)
- n.Sym = lookup(r.string())
+ case ir.OGOTO, ir.OLABEL:
+ n := ir.NodAt(r.pos(), op, nil, nil)
+ n.SetSym(lookup(r.string()))
return n
- case OEND:
+ case ir.OEND:
return nil
default:
- Fatalf("cannot import %v (%d) node\n"+
+ base.Fatalf("cannot import %v (%d) node\n"+
"\t==> please file an issue and assign to gri@", op, int(op))
panic("unreachable") // satisfy compiler
}
}
-func (r *importReader) op() Op {
- return Op(r.uint64())
+func (r *importReader) op() ir.Op {
+ return ir.Op(r.uint64())
}
-func (r *importReader) elemList() []*Node {
+func (r *importReader) elemList() []ir.Node {
c := r.uint64()
- list := make([]*Node, c)
+ list := make([]ir.Node, c)
for i := range list {
s := r.ident()
- list[i] = nodSym(OSTRUCTKEY, r.expr(), s)
+ list[i] = nodSym(ir.OSTRUCTKEY, r.expr(), s)
}
return list
}
-func (r *importReader) exprsOrNil() (a, b *Node) {
+func (r *importReader) exprsOrNil() (a, b ir.Node) {
ab := r.uint64()
if ab&1 != 0 {
a = r.expr()
diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go
index ec9cc4bddc..2b7ecd1d05 100644
--- a/src/cmd/compile/internal/gc/init.go
+++ b/src/cmd/compile/internal/gc/init.go
@@ -5,6 +5,8 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
)
@@ -15,8 +17,9 @@ import (
// the name, normally "pkg.init", is altered to "pkg.init.0".
var renameinitgen int
-// Dummy function for autotmps generated during typechecking.
-var dummyInitFn = nod(ODCLFUNC, nil, nil)
+// Function collecting autotmps generated during typechecking,
+// to be included in the package-level init function.
+var initTodo = ir.Nod(ir.ODCLFUNC, nil, nil)
func renameinit() *types.Sym {
s := lookupN("init.", renameinitgen)
@@ -30,7 +33,7 @@ func renameinit() *types.Sym {
// 1) Initialize all of the packages the current package depends on.
// 2) Initialize all the variables that have initializers.
// 3) Run any init functions.
-func fninit(n []*Node) {
+func fninit(n []ir.Node) {
nf := initOrder(n)
var deps []*obj.LSym // initTask records for packages the current package depends on
@@ -43,16 +46,16 @@ func fninit(n []*Node) {
// Make a function that contains all the initialization statements.
if len(nf) > 0 {
- lineno = nf[0].Pos // prolog/epilog gets line number of first init stmt
+ base.Pos = nf[0].Pos() // prolog/epilog gets line number of first init stmt
initializers := lookup("init")
- fn := dclfunc(initializers, nod(OTFUNC, nil, nil))
- for _, dcl := range dummyInitFn.Func.Dcl {
- dcl.Name.Curfn = fn
+ fn := dclfunc(initializers, ir.Nod(ir.OTFUNC, nil, nil))
+ for _, dcl := range initTodo.Func().Dcl {
+ dcl.Name().Curfn = fn
}
- fn.Func.Dcl = append(fn.Func.Dcl, dummyInitFn.Func.Dcl...)
- dummyInitFn.Func.Dcl = nil
+ fn.Func().Dcl = append(fn.Func().Dcl, initTodo.Func().Dcl...)
+ initTodo.Func().Dcl = nil
- fn.Nbody.Set(nf)
+ fn.PtrBody().Set(nf)
funcbody()
fn = typecheck(fn, ctxStmt)
@@ -62,35 +65,35 @@ func fninit(n []*Node) {
xtop = append(xtop, fn)
fns = append(fns, initializers.Linksym())
}
- if dummyInitFn.Func.Dcl != nil {
- // We only generate temps using dummyInitFn if there
+ if initTodo.Func().Dcl != nil {
+ // We only generate temps using initTodo if there
// are package-scope initialization statements, so
// something's weird if we get here.
- Fatalf("dummyInitFn still has declarations")
+ base.Fatalf("initTodo still has declarations")
}
- dummyInitFn = nil
+ initTodo = nil
// Record user init functions.
for i := 0; i < renameinitgen; i++ {
s := lookupN("init.", i)
- fn := asNode(s.Def).Name.Defn
+ fn := ir.AsNode(s.Def).Name().Defn
// Skip init functions with empty bodies.
- if fn.Nbody.Len() == 1 && fn.Nbody.First().Op == OEMPTY {
+ if fn.Body().Len() == 1 && fn.Body().First().Op() == ir.OEMPTY {
continue
}
fns = append(fns, s.Linksym())
}
- if len(deps) == 0 && len(fns) == 0 && localpkg.Name != "main" && localpkg.Name != "runtime" {
+ if len(deps) == 0 && len(fns) == 0 && ir.LocalPkg.Name != "main" && ir.LocalPkg.Name != "runtime" {
return // nothing to initialize
}
// Make an .inittask structure.
sym := lookup(".inittask")
- nn := newname(sym)
- nn.Type = types.Types[TUINT8] // dummy type
- nn.SetClass(PEXTERN)
- sym.Def = asTypesNode(nn)
+ nn := NewName(sym)
+ nn.SetType(types.Types[types.TUINT8]) // fake type
+ nn.SetClass(ir.PEXTERN)
+ sym.Def = nn
exportsym(nn)
lsym := sym.Linksym()
ot := 0
diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go
index 41f1349bbe..1003f131b8 100644
--- a/src/cmd/compile/internal/gc/initorder.go
+++ b/src/cmd/compile/internal/gc/initorder.go
@@ -8,6 +8,10 @@ import (
"bytes"
"container/heap"
"fmt"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
)
// Package initialization
@@ -60,7 +64,7 @@ const (
type InitOrder struct {
// blocking maps initialization assignments to the assignments
	// that depend on them.
- blocking map[*Node][]*Node
+ blocking map[ir.Node][]ir.Node
// ready is the queue of Pending initialization assignments
// that are ready for initialization.
@@ -71,45 +75,43 @@ type InitOrder struct {
// package-level declarations (in declaration order) and outputs the
// corresponding list of statements to include in the init() function
// body.
-func initOrder(l []*Node) []*Node {
+func initOrder(l []ir.Node) []ir.Node {
s := InitSchedule{
- initplans: make(map[*Node]*InitPlan),
- inittemps: make(map[*Node]*Node),
+ initplans: make(map[ir.Node]*InitPlan),
+ inittemps: make(map[ir.Node]ir.Node),
}
o := InitOrder{
- blocking: make(map[*Node][]*Node),
+ blocking: make(map[ir.Node][]ir.Node),
}
	// Process all package-level assignments in declaration order.
for _, n := range l {
- switch n.Op {
- case OAS, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
+ switch n.Op() {
+ case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
o.processAssign(n)
o.flushReady(s.staticInit)
- case ODCLCONST, ODCLFUNC, ODCLTYPE:
+ case ir.ODCLCONST, ir.ODCLFUNC, ir.ODCLTYPE:
// nop
default:
- Fatalf("unexpected package-level statement: %v", n)
+ base.Fatalf("unexpected package-level statement: %v", n)
}
}
// Check that all assignments are now Done; if not, there must
// have been a dependency cycle.
for _, n := range l {
- switch n.Op {
- case OAS, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
+ switch n.Op() {
+ case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
if n.Initorder() != InitDone {
// If there have already been errors
// printed, those errors may have
// confused us and there might not be
// a loop. Let the user fix those
// first.
- if nerrors > 0 {
- errorexit()
- }
+ base.ExitIfErrors()
- findInitLoopAndExit(firstLHS(n), new([]*Node))
- Fatalf("initialization unfinished, but failed to identify loop")
+ findInitLoopAndExit(firstLHS(n), new([]ir.Node))
+ base.Fatalf("initialization unfinished, but failed to identify loop")
}
}
}
@@ -117,34 +119,34 @@ func initOrder(l []*Node) []*Node {
// Invariant consistency check. If this is non-zero, then we
// should have found a cycle above.
if len(o.blocking) != 0 {
- Fatalf("expected empty map: %v", o.blocking)
+ base.Fatalf("expected empty map: %v", o.blocking)
}
return s.out
}
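
initOrder is Kahn's topological sort in disguise: each assignment's Offset field is reused as a count of not-yet-initialized dependencies, blocking stores the inverse edges, and flushReady (below) drains a heap keyed on declaration order so the output is deterministic. A minimal standalone sketch of the counting scheme, using strings for nodes and a plain slice for the ready queue:

package main

import "fmt"

func main() {
	// Dependency edges: each assignment lists what it waits on.
	deps := map[string][]string{
		"a": {},
		"b": {"a"},
		"c": {"a", "b"},
	}

	pending := map[string]int{}       // the counter the compiler keeps in n.Offset()
	blocking := map[string][]string{} // inverse edges, as in InitOrder.blocking
	for n, ds := range deps {
		pending[n] = len(ds)
		for _, d := range ds {
			blocking[d] = append(blocking[d], n)
		}
	}

	ready := []string{"a"} // the only assignment with no dependencies
	for len(ready) > 0 {
		n := ready[0]
		ready = ready[1:]
		fmt.Println("initialize", n)
		for _, m := range blocking[n] {
			if pending[m]--; pending[m] == 0 {
				ready = append(ready, m)
			}
		}
	}
}
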
-func (o *InitOrder) processAssign(n *Node) {
- if n.Initorder() != InitNotStarted || n.Xoffset != BADWIDTH {
- Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Xoffset)
+func (o *InitOrder) processAssign(n ir.Node) {
+ if n.Initorder() != InitNotStarted || n.Offset() != types.BADWIDTH {
+ base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Offset())
}
n.SetInitorder(InitPending)
- n.Xoffset = 0
+ n.SetOffset(0)
// Compute number of variable dependencies and build the
// inverse dependency ("blocking") graph.
for dep := range collectDeps(n, true) {
- defn := dep.Name.Defn
+ defn := dep.Name().Defn
// Skip dependencies on functions (PFUNC) and
// variables already initialized (InitDone).
- if dep.Class() != PEXTERN || defn.Initorder() == InitDone {
+ if dep.Class() != ir.PEXTERN || defn.Initorder() == InitDone {
continue
}
- n.Xoffset++
+ n.SetOffset(n.Offset() + 1)
o.blocking[defn] = append(o.blocking[defn], n)
}
- if n.Xoffset == 0 {
+ if n.Offset() == 0 {
heap.Push(&o.ready, n)
}
}
@@ -152,23 +154,23 @@ func (o *InitOrder) processAssign(n *Node) {
// flushReady repeatedly applies initialize to the earliest (in
// declaration order) assignment ready for initialization and updates
// the inverse dependency ("blocking") graph.
-func (o *InitOrder) flushReady(initialize func(*Node)) {
+func (o *InitOrder) flushReady(initialize func(ir.Node)) {
for o.ready.Len() != 0 {
- n := heap.Pop(&o.ready).(*Node)
- if n.Initorder() != InitPending || n.Xoffset != 0 {
- Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Xoffset)
+ n := heap.Pop(&o.ready).(ir.Node)
+ if n.Initorder() != InitPending || n.Offset() != 0 {
+ base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Offset())
}
initialize(n)
n.SetInitorder(InitDone)
- n.Xoffset = BADWIDTH
+ n.SetOffset(types.BADWIDTH)
blocked := o.blocking[n]
delete(o.blocking, n)
for _, m := range blocked {
- m.Xoffset--
- if m.Xoffset == 0 {
+ m.SetOffset(m.Offset() - 1)
+ if m.Offset() == 0 {
heap.Push(&o.ready, m)
}
}
@@ -181,7 +183,7 @@ func (o *InitOrder) flushReady(initialize func(*Node)) {
// path points to a slice used for tracking the sequence of
// variables/functions visited. Using a pointer to a slice allows the
// slice capacity to grow, limiting reallocations.
-func findInitLoopAndExit(n *Node, path *[]*Node) {
+func findInitLoopAndExit(n ir.Node, path *[]ir.Node) {
// We implement a simple DFS loop-finding algorithm. This
// could be faster, but initialization cycles are rare.
@@ -194,14 +196,14 @@ func findInitLoopAndExit(n *Node, path *[]*Node) {
// There might be multiple loops involving n; by sorting
// references, we deterministically pick the one reported.
- refers := collectDeps(n.Name.Defn, false).Sorted(func(ni, nj *Node) bool {
- return ni.Pos.Before(nj.Pos)
+ refers := collectDeps(n.Name().Defn, false).Sorted(func(ni, nj ir.Node) bool {
+ return ni.Pos().Before(nj.Pos())
})
*path = append(*path, n)
for _, ref := range refers {
// Short-circuit variables that were initialized.
- if ref.Class() == PEXTERN && ref.Name.Defn.Initorder() == InitDone {
+ if ref.Class() == ir.PEXTERN && ref.Name().Defn.Initorder() == InitDone {
continue
}
@@ -213,12 +215,12 @@ func findInitLoopAndExit(n *Node, path *[]*Node) {
// reportInitLoopAndExit reports an initialization loop as an error
// and exits. However, if l is not actually an initialization loop, it
// simply returns instead.
-func reportInitLoopAndExit(l []*Node) {
+func reportInitLoopAndExit(l []ir.Node) {
// Rotate loop so that the earliest variable declaration is at
// the start.
i := -1
for j, n := range l {
- if n.Class() == PEXTERN && (i == -1 || n.Pos.Before(l[i].Pos)) {
+ if n.Class() == ir.PEXTERN && (i == -1 || n.Pos().Before(l[i].Pos())) {
i = j
}
}
@@ -236,61 +238,60 @@ func reportInitLoopAndExit(l []*Node) {
var msg bytes.Buffer
fmt.Fprintf(&msg, "initialization loop:\n")
for _, n := range l {
- fmt.Fprintf(&msg, "\t%v: %v refers to\n", n.Line(), n)
+ fmt.Fprintf(&msg, "\t%v: %v refers to\n", ir.Line(n), n)
}
- fmt.Fprintf(&msg, "\t%v: %v", l[0].Line(), l[0])
+ fmt.Fprintf(&msg, "\t%v: %v", ir.Line(l[0]), l[0])
- yyerrorl(l[0].Pos, msg.String())
- errorexit()
+ base.ErrorfAt(l[0].Pos(), msg.String())
+ base.ErrorExit()
}
// collectDeps returns all of the package-level functions and
// variables that declaration n depends on. If transitive is true,
// then it also includes the transitive dependencies of any depended
// upon functions (but not variables).
-func collectDeps(n *Node, transitive bool) NodeSet {
+func collectDeps(n ir.Node, transitive bool) ir.NodeSet {
d := initDeps{transitive: transitive}
- switch n.Op {
- case OAS:
- d.inspect(n.Right)
- case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
- d.inspect(n.Right)
- case ODCLFUNC:
- d.inspectList(n.Nbody)
+ switch n.Op() {
+ case ir.OAS:
+ d.inspect(n.Right())
+ case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
+ d.inspect(n.Right())
+ case ir.ODCLFUNC:
+ d.inspectList(n.Body())
default:
- Fatalf("unexpected Op: %v", n.Op)
+ base.Fatalf("unexpected Op: %v", n.Op())
}
return d.seen
}
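The same collect-the-references idea can be sketched with the standard go/ast package instead of the compiler's IR walker (illustrative only; collectDeps above inspects gc nodes, not go/ast nodes):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
)

func main() {
	// The initializer of a declaration like `var a = b + f()`.
	expr, err := parser.ParseExpr("b + f()")
	if err != nil {
		panic(err)
	}
	deps := map[string]bool{}
	ast.Inspect(expr, func(n ast.Node) bool {
		if id, ok := n.(*ast.Ident); ok {
			deps[id.Name] = true
		}
		return true
	})
	fmt.Println(deps) // map[b:true f:true]
}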
type initDeps struct {
transitive bool
- seen NodeSet
+ seen ir.NodeSet
}
-func (d *initDeps) inspect(n *Node) { inspect(n, d.visit) }
-func (d *initDeps) inspectList(l Nodes) { inspectList(l, d.visit) }
+func (d *initDeps) inspect(n ir.Node) { ir.Inspect(n, d.visit) }
+func (d *initDeps) inspectList(l ir.Nodes) { ir.InspectList(l, d.visit) }
// visit calls foundDep on any package-level functions or variables
// referenced by n, if any.
-func (d *initDeps) visit(n *Node) bool {
- switch n.Op {
- case ONAME:
- if n.isMethodExpression() {
- d.foundDep(asNode(n.Type.FuncType().Nname))
- return false
- }
+func (d *initDeps) visit(n ir.Node) bool {
+ switch n.Op() {
+ case ir.OMETHEXPR:
+ d.foundDep(methodExprName(n))
+ return false
+ case ir.ONAME:
switch n.Class() {
- case PEXTERN, PFUNC:
+ case ir.PEXTERN, ir.PFUNC:
d.foundDep(n)
}
- case OCLOSURE:
- d.inspectList(n.Func.Closure.Nbody)
+ case ir.OCLOSURE:
+ d.inspectList(n.Func().Decl.Body())
- case ODOTMETH, OCALLPART:
- d.foundDep(asNode(n.Type.FuncType().Nname))
+ case ir.ODOTMETH, ir.OCALLPART:
+ d.foundDep(methodExprName(n))
}
return true
@@ -298,7 +299,7 @@ func (d *initDeps) visit(n *Node) bool {
// foundDep records that we've found a dependency on n by adding it to
// seen.
-func (d *initDeps) foundDep(n *Node) {
+func (d *initDeps) foundDep(n ir.Node) {
// Can happen with method expressions involving interface
// types; e.g., fixedbugs/issue4495.go.
if n == nil {
@@ -307,7 +308,7 @@ func (d *initDeps) foundDep(n *Node) {
// Names without definitions aren't interesting as far as
// initialization ordering goes.
- if n.Name.Defn == nil {
+ if n.Name().Defn == nil {
return
}
@@ -315,8 +316,8 @@ func (d *initDeps) foundDep(n *Node) {
return
}
d.seen.Add(n)
- if d.transitive && n.Class() == PFUNC {
- d.inspectList(n.Name.Defn.Nbody)
+ if d.transitive && n.Class() == ir.PFUNC {
+ d.inspectList(n.Name().Defn.Body())
}
}
@@ -327,13 +328,15 @@ func (d *initDeps) foundDep(n *Node) {
// an OAS node's Pos may not be unique. For example, given the
// declaration "var a, b = f(), g()", "a" must be ordered before "b",
// but both OAS nodes use the "=" token's position as their Pos.
-type declOrder []*Node
+type declOrder []ir.Node
-func (s declOrder) Len() int { return len(s) }
-func (s declOrder) Less(i, j int) bool { return firstLHS(s[i]).Pos.Before(firstLHS(s[j]).Pos) }
-func (s declOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s declOrder) Len() int { return len(s) }
+func (s declOrder) Less(i, j int) bool {
+ return firstLHS(s[i]).Pos().Before(firstLHS(s[j]).Pos())
+}
+func (s declOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s *declOrder) Push(x interface{}) { *s = append(*s, x.(*Node)) }
+func (s *declOrder) Push(x interface{}) { *s = append(*s, x.(ir.Node)) }
func (s *declOrder) Pop() interface{} {
n := (*s)[len(*s)-1]
*s = (*s)[:len(*s)-1]
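declOrder exists so the ready list can be driven by container/heap in declaration order; the methods above are the standard heap.Interface contract. A self-contained analogue of the same method set, over plain ints:

package main

import (
	"container/heap"
	"fmt"
)

type intHeap []int

func (h intHeap) Len() int            { return len(h) }
func (h intHeap) Less(i, j int) bool  { return h[i] < h[j] }
func (h intHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *intHeap) Push(x interface{}) { *h = append(*h, x.(int)) }
func (h *intHeap) Pop() interface{} {
	old := *h
	n := old[len(old)-1]
	*h = old[:len(old)-1]
	return n
}

func main() {
	h := &intHeap{3, 1, 2}
	heap.Init(h)
	heap.Push(h, 0)
	for h.Len() > 0 {
		fmt.Print(heap.Pop(h), " ") // 0 1 2 3
	}
}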
@@ -342,14 +345,14 @@ func (s *declOrder) Pop() interface{} {
// firstLHS returns the first expression on the left-hand side of
// assignment n.
-func firstLHS(n *Node) *Node {
- switch n.Op {
- case OAS:
- return n.Left
- case OAS2DOTTYPE, OAS2FUNC, OAS2RECV, OAS2MAPR:
- return n.List.First()
+func firstLHS(n ir.Node) ir.Node {
+ switch n.Op() {
+ case ir.OAS:
+ return n.Left()
+ case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2RECV, ir.OAS2MAPR:
+ return n.List().First()
}
- Fatalf("unexpected Op: %v", n.Op)
+ base.Fatalf("unexpected Op: %v", n.Op())
return nil
}
diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go
index 419056985f..6310762c1f 100644
--- a/src/cmd/compile/internal/gc/inl.go
+++ b/src/cmd/compile/internal/gc/inl.go
@@ -27,11 +27,14 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
"fmt"
+ "go/constant"
"strings"
)
@@ -50,27 +53,27 @@ const (
// Get the function's package. For ordinary functions it's on the ->sym, but for imported methods
// the ->sym can be re-used in the local package, so peel it off the receiver's type.
-func fnpkg(fn *Node) *types.Pkg {
- if fn.IsMethod() {
+func fnpkg(fn ir.Node) *types.Pkg {
+ if ir.IsMethod(fn) {
// method
- rcvr := fn.Type.Recv().Type
+ rcvr := fn.Type().Recv().Type
if rcvr.IsPtr() {
rcvr = rcvr.Elem()
}
if rcvr.Sym == nil {
- Fatalf("receiver with no sym: [%v] %L (%v)", fn.Sym, fn, rcvr)
+ base.Fatalf("receiver with no sym: [%v] %L (%v)", fn.Sym(), fn, rcvr)
}
return rcvr.Sym.Pkg
}
// non-method
- return fn.Sym.Pkg
+ return fn.Sym().Pkg
}
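The receiver-peeling step in fnpkg has a runtime analogue in the reflect package (purely illustrative; the compiler operates on *types.Type, not reflect.Type): a pointer receiver's element type is what carries the defining package.

package main

import (
	"fmt"
	"reflect"
)

type T struct{}

func (*T) M() {}

func main() {
	rcvr := reflect.TypeOf(&T{}) // *main.T, a pointer receiver type
	if rcvr.Kind() == reflect.Ptr {
		rcvr = rcvr.Elem() // peel to main.T, as fnpkg does
	}
	fmt.Println(rcvr.PkgPath(), rcvr.Name()) // main T
}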
// Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck
// because they're a copy of an already checked body.
-func typecheckinl(fn *Node) {
+func typecheckinl(fn ir.Node) {
lno := setlineno(fn)
expandInline(fn)
@@ -81,17 +84,17 @@ func typecheckinl(fn *Node) {
// the ->inl of a local function has been typechecked before caninl copied it.
pkg := fnpkg(fn)
- if pkg == localpkg || pkg == nil {
+ if pkg == ir.LocalPkg || pkg == nil {
return // typecheckinl on local function
}
- if Debug.m > 2 || Debug_export != 0 {
- fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym, fn, asNodes(fn.Func.Inl.Body))
+ if base.Flag.LowerM > 2 || base.Debug.Export != 0 {
+ fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym(), fn, ir.AsNodes(fn.Func().Inl.Body))
}
savefn := Curfn
Curfn = fn
- typecheckslice(fn.Func.Inl.Body, ctxStmt)
+ typecheckslice(fn.Func().Inl.Body, ctxStmt)
Curfn = savefn
// During expandInline (which imports fn.Func.Inl.Body),
@@ -99,65 +102,65 @@ func typecheckinl(fn *Node) {
// to fn.Func.Inl.Dcl for consistency with how local functions
// behave. (Append because typecheckinl may be called multiple
// times.)
- fn.Func.Inl.Dcl = append(fn.Func.Inl.Dcl, fn.Func.Dcl...)
- fn.Func.Dcl = nil
+ fn.Func().Inl.Dcl = append(fn.Func().Inl.Dcl, fn.Func().Dcl...)
+ fn.Func().Dcl = nil
- lineno = lno
+ base.Pos = lno
}
// Caninl determines whether fn is inlineable.
// If so, caninl saves fn->nbody in fn->inl and substitutes it with a copy.
// fn and ->nbody will already have been typechecked.
-func caninl(fn *Node) {
- if fn.Op != ODCLFUNC {
- Fatalf("caninl %v", fn)
+func caninl(fn ir.Node) {
+ if fn.Op() != ir.ODCLFUNC {
+ base.Fatalf("caninl %v", fn)
}
- if fn.Func.Nname == nil {
- Fatalf("caninl no nname %+v", fn)
+ if fn.Func().Nname == nil {
+ base.Fatalf("caninl no nname %+v", fn)
}
var reason string // reason, if any, that the function was not inlined
- if Debug.m > 1 || logopt.Enabled() {
+ if base.Flag.LowerM > 1 || logopt.Enabled() {
defer func() {
if reason != "" {
- if Debug.m > 1 {
- fmt.Printf("%v: cannot inline %v: %s\n", fn.Line(), fn.Func.Nname, reason)
+ if base.Flag.LowerM > 1 {
+ fmt.Printf("%v: cannot inline %v: %s\n", ir.Line(fn), fn.Func().Nname, reason)
}
if logopt.Enabled() {
- logopt.LogOpt(fn.Pos, "cannotInlineFunction", "inline", fn.funcname(), reason)
+ logopt.LogOpt(fn.Pos(), "cannotInlineFunction", "inline", ir.FuncName(fn), reason)
}
}
}()
}
// If marked "go:noinline", don't inline
- if fn.Func.Pragma&Noinline != 0 {
+ if fn.Func().Pragma&ir.Noinline != 0 {
reason = "marked go:noinline"
return
}
// If marked "go:norace" and -race compilation, don't inline.
- if flag_race && fn.Func.Pragma&Norace != 0 {
+ if base.Flag.Race && fn.Func().Pragma&ir.Norace != 0 {
reason = "marked go:norace with -race compilation"
return
}
// If marked "go:nocheckptr" and -d checkptr compilation, don't inline.
- if Debug_checkptr != 0 && fn.Func.Pragma&NoCheckPtr != 0 {
+ if base.Debug.Checkptr != 0 && fn.Func().Pragma&ir.NoCheckPtr != 0 {
reason = "marked go:nocheckptr"
return
}
// If marked "go:cgo_unsafe_args", don't inline, since the
// function makes assumptions about its argument frame layout.
- if fn.Func.Pragma&CgoUnsafeArgs != 0 {
+ if fn.Func().Pragma&ir.CgoUnsafeArgs != 0 {
reason = "marked go:cgo_unsafe_args"
return
}
// If marked as "go:uintptrescapes", don't inline, since the
// escape information is lost during inlining.
- if fn.Func.Pragma&UintptrEscapes != 0 {
+ if fn.Func().Pragma&ir.UintptrEscapes != 0 {
reason = "marked as having an escaping uintptr argument"
return
}
@@ -166,29 +169,29 @@ func caninl(fn *Node) {
// granularity, so inlining yeswritebarrierrec functions can
// confuse it (#22342). As a workaround, disallow inlining
// them for now.
- if fn.Func.Pragma&Yeswritebarrierrec != 0 {
+ if fn.Func().Pragma&ir.Yeswritebarrierrec != 0 {
reason = "marked go:yeswritebarrierrec"
return
}
// If fn has no body (is defined outside of Go), cannot inline it.
- if fn.Nbody.Len() == 0 {
+ if fn.Body().Len() == 0 {
reason = "no function body"
return
}
if fn.Typecheck() == 0 {
- Fatalf("caninl on non-typechecked function %v", fn)
+ base.Fatalf("caninl on non-typechecked function %v", fn)
}
- n := fn.Func.Nname
- if n.Func.InlinabilityChecked() {
+ n := fn.Func().Nname
+ if n.Func().InlinabilityChecked() {
return
}
- defer n.Func.SetInlinabilityChecked(true)
+ defer n.Func().SetInlinabilityChecked(true)
cc := int32(inlineExtraCallCost)
- if Debug.l == 4 {
+ if base.Flag.LowerL == 4 {
cc = 1 // this appears to yield better performance than 0.
}
@@ -204,9 +207,9 @@ func caninl(fn *Node) {
visitor := hairyVisitor{
budget: inlineMaxBudget,
extraCallCost: cc,
- usedLocals: make(map[*Node]bool),
+ usedLocals: make(map[ir.Node]bool),
}
- if visitor.visitList(fn.Nbody) {
+ if visitor.visitList(fn.Body()) {
reason = visitor.reason
return
}
@@ -215,82 +218,77 @@ func caninl(fn *Node) {
return
}
- n.Func.Inl = &Inline{
+ n.Func().Inl = &ir.Inline{
Cost: inlineMaxBudget - visitor.budget,
- Dcl: inlcopylist(pruneUnusedAutos(n.Name.Defn.Func.Dcl, &visitor)),
- Body: inlcopylist(fn.Nbody.Slice()),
+ Dcl: inlcopylist(pruneUnusedAutos(n.Name().Defn.Func().Dcl, &visitor)),
+ Body: inlcopylist(fn.Body().Slice()),
}
- // hack, TODO, check for better way to link method nodes back to the thing with the ->inl
- // this is so export can find the body of a method
- fn.Type.FuncType().Nname = asTypesNode(n)
-
- if Debug.m > 1 {
- fmt.Printf("%v: can inline %#v with cost %d as: %#v { %#v }\n", fn.Line(), n, inlineMaxBudget-visitor.budget, fn.Type, asNodes(n.Func.Inl.Body))
- } else if Debug.m != 0 {
- fmt.Printf("%v: can inline %v\n", fn.Line(), n)
+ if base.Flag.LowerM > 1 {
+ fmt.Printf("%v: can inline %#v with cost %d as: %#v { %#v }\n", ir.Line(fn), n, inlineMaxBudget-visitor.budget, fn.Type(), ir.AsNodes(n.Func().Inl.Body))
+ } else if base.Flag.LowerM != 0 {
+ fmt.Printf("%v: can inline %v\n", ir.Line(fn), n)
}
if logopt.Enabled() {
- logopt.LogOpt(fn.Pos, "canInlineFunction", "inline", fn.funcname(), fmt.Sprintf("cost: %d", inlineMaxBudget-visitor.budget))
+ logopt.LogOpt(fn.Pos(), "canInlineFunction", "inline", ir.FuncName(fn), fmt.Sprintf("cost: %d", inlineMaxBudget-visitor.budget))
}
}
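The budget scheme driven by hairyVisitor, reduced to a toy: the ops, cost values, and starting budget below are invented, and only the control flow mirrors the code above.

package main

import "fmt"

type visitor struct {
	budget int32
	reason string
}

// visit charges one generic op, or a made-up extra cost for calls,
// and reports true as soon as the function is too hairy to inline.
func (v *visitor) visit(op string) bool {
	switch op {
	case "recover":
		v.reason = "call to recover"
		return true
	case "call":
		v.budget -= 57 // extra per-call cost, standing in for extraCallCost
	}
	v.budget--
	if v.budget < 0 {
		v.reason = "function too complex"
		return true
	}
	return false
}

func main() {
	v := &visitor{budget: 80}
	for _, op := range []string{"assign", "call", "add", "call"} {
		if v.visit(op) {
			break
		}
	}
	fmt.Println(v.budget, v.reason) // -38 function too complex
}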
// inlFlood marks n's inline body for export and recursively ensures
// all called functions are marked too.
-func inlFlood(n *Node) {
+func inlFlood(n ir.Node) {
if n == nil {
return
}
- if n.Op != ONAME || n.Class() != PFUNC {
- Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op, n.Class())
+ if n.Op() != ir.ONAME || n.Class() != ir.PFUNC {
+ base.Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op(), n.Class())
}
- if n.Func == nil {
- Fatalf("inlFlood: missing Func on %v", n)
+ if n.Func() == nil {
+ base.Fatalf("inlFlood: missing Func on %v", n)
}
- if n.Func.Inl == nil {
+ if n.Func().Inl == nil {
return
}
- if n.Func.ExportInline() {
+ if n.Func().ExportInline() {
return
}
- n.Func.SetExportInline(true)
+ n.Func().SetExportInline(true)
typecheckinl(n)
// Recursively identify all referenced functions for
// reexport. We want to include even non-called functions,
// because after inlining they might be callable.
- inspectList(asNodes(n.Func.Inl.Body), func(n *Node) bool {
- switch n.Op {
- case ONAME:
+ ir.InspectList(ir.AsNodes(n.Func().Inl.Body), func(n ir.Node) bool {
+ switch n.Op() {
+ case ir.OMETHEXPR:
+ inlFlood(methodExprName(n))
+
+ case ir.ONAME:
switch n.Class() {
- case PFUNC:
- if n.isMethodExpression() {
- inlFlood(asNode(n.Type.Nname()))
- } else {
- inlFlood(n)
- exportsym(n)
- }
- case PEXTERN:
+ case ir.PFUNC:
+ inlFlood(n)
+ exportsym(n)
+ case ir.PEXTERN:
exportsym(n)
}
- case ODOTMETH:
- fn := asNode(n.Type.Nname())
+ case ir.ODOTMETH:
+ fn := methodExprName(n)
inlFlood(fn)
- case OCALLPART:
+ case ir.OCALLPART:
// Okay, because we don't yet inline indirect
// calls to method values.
- case OCLOSURE:
+ case ir.OCLOSURE:
// If the closure is inlinable, we'll need to
// flood it too. But today we don't support
// inlining functions that contain closures.
//
// When we do, we'll probably want:
// inlFlood(n.Func.Closure.Func.Nname)
- Fatalf("unexpected closure in inlinable function")
+ base.Fatalf("unexpected closure in inlinable function")
}
return true
})
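inlFlood's transitive marking is a plain flood fill over the call graph; a miniature version with an invented calls map and exported set:

package main

import "fmt"

var calls = map[string][]string{
	"f": {"g", "h"},
	"g": {"h"},
}

var exported = map[string]bool{}

func flood(fn string) {
	if exported[fn] {
		return // already marked, like the ExportInline check above
	}
	exported[fn] = true
	for _, callee := range calls[fn] {
		flood(callee)
	}
}

func main() {
	flood("f")
	fmt.Println(len(exported), exported["h"]) // 3 true
}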
@@ -302,11 +300,11 @@ type hairyVisitor struct {
budget int32
reason string
extraCallCost int32
- usedLocals map[*Node]bool
+ usedLocals map[ir.Node]bool
}
// Look for anything we want to punt on.
-func (v *hairyVisitor) visitList(ll Nodes) bool {
+func (v *hairyVisitor) visitList(ll ir.Nodes) bool {
for _, n := range ll.Slice() {
if v.visit(n) {
return true
@@ -315,20 +313,20 @@ func (v *hairyVisitor) visitList(ll Nodes) bool {
return false
}
-func (v *hairyVisitor) visit(n *Node) bool {
+func (v *hairyVisitor) visit(n ir.Node) bool {
if n == nil {
return false
}
- switch n.Op {
+ switch n.Op() {
// Call is okay if inlinable and we have the budget for the body.
- case OCALLFUNC:
+ case ir.OCALLFUNC:
// Functions that call runtime.getcaller{pc,sp} cannot be inlined
// because getcaller{pc,sp} expect a pointer to the caller's first argument.
//
// runtime.throw is a "cheap call" like panic in normal code.
- if n.Left.Op == ONAME && n.Left.Class() == PFUNC && isRuntimePkg(n.Left.Sym.Pkg) {
- fn := n.Left.Sym.Name
+ if n.Left().Op() == ir.ONAME && n.Left().Class() == ir.PFUNC && isRuntimePkg(n.Left().Sym().Pkg) {
+ fn := n.Left().Sym().Name
if fn == "getcallerpc" || fn == "getcallersp" {
v.reason = "call to " + fn
return true
@@ -344,8 +342,8 @@ func (v *hairyVisitor) visit(n *Node) bool {
break
}
- if fn := inlCallee(n.Left); fn != nil && fn.Func.Inl != nil {
- v.budget -= fn.Func.Inl.Cost
+ if fn := inlCallee(n.Left()); fn != nil && fn.Func().Inl != nil {
+ v.budget -= fn.Func().Inl.Cost
break
}
@@ -353,16 +351,13 @@ func (v *hairyVisitor) visit(n *Node) bool {
v.budget -= v.extraCallCost
// Call is okay if inlinable and we have the budget for the body.
- case OCALLMETH:
- t := n.Left.Type
+ case ir.OCALLMETH:
+ t := n.Left().Type()
if t == nil {
- Fatalf("no function type for [%p] %+v\n", n.Left, n.Left)
- }
- if t.Nname() == nil {
- Fatalf("no function definition for [%p] %+v\n", t, t)
+ base.Fatalf("no function type for [%p] %+v\n", n.Left(), n.Left())
}
- if isRuntimePkg(n.Left.Sym.Pkg) {
- fn := n.Left.Sym.Name
+ if isRuntimePkg(n.Left().Sym().Pkg) {
+ fn := n.Left().Sym().Name
if fn == "heapBits.nextArena" {
// Special case: explicitly allow
// mid-stack inlining of
@@ -372,7 +367,7 @@ func (v *hairyVisitor) visit(n *Node) bool {
break
}
}
- if inlfn := asNode(t.FuncType().Nname).Func; inlfn.Inl != nil {
+ if inlfn := methodExprName(n.Left()).Func(); inlfn.Inl != nil {
v.budget -= inlfn.Inl.Cost
break
}
@@ -380,58 +375,58 @@ func (v *hairyVisitor) visit(n *Node) bool {
v.budget -= v.extraCallCost
// Things that are too hairy, irrespective of the budget
- case OCALL, OCALLINTER:
+ case ir.OCALL, ir.OCALLINTER:
// Call cost for non-leaf inlining.
v.budget -= v.extraCallCost
- case OPANIC:
+ case ir.OPANIC:
v.budget -= inlineExtraPanicCost
- case ORECOVER:
+ case ir.ORECOVER:
// recover matches the argument frame pointer to find
// the right panic value, so it needs an argument frame.
v.reason = "call to recover"
return true
- case OCLOSURE,
- ORANGE,
- OSELECT,
- OGO,
- ODEFER,
- ODCLTYPE, // can't print yet
- ORETJMP:
- v.reason = "unhandled op " + n.Op.String()
+ case ir.OCLOSURE,
+ ir.ORANGE,
+ ir.OSELECT,
+ ir.OGO,
+ ir.ODEFER,
+ ir.ODCLTYPE, // can't print yet
+ ir.ORETJMP:
+ v.reason = "unhandled op " + n.Op().String()
return true
- case OAPPEND:
+ case ir.OAPPEND:
v.budget -= inlineExtraAppendCost
- case ODCLCONST, OEMPTY, OFALL:
+ case ir.ODCLCONST, ir.OEMPTY, ir.OFALL:
// These nodes don't produce code; omit from inlining budget.
return false
- case OLABEL:
+ case ir.OLABEL:
// TODO(mdempsky): Add support for inlining labeled control statements.
- if n.labeledControl() != nil {
+ if labeledControl(n) != nil {
v.reason = "labeled control"
return true
}
- case OBREAK, OCONTINUE:
- if n.Sym != nil {
+ case ir.OBREAK, ir.OCONTINUE:
+ if n.Sym() != nil {
// Should have short-circuited due to labeledControl above.
- Fatalf("unexpected labeled break/continue: %v", n)
+ base.Fatalf("unexpected labeled break/continue: %v", n)
}
- case OIF:
- if Isconst(n.Left, CTBOOL) {
+ case ir.OIF:
+ if ir.IsConst(n.Left(), constant.Bool) {
// This if and the condition cost nothing.
- return v.visitList(n.Ninit) || v.visitList(n.Nbody) ||
- v.visitList(n.Rlist)
+ return v.visitList(n.Init()) || v.visitList(n.Body()) ||
+ v.visitList(n.Rlist())
}
- case ONAME:
- if n.Class() == PAUTO {
+ case ir.ONAME:
+ if n.Class() == ir.PAUTO {
v.usedLocals[n] = true
}
@@ -440,67 +435,67 @@ func (v *hairyVisitor) visit(n *Node) bool {
v.budget--
// When debugging, don't stop early, to get full cost of inlining this function
- if v.budget < 0 && Debug.m < 2 && !logopt.Enabled() {
+ if v.budget < 0 && base.Flag.LowerM < 2 && !logopt.Enabled() {
return true
}
- return v.visit(n.Left) || v.visit(n.Right) ||
- v.visitList(n.List) || v.visitList(n.Rlist) ||
- v.visitList(n.Ninit) || v.visitList(n.Nbody)
+ return v.visit(n.Left()) || v.visit(n.Right()) ||
+ v.visitList(n.List()) || v.visitList(n.Rlist()) ||
+ v.visitList(n.Init()) || v.visitList(n.Body())
}
// inlcopylist (together with inlcopy) recursively copies a list of nodes, except
// that it keeps the same ONAME, OTYPE, and OLITERAL nodes. It is used for copying
// the body and dcls of an inlineable function.
-func inlcopylist(ll []*Node) []*Node {
- s := make([]*Node, 0, len(ll))
+func inlcopylist(ll []ir.Node) []ir.Node {
+ s := make([]ir.Node, 0, len(ll))
for _, n := range ll {
s = append(s, inlcopy(n))
}
return s
}
-func inlcopy(n *Node) *Node {
+func inlcopy(n ir.Node) ir.Node {
if n == nil {
return nil
}
- switch n.Op {
- case ONAME, OTYPE, OLITERAL:
+ switch n.Op() {
+ case ir.ONAME, ir.OTYPE, ir.OLITERAL, ir.ONIL:
return n
}
- m := n.copy()
- if n.Op != OCALLPART && m.Func != nil {
- Fatalf("unexpected Func: %v", m)
+ m := ir.Copy(n)
+ if n.Op() != ir.OCALLPART && m.Func() != nil {
+ base.Fatalf("unexpected Func: %v", m)
}
- m.Left = inlcopy(n.Left)
- m.Right = inlcopy(n.Right)
- m.List.Set(inlcopylist(n.List.Slice()))
- m.Rlist.Set(inlcopylist(n.Rlist.Slice()))
- m.Ninit.Set(inlcopylist(n.Ninit.Slice()))
- m.Nbody.Set(inlcopylist(n.Nbody.Slice()))
+ m.SetLeft(inlcopy(n.Left()))
+ m.SetRight(inlcopy(n.Right()))
+ m.PtrList().Set(inlcopylist(n.List().Slice()))
+ m.PtrRlist().Set(inlcopylist(n.Rlist().Slice()))
+ m.PtrInit().Set(inlcopylist(n.Init().Slice()))
+ m.PtrBody().Set(inlcopylist(n.Body().Slice()))
return m
}
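inlcopy's copy-except-leaves pattern on a toy tree: interior nodes are duplicated so the inliner can rewrite them freely, while leaves (the ONAME/OTYPE/OLITERAL cases above) stay shared. The node/copyTree names are invented:

package main

import "fmt"

type node struct {
	leaf        bool
	left, right *node
}

func copyTree(n *node) *node {
	if n == nil || n.leaf {
		return n // leaves are shared, like ONAME/OTYPE/OLITERAL above
	}
	m := *n // shallow copy of the interior node, like ir.Copy
	m.left = copyTree(n.left)
	m.right = copyTree(n.right)
	return &m
}

func main() {
	leaf := &node{leaf: true}
	root := &node{left: leaf}
	dup := copyTree(root)
	fmt.Println(dup != root, dup.left == leaf) // true true
}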
-func countNodes(n *Node) int {
+func countNodes(n ir.Node) int {
if n == nil {
return 0
}
cnt := 1
- cnt += countNodes(n.Left)
- cnt += countNodes(n.Right)
- for _, n1 := range n.Ninit.Slice() {
+ cnt += countNodes(n.Left())
+ cnt += countNodes(n.Right())
+ for _, n1 := range n.Init().Slice() {
cnt += countNodes(n1)
}
- for _, n1 := range n.Nbody.Slice() {
+ for _, n1 := range n.Body().Slice() {
cnt += countNodes(n1)
}
- for _, n1 := range n.List.Slice() {
+ for _, n1 := range n.List().Slice() {
cnt += countNodes(n1)
}
- for _, n1 := range n.Rlist.Slice() {
+ for _, n1 := range n.Rlist().Slice() {
cnt += countNodes(n1)
}
return cnt
@@ -508,7 +503,7 @@ func countNodes(n *Node) int {
// Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any
// calls made to inlineable functions. This is the external entry point.
-func inlcalls(fn *Node) {
+func inlcalls(fn ir.Node) {
savefn := Curfn
Curfn = fn
maxCost := int32(inlineMaxBudget)
@@ -521,31 +516,31 @@ func inlcalls(fn *Node) {
// but allow inlining if there is a recursion cycle of many functions.
// Most likely, the inlining will stop before we even hit the beginning of
// the cycle again, but the map catches the unusual case.
- inlMap := make(map[*Node]bool)
+ inlMap := make(map[ir.Node]bool)
fn = inlnode(fn, maxCost, inlMap)
if fn != Curfn {
- Fatalf("inlnode replaced curfn")
+ base.Fatalf("inlnode replaced curfn")
}
Curfn = savefn
}
// Turn an OINLCALL into a statement.
-func inlconv2stmt(n *Node) {
- n.Op = OBLOCK
+func inlconv2stmt(n ir.Node) {
+ n.SetOp(ir.OBLOCK)
// n->ninit stays
- n.List.Set(n.Nbody.Slice())
+ n.PtrList().Set(n.Body().Slice())
- n.Nbody.Set(nil)
- n.Rlist.Set(nil)
+ n.PtrBody().Set(nil)
+ n.PtrRlist().Set(nil)
}
// Turn an OINLCALL into a single valued expression.
// The result of inlconv2expr MUST be assigned back to n, e.g.
// n.Left = inlconv2expr(n.Left)
-func inlconv2expr(n *Node) *Node {
- r := n.Rlist.First()
- return addinit(r, append(n.Ninit.Slice(), n.Nbody.Slice()...))
+func inlconv2expr(n ir.Node) ir.Node {
+ r := n.Rlist().First()
+ return addinit(r, append(n.Init().Slice(), n.Body().Slice()...))
}
// Turn the rlist (with the return values) of the OINLCALL in
@@ -553,17 +548,17 @@ func inlconv2expr(n *Node) *Node {
// containing the inlined statements on the first list element so
// order will be preserved. Used in return, oas2func and call
// statements.
-func inlconv2list(n *Node) []*Node {
- if n.Op != OINLCALL || n.Rlist.Len() == 0 {
- Fatalf("inlconv2list %+v\n", n)
+func inlconv2list(n ir.Node) []ir.Node {
+ if n.Op() != ir.OINLCALL || n.Rlist().Len() == 0 {
+ base.Fatalf("inlconv2list %+v\n", n)
}
- s := n.Rlist.Slice()
- s[0] = addinit(s[0], append(n.Ninit.Slice(), n.Nbody.Slice()...))
+ s := n.Rlist().Slice()
+ s[0] = addinit(s[0], append(n.Init().Slice(), n.Body().Slice()...))
return s
}
-func inlnodelist(l Nodes, maxCost int32, inlMap map[*Node]bool) {
+func inlnodelist(l ir.Nodes, maxCost int32, inlMap map[ir.Node]bool) {
s := l.Slice()
for i := range s {
s[i] = inlnode(s[i], maxCost, inlMap)
@@ -583,80 +578,80 @@ func inlnodelist(l Nodes, maxCost int32, inlMap map[*Node]bool) {
// shorter and less complicated.
// The result of inlnode MUST be assigned back to n, e.g.
// n.Left = inlnode(n.Left)
-func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node {
+func inlnode(n ir.Node, maxCost int32, inlMap map[ir.Node]bool) ir.Node {
if n == nil {
return n
}
- switch n.Op {
- case ODEFER, OGO:
- switch n.Left.Op {
- case OCALLFUNC, OCALLMETH:
- n.Left.SetNoInline(true)
+ switch n.Op() {
+ case ir.ODEFER, ir.OGO:
+ switch n.Left().Op() {
+ case ir.OCALLFUNC, ir.OCALLMETH:
+ n.Left().SetNoInline(true)
}
// TODO do them here (or earlier),
// so escape analysis can avoid more heapmoves.
- case OCLOSURE:
+ case ir.OCLOSURE:
return n
- case OCALLMETH:
+ case ir.OCALLMETH:
// Prevent inlining some reflect.Value methods when using checkptr,
// even when package reflect was compiled without it (#35073).
- if s := n.Left.Sym; Debug_checkptr != 0 && isReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") {
+ if s := n.Left().Sym(); base.Debug.Checkptr != 0 && isReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") {
return n
}
}
lno := setlineno(n)
- inlnodelist(n.Ninit, maxCost, inlMap)
- for _, n1 := range n.Ninit.Slice() {
- if n1.Op == OINLCALL {
+ inlnodelist(n.Init(), maxCost, inlMap)
+ for _, n1 := range n.Init().Slice() {
+ if n1.Op() == ir.OINLCALL {
inlconv2stmt(n1)
}
}
- n.Left = inlnode(n.Left, maxCost, inlMap)
- if n.Left != nil && n.Left.Op == OINLCALL {
- n.Left = inlconv2expr(n.Left)
+ n.SetLeft(inlnode(n.Left(), maxCost, inlMap))
+ if n.Left() != nil && n.Left().Op() == ir.OINLCALL {
+ n.SetLeft(inlconv2expr(n.Left()))
}
- n.Right = inlnode(n.Right, maxCost, inlMap)
- if n.Right != nil && n.Right.Op == OINLCALL {
- if n.Op == OFOR || n.Op == OFORUNTIL {
- inlconv2stmt(n.Right)
- } else if n.Op == OAS2FUNC {
- n.Rlist.Set(inlconv2list(n.Right))
- n.Right = nil
- n.Op = OAS2
+ n.SetRight(inlnode(n.Right(), maxCost, inlMap))
+ if n.Right() != nil && n.Right().Op() == ir.OINLCALL {
+ if n.Op() == ir.OFOR || n.Op() == ir.OFORUNTIL {
+ inlconv2stmt(n.Right())
+ } else if n.Op() == ir.OAS2FUNC {
+ n.PtrRlist().Set(inlconv2list(n.Right()))
+ n.SetRight(nil)
+ n.SetOp(ir.OAS2)
n.SetTypecheck(0)
n = typecheck(n, ctxStmt)
} else {
- n.Right = inlconv2expr(n.Right)
+ n.SetRight(inlconv2expr(n.Right()))
}
}
- inlnodelist(n.List, maxCost, inlMap)
- if n.Op == OBLOCK {
- for _, n2 := range n.List.Slice() {
- if n2.Op == OINLCALL {
+ inlnodelist(n.List(), maxCost, inlMap)
+ if n.Op() == ir.OBLOCK {
+ for _, n2 := range n.List().Slice() {
+ if n2.Op() == ir.OINLCALL {
inlconv2stmt(n2)
}
}
} else {
- s := n.List.Slice()
+ s := n.List().Slice()
for i1, n1 := range s {
- if n1 != nil && n1.Op == OINLCALL {
+ if n1 != nil && n1.Op() == ir.OINLCALL {
s[i1] = inlconv2expr(s[i1])
}
}
}
- inlnodelist(n.Rlist, maxCost, inlMap)
- s := n.Rlist.Slice()
+ inlnodelist(n.Rlist(), maxCost, inlMap)
+ s := n.Rlist().Slice()
for i1, n1 := range s {
- if n1.Op == OINLCALL {
- if n.Op == OIF {
+ if n1.Op() == ir.OINLCALL {
+ if n.Op() == ir.OIF {
inlconv2stmt(n1)
} else {
s[i1] = inlconv2expr(s[i1])
@@ -664,9 +659,9 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node {
}
}
- inlnodelist(n.Nbody, maxCost, inlMap)
- for _, n := range n.Nbody.Slice() {
- if n.Op == OINLCALL {
+ inlnodelist(n.Body(), maxCost, inlMap)
+ for _, n := range n.Body().Slice() {
+ if n.Op() == ir.OINLCALL {
inlconv2stmt(n)
}
}
@@ -674,75 +669,70 @@ func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node {
// with all the branches out of the way, it is now time to
// transmogrify this node itself unless inhibited by the
// switch at the top of this function.
- switch n.Op {
- case OCALLFUNC, OCALLMETH:
+ switch n.Op() {
+ case ir.OCALLFUNC, ir.OCALLMETH:
if n.NoInline() {
return n
}
}
- switch n.Op {
- case OCALLFUNC:
- if Debug.m > 3 {
- fmt.Printf("%v:call to func %+v\n", n.Line(), n.Left)
+ switch n.Op() {
+ case ir.OCALLFUNC:
+ if base.Flag.LowerM > 3 {
+ fmt.Printf("%v:call to func %+v\n", ir.Line(n), n.Left())
}
if isIntrinsicCall(n) {
break
}
- if fn := inlCallee(n.Left); fn != nil && fn.Func.Inl != nil {
+ if fn := inlCallee(n.Left()); fn != nil && fn.Func().Inl != nil {
n = mkinlcall(n, fn, maxCost, inlMap)
}
- case OCALLMETH:
- if Debug.m > 3 {
- fmt.Printf("%v:call to meth %L\n", n.Line(), n.Left.Right)
+ case ir.OCALLMETH:
+ if base.Flag.LowerM > 3 {
+ fmt.Printf("%v:call to meth %L\n", ir.Line(n), n.Left().Right())
}
// typecheck should have resolved ODOTMETH->type, whose nname points to the actual function.
- if n.Left.Type == nil {
- Fatalf("no function type for [%p] %+v\n", n.Left, n.Left)
+ if n.Left().Type() == nil {
+ base.Fatalf("no function type for [%p] %+v\n", n.Left(), n.Left())
}
- if n.Left.Type.Nname() == nil {
- Fatalf("no function definition for [%p] %+v\n", n.Left.Type, n.Left.Type)
- }
-
- n = mkinlcall(n, asNode(n.Left.Type.FuncType().Nname), maxCost, inlMap)
+ n = mkinlcall(n, methodExprName(n.Left()), maxCost, inlMap)
}
- lineno = lno
+ base.Pos = lno
return n
}
// inlCallee takes a function-typed expression and returns the underlying function ONAME
// that it refers to if statically known. Otherwise, it returns nil.
-func inlCallee(fn *Node) *Node {
+func inlCallee(fn ir.Node) ir.Node {
fn = staticValue(fn)
switch {
- case fn.Op == ONAME && fn.Class() == PFUNC:
- if fn.isMethodExpression() {
- n := asNode(fn.Type.Nname())
- // Check that receiver type matches fn.Left.
- // TODO(mdempsky): Handle implicit dereference
- // of pointer receiver argument?
- if n == nil || !types.Identical(n.Type.Recv().Type, fn.Left.Type) {
- return nil
- }
- return n
+ case fn.Op() == ir.OMETHEXPR:
+ n := methodExprName(fn)
+ // Check that receiver type matches fn.Left.
+ // TODO(mdempsky): Handle implicit dereference
+ // of pointer receiver argument?
+ if n == nil || !types.Identical(n.Type().Recv().Type, fn.Left().Type()) {
+ return nil
}
+ return n
+ case fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC:
return fn
- case fn.Op == OCLOSURE:
- c := fn.Func.Closure
+ case fn.Op() == ir.OCLOSURE:
+ c := fn.Func().Decl
caninl(c)
- return c.Func.Nname
+ return c.Func().Nname
}
return nil
}
-func staticValue(n *Node) *Node {
+func staticValue(n ir.Node) ir.Node {
for {
- if n.Op == OCONVNOP {
- n = n.Left
+ if n.Op() == ir.OCONVNOP {
+ n = n.Left()
continue
}
@@ -757,34 +747,34 @@ func staticValue(n *Node) *Node {
// staticValue1 implements a simple SSA-like optimization. If n is a local variable
// that is initialized and never reassigned, staticValue1 returns the initializer
// expression. Otherwise, it returns nil.
-func staticValue1(n *Node) *Node {
- if n.Op != ONAME || n.Class() != PAUTO || n.Name.Addrtaken() {
+func staticValue1(n ir.Node) ir.Node {
+ if n.Op() != ir.ONAME || n.Class() != ir.PAUTO || n.Name().Addrtaken() {
return nil
}
- defn := n.Name.Defn
+ defn := n.Name().Defn
if defn == nil {
return nil
}
- var rhs *Node
+ var rhs ir.Node
FindRHS:
- switch defn.Op {
- case OAS:
- rhs = defn.Right
- case OAS2:
- for i, lhs := range defn.List.Slice() {
+ switch defn.Op() {
+ case ir.OAS:
+ rhs = defn.Right()
+ case ir.OAS2:
+ for i, lhs := range defn.List().Slice() {
if lhs == n {
- rhs = defn.Rlist.Index(i)
+ rhs = defn.Rlist().Index(i)
break FindRHS
}
}
- Fatalf("%v missing from LHS of %v", n, defn)
+ base.Fatalf("%v missing from LHS of %v", n, defn)
default:
return nil
}
if rhs == nil {
- Fatalf("RHS is nil: %v", defn)
+ base.Fatalf("RHS is nil: %v", defn)
}
unsafe, _ := reassigned(n)
@@ -801,70 +791,70 @@ FindRHS:
// useful for -m output documenting the reason for inhibited optimizations.
// NB: global variables are always considered to be re-assigned.
// TODO: handle initial declaration not including an assignment and followed by a single assignment?
-func reassigned(n *Node) (bool, *Node) {
- if n.Op != ONAME {
- Fatalf("reassigned %v", n)
+func reassigned(n ir.Node) (bool, ir.Node) {
+ if n.Op() != ir.ONAME {
+ base.Fatalf("reassigned %v", n)
}
// no way to reliably check for no-reassignment of globals, assume it can be
- if n.Name.Curfn == nil {
+ if n.Name().Curfn == nil {
return true, nil
}
- f := n.Name.Curfn
+ f := n.Name().Curfn
// There just might be a good reason for this, although it can be pretty surprising:
// local variables inside a closure have Curfn pointing to the OCLOSURE node instead
// of the corresponding ODCLFUNC.
// We need to walk the function body to check for reassignments so we follow the
// linkage to the ODCLFUNC node as that is where body is held.
- if f.Op == OCLOSURE {
- f = f.Func.Closure
+ if f.Op() == ir.OCLOSURE {
+ f = f.Func().Decl
}
v := reassignVisitor{name: n}
- a := v.visitList(f.Nbody)
+ a := v.visitList(f.Body())
return a != nil, a
}
type reassignVisitor struct {
- name *Node
+ name ir.Node
}
-func (v *reassignVisitor) visit(n *Node) *Node {
+func (v *reassignVisitor) visit(n ir.Node) ir.Node {
if n == nil {
return nil
}
- switch n.Op {
- case OAS:
- if n.Left == v.name && n != v.name.Name.Defn {
+ switch n.Op() {
+ case ir.OAS:
+ if n.Left() == v.name && n != v.name.Name().Defn {
return n
}
- case OAS2, OAS2FUNC, OAS2MAPR, OAS2DOTTYPE:
- for _, p := range n.List.Slice() {
- if p == v.name && n != v.name.Name.Defn {
+ case ir.OAS2, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2DOTTYPE:
+ for _, p := range n.List().Slice() {
+ if p == v.name && n != v.name.Name().Defn {
return n
}
}
}
- if a := v.visit(n.Left); a != nil {
+ if a := v.visit(n.Left()); a != nil {
return a
}
- if a := v.visit(n.Right); a != nil {
+ if a := v.visit(n.Right()); a != nil {
return a
}
- if a := v.visitList(n.List); a != nil {
+ if a := v.visitList(n.List()); a != nil {
return a
}
- if a := v.visitList(n.Rlist); a != nil {
+ if a := v.visitList(n.Rlist()); a != nil {
return a
}
- if a := v.visitList(n.Ninit); a != nil {
+ if a := v.visitList(n.Init()); a != nil {
return a
}
- if a := v.visitList(n.Nbody); a != nil {
+ if a := v.visitList(n.Body()); a != nil {
return a
}
return nil
}
-func (v *reassignVisitor) visitList(l Nodes) *Node {
+func (v *reassignVisitor) visitList(l ir.Nodes) ir.Node {
for _, n := range l.Slice() {
if a := v.visit(n); a != nil {
return a
@@ -873,18 +863,18 @@ func (v *reassignVisitor) visitList(l Nodes) *Node {
return nil
}
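What staticValue1 computes, sketched outside the compiler: a name assigned exactly once can be replaced by its initializer, and anything reassigned (or address-taken, in the real check) is left alone. The binding/staticValue names below are invented:

package main

import "fmt"

type binding struct {
	value       string
	assignments int
}

func staticValue(env map[string]*binding, name string) (string, bool) {
	b := env[name]
	if b == nil || b.assignments != 1 {
		return "", false // unknown or reassigned: not safe to substitute
	}
	return b.value, true
}

func main() {
	env := map[string]*binding{
		"x": {value: "hello", assignments: 1},
		"y": {value: "hi", assignments: 2}, // reassigned somewhere
	}
	fmt.Println(staticValue(env, "x")) // hello true
	fmt.Println(staticValue(env, "y")) //  false
}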
-func inlParam(t *types.Field, as *Node, inlvars map[*Node]*Node) *Node {
- n := asNode(t.Nname)
- if n == nil || n.isBlank() {
- return nblank
+func inlParam(t *types.Field, as ir.Node, inlvars map[ir.Node]ir.Node) ir.Node {
+ n := ir.AsNode(t.Nname)
+ if n == nil || ir.IsBlank(n) {
+ return ir.BlankNode
}
inlvar := inlvars[n]
if inlvar == nil {
- Fatalf("missing inlvar for %v", n)
+ base.Fatalf("missing inlvar for %v", n)
}
- as.Ninit.Append(nod(ODCL, inlvar, nil))
- inlvar.Name.Defn = as
+ as.PtrInit().Append(ir.Nod(ir.ODCL, inlvar, nil))
+ inlvar.Name().Defn = as
return inlvar
}
@@ -897,33 +887,33 @@ var inlgen int
// parameters.
// The result of mkinlcall MUST be assigned back to n, e.g.
// n.Left = mkinlcall(n.Left, fn, isddd)
-func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
- if fn.Func.Inl == nil {
+func mkinlcall(n, fn ir.Node, maxCost int32, inlMap map[ir.Node]bool) ir.Node {
+ if fn.Func().Inl == nil {
if logopt.Enabled() {
- logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", Curfn.funcname(),
- fmt.Sprintf("%s cannot be inlined", fn.pkgFuncName()))
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(Curfn),
+ fmt.Sprintf("%s cannot be inlined", ir.PkgFuncName(fn)))
}
return n
}
- if fn.Func.Inl.Cost > maxCost {
+ if fn.Func().Inl.Cost > maxCost {
// The inlined function body is too big. Typically we use this check to restrict
// inlining into very big functions. See issues 26546 and 17566.
if logopt.Enabled() {
- logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", Curfn.funcname(),
- fmt.Sprintf("cost %d of %s exceeds max large caller cost %d", fn.Func.Inl.Cost, fn.pkgFuncName(), maxCost))
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(Curfn),
+ fmt.Sprintf("cost %d of %s exceeds max large caller cost %d", fn.Func().Inl.Cost, ir.PkgFuncName(fn), maxCost))
}
return n
}
- if fn == Curfn || fn.Name.Defn == Curfn {
+ if fn == Curfn || fn.Name().Defn == Curfn {
// Can't recursively inline a function into itself.
if logopt.Enabled() {
- logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", Curfn.funcname()))
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(Curfn)))
}
return n
}
- if instrumenting && isRuntimePkg(fn.Sym.Pkg) {
+ if instrumenting && isRuntimePkg(fn.Sym().Pkg) {
// Runtime package must not be instrumented.
// Instrument skips runtime package. However, some runtime code can be
// inlined into other packages and instrumented there. To avoid this,
@@ -934,8 +924,8 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
}
if inlMap[fn] {
- if Debug.m > 1 {
- fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", n.Line(), fn, Curfn.funcname())
+ if base.Flag.LowerM > 1 {
+ fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", ir.Line(n), fn, ir.FuncName(Curfn))
}
return n
}
@@ -943,115 +933,115 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
defer func() {
inlMap[fn] = false
}()
- if Debug_typecheckinl == 0 {
+ if base.Debug.TypecheckInl == 0 {
typecheckinl(fn)
}
// We have a function node, and it has an inlineable body.
- if Debug.m > 1 {
- fmt.Printf("%v: inlining call to %v %#v { %#v }\n", n.Line(), fn.Sym, fn.Type, asNodes(fn.Func.Inl.Body))
- } else if Debug.m != 0 {
- fmt.Printf("%v: inlining call to %v\n", n.Line(), fn)
+ if base.Flag.LowerM > 1 {
+ fmt.Printf("%v: inlining call to %v %#v { %#v }\n", ir.Line(n), fn.Sym(), fn.Type(), ir.AsNodes(fn.Func().Inl.Body))
+ } else if base.Flag.LowerM != 0 {
+ fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn)
}
- if Debug.m > 2 {
- fmt.Printf("%v: Before inlining: %+v\n", n.Line(), n)
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v: Before inlining: %+v\n", ir.Line(n), n)
}
- if ssaDump != "" && ssaDump == Curfn.funcname() {
+ if ssaDump != "" && ssaDump == ir.FuncName(Curfn) {
ssaDumpInlined = append(ssaDumpInlined, fn)
}
- ninit := n.Ninit
+ ninit := n.Init()
// For normal function calls, the function callee expression
// may contain side effects (e.g., added by addinit during
// inlconv2expr or inlconv2list). Make sure to preserve these,
// if necessary (#42703).
- if n.Op == OCALLFUNC {
- callee := n.Left
- for callee.Op == OCONVNOP {
- ninit.AppendNodes(&callee.Ninit)
- callee = callee.Left
+ if n.Op() == ir.OCALLFUNC {
+ callee := n.Left()
+ for callee.Op() == ir.OCONVNOP {
+ ninit.AppendNodes(callee.PtrInit())
+ callee = callee.Left()
}
- if callee.Op != ONAME && callee.Op != OCLOSURE {
- Fatalf("unexpected callee expression: %v", callee)
+ if callee.Op() != ir.ONAME && callee.Op() != ir.OCLOSURE && callee.Op() != ir.OMETHEXPR {
+ base.Fatalf("unexpected callee expression: %v", callee)
}
}
// Make temp names to use instead of the originals.
- inlvars := make(map[*Node]*Node)
+ inlvars := make(map[ir.Node]ir.Node)
// record formals/locals for later post-processing
- var inlfvars []*Node
+ var inlfvars []ir.Node
// Handle captured variables when inlining closures.
- if fn.Name.Defn != nil {
- if c := fn.Name.Defn.Func.Closure; c != nil {
- for _, v := range c.Func.Closure.Func.Cvars.Slice() {
- if v.Op == OXXX {
+ if fn.Name().Defn != nil {
+ if c := fn.Name().Defn.Func().OClosure; c != nil {
+ for _, v := range c.Func().ClosureVars.Slice() {
+ if v.Op() == ir.OXXX {
continue
}
- o := v.Name.Param.Outer
+ o := v.Name().Param.Outer
// make sure the outer param matches the inlining location
// NB: if we enabled inlining of functions containing OCLOSURE or refined
// the reassigned check via some sort of copy propagation this would most
// likely need to be changed to a loop to walk up to the correct Param
- if o == nil || (o.Name.Curfn != Curfn && o.Name.Curfn.Func.Closure != Curfn) {
- Fatalf("%v: unresolvable capture %v %v\n", n.Line(), fn, v)
+ if o == nil || (o.Name().Curfn != Curfn && o.Name().Curfn.Func().OClosure != Curfn) {
+ base.Fatalf("%v: unresolvable capture %v %v\n", ir.Line(n), fn, v)
}
- if v.Name.Byval() {
+ if v.Name().Byval() {
iv := typecheck(inlvar(v), ctxExpr)
- ninit.Append(nod(ODCL, iv, nil))
- ninit.Append(typecheck(nod(OAS, iv, o), ctxStmt))
+ ninit.Append(ir.Nod(ir.ODCL, iv, nil))
+ ninit.Append(typecheck(ir.Nod(ir.OAS, iv, o), ctxStmt))
inlvars[v] = iv
} else {
- addr := newname(lookup("&" + v.Sym.Name))
- addr.Type = types.NewPtr(v.Type)
+ addr := NewName(lookup("&" + v.Sym().Name))
+ addr.SetType(types.NewPtr(v.Type()))
ia := typecheck(inlvar(addr), ctxExpr)
- ninit.Append(nod(ODCL, ia, nil))
- ninit.Append(typecheck(nod(OAS, ia, nod(OADDR, o, nil)), ctxStmt))
+ ninit.Append(ir.Nod(ir.ODCL, ia, nil))
+ ninit.Append(typecheck(ir.Nod(ir.OAS, ia, ir.Nod(ir.OADDR, o, nil)), ctxStmt))
inlvars[addr] = ia
// When capturing by reference, all occurrences of the captured var
// must be substituted with a dereference of the temporary address.
- inlvars[v] = typecheck(nod(ODEREF, ia, nil), ctxExpr)
+ inlvars[v] = typecheck(ir.Nod(ir.ODEREF, ia, nil), ctxExpr)
}
}
}
}
- for _, ln := range fn.Func.Inl.Dcl {
- if ln.Op != ONAME {
+ for _, ln := range fn.Func().Inl.Dcl {
+ if ln.Op() != ir.ONAME {
continue
}
- if ln.Class() == PPARAMOUT { // return values handled below.
+ if ln.Class() == ir.PPARAMOUT { // return values handled below.
continue
}
- if ln.isParamStackCopy() { // ignore the on-stack copy of a parameter that moved to the heap
+ if isParamStackCopy(ln) { // ignore the on-stack copy of a parameter that moved to the heap
// TODO(mdempsky): Remove once I'm confident
// this never actually happens. We currently
// perform inlining before escape analysis, so
// nothing should have moved to the heap yet.
- Fatalf("impossible: %v", ln)
+ base.Fatalf("impossible: %v", ln)
}
inlf := typecheck(inlvar(ln), ctxExpr)
inlvars[ln] = inlf
- if genDwarfInline > 0 {
- if ln.Class() == PPARAM {
- inlf.Name.SetInlFormal(true)
+ if base.Flag.GenDwarfInl > 0 {
+ if ln.Class() == ir.PPARAM {
+ inlf.Name().SetInlFormal(true)
} else {
- inlf.Name.SetInlLocal(true)
+ inlf.Name().SetInlLocal(true)
}
- inlf.Pos = ln.Pos
+ inlf.SetPos(ln.Pos())
inlfvars = append(inlfvars, inlf)
}
}
nreturns := 0
- inspectList(asNodes(fn.Func.Inl.Body), func(n *Node) bool {
- if n != nil && n.Op == ORETURN {
+ ir.InspectList(ir.AsNodes(fn.Func().Inl.Body), func(n ir.Node) bool {
+ if n != nil && n.Op() == ir.ORETURN {
nreturns++
}
return true
@@ -1063,10 +1053,10 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
delayretvars := nreturns == 1
// temporaries for return values.
- var retvars []*Node
- for i, t := range fn.Type.Results().Fields().Slice() {
- var m *Node
- if n := asNode(t.Nname); n != nil && !n.isBlank() && !strings.HasPrefix(n.Sym.Name, "~r") {
+ var retvars []ir.Node
+ for i, t := range fn.Type().Results().Fields().Slice() {
+ var m ir.Node
+ if n := ir.AsNode(t.Nname); n != nil && !ir.IsBlank(n) && !strings.HasPrefix(n.Sym().Name, "~r") {
m = inlvar(n)
m = typecheck(m, ctxExpr)
inlvars[n] = m
@@ -1076,13 +1066,13 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
m = retvar(t, i)
}
- if genDwarfInline > 0 {
+ if base.Flag.GenDwarfInl > 0 {
// Don't update the src.Pos on a return variable if it
// was manufactured by the inliner (e.g. "~R2"); such vars
// were not part of the original callee.
- if !strings.HasPrefix(m.Sym.Name, "~R") {
- m.Name.SetInlFormal(true)
- m.Pos = t.Pos
+ if !strings.HasPrefix(m.Sym().Name, "~R") {
+ m.Name().SetInlFormal(true)
+ m.SetPos(t.Pos)
inlfvars = append(inlfvars, m)
}
}
@@ -1091,53 +1081,53 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
}
// Assign arguments to the parameters' temp names.
- as := nod(OAS2, nil, nil)
+ as := ir.Nod(ir.OAS2, nil, nil)
as.SetColas(true)
- if n.Op == OCALLMETH {
- if n.Left.Left == nil {
- Fatalf("method call without receiver: %+v", n)
+ if n.Op() == ir.OCALLMETH {
+ if n.Left().Left() == nil {
+ base.Fatalf("method call without receiver: %+v", n)
}
- as.Rlist.Append(n.Left.Left)
+ as.PtrRlist().Append(n.Left().Left())
}
- as.Rlist.Append(n.List.Slice()...)
+ as.PtrRlist().Append(n.List().Slice()...)
// For non-dotted calls to variadic functions, we assign the
// variadic parameter's temp name separately.
- var vas *Node
+ var vas ir.Node
- if recv := fn.Type.Recv(); recv != nil {
- as.List.Append(inlParam(recv, as, inlvars))
+ if recv := fn.Type().Recv(); recv != nil {
+ as.PtrList().Append(inlParam(recv, as, inlvars))
}
- for _, param := range fn.Type.Params().Fields().Slice() {
+ for _, param := range fn.Type().Params().Fields().Slice() {
// For ordinary parameters or variadic parameters in
// dotted calls, just add the variable to the
// assignment list, and we're done.
if !param.IsDDD() || n.IsDDD() {
- as.List.Append(inlParam(param, as, inlvars))
+ as.PtrList().Append(inlParam(param, as, inlvars))
continue
}
// Otherwise, we need to collect the remaining values
// to pass as a slice.
- x := as.List.Len()
- for as.List.Len() < as.Rlist.Len() {
- as.List.Append(argvar(param.Type, as.List.Len()))
+ x := as.List().Len()
+ for as.List().Len() < as.Rlist().Len() {
+ as.PtrList().Append(argvar(param.Type, as.List().Len()))
}
- varargs := as.List.Slice()[x:]
+ varargs := as.List().Slice()[x:]
- vas = nod(OAS, nil, nil)
- vas.Left = inlParam(param, vas, inlvars)
+ vas = ir.Nod(ir.OAS, nil, nil)
+ vas.SetLeft(inlParam(param, vas, inlvars))
if len(varargs) == 0 {
- vas.Right = nodnil()
- vas.Right.Type = param.Type
+ vas.SetRight(nodnil())
+ vas.Right().SetType(param.Type)
} else {
- vas.Right = nod(OCOMPLIT, nil, typenod(param.Type))
- vas.Right.List.Set(varargs)
+ vas.SetRight(ir.Nod(ir.OCOMPLIT, nil, typenod(param.Type)))
+ vas.Right().PtrList().Set(varargs)
}
}
- if as.Rlist.Len() != 0 {
+ if as.Rlist().Len() != 0 {
as = typecheck(as, ctxStmt)
ninit.Append(as)
}
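At the source level, the variadic case just built amounts to packing the trailing arguments into a slice before the inlined body runs; a conceptual before/after, with sum as a hypothetical variadic callee:

package main

import "fmt"

func main() {
	// Before inlining: total := sum(1, 2, 3) for a hypothetical
	// func sum(xs ...int) int. After inlining, conceptually:
	xs := []int{1, 2, 3} // the slice the OCOMPLIT above builds
	total := 0
	for _, x := range xs {
		total += x
	}
	fmt.Println(total) // 6
}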
@@ -1150,8 +1140,8 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
if !delayretvars {
// Zero the return parameters.
for _, n := range retvars {
- ninit.Append(nod(ODCL, n, nil))
- ras := nod(OAS, n, nil)
+ ninit.Append(ir.Nod(ir.ODCL, n, nil))
+ ras := ir.Nod(ir.OAS, n, nil)
ras = typecheck(ras, ctxStmt)
ninit.Append(ras)
}
@@ -1162,25 +1152,25 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
inlgen++
parent := -1
- if b := Ctxt.PosTable.Pos(n.Pos).Base(); b != nil {
+ if b := base.Ctxt.PosTable.Pos(n.Pos()).Base(); b != nil {
parent = b.InliningIndex()
}
- newIndex := Ctxt.InlTree.Add(parent, n.Pos, fn.Sym.Linksym())
+ newIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), fn.Sym().Linksym())
// Add an inline mark just before the inlined body.
// This mark is inline in the code so that it's a reasonable spot
// to put a breakpoint. Not sure if that's really necessary or not
// (in which case it could go at the end of the function instead).
// Note issue 28603.
- inlMark := nod(OINLMARK, nil, nil)
- inlMark.Pos = n.Pos.WithIsStmt()
- inlMark.Xoffset = int64(newIndex)
+ inlMark := ir.Nod(ir.OINLMARK, nil, nil)
+ inlMark.SetPos(n.Pos().WithIsStmt())
+ inlMark.SetOffset(int64(newIndex))
ninit.Append(inlMark)
- if genDwarfInline > 0 {
- if !fn.Sym.Linksym().WasInlined() {
- Ctxt.DwFixups.SetPrecursorFunc(fn.Sym.Linksym(), fn)
- fn.Sym.Linksym().Set(obj.AttrWasInlined, true)
+ if base.Flag.GenDwarfInl > 0 {
+ if !fn.Sym().Linksym().WasInlined() {
+ base.Ctxt.DwFixups.SetPrecursorFunc(fn.Sym().Linksym(), fn)
+ fn.Sym().Linksym().Set(obj.AttrWasInlined, true)
}
}
@@ -1193,26 +1183,26 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
newInlIndex: newIndex,
}
- body := subst.list(asNodes(fn.Func.Inl.Body))
+ body := subst.list(ir.AsNodes(fn.Func().Inl.Body))
- lab := nodSym(OLABEL, nil, retlabel)
+ lab := nodSym(ir.OLABEL, nil, retlabel)
body = append(body, lab)
typecheckslice(body, ctxStmt)
- if genDwarfInline > 0 {
+ if base.Flag.GenDwarfInl > 0 {
for _, v := range inlfvars {
- v.Pos = subst.updatedPos(v.Pos)
+ v.SetPos(subst.updatedPos(v.Pos()))
}
}
//dumplist("ninit post", ninit);
- call := nod(OINLCALL, nil, nil)
- call.Ninit.Set(ninit.Slice())
- call.Nbody.Set(body)
- call.Rlist.Set(retvars)
- call.Type = n.Type
+ call := ir.Nod(ir.OINLCALL, nil, nil)
+ call.PtrInit().Set(ninit.Slice())
+ call.PtrBody().Set(body)
+ call.PtrRlist().Set(retvars)
+ call.SetType(n.Type())
call.SetTypecheck(1)
// transitive inlining
@@ -1221,15 +1211,15 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
// instead we emit the things that the body needs
// and each use must redo the inlining.
// luckily these are small.
- inlnodelist(call.Nbody, maxCost, inlMap)
- for _, n := range call.Nbody.Slice() {
- if n.Op == OINLCALL {
+ inlnodelist(call.Body(), maxCost, inlMap)
+ for _, n := range call.Body().Slice() {
+ if n.Op() == ir.OINLCALL {
inlconv2stmt(n)
}
}
- if Debug.m > 2 {
- fmt.Printf("%v: After inlining %+v\n\n", call.Line(), call)
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v: After inlining %+v\n\n", ir.Line(call), call)
}
return call
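Roughly what the OINLCALL assembled here means once substitution is done: parameter temporaries, result temporaries, and every return rewritten as an assignment plus a goto to the trailing label. A hand-inlined max(3, 5), purely conceptual:

package main

import "fmt"

func main() {
	a, b := 3, 5 // parameter temps (the OAS2 built earlier)
	var r0 int   // result temp, like the ~R variables above
	if a > b {
		r0 = a
		goto ret // each ORETURN becomes assignment + OGOTO
	}
	r0 = b
ret:
	fmt.Println(r0) // 5
}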
@@ -1238,42 +1228,42 @@ func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
// Every time we expand a function we generate a new set of tmpnames,
// PAUTOs in the calling function, and link them off of the
// PPARAMs, PAUTOs, and PPARAMOUTs of the called function.
-func inlvar(var_ *Node) *Node {
- if Debug.m > 3 {
+func inlvar(var_ ir.Node) ir.Node {
+ if base.Flag.LowerM > 3 {
fmt.Printf("inlvar %+v\n", var_)
}
- n := newname(var_.Sym)
- n.Type = var_.Type
- n.SetClass(PAUTO)
- n.Name.SetUsed(true)
- n.Name.Curfn = Curfn // the calling function, not the called one
- n.Name.SetAddrtaken(var_.Name.Addrtaken())
+ n := NewName(var_.Sym())
+ n.SetType(var_.Type())
+ n.SetClass(ir.PAUTO)
+ n.Name().SetUsed(true)
+ n.Name().Curfn = Curfn // the calling function, not the called one
+ n.Name().SetAddrtaken(var_.Name().Addrtaken())
- Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
+ Curfn.Func().Dcl = append(Curfn.Func().Dcl, n)
return n
}
// Synthesize a variable to store the inlined function's results in.
-func retvar(t *types.Field, i int) *Node {
- n := newname(lookupN("~R", i))
- n.Type = t.Type
- n.SetClass(PAUTO)
- n.Name.SetUsed(true)
- n.Name.Curfn = Curfn // the calling function, not the called one
- Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
+func retvar(t *types.Field, i int) ir.Node {
+ n := NewName(lookupN("~R", i))
+ n.SetType(t.Type)
+ n.SetClass(ir.PAUTO)
+ n.Name().SetUsed(true)
+ n.Name().Curfn = Curfn // the calling function, not the called one
+ Curfn.Func().Dcl = append(Curfn.Func().Dcl, n)
return n
}
// Synthesize a variable to store the inlined function's arguments
// when they come from a multiple return call.
-func argvar(t *types.Type, i int) *Node {
- n := newname(lookupN("~arg", i))
- n.Type = t.Elem()
- n.SetClass(PAUTO)
- n.Name.SetUsed(true)
- n.Name.Curfn = Curfn // the calling function, not the called one
- Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
+func argvar(t *types.Type, i int) ir.Node {
+ n := NewName(lookupN("~arg", i))
+ n.SetType(t.Elem())
+ n.SetClass(ir.PAUTO)
+ n.Name().SetUsed(true)
+ n.Name().Curfn = Curfn // the calling function, not the called one
+ Curfn.Func().Dcl = append(Curfn.Func().Dcl, n)
return n
}
@@ -1284,13 +1274,13 @@ type inlsubst struct {
retlabel *types.Sym
// Temporary result variables.
- retvars []*Node
+ retvars []ir.Node
// Whether result variables should be initialized at the
// "return" statement.
delayretvars bool
- inlvars map[*Node]*Node
+ inlvars map[ir.Node]ir.Node
// bases maps from original PosBase to PosBase with an extra
// inlined call frame.
@@ -1302,8 +1292,8 @@ type inlsubst struct {
}
// list inlines a list of nodes.
-func (subst *inlsubst) list(ll Nodes) []*Node {
- s := make([]*Node, 0, ll.Len())
+func (subst *inlsubst) list(ll ir.Nodes) []ir.Node {
+ s := make([]ir.Node, 0, ll.Len())
for _, n := range ll.Slice() {
s = append(s, subst.node(n))
}
@@ -1314,98 +1304,101 @@ func (subst *inlsubst) list(ll Nodes) []*Node {
// inlined function, substituting references to input/output
// parameters with ones to the tmpnames, and substituting returns with
// assignments to the output.
-func (subst *inlsubst) node(n *Node) *Node {
+func (subst *inlsubst) node(n ir.Node) ir.Node {
if n == nil {
return nil
}
- switch n.Op {
- case ONAME:
+ switch n.Op() {
+ case ir.ONAME:
if inlvar := subst.inlvars[n]; inlvar != nil { // These will be set during inlnode
- if Debug.m > 2 {
+ if base.Flag.LowerM > 2 {
fmt.Printf("substituting name %+v -> %+v\n", n, inlvar)
}
return inlvar
}
- if Debug.m > 2 {
+ if base.Flag.LowerM > 2 {
fmt.Printf("not substituting name %+v\n", n)
}
return n
- case OLITERAL, OTYPE:
+ case ir.OMETHEXPR:
+ return n
+
+ case ir.OLITERAL, ir.ONIL, ir.OTYPE:
// If n is a named constant or type, we can continue
// using it in the inline copy. Otherwise, make a copy
// so we can update the line number.
- if n.Sym != nil {
+ if n.Sym() != nil {
return n
}
// Since we don't handle bodies with closures, this return is guaranteed to belong to the current inlined function.
// dump("Return before substitution", n);
- case ORETURN:
- m := nodSym(OGOTO, nil, subst.retlabel)
- m.Ninit.Set(subst.list(n.Ninit))
+ case ir.ORETURN:
+ m := nodSym(ir.OGOTO, nil, subst.retlabel)
+ m.PtrInit().Set(subst.list(n.Init()))
- if len(subst.retvars) != 0 && n.List.Len() != 0 {
- as := nod(OAS2, nil, nil)
+ if len(subst.retvars) != 0 && n.List().Len() != 0 {
+ as := ir.Nod(ir.OAS2, nil, nil)
// Make a shallow copy of retvars.
// Otherwise OINLCALL.Rlist will be the same list,
// and later walk and typecheck may clobber it.
for _, n := range subst.retvars {
- as.List.Append(n)
+ as.PtrList().Append(n)
}
- as.Rlist.Set(subst.list(n.List))
+ as.PtrRlist().Set(subst.list(n.List()))
if subst.delayretvars {
- for _, n := range as.List.Slice() {
- as.Ninit.Append(nod(ODCL, n, nil))
- n.Name.Defn = as
+ for _, n := range as.List().Slice() {
+ as.PtrInit().Append(ir.Nod(ir.ODCL, n, nil))
+ n.Name().Defn = as
}
}
as = typecheck(as, ctxStmt)
- m.Ninit.Append(as)
+ m.PtrInit().Append(as)
}
- typecheckslice(m.Ninit.Slice(), ctxStmt)
+ typecheckslice(m.Init().Slice(), ctxStmt)
m = typecheck(m, ctxStmt)
// dump("Return after substitution", m);
return m
- case OGOTO, OLABEL:
- m := n.copy()
- m.Pos = subst.updatedPos(m.Pos)
- m.Ninit.Set(nil)
- p := fmt.Sprintf("%s·%d", n.Sym.Name, inlgen)
- m.Sym = lookup(p)
+ case ir.OGOTO, ir.OLABEL:
+ m := ir.Copy(n)
+ m.SetPos(subst.updatedPos(m.Pos()))
+ m.PtrInit().Set(nil)
+ p := fmt.Sprintf("%s·%d", n.Sym().Name, inlgen)
+ m.SetSym(lookup(p))
return m
}
- m := n.copy()
- m.Pos = subst.updatedPos(m.Pos)
- m.Ninit.Set(nil)
+ m := ir.Copy(n)
+ m.SetPos(subst.updatedPos(m.Pos()))
+ m.PtrInit().Set(nil)
- if n.Op == OCLOSURE {
- Fatalf("cannot inline function containing closure: %+v", n)
+ if n.Op() == ir.OCLOSURE {
+ base.Fatalf("cannot inline function containing closure: %+v", n)
}
- m.Left = subst.node(n.Left)
- m.Right = subst.node(n.Right)
- m.List.Set(subst.list(n.List))
- m.Rlist.Set(subst.list(n.Rlist))
- m.Ninit.Set(append(m.Ninit.Slice(), subst.list(n.Ninit)...))
- m.Nbody.Set(subst.list(n.Nbody))
+ m.SetLeft(subst.node(n.Left()))
+ m.SetRight(subst.node(n.Right()))
+ m.PtrList().Set(subst.list(n.List()))
+ m.PtrRlist().Set(subst.list(n.Rlist()))
+ m.PtrInit().Set(append(m.Init().Slice(), subst.list(n.Init())...))
+ m.PtrBody().Set(subst.list(n.Body()))
return m
}
func (subst *inlsubst) updatedPos(xpos src.XPos) src.XPos {
- pos := Ctxt.PosTable.Pos(xpos)
+ pos := base.Ctxt.PosTable.Pos(xpos)
oldbase := pos.Base() // can be nil
newbase := subst.bases[oldbase]
if newbase == nil {
@@ -1413,13 +1406,13 @@ func (subst *inlsubst) updatedPos(xpos src.XPos) src.XPos {
subst.bases[oldbase] = newbase
}
pos.SetBase(newbase)
- return Ctxt.PosTable.XPos(pos)
+ return base.Ctxt.PosTable.XPos(pos)
}
-func pruneUnusedAutos(ll []*Node, vis *hairyVisitor) []*Node {
- s := make([]*Node, 0, len(ll))
+func pruneUnusedAutos(ll []ir.Node, vis *hairyVisitor) []ir.Node {
+ s := make([]ir.Node, 0, len(ll))
for _, n := range ll {
- if n.Class() == PAUTO {
+ if n.Class() == ir.PAUTO {
if _, found := vis.usedLocals[n]; !found {
continue
}
@@ -1431,49 +1424,49 @@ func pruneUnusedAutos(ll []*Node, vis *hairyVisitor) []*Node {
// devirtualize replaces interface method calls within fn with direct
// concrete-type method calls where applicable.
-func devirtualize(fn *Node) {
+func devirtualize(fn ir.Node) {
Curfn = fn
- inspectList(fn.Nbody, func(n *Node) bool {
- if n.Op == OCALLINTER {
+ ir.InspectList(fn.Body(), func(n ir.Node) bool {
+ if n.Op() == ir.OCALLINTER {
devirtualizeCall(n)
}
return true
})
}
-func devirtualizeCall(call *Node) {
- recv := staticValue(call.Left.Left)
- if recv.Op != OCONVIFACE {
+func devirtualizeCall(call ir.Node) {
+ recv := staticValue(call.Left().Left())
+ if recv.Op() != ir.OCONVIFACE {
return
}
- typ := recv.Left.Type
+ typ := recv.Left().Type()
if typ.IsInterface() {
return
}
- x := nodl(call.Left.Pos, ODOTTYPE, call.Left.Left, nil)
- x.Type = typ
- x = nodlSym(call.Left.Pos, OXDOT, x, call.Left.Sym)
+ x := ir.NodAt(call.Left().Pos(), ir.ODOTTYPE, call.Left().Left(), nil)
+ x.SetType(typ)
+ x = nodlSym(call.Left().Pos(), ir.OXDOT, x, call.Left().Sym())
x = typecheck(x, ctxExpr|ctxCallee)
- switch x.Op {
- case ODOTMETH:
- if Debug.m != 0 {
- Warnl(call.Pos, "devirtualizing %v to %v", call.Left, typ)
- }
- call.Op = OCALLMETH
- call.Left = x
- case ODOTINTER:
+ switch x.Op() {
+ case ir.ODOTMETH:
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(call.Pos(), "devirtualizing %v to %v", call.Left(), typ)
+ }
+ call.SetOp(ir.OCALLMETH)
+ call.SetLeft(x)
+ case ir.ODOTINTER:
// Promoted method from embedded interface-typed field (#42279).
- if Debug.m != 0 {
- Warnl(call.Pos, "partially devirtualizing %v to %v", call.Left, typ)
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(call.Pos(), "partially devirtualizing %v to %v", call.Left(), typ)
}
- call.Op = OCALLINTER
- call.Left = x
+ call.SetOp(ir.OCALLINTER)
+ call.SetLeft(x)
default:
// TODO(mdempsky): Turn back into Fatalf after more testing.
- if Debug.m != 0 {
- Warnl(call.Pos, "failed to devirtualize %v (%v)", x, x.Op)
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(call.Pos(), "failed to devirtualize %v (%v)", x, x.Op())
}
return
}
@@ -1484,12 +1477,12 @@ func devirtualizeCall(call *Node) {
// Receiver parameter size may have changed; need to update
// call.Type to get correct stack offsets for result
// parameters.
- checkwidth(x.Type)
- switch ft := x.Type; ft.NumResults() {
+ checkwidth(x.Type())
+ switch ft := x.Type(); ft.NumResults() {
case 0:
case 1:
- call.Type = ft.Results().Field(0).Type
+ call.SetType(ft.Results().Field(0).Type)
default:
- call.Type = ft.Results()
+ call.SetType(ft.Results())
}
}
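devirtualizeCall above rewrites an interface method call whose receiver provably holds a single concrete type into a type assertion plus a direct call. A source-level sketch of the transformation (the io.Reader example and buf are illustrative):

	var r io.Reader = strings.NewReader("hello") // an OCONVIFACE of a concrete *strings.Reader
	n, err := r.Read(buf)                        // OCALLINTER through the interface
	// after devirtualization, the call compiles as if written:
	//	n, err := r.(*strings.Reader).Read(buf)  // ODOTTYPE receiver, direct OCALLMETH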
diff --git a/src/cmd/compile/internal/gc/lex.go b/src/cmd/compile/internal/gc/lex.go
index 7cce371408..39d73867e4 100644
--- a/src/cmd/compile/internal/gc/lex.go
+++ b/src/cmd/compile/internal/gc/lex.go
@@ -5,6 +5,8 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
"cmd/internal/objabi"
"cmd/internal/src"
@@ -12,12 +14,8 @@ import (
"strings"
)
-// lineno is the source position at the start of the most recently lexed token.
-// TODO(gri) rename and eventually remove
-var lineno src.XPos
-
-func makePos(base *src.PosBase, line, col uint) src.XPos {
- return Ctxt.PosTable.XPos(src.MakePos(base, line, col))
+func makePos(b *src.PosBase, line, col uint) src.XPos {
+ return base.Ctxt.PosTable.XPos(src.MakePos(b, line, col))
}
func isSpace(c rune) bool {
@@ -28,78 +26,51 @@ func isQuoted(s string) bool {
return len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"'
}
-type PragmaFlag int16
-
const (
- // Func pragmas.
- Nointerface PragmaFlag = 1 << iota
- Noescape // func parameters don't escape
- Norace // func must not have race detector annotations
- Nosplit // func should not execute on separate stack
- Noinline // func should not be inlined
- NoCheckPtr // func should not be instrumented by checkptr
- CgoUnsafeArgs // treat a pointer to one arg as a pointer to them all
- UintptrEscapes // pointers converted to uintptr escape
-
- // Runtime-only func pragmas.
- // See ../../../../runtime/README.md for detailed descriptions.
- Systemstack // func must run on system stack
- Nowritebarrier // emit compiler error instead of write barrier
- Nowritebarrierrec // error on write barrier in this or recursive callees
- Yeswritebarrierrec // cancels Nowritebarrierrec in this function and callees
-
- // Runtime and cgo type pragmas
- NotInHeap // values of this type must not be heap allocated
-
- // Go command pragmas
- GoBuildPragma
-)
-
-const (
- FuncPragmas = Nointerface |
- Noescape |
- Norace |
- Nosplit |
- Noinline |
- NoCheckPtr |
- CgoUnsafeArgs |
- UintptrEscapes |
- Systemstack |
- Nowritebarrier |
- Nowritebarrierrec |
- Yeswritebarrierrec
-
- TypePragmas = NotInHeap
+ FuncPragmas = ir.Nointerface |
+ ir.Noescape |
+ ir.Norace |
+ ir.Nosplit |
+ ir.Noinline |
+ ir.NoCheckPtr |
+ ir.CgoUnsafeArgs |
+ ir.UintptrEscapes |
+ ir.Systemstack |
+ ir.Nowritebarrier |
+ ir.Nowritebarrierrec |
+ ir.Yeswritebarrierrec
+
+ TypePragmas = ir.NotInHeap
)
-func pragmaFlag(verb string) PragmaFlag {
+func pragmaFlag(verb string) ir.PragmaFlag {
switch verb {
case "go:build":
- return GoBuildPragma
+ return ir.GoBuildPragma
case "go:nointerface":
if objabi.Fieldtrack_enabled != 0 {
- return Nointerface
+ return ir.Nointerface
}
case "go:noescape":
- return Noescape
+ return ir.Noescape
case "go:norace":
- return Norace
+ return ir.Norace
case "go:nosplit":
- return Nosplit | NoCheckPtr // implies NoCheckPtr (see #34972)
+ return ir.Nosplit | ir.NoCheckPtr // implies NoCheckPtr (see #34972)
case "go:noinline":
- return Noinline
+ return ir.Noinline
case "go:nocheckptr":
- return NoCheckPtr
+ return ir.NoCheckPtr
case "go:systemstack":
- return Systemstack
+ return ir.Systemstack
case "go:nowritebarrier":
- return Nowritebarrier
+ return ir.Nowritebarrier
case "go:nowritebarrierrec":
- return Nowritebarrierrec | Nowritebarrier // implies Nowritebarrier
+ return ir.Nowritebarrierrec | ir.Nowritebarrier // implies Nowritebarrier
case "go:yeswritebarrierrec":
- return Yeswritebarrierrec
+ return ir.Yeswritebarrierrec
case "go:cgo_unsafe_args":
- return CgoUnsafeArgs | NoCheckPtr // implies NoCheckPtr (see #34968)
+ return ir.CgoUnsafeArgs | ir.NoCheckPtr // implies NoCheckPtr (see #34968)
case "go:uintptrescapes":
// For the next function declared in the file
// any uintptr arguments may be pointer values
@@ -112,9 +83,9 @@ func pragmaFlag(verb string) PragmaFlag {
// call. The conversion to uintptr must appear
// in the argument list.
// Used in syscall/dll_windows.go.
- return UintptrEscapes
+ return ir.UintptrEscapes
case "go:notinheap":
- return NotInHeap
+ return ir.NotInHeap
}
return 0
}
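pragmaFlag's behavior is unchanged by the move of the flag bits into package ir: it still maps //go: directive verbs to bits, with some verbs implying others. A small sketch (not code from this CL):

	flags := pragmaFlag("go:nosplit")
	fmt.Println(flags&ir.Nosplit != 0)    // true
	fmt.Println(flags&ir.NoCheckPtr != 0) // true: nosplit implies nocheckptr (#34972)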
diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
index 7015d9d6cd..6b7123dc71 100644
--- a/src/cmd/compile/internal/gc/main.go
+++ b/src/cmd/compile/internal/gc/main.go
@@ -9,6 +9,8 @@ package gc
import (
"bufio"
"bytes"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
@@ -18,9 +20,9 @@ import (
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
- "cmd/internal/sys"
"flag"
"fmt"
+ "go/constant"
"internal/goversion"
"io"
"io/ioutil"
@@ -34,111 +36,20 @@ import (
"strings"
)
-var (
- buildid string
- spectre string
- spectreIndex bool
-)
-
-var (
- Debug_append int
- Debug_checkptr int
- Debug_closure int
- Debug_compilelater int
- debug_dclstack int
- Debug_dumpptrs int
- Debug_libfuzzer int
- Debug_panic int
- Debug_slice int
- Debug_vlog bool
- Debug_wb int
- Debug_pctab string
- Debug_locationlist int
- Debug_typecheckinl int
- Debug_gendwarfinl int
- Debug_softfloat int
- Debug_defer int
-)
-
-// Debug arguments.
-// These can be specified with the -d flag, as in "-d nil"
-// to set the debug_checknil variable.
-// Multiple options can be comma-separated.
-// Each option accepts an optional argument, as in "gcprog=2"
-var debugtab = []struct {
- name string
- help string
- val interface{} // must be *int or *string
-}{
- {"append", "print information about append compilation", &Debug_append},
- {"checkptr", "instrument unsafe pointer conversions", &Debug_checkptr},
- {"closure", "print information about closure compilation", &Debug_closure},
- {"compilelater", "compile functions as late as possible", &Debug_compilelater},
- {"disablenil", "disable nil checks", &disable_checknil},
- {"dclstack", "run internal dclstack check", &debug_dclstack},
- {"dumpptrs", "show Node pointer values in Dump/dumplist output", &Debug_dumpptrs},
- {"gcprog", "print dump of GC programs", &Debug_gcprog},
- {"libfuzzer", "coverage instrumentation for libfuzzer", &Debug_libfuzzer},
- {"nil", "print information about nil checks", &Debug_checknil},
- {"panic", "do not hide any compiler panic", &Debug_panic},
- {"slice", "print information about slice compilation", &Debug_slice},
- {"typeassert", "print information about type assertion inlining", &Debug_typeassert},
- {"wb", "print information about write barriers", &Debug_wb},
- {"export", "print export data", &Debug_export},
- {"pctab", "print named pc-value table", &Debug_pctab},
- {"locationlists", "print information about DWARF location list creation", &Debug_locationlist},
- {"typecheckinl", "eager typechecking of inline function bodies", &Debug_typecheckinl},
- {"dwarfinl", "print information about DWARF inlined function creation", &Debug_gendwarfinl},
- {"softfloat", "force compiler to emit soft-float code", &Debug_softfloat},
- {"defer", "print information about defer compilation", &Debug_defer},
- {"fieldtrack", "enable fieldtracking", &objabi.Fieldtrack_enabled},
-}
-
-const debugHelpHeader = `usage: -d arg[,arg]* and arg is <key>[=<value>]
-
-<key> is one of:
-
-`
-
-const debugHelpFooter = `
-<value> is key-specific.
-
-Key "checkptr" supports values:
- "0": instrumentation disabled
- "1": conversions involving unsafe.Pointer are instrumented
- "2": conversions to unsafe.Pointer force heap allocation
-
-Key "pctab" supports values:
- "pctospadj", "pctofile", "pctoline", "pctoinline", "pctopcdata"
-`
-
-func usage() {
- fmt.Fprintf(os.Stderr, "usage: compile [options] file.go...\n")
- objabi.Flagprint(os.Stderr)
- Exit(2)
-}
-
func hidePanic() {
- if Debug_panic == 0 && nsavederrors+nerrors > 0 {
+ if base.Debug.Panic == 0 && base.Errors() > 0 {
// If we've already complained about things
// in the program, don't bother complaining
// about a panic too; let the user clean up
// the code and try again.
if err := recover(); err != nil {
- errorexit()
+ base.ErrorExit()
}
}
}
-// supportsDynlink reports whether or not the code generator for the given
-// architecture supports the -shared and -dynlink flags.
-func supportsDynlink(arch *sys.Arch) bool {
- return arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X)
-}
-
// timing data for compiler phases
var timings Timings
-var benchfile string
var nowritebarrierrecCheck *nowritebarrierrecChecker
@@ -152,28 +63,28 @@ func Main(archInit func(*Arch)) {
archInit(&thearch)
- Ctxt = obj.Linknew(thearch.LinkArch)
- Ctxt.DiagFunc = yyerror
- Ctxt.DiagFlush = flusherrors
- Ctxt.Bso = bufio.NewWriter(os.Stdout)
+ base.Ctxt = obj.Linknew(thearch.LinkArch)
+ base.Ctxt.DiagFunc = base.Errorf
+ base.Ctxt.DiagFlush = base.FlushErrors
+ base.Ctxt.Bso = bufio.NewWriter(os.Stdout)
// UseBASEntries is preferred because it shaves about 2% off build time, but LLDB, dsymutil, and dwarfdump
// on Darwin don't support it properly, especially since macOS 10.14 (Mojave). This is exposed as a flag
// to allow testing with LLVM tools on Linux, and to help with reporting this bug to the LLVM project.
// See bugs 31188 and 21945 (CLs 170638, 98075, 72371).
- Ctxt.UseBASEntries = Ctxt.Headtype != objabi.Hdarwin
+ base.Ctxt.UseBASEntries = base.Ctxt.Headtype != objabi.Hdarwin
- localpkg = types.NewPkg("", "")
- localpkg.Prefix = "\"\""
+ ir.LocalPkg = types.NewPkg("", "")
+ ir.LocalPkg.Prefix = "\"\""
// We won't know localpkg's height until after import
// processing. In the meantime, set to MaxPkgHeight to ensure
// height comparisons at least work until then.
- localpkg.Height = types.MaxPkgHeight
+ ir.LocalPkg.Height = types.MaxPkgHeight
// pseudo-package, for scoping
- builtinpkg = types.NewPkg("go.builtin", "") // TODO(gri) name this package go.builtin?
- builtinpkg.Prefix = "go.builtin" // not go%2ebuiltin
+ ir.BuiltinPkg = types.NewPkg("go.builtin", "") // TODO(gri) name this package go.builtin?
+ ir.BuiltinPkg.Prefix = "go.builtin" // not go%2ebuiltin
// pseudo-package, accessed by import "unsafe"
unsafepkg = types.NewPkg("unsafe", "unsafe")
@@ -203,321 +114,75 @@ func Main(archInit func(*Arch)) {
// pseudo-package used for methods with anonymous receivers
gopkg = types.NewPkg("go", "")
- Wasm := objabi.GOARCH == "wasm"
-
- // Whether the limit for stack-allocated objects is much smaller than normal.
- // This can be helpful for diagnosing certain causes of GC latency. See #27732.
- smallFrames := false
- jsonLogOpt := ""
-
- flag.BoolVar(&compiling_runtime, "+", false, "compiling runtime")
- flag.BoolVar(&compiling_std, "std", false, "compiling standard library")
- flag.StringVar(&localimport, "D", "", "set relative `path` for local imports")
-
- objabi.Flagcount("%", "debug non-static initializers", &Debug.P)
- objabi.Flagcount("B", "disable bounds checking", &Debug.B)
- objabi.Flagcount("C", "disable printing of columns in error messages", &Debug.C)
- objabi.Flagcount("E", "debug symbol export", &Debug.E)
- objabi.Flagcount("G", "accept generic code", &Debug.G)
- objabi.Flagcount("K", "debug missing line numbers", &Debug.K)
- objabi.Flagcount("L", "show full file names in error messages", &Debug.L)
- objabi.Flagcount("N", "disable optimizations", &Debug.N)
- objabi.Flagcount("S", "print assembly listing", &Debug.S)
- objabi.Flagcount("W", "debug parse tree after type checking", &Debug.W)
- objabi.Flagcount("e", "no limit on number of errors reported", &Debug.e)
- objabi.Flagcount("h", "halt on error", &Debug.h)
- objabi.Flagcount("j", "debug runtime-initialized variables", &Debug.j)
- objabi.Flagcount("l", "disable inlining", &Debug.l)
- objabi.Flagcount("m", "print optimization decisions", &Debug.m)
- objabi.Flagcount("r", "debug generated wrappers", &Debug.r)
- objabi.Flagcount("w", "debug type checking", &Debug.w)
-
- objabi.Flagfn1("I", "add `directory` to import search path", addidir)
- objabi.AddVersionFlag() // -V
- flag.StringVar(&asmhdr, "asmhdr", "", "write assembly header to `file`")
- flag.StringVar(&buildid, "buildid", "", "record `id` as the build id in the export metadata")
- flag.IntVar(&nBackendWorkers, "c", 1, "concurrency during compilation, 1 means no concurrency")
- flag.BoolVar(&pure_go, "complete", false, "compiling complete package (no C or assembly)")
- flag.StringVar(&debugstr, "d", "", "print debug information about items in `list`; try -d help")
- flag.BoolVar(&flagDWARF, "dwarf", !Wasm, "generate DWARF symbols")
- flag.BoolVar(&Ctxt.Flag_locationlists, "dwarflocationlists", true, "add location lists to DWARF in optimized mode")
- flag.IntVar(&genDwarfInline, "gendwarfinl", 2, "generate DWARF inline info records")
- objabi.Flagfn1("embedcfg", "read go:embed configuration from `file`", readEmbedCfg)
- objabi.Flagfn1("importmap", "add `definition` of the form source=actual to import map", addImportMap)
- objabi.Flagfn1("importcfg", "read import configuration from `file`", readImportCfg)
- flag.StringVar(&flag_installsuffix, "installsuffix", "", "set pkg directory `suffix`")
- flag.StringVar(&flag_lang, "lang", "", "release to compile for")
- flag.StringVar(&linkobj, "linkobj", "", "write linker-specific object to `file`")
- objabi.Flagcount("live", "debug liveness analysis", &debuglive)
- if sys.MSanSupported(objabi.GOOS, objabi.GOARCH) {
- flag.BoolVar(&flag_msan, "msan", false, "build code compatible with C/C++ memory sanitizer")
- }
- flag.BoolVar(&nolocalimports, "nolocalimports", false, "reject local (relative) imports")
- flag.StringVar(&outfile, "o", "", "write output to `file`")
- flag.StringVar(&myimportpath, "p", "", "set expected package import `path`")
- flag.BoolVar(&writearchive, "pack", false, "write to file.a instead of file.o")
- if sys.RaceDetectorSupported(objabi.GOOS, objabi.GOARCH) {
- flag.BoolVar(&flag_race, "race", false, "enable race detector")
- }
- flag.StringVar(&spectre, "spectre", spectre, "enable spectre mitigations in `list` (all, index, ret)")
- if enableTrace {
- flag.BoolVar(&trace, "t", false, "trace type-checking")
- }
- flag.StringVar(&pathPrefix, "trimpath", "", "remove `prefix` from recorded source file paths")
- flag.BoolVar(&Debug_vlog, "v", false, "increase debug verbosity")
- flag.BoolVar(&use_writebarrier, "wb", true, "enable write barrier")
- var flag_shared bool
- var flag_dynlink bool
- if supportsDynlink(thearch.LinkArch.Arch) {
- flag.BoolVar(&flag_shared, "shared", false, "generate code that can be linked into a shared library")
- flag.BoolVar(&flag_dynlink, "dynlink", false, "support references to Go symbols defined in other shared libraries")
- flag.BoolVar(&Ctxt.Flag_linkshared, "linkshared", false, "generate code that will be linked against Go shared libraries")
- }
- flag.StringVar(&cpuprofile, "cpuprofile", "", "write cpu profile to `file`")
- flag.StringVar(&memprofile, "memprofile", "", "write memory profile to `file`")
- flag.Int64Var(&memprofilerate, "memprofilerate", 0, "set runtime.MemProfileRate to `rate`")
- var goversion string
- flag.StringVar(&goversion, "goversion", "", "required version of the runtime")
- var symabisPath string
- flag.StringVar(&symabisPath, "symabis", "", "read symbol ABIs from `file`")
- flag.StringVar(&traceprofile, "traceprofile", "", "write an execution trace to `file`")
- flag.StringVar(&blockprofile, "blockprofile", "", "write block profile to `file`")
- flag.StringVar(&mutexprofile, "mutexprofile", "", "write mutex profile to `file`")
- flag.StringVar(&benchfile, "bench", "", "append benchmark times to `file`")
- flag.BoolVar(&smallFrames, "smallframes", false, "reduce the size limit for stack allocated objects")
- flag.BoolVar(&Ctxt.UseBASEntries, "dwarfbasentries", Ctxt.UseBASEntries, "use base address selection entries in DWARF")
- flag.StringVar(&jsonLogOpt, "json", "", "version,destination for JSON compiler/optimizer logging")
-
- objabi.Flagparse(usage)
-
- Ctxt.Pkgpath = myimportpath
-
- for _, f := range strings.Split(spectre, ",") {
- f = strings.TrimSpace(f)
- switch f {
- default:
- log.Fatalf("unknown setting -spectre=%s", f)
- case "":
- // nothing
- case "all":
- spectreIndex = true
- Ctxt.Retpoline = true
- case "index":
- spectreIndex = true
- case "ret":
- Ctxt.Retpoline = true
- }
- }
-
- if spectreIndex {
- switch objabi.GOARCH {
- case "amd64":
- // ok
- default:
- log.Fatalf("GOARCH=%s does not support -spectre=index", objabi.GOARCH)
- }
- }
+ base.DebugSSA = ssa.PhaseOption
+ base.ParseFlags()
// Record flags that affect the build result. (And don't
// record flags that don't, since that would cause spurious
// changes in the binary.)
recordFlags("B", "N", "l", "msan", "race", "shared", "dynlink", "dwarflocationlists", "dwarfbasentries", "smallframes", "spectre")
- if smallFrames {
+ if !enableTrace && base.Flag.LowerT {
+ log.Fatalf("compiler not built with support for -t")
+ }
+
+ // Enable inlining (after recordFlags, to avoid recording the rewritten -l). For now:
+ // default: inlining on. (Flag.LowerL == 1)
+ // -l: inlining off (Flag.LowerL == 0)
+ // -l=2, -l=3: inlining on again, with extra debugging (Flag.LowerL > 1)
+ if base.Flag.LowerL <= 1 {
+ base.Flag.LowerL = 1 - base.Flag.LowerL
+ }
+
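The normalization just above turns the -l count into an inlining level. A standalone sketch of the mapping (remapL is an illustrative name):

	// remapL mirrors the Flag.LowerL normalization in Main.
	func remapL(lowerL int) int {
		if lowerL <= 1 {
			return 1 - lowerL
		}
		return lowerL
	}

	// remapL(0) == 1   default: inlining on
	// remapL(1) == 0   -l: inlining off
	// remapL(2) == 2   -l=2: inlining on, extra debugging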
+ if base.Flag.SmallFrames {
maxStackVarSize = 128 * 1024
maxImplicitStackVarSize = 16 * 1024
}
- Ctxt.Flag_shared = flag_dynlink || flag_shared
- Ctxt.Flag_dynlink = flag_dynlink
- Ctxt.Flag_optimize = Debug.N == 0
-
- Ctxt.Debugasm = Debug.S
- Ctxt.Debugvlog = Debug_vlog
- if flagDWARF {
- Ctxt.DebugInfo = debuginfo
- Ctxt.GenAbstractFunc = genAbstractFunc
- Ctxt.DwFixups = obj.NewDwarfFixupTable(Ctxt)
+ if base.Flag.Dwarf {
+ base.Ctxt.DebugInfo = debuginfo
+ base.Ctxt.GenAbstractFunc = genAbstractFunc
+ base.Ctxt.DwFixups = obj.NewDwarfFixupTable(base.Ctxt)
} else {
// turn off inline generation if no dwarf at all
- genDwarfInline = 0
- Ctxt.Flag_locationlists = false
- }
-
- if flag.NArg() < 1 && debugstr != "help" && debugstr != "ssa/help" {
- usage()
+ base.Flag.GenDwarfInl = 0
+ base.Ctxt.Flag_locationlists = false
}
-
- if goversion != "" && goversion != runtime.Version() {
- fmt.Printf("compile: version %q does not match go tool version %q\n", runtime.Version(), goversion)
- Exit(2)
+ if base.Ctxt.Flag_locationlists && len(base.Ctxt.Arch.DWARFRegisters) == 0 {
+ log.Fatalf("location lists requested but register mapping not available on %v", base.Ctxt.Arch.Name)
}
checkLang()
- if symabisPath != "" {
- readSymABIs(symabisPath, myimportpath)
+ if base.Flag.SymABIs != "" {
+ readSymABIs(base.Flag.SymABIs, base.Ctxt.Pkgpath)
}
- thearch.LinkArch.Init(Ctxt)
-
- if outfile == "" {
- p := flag.Arg(0)
- if i := strings.LastIndex(p, "/"); i >= 0 {
- p = p[i+1:]
- }
- if runtime.GOOS == "windows" {
- if i := strings.LastIndex(p, `\`); i >= 0 {
- p = p[i+1:]
- }
- }
- if i := strings.LastIndex(p, "."); i >= 0 {
- p = p[:i]
- }
- suffix := ".o"
- if writearchive {
- suffix = ".a"
- }
- outfile = p + suffix
+ if ispkgin(omit_pkgs) {
+ base.Flag.Race = false
+ base.Flag.MSan = false
}
+ thearch.LinkArch.Init(base.Ctxt)
startProfile()
-
- if flag_race && flag_msan {
- log.Fatal("cannot use both -race and -msan")
- }
- if flag_race || flag_msan {
- // -race and -msan imply -d=checkptr for now.
- Debug_checkptr = 1
- }
- if ispkgin(omit_pkgs) {
- flag_race = false
- flag_msan = false
- }
- if flag_race {
+ if base.Flag.Race {
racepkg = types.NewPkg("runtime/race", "")
}
- if flag_msan {
+ if base.Flag.MSan {
msanpkg = types.NewPkg("runtime/msan", "")
}
- if flag_race || flag_msan {
+ if base.Flag.Race || base.Flag.MSan {
instrumenting = true
}
-
- if compiling_runtime && Debug.N != 0 {
- log.Fatal("cannot disable optimizations while compiling runtime")
- }
- if nBackendWorkers < 1 {
- log.Fatalf("-c must be at least 1, got %d", nBackendWorkers)
- }
- if nBackendWorkers > 1 && !concurrentBackendAllowed() {
- log.Fatalf("cannot use concurrent backend compilation with provided flags; invoked as %v", os.Args)
- }
- if Ctxt.Flag_locationlists && len(Ctxt.Arch.DWARFRegisters) == 0 {
- log.Fatalf("location lists requested but register mapping not available on %v", Ctxt.Arch.Name)
+ if base.Flag.Dwarf {
+ dwarf.EnableLogging(base.Debug.DwarfInl != 0)
}
-
- // parse -d argument
- if debugstr != "" {
- Split:
- for _, name := range strings.Split(debugstr, ",") {
- if name == "" {
- continue
- }
- // display help about the -d option itself and quit
- if name == "help" {
- fmt.Print(debugHelpHeader)
- maxLen := len("ssa/help")
- for _, t := range debugtab {
- if len(t.name) > maxLen {
- maxLen = len(t.name)
- }
- }
- for _, t := range debugtab {
- fmt.Printf("\t%-*s\t%s\n", maxLen, t.name, t.help)
- }
- // ssa options have their own help
- fmt.Printf("\t%-*s\t%s\n", maxLen, "ssa/help", "print help about SSA debugging")
- fmt.Print(debugHelpFooter)
- os.Exit(0)
- }
- val, valstring, haveInt := 1, "", true
- if i := strings.IndexAny(name, "=:"); i >= 0 {
- var err error
- name, valstring = name[:i], name[i+1:]
- val, err = strconv.Atoi(valstring)
- if err != nil {
- val, haveInt = 1, false
- }
- }
- for _, t := range debugtab {
- if t.name != name {
- continue
- }
- switch vp := t.val.(type) {
- case nil:
- // Ignore
- case *string:
- *vp = valstring
- case *int:
- if !haveInt {
- log.Fatalf("invalid debug value %v", name)
- }
- *vp = val
- default:
- panic("bad debugtab type")
- }
- continue Split
- }
- // special case for ssa for now
- if strings.HasPrefix(name, "ssa/") {
- // expect form ssa/phase/flag
- // e.g. -d=ssa/generic_cse/time
- // _ in phase name also matches space
- phase := name[4:]
- flag := "debug" // default flag is debug
- if i := strings.Index(phase, "/"); i >= 0 {
- flag = phase[i+1:]
- phase = phase[:i]
- }
- err := ssa.PhaseOption(phase, flag, val, valstring)
- if err != "" {
- log.Fatalf(err)
- }
- continue Split
- }
- log.Fatalf("unknown debug key -d %s\n", name)
- }
- }
-
- if compiling_runtime {
- // Runtime can't use -d=checkptr, at least not yet.
- Debug_checkptr = 0
-
- // Fuzzing the runtime isn't interesting either.
- Debug_libfuzzer = 0
- }
-
- // set via a -d flag
- Ctxt.Debugpcln = Debug_pctab
- if flagDWARF {
- dwarf.EnableLogging(Debug_gendwarfinl != 0)
- }
-
- if Debug_softfloat != 0 {
+ if base.Debug.SoftFloat != 0 {
thearch.SoftFloat = true
}
- // enable inlining. for now:
- // default: inlining on. (Debug.l == 1)
- // -l: inlining off (Debug.l == 0)
- // -l=2, -l=3: inlining on again, with extra debugging (Debug.l > 1)
- if Debug.l <= 1 {
- Debug.l = 1 - Debug.l
- }
-
- if jsonLogOpt != "" { // parse version,destination from json logging optimization.
- logopt.LogJsonOption(jsonLogOpt)
+ if base.Flag.JSON != "" { // parse version,destination from json logging optimization.
+ logopt.LogJsonOption(base.Flag.JSON)
}
ssaDump = os.Getenv("GOSSAFUNC")
@@ -534,7 +199,7 @@ func Main(archInit func(*Arch)) {
}
}
- trackScopes = flagDWARF
+ trackScopes = base.Flag.Dwarf
Widthptr = thearch.LinkArch.PtrSize
Widthreg = thearch.LinkArch.RegSize
@@ -544,31 +209,19 @@ func Main(archInit func(*Arch)) {
// would lead to import cycles)
types.Widthptr = Widthptr
types.Dowidth = dowidth
- types.Fatalf = Fatalf
- types.Sconv = func(s *types.Sym, flag, mode int) string {
- return sconv(s, FmtFlag(flag), fmtMode(mode))
- }
- types.Tconv = func(t *types.Type, flag, mode int) string {
- return tconv(t, FmtFlag(flag), fmtMode(mode))
- }
- types.FormatSym = func(sym *types.Sym, s fmt.State, verb rune, mode int) {
- symFormat(sym, s, verb, fmtMode(mode))
- }
- types.FormatType = func(t *types.Type, s fmt.State, verb rune, mode int) {
- typeFormat(t, s, verb, fmtMode(mode))
- }
+ types.Fatalf = base.Fatalf
+ ir.InstallTypeFormats()
types.TypeLinkSym = func(t *types.Type) *obj.LSym {
return typenamesym(t).Linksym()
}
- types.FmtLeft = int(FmtLeft)
- types.FmtUnsigned = int(FmtUnsigned)
- types.FErr = int(FErr)
- types.Ctxt = Ctxt
+ types.FmtLeft = int(ir.FmtLeft)
+ types.FmtUnsigned = int(ir.FmtUnsigned)
+ types.FErr = int(ir.FErr)
+ types.Ctxt = base.Ctxt
initUniverse()
- dclcontext = PEXTERN
- nerrors = 0
+ dclcontext = ir.PEXTERN
autogeneratedPos = makePos(src.NewFileBase("<autogenerated>", "<autogenerated>"), 1, 0)
@@ -576,14 +229,12 @@ func Main(archInit func(*Arch)) {
loadsys()
timings.Start("fe", "parse")
- lines := parseFiles(flag.Args(), Debug.G != 0)
+ lines := parseFiles(flag.Args(), base.Flag.G != 0)
timings.Stop()
timings.AddEvent(int64(lines), "lines")
- if Debug.G != 0 {
+ if base.Flag.G != 0 {
// can only parse generic code for now
- if nerrors+nsavederrors != 0 {
- errorexit()
- }
+ base.ExitIfErrors()
return
}
@@ -607,7 +258,7 @@ func Main(archInit func(*Arch)) {
timings.Start("fe", "typecheck", "top1")
for i := 0; i < len(xtop); i++ {
n := xtop[i]
- if op := n.Op; op != ODCL && op != OAS && op != OAS2 && (op != ODCLTYPE || !n.Left.Name.Param.Alias()) {
+ if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.Left().Name().Param.Alias()) {
xtop[i] = typecheck(n, ctxStmt)
}
}
@@ -619,7 +270,7 @@ func Main(archInit func(*Arch)) {
timings.Start("fe", "typecheck", "top2")
for i := 0; i < len(xtop); i++ {
n := xtop[i]
- if op := n.Op; op == ODCL || op == OAS || op == OAS2 || op == ODCLTYPE && n.Left.Name.Param.Alias() {
+ if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.Left().Name().Param.Alias() {
xtop[i] = typecheck(n, ctxStmt)
}
}
@@ -630,14 +281,14 @@ func Main(archInit func(*Arch)) {
var fcount int64
for i := 0; i < len(xtop); i++ {
n := xtop[i]
- if n.Op == ODCLFUNC {
+ if n.Op() == ir.ODCLFUNC {
Curfn = n
decldepth = 1
- saveerrors()
- typecheckslice(Curfn.Nbody.Slice(), ctxStmt)
+ errorsBefore := base.Errors()
+ typecheckslice(Curfn.Body().Slice(), ctxStmt)
checkreturn(Curfn)
- if nerrors != 0 {
- Curfn.Nbody.Set(nil) // type errors; do not compile
+ if base.Errors() > errorsBefore {
+ Curfn.PtrBody().Set(nil) // type errors; do not compile
}
// Now that we've checked whether n terminates,
// we can eliminate some obviously dead code.
@@ -649,11 +300,9 @@ func Main(archInit func(*Arch)) {
// check past phase 9 isn't sufficient, as we may exit with other errors
// before then, thus skipping map key errors.
checkMapKeys()
- timings.AddEvent(fcount, "funcs")
+ base.ExitIfErrors()
- if nsavederrors+nerrors != 0 {
- errorexit()
- }
+ timings.AddEvent(fcount, "funcs")
fninit(xtop)
@@ -662,39 +311,31 @@ func Main(archInit func(*Arch)) {
// because variables captured by value do not escape.
timings.Start("fe", "capturevars")
for _, n := range xtop {
- if n.Op == ODCLFUNC && n.Func.Closure != nil {
+ if n.Op() == ir.ODCLFUNC && n.Func().OClosure != nil {
Curfn = n
capturevars(n)
}
}
capturevarscomplete = true
-
Curfn = nil
-
- if nsavederrors+nerrors != 0 {
- errorexit()
- }
+ base.ExitIfErrors()
// Phase 5: Inlining
timings.Start("fe", "inlining")
- if Debug_typecheckinl != 0 {
+ if base.Debug.TypecheckInl != 0 {
// Typecheck imported function bodies if Flag.LowerL > 1,
// otherwise lazily when used or re-exported.
for _, n := range importlist {
- if n.Func.Inl != nil {
- saveerrors()
+ if n.Func().Inl != nil {
typecheckinl(n)
}
}
-
- if nsavederrors+nerrors != 0 {
- errorexit()
- }
+ base.ExitIfErrors()
}
- if Debug.l != 0 {
+ if base.Flag.LowerL != 0 {
// Find functions that can be inlined and clone them before walk expands them.
- visitBottomUp(xtop, func(list []*Node, recursive bool) {
+ visitBottomUp(xtop, func(list []ir.Node, recursive bool) {
numfns := numNonClosures(list)
for _, n := range list {
if !recursive || numfns > 1 {
@@ -703,8 +344,8 @@ func Main(archInit func(*Arch)) {
// across more than one function.
caninl(n)
} else {
- if Debug.m > 1 {
- fmt.Printf("%v: cannot inline %v: recursive\n", n.Line(), n.Func.Nname)
+ if base.Flag.LowerM > 1 {
+ fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(n), n.Func().Nname)
}
}
inlcalls(n)
@@ -713,7 +354,7 @@ func Main(archInit func(*Arch)) {
}
for _, n := range xtop {
- if n.Op == ODCLFUNC {
+ if n.Op() == ir.ODCLFUNC {
devirtualize(n)
}
}
@@ -734,7 +375,7 @@ func Main(archInit func(*Arch)) {
// checking. This must happen before transformclosure.
// We'll do the final check after write barriers are
// inserted.
- if compiling_runtime {
+ if base.Flag.CompilingRuntime {
nowritebarrierrecCheck = newNowritebarrierrecChecker()
}
@@ -743,7 +384,7 @@ func Main(archInit func(*Arch)) {
// before walk reaches a call of a closure.
timings.Start("fe", "xclosures")
for _, n := range xtop {
- if n.Op == ODCLFUNC && n.Func.Closure != nil {
+ if n.Op() == ir.ODCLFUNC && n.Func().OClosure != nil {
Curfn = n
transformclosure(n)
}
@@ -766,7 +407,7 @@ func Main(archInit func(*Arch)) {
fcount = 0
for i := 0; i < len(xtop); i++ {
n := xtop[i]
- if n.Op == ODCLFUNC {
+ if n.Op() == ir.ODCLFUNC {
funccompile(n)
fcount++
}
@@ -785,33 +426,30 @@ func Main(archInit func(*Arch)) {
// Finalize DWARF inline routine DIEs, then explicitly turn off
// DWARF inlining gen so as to avoid problems with generated
// method wrappers.
- if Ctxt.DwFixups != nil {
- Ctxt.DwFixups.Finalize(myimportpath, Debug_gendwarfinl != 0)
- Ctxt.DwFixups = nil
- genDwarfInline = 0
+ if base.Ctxt.DwFixups != nil {
+ base.Ctxt.DwFixups.Finalize(base.Ctxt.Pkgpath, base.Debug.DwarfInl != 0)
+ base.Ctxt.DwFixups = nil
+ base.Flag.GenDwarfInl = 0
}
// Phase 9: Check external declarations.
timings.Start("be", "externaldcls")
for i, n := range externdcl {
- if n.Op == ONAME {
+ if n.Op() == ir.ONAME {
externdcl[i] = typecheck(externdcl[i], ctxExpr)
}
}
// Check the map keys again, since we typechecked the external
// declarations.
checkMapKeys()
-
- if nerrors+nsavederrors != 0 {
- errorexit()
- }
+ base.ExitIfErrors()
// Write object data to disk.
timings.Start("be", "dumpobj")
dumpdata()
- Ctxt.NumberSyms()
+ base.Ctxt.NumberSyms()
dumpobj()
- if asmhdr != "" {
+ if base.Flag.AsmHdr != "" {
dumpasmhdr()
}
@@ -821,40 +459,37 @@ func Main(archInit func(*Arch)) {
})
for _, large := range largeStackFrames {
if large.callee != 0 {
- yyerrorl(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args + %d MB callee", large.locals>>20, large.args>>20, large.callee>>20)
+ base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args + %d MB callee", large.locals>>20, large.args>>20, large.callee>>20)
} else {
- yyerrorl(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args", large.locals>>20, large.args>>20)
+ base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args", large.locals>>20, large.args>>20)
}
}
if len(funcStack) != 0 {
- Fatalf("funcStack is non-empty: %v", len(funcStack))
+ base.Fatalf("funcStack is non-empty: %v", len(funcStack))
}
if len(compilequeue) != 0 {
- Fatalf("%d uncompiled functions", len(compilequeue))
+ base.Fatalf("%d uncompiled functions", len(compilequeue))
}
- logopt.FlushLoggedOpts(Ctxt, myimportpath)
+ logopt.FlushLoggedOpts(base.Ctxt, base.Ctxt.Pkgpath)
+ base.ExitIfErrors()
- if nerrors+nsavederrors != 0 {
- errorexit()
- }
-
- flusherrors()
+ base.FlushErrors()
timings.Stop()
- if benchfile != "" {
- if err := writebench(benchfile); err != nil {
+ if base.Flag.Bench != "" {
+ if err := writebench(base.Flag.Bench); err != nil {
log.Fatalf("cannot write benchmark data: %v", err)
}
}
}
// numNonClosures returns the number of functions in list which are not closures.
-func numNonClosures(list []*Node) int {
+func numNonClosures(list []ir.Node) int {
count := 0
for _, n := range list {
- if n.Func.Closure == nil {
+ if n.Func().OClosure == nil {
count++
}
}
@@ -871,7 +506,7 @@ func writebench(filename string) error {
fmt.Fprintln(&buf, "commit:", objabi.Version)
fmt.Fprintln(&buf, "goos:", runtime.GOOS)
fmt.Fprintln(&buf, "goarch:", runtime.GOARCH)
- timings.Write(&buf, "BenchmarkCompile:"+myimportpath+":")
+ timings.Write(&buf, "BenchmarkCompile:"+base.Ctxt.Pkgpath+":")
n, err := f.Write(buf.Bytes())
if err != nil {
@@ -884,64 +519,6 @@ func writebench(filename string) error {
return f.Close()
}
-var (
- importMap = map[string]string{}
- packageFile map[string]string // nil means not in use
-)
-
-func addImportMap(s string) {
- if strings.Count(s, "=") != 1 {
- log.Fatal("-importmap argument must be of the form source=actual")
- }
- i := strings.Index(s, "=")
- source, actual := s[:i], s[i+1:]
- if source == "" || actual == "" {
- log.Fatal("-importmap argument must be of the form source=actual; source and actual must be non-empty")
- }
- importMap[source] = actual
-}
-
-func readImportCfg(file string) {
- packageFile = map[string]string{}
- data, err := ioutil.ReadFile(file)
- if err != nil {
- log.Fatalf("-importcfg: %v", err)
- }
-
- for lineNum, line := range strings.Split(string(data), "\n") {
- lineNum++ // 1-based
- line = strings.TrimSpace(line)
- if line == "" || strings.HasPrefix(line, "#") {
- continue
- }
-
- var verb, args string
- if i := strings.Index(line, " "); i < 0 {
- verb = line
- } else {
- verb, args = line[:i], strings.TrimSpace(line[i+1:])
- }
- var before, after string
- if i := strings.Index(args, "="); i >= 0 {
- before, after = args[:i], args[i+1:]
- }
- switch verb {
- default:
- log.Fatalf("%s:%d: unknown directive %q", file, lineNum, verb)
- case "importmap":
- if before == "" || after == "" {
- log.Fatalf(`%s:%d: invalid importmap: syntax is "importmap old=new"`, file, lineNum)
- }
- importMap[before] = after
- case "packagefile":
- if before == "" || after == "" {
- log.Fatalf(`%s:%d: invalid packagefile: syntax is "packagefile path=filename"`, file, lineNum)
- }
- packageFile[before] = after
- }
- }
-}
-
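The deleted addImportMap/readImportCfg pair (this parsing now lives behind base.Flag.Cfg in cmd/compile/internal/base) accepted importcfg files of the following shape; the paths below are made up for illustration:

	# comments and blank lines are ignored
	importmap old.example/pkg=new.example/pkg
	packagefile fmt=/home/user/.cache/go-build/01/fmt.a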
// symabiDefs and symabiRefs record the defined and referenced ABIs of
// symbols required by non-Go code. These are keyed by link symbol
// name, where the local package prefix is always `"".`
@@ -1013,11 +590,6 @@ func readSymABIs(file, myimportpath string) {
}
}
-func saveerrors() {
- nsavederrors += nerrors
- nerrors = 0
-}
-
func arsize(b *bufio.Reader, name string) int {
var buf [ArhdrSize]byte
if _, err := io.ReadFull(b, buf[:]); err != nil {
@@ -1032,14 +604,6 @@ func arsize(b *bufio.Reader, name string) int {
return i
}
-var idirs []string
-
-func addidir(dir string) {
- if dir != "" {
- idirs = append(idirs, dir)
- }
-}
-
func isDriveLetter(b byte) bool {
return 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z'
}
@@ -1054,12 +618,12 @@ func islocalname(name string) bool {
func findpkg(name string) (file string, ok bool) {
if islocalname(name) {
- if nolocalimports {
+ if base.Flag.NoLocalImports {
return "", false
}
- if packageFile != nil {
- file, ok = packageFile[name]
+ if base.Flag.Cfg.PackageFile != nil {
+ file, ok = base.Flag.Cfg.PackageFile[name]
return file, ok
}
@@ -1081,16 +645,16 @@ func findpkg(name string) (file string, ok bool) {
// don't want to see "encoding/../encoding/base64"
// as different from "encoding/base64".
if q := path.Clean(name); q != name {
- yyerror("non-canonical import path %q (should be %q)", name, q)
+ base.Errorf("non-canonical import path %q (should be %q)", name, q)
return "", false
}
- if packageFile != nil {
- file, ok = packageFile[name]
+ if base.Flag.Cfg.PackageFile != nil {
+ file, ok = base.Flag.Cfg.PackageFile[name]
return file, ok
}
- for _, dir := range idirs {
+ for _, dir := range base.Flag.Cfg.ImportDirs {
file = fmt.Sprintf("%s/%s.a", dir, name)
if _, err := os.Stat(file); err == nil {
return file, true
@@ -1104,13 +668,13 @@ func findpkg(name string) (file string, ok bool) {
if objabi.GOROOT != "" {
suffix := ""
suffixsep := ""
- if flag_installsuffix != "" {
+ if base.Flag.InstallSuffix != "" {
suffixsep = "_"
- suffix = flag_installsuffix
- } else if flag_race {
+ suffix = base.Flag.InstallSuffix
+ } else if base.Flag.Race {
suffixsep = "_"
suffix = "race"
- } else if flag_msan {
+ } else if base.Flag.MSan {
suffixsep = "_"
suffix = "msan"
}
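The suffix selection above picks a variant GOROOT package directory for -installsuffix, -race, or -msan builds. Assuming the standard layout (the Sprintf that consumes suffix and suffixsep falls outside this hunk), an import of "math" under -race on linux/amd64 would resolve to roughly:

	$GOROOT/pkg/linux_amd64_race/math.a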
@@ -1147,7 +711,7 @@ func loadsys() {
case varTag:
importvar(Runtimepkg, src.NoXPos, sym, typ)
default:
- Fatalf("unhandled declaration tag %v", d.tag)
+ base.Fatalf("unhandled declaration tag %v", d.tag)
}
}
@@ -1159,15 +723,15 @@ func loadsys() {
// imported so far.
var myheight int
-func importfile(f *Val) *types.Pkg {
- path_, ok := f.U.(string)
- if !ok {
- yyerror("import path must be a string")
+func importfile(f constant.Value) *types.Pkg {
+ if f.Kind() != constant.String {
+ base.Errorf("import path must be a string")
return nil
}
+ path_ := constant.StringVal(f)
if len(path_) == 0 {
- yyerror("import path is empty")
+ base.Errorf("import path is empty")
return nil
}
@@ -1180,16 +744,16 @@ func importfile(f *Val) *types.Pkg {
// the main package, just as we reserve the import
// path "math" to identify the standard math package.
if path_ == "main" {
- yyerror("cannot import \"main\"")
- errorexit()
+ base.Errorf("cannot import \"main\"")
+ base.ErrorExit()
}
- if myimportpath != "" && path_ == myimportpath {
- yyerror("import %q while compiling that package (import cycle)", path_)
- errorexit()
+ if base.Ctxt.Pkgpath != "" && path_ == base.Ctxt.Pkgpath {
+ base.Errorf("import %q while compiling that package (import cycle)", path_)
+ base.ErrorExit()
}
- if mapped, ok := importMap[path_]; ok {
+ if mapped, ok := base.Flag.Cfg.ImportMap[path_]; ok {
path_ = mapped
}
@@ -1199,13 +763,13 @@ func importfile(f *Val) *types.Pkg {
if islocalname(path_) {
if path_[0] == '/' {
- yyerror("import path cannot be absolute path")
+ base.Errorf("import path cannot be absolute path")
return nil
}
- prefix := Ctxt.Pathname
- if localimport != "" {
- prefix = localimport
+ prefix := base.Ctxt.Pathname
+ if base.Flag.D != "" {
+ prefix = base.Flag.D
}
path_ = path.Join(prefix, path_)
@@ -1216,8 +780,8 @@ func importfile(f *Val) *types.Pkg {
file, found := findpkg(path_)
if !found {
- yyerror("can't find import: %q", path_)
- errorexit()
+ base.Errorf("can't find import: %q", path_)
+ base.ErrorExit()
}
importpkg := types.NewPkg(path_, "")
@@ -1229,57 +793,55 @@ func importfile(f *Val) *types.Pkg {
imp, err := bio.Open(file)
if err != nil {
- yyerror("can't open import: %q: %v", path_, err)
- errorexit()
+ base.Errorf("can't open import: %q: %v", path_, err)
+ base.ErrorExit()
}
defer imp.Close()
// check object header
p, err := imp.ReadString('\n')
if err != nil {
- yyerror("import %s: reading input: %v", file, err)
- errorexit()
+ base.Errorf("import %s: reading input: %v", file, err)
+ base.ErrorExit()
}
if p == "!<arch>\n" { // package archive
// package export block should be first
sz := arsize(imp.Reader, "__.PKGDEF")
if sz <= 0 {
- yyerror("import %s: not a package file", file)
- errorexit()
+ base.Errorf("import %s: not a package file", file)
+ base.ErrorExit()
}
p, err = imp.ReadString('\n')
if err != nil {
- yyerror("import %s: reading input: %v", file, err)
- errorexit()
+ base.Errorf("import %s: reading input: %v", file, err)
+ base.ErrorExit()
}
}
if !strings.HasPrefix(p, "go object ") {
- yyerror("import %s: not a go object file: %s", file, p)
- errorexit()
+ base.Errorf("import %s: not a go object file: %s", file, p)
+ base.ErrorExit()
}
q := fmt.Sprintf("%s %s %s %s\n", objabi.GOOS, objabi.GOARCH, objabi.Version, objabi.Expstring())
if p[10:] != q {
- yyerror("import %s: object is [%s] expected [%s]", file, p[10:], q)
- errorexit()
+ base.Errorf("import %s: object is [%s] expected [%s]", file, p[10:], q)
+ base.ErrorExit()
}
// process header lines
for {
p, err = imp.ReadString('\n')
if err != nil {
- yyerror("import %s: reading input: %v", file, err)
- errorexit()
+ base.Errorf("import %s: reading input: %v", file, err)
+ base.ErrorExit()
}
if p == "\n" {
break // header ends with blank line
}
}
- // In the importfile, if we find:
- // $$\n (textual format): not supported anymore
- // $$B\n (binary format) : import directly, then feed the lexer a dummy statement
+ // Expect $$B\n to signal binary import format.
// look for $$
var c byte
@@ -1304,41 +866,41 @@ func importfile(f *Val) *types.Pkg {
var fingerprint goobj.FingerprintType
switch c {
case '\n':
- yyerror("cannot import %s: old export format no longer supported (recompile library)", path_)
+ base.Errorf("cannot import %s: old export format no longer supported (recompile library)", path_)
return nil
case 'B':
- if Debug_export != 0 {
+ if base.Debug.Export != 0 {
fmt.Printf("importing %s (%s)\n", path_, file)
}
imp.ReadByte() // skip \n after $$B
c, err = imp.ReadByte()
if err != nil {
- yyerror("import %s: reading input: %v", file, err)
- errorexit()
+ base.Errorf("import %s: reading input: %v", file, err)
+ base.ErrorExit()
}
// Indexed format is distinguished by an 'i' byte,
// whereas previous export formats started with 'c', 'd', or 'v'.
if c != 'i' {
- yyerror("import %s: unexpected package format byte: %v", file, c)
- errorexit()
+ base.Errorf("import %s: unexpected package format byte: %v", file, c)
+ base.ErrorExit()
}
fingerprint = iimport(importpkg, imp)
default:
- yyerror("no import in %q", path_)
- errorexit()
+ base.Errorf("no import in %q", path_)
+ base.ErrorExit()
}
// assume files move (get installed) so don't record the full path
- if packageFile != nil {
+ if base.Flag.Cfg.PackageFile != nil {
// If using a packageFile map, assume path_ can be recorded directly.
- Ctxt.AddImport(path_, fingerprint)
+ base.Ctxt.AddImport(path_, fingerprint)
} else {
// For file "/Users/foo/go/pkg/darwin_amd64/math.a" record "math.a".
- Ctxt.AddImport(file[len(file)-len(path_)-len(".a"):], fingerprint)
+ base.Ctxt.AddImport(file[len(file)-len(path_)-len(".a"):], fingerprint)
}
if importpkg.Height >= myheight {
@@ -1360,21 +922,21 @@ func pkgnotused(lineno src.XPos, path string, name string) {
elem = elem[i+1:]
}
if name == "" || elem == name {
- yyerrorl(lineno, "imported and not used: %q", path)
+ base.ErrorfAt(lineno, "imported and not used: %q", path)
} else {
- yyerrorl(lineno, "imported and not used: %q as %s", path, name)
+ base.ErrorfAt(lineno, "imported and not used: %q as %s", path, name)
}
}
func mkpackage(pkgname string) {
- if localpkg.Name == "" {
+ if ir.LocalPkg.Name == "" {
if pkgname == "_" {
- yyerror("invalid package name _")
+ base.Errorf("invalid package name _")
}
- localpkg.Name = pkgname
+ ir.LocalPkg.Name = pkgname
} else {
- if pkgname != localpkg.Name {
- yyerror("package %s; expected %s", pkgname, localpkg.Name)
+ if pkgname != ir.LocalPkg.Name {
+ base.Errorf("package %s; expected %s", pkgname, ir.LocalPkg.Name)
}
}
}
@@ -1387,19 +949,19 @@ func clearImports() {
}
var unused []importedPkg
- for _, s := range localpkg.Syms {
- n := asNode(s.Def)
+ for _, s := range ir.LocalPkg.Syms {
+ n := ir.AsNode(s.Def)
if n == nil {
continue
}
- if n.Op == OPACK {
+ if n.Op() == ir.OPACK {
// throw away top-level package name left over
// from previous file.
// leave s->block set to cause redeclaration
// errors if a conflicting top-level name is
// introduced by a different file.
- if !n.Name.Used() && nsyntaxerrors == 0 {
- unused = append(unused, importedPkg{n.Pos, n.Name.Pkg.Path, s.Name})
+ if !n.Name().Used() && base.SyntaxErrors() == 0 {
+ unused = append(unused, importedPkg{n.Pos(), n.Name().Pkg.Path, s.Name})
}
s.Def = nil
continue
@@ -1407,9 +969,9 @@ func clearImports() {
if IsAlias(s) {
// throw away top-level name left over
// from previous import . "x"
- if n.Name != nil && n.Name.Pack != nil && !n.Name.Pack.Name.Used() && nsyntaxerrors == 0 {
- unused = append(unused, importedPkg{n.Name.Pack.Pos, n.Name.Pack.Name.Pkg.Path, ""})
- n.Name.Pack.Name.SetUsed(true)
+ if n.Name() != nil && n.Name().Pack != nil && !n.Name().Pack.Name().Used() && base.SyntaxErrors() == 0 {
+ unused = append(unused, importedPkg{n.Name().Pack.Pos(), n.Name().Pack.Name().Pkg.Path, ""})
+ n.Name().Pack.Name().SetUsed(true)
}
s.Def = nil
continue
@@ -1423,56 +985,13 @@ func clearImports() {
}
func IsAlias(sym *types.Sym) bool {
- return sym.Def != nil && asNode(sym.Def).Sym != sym
-}
-
-// By default, assume any debug flags are incompatible with concurrent
-// compilation. A few are safe and potentially in common use for
-// normal compiles, though; return true for those.
-func concurrentFlagOk() bool {
- // Report whether any debug flag that would prevent concurrent
- // compilation is set, by zeroing out the allowed ones and then
- // checking if the resulting struct is zero.
- d := Debug
- d.B = 0 // disable bounds checking
- d.C = 0 // disable printing of columns in error messages
- d.e = 0 // no limit on errors; errors all come from non-concurrent code
- d.N = 0 // disable optimizations
- d.l = 0 // disable inlining
- d.w = 0 // all printing happens before compilation
- d.W = 0 // all printing happens before compilation
- d.S = 0 // printing disassembly happens at the end (but see concurrentBackendAllowed below)
-
- return d == DebugFlags{}
-}
-
-func concurrentBackendAllowed() bool {
- if !concurrentFlagOk() {
- return false
- }
-
- // Debug.S by itself is ok, because all printing occurs
- // while writing the object file, and that is non-concurrent.
- // Adding Debug_vlog, however, causes Debug.S to also print
- // while flushing the plist, which happens concurrently.
- if Debug_vlog || debugstr != "" || debuglive > 0 {
- return false
- }
- // TODO: Test and delete this condition.
- if objabi.Fieldtrack_enabled != 0 {
- return false
- }
- // TODO: fix races and enable the following flags
- if Ctxt.Flag_shared || Ctxt.Flag_dynlink || flag_race {
- return false
- }
- return true
+ return sym.Def != nil && ir.AsNode(sym.Def).Sym() != sym
}
// recordFlags records the specified command-line flags to be placed
// in the DWARF info.
func recordFlags(flags ...string) {
- if myimportpath == "" {
+ if base.Ctxt.Pkgpath == "" {
// We can't record the flags if we don't know what the
// package name is.
return
@@ -1515,30 +1034,27 @@ func recordFlags(flags ...string) {
if cmd.Len() == 0 {
return
}
- s := Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + myimportpath)
+ s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + base.Ctxt.Pkgpath)
s.Type = objabi.SDWARFCUINFO
// Sometimes (for example when building tests) we can link
// together two package main archives. So allow dups.
s.Set(obj.AttrDuplicateOK, true)
- Ctxt.Data = append(Ctxt.Data, s)
+ base.Ctxt.Data = append(base.Ctxt.Data, s)
s.P = cmd.Bytes()[1:]
}
// recordPackageName records the name of the package being
// compiled, so that the linker can save it in the compile unit's DIE.
func recordPackageName() {
- s := Ctxt.Lookup(dwarf.CUInfoPrefix + "packagename." + myimportpath)
+ s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "packagename." + base.Ctxt.Pkgpath)
s.Type = objabi.SDWARFCUINFO
// Sometimes (for example when building tests) we can link
// together two package main archives. So allow dups.
s.Set(obj.AttrDuplicateOK, true)
- Ctxt.Data = append(Ctxt.Data, s)
- s.P = []byte(localpkg.Name)
+ base.Ctxt.Data = append(base.Ctxt.Data, s)
+ s.P = []byte(ir.LocalPkg.Name)
}
-// flag_lang is the language version we are compiling for, set by the -lang flag.
-var flag_lang string
-
// currentLang returns the current language version.
func currentLang() string {
return fmt.Sprintf("go1.%d", goversion.Version)
@@ -1563,9 +1079,9 @@ var langWant lang
func langSupported(major, minor int, pkg *types.Pkg) bool {
if pkg == nil {
// TODO(mdempsky): Set Pkg for local types earlier.
- pkg = localpkg
+ pkg = ir.LocalPkg
}
- if pkg != localpkg {
+ if pkg != ir.LocalPkg {
// Assume imported packages passed type-checking.
return true
}
@@ -1579,23 +1095,23 @@ func langSupported(major, minor int, pkg *types.Pkg) bool {
// checkLang verifies that the -lang flag holds a valid value, and
// exits if not. It initializes data used by langSupported.
func checkLang() {
- if flag_lang == "" {
+ if base.Flag.Lang == "" {
return
}
var err error
- langWant, err = parseLang(flag_lang)
+ langWant, err = parseLang(base.Flag.Lang)
if err != nil {
- log.Fatalf("invalid value %q for -lang: %v", flag_lang, err)
+ log.Fatalf("invalid value %q for -lang: %v", base.Flag.Lang, err)
}
- if def := currentLang(); flag_lang != def {
+ if def := currentLang(); base.Flag.Lang != def {
defVers, err := parseLang(def)
if err != nil {
log.Fatalf("internal error parsing default lang %q: %v", def, err)
}
if langWant.major > defVers.major || (langWant.major == defVers.major && langWant.minor > defVers.minor) {
- log.Fatalf("invalid value %q for -lang: max known version is %q", flag_lang, def)
+ log.Fatalf("invalid value %q for -lang: max known version is %q", base.Flag.Lang, def)
}
}
}
diff --git a/src/cmd/compile/internal/gc/mkbuiltin.go b/src/cmd/compile/internal/gc/mkbuiltin.go
index 63d2a12c07..d763f1ebee 100644
--- a/src/cmd/compile/internal/gc/mkbuiltin.go
+++ b/src/cmd/compile/internal/gc/mkbuiltin.go
@@ -35,7 +35,10 @@ func main() {
fmt.Fprintln(&b)
fmt.Fprintln(&b, "package gc")
fmt.Fprintln(&b)
- fmt.Fprintln(&b, `import "cmd/compile/internal/types"`)
+ fmt.Fprintln(&b, `import (`)
+ fmt.Fprintln(&b, ` "cmd/compile/internal/ir"`)
+ fmt.Fprintln(&b, ` "cmd/compile/internal/types"`)
+ fmt.Fprintln(&b, `)`)
mkbuiltin(&b, "runtime")
@@ -144,12 +147,12 @@ func (i *typeInterner) mktype(t ast.Expr) string {
case "rune":
return "types.Runetype"
}
- return fmt.Sprintf("types.Types[T%s]", strings.ToUpper(t.Name))
+ return fmt.Sprintf("types.Types[types.T%s]", strings.ToUpper(t.Name))
case *ast.SelectorExpr:
if t.X.(*ast.Ident).Name != "unsafe" || t.Sel.Name != "Pointer" {
log.Fatalf("unhandled type: %#v", t)
}
- return "types.Types[TUNSAFEPTR]"
+ return "types.Types[types.TUNSAFEPTR]"
case *ast.ArrayType:
if t.Len == nil {
@@ -171,7 +174,7 @@ func (i *typeInterner) mktype(t ast.Expr) string {
if len(t.Methods.List) != 0 {
log.Fatal("non-empty interfaces unsupported")
}
- return "types.Types[TINTER]"
+ return "types.Types[types.TINTER]"
case *ast.MapType:
return fmt.Sprintf("types.NewMap(%s, %s)", i.subtype(t.Key), i.subtype(t.Value))
case *ast.StarExpr:
@@ -204,7 +207,7 @@ func (i *typeInterner) fields(fl *ast.FieldList, keepNames bool) string {
}
}
}
- return fmt.Sprintf("[]*Node{%s}", strings.Join(res, ", "))
+ return fmt.Sprintf("[]ir.Node{%s}", strings.Join(res, ", "))
}
func intconst(e ast.Expr) int64 {
diff --git a/src/cmd/compile/internal/gc/mpfloat.go b/src/cmd/compile/internal/gc/mpfloat.go
deleted file mode 100644
index 401aef319d..0000000000
--- a/src/cmd/compile/internal/gc/mpfloat.go
+++ /dev/null
@@ -1,357 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "fmt"
- "math"
- "math/big"
-)
-
-// implements float arithmetic
-
-const (
- // Maximum size in bits for Mpints before signalling
- // overflow and also mantissa precision for Mpflts.
- Mpprec = 512
- // Turn on for constant arithmetic debugging output.
- Mpdebug = false
-)
-
-// Mpflt represents a floating-point constant.
-type Mpflt struct {
- Val big.Float
-}
-
-// Mpcplx represents a complex constant.
-type Mpcplx struct {
- Real Mpflt
- Imag Mpflt
-}
-
-// Use newMpflt (not new(Mpflt)!) to get the correct default precision.
-func newMpflt() *Mpflt {
- var a Mpflt
- a.Val.SetPrec(Mpprec)
- return &a
-}
-
-// Use newMpcmplx (not new(Mpcplx)!) to get the correct default precision.
-func newMpcmplx() *Mpcplx {
- var a Mpcplx
- a.Real = *newMpflt()
- a.Imag = *newMpflt()
- return &a
-}
-
-func (a *Mpflt) SetInt(b *Mpint) {
- if b.checkOverflow(0) {
- // sign doesn't really matter but copy anyway
- a.Val.SetInf(b.Val.Sign() < 0)
- return
- }
- a.Val.SetInt(&b.Val)
-}
-
-func (a *Mpflt) Set(b *Mpflt) {
- a.Val.Set(&b.Val)
-}
-
-func (a *Mpflt) Add(b *Mpflt) {
- if Mpdebug {
- fmt.Printf("\n%v + %v", a, b)
- }
-
- a.Val.Add(&a.Val, &b.Val)
-
- if Mpdebug {
- fmt.Printf(" = %v\n\n", a)
- }
-}
-
-func (a *Mpflt) AddFloat64(c float64) {
- var b Mpflt
-
- b.SetFloat64(c)
- a.Add(&b)
-}
-
-func (a *Mpflt) Sub(b *Mpflt) {
- if Mpdebug {
- fmt.Printf("\n%v - %v", a, b)
- }
-
- a.Val.Sub(&a.Val, &b.Val)
-
- if Mpdebug {
- fmt.Printf(" = %v\n\n", a)
- }
-}
-
-func (a *Mpflt) Mul(b *Mpflt) {
- if Mpdebug {
- fmt.Printf("%v\n * %v\n", a, b)
- }
-
- a.Val.Mul(&a.Val, &b.Val)
-
- if Mpdebug {
- fmt.Printf(" = %v\n\n", a)
- }
-}
-
-func (a *Mpflt) MulFloat64(c float64) {
- var b Mpflt
-
- b.SetFloat64(c)
- a.Mul(&b)
-}
-
-func (a *Mpflt) Quo(b *Mpflt) {
- if Mpdebug {
- fmt.Printf("%v\n / %v\n", a, b)
- }
-
- a.Val.Quo(&a.Val, &b.Val)
-
- if Mpdebug {
- fmt.Printf(" = %v\n\n", a)
- }
-}
-
-func (a *Mpflt) Cmp(b *Mpflt) int {
- return a.Val.Cmp(&b.Val)
-}
-
-func (a *Mpflt) CmpFloat64(c float64) int {
- if c == 0 {
- return a.Val.Sign() // common case shortcut
- }
- return a.Val.Cmp(big.NewFloat(c))
-}
-
-func (a *Mpflt) Float64() float64 {
- x, _ := a.Val.Float64()
-
- // check for overflow
- if math.IsInf(x, 0) && nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpflt Float64")
- }
-
- return x + 0 // avoid -0 (should not be needed, but be conservative)
-}
-
-func (a *Mpflt) Float32() float64 {
- x32, _ := a.Val.Float32()
- x := float64(x32)
-
- // check for overflow
- if math.IsInf(x, 0) && nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpflt Float32")
- }
-
- return x + 0 // avoid -0 (should not be needed, but be conservative)
-}
-
-func (a *Mpflt) SetFloat64(c float64) {
- if Mpdebug {
- fmt.Printf("\nconst %g", c)
- }
-
- // convert -0 to 0
- if c == 0 {
- c = 0
- }
- a.Val.SetFloat64(c)
-
- if Mpdebug {
- fmt.Printf(" = %v\n", a)
- }
-}
-
-func (a *Mpflt) Neg() {
- // avoid -0
- if a.Val.Sign() != 0 {
- a.Val.Neg(&a.Val)
- }
-}
-
-func (a *Mpflt) SetString(as string) {
- f, _, err := a.Val.Parse(as, 0)
- if err != nil {
- yyerror("malformed constant: %s (%v)", as, err)
- a.Val.SetFloat64(0)
- return
- }
-
- if f.IsInf() {
- yyerror("constant too large: %s", as)
- a.Val.SetFloat64(0)
- return
- }
-
- // -0 becomes 0
- if f.Sign() == 0 && f.Signbit() {
- a.Val.SetFloat64(0)
- }
-}
-
-func (f *Mpflt) String() string {
- return f.Val.Text('b', 0)
-}
-
-func (fvp *Mpflt) GoString() string {
- // determine sign
- sign := ""
- f := &fvp.Val
- if f.Sign() < 0 {
- sign = "-"
- f = new(big.Float).Abs(f)
- }
-
- // Don't try to convert infinities (will not terminate).
- if f.IsInf() {
- return sign + "Inf"
- }
-
- // Use exact fmt formatting if in float64 range (common case):
- // proceed if f doesn't underflow to 0 or overflow to inf.
- if x, _ := f.Float64(); f.Sign() == 0 == (x == 0) && !math.IsInf(x, 0) {
- return fmt.Sprintf("%s%.6g", sign, x)
- }
-
- // Out of float64 range. Do approximate manual to decimal
- // conversion to avoid precise but possibly slow Float
- // formatting.
- // f = mant * 2**exp
- var mant big.Float
- exp := f.MantExp(&mant) // 0.5 <= mant < 1.0
-
- // approximate float64 mantissa m and decimal exponent d
- // f ~ m * 10**d
- m, _ := mant.Float64() // 0.5 <= m < 1.0
- d := float64(exp) * (math.Ln2 / math.Ln10) // log_10(2)
-
- // adjust m for truncated (integer) decimal exponent e
- e := int64(d)
- m *= math.Pow(10, d-float64(e))
-
- // ensure 1 <= m < 10
- switch {
- case m < 1-0.5e-6:
- // The %.6g format below rounds m to 5 digits after the
- // decimal point. Make sure that m*10 < 10 even after
- // rounding up: m*10 + 0.5e-5 < 10 => m < 1 - 0.5e-6.
- m *= 10
- e--
- case m >= 10:
- m /= 10
- e++
- }
-
- return fmt.Sprintf("%s%.6ge%+d", sign, m, e)
-}
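
The out-of-range branch above is a compact piece of numerics: it recovers f = mant * 2**exp, converts the binary exponent to a decimal one via log10(2), and renormalizes so that 1 <= m < 10. A minimal standalone sketch of the same approximation, using only the standard library (approxDecimal is an illustrative name, not compiler API):

    package main

    import (
        "fmt"
        "math"
        "math/big"
    )

    // approxDecimal mirrors the out-of-float64-range branch of
    // (*Mpflt).GoString: split f into mant*2**exp, estimate the decimal
    // exponent, and normalize the mantissa into [1, 10).
    func approxDecimal(f *big.Float) string {
        var mant big.Float
        exp := f.MantExp(&mant) // f = mant * 2**exp, 0.5 <= mant < 1
        m, _ := mant.Float64()
        d := float64(exp) * (math.Ln2 / math.Ln10) // f ~ m * 10**d
        e := int64(d)
        m *= math.Pow(10, d-float64(e)) // fold the fractional exponent into m
        switch {                        // ensure 1 <= m < 10
        case m < 1-0.5e-6:
            m *= 10
            e--
        case m >= 10:
            m /= 10
            e++
        }
        return fmt.Sprintf("%.6ge%+d", m, e)
    }

    func main() {
        f := new(big.Float).SetPrec(512)
        f.SetString("1e400")          // far outside float64 range
        fmt.Println(approxDecimal(f)) // 1e+400
    }
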
-
-// complex multiply v *= rv
-// (a, b) * (c, d) = (a*c - b*d, b*c + a*d)
-func (v *Mpcplx) Mul(rv *Mpcplx) {
- var ac, ad, bc, bd Mpflt
-
- ac.Set(&v.Real)
- ac.Mul(&rv.Real) // ac
-
- bd.Set(&v.Imag)
- bd.Mul(&rv.Imag) // bd
-
- bc.Set(&v.Imag)
- bc.Mul(&rv.Real) // bc
-
- ad.Set(&v.Real)
- ad.Mul(&rv.Imag) // ad
-
- v.Real.Set(&ac)
- v.Real.Sub(&bd) // ac-bd
-
- v.Imag.Set(&bc)
- v.Imag.Add(&ad) // bc+ad
-}
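
The big-float temporaries above just spell out the textbook identity; a quick check of the same identity with complex128 (editorial sketch, values chosen so every product is exact in binary):

    package main

    import "fmt"

    func main() {
        // (a+bi)(c+di) = (ac-bd) + (bc+ad)i, the identity Mpcplx.Mul
        // evaluates with its ac/bd/bc/ad temporaries.
        a, b, c, d := 1.5, -2.0, 3.0, 0.25
        got := complex(a*c-b*d, b*c+a*d)
        fmt.Println(got == complex(a, b)*complex(c, d)) // true
    }
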
-
-// complex divide v /= rv
-// (a, b) / (c, d) = ((a*c + b*d), (b*c - a*d))/(c*c + d*d)
-func (v *Mpcplx) Div(rv *Mpcplx) bool {
- if rv.Real.CmpFloat64(0) == 0 && rv.Imag.CmpFloat64(0) == 0 {
- return false
- }
-
- var ac, ad, bc, bd, cc_plus_dd Mpflt
-
- cc_plus_dd.Set(&rv.Real)
- cc_plus_dd.Mul(&rv.Real) // cc
-
- ac.Set(&rv.Imag)
- ac.Mul(&rv.Imag) // dd
- cc_plus_dd.Add(&ac) // cc+dd
-
- // We already checked that c and d are not both zero, but we can't
- // assume that c²+d² != 0 follows, because for tiny values of c
- // and/or d, c²+d² can underflow to zero. Check that c²+d² is
- // nonzero and return false if it is not.
- if cc_plus_dd.CmpFloat64(0) == 0 {
- return false
- }
-
- ac.Set(&v.Real)
- ac.Mul(&rv.Real) // ac
-
- bd.Set(&v.Imag)
- bd.Mul(&rv.Imag) // bd
-
- bc.Set(&v.Imag)
- bc.Mul(&rv.Real) // bc
-
- ad.Set(&v.Real)
- ad.Mul(&rv.Imag) // ad
-
- v.Real.Set(&ac)
- v.Real.Add(&bd) // ac+bd
- v.Real.Quo(&cc_plus_dd) // (ac+bd)/(cc+dd)
-
- v.Imag.Set(&bc)
- v.Imag.Sub(&ad) // bc-ad
- v.Imag.Quo(&cc_plus_dd) // (bc-ad)/(cc+dd)
-
- return true
-}
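
The c²+d² underflow the comment warns about is easy to reproduce with plain float64 (editorial sketch; Mpflt works at 512-bit precision, but the same hazard is what motivates the explicit check):

    package main

    import "fmt"

    func main() {
        c, d := 1e-300, 1e-300 // both divisor components are nonzero
        den := c*c + d*d       // each square underflows to 0 in float64
        fmt.Println(den == 0)  // true: dividing by den would yield Inf/NaN
    }
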
-
-func (v *Mpcplx) String() string {
- return fmt.Sprintf("(%s+%si)", v.Real.String(), v.Imag.String())
-}
-
-func (v *Mpcplx) GoString() string {
- var re string
- sre := v.Real.CmpFloat64(0)
- if sre != 0 {
- re = v.Real.GoString()
- }
-
- var im string
- sim := v.Imag.CmpFloat64(0)
- if sim != 0 {
- im = v.Imag.GoString()
- }
-
- switch {
- case sre == 0 && sim == 0:
- return "0"
- case sre == 0:
- return im + "i"
- case sim == 0:
- return re
- case sim < 0:
- return fmt.Sprintf("(%s%si)", re, im)
- default:
- return fmt.Sprintf("(%s+%si)", re, im)
- }
-}
diff --git a/src/cmd/compile/internal/gc/mpint.go b/src/cmd/compile/internal/gc/mpint.go
deleted file mode 100644
index 340350bca7..0000000000
--- a/src/cmd/compile/internal/gc/mpint.go
+++ /dev/null
@@ -1,304 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
- "fmt"
- "math/big"
-)
-
-// implements integer arithmetic
-
-// Mpint represents an integer constant.
-type Mpint struct {
- Val big.Int
- Ovf bool // set if Val overflowed compiler limit (sticky)
- Rune bool // set if syntax indicates default type rune
-}
-
-func (a *Mpint) SetOverflow() {
- a.Val.SetUint64(1) // avoid spurious div-zero errors
- a.Ovf = true
-}
-
-func (a *Mpint) checkOverflow(extra int) bool {
- // We don't need to be precise here; any reasonable upper limit would do.
- // For now, use existing limit so we pass all the tests unchanged.
- if a.Val.BitLen()+extra > Mpprec {
- a.SetOverflow()
- }
- return a.Ovf
-}
-
-func (a *Mpint) Set(b *Mpint) {
- a.Val.Set(&b.Val)
-}
-
-func (a *Mpint) SetFloat(b *Mpflt) bool {
- // avoid converting huge floating-point numbers to integers
- // (2*Mpprec is large enough to permit all tests to pass)
- if b.Val.MantExp(nil) > 2*Mpprec {
- a.SetOverflow()
- return false
- }
-
- if _, acc := b.Val.Int(&a.Val); acc == big.Exact {
- return true
- }
-
- const delta = 16 // a reasonably small number of bits > 0
- var t big.Float
- t.SetPrec(Mpprec - delta)
-
- // try rounding down a little
- t.SetMode(big.ToZero)
- t.Set(&b.Val)
- if _, acc := t.Int(&a.Val); acc == big.Exact {
- return true
- }
-
- // try rounding up a little
- t.SetMode(big.AwayFromZero)
- t.Set(&b.Val)
- if _, acc := t.Int(&a.Val); acc == big.Exact {
- return true
- }
-
- a.Ovf = false
- return false
-}
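
SetFloat's retry logic is subtle: an exact conversion can fail only because a handful of trailing bits make the value fractional, so it re-rounds at slightly lower precision toward zero and away from zero before giving up. A minimal reproduction with math/big (editorial sketch; prec and delta match the deleted code):

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        const prec, delta = 512, 16

        // 2**500 + 0.5 needs 502 mantissa bits: exact at prec, but fractional.
        f := new(big.Float).SetPrec(prec)
        f.SetInt(new(big.Int).Lsh(big.NewInt(1), 500))
        f.Add(f, big.NewFloat(0.5))

        var i big.Int
        _, acc := f.Int(&i)
        fmt.Println(acc) // Below: the 0.5 was truncated, so not exact

        // Re-rounding at prec-delta bits toward zero drops the stray
        // fraction, and the integer conversion becomes exact.
        t := new(big.Float).SetPrec(prec - delta).SetMode(big.ToZero)
        t.Set(f)
        _, acc = t.Int(&i)
        fmt.Println(acc) // Exact
    }
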
-
-func (a *Mpint) Add(b *Mpint) {
- if a.Ovf || b.Ovf {
- if nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpint Add")
- }
- a.SetOverflow()
- return
- }
-
- a.Val.Add(&a.Val, &b.Val)
-
- if a.checkOverflow(0) {
- yyerror("constant addition overflow")
- }
-}
-
-func (a *Mpint) Sub(b *Mpint) {
- if a.Ovf || b.Ovf {
- if nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpint Sub")
- }
- a.SetOverflow()
- return
- }
-
- a.Val.Sub(&a.Val, &b.Val)
-
- if a.checkOverflow(0) {
- yyerror("constant subtraction overflow")
- }
-}
-
-func (a *Mpint) Mul(b *Mpint) {
- if a.Ovf || b.Ovf {
- if nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpint Mul")
- }
- a.SetOverflow()
- return
- }
-
- a.Val.Mul(&a.Val, &b.Val)
-
- if a.checkOverflow(0) {
- yyerror("constant multiplication overflow")
- }
-}
-
-func (a *Mpint) Quo(b *Mpint) {
- if a.Ovf || b.Ovf {
- if nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpint Quo")
- }
- a.SetOverflow()
- return
- }
-
- a.Val.Quo(&a.Val, &b.Val)
-
- if a.checkOverflow(0) {
- // can only happen for div-0, which should be checked elsewhere
- yyerror("constant division overflow")
- }
-}
-
-func (a *Mpint) Rem(b *Mpint) {
- if a.Ovf || b.Ovf {
- if nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpint Rem")
- }
- a.SetOverflow()
- return
- }
-
- a.Val.Rem(&a.Val, &b.Val)
-
- if a.checkOverflow(0) {
- // should never happen
- yyerror("constant modulo overflow")
- }
-}
-
-func (a *Mpint) Or(b *Mpint) {
- if a.Ovf || b.Ovf {
- if nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpint Or")
- }
- a.SetOverflow()
- return
- }
-
- a.Val.Or(&a.Val, &b.Val)
-}
-
-func (a *Mpint) And(b *Mpint) {
- if a.Ovf || b.Ovf {
- if nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpint And")
- }
- a.SetOverflow()
- return
- }
-
- a.Val.And(&a.Val, &b.Val)
-}
-
-func (a *Mpint) AndNot(b *Mpint) {
- if a.Ovf || b.Ovf {
- if nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpint AndNot")
- }
- a.SetOverflow()
- return
- }
-
- a.Val.AndNot(&a.Val, &b.Val)
-}
-
-func (a *Mpint) Xor(b *Mpint) {
- if a.Ovf || b.Ovf {
- if nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpint Xor")
- }
- a.SetOverflow()
- return
- }
-
- a.Val.Xor(&a.Val, &b.Val)
-}
-
-func (a *Mpint) Lsh(b *Mpint) {
- if a.Ovf || b.Ovf {
- if nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpint Lsh")
- }
- a.SetOverflow()
- return
- }
-
- s := b.Int64()
- if s < 0 || s >= Mpprec {
- msg := "shift count too large"
- if s < 0 {
- msg = "invalid negative shift count"
- }
- yyerror("%s: %d", msg, s)
- a.SetInt64(0)
- return
- }
-
- if a.checkOverflow(int(s)) {
- yyerror("constant shift overflow")
- return
- }
- a.Val.Lsh(&a.Val, uint(s))
-}
-
-func (a *Mpint) Rsh(b *Mpint) {
- if a.Ovf || b.Ovf {
- if nsavederrors+nerrors == 0 {
- Fatalf("ovf in Mpint Rsh")
- }
- a.SetOverflow()
- return
- }
-
- s := b.Int64()
- if s < 0 {
- yyerror("invalid negative shift count: %d", s)
- if a.Val.Sign() < 0 {
- a.SetInt64(-1)
- } else {
- a.SetInt64(0)
- }
- return
- }
-
- a.Val.Rsh(&a.Val, uint(s))
-}
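
The negative-shift error path above picks -1 rather than 0 for negative values because big.Int's Rsh is an arithmetic shift (floor division by 2**n), so -1 is the value any sufficiently large right shift of a negative number converges to (editorial sketch):

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        fmt.Println(new(big.Int).Rsh(big.NewInt(-7), 100)) // -1
        fmt.Println(new(big.Int).Rsh(big.NewInt(7), 100))  // 0
    }
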
-
-func (a *Mpint) Cmp(b *Mpint) int {
- return a.Val.Cmp(&b.Val)
-}
-
-func (a *Mpint) CmpInt64(c int64) int {
- if c == 0 {
- return a.Val.Sign() // common case shortcut
- }
- return a.Val.Cmp(big.NewInt(c))
-}
-
-func (a *Mpint) Neg() {
- a.Val.Neg(&a.Val)
-}
-
-func (a *Mpint) Int64() int64 {
- if a.Ovf {
- if nsavederrors+nerrors == 0 {
- Fatalf("constant overflow")
- }
- return 0
- }
-
- return a.Val.Int64()
-}
-
-func (a *Mpint) SetInt64(c int64) {
- a.Val.SetInt64(c)
-}
-
-func (a *Mpint) SetString(as string) {
- _, ok := a.Val.SetString(as, 0)
- if !ok {
- // The lexer checks for correct syntax of the literal
- // and reports detailed errors. Thus SetString should
- // never fail (in theory it might run out of memory,
- // but that wouldn't be reported as an error here).
- Fatalf("malformed integer constant: %s", as)
- return
- }
- if a.checkOverflow(0) {
- yyerror("constant too large: %s", as)
- }
-}
-
-func (a *Mpint) GoString() string {
- return a.Val.String()
-}
-
-func (a *Mpint) String() string {
- return fmt.Sprintf("%#x", &a.Val)
-}
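
For reference, the deleted file's overflow regime caps every constant at Mpprec (512) bits, with checkOverflow's extra parameter reserving headroom for a pending shift. A condensed sketch of that check (editorial, standard library only; tooWide is an illustrative name):

    package main

    import (
        "fmt"
        "math/big"
    )

    const Mpprec = 512 // the compiler's constant-arithmetic bit limit

    // tooWide mirrors (*Mpint).checkOverflow: anything wider than Mpprec
    // bits, plus headroom for a pending shift, counts as overflow.
    func tooWide(v *big.Int, extra int) bool {
        return v.BitLen()+extra > Mpprec
    }

    func main() {
        one := big.NewInt(1)
        wide := new(big.Int).Lsh(one, 512) // 2**512 has 513 bits
        fmt.Println(tooWide(wide, 0))      // true: "constant too large"
        fmt.Println(tooWide(one, 511))     // false: 1<<511 still fits
        fmt.Println(tooWide(one, 512))     // true: Lsh would overflow
    }
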
diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go
index 4ed91035a5..0cbea2c461 100644
--- a/src/cmd/compile/internal/gc/noder.go
+++ b/src/cmd/compile/internal/gc/noder.go
@@ -6,6 +6,8 @@ package gc
import (
"fmt"
+ "go/constant"
+ "go/token"
"io"
"os"
"path/filepath"
@@ -15,7 +17,9 @@ import (
"unicode"
"unicode/utf8"
+ "cmd/compile/internal/base"
"cmd/compile/internal/importer"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
"cmd/compile/internal/types"
"cmd/compile/internal/types2"
@@ -66,15 +70,15 @@ func parseFiles(filenames []string, allowGenerics bool) (lines uint) {
var files []*syntax.File
for _, p := range noders {
for e := range p.err {
- p.yyerrorpos(e.Pos, "%s", e.Msg)
+ p.errorAt(e.Pos, "%s", e.Msg)
}
nodersmap[p.file.Pos().RelFilename()] = p
files = append(files, p.file)
lines += p.file.EOF.Line()
- if nsyntaxerrors != 0 {
- errorexit()
+ if base.SyntaxErrors() != 0 {
+ base.ErrorExit()
}
}
@@ -91,7 +95,7 @@ func parseFiles(filenames []string, allowGenerics bool) (lines uint) {
return
}
p := nodersmap[terr.Pos.RelFilename()]
- yyerrorl(p.makeXPos(terr.Pos), "%s", terr.Msg)
+ base.ErrorfAt(p.makeXPos(terr.Pos), "%s", terr.Msg)
},
Importer: &gcimports{
packages: make(map[string]*types2.Package),
@@ -104,27 +108,27 @@ func parseFiles(filenames []string, allowGenerics bool) (lines uint) {
},
},
}
- conf.Check(Ctxt.Pkgpath, files, nil)
+ conf.Check(base.Ctxt.Pkgpath, files, nil)
return
}
for _, p := range noders {
for e := range p.err {
- p.yyerrorpos(e.Pos, "%s", e.Msg)
+ p.errorAt(e.Pos, "%s", e.Msg)
}
p.node()
lines += p.file.EOF.Line()
p.file = nil // release memory
- if nsyntaxerrors != 0 {
- errorexit()
+ if base.SyntaxErrors() != 0 {
+ base.ErrorExit()
}
// Always run testdclstack here, even when debug_dclstack is not set, as a sanity measure.
testdclstack()
}
- localpkg.Height = myheight
+ ir.LocalPkg.Height = myheight
return
}
@@ -178,22 +182,20 @@ func (p *noder) makeSrcPosBase(b0 *syntax.PosBase) *src.PosBase {
}
func (p *noder) makeXPos(pos syntax.Pos) (_ src.XPos) {
- return Ctxt.PosTable.XPos(src.MakePos(p.makeSrcPosBase(pos.Base()), pos.Line(), pos.Col()))
+ return base.Ctxt.PosTable.XPos(src.MakePos(p.makeSrcPosBase(pos.Base()), pos.Line(), pos.Col()))
}
-func (p *noder) yyerrorpos(pos syntax.Pos, format string, args ...interface{}) {
- yyerrorl(p.makeXPos(pos), format, args...)
+func (p *noder) errorAt(pos syntax.Pos, format string, args ...interface{}) {
+ base.ErrorfAt(p.makeXPos(pos), format, args...)
}
-var pathPrefix string
-
// TODO(gri) Can we eliminate fileh in favor of absFilename?
func fileh(name string) string {
- return objabi.AbsFile("", name, pathPrefix)
+ return objabi.AbsFile("", name, base.Flag.TrimPath)
}
func absFilename(name string) string {
- return objabi.AbsFile(Ctxt.Pathname, name, pathPrefix)
+ return objabi.AbsFile(base.Ctxt.Pathname, name, base.Flag.TrimPath)
}
// noder transforms package syntax's AST into a Node tree.
@@ -208,7 +210,7 @@ type noder struct {
linknames []linkname
pragcgobuf [][]string
err chan syntax.Error
- scope ScopeID
+ scope ir.ScopeID
importedUnsafe bool
importedEmbed bool
@@ -219,7 +221,7 @@ type noder struct {
lastCloseScopePos syntax.Pos
}
-func (p *noder) funcBody(fn *Node, block *syntax.BlockStmt) {
+func (p *noder) funcBody(fn ir.Node, block *syntax.BlockStmt) {
oldScope := p.scope
p.scope = 0
funchdr(fn)
@@ -227,12 +229,12 @@ func (p *noder) funcBody(fn *Node, block *syntax.BlockStmt) {
if block != nil {
body := p.stmts(block.List)
if body == nil {
- body = []*Node{nod(OEMPTY, nil, nil)}
+ body = []ir.Node{ir.Nod(ir.OEMPTY, nil, nil)}
}
- fn.Nbody.Set(body)
+ fn.PtrBody().Set(body)
- lineno = p.makeXPos(block.Rbrace)
- fn.Func.Endlineno = lineno
+ base.Pos = p.makeXPos(block.Rbrace)
+ fn.Func().Endlineno = base.Pos
}
funcbody()
@@ -243,9 +245,9 @@ func (p *noder) openScope(pos syntax.Pos) {
types.Markdcl()
if trackScopes {
- Curfn.Func.Parents = append(Curfn.Func.Parents, p.scope)
- p.scopeVars = append(p.scopeVars, len(Curfn.Func.Dcl))
- p.scope = ScopeID(len(Curfn.Func.Parents))
+ Curfn.Func().Parents = append(Curfn.Func().Parents, p.scope)
+ p.scopeVars = append(p.scopeVars, len(Curfn.Func().Dcl))
+ p.scope = ir.ScopeID(len(Curfn.Func().Parents))
p.markScope(pos)
}
@@ -258,29 +260,29 @@ func (p *noder) closeScope(pos syntax.Pos) {
if trackScopes {
scopeVars := p.scopeVars[len(p.scopeVars)-1]
p.scopeVars = p.scopeVars[:len(p.scopeVars)-1]
- if scopeVars == len(Curfn.Func.Dcl) {
+ if scopeVars == len(Curfn.Func().Dcl) {
// no variables were declared in this scope, so we can retract it.
- if int(p.scope) != len(Curfn.Func.Parents) {
- Fatalf("scope tracking inconsistency, no variables declared but scopes were not retracted")
+ if int(p.scope) != len(Curfn.Func().Parents) {
+ base.Fatalf("scope tracking inconsistency, no variables declared but scopes were not retracted")
}
- p.scope = Curfn.Func.Parents[p.scope-1]
- Curfn.Func.Parents = Curfn.Func.Parents[:len(Curfn.Func.Parents)-1]
+ p.scope = Curfn.Func().Parents[p.scope-1]
+ Curfn.Func().Parents = Curfn.Func().Parents[:len(Curfn.Func().Parents)-1]
- nmarks := len(Curfn.Func.Marks)
- Curfn.Func.Marks[nmarks-1].Scope = p.scope
- prevScope := ScopeID(0)
+ nmarks := len(Curfn.Func().Marks)
+ Curfn.Func().Marks[nmarks-1].Scope = p.scope
+ prevScope := ir.ScopeID(0)
if nmarks >= 2 {
- prevScope = Curfn.Func.Marks[nmarks-2].Scope
+ prevScope = Curfn.Func().Marks[nmarks-2].Scope
}
- if Curfn.Func.Marks[nmarks-1].Scope == prevScope {
- Curfn.Func.Marks = Curfn.Func.Marks[:nmarks-1]
+ if Curfn.Func().Marks[nmarks-1].Scope == prevScope {
+ Curfn.Func().Marks = Curfn.Func().Marks[:nmarks-1]
}
return
}
- p.scope = Curfn.Func.Parents[p.scope-1]
+ p.scope = Curfn.Func().Parents[p.scope-1]
p.markScope(pos)
}
@@ -288,10 +290,10 @@ func (p *noder) closeScope(pos syntax.Pos) {
func (p *noder) markScope(pos syntax.Pos) {
xpos := p.makeXPos(pos)
- if i := len(Curfn.Func.Marks); i > 0 && Curfn.Func.Marks[i-1].Pos == xpos {
- Curfn.Func.Marks[i-1].Scope = p.scope
+ if i := len(Curfn.Func().Marks); i > 0 && Curfn.Func().Marks[i-1].Pos == xpos {
+ Curfn.Func().Marks[i-1].Scope = p.scope
} else {
- Curfn.Func.Marks = append(Curfn.Func.Marks, Mark{xpos, p.scope})
+ Curfn.Func().Marks = append(Curfn.Func().Marks, ir.Mark{Pos: xpos, Scope: p.scope})
}
}
@@ -319,7 +321,7 @@ func (p *noder) node() {
mkpackage(p.file.PkgName.Value)
if pragma, ok := p.file.Pragma.(*Pragma); ok {
- pragma.Flag &^= GoBuildPragma
+ pragma.Flag &^= ir.GoBuildPragma
p.checkUnused(pragma)
}
@@ -327,7 +329,7 @@ func (p *noder) node() {
for _, n := range p.linknames {
if !p.importedUnsafe {
- p.yyerrorpos(n.pos, "//go:linkname only allowed in Go files that import \"unsafe\"")
+ p.errorAt(n.pos, "//go:linkname only allowed in Go files that import \"unsafe\"")
continue
}
s := lookup(n.local)
@@ -336,10 +338,10 @@ func (p *noder) node() {
} else {
// Use the default object symbol name if the
// user didn't provide one.
- if myimportpath == "" {
- p.yyerrorpos(n.pos, "//go:linkname requires linkname argument or -p compiler flag")
+ if base.Ctxt.Pkgpath == "" {
+ p.errorAt(n.pos, "//go:linkname requires linkname argument or -p compiler flag")
} else {
- s.Linkname = objabi.PathToPrefix(myimportpath) + "." + n.local
+ s.Linkname = objabi.PathToPrefix(base.Ctxt.Pkgpath) + "." + n.local
}
}
}
@@ -357,11 +359,11 @@ func (p *noder) node() {
}
pragcgobuf = append(pragcgobuf, p.pragcgobuf...)
- lineno = src.NoXPos
+ base.Pos = src.NoXPos
clearImports()
}
-func (p *noder) decls(decls []syntax.Decl) (l []*Node) {
+func (p *noder) decls(decls []syntax.Decl) (l []ir.Node) {
var cs constState
for _, decl := range decls {
@@ -399,11 +401,10 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) {
p.checkUnused(pragma)
}
- val := p.basicLit(imp.Path)
- ipkg := importfile(&val)
+ ipkg := importfile(p.basicLit(imp.Path))
if ipkg == nil {
- if nerrors == 0 {
- Fatalf("phase error in import")
+ if base.Errors() == 0 {
+ base.Fatalf("phase error in import")
}
return
}
@@ -424,33 +425,33 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) {
my = lookup(ipkg.Name)
}
- pack := p.nod(imp, OPACK, nil, nil)
- pack.Sym = my
- pack.Name.Pkg = ipkg
+ pack := p.nod(imp, ir.OPACK, nil, nil)
+ pack.SetSym(my)
+ pack.Name().Pkg = ipkg
switch my.Name {
case ".":
importdot(ipkg, pack)
return
case "init":
- yyerrorl(pack.Pos, "cannot import package as init - init must be a func")
+ base.ErrorfAt(pack.Pos(), "cannot import package as init - init must be a func")
return
case "_":
return
}
if my.Def != nil {
- redeclare(pack.Pos, my, "as imported package name")
+ redeclare(pack.Pos(), my, "as imported package name")
}
- my.Def = asTypesNode(pack)
- my.Lastlineno = pack.Pos
+ my.Def = pack
+ my.Lastlineno = pack.Pos()
my.Block = 1 // at top level
}
-func (p *noder) varDecl(decl *syntax.VarDecl) []*Node {
+func (p *noder) varDecl(decl *syntax.VarDecl) []ir.Node {
names := p.declNames(decl.NameList)
typ := p.typeExprOrNil(decl.Type)
- var exprs []*Node
+ var exprs []ir.Node
if decl.Values != nil {
exprs = p.exprList(decl.Values)
}
@@ -463,7 +464,7 @@ func (p *noder) varDecl(decl *syntax.VarDecl) []*Node {
// so at that point it hasn't seen the imports.
// We're left to check now, just before applying the //go:embed lines.
for _, e := range pragma.Embeds {
- p.yyerrorpos(e.Pos, "//go:embed only allowed in Go files that import \"embed\"")
+ p.errorAt(e.Pos, "//go:embed only allowed in Go files that import \"embed\"")
}
} else {
exprs = varEmbed(p, names, typ, exprs, pragma.Embeds)
@@ -482,12 +483,12 @@ func (p *noder) varDecl(decl *syntax.VarDecl) []*Node {
// constant declarations are handled correctly (e.g., issue 15550).
type constState struct {
group *syntax.Group
- typ *Node
- values []*Node
+ typ ir.Node
+ values []ir.Node
iota int64
}
-func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*Node {
+func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node {
if decl.Group == nil || decl.Group != cs.group {
*cs = constState{
group: decl.Group,
@@ -501,40 +502,40 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*Node {
names := p.declNames(decl.NameList)
typ := p.typeExprOrNil(decl.Type)
- var values []*Node
+ var values []ir.Node
if decl.Values != nil {
values = p.exprList(decl.Values)
cs.typ, cs.values = typ, values
} else {
if typ != nil {
- yyerror("const declaration cannot have type without expression")
+ base.Errorf("const declaration cannot have type without expression")
}
typ, values = cs.typ, cs.values
}
- nn := make([]*Node, 0, len(names))
+ nn := make([]ir.Node, 0, len(names))
for i, n := range names {
if i >= len(values) {
- yyerror("missing value in const declaration")
+ base.Errorf("missing value in const declaration")
break
}
v := values[i]
if decl.Values == nil {
- v = treecopy(v, n.Pos)
+ v = treecopy(v, n.Pos())
}
- n.Op = OLITERAL
+ n.SetOp(ir.OLITERAL)
declare(n, dclcontext)
- n.Name.Param.Ntype = typ
- n.Name.Defn = v
+ n.Name().Param.Ntype = typ
+ n.Name().Defn = v
n.SetIota(cs.iota)
- nn = append(nn, p.nod(decl, ODCLCONST, n, nil))
+ nn = append(nn, p.nod(decl, ir.ODCLCONST, n, nil))
}
if len(values) > len(names) {
- yyerror("extra expression in const declaration")
+ base.Errorf("extra expression in const declaration")
}
cs.iota++
@@ -542,15 +543,15 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*Node {
return nn
}
-func (p *noder) typeDecl(decl *syntax.TypeDecl) *Node {
+func (p *noder) typeDecl(decl *syntax.TypeDecl) ir.Node {
n := p.declName(decl.Name)
- n.Op = OTYPE
+ n.SetOp(ir.OTYPE)
declare(n, dclcontext)
// decl.Type may be nil but in that case we got a syntax error during parsing
typ := p.typeExprOrNil(decl.Type)
- param := n.Name.Param
+ param := n.Name().Param
param.Ntype = typ
param.SetAlias(decl.Alias)
if pragma, ok := decl.Pragma.(*Pragma); ok {
@@ -561,86 +562,86 @@ func (p *noder) typeDecl(decl *syntax.TypeDecl) *Node {
p.checkUnused(pragma)
}
- nod := p.nod(decl, ODCLTYPE, n, nil)
- if param.Alias() && !langSupported(1, 9, localpkg) {
- yyerrorl(nod.Pos, "type aliases only supported as of -lang=go1.9")
+ nod := p.nod(decl, ir.ODCLTYPE, n, nil)
+ if param.Alias() && !langSupported(1, 9, ir.LocalPkg) {
+ base.ErrorfAt(nod.Pos(), "type aliases only supported as of -lang=go1.9")
}
return nod
}
-func (p *noder) declNames(names []*syntax.Name) []*Node {
- nodes := make([]*Node, 0, len(names))
+func (p *noder) declNames(names []*syntax.Name) []ir.Node {
+ nodes := make([]ir.Node, 0, len(names))
for _, name := range names {
nodes = append(nodes, p.declName(name))
}
return nodes
}
-func (p *noder) declName(name *syntax.Name) *Node {
+func (p *noder) declName(name *syntax.Name) ir.Node {
n := dclname(p.name(name))
- n.Pos = p.pos(name)
+ n.SetPos(p.pos(name))
return n
}
-func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node {
+func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node {
name := p.name(fun.Name)
t := p.signature(fun.Recv, fun.Type)
- f := p.nod(fun, ODCLFUNC, nil, nil)
+ f := p.nod(fun, ir.ODCLFUNC, nil, nil)
if fun.Recv == nil {
if name.Name == "init" {
name = renameinit()
- if t.List.Len() > 0 || t.Rlist.Len() > 0 {
- yyerrorl(f.Pos, "func init must have no arguments and no return values")
+ if t.List().Len() > 0 || t.Rlist().Len() > 0 {
+ base.ErrorfAt(f.Pos(), "func init must have no arguments and no return values")
}
}
- if localpkg.Name == "main" && name.Name == "main" {
- if t.List.Len() > 0 || t.Rlist.Len() > 0 {
- yyerrorl(f.Pos, "func main must have no arguments and no return values")
+ if ir.LocalPkg.Name == "main" && name.Name == "main" {
+ if t.List().Len() > 0 || t.Rlist().Len() > 0 {
+ base.ErrorfAt(f.Pos(), "func main must have no arguments and no return values")
}
}
} else {
- f.Func.Shortname = name
- name = nblank.Sym // filled in by typecheckfunc
+ f.Func().Shortname = name
+ name = ir.BlankNode.Sym() // filled in by typecheckfunc
}
- f.Func.Nname = newfuncnamel(p.pos(fun.Name), name)
- f.Func.Nname.Name.Defn = f
- f.Func.Nname.Name.Param.Ntype = t
+ f.Func().Nname = newfuncnamel(p.pos(fun.Name), name, f.Func())
+ f.Func().Nname.Name().Defn = f
+ f.Func().Nname.Name().Param.Ntype = t
if pragma, ok := fun.Pragma.(*Pragma); ok {
- f.Func.Pragma = pragma.Flag & FuncPragmas
- if pragma.Flag&Systemstack != 0 && pragma.Flag&Nosplit != 0 {
- yyerrorl(f.Pos, "go:nosplit and go:systemstack cannot be combined")
+ f.Func().Pragma = pragma.Flag & FuncPragmas
+ if pragma.Flag&ir.Systemstack != 0 && pragma.Flag&ir.Nosplit != 0 {
+ base.ErrorfAt(f.Pos(), "go:nosplit and go:systemstack cannot be combined")
}
pragma.Flag &^= FuncPragmas
p.checkUnused(pragma)
}
if fun.Recv == nil {
- declare(f.Func.Nname, PFUNC)
+ declare(f.Func().Nname, ir.PFUNC)
}
p.funcBody(f, fun.Body)
if fun.Body != nil {
- if f.Func.Pragma&Noescape != 0 {
- yyerrorl(f.Pos, "can only use //go:noescape with external func implementations")
+ if f.Func().Pragma&ir.Noescape != 0 {
+ base.ErrorfAt(f.Pos(), "can only use //go:noescape with external func implementations")
}
} else {
- if pure_go || strings.HasPrefix(f.funcname(), "init.") {
+ if base.Flag.Complete || strings.HasPrefix(ir.FuncName(f), "init.") {
// Linknamed functions are allowed to have no body. Hopefully
// the linkname target has a body. See issue 23311.
isLinknamed := false
for _, n := range p.linknames {
- if f.funcname() == n.local {
+ if ir.FuncName(f) == n.local {
isLinknamed = true
break
}
}
if !isLinknamed {
- yyerrorl(f.Pos, "missing function body")
+ base.ErrorfAt(f.Pos(), "missing function body")
}
}
}
@@ -648,18 +649,18 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node {
return f
}
-func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) *Node {
- n := p.nod(typ, OTFUNC, nil, nil)
+func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) ir.Node {
+ n := p.nod(typ, ir.OTFUNC, nil, nil)
if recv != nil {
- n.Left = p.param(recv, false, false)
+ n.SetLeft(p.param(recv, false, false))
}
- n.List.Set(p.params(typ.ParamList, true))
- n.Rlist.Set(p.params(typ.ResultList, false))
+ n.PtrList().Set(p.params(typ.ParamList, true))
+ n.PtrRlist().Set(p.params(typ.ResultList, false))
return n
}
-func (p *noder) params(params []*syntax.Field, dddOk bool) []*Node {
- nodes := make([]*Node, 0, len(params))
+func (p *noder) params(params []*syntax.Field, dddOk bool) []ir.Node {
+ nodes := make([]ir.Node, 0, len(params))
for i, param := range params {
p.setlineno(param)
nodes = append(nodes, p.param(param, dddOk, i+1 == len(params)))
@@ -667,56 +668,56 @@ func (p *noder) params(params []*syntax.Field, dddOk bool) []*Node {
return nodes
}
-func (p *noder) param(param *syntax.Field, dddOk, final bool) *Node {
+func (p *noder) param(param *syntax.Field, dddOk, final bool) ir.Node {
var name *types.Sym
if param.Name != nil {
name = p.name(param.Name)
}
typ := p.typeExpr(param.Type)
- n := p.nodSym(param, ODCLFIELD, typ, name)
+ n := p.nodSym(param, ir.ODCLFIELD, typ, name)
// rewrite ...T parameter
- if typ.Op == ODDD {
+ if typ.Op() == ir.ODDD {
if !dddOk {
// We mark these as syntax errors to get automatic elimination
- // of multiple such errors per line (see yyerrorl in subr.go).
- yyerror("syntax error: cannot use ... in receiver or result parameter list")
+ // of multiple such errors per line (see ErrorfAt in subr.go).
+ base.Errorf("syntax error: cannot use ... in receiver or result parameter list")
} else if !final {
if param.Name == nil {
- yyerror("syntax error: cannot use ... with non-final parameter")
+ base.Errorf("syntax error: cannot use ... with non-final parameter")
} else {
- p.yyerrorpos(param.Name.Pos(), "syntax error: cannot use ... with non-final parameter %s", param.Name.Value)
+ p.errorAt(param.Name.Pos(), "syntax error: cannot use ... with non-final parameter %s", param.Name.Value)
}
}
- typ.Op = OTARRAY
- typ.Right = typ.Left
- typ.Left = nil
+ typ.SetOp(ir.OTARRAY)
+ typ.SetRight(typ.Left())
+ typ.SetLeft(nil)
n.SetIsDDD(true)
- if n.Left != nil {
- n.Left.SetIsDDD(true)
+ if n.Left() != nil {
+ n.Left().SetIsDDD(true)
}
}
return n
}
-func (p *noder) exprList(expr syntax.Expr) []*Node {
+func (p *noder) exprList(expr syntax.Expr) []ir.Node {
if list, ok := expr.(*syntax.ListExpr); ok {
return p.exprs(list.ElemList)
}
- return []*Node{p.expr(expr)}
+ return []ir.Node{p.expr(expr)}
}
-func (p *noder) exprs(exprs []syntax.Expr) []*Node {
- nodes := make([]*Node, 0, len(exprs))
+func (p *noder) exprs(exprs []syntax.Expr) []ir.Node {
+ nodes := make([]ir.Node, 0, len(exprs))
for _, expr := range exprs {
nodes = append(nodes, p.expr(expr))
}
return nodes
}
-func (p *noder) expr(expr syntax.Expr) *Node {
+func (p *noder) expr(expr syntax.Expr) ir.Node {
p.setlineno(expr)
switch expr := expr.(type) {
case nil, *syntax.BadExpr:
@@ -724,47 +725,50 @@ func (p *noder) expr(expr syntax.Expr) *Node {
case *syntax.Name:
return p.mkname(expr)
case *syntax.BasicLit:
- n := nodlit(p.basicLit(expr))
+ n := ir.NewLiteral(p.basicLit(expr))
+ if expr.Kind == syntax.RuneLit {
+ n.SetType(types.UntypedRune)
+ }
n.SetDiag(expr.Bad) // avoid follow-on errors if there was a syntax error
return n
case *syntax.CompositeLit:
- n := p.nod(expr, OCOMPLIT, nil, nil)
+ n := p.nod(expr, ir.OCOMPLIT, nil, nil)
if expr.Type != nil {
- n.Right = p.expr(expr.Type)
+ n.SetRight(p.expr(expr.Type))
}
l := p.exprs(expr.ElemList)
for i, e := range l {
l[i] = p.wrapname(expr.ElemList[i], e)
}
- n.List.Set(l)
- lineno = p.makeXPos(expr.Rbrace)
+ n.PtrList().Set(l)
+ base.Pos = p.makeXPos(expr.Rbrace)
return n
case *syntax.KeyValueExpr:
// use position of expr.Key rather than of expr (which has position of ':')
- return p.nod(expr.Key, OKEY, p.expr(expr.Key), p.wrapname(expr.Value, p.expr(expr.Value)))
+ return p.nod(expr.Key, ir.OKEY, p.expr(expr.Key), p.wrapname(expr.Value, p.expr(expr.Value)))
case *syntax.FuncLit:
return p.funcLit(expr)
case *syntax.ParenExpr:
- return p.nod(expr, OPAREN, p.expr(expr.X), nil)
+ return p.nod(expr, ir.OPAREN, p.expr(expr.X), nil)
case *syntax.SelectorExpr:
// parser.new_dotname
obj := p.expr(expr.X)
- if obj.Op == OPACK {
- obj.Name.SetUsed(true)
- return importName(obj.Name.Pkg.Lookup(expr.Sel.Value))
+ if obj.Op() == ir.OPACK {
+ obj.Name().SetUsed(true)
+ return importName(obj.Name().Pkg.Lookup(expr.Sel.Value))
}
- n := nodSym(OXDOT, obj, p.name(expr.Sel))
- n.Pos = p.pos(expr) // lineno may have been changed by p.expr(expr.X)
+ n := nodSym(ir.OXDOT, obj, p.name(expr.Sel))
+ n.SetPos(p.pos(expr)) // lineno may have been changed by p.expr(expr.X)
return n
case *syntax.IndexExpr:
- return p.nod(expr, OINDEX, p.expr(expr.X), p.expr(expr.Index))
+ return p.nod(expr, ir.OINDEX, p.expr(expr.X), p.expr(expr.Index))
case *syntax.SliceExpr:
- op := OSLICE
+ op := ir.OSLICE
if expr.Full {
- op = OSLICE3
+ op = ir.OSLICE3
}
n := p.nod(expr, op, p.expr(expr.X), nil)
- var index [3]*Node
+ var index [3]ir.Node
for i, x := range &expr.Index {
if x != nil {
index[i] = p.expr(x)
@@ -773,7 +777,7 @@ func (p *noder) expr(expr syntax.Expr) *Node {
n.SetSliceBounds(index[0], index[1], index[2])
return n
case *syntax.AssertExpr:
- return p.nod(expr, ODOTTYPE, p.expr(expr.X), p.typeExpr(expr.Type))
+ return p.nod(expr, ir.ODOTTYPE, p.expr(expr.X), p.typeExpr(expr.Type))
case *syntax.Operation:
if expr.Op == syntax.Add && expr.Y != nil {
return p.sum(expr)
@@ -784,23 +788,23 @@ func (p *noder) expr(expr syntax.Expr) *Node {
}
return p.nod(expr, p.binOp(expr.Op), x, p.expr(expr.Y))
case *syntax.CallExpr:
- n := p.nod(expr, OCALL, p.expr(expr.Fun), nil)
- n.List.Set(p.exprs(expr.ArgList))
+ n := p.nod(expr, ir.OCALL, p.expr(expr.Fun), nil)
+ n.PtrList().Set(p.exprs(expr.ArgList))
n.SetIsDDD(expr.HasDots)
return n
case *syntax.ArrayType:
- var len *Node
+ var len ir.Node
if expr.Len != nil {
len = p.expr(expr.Len)
} else {
- len = p.nod(expr, ODDD, nil, nil)
+ len = p.nod(expr, ir.ODDD, nil, nil)
}
- return p.nod(expr, OTARRAY, len, p.typeExpr(expr.Elem))
+ return p.nod(expr, ir.OTARRAY, len, p.typeExpr(expr.Elem))
case *syntax.SliceType:
- return p.nod(expr, OTARRAY, nil, p.typeExpr(expr.Elem))
+ return p.nod(expr, ir.OTARRAY, nil, p.typeExpr(expr.Elem))
case *syntax.DotsType:
- return p.nod(expr, ODDD, p.typeExpr(expr.Elem), nil)
+ return p.nod(expr, ir.ODDD, p.typeExpr(expr.Elem), nil)
case *syntax.StructType:
return p.structType(expr)
case *syntax.InterfaceType:
@@ -808,18 +812,18 @@ func (p *noder) expr(expr syntax.Expr) *Node {
case *syntax.FuncType:
return p.signature(nil, expr)
case *syntax.MapType:
- return p.nod(expr, OTMAP, p.typeExpr(expr.Key), p.typeExpr(expr.Value))
+ return p.nod(expr, ir.OTMAP, p.typeExpr(expr.Key), p.typeExpr(expr.Value))
case *syntax.ChanType:
- n := p.nod(expr, OTCHAN, p.typeExpr(expr.Elem), nil)
+ n := p.nod(expr, ir.OTCHAN, p.typeExpr(expr.Elem), nil)
n.SetTChanDir(p.chanDir(expr.Dir))
return n
case *syntax.TypeSwitchGuard:
- n := p.nod(expr, OTYPESW, nil, p.expr(expr.X))
+ n := p.nod(expr, ir.OTYPESW, nil, p.expr(expr.X))
if expr.Lhs != nil {
- n.Left = p.declName(expr.Lhs)
- if n.Left.isBlank() {
- yyerror("invalid variable name %v in type switch", n.Left)
+ n.SetLeft(p.declName(expr.Lhs))
+ if ir.IsBlank(n.Left()) {
+ base.Errorf("invalid variable name %v in type switch", n.Left())
}
}
return n
@@ -830,7 +834,7 @@ func (p *noder) expr(expr syntax.Expr) *Node {
// sum efficiently handles very large summation expressions (such as
// in issue #16394). In particular, it avoids left recursion and
// collapses string literals.
-func (p *noder) sum(x syntax.Expr) *Node {
+func (p *noder) sum(x syntax.Expr) ir.Node {
// While we need to handle long sums with asymptotic
// efficiency, the vast majority of sums are very small: ~95%
// have only 2 or 3 operands, and ~99% of string literals are
@@ -865,11 +869,11 @@ func (p *noder) sum(x syntax.Expr) *Node {
// handle correctly. For now, we avoid these problems by
// treating named string constants the same as non-constant
// operands.
- var nstr *Node
+ var nstr ir.Node
chunks := make([]string, 0, 1)
n := p.expr(x)
- if Isconst(n, CTSTR) && n.Sym == nil {
+ if ir.IsConst(n, constant.String) && n.Sym() == nil {
nstr = n
chunks = append(chunks, nstr.StringVal())
}
@@ -878,7 +882,7 @@ func (p *noder) sum(x syntax.Expr) *Node {
add := adds[i]
r := p.expr(add.Y)
- if Isconst(r, CTSTR) && r.Sym == nil {
+ if ir.IsConst(r, constant.String) && r.Sym() == nil {
if nstr != nil {
// Collapse r into nstr instead of adding to n.
chunks = append(chunks, r.StringVal())
@@ -889,26 +893,26 @@ func (p *noder) sum(x syntax.Expr) *Node {
chunks = append(chunks, nstr.StringVal())
} else {
if len(chunks) > 1 {
- nstr.SetVal(Val{U: strings.Join(chunks, "")})
+ nstr.SetVal(constant.MakeString(strings.Join(chunks, "")))
}
nstr = nil
chunks = chunks[:0]
}
- n = p.nod(add, OADD, n, r)
+ n = p.nod(add, ir.OADD, n, r)
}
if len(chunks) > 1 {
- nstr.SetVal(Val{U: strings.Join(chunks, "")})
+ nstr.SetVal(constant.MakeString(strings.Join(chunks, "")))
}
return n
}
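
The chunk accumulation in sum is the interesting part: runs of constant strings are buffered and joined once rather than concatenated pairwise, which keeps huge literal sums linear. A stripped-down sketch of that buffering (editorial; collapse is an illustrative stand-in, not the compiler's API):

    package main

    import (
        "fmt"
        "strings"
    )

    // collapse buffers adjacent constant-string operands and joins each
    // run once, mirroring how sum folds string literals instead of
    // building one OADD node per operand.
    func collapse(operands []string, isConst func(string) bool) []string {
        var out, chunks []string
        flush := func() {
            if len(chunks) > 0 {
                out = append(out, strings.Join(chunks, ""))
                chunks = chunks[:0]
            }
        }
        for _, op := range operands {
            if isConst(op) {
                chunks = append(chunks, op)
            } else {
                flush()
                out = append(out, op)
            }
        }
        flush()
        return out
    }

    func main() {
        isConst := func(s string) bool { return !strings.HasPrefix(s, "$") }
        fmt.Println(collapse([]string{"a", "b", "$x", "c", "d"}, isConst))
        // [ab $x cd]
    }
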
-func (p *noder) typeExpr(typ syntax.Expr) *Node {
+func (p *noder) typeExpr(typ syntax.Expr) ir.Node {
// TODO(mdempsky): Be stricter? typecheck should handle errors anyway.
return p.expr(typ)
}
-func (p *noder) typeExprOrNil(typ syntax.Expr) *Node {
+func (p *noder) typeExprOrNil(typ syntax.Expr) ir.Node {
if typ != nil {
return p.expr(typ)
}
@@ -927,15 +931,15 @@ func (p *noder) chanDir(dir syntax.ChanDir) types.ChanDir {
panic("unhandled ChanDir")
}
-func (p *noder) structType(expr *syntax.StructType) *Node {
- l := make([]*Node, 0, len(expr.FieldList))
+func (p *noder) structType(expr *syntax.StructType) ir.Node {
+ l := make([]ir.Node, 0, len(expr.FieldList))
for i, field := range expr.FieldList {
p.setlineno(field)
- var n *Node
+ var n ir.Node
if field.Name == nil {
n = p.embedded(field.Type)
} else {
- n = p.nodSym(field, ODCLFIELD, p.typeExpr(field.Type), p.name(field.Name))
+ n = p.nodSym(field, ir.ODCLFIELD, p.typeExpr(field.Type), p.name(field.Name))
}
if i < len(expr.TagList) && expr.TagList[i] != nil {
n.SetVal(p.basicLit(expr.TagList[i]))
@@ -944,30 +948,30 @@ func (p *noder) structType(expr *syntax.StructType) *Node {
}
p.setlineno(expr)
- n := p.nod(expr, OTSTRUCT, nil, nil)
- n.List.Set(l)
+ n := p.nod(expr, ir.OTSTRUCT, nil, nil)
+ n.PtrList().Set(l)
return n
}
-func (p *noder) interfaceType(expr *syntax.InterfaceType) *Node {
- l := make([]*Node, 0, len(expr.MethodList))
+func (p *noder) interfaceType(expr *syntax.InterfaceType) ir.Node {
+ l := make([]ir.Node, 0, len(expr.MethodList))
for _, method := range expr.MethodList {
p.setlineno(method)
- var n *Node
+ var n ir.Node
if method.Name == nil {
- n = p.nodSym(method, ODCLFIELD, importName(p.packname(method.Type)), nil)
+ n = p.nodSym(method, ir.ODCLFIELD, importName(p.packname(method.Type)), nil)
} else {
mname := p.name(method.Name)
sig := p.typeExpr(method.Type)
- sig.Left = fakeRecv()
- n = p.nodSym(method, ODCLFIELD, sig, mname)
+ sig.SetLeft(fakeRecv())
+ n = p.nodSym(method, ir.ODCLFIELD, sig, mname)
ifacedcl(n)
}
l = append(l, n)
}
- n := p.nod(expr, OTINTER, nil, nil)
- n.List.Set(l)
+ n := p.nod(expr, ir.OTINTER, nil, nil)
+ n.PtrList().Set(l)
return n
}
@@ -975,31 +979,31 @@ func (p *noder) packname(expr syntax.Expr) *types.Sym {
switch expr := expr.(type) {
case *syntax.Name:
name := p.name(expr)
- if n := oldname(name); n.Name != nil && n.Name.Pack != nil {
- n.Name.Pack.Name.SetUsed(true)
+ if n := oldname(name); n.Name() != nil && n.Name().Pack != nil {
+ n.Name().Pack.Name().SetUsed(true)
}
return name
case *syntax.SelectorExpr:
name := p.name(expr.X.(*syntax.Name))
- def := asNode(name.Def)
+ def := ir.AsNode(name.Def)
if def == nil {
- yyerror("undefined: %v", name)
+ base.Errorf("undefined: %v", name)
return name
}
var pkg *types.Pkg
- if def.Op != OPACK {
- yyerror("%v is not a package", name)
- pkg = localpkg
+ if def.Op() != ir.OPACK {
+ base.Errorf("%v is not a package", name)
+ pkg = ir.LocalPkg
} else {
- def.Name.SetUsed(true)
- pkg = def.Name.Pkg
+ def.Name().SetUsed(true)
+ pkg = def.Name().Pkg
}
return pkg.Lookup(expr.Sel.Value)
}
panic(fmt.Sprintf("unexpected packname: %#v", expr))
}
-func (p *noder) embedded(typ syntax.Expr) *Node {
+func (p *noder) embedded(typ syntax.Expr) ir.Node {
op, isStar := typ.(*syntax.Operation)
if isStar {
if op.Op != syntax.Mul || op.Y != nil {
@@ -1009,26 +1013,26 @@ func (p *noder) embedded(typ syntax.Expr) *Node {
}
sym := p.packname(typ)
- n := p.nodSym(typ, ODCLFIELD, importName(sym), lookup(sym.Name))
+ n := p.nodSym(typ, ir.ODCLFIELD, importName(sym), lookup(sym.Name))
n.SetEmbedded(true)
if isStar {
- n.Left = p.nod(op, ODEREF, n.Left, nil)
+ n.SetLeft(p.nod(op, ir.ODEREF, n.Left(), nil))
}
return n
}
-func (p *noder) stmts(stmts []syntax.Stmt) []*Node {
+func (p *noder) stmts(stmts []syntax.Stmt) []ir.Node {
return p.stmtsFall(stmts, false)
}
-func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []*Node {
- var nodes []*Node
+func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []ir.Node {
+ var nodes []ir.Node
for i, stmt := range stmts {
s := p.stmtFall(stmt, fallOK && i+1 == len(stmts))
if s == nil {
- } else if s.Op == OBLOCK && s.Ninit.Len() == 0 {
- nodes = append(nodes, s.List.Slice()...)
+ } else if s.Op() == ir.OBLOCK && s.Init().Len() == 0 {
+ nodes = append(nodes, s.List().Slice()...)
} else {
nodes = append(nodes, s)
}
@@ -1036,11 +1040,11 @@ func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []*Node {
return nodes
}
-func (p *noder) stmt(stmt syntax.Stmt) *Node {
+func (p *noder) stmt(stmt syntax.Stmt) ir.Node {
return p.stmtFall(stmt, false)
}
-func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *Node {
+func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node {
p.setlineno(stmt)
switch stmt := stmt.(type) {
case *syntax.EmptyStmt:
@@ -1051,89 +1055,89 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *Node {
l := p.blockStmt(stmt)
if len(l) == 0 {
// TODO(mdempsky): Line number?
- return nod(OEMPTY, nil, nil)
+ return ir.Nod(ir.OEMPTY, nil, nil)
}
return liststmt(l)
case *syntax.ExprStmt:
return p.wrapname(stmt, p.expr(stmt.X))
case *syntax.SendStmt:
- return p.nod(stmt, OSEND, p.expr(stmt.Chan), p.expr(stmt.Value))
+ return p.nod(stmt, ir.OSEND, p.expr(stmt.Chan), p.expr(stmt.Value))
case *syntax.DeclStmt:
return liststmt(p.decls(stmt.DeclList))
case *syntax.AssignStmt:
if stmt.Op != 0 && stmt.Op != syntax.Def {
- n := p.nod(stmt, OASOP, p.expr(stmt.Lhs), p.expr(stmt.Rhs))
+ n := p.nod(stmt, ir.OASOP, p.expr(stmt.Lhs), p.expr(stmt.Rhs))
n.SetImplicit(stmt.Rhs == syntax.ImplicitOne)
n.SetSubOp(p.binOp(stmt.Op))
return n
}
- n := p.nod(stmt, OAS, nil, nil) // assume common case
+ n := p.nod(stmt, ir.OAS, nil, nil) // assume common case
rhs := p.exprList(stmt.Rhs)
lhs := p.assignList(stmt.Lhs, n, stmt.Op == syntax.Def)
if len(lhs) == 1 && len(rhs) == 1 {
// common case
- n.Left = lhs[0]
- n.Right = rhs[0]
+ n.SetLeft(lhs[0])
+ n.SetRight(rhs[0])
} else {
- n.Op = OAS2
- n.List.Set(lhs)
- n.Rlist.Set(rhs)
+ n.SetOp(ir.OAS2)
+ n.PtrList().Set(lhs)
+ n.PtrRlist().Set(rhs)
}
return n
case *syntax.BranchStmt:
- var op Op
+ var op ir.Op
switch stmt.Tok {
case syntax.Break:
- op = OBREAK
+ op = ir.OBREAK
case syntax.Continue:
- op = OCONTINUE
+ op = ir.OCONTINUE
case syntax.Fallthrough:
if !fallOK {
- yyerror("fallthrough statement out of place")
+ base.Errorf("fallthrough statement out of place")
}
- op = OFALL
+ op = ir.OFALL
case syntax.Goto:
- op = OGOTO
+ op = ir.OGOTO
default:
panic("unhandled BranchStmt")
}
n := p.nod(stmt, op, nil, nil)
if stmt.Label != nil {
- n.Sym = p.name(stmt.Label)
+ n.SetSym(p.name(stmt.Label))
}
return n
case *syntax.CallStmt:
- var op Op
+ var op ir.Op
switch stmt.Tok {
case syntax.Defer:
- op = ODEFER
+ op = ir.ODEFER
case syntax.Go:
- op = OGO
+ op = ir.OGO
default:
panic("unhandled CallStmt")
}
return p.nod(stmt, op, p.expr(stmt.Call), nil)
case *syntax.ReturnStmt:
- var results []*Node
+ var results []ir.Node
if stmt.Results != nil {
results = p.exprList(stmt.Results)
}
- n := p.nod(stmt, ORETURN, nil, nil)
- n.List.Set(results)
- if n.List.Len() == 0 && Curfn != nil {
- for _, ln := range Curfn.Func.Dcl {
- if ln.Class() == PPARAM {
+ n := p.nod(stmt, ir.ORETURN, nil, nil)
+ n.PtrList().Set(results)
+ if n.List().Len() == 0 && Curfn != nil {
+ for _, ln := range Curfn.Func().Dcl {
+ if ln.Class() == ir.PPARAM {
continue
}
- if ln.Class() != PPARAMOUT {
+ if ln.Class() != ir.PPARAMOUT {
break
}
- if asNode(ln.Sym.Def) != ln {
- yyerror("%s is shadowed during return", ln.Sym.Name)
+ if ir.AsNode(ln.Sym().Def) != ln {
+ base.Errorf("%s is shadowed during return", ln.Sym().Name)
}
}
}
@@ -1150,7 +1154,7 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *Node {
panic("unhandled Stmt")
}
-func (p *noder) assignList(expr syntax.Expr, defn *Node, colas bool) []*Node {
+func (p *noder) assignList(expr syntax.Expr, defn ir.Node, colas bool) []ir.Node {
if !colas {
return p.exprList(expr)
}
@@ -1164,17 +1168,17 @@ func (p *noder) assignList(expr syntax.Expr, defn *Node, colas bool) []*Node {
exprs = []syntax.Expr{expr}
}
- res := make([]*Node, len(exprs))
+ res := make([]ir.Node, len(exprs))
seen := make(map[*types.Sym]bool, len(exprs))
newOrErr := false
for i, expr := range exprs {
p.setlineno(expr)
- res[i] = nblank
+ res[i] = ir.BlankNode
name, ok := expr.(*syntax.Name)
if !ok {
- p.yyerrorpos(expr.Pos(), "non-name %v on left side of :=", p.expr(expr))
+ p.errorAt(expr.Pos(), "non-name %v on left side of :=", p.expr(expr))
newOrErr = true
continue
}
@@ -1185,7 +1189,7 @@ func (p *noder) assignList(expr syntax.Expr, defn *Node, colas bool) []*Node {
}
if seen[sym] {
- p.yyerrorpos(expr.Pos(), "%v repeated on left side of :=", sym)
+ p.errorAt(expr.Pos(), "%v repeated on left side of :=", sym)
newOrErr = true
continue
}
@@ -1197,99 +1201,99 @@ func (p *noder) assignList(expr syntax.Expr, defn *Node, colas bool) []*Node {
}
newOrErr = true
- n := newname(sym)
+ n := NewName(sym)
declare(n, dclcontext)
- n.Name.Defn = defn
- defn.Ninit.Append(nod(ODCL, n, nil))
+ n.Name().Defn = defn
+ defn.PtrInit().Append(ir.Nod(ir.ODCL, n, nil))
res[i] = n
}
if !newOrErr {
- yyerrorl(defn.Pos, "no new variables on left side of :=")
+ base.ErrorfAt(defn.Pos(), "no new variables on left side of :=")
}
return res
}
-func (p *noder) blockStmt(stmt *syntax.BlockStmt) []*Node {
+func (p *noder) blockStmt(stmt *syntax.BlockStmt) []ir.Node {
p.openScope(stmt.Pos())
nodes := p.stmts(stmt.List)
p.closeScope(stmt.Rbrace)
return nodes
}
-func (p *noder) ifStmt(stmt *syntax.IfStmt) *Node {
+func (p *noder) ifStmt(stmt *syntax.IfStmt) ir.Node {
p.openScope(stmt.Pos())
- n := p.nod(stmt, OIF, nil, nil)
+ n := p.nod(stmt, ir.OIF, nil, nil)
if stmt.Init != nil {
- n.Ninit.Set1(p.stmt(stmt.Init))
+ n.PtrInit().Set1(p.stmt(stmt.Init))
}
if stmt.Cond != nil {
- n.Left = p.expr(stmt.Cond)
+ n.SetLeft(p.expr(stmt.Cond))
}
- n.Nbody.Set(p.blockStmt(stmt.Then))
+ n.PtrBody().Set(p.blockStmt(stmt.Then))
if stmt.Else != nil {
e := p.stmt(stmt.Else)
- if e.Op == OBLOCK && e.Ninit.Len() == 0 {
- n.Rlist.Set(e.List.Slice())
+ if e.Op() == ir.OBLOCK && e.Init().Len() == 0 {
+ n.PtrRlist().Set(e.List().Slice())
} else {
- n.Rlist.Set1(e)
+ n.PtrRlist().Set1(e)
}
}
p.closeAnotherScope()
return n
}
-func (p *noder) forStmt(stmt *syntax.ForStmt) *Node {
+func (p *noder) forStmt(stmt *syntax.ForStmt) ir.Node {
p.openScope(stmt.Pos())
- var n *Node
+ var n ir.Node
if r, ok := stmt.Init.(*syntax.RangeClause); ok {
if stmt.Cond != nil || stmt.Post != nil {
panic("unexpected RangeClause")
}
- n = p.nod(r, ORANGE, nil, p.expr(r.X))
+ n = p.nod(r, ir.ORANGE, nil, p.expr(r.X))
if r.Lhs != nil {
- n.List.Set(p.assignList(r.Lhs, n, r.Def))
+ n.PtrList().Set(p.assignList(r.Lhs, n, r.Def))
}
} else {
- n = p.nod(stmt, OFOR, nil, nil)
+ n = p.nod(stmt, ir.OFOR, nil, nil)
if stmt.Init != nil {
- n.Ninit.Set1(p.stmt(stmt.Init))
+ n.PtrInit().Set1(p.stmt(stmt.Init))
}
if stmt.Cond != nil {
- n.Left = p.expr(stmt.Cond)
+ n.SetLeft(p.expr(stmt.Cond))
}
if stmt.Post != nil {
- n.Right = p.stmt(stmt.Post)
+ n.SetRight(p.stmt(stmt.Post))
}
}
- n.Nbody.Set(p.blockStmt(stmt.Body))
+ n.PtrBody().Set(p.blockStmt(stmt.Body))
p.closeAnotherScope()
return n
}
-func (p *noder) switchStmt(stmt *syntax.SwitchStmt) *Node {
+func (p *noder) switchStmt(stmt *syntax.SwitchStmt) ir.Node {
p.openScope(stmt.Pos())
- n := p.nod(stmt, OSWITCH, nil, nil)
+ n := p.nod(stmt, ir.OSWITCH, nil, nil)
if stmt.Init != nil {
- n.Ninit.Set1(p.stmt(stmt.Init))
+ n.PtrInit().Set1(p.stmt(stmt.Init))
}
if stmt.Tag != nil {
- n.Left = p.expr(stmt.Tag)
+ n.SetLeft(p.expr(stmt.Tag))
}
- tswitch := n.Left
- if tswitch != nil && tswitch.Op != OTYPESW {
+ tswitch := n.Left()
+ if tswitch != nil && tswitch.Op() != ir.OTYPESW {
tswitch = nil
}
- n.List.Set(p.caseClauses(stmt.Body, tswitch, stmt.Rbrace))
+ n.PtrList().Set(p.caseClauses(stmt.Body, tswitch, stmt.Rbrace))
p.closeScope(stmt.Rbrace)
return n
}
-func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *Node, rbrace syntax.Pos) []*Node {
- nodes := make([]*Node, 0, len(clauses))
+func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch ir.Node, rbrace syntax.Pos) []ir.Node {
+ nodes := make([]ir.Node, 0, len(clauses))
for i, clause := range clauses {
p.setlineno(clause)
if i > 0 {
@@ -1297,16 +1301,16 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *Node, rbrace
}
p.openScope(clause.Pos())
- n := p.nod(clause, OCASE, nil, nil)
+ n := p.nod(clause, ir.OCASE, nil, nil)
if clause.Cases != nil {
- n.List.Set(p.exprList(clause.Cases))
+ n.PtrList().Set(p.exprList(clause.Cases))
}
- if tswitch != nil && tswitch.Left != nil {
- nn := newname(tswitch.Left.Sym)
+ if tswitch != nil && tswitch.Left() != nil {
+ nn := NewName(tswitch.Left().Sym())
declare(nn, dclcontext)
- n.Rlist.Set1(nn)
+ n.PtrRlist().Set1(nn)
// keep track of the instances for reporting unused
- nn.Name.Defn = tswitch
+ nn.Name().Defn = tswitch
}
// Trim trailing empty statements. We omit them from
@@ -1320,13 +1324,13 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *Node, rbrace
body = body[:len(body)-1]
}
- n.Nbody.Set(p.stmtsFall(body, true))
- if l := n.Nbody.Len(); l > 0 && n.Nbody.Index(l-1).Op == OFALL {
+ n.PtrBody().Set(p.stmtsFall(body, true))
+ if l := n.Body().Len(); l > 0 && n.Body().Index(l-1).Op() == ir.OFALL {
if tswitch != nil {
- yyerror("cannot fallthrough in type switch")
+ base.Errorf("cannot fallthrough in type switch")
}
if i+1 == len(clauses) {
- yyerror("cannot fallthrough final case in switch")
+ base.Errorf("cannot fallthrough final case in switch")
}
}
@@ -1338,14 +1342,14 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *Node, rbrace
return nodes
}
-func (p *noder) selectStmt(stmt *syntax.SelectStmt) *Node {
- n := p.nod(stmt, OSELECT, nil, nil)
- n.List.Set(p.commClauses(stmt.Body, stmt.Rbrace))
+func (p *noder) selectStmt(stmt *syntax.SelectStmt) ir.Node {
+ n := p.nod(stmt, ir.OSELECT, nil, nil)
+ n.PtrList().Set(p.commClauses(stmt.Body, stmt.Rbrace))
return n
}
-func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []*Node {
- nodes := make([]*Node, 0, len(clauses))
+func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []ir.Node {
+ nodes := make([]ir.Node, 0, len(clauses))
for i, clause := range clauses {
p.setlineno(clause)
if i > 0 {
@@ -1353,11 +1357,11 @@ func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []*
}
p.openScope(clause.Pos())
- n := p.nod(clause, OCASE, nil, nil)
+ n := p.nod(clause, ir.OCASE, nil, nil)
if clause.Comm != nil {
- n.List.Set1(p.stmt(clause.Comm))
+ n.PtrList().Set1(p.stmt(clause.Comm))
}
- n.Nbody.Set(p.stmts(clause.Body))
+ n.PtrBody().Set(p.stmts(clause.Body))
nodes = append(nodes, n)
}
if len(clauses) > 0 {
@@ -1366,19 +1370,19 @@ func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []*
return nodes
}
-func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) *Node {
- lhs := p.nodSym(label, OLABEL, nil, p.name(label.Label))
+func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) ir.Node {
+ lhs := p.nodSym(label, ir.OLABEL, nil, p.name(label.Label))
- var ls *Node
+ var ls ir.Node
if label.Stmt != nil { // TODO(mdempsky): Should always be present.
ls = p.stmtFall(label.Stmt, fallOK)
}
- lhs.Name.Defn = ls
- l := []*Node{lhs}
+ lhs.Name().Defn = ls
+ l := []ir.Node{lhs}
if ls != nil {
- if ls.Op == OBLOCK && ls.Ninit.Len() == 0 {
- l = append(l, ls.List.Slice()...)
+ if ls.Op() == ir.OBLOCK && ls.Init().Len() == 0 {
+ l = append(l, ls.List().Slice()...)
} else {
l = append(l, ls)
}
@@ -1386,50 +1390,50 @@ func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) *Node {
return liststmt(l)
}
-var unOps = [...]Op{
- syntax.Recv: ORECV,
- syntax.Mul: ODEREF,
- syntax.And: OADDR,
+var unOps = [...]ir.Op{
+ syntax.Recv: ir.ORECV,
+ syntax.Mul: ir.ODEREF,
+ syntax.And: ir.OADDR,
- syntax.Not: ONOT,
- syntax.Xor: OBITNOT,
- syntax.Add: OPLUS,
- syntax.Sub: ONEG,
+ syntax.Not: ir.ONOT,
+ syntax.Xor: ir.OBITNOT,
+ syntax.Add: ir.OPLUS,
+ syntax.Sub: ir.ONEG,
}
-func (p *noder) unOp(op syntax.Operator) Op {
+func (p *noder) unOp(op syntax.Operator) ir.Op {
if uint64(op) >= uint64(len(unOps)) || unOps[op] == 0 {
panic("invalid Operator")
}
return unOps[op]
}
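
The unOps/binOps tables use a common compiler pattern: a sparse array indexed by the source-level operator, with the zero value as a "no mapping" sentinel checked alongside the bounds. A self-contained sketch of the pattern (editorial; all names here are illustrative):

    package main

    import "fmt"

    type Operator uint

    const (
        _ Operator = iota
        Recv
        Mul
        And
    )

    // Sparse table indexed by operator; the zero value ("") means the
    // operator has no mapping, just as 0 does in unOps above.
    var names = [...]string{
        Recv: "ORECV",
        Mul:  "ODEREF",
        And:  "OADDR",
    }

    func lookup(op Operator) string {
        if uint64(op) >= uint64(len(names)) || names[op] == "" {
            panic("invalid Operator")
        }
        return names[op]
    }

    func main() {
        fmt.Println(lookup(Mul)) // ODEREF
    }
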
-var binOps = [...]Op{
- syntax.OrOr: OOROR,
- syntax.AndAnd: OANDAND,
+var binOps = [...]ir.Op{
+ syntax.OrOr: ir.OOROR,
+ syntax.AndAnd: ir.OANDAND,
- syntax.Eql: OEQ,
- syntax.Neq: ONE,
- syntax.Lss: OLT,
- syntax.Leq: OLE,
- syntax.Gtr: OGT,
- syntax.Geq: OGE,
+ syntax.Eql: ir.OEQ,
+ syntax.Neq: ir.ONE,
+ syntax.Lss: ir.OLT,
+ syntax.Leq: ir.OLE,
+ syntax.Gtr: ir.OGT,
+ syntax.Geq: ir.OGE,
- syntax.Add: OADD,
- syntax.Sub: OSUB,
- syntax.Or: OOR,
- syntax.Xor: OXOR,
+ syntax.Add: ir.OADD,
+ syntax.Sub: ir.OSUB,
+ syntax.Or: ir.OOR,
+ syntax.Xor: ir.OXOR,
- syntax.Mul: OMUL,
- syntax.Div: ODIV,
- syntax.Rem: OMOD,
- syntax.And: OAND,
- syntax.AndNot: OANDNOT,
- syntax.Shl: OLSH,
- syntax.Shr: ORSH,
+ syntax.Mul: ir.OMUL,
+ syntax.Div: ir.ODIV,
+ syntax.Rem: ir.OMOD,
+ syntax.And: ir.OAND,
+ syntax.AndNot: ir.OANDNOT,
+ syntax.Shl: ir.OLSH,
+ syntax.Shr: ir.ORSH,
}
-func (p *noder) binOp(op syntax.Operator) Op {
+func (p *noder) binOp(op syntax.Operator) ir.Op {
if uint64(op) >= uint64(len(binOps)) || binOps[op] == 0 {
panic("invalid Operator")
}
@@ -1440,130 +1444,108 @@ func (p *noder) binOp(op syntax.Operator) Op {
// literal is not compatible with the current language version.
func checkLangCompat(lit *syntax.BasicLit) {
s := lit.Value
- if len(s) <= 2 || langSupported(1, 13, localpkg) {
+ if len(s) <= 2 || langSupported(1, 13, ir.LocalPkg) {
return
}
// len(s) > 2
if strings.Contains(s, "_") {
- yyerrorv("go1.13", "underscores in numeric literals")
+ base.ErrorfVers("go1.13", "underscores in numeric literals")
return
}
if s[0] != '0' {
return
}
- base := s[1]
- if base == 'b' || base == 'B' {
- yyerrorv("go1.13", "binary literals")
+ radix := s[1]
+ if radix == 'b' || radix == 'B' {
+ base.ErrorfVers("go1.13", "binary literals")
return
}
- if base == 'o' || base == 'O' {
- yyerrorv("go1.13", "0o/0O-style octal literals")
+ if radix == 'o' || radix == 'O' {
+ base.ErrorfVers("go1.13", "0o/0O-style octal literals")
return
}
- if lit.Kind != syntax.IntLit && (base == 'x' || base == 'X') {
- yyerrorv("go1.13", "hexadecimal floating-point literals")
+ if lit.Kind != syntax.IntLit && (radix == 'x' || radix == 'X') {
+ base.ErrorfVers("go1.13", "hexadecimal floating-point literals")
}
}
-func (p *noder) basicLit(lit *syntax.BasicLit) Val {
+func (p *noder) basicLit(lit *syntax.BasicLit) constant.Value {
// We don't use the errors of the conversion routines to determine
// if a literal string is valid because the conversion routines may
// accept a wider syntax than the language permits. Rely on lit.Bad
// instead.
- switch s := lit.Value; lit.Kind {
- case syntax.IntLit:
- checkLangCompat(lit)
- x := new(Mpint)
- if !lit.Bad {
- x.SetString(s)
- }
- return Val{U: x}
-
- case syntax.FloatLit:
- checkLangCompat(lit)
- x := newMpflt()
- if !lit.Bad {
- x.SetString(s)
- }
- return Val{U: x}
+ if lit.Bad {
+ return constant.MakeUnknown()
+ }
- case syntax.ImagLit:
+ switch lit.Kind {
+ case syntax.IntLit, syntax.FloatLit, syntax.ImagLit:
checkLangCompat(lit)
- x := newMpcmplx()
- if !lit.Bad {
- x.Imag.SetString(strings.TrimSuffix(s, "i"))
- }
- return Val{U: x}
-
- case syntax.RuneLit:
- x := new(Mpint)
- x.Rune = true
- if !lit.Bad {
- u, _ := strconv.Unquote(s)
- var r rune
- if len(u) == 1 {
- r = rune(u[0])
- } else {
- r, _ = utf8.DecodeRuneInString(u)
- }
- x.SetInt64(int64(r))
- }
- return Val{U: x}
+ }
- case syntax.StringLit:
- var x string
- if !lit.Bad {
- if len(s) > 0 && s[0] == '`' {
- // strip carriage returns from raw string
- s = strings.Replace(s, "\r", "", -1)
- }
- x, _ = strconv.Unquote(s)
- }
- return Val{U: x}
+ v := constant.MakeFromLiteral(lit.Value, tokenForLitKind[lit.Kind], 0)
+ if v.Kind() == constant.Unknown {
+ // TODO(mdempsky): Better error message?
+ p.errorAt(lit.Pos(), "malformed constant: %s", lit.Value)
+ }
- default:
- panic("unhandled BasicLit kind")
+ // go/constant uses big.Rat by default, which is more precise, but
+ // causes toolstash -cmp and some tests to fail. For now, convert
+ // to big.Float to match cmd/compile's historical precision.
+ // TODO(mdempsky): Remove.
+ if v.Kind() == constant.Float {
+ v = constant.Make(bigFloatVal(v))
}
+
+ return v
+}
+
+var tokenForLitKind = [...]token.Token{
+ syntax.IntLit: token.INT,
+ syntax.RuneLit: token.CHAR,
+ syntax.FloatLit: token.FLOAT,
+ syntax.ImagLit: token.IMAG,
+ syntax.StringLit: token.STRING,
}
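
The tokenForLitKind table is the whole bridge between package syntax literal kinds and go/constant: what basicLit now does reduces to a single MakeFromLiteral call, including the Unknown result for malformed input. A short usage sketch (editorial, standard library only):

    package main

    import (
        "fmt"
        "go/constant"
        "go/token"
    )

    func main() {
        v := constant.MakeFromLiteral("0x1p-2", token.FLOAT, 0)
        fmt.Println(v.Kind(), v) // Float 0.25

        r := constant.MakeFromLiteral("'A'", token.CHAR, 0)
        n, _ := constant.Int64Val(r)
        fmt.Println(r.Kind(), n) // Int 65

        // Invalid literal text comes back as Unknown, which is exactly
        // what basicLit checks before reporting "malformed constant".
        bad := constant.MakeFromLiteral("0x", token.INT, 0)
        fmt.Println(bad.Kind() == constant.Unknown) // true
    }
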
func (p *noder) name(name *syntax.Name) *types.Sym {
return lookup(name.Value)
}
-func (p *noder) mkname(name *syntax.Name) *Node {
+func (p *noder) mkname(name *syntax.Name) ir.Node {
// TODO(mdempsky): Set line number?
return mkname(p.name(name))
}
-func (p *noder) wrapname(n syntax.Node, x *Node) *Node {
+func (p *noder) wrapname(n syntax.Node, x ir.Node) ir.Node {
// These nodes do not carry line numbers.
// Introduce a wrapper node to give them the correct line.
- switch x.Op {
- case OTYPE, OLITERAL:
- if x.Sym == nil {
+ switch x.Op() {
+ case ir.OTYPE, ir.OLITERAL:
+ if x.Sym() == nil {
break
}
fallthrough
- case ONAME, ONONAME, OPACK:
- x = p.nod(n, OPAREN, x, nil)
+ case ir.ONAME, ir.ONONAME, ir.OPACK:
+ x = p.nod(n, ir.OPAREN, x, nil)
x.SetImplicit(true)
}
return x
}
-func (p *noder) nod(orig syntax.Node, op Op, left, right *Node) *Node {
- return nodl(p.pos(orig), op, left, right)
+func (p *noder) nod(orig syntax.Node, op ir.Op, left, right ir.Node) ir.Node {
+ return ir.NodAt(p.pos(orig), op, left, right)
}
-func (p *noder) nodSym(orig syntax.Node, op Op, left *Node, sym *types.Sym) *Node {
+func (p *noder) nodSym(orig syntax.Node, op ir.Op, left ir.Node, sym *types.Sym) ir.Node {
n := nodSym(op, left, sym)
- n.Pos = p.pos(orig)
+ n.SetPos(p.pos(orig))
return n
}
func (p *noder) pos(n syntax.Node) src.XPos {
// TODO(gri): orig.Pos() should always be known - fix package syntax
- xpos := lineno
+ xpos := base.Pos
if pos := n.Pos(); pos.IsKnown() {
xpos = p.makeXPos(pos)
}
@@ -1572,7 +1554,7 @@ func (p *noder) pos(n syntax.Node) src.XPos {
func (p *noder) setlineno(n syntax.Node) {
if n != nil {
- lineno = p.pos(n)
+ base.Pos = p.pos(n)
}
}
@@ -1596,13 +1578,13 @@ var allowedStdPragmas = map[string]bool{
// *Pragma is the value stored in a syntax.Pragma during parsing.
type Pragma struct {
- Flag PragmaFlag // collected bits
- Pos []PragmaPos // position of each individual flag
+ Flag ir.PragmaFlag // collected bits
+ Pos []PragmaPos // position of each individual flag
Embeds []PragmaEmbed
}
type PragmaPos struct {
- Flag PragmaFlag
+ Flag ir.PragmaFlag
Pos syntax.Pos
}
@@ -1614,12 +1596,12 @@ type PragmaEmbed struct {
func (p *noder) checkUnused(pragma *Pragma) {
for _, pos := range pragma.Pos {
if pos.Flag&pragma.Flag != 0 {
- p.yyerrorpos(pos.Pos, "misplaced compiler directive")
+ p.errorAt(pos.Pos, "misplaced compiler directive")
}
}
if len(pragma.Embeds) > 0 {
for _, e := range pragma.Embeds {
- p.yyerrorpos(e.Pos, "misplaced go:embed directive")
+ p.errorAt(e.Pos, "misplaced go:embed directive")
}
}
}
@@ -1708,7 +1690,7 @@ func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.P
// For security, we disallow //go:cgo_* directives other
// than cgo_import_dynamic outside cgo-generated files.
// Exception: they are allowed in the standard library, for runtime and syscall.
- if !isCgoGeneratedFile(pos) && !compiling_std {
+ if !isCgoGeneratedFile(pos) && !base.Flag.Std {
p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s only allowed in cgo-generated code", text)})
}
p.pragcgo(pos, text)
@@ -1719,11 +1701,11 @@ func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.P
verb = verb[:i]
}
flag := pragmaFlag(verb)
- const runtimePragmas = Systemstack | Nowritebarrier | Nowritebarrierrec | Yeswritebarrierrec
- if !compiling_runtime && flag&runtimePragmas != 0 {
+ const runtimePragmas = ir.Systemstack | ir.Nowritebarrier | ir.Nowritebarrierrec | ir.Yeswritebarrierrec
+ if !base.Flag.CompilingRuntime && flag&runtimePragmas != 0 {
p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s only allowed in runtime", verb)})
}
- if flag == 0 && !allowedStdPragmas[verb] && compiling_std {
+ if flag == 0 && !allowedStdPragmas[verb] && base.Flag.Std {
p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s is not allowed in the standard library", verb)})
}
pragma.Flag |= flag
@@ -1755,10 +1737,10 @@ func safeArg(name string) bool {
return '0' <= c && c <= '9' || 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || c == '.' || c == '_' || c == '/' || c >= utf8.RuneSelf
}
-func mkname(sym *types.Sym) *Node {
+func mkname(sym *types.Sym) ir.Node {
n := oldname(sym)
- if n.Name != nil && n.Name.Pack != nil {
- n.Name.Pack.Name.SetUsed(true)
+ if n.Name() != nil && n.Name().Pack != nil {
+ n.Name().Pack.Name().SetUsed(true)
}
return n
}
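(The noder.go changes above are one instance of this CL's mechanical pattern: *Node field accesses become ir.Node method calls. A reduced, self-contained sketch of the pattern — not the real ir.Node interface, which is much larger:)

package main

import "fmt"

type Op int

// Node is a stand-in for ir.Node: struct fields (n.Op, n.Sym, n.Pos)
// become interface methods (n.Op(), n.Sym(), n.SetPos(...)), so gc code
// can hold any concrete node type behind one interface.
type Node interface {
	Op() Op
	SetOp(Op)
}

type node struct{ op Op }

func (n *node) Op() Op      { return n.op }
func (n *node) SetOp(op Op) { n.op = op }

func main() {
	var n Node = &node{}
	n.SetOp(7)
	fmt.Println(n.Op()) // 7
}
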
diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go
index 32aa7c5bb1..d566959d9e 100644
--- a/src/cmd/compile/internal/gc/obj.go
+++ b/src/cmd/compile/internal/gc/obj.go
@@ -5,6 +5,8 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/bio"
"cmd/internal/obj"
@@ -13,6 +15,7 @@ import (
"crypto/sha256"
"encoding/json"
"fmt"
+ "go/constant"
"io"
"io/ioutil"
"os"
@@ -46,20 +49,20 @@ const (
)
func dumpobj() {
- if linkobj == "" {
- dumpobj1(outfile, modeCompilerObj|modeLinkerObj)
+ if base.Flag.LinkObj == "" {
+ dumpobj1(base.Flag.LowerO, modeCompilerObj|modeLinkerObj)
return
}
- dumpobj1(outfile, modeCompilerObj)
- dumpobj1(linkobj, modeLinkerObj)
+ dumpobj1(base.Flag.LowerO, modeCompilerObj)
+ dumpobj1(base.Flag.LinkObj, modeLinkerObj)
}
func dumpobj1(outfile string, mode int) {
bout, err := bio.Create(outfile)
if err != nil {
- flusherrors()
+ base.FlushErrors()
fmt.Printf("can't create %s: %v\n", outfile, err)
- errorexit()
+ base.ErrorExit()
}
defer bout.Close()
bout.WriteString("!<arch>\n")
@@ -78,10 +81,10 @@ func dumpobj1(outfile string, mode int) {
func printObjHeader(bout *bio.Writer) {
fmt.Fprintf(bout, "go object %s %s %s %s\n", objabi.GOOS, objabi.GOARCH, objabi.Version, objabi.Expstring())
- if buildid != "" {
- fmt.Fprintf(bout, "build id %q\n", buildid)
+ if base.Flag.BuildID != "" {
+ fmt.Fprintf(bout, "build id %q\n", base.Flag.BuildID)
}
- if localpkg.Name == "main" {
+ if ir.LocalPkg.Name == "main" {
fmt.Fprintf(bout, "main\n")
}
fmt.Fprintf(bout, "\n") // header ends with blank line
@@ -139,7 +142,7 @@ func dumpdata() {
for {
for i := xtops; i < len(xtop); i++ {
n := xtop[i]
- if n.Op == ODCLFUNC {
+ if n.Op() == ir.ODCLFUNC {
funccompile(n)
}
}
@@ -168,13 +171,13 @@ func dumpdata() {
addGCLocals()
if exportlistLen != len(exportlist) {
- Fatalf("exportlist changed after compile functions loop")
+ base.Fatalf("exportlist changed after compile functions loop")
}
if ptabsLen != len(ptabs) {
- Fatalf("ptabs changed after compile functions loop")
+ base.Fatalf("ptabs changed after compile functions loop")
}
if itabsLen != len(itabs) {
- Fatalf("itabs changed after compile functions loop")
+ base.Fatalf("itabs changed after compile functions loop")
}
}
@@ -186,27 +189,27 @@ func dumpLinkerObj(bout *bio.Writer) {
fmt.Fprintf(bout, "\n$$\n\n$$\n\n")
fmt.Fprintf(bout, "\n$$ // cgo\n")
if err := json.NewEncoder(bout).Encode(pragcgobuf); err != nil {
- Fatalf("serializing pragcgobuf: %v", err)
+ base.Fatalf("serializing pragcgobuf: %v", err)
}
fmt.Fprintf(bout, "\n$$\n\n")
}
fmt.Fprintf(bout, "\n!\n")
- obj.WriteObjFile(Ctxt, bout)
+ obj.WriteObjFile(base.Ctxt, bout)
}
func addptabs() {
- if !Ctxt.Flag_dynlink || localpkg.Name != "main" {
+ if !base.Ctxt.Flag_dynlink || ir.LocalPkg.Name != "main" {
return
}
for _, exportn := range exportlist {
- s := exportn.Sym
- n := asNode(s.Def)
+ s := exportn.Sym()
+ n := ir.AsNode(s.Def)
if n == nil {
continue
}
- if n.Op != ONAME {
+ if n.Op() != ir.ONAME {
continue
}
if !types.IsExported(s.Name) {
@@ -215,76 +218,61 @@ func addptabs() {
if s.Pkg.Name != "main" {
continue
}
- if n.Type.Etype == TFUNC && n.Class() == PFUNC {
+ if n.Type().Etype == types.TFUNC && n.Class() == ir.PFUNC {
// function
- ptabs = append(ptabs, ptabEntry{s: s, t: asNode(s.Def).Type})
+ ptabs = append(ptabs, ptabEntry{s: s, t: ir.AsNode(s.Def).Type()})
} else {
// variable
- ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(asNode(s.Def).Type)})
+ ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(ir.AsNode(s.Def).Type())})
}
}
}
-func dumpGlobal(n *Node) {
- if n.Type == nil {
- Fatalf("external %v nil type\n", n)
+func dumpGlobal(n ir.Node) {
+ if n.Type() == nil {
+ base.Fatalf("external %v nil type\n", n)
}
- if n.Class() == PFUNC {
+ if n.Class() == ir.PFUNC {
return
}
- if n.Sym.Pkg != localpkg {
+ if n.Sym().Pkg != ir.LocalPkg {
return
}
- dowidth(n.Type)
+ dowidth(n.Type())
ggloblnod(n)
}
-func dumpGlobalConst(n *Node) {
+func dumpGlobalConst(n ir.Node) {
// only export typed constants
- t := n.Type
+ t := n.Type()
if t == nil {
return
}
- if n.Sym.Pkg != localpkg {
+ if n.Sym().Pkg != ir.LocalPkg {
return
}
// only export integer constants for now
- switch t.Etype {
- case TINT8:
- case TINT16:
- case TINT32:
- case TINT64:
- case TINT:
- case TUINT8:
- case TUINT16:
- case TUINT32:
- case TUINT64:
- case TUINT:
- case TUINTPTR:
- // ok
- case TIDEAL:
- if !Isconst(n, CTINT) {
- return
- }
- x := n.Val().U.(*Mpint)
- if x.Cmp(minintval[TINT]) < 0 || x.Cmp(maxintval[TINT]) > 0 {
+ if !t.IsInteger() {
+ return
+ }
+ v := n.Val()
+ if t.IsUntyped() {
+ // Export untyped integers as int (if they fit).
+ t = types.Types[types.TINT]
+ if doesoverflow(v, t) {
return
}
- // Ideal integers we export as int (if they fit).
- t = types.Types[TINT]
- default:
- return
}
- Ctxt.DwarfIntConst(myimportpath, n.Sym.Name, typesymname(t), n.Int64Val())
+ base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym().Name, typesymname(t), ir.Int64Val(t, v))
}
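(The rewritten dumpGlobalConst leans on go/constant's exactness reporting. A standalone sketch of the "fits" test — doesoverflow in the compiler compares against the target type's bounds; here Int64Val's exactness result stands in for it:)

package main

import (
	"fmt"
	"go/constant"
	"go/token"
)

// fitsInt64 mirrors the shape of the check above: Int64Val's second
// result reports whether the constant is exactly representable.
func fitsInt64(v constant.Value) bool {
	_, exact := constant.Int64Val(v)
	return exact
}

func main() {
	fmt.Println(fitsInt64(constant.MakeInt64(42)))                                // true
	fmt.Println(fitsInt64(constant.Shift(constant.MakeInt64(1), token.SHL, 80))) // false: 1<<80
}
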
func dumpglobls() {
// add globals
for _, n := range externdcl {
- switch n.Op {
- case ONAME:
+ switch n.Op() {
+ case ir.ONAME:
dumpGlobal(n)
- case OLITERAL:
+ case ir.OLITERAL:
dumpGlobalConst(n)
}
}
@@ -307,7 +295,7 @@ func dumpglobls() {
// This is done during the sequential phase after compilation, since
// global symbols can't be declared during parallel compilation.
func addGCLocals() {
- for _, s := range Ctxt.Text {
+ for _, s := range base.Ctxt.Text {
fn := s.Func()
if fn == nil {
continue
@@ -330,9 +318,9 @@ func addGCLocals() {
func duintxx(s *obj.LSym, off int, v uint64, wid int) int {
if off&(wid-1) != 0 {
- Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off)
+ base.Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off)
}
- s.WriteInt(Ctxt, int64(off), wid, int64(v))
+ s.WriteInt(base.Ctxt, int64(off), wid, int64(v))
return off + wid
}
@@ -383,7 +371,7 @@ func stringsym(pos src.XPos, s string) (data *obj.LSym) {
symname = strconv.Quote(s)
}
- symdata := Ctxt.Lookup(stringSymPrefix + symname)
+ symdata := base.Ctxt.Lookup(stringSymPrefix + symname)
if !symdata.OnList() {
off := dstringdata(symdata, 0, s, pos, "string")
ggloblsym(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
@@ -426,7 +414,7 @@ func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.
if readonly {
sym = stringsym(pos, string(data))
} else {
- sym = slicedata(pos, string(data)).Sym.Linksym()
+ sym = slicedata(pos, string(data)).Sym().Linksym()
}
if len(hash) > 0 {
sum := sha256.Sum256(data)
@@ -461,7 +449,7 @@ func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.
var symdata *obj.LSym
if readonly {
symname := fmt.Sprintf(stringSymPattern, size, sum)
- symdata = Ctxt.Lookup(stringSymPrefix + symname)
+ symdata = base.Ctxt.Lookup(stringSymPrefix + symname)
if !symdata.OnList() {
info := symdata.NewFileInfo()
info.Name = file
@@ -474,7 +462,7 @@ func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.
} else {
// Emit a zero-length data symbol
// and then fix up length and content to use file.
- symdata = slicedata(pos, "").Sym.Linksym()
+ symdata = slicedata(pos, "").Sym().Linksym()
symdata.Size = size
symdata.Type = objabi.SNOPTRDATA
info := symdata.NewFileInfo()
@@ -487,12 +475,12 @@ func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.
var slicedataGen int
-func slicedata(pos src.XPos, s string) *Node {
+func slicedata(pos src.XPos, s string) ir.Node {
slicedataGen++
symname := fmt.Sprintf(".gobytes.%d", slicedataGen)
- sym := localpkg.Lookup(symname)
- symnode := newname(sym)
- sym.Def = asTypesNode(symnode)
+ sym := ir.LocalPkg.Lookup(symname)
+ symnode := NewName(sym)
+ sym.Def = symnode
lsym := sym.Linksym()
off := dstringdata(lsym, 0, s, pos, "slice")
@@ -501,11 +489,11 @@ func slicedata(pos src.XPos, s string) *Node {
return symnode
}
-func slicebytes(nam *Node, s string) {
- if nam.Op != ONAME {
- Fatalf("slicebytes %v", nam)
+func slicebytes(nam ir.Node, s string) {
+ if nam.Op() != ir.ONAME {
+ base.Fatalf("slicebytes %v", nam)
}
- slicesym(nam, slicedata(nam.Pos, s), int64(len(s)))
+ slicesym(nam, slicedata(nam.Pos(), s), int64(len(s)))
}
func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
@@ -513,126 +501,133 @@ func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int
// causing a cryptic error message from the linker. Check for oversize objects here
// and provide a useful error message instead.
if int64(len(t)) > 2e9 {
- yyerrorl(pos, "%v with length %v is too big", what, len(t))
+ base.ErrorfAt(pos, "%v with length %v is too big", what, len(t))
return 0
}
- s.WriteString(Ctxt, int64(off), len(t), t)
+ s.WriteString(base.Ctxt, int64(off), len(t), t)
return off + len(t)
}
func dsymptr(s *obj.LSym, off int, x *obj.LSym, xoff int) int {
off = int(Rnd(int64(off), int64(Widthptr)))
- s.WriteAddr(Ctxt, int64(off), Widthptr, x, int64(xoff))
+ s.WriteAddr(base.Ctxt, int64(off), Widthptr, x, int64(xoff))
off += Widthptr
return off
}
func dsymptrOff(s *obj.LSym, off int, x *obj.LSym) int {
- s.WriteOff(Ctxt, int64(off), x, 0)
+ s.WriteOff(base.Ctxt, int64(off), x, 0)
off += 4
return off
}
func dsymptrWeakOff(s *obj.LSym, off int, x *obj.LSym) int {
- s.WriteWeakOff(Ctxt, int64(off), x, 0)
+ s.WriteWeakOff(base.Ctxt, int64(off), x, 0)
off += 4
return off
}
// slicesym writes a static slice symbol {&arr, lencap, lencap} to n.
// arr must be an ONAME. slicesym does not modify n.
-func slicesym(n, arr *Node, lencap int64) {
- s := n.Sym.Linksym()
- base := n.Xoffset
- if arr.Op != ONAME {
- Fatalf("slicesym non-name arr %v", arr)
+func slicesym(n, arr ir.Node, lencap int64) {
+ s := n.Sym().Linksym()
+ off := n.Offset()
+ if arr.Op() != ir.ONAME {
+ base.Fatalf("slicesym non-name arr %v", arr)
}
- s.WriteAddr(Ctxt, base, Widthptr, arr.Sym.Linksym(), arr.Xoffset)
- s.WriteInt(Ctxt, base+sliceLenOffset, Widthptr, lencap)
- s.WriteInt(Ctxt, base+sliceCapOffset, Widthptr, lencap)
+ s.WriteAddr(base.Ctxt, off, Widthptr, arr.Sym().Linksym(), arr.Offset())
+ s.WriteInt(base.Ctxt, off+sliceLenOffset, Widthptr, lencap)
+ s.WriteInt(base.Ctxt, off+sliceCapOffset, Widthptr, lencap)
}
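(For reference, the three words slicesym writes are the ordinary runtime slice header. A small sketch observing the same {data, len, cap} layout on a live slice — unsafe, illustration only:)

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// The layout slicesym emits statically:
	// offset 0: data pointer, Widthptr: len, 2*Widthptr: cap.
	s := make([]byte, 3, 8)
	h := (*[3]uintptr)(unsafe.Pointer(&s))
	fmt.Printf("data=%#x len=%d cap=%d\n", h[0], h[1], h[2]) // len=3 cap=8
}
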
// addrsym writes the static address of a to n. a must be an ONAME.
// Neither n nor a is modified.
-func addrsym(n, a *Node) {
- if n.Op != ONAME {
- Fatalf("addrsym n op %v", n.Op)
+func addrsym(n, a ir.Node) {
+ if n.Op() != ir.ONAME {
+ base.Fatalf("addrsym n op %v", n.Op())
}
- if n.Sym == nil {
- Fatalf("addrsym nil n sym")
+ if n.Sym() == nil {
+ base.Fatalf("addrsym nil n sym")
}
- if a.Op != ONAME {
- Fatalf("addrsym a op %v", a.Op)
+ if a.Op() != ir.ONAME {
+ base.Fatalf("addrsym a op %v", a.Op())
}
- s := n.Sym.Linksym()
- s.WriteAddr(Ctxt, n.Xoffset, Widthptr, a.Sym.Linksym(), a.Xoffset)
+ s := n.Sym().Linksym()
+ s.WriteAddr(base.Ctxt, n.Offset(), Widthptr, a.Sym().Linksym(), a.Offset())
}
// pfuncsym writes the static address of f to n. f must be a global function.
// Neither n nor f is modified.
-func pfuncsym(n, f *Node) {
- if n.Op != ONAME {
- Fatalf("pfuncsym n op %v", n.Op)
+func pfuncsym(n, f ir.Node) {
+ if n.Op() != ir.ONAME {
+ base.Fatalf("pfuncsym n op %v", n.Op())
}
- if n.Sym == nil {
- Fatalf("pfuncsym nil n sym")
+ if n.Sym() == nil {
+ base.Fatalf("pfuncsym nil n sym")
}
- if f.Class() != PFUNC {
- Fatalf("pfuncsym class not PFUNC %d", f.Class())
+ if f.Class() != ir.PFUNC {
+ base.Fatalf("pfuncsym class not PFUNC %d", f.Class())
}
- s := n.Sym.Linksym()
- s.WriteAddr(Ctxt, n.Xoffset, Widthptr, funcsym(f.Sym).Linksym(), f.Xoffset)
+ s := n.Sym().Linksym()
+ s.WriteAddr(base.Ctxt, n.Offset(), Widthptr, funcsym(f.Sym()).Linksym(), f.Offset())
}
// litsym writes the static literal c to n.
// Neither n nor c is modified.
-func litsym(n, c *Node, wid int) {
- if n.Op != ONAME {
- Fatalf("litsym n op %v", n.Op)
- }
- if c.Op != OLITERAL {
- Fatalf("litsym c op %v", c.Op)
- }
- if n.Sym == nil {
- Fatalf("litsym nil n sym")
- }
- s := n.Sym.Linksym()
- switch u := c.Val().U.(type) {
- case bool:
- i := int64(obj.Bool2int(u))
- s.WriteInt(Ctxt, n.Xoffset, wid, i)
-
- case *Mpint:
- s.WriteInt(Ctxt, n.Xoffset, wid, u.Int64())
-
- case *Mpflt:
- f := u.Float64()
- switch n.Type.Etype {
- case TFLOAT32:
- s.WriteFloat32(Ctxt, n.Xoffset, float32(f))
- case TFLOAT64:
- s.WriteFloat64(Ctxt, n.Xoffset, f)
+func litsym(n, c ir.Node, wid int) {
+ if n.Op() != ir.ONAME {
+ base.Fatalf("litsym n op %v", n.Op())
+ }
+ if n.Sym() == nil {
+ base.Fatalf("litsym nil n sym")
+ }
+ if !types.Identical(n.Type(), c.Type()) {
+ base.Fatalf("litsym: type mismatch: %v has type %v, but %v has type %v", n, n.Type(), c, c.Type())
+ }
+ if c.Op() == ir.ONIL {
+ return
+ }
+ if c.Op() != ir.OLITERAL {
+ base.Fatalf("litsym c op %v", c.Op())
+ }
+ s := n.Sym().Linksym()
+ switch u := c.Val(); u.Kind() {
+ case constant.Bool:
+ i := int64(obj.Bool2int(constant.BoolVal(u)))
+ s.WriteInt(base.Ctxt, n.Offset(), wid, i)
+
+ case constant.Int:
+ s.WriteInt(base.Ctxt, n.Offset(), wid, ir.Int64Val(n.Type(), u))
+
+ case constant.Float:
+ f, _ := constant.Float64Val(u)
+ switch n.Type().Etype {
+ case types.TFLOAT32:
+ s.WriteFloat32(base.Ctxt, n.Offset(), float32(f))
+ case types.TFLOAT64:
+ s.WriteFloat64(base.Ctxt, n.Offset(), f)
}
- case *Mpcplx:
- r := u.Real.Float64()
- i := u.Imag.Float64()
- switch n.Type.Etype {
- case TCOMPLEX64:
- s.WriteFloat32(Ctxt, n.Xoffset, float32(r))
- s.WriteFloat32(Ctxt, n.Xoffset+4, float32(i))
- case TCOMPLEX128:
- s.WriteFloat64(Ctxt, n.Xoffset, r)
- s.WriteFloat64(Ctxt, n.Xoffset+8, i)
+ case constant.Complex:
+ re, _ := constant.Float64Val(constant.Real(u))
+ im, _ := constant.Float64Val(constant.Imag(u))
+ switch n.Type().Etype {
+ case types.TCOMPLEX64:
+ s.WriteFloat32(base.Ctxt, n.Offset(), float32(re))
+ s.WriteFloat32(base.Ctxt, n.Offset()+4, float32(im))
+ case types.TCOMPLEX128:
+ s.WriteFloat64(base.Ctxt, n.Offset(), re)
+ s.WriteFloat64(base.Ctxt, n.Offset()+8, im)
}
- case string:
- symdata := stringsym(n.Pos, u)
- s.WriteAddr(Ctxt, n.Xoffset, Widthptr, symdata, 0)
- s.WriteInt(Ctxt, n.Xoffset+int64(Widthptr), Widthptr, int64(len(u)))
+ case constant.String:
+ i := constant.StringVal(u)
+ symdata := stringsym(n.Pos(), i)
+ s.WriteAddr(base.Ctxt, n.Offset(), Widthptr, symdata, 0)
+ s.WriteInt(base.Ctxt, n.Offset()+int64(Widthptr), Widthptr, int64(len(i)))
default:
- Fatalf("litsym unhandled OLITERAL %v", c)
+ base.Fatalf("litsym unhandled OLITERAL %v", c)
}
}
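(litsym's switch is now driven entirely by go/constant accessors. A standalone sketch exercising the same accessors used above — BoolVal, Float64Val, Real/Imag, StringVal — in place of the old Mpint/Mpflt/Mpcplx union behind c.Val().U:)

package main

import (
	"fmt"
	"go/constant"
	"go/token"
)

func main() {
	fmt.Println(constant.BoolVal(constant.MakeBool(true))) // true

	f, _ := constant.Float64Val(constant.MakeFloat64(2.5))
	fmt.Println(f) // 2.5

	z := constant.BinaryOp(
		constant.MakeFloat64(3),
		token.ADD,
		constant.MakeImag(constant.MakeFloat64(4)),
	) // 3 + 4i
	re, _ := constant.Float64Val(constant.Real(z))
	im, _ := constant.Float64Val(constant.Imag(z))
	fmt.Println(re, im) // 3 4

	fmt.Println(constant.StringVal(constant.MakeString("hi"))) // hi
}
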
diff --git a/src/cmd/compile/internal/gc/op_string.go b/src/cmd/compile/internal/gc/op_string.go
deleted file mode 100644
index 41d588309c..0000000000
--- a/src/cmd/compile/internal/gc/op_string.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// Code generated by "stringer -type=Op -trimprefix=O"; DO NOT EDIT.
-
-package gc
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[OXXX-0]
- _ = x[ONAME-1]
- _ = x[ONONAME-2]
- _ = x[OTYPE-3]
- _ = x[OPACK-4]
- _ = x[OLITERAL-5]
- _ = x[OADD-6]
- _ = x[OSUB-7]
- _ = x[OOR-8]
- _ = x[OXOR-9]
- _ = x[OADDSTR-10]
- _ = x[OADDR-11]
- _ = x[OANDAND-12]
- _ = x[OAPPEND-13]
- _ = x[OBYTES2STR-14]
- _ = x[OBYTES2STRTMP-15]
- _ = x[ORUNES2STR-16]
- _ = x[OSTR2BYTES-17]
- _ = x[OSTR2BYTESTMP-18]
- _ = x[OSTR2RUNES-19]
- _ = x[OAS-20]
- _ = x[OAS2-21]
- _ = x[OAS2DOTTYPE-22]
- _ = x[OAS2FUNC-23]
- _ = x[OAS2MAPR-24]
- _ = x[OAS2RECV-25]
- _ = x[OASOP-26]
- _ = x[OCALL-27]
- _ = x[OCALLFUNC-28]
- _ = x[OCALLMETH-29]
- _ = x[OCALLINTER-30]
- _ = x[OCALLPART-31]
- _ = x[OCAP-32]
- _ = x[OCLOSE-33]
- _ = x[OCLOSURE-34]
- _ = x[OCOMPLIT-35]
- _ = x[OMAPLIT-36]
- _ = x[OSTRUCTLIT-37]
- _ = x[OARRAYLIT-38]
- _ = x[OSLICELIT-39]
- _ = x[OPTRLIT-40]
- _ = x[OCONV-41]
- _ = x[OCONVIFACE-42]
- _ = x[OCONVNOP-43]
- _ = x[OCOPY-44]
- _ = x[ODCL-45]
- _ = x[ODCLFUNC-46]
- _ = x[ODCLFIELD-47]
- _ = x[ODCLCONST-48]
- _ = x[ODCLTYPE-49]
- _ = x[ODELETE-50]
- _ = x[ODOT-51]
- _ = x[ODOTPTR-52]
- _ = x[ODOTMETH-53]
- _ = x[ODOTINTER-54]
- _ = x[OXDOT-55]
- _ = x[ODOTTYPE-56]
- _ = x[ODOTTYPE2-57]
- _ = x[OEQ-58]
- _ = x[ONE-59]
- _ = x[OLT-60]
- _ = x[OLE-61]
- _ = x[OGE-62]
- _ = x[OGT-63]
- _ = x[ODEREF-64]
- _ = x[OINDEX-65]
- _ = x[OINDEXMAP-66]
- _ = x[OKEY-67]
- _ = x[OSTRUCTKEY-68]
- _ = x[OLEN-69]
- _ = x[OMAKE-70]
- _ = x[OMAKECHAN-71]
- _ = x[OMAKEMAP-72]
- _ = x[OMAKESLICE-73]
- _ = x[OMAKESLICECOPY-74]
- _ = x[OMUL-75]
- _ = x[ODIV-76]
- _ = x[OMOD-77]
- _ = x[OLSH-78]
- _ = x[ORSH-79]
- _ = x[OAND-80]
- _ = x[OANDNOT-81]
- _ = x[ONEW-82]
- _ = x[ONEWOBJ-83]
- _ = x[ONOT-84]
- _ = x[OBITNOT-85]
- _ = x[OPLUS-86]
- _ = x[ONEG-87]
- _ = x[OOROR-88]
- _ = x[OPANIC-89]
- _ = x[OPRINT-90]
- _ = x[OPRINTN-91]
- _ = x[OPAREN-92]
- _ = x[OSEND-93]
- _ = x[OSLICE-94]
- _ = x[OSLICEARR-95]
- _ = x[OSLICESTR-96]
- _ = x[OSLICE3-97]
- _ = x[OSLICE3ARR-98]
- _ = x[OSLICEHEADER-99]
- _ = x[ORECOVER-100]
- _ = x[ORECV-101]
- _ = x[ORUNESTR-102]
- _ = x[OSELRECV-103]
- _ = x[OSELRECV2-104]
- _ = x[OIOTA-105]
- _ = x[OREAL-106]
- _ = x[OIMAG-107]
- _ = x[OCOMPLEX-108]
- _ = x[OALIGNOF-109]
- _ = x[OOFFSETOF-110]
- _ = x[OSIZEOF-111]
- _ = x[OBLOCK-112]
- _ = x[OBREAK-113]
- _ = x[OCASE-114]
- _ = x[OCONTINUE-115]
- _ = x[ODEFER-116]
- _ = x[OEMPTY-117]
- _ = x[OFALL-118]
- _ = x[OFOR-119]
- _ = x[OFORUNTIL-120]
- _ = x[OGOTO-121]
- _ = x[OIF-122]
- _ = x[OLABEL-123]
- _ = x[OGO-124]
- _ = x[ORANGE-125]
- _ = x[ORETURN-126]
- _ = x[OSELECT-127]
- _ = x[OSWITCH-128]
- _ = x[OTYPESW-129]
- _ = x[OTCHAN-130]
- _ = x[OTMAP-131]
- _ = x[OTSTRUCT-132]
- _ = x[OTINTER-133]
- _ = x[OTFUNC-134]
- _ = x[OTARRAY-135]
- _ = x[ODDD-136]
- _ = x[OINLCALL-137]
- _ = x[OEFACE-138]
- _ = x[OITAB-139]
- _ = x[OIDATA-140]
- _ = x[OSPTR-141]
- _ = x[OCLOSUREVAR-142]
- _ = x[OCFUNC-143]
- _ = x[OCHECKNIL-144]
- _ = x[OVARDEF-145]
- _ = x[OVARKILL-146]
- _ = x[OVARLIVE-147]
- _ = x[ORESULT-148]
- _ = x[OINLMARK-149]
- _ = x[ORETJMP-150]
- _ = x[OGETG-151]
- _ = x[OEND-152]
-}
-
-const _Op_name = "XXXNAMENONAMETYPEPACKLITERALADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFBLOCKBREAKCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYDDDINLCALLEFACEITABIDATASPTRCLOSUREVARCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND"
-
-var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 36, 39, 45, 49, 55, 61, 70, 82, 91, 100, 112, 121, 123, 126, 136, 143, 150, 157, 161, 165, 173, 181, 190, 198, 201, 206, 213, 220, 226, 235, 243, 251, 257, 261, 270, 277, 281, 284, 291, 299, 307, 314, 320, 323, 329, 336, 344, 348, 355, 363, 365, 367, 369, 371, 373, 375, 380, 385, 393, 396, 405, 408, 412, 420, 427, 436, 449, 452, 455, 458, 461, 464, 467, 473, 476, 482, 485, 491, 495, 498, 502, 507, 512, 518, 523, 527, 532, 540, 548, 554, 563, 574, 581, 585, 592, 599, 607, 611, 615, 619, 626, 633, 641, 647, 652, 657, 661, 669, 674, 679, 683, 686, 694, 698, 700, 705, 707, 712, 718, 724, 730, 736, 741, 745, 752, 758, 763, 769, 772, 779, 784, 788, 793, 797, 807, 812, 820, 826, 833, 840, 846, 853, 859, 863, 866}
-
-func (i Op) String() string {
- if i >= Op(len(_Op_index)-1) {
- return "Op(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _Op_name[_Op_index[i]:_Op_index[i+1]]
-}
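(The generated stringer file is deleted here; Op and its String method now live in package ir, as the ir.OXXX references throughout this diff show. A reduced sketch of the pattern the deleted code used — one packed name string plus a parallel index table:)

package main

import (
	"fmt"
	"strconv"
)

type Op int

// All names live in one string; _Op_index slices out each one.
const _Op_name = "XXXNAMETYPE"

var _Op_index = [...]uint8{0, 3, 7, 11}

func (i Op) String() string {
	if i < 0 || i >= Op(len(_Op_index)-1) {
		return "Op(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _Op_name[_Op_index[i]:_Op_index[i+1]]
}

func main() {
	fmt.Println(Op(1)) // NAME
}
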
diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go
index 863de5b6c7..b7d713439b 100644
--- a/src/cmd/compile/internal/gc/order.go
+++ b/src/cmd/compile/internal/gc/order.go
@@ -5,6 +5,8 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
@@ -42,33 +44,33 @@ import (
// Order holds state during the ordering process.
type Order struct {
- out []*Node // list of generated statements
- temp []*Node // stack of temporary variables
- free map[string][]*Node // free list of unused temporaries, by type.LongString().
+ out []ir.Node // list of generated statements
+ temp []ir.Node // stack of temporary variables
+ free map[string][]ir.Node // free list of unused temporaries, by type.LongString().
}
// Order rewrites fn.Nbody to apply the ordering constraints
// described in the comment at the top of the file.
-func order(fn *Node) {
- if Debug.W > 1 {
- s := fmt.Sprintf("\nbefore order %v", fn.Func.Nname.Sym)
- dumplist(s, fn.Nbody)
+func order(fn ir.Node) {
+ if base.Flag.W > 1 {
+ s := fmt.Sprintf("\nbefore order %v", fn.Func().Nname.Sym())
+ ir.DumpList(s, fn.Body())
}
- orderBlock(&fn.Nbody, map[string][]*Node{})
+ orderBlock(fn.PtrBody(), map[string][]ir.Node{})
}
// newTemp allocates a new temporary with the given type,
// pushes it onto the temp stack, and returns it.
// If clear is true, newTemp emits code to zero the temporary.
-func (o *Order) newTemp(t *types.Type, clear bool) *Node {
- var v *Node
+func (o *Order) newTemp(t *types.Type, clear bool) ir.Node {
+ var v ir.Node
// Note: LongString is close to the type equality we want,
// but not exactly. We still need to double-check with types.Identical.
key := t.LongString()
a := o.free[key]
for i, n := range a {
- if types.Identical(t, n.Type) {
+ if types.Identical(t, n.Type()) {
v = a[i]
a[i] = a[len(a)-1]
a = a[:len(a)-1]
@@ -80,7 +82,7 @@ func (o *Order) newTemp(t *types.Type, clear bool) *Node {
v = temp(t)
}
if clear {
- a := nod(OAS, v, nil)
+ a := ir.Nod(ir.OAS, v, nil)
a = typecheck(a, ctxStmt)
o.out = append(o.out, a)
}
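(newTemp's reuse scheme above: a pool keyed by an approximate string, type.LongString(), with a precise equality check before reuse. A reduced sketch of that free-list discipline — the identical callback stands in for types.Identical:)

package main

import "fmt"

type temp struct{ typ string }

var free = map[string][]*temp{}

// newTemp looks for a reusable temporary under the approximate key and
// verifies it with the precise check before handing it back.
func newTemp(key string, identical func(*temp) bool) *temp {
	a := free[key]
	for i, n := range a {
		if identical(n) {
			a[i] = a[len(a)-1] // swap-delete from the free list
			free[key] = a[:len(a)-1]
			return n
		}
	}
	return &temp{typ: key}
}

func main() {
	t := &temp{typ: "[]int"}
	free["[]int"] = append(free["[]int"], t)
	got := newTemp("[]int", func(n *temp) bool { return n.typ == "[]int" })
	fmt.Println(got == t) // true: reused
}
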
@@ -101,9 +103,9 @@ func (o *Order) newTemp(t *types.Type, clear bool) *Node {
// (The other candidate would be map access, but map access
// returns a pointer to the result data instead of taking a pointer
// to be filled in.)
-func (o *Order) copyExpr(n *Node, t *types.Type, clear bool) *Node {
+func (o *Order) copyExpr(n ir.Node, t *types.Type, clear bool) ir.Node {
v := o.newTemp(t, clear)
- a := nod(OAS, v, n)
+ a := ir.Nod(ir.OAS, v, n)
a = typecheck(a, ctxStmt)
o.out = append(o.out, a)
return v
@@ -113,25 +115,25 @@ func (o *Order) copyExpr(n *Node, t *types.Type, clear bool) *Node {
// The definition of cheap is that n is a variable or constant.
// If not, cheapExpr allocates a new tmp, emits tmp = n,
// and then returns tmp.
-func (o *Order) cheapExpr(n *Node) *Node {
+func (o *Order) cheapExpr(n ir.Node) ir.Node {
if n == nil {
return nil
}
- switch n.Op {
- case ONAME, OLITERAL:
+ switch n.Op() {
+ case ir.ONAME, ir.OLITERAL, ir.ONIL:
return n
- case OLEN, OCAP:
- l := o.cheapExpr(n.Left)
- if l == n.Left {
+ case ir.OLEN, ir.OCAP:
+ l := o.cheapExpr(n.Left())
+ if l == n.Left() {
return n
}
- a := n.sepcopy()
- a.Left = l
+ a := ir.SepCopy(n)
+ a.SetLeft(l)
return typecheck(a, ctxExpr)
}
- return o.copyExpr(n, n.Type, false)
+ return o.copyExpr(n, n.Type(), false)
}
// safeExpr returns a safe version of n.
@@ -141,47 +143,47 @@ func (o *Order) cheapExpr(n *Node) *Node {
// as assigning to the original n.
//
// The intended use is to apply to x when rewriting x += y into x = x + y.
-func (o *Order) safeExpr(n *Node) *Node {
- switch n.Op {
- case ONAME, OLITERAL:
+func (o *Order) safeExpr(n ir.Node) ir.Node {
+ switch n.Op() {
+ case ir.ONAME, ir.OLITERAL, ir.ONIL:
return n
- case ODOT, OLEN, OCAP:
- l := o.safeExpr(n.Left)
- if l == n.Left {
+ case ir.ODOT, ir.OLEN, ir.OCAP:
+ l := o.safeExpr(n.Left())
+ if l == n.Left() {
return n
}
- a := n.sepcopy()
- a.Left = l
+ a := ir.SepCopy(n)
+ a.SetLeft(l)
return typecheck(a, ctxExpr)
- case ODOTPTR, ODEREF:
- l := o.cheapExpr(n.Left)
- if l == n.Left {
+ case ir.ODOTPTR, ir.ODEREF:
+ l := o.cheapExpr(n.Left())
+ if l == n.Left() {
return n
}
- a := n.sepcopy()
- a.Left = l
+ a := ir.SepCopy(n)
+ a.SetLeft(l)
return typecheck(a, ctxExpr)
- case OINDEX, OINDEXMAP:
- var l *Node
- if n.Left.Type.IsArray() {
- l = o.safeExpr(n.Left)
+ case ir.OINDEX, ir.OINDEXMAP:
+ var l ir.Node
+ if n.Left().Type().IsArray() {
+ l = o.safeExpr(n.Left())
} else {
- l = o.cheapExpr(n.Left)
+ l = o.cheapExpr(n.Left())
}
- r := o.cheapExpr(n.Right)
- if l == n.Left && r == n.Right {
+ r := o.cheapExpr(n.Right())
+ if l == n.Left() && r == n.Right() {
return n
}
- a := n.sepcopy()
- a.Left = l
- a.Right = r
+ a := ir.SepCopy(n)
+ a.SetLeft(l)
+ a.SetRight(r)
return typecheck(a, ctxExpr)
default:
- Fatalf("order.safeExpr %v", n.Op)
+ base.Fatalf("order.safeExpr %v", n.Op())
return nil // not reached
}
}
@@ -192,8 +194,8 @@ func (o *Order) safeExpr(n *Node) *Node {
// of ordinary stack variables, those are not 'isaddrokay'. Temporaries are okay,
// because we emit explicit VARKILL instructions marking the end of those
// temporaries' lifetimes.
-func isaddrokay(n *Node) bool {
- return islvalue(n) && (n.Op != ONAME || n.Class() == PEXTERN || n.IsAutoTmp())
+func isaddrokay(n ir.Node) bool {
+ return islvalue(n) && (n.Op() != ir.ONAME || n.Class() == ir.PEXTERN || ir.IsAutoTmp(n))
}
// addrTemp ensures that n is okay to pass by address to runtime routines.
@@ -201,16 +203,16 @@ func isaddrokay(n *Node) bool {
// tmp = n, and then returns tmp.
// The result of addrTemp MUST be assigned back to n, e.g.
// n.Left = o.addrTemp(n.Left)
-func (o *Order) addrTemp(n *Node) *Node {
- if consttype(n) != CTxxx {
+func (o *Order) addrTemp(n ir.Node) ir.Node {
+ if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL {
// TODO: expand this to all static composite literal nodes?
n = defaultlit(n, nil)
- dowidth(n.Type)
- vstat := readonlystaticname(n.Type)
+ dowidth(n.Type())
+ vstat := readonlystaticname(n.Type())
var s InitSchedule
s.staticassign(vstat, n)
if s.out != nil {
- Fatalf("staticassign of const generated code: %+v", n)
+ base.Fatalf("staticassign of const generated code: %+v", n)
}
vstat = typecheck(vstat, ctxExpr)
return vstat
@@ -218,12 +220,12 @@ func (o *Order) addrTemp(n *Node) *Node {
if isaddrokay(n) {
return n
}
- return o.copyExpr(n, n.Type, false)
+ return o.copyExpr(n, n.Type(), false)
}
// mapKeyTemp prepares n to be a key in a map runtime call and returns n.
// It should only be used for map runtime calls which have *_fast* versions.
-func (o *Order) mapKeyTemp(t *types.Type, n *Node) *Node {
+func (o *Order) mapKeyTemp(t *types.Type, n ir.Node) ir.Node {
// Most map calls need to take the address of the key.
// Exception: map*_fast* calls. See golang.org/issue/19015.
if mapfast(t) == mapslow {
@@ -246,22 +248,22 @@ func (o *Order) mapKeyTemp(t *types.Type, n *Node) *Node {
// It would be nice to handle these generally, but because
// []byte keys are not allowed in maps, the use of string(k)
// comes up in important cases in practice. See issue 3512.
-func mapKeyReplaceStrConv(n *Node) bool {
+func mapKeyReplaceStrConv(n ir.Node) bool {
var replaced bool
- switch n.Op {
- case OBYTES2STR:
- n.Op = OBYTES2STRTMP
+ switch n.Op() {
+ case ir.OBYTES2STR:
+ n.SetOp(ir.OBYTES2STRTMP)
replaced = true
- case OSTRUCTLIT:
- for _, elem := range n.List.Slice() {
- if mapKeyReplaceStrConv(elem.Left) {
+ case ir.OSTRUCTLIT:
+ for _, elem := range n.List().Slice() {
+ if mapKeyReplaceStrConv(elem.Left()) {
replaced = true
}
}
- case OARRAYLIT:
- for _, elem := range n.List.Slice() {
- if elem.Op == OKEY {
- elem = elem.Right
+ case ir.OARRAYLIT:
+ for _, elem := range n.List().Slice() {
+ if elem.Op() == ir.OKEY {
+ elem = elem.Right()
}
if mapKeyReplaceStrConv(elem) {
replaced = true
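(What mapKeyReplaceStrConv enables, seen from Go source rather than IR: the []byte-to-string conversion in a map index may borrow the byte slice's storage instead of allocating, because the key is only needed for the duration of the lookup:)

package main

import "fmt"

func main() {
	m := map[string]int{"key": 1}
	b := []byte("key")
	// OBYTES2STR is rewritten to OBYTES2STRTMP here, so this lookup
	// does not allocate a new string.
	fmt.Println(m[string(b)]) // 1
}
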
@@ -282,7 +284,7 @@ func (o *Order) markTemp() ordermarker {
// which must have been returned by markTemp.
func (o *Order) popTemp(mark ordermarker) {
for _, n := range o.temp[mark:] {
- key := n.Type.LongString()
+ key := n.Type().LongString()
o.free[key] = append(o.free[key], n)
}
o.temp = o.temp[:mark]
@@ -291,11 +293,11 @@ func (o *Order) popTemp(mark ordermarker) {
// cleanTempNoPop returns a list of VARKILL instructions
// for each temporary above the mark on the temporary stack.
// It does not pop the temporaries from the stack.
-func (o *Order) cleanTempNoPop(mark ordermarker) []*Node {
- var out []*Node
+func (o *Order) cleanTempNoPop(mark ordermarker) []ir.Node {
+ var out []ir.Node
for i := len(o.temp) - 1; i >= int(mark); i-- {
n := o.temp[i]
- kill := nod(OVARKILL, n, nil)
+ kill := ir.Nod(ir.OVARKILL, n, nil)
kill = typecheck(kill, ctxStmt)
out = append(out, kill)
}
@@ -310,7 +312,7 @@ func (o *Order) cleanTemp(top ordermarker) {
}
// stmtList orders each of the statements in the list.
-func (o *Order) stmtList(l Nodes) {
+func (o *Order) stmtList(l ir.Nodes) {
s := l.Slice()
for i := range s {
orderMakeSliceCopy(s[i:])
@@ -322,8 +324,8 @@ func (o *Order) stmtList(l Nodes) {
// m = OMAKESLICE([]T, x); OCOPY(m, s)
// and rewrites it to:
// m = OMAKESLICECOPY([]T, x, s); nil
-func orderMakeSliceCopy(s []*Node) {
- if Debug.N != 0 || instrumenting {
+func orderMakeSliceCopy(s []ir.Node) {
+ if base.Flag.N != 0 || instrumenting {
return
}
@@ -334,46 +336,46 @@ func orderMakeSliceCopy(s []*Node) {
asn := s[0]
copyn := s[1]
- if asn == nil || asn.Op != OAS {
+ if asn == nil || asn.Op() != ir.OAS {
return
}
- if asn.Left.Op != ONAME {
+ if asn.Left().Op() != ir.ONAME {
return
}
- if asn.Left.isBlank() {
+ if ir.IsBlank(asn.Left()) {
return
}
- maken := asn.Right
- if maken == nil || maken.Op != OMAKESLICE {
+ maken := asn.Right()
+ if maken == nil || maken.Op() != ir.OMAKESLICE {
return
}
- if maken.Esc == EscNone {
+ if maken.Esc() == EscNone {
return
}
- if maken.Left == nil || maken.Right != nil {
+ if maken.Left() == nil || maken.Right() != nil {
return
}
- if copyn.Op != OCOPY {
+ if copyn.Op() != ir.OCOPY {
return
}
- if copyn.Left.Op != ONAME {
+ if copyn.Left().Op() != ir.ONAME {
return
}
- if asn.Left.Sym != copyn.Left.Sym {
+ if asn.Left().Sym() != copyn.Left().Sym() {
return
}
- if copyn.Right.Op != ONAME {
+ if copyn.Right().Op() != ir.ONAME {
return
}
- if copyn.Left.Sym == copyn.Right.Sym {
+ if copyn.Left().Sym() == copyn.Right().Sym() {
return
}
- maken.Op = OMAKESLICECOPY
- maken.Right = copyn.Right
+ maken.SetOp(ir.OMAKESLICECOPY)
+ maken.SetRight(copyn.Right())
// Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s)
- maken.SetBounded(maken.Left.Op == OLEN && samesafeexpr(maken.Left.Left, copyn.Right))
+ maken.SetBounded(maken.Left().Op() == ir.OLEN && samesafeexpr(maken.Left().Left(), copyn.Right()))
maken = typecheck(maken, ctxExpr)
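(The source-level pattern orderMakeSliceCopy fuses: when the checks above pass, the make and copy become a single OMAKESLICECOPY, one runtime allocate-and-fill call:)

package main

import "fmt"

func main() {
	s := []int{1, 2, 3}
	m := make([]int, len(s)) // OMAKESLICE ...
	copy(m, s)               // ... + OCOPY => OMAKESLICECOPY
	fmt.Println(m)           // [1 2 3]
}
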
@@ -384,18 +386,18 @@ func orderMakeSliceCopy(s []*Node) {
// edge inserts coverage instrumentation for libfuzzer.
func (o *Order) edge() {
- if Debug_libfuzzer == 0 {
+ if base.Debug.Libfuzzer == 0 {
return
}
// Create a new uint8 counter to be allocated in section
// __libfuzzer_extra_counters.
- counter := staticname(types.Types[TUINT8])
- counter.Name.SetLibfuzzerExtraCounter(true)
+ counter := staticname(types.Types[types.TUINT8])
+ counter.Name().SetLibfuzzerExtraCounter(true)
// counter += 1
- incr := nod(OASOP, counter, nodintconst(1))
- incr.SetSubOp(OADD)
+ incr := ir.Nod(ir.OASOP, counter, nodintconst(1))
+ incr.SetSubOp(ir.OADD)
incr = typecheck(incr, ctxStmt)
o.out = append(o.out, incr)
@@ -404,7 +406,7 @@ func (o *Order) edge() {
// orderBlock orders the block of statements in n into a new slice,
// and then replaces the old slice in n with the new slice.
// free is a map that can be used to obtain temporary variables by type.
-func orderBlock(n *Nodes, free map[string][]*Node) {
+func orderBlock(n *ir.Nodes, free map[string][]ir.Node) {
var order Order
order.free = free
mark := order.markTemp()
@@ -418,7 +420,7 @@ func orderBlock(n *Nodes, free map[string][]*Node) {
// leaves them as the init list of the final *np.
// The result of exprInPlace MUST be assigned back to n, e.g.
// n.Left = o.exprInPlace(n.Left)
-func (o *Order) exprInPlace(n *Node) *Node {
+func (o *Order) exprInPlace(n ir.Node) ir.Node {
var order Order
order.free = o.free
n = order.expr(n, nil)
@@ -435,7 +437,7 @@ func (o *Order) exprInPlace(n *Node) *Node {
// The result of orderStmtInPlace MUST be assigned back to n, e.g.
// n.Left = orderStmtInPlace(n.Left)
// free is a map that can be used to obtain temporary variables by type.
-func orderStmtInPlace(n *Node, free map[string][]*Node) *Node {
+func orderStmtInPlace(n ir.Node, free map[string][]ir.Node) ir.Node {
var order Order
order.free = free
mark := order.markTemp()
@@ -445,60 +447,60 @@ func orderStmtInPlace(n *Node, free map[string][]*Node) *Node {
}
// init moves n's init list to o.out.
-func (o *Order) init(n *Node) {
- if n.mayBeShared() {
+func (o *Order) init(n ir.Node) {
+ if ir.MayBeShared(n) {
// For concurrency safety, don't mutate potentially shared nodes.
// First, ensure that no work is required here.
- if n.Ninit.Len() > 0 {
- Fatalf("order.init shared node with ninit")
+ if n.Init().Len() > 0 {
+ base.Fatalf("order.init shared node with ninit")
}
return
}
- o.stmtList(n.Ninit)
- n.Ninit.Set(nil)
+ o.stmtList(n.Init())
+ n.PtrInit().Set(nil)
}
// call orders the call expression n.
// n.Op is OCALLMETH/OCALLFUNC/OCALLINTER or a builtin like OCOPY.
-func (o *Order) call(n *Node) {
- if n.Ninit.Len() > 0 {
+func (o *Order) call(n ir.Node) {
+ if n.Init().Len() > 0 {
// Caller should have already called o.init(n).
- Fatalf("%v with unexpected ninit", n.Op)
+ base.Fatalf("%v with unexpected ninit", n.Op())
}
// Builtin functions.
- if n.Op != OCALLFUNC && n.Op != OCALLMETH && n.Op != OCALLINTER {
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, nil)
- o.exprList(n.List)
+ if n.Op() != ir.OCALLFUNC && n.Op() != ir.OCALLMETH && n.Op() != ir.OCALLINTER {
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), nil))
+ o.exprList(n.List())
return
}
fixVariadicCall(n)
- n.Left = o.expr(n.Left, nil)
- o.exprList(n.List)
+ n.SetLeft(o.expr(n.Left(), nil))
+ o.exprList(n.List())
- if n.Op == OCALLINTER {
+ if n.Op() == ir.OCALLINTER {
return
}
- keepAlive := func(arg *Node) {
+ keepAlive := func(arg ir.Node) {
// If the argument is really a pointer being converted to uintptr,
// arrange for the pointer to be kept alive until the call returns,
// by copying it into a temp and marking that temp
// still alive when we pop the temp stack.
- if arg.Op == OCONVNOP && arg.Left.Type.IsUnsafePtr() {
- x := o.copyExpr(arg.Left, arg.Left.Type, false)
- arg.Left = x
- x.Name.SetAddrtaken(true) // ensure SSA keeps the x variable
- n.Nbody.Append(typecheck(nod(OVARLIVE, x, nil), ctxStmt))
+ if arg.Op() == ir.OCONVNOP && arg.Left().Type().IsUnsafePtr() {
+ x := o.copyExpr(arg.Left(), arg.Left().Type(), false)
+ arg.SetLeft(x)
+ x.Name().SetAddrtaken(true) // ensure SSA keeps the x variable
+ n.PtrBody().Append(typecheck(ir.Nod(ir.OVARLIVE, x, nil), ctxStmt))
}
}
// Check for "unsafe-uintptr" tag provided by escape analysis.
- for i, param := range n.Left.Type.Params().FieldSlice() {
+ for i, param := range n.Left().Type().Params().FieldSlice() {
if param.Note == unsafeUintptrTag || param.Note == uintptrEscapesTag {
- if arg := n.List.Index(i); arg.Op == OSLICELIT {
- for _, elt := range arg.List.Slice() {
+ if arg := n.List().Index(i); arg.Op() == ir.OSLICELIT {
+ for _, elt := range arg.List().Slice() {
keepAlive(elt)
}
} else {
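(The hazard keepAlive handles above: once a pointer is converted to uintptr, the GC no longer sees it, so the referent must stay live until the call returns. The compiler's fix is the temp plus OVARLIVE; the hand-written equivalent in user code is runtime.KeepAlive:)

package main

import (
	"runtime"
	"unsafe"
)

func use(p uintptr) { _ = p } // stand-in for a syscall-like function

func main() {
	x := new(int)
	use(uintptr(unsafe.Pointer(x)))
	runtime.KeepAlive(x) // keep x live across the call above
}
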
@@ -523,42 +525,42 @@ func (o *Order) call(n *Node) {
// cases they are also typically registerizable, so not much harm done.
// And this only applies to the multiple-assignment form.
// We could do a more precise analysis if needed, like in walk.go.
-func (o *Order) mapAssign(n *Node) {
- switch n.Op {
+func (o *Order) mapAssign(n ir.Node) {
+ switch n.Op() {
default:
- Fatalf("order.mapAssign %v", n.Op)
+ base.Fatalf("order.mapAssign %v", n.Op())
- case OAS, OASOP:
- if n.Left.Op == OINDEXMAP {
+ case ir.OAS, ir.OASOP:
+ if n.Left().Op() == ir.OINDEXMAP {
// Make sure we evaluate the RHS before starting the map insert.
// We need to make sure the RHS won't panic. See issue 22881.
- if n.Right.Op == OAPPEND {
- s := n.Right.List.Slice()[1:]
+ if n.Right().Op() == ir.OAPPEND {
+ s := n.Right().List().Slice()[1:]
for i, n := range s {
s[i] = o.cheapExpr(n)
}
} else {
- n.Right = o.cheapExpr(n.Right)
+ n.SetRight(o.cheapExpr(n.Right()))
}
}
o.out = append(o.out, n)
- case OAS2, OAS2DOTTYPE, OAS2MAPR, OAS2FUNC:
- var post []*Node
- for i, m := range n.List.Slice() {
+ case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2MAPR, ir.OAS2FUNC:
+ var post []ir.Node
+ for i, m := range n.List().Slice() {
switch {
- case m.Op == OINDEXMAP:
- if !m.Left.IsAutoTmp() {
- m.Left = o.copyExpr(m.Left, m.Left.Type, false)
+ case m.Op() == ir.OINDEXMAP:
+ if !ir.IsAutoTmp(m.Left()) {
+ m.SetLeft(o.copyExpr(m.Left(), m.Left().Type(), false))
}
- if !m.Right.IsAutoTmp() {
- m.Right = o.copyExpr(m.Right, m.Right.Type, false)
+ if !ir.IsAutoTmp(m.Right()) {
+ m.SetRight(o.copyExpr(m.Right(), m.Right().Type(), false))
}
fallthrough
- case instrumenting && n.Op == OAS2FUNC && !m.isBlank():
- t := o.newTemp(m.Type, false)
- n.List.SetIndex(i, t)
- a := nod(OAS, m, t)
+ case instrumenting && n.Op() == ir.OAS2FUNC && !ir.IsBlank(m):
+ t := o.newTemp(m.Type(), false)
+ n.List().SetIndex(i, t)
+ a := ir.Nod(ir.OAS, m, t)
a = typecheck(a, ctxStmt)
post = append(post, a)
}
@@ -572,7 +574,7 @@ func (o *Order) mapAssign(n *Node) {
// stmt orders the statement n, appending to o.out.
// Temporaries created during the statement are cleaned
// up using VARKILL instructions as possible.
-func (o *Order) stmt(n *Node) {
+func (o *Order) stmt(n ir.Node) {
if n == nil {
return
}
@@ -580,62 +582,62 @@ func (o *Order) stmt(n *Node) {
lno := setlineno(n)
o.init(n)
- switch n.Op {
+ switch n.Op() {
default:
- Fatalf("order.stmt %v", n.Op)
+ base.Fatalf("order.stmt %v", n.Op())
- case OVARKILL, OVARLIVE, OINLMARK:
+ case ir.OVARKILL, ir.OVARLIVE, ir.OINLMARK:
o.out = append(o.out, n)
- case OAS:
+ case ir.OAS:
t := o.markTemp()
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, n.Left)
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), n.Left()))
o.mapAssign(n)
o.cleanTemp(t)
- case OASOP:
+ case ir.OASOP:
t := o.markTemp()
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, nil)
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), nil))
- if instrumenting || n.Left.Op == OINDEXMAP && (n.SubOp() == ODIV || n.SubOp() == OMOD) {
+ if instrumenting || n.Left().Op() == ir.OINDEXMAP && (n.SubOp() == ir.ODIV || n.SubOp() == ir.OMOD) {
// Rewrite m[k] op= r into m[k] = m[k] op r so
// that we can ensure that if op panics
// because r is zero, the panic happens before
// the map assignment.
- n.Left = o.safeExpr(n.Left)
+ n.SetLeft(o.safeExpr(n.Left()))
- l := treecopy(n.Left, src.NoXPos)
- if l.Op == OINDEXMAP {
+ l := treecopy(n.Left(), src.NoXPos)
+ if l.Op() == ir.OINDEXMAP {
l.SetIndexMapLValue(false)
}
- l = o.copyExpr(l, n.Left.Type, false)
- n.Right = nod(n.SubOp(), l, n.Right)
- n.Right = typecheck(n.Right, ctxExpr)
- n.Right = o.expr(n.Right, nil)
+ l = o.copyExpr(l, n.Left().Type(), false)
+ n.SetRight(ir.Nod(n.SubOp(), l, n.Right()))
+ n.SetRight(typecheck(n.Right(), ctxExpr))
+ n.SetRight(o.expr(n.Right(), nil))
- n.Op = OAS
+ n.SetOp(ir.OAS)
n.ResetAux()
}
o.mapAssign(n)
o.cleanTemp(t)
- case OAS2:
+ case ir.OAS2:
t := o.markTemp()
- o.exprList(n.List)
- o.exprList(n.Rlist)
+ o.exprList(n.List())
+ o.exprList(n.Rlist())
o.mapAssign(n)
o.cleanTemp(t)
// Special: avoid copy of func call n.Right
- case OAS2FUNC:
+ case ir.OAS2FUNC:
t := o.markTemp()
- o.exprList(n.List)
- o.init(n.Right)
- o.call(n.Right)
+ o.exprList(n.List())
+ o.init(n.Right())
+ o.call(n.Right())
o.as2(n)
o.cleanTemp(t)
@@ -645,114 +647,114 @@ func (o *Order) stmt(n *Node) {
//
// OAS2MAPR: make sure key is addressable if needed,
// and make sure OINDEXMAP is not copied out.
- case OAS2DOTTYPE, OAS2RECV, OAS2MAPR:
+ case ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OAS2MAPR:
t := o.markTemp()
- o.exprList(n.List)
-
- switch r := n.Right; r.Op {
- case ODOTTYPE2, ORECV:
- r.Left = o.expr(r.Left, nil)
- case OINDEXMAP:
- r.Left = o.expr(r.Left, nil)
- r.Right = o.expr(r.Right, nil)
+ o.exprList(n.List())
+
+ switch r := n.Right(); r.Op() {
+ case ir.ODOTTYPE2, ir.ORECV:
+ r.SetLeft(o.expr(r.Left(), nil))
+ case ir.OINDEXMAP:
+ r.SetLeft(o.expr(r.Left(), nil))
+ r.SetRight(o.expr(r.Right(), nil))
// See similar conversion for OINDEXMAP below.
- _ = mapKeyReplaceStrConv(r.Right)
- r.Right = o.mapKeyTemp(r.Left.Type, r.Right)
+ _ = mapKeyReplaceStrConv(r.Right())
+ r.SetRight(o.mapKeyTemp(r.Left().Type(), r.Right()))
default:
- Fatalf("order.stmt: %v", r.Op)
+ base.Fatalf("order.stmt: %v", r.Op())
}
o.okAs2(n)
o.cleanTemp(t)
// Special: does not save n onto out.
- case OBLOCK, OEMPTY:
- o.stmtList(n.List)
+ case ir.OBLOCK, ir.OEMPTY:
+ o.stmtList(n.List())
// Special: n->left is not an expression; save as is.
- case OBREAK,
- OCONTINUE,
- ODCL,
- ODCLCONST,
- ODCLTYPE,
- OFALL,
- OGOTO,
- OLABEL,
- ORETJMP:
+ case ir.OBREAK,
+ ir.OCONTINUE,
+ ir.ODCL,
+ ir.ODCLCONST,
+ ir.ODCLTYPE,
+ ir.OFALL,
+ ir.OGOTO,
+ ir.OLABEL,
+ ir.ORETJMP:
o.out = append(o.out, n)
// Special: handle call arguments.
- case OCALLFUNC, OCALLINTER, OCALLMETH:
+ case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH:
t := o.markTemp()
o.call(n)
o.out = append(o.out, n)
o.cleanTemp(t)
- case OCLOSE,
- OCOPY,
- OPRINT,
- OPRINTN,
- ORECOVER,
- ORECV:
+ case ir.OCLOSE,
+ ir.OCOPY,
+ ir.OPRINT,
+ ir.OPRINTN,
+ ir.ORECOVER,
+ ir.ORECV:
t := o.markTemp()
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, nil)
- o.exprList(n.List)
- o.exprList(n.Rlist)
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), nil))
+ o.exprList(n.List())
+ o.exprList(n.Rlist())
o.out = append(o.out, n)
o.cleanTemp(t)
// Special: order arguments to inner call but not call itself.
- case ODEFER, OGO:
+ case ir.ODEFER, ir.OGO:
t := o.markTemp()
- o.init(n.Left)
- o.call(n.Left)
+ o.init(n.Left())
+ o.call(n.Left())
o.out = append(o.out, n)
o.cleanTemp(t)
- case ODELETE:
+ case ir.ODELETE:
t := o.markTemp()
- n.List.SetFirst(o.expr(n.List.First(), nil))
- n.List.SetSecond(o.expr(n.List.Second(), nil))
- n.List.SetSecond(o.mapKeyTemp(n.List.First().Type, n.List.Second()))
+ n.List().SetFirst(o.expr(n.List().First(), nil))
+ n.List().SetSecond(o.expr(n.List().Second(), nil))
+ n.List().SetSecond(o.mapKeyTemp(n.List().First().Type(), n.List().Second()))
o.out = append(o.out, n)
o.cleanTemp(t)
// Clean temporaries from condition evaluation at
// beginning of loop body and after for statement.
- case OFOR:
+ case ir.OFOR:
t := o.markTemp()
- n.Left = o.exprInPlace(n.Left)
- n.Nbody.Prepend(o.cleanTempNoPop(t)...)
- orderBlock(&n.Nbody, o.free)
- n.Right = orderStmtInPlace(n.Right, o.free)
+ n.SetLeft(o.exprInPlace(n.Left()))
+ n.PtrBody().Prepend(o.cleanTempNoPop(t)...)
+ orderBlock(n.PtrBody(), o.free)
+ n.SetRight(orderStmtInPlace(n.Right(), o.free))
o.out = append(o.out, n)
o.cleanTemp(t)
// Clean temporaries from condition at
// beginning of both branches.
- case OIF:
+ case ir.OIF:
t := o.markTemp()
- n.Left = o.exprInPlace(n.Left)
- n.Nbody.Prepend(o.cleanTempNoPop(t)...)
- n.Rlist.Prepend(o.cleanTempNoPop(t)...)
+ n.SetLeft(o.exprInPlace(n.Left()))
+ n.PtrBody().Prepend(o.cleanTempNoPop(t)...)
+ n.PtrRlist().Prepend(o.cleanTempNoPop(t)...)
o.popTemp(t)
- orderBlock(&n.Nbody, o.free)
- orderBlock(&n.Rlist, o.free)
+ orderBlock(n.PtrBody(), o.free)
+ orderBlock(n.PtrRlist(), o.free)
o.out = append(o.out, n)
// Special: argument will be converted to interface using convT2E
// so make sure it is an addressable temporary.
- case OPANIC:
+ case ir.OPANIC:
t := o.markTemp()
- n.Left = o.expr(n.Left, nil)
- if !n.Left.Type.IsInterface() {
- n.Left = o.addrTemp(n.Left)
+ n.SetLeft(o.expr(n.Left(), nil))
+ if !n.Left().Type().IsInterface() {
+ n.SetLeft(o.addrTemp(n.Left()))
}
o.out = append(o.out, n)
o.cleanTemp(t)
- case ORANGE:
+ case ir.ORANGE:
// n.Right is the expression being ranged over.
// order it, and then make a copy if we need one.
// We almost always do, to ensure that we don't
@@ -766,40 +768,40 @@ func (o *Order) stmt(n *Node) {
// Mark []byte(str) range expression to reuse string backing storage.
// It is safe because the storage cannot be mutated.
- if n.Right.Op == OSTR2BYTES {
- n.Right.Op = OSTR2BYTESTMP
+ if n.Right().Op() == ir.OSTR2BYTES {
+ n.Right().SetOp(ir.OSTR2BYTESTMP)
}
t := o.markTemp()
- n.Right = o.expr(n.Right, nil)
+ n.SetRight(o.expr(n.Right(), nil))
orderBody := true
- switch n.Type.Etype {
+ switch n.Type().Etype {
default:
- Fatalf("order.stmt range %v", n.Type)
+ base.Fatalf("order.stmt range %v", n.Type())
- case TARRAY, TSLICE:
- if n.List.Len() < 2 || n.List.Second().isBlank() {
+ case types.TARRAY, types.TSLICE:
+ if n.List().Len() < 2 || ir.IsBlank(n.List().Second()) {
// for i := range x will only use x once, to compute len(x).
// No need to copy it.
break
}
fallthrough
- case TCHAN, TSTRING:
+ case types.TCHAN, types.TSTRING:
// chan, string, slice, array ranges use value multiple times.
// make copy.
- r := n.Right
+ r := n.Right()
- if r.Type.IsString() && r.Type != types.Types[TSTRING] {
- r = nod(OCONV, r, nil)
- r.Type = types.Types[TSTRING]
+ if r.Type().IsString() && r.Type() != types.Types[types.TSTRING] {
+ r = ir.Nod(ir.OCONV, r, nil)
+ r.SetType(types.Types[types.TSTRING])
r = typecheck(r, ctxExpr)
}
- n.Right = o.copyExpr(r, r.Type, false)
+ n.SetRight(o.copyExpr(r, r.Type(), false))
- case TMAP:
+ case types.TMAP:
if isMapClear(n) {
// Preserve the body of the map clear pattern so it can
// be detected during walk. The loop body will not be used
@@ -811,22 +813,22 @@ func (o *Order) stmt(n *Node) {
// copy the map value in case it is a map literal.
// TODO(rsc): Make tmp = literal expressions reuse tmp.
// For maps tmp is just one word so it hardly matters.
- r := n.Right
- n.Right = o.copyExpr(r, r.Type, false)
+ r := n.Right()
+ n.SetRight(o.copyExpr(r, r.Type(), false))
// prealloc[n] is the temp for the iterator.
// hiter contains pointers and needs to be zeroed.
- prealloc[n] = o.newTemp(hiter(n.Type), true)
+ prealloc[n] = o.newTemp(hiter(n.Type()), true)
}
- o.exprListInPlace(n.List)
+ o.exprListInPlace(n.List())
if orderBody {
- orderBlock(&n.Nbody, o.free)
+ orderBlock(n.PtrBody(), o.free)
}
o.out = append(o.out, n)
o.cleanTemp(t)
- case ORETURN:
- o.exprList(n.List)
+ case ir.ORETURN:
+ o.exprList(n.List())
o.out = append(o.out, n)
// Special: clean case temporaries in each block entry.
@@ -838,50 +840,50 @@ func (o *Order) stmt(n *Node) {
// reordered after the channel evaluation for a different
// case (if p were nil, then the timing of the fault would
// give this away).
- case OSELECT:
+ case ir.OSELECT:
t := o.markTemp()
- for _, n2 := range n.List.Slice() {
- if n2.Op != OCASE {
- Fatalf("order select case %v", n2.Op)
+ for _, n2 := range n.List().Slice() {
+ if n2.Op() != ir.OCASE {
+ base.Fatalf("order select case %v", n2.Op())
}
- r := n2.Left
+ r := n2.Left()
setlineno(n2)
// Append any new body prologue to ninit.
// The next loop will insert ninit into nbody.
- if n2.Ninit.Len() != 0 {
- Fatalf("order select ninit")
+ if n2.Init().Len() != 0 {
+ base.Fatalf("order select ninit")
}
if r == nil {
continue
}
- switch r.Op {
+ switch r.Op() {
default:
- Dump("select case", r)
- Fatalf("unknown op in select %v", r.Op)
+ ir.Dump("select case", r)
+ base.Fatalf("unknown op in select %v", r.Op())
// If this is case x := <-ch or case x, y := <-ch, the case has
// the ODCL nodes to declare x and y. We want to delay that
// declaration (and possible allocation) until inside the case body.
// Delete the ODCL nodes here and recreate them inside the body below.
- case OSELRECV, OSELRECV2:
+ case ir.OSELRECV, ir.OSELRECV2:
if r.Colas() {
i := 0
- if r.Ninit.Len() != 0 && r.Ninit.First().Op == ODCL && r.Ninit.First().Left == r.Left {
+ if r.Init().Len() != 0 && r.Init().First().Op() == ir.ODCL && r.Init().First().Left() == r.Left() {
i++
}
- if i < r.Ninit.Len() && r.Ninit.Index(i).Op == ODCL && r.List.Len() != 0 && r.Ninit.Index(i).Left == r.List.First() {
+ if i < r.Init().Len() && r.Init().Index(i).Op() == ir.ODCL && r.List().Len() != 0 && r.Init().Index(i).Left() == r.List().First() {
i++
}
- if i >= r.Ninit.Len() {
- r.Ninit.Set(nil)
+ if i >= r.Init().Len() {
+ r.PtrInit().Set(nil)
}
}
- if r.Ninit.Len() != 0 {
- dumplist("ninit", r.Ninit)
- Fatalf("ninit on select recv")
+ if r.Init().Len() != 0 {
+ ir.DumpList("ninit", r.Init())
+ base.Fatalf("ninit on select recv")
}
// case x = <-c
@@ -889,10 +891,10 @@ func (o *Order) stmt(n *Node) {
// r.Left() is x, r.List().First() is ok, r.Right() is ORECV, r.Right().Left() is c.
// r.Left() == nil means 'case <-c'.
// c is always evaluated; x and ok are only evaluated when assigned.
- r.Right.Left = o.expr(r.Right.Left, nil)
+ r.Right().SetLeft(o.expr(r.Right().Left(), nil))
- if r.Right.Left.Op != ONAME {
- r.Right.Left = o.copyExpr(r.Right.Left, r.Right.Left.Type, false)
+ if r.Right().Left().Op() != ir.ONAME {
+ r.Right().SetLeft(o.copyExpr(r.Right().Left(), r.Right().Left().Type(), false))
}
// Introduce temporary for receive and move actual copy into case body.
@@ -901,91 +903,91 @@ func (o *Order) stmt(n *Node) {
// temporary per distinct type, sharing the temp among all receives
// with that temp. Similarly one ok bool could be shared among all
// the x,ok receives. Not worth doing until there's a clear need.
- if r.Left != nil && r.Left.isBlank() {
- r.Left = nil
+ if r.Left() != nil && ir.IsBlank(r.Left()) {
+ r.SetLeft(nil)
}
- if r.Left != nil {
+ if r.Left() != nil {
// use channel element type for temporary to avoid conversions,
// such as in case interfacevalue = <-intchan.
// the conversion happens in the OAS instead.
- tmp1 := r.Left
+ tmp1 := r.Left()
if r.Colas() {
- tmp2 := nod(ODCL, tmp1, nil)
+ tmp2 := ir.Nod(ir.ODCL, tmp1, nil)
tmp2 = typecheck(tmp2, ctxStmt)
- n2.Ninit.Append(tmp2)
+ n2.PtrInit().Append(tmp2)
}
- r.Left = o.newTemp(r.Right.Left.Type.Elem(), r.Right.Left.Type.Elem().HasPointers())
- tmp2 := nod(OAS, tmp1, r.Left)
+ r.SetLeft(o.newTemp(r.Right().Left().Type().Elem(), r.Right().Left().Type().Elem().HasPointers()))
+ tmp2 := ir.Nod(ir.OAS, tmp1, r.Left())
tmp2 = typecheck(tmp2, ctxStmt)
- n2.Ninit.Append(tmp2)
+ n2.PtrInit().Append(tmp2)
}
- if r.List.Len() != 0 && r.List.First().isBlank() {
- r.List.Set(nil)
+ if r.List().Len() != 0 && ir.IsBlank(r.List().First()) {
+ r.PtrList().Set(nil)
}
- if r.List.Len() != 0 {
- tmp1 := r.List.First()
+ if r.List().Len() != 0 {
+ tmp1 := r.List().First()
if r.Colas() {
- tmp2 := nod(ODCL, tmp1, nil)
+ tmp2 := ir.Nod(ir.ODCL, tmp1, nil)
tmp2 = typecheck(tmp2, ctxStmt)
- n2.Ninit.Append(tmp2)
+ n2.PtrInit().Append(tmp2)
}
- r.List.Set1(o.newTemp(types.Types[TBOOL], false))
- tmp2 := okas(tmp1, r.List.First())
+ r.PtrList().Set1(o.newTemp(types.Types[types.TBOOL], false))
+ tmp2 := okas(tmp1, r.List().First())
tmp2 = typecheck(tmp2, ctxStmt)
- n2.Ninit.Append(tmp2)
+ n2.PtrInit().Append(tmp2)
}
- orderBlock(&n2.Ninit, o.free)
+ orderBlock(n2.PtrInit(), o.free)
- case OSEND:
- if r.Ninit.Len() != 0 {
- dumplist("ninit", r.Ninit)
- Fatalf("ninit on select send")
+ case ir.OSEND:
+ if r.Init().Len() != 0 {
+ ir.DumpList("ninit", r.Init())
+ base.Fatalf("ninit on select send")
}
// case c <- x
// r->left is c, r->right is x, both are always evaluated.
- r.Left = o.expr(r.Left, nil)
+ r.SetLeft(o.expr(r.Left(), nil))
- if !r.Left.IsAutoTmp() {
- r.Left = o.copyExpr(r.Left, r.Left.Type, false)
+ if !ir.IsAutoTmp(r.Left()) {
+ r.SetLeft(o.copyExpr(r.Left(), r.Left().Type(), false))
}
- r.Right = o.expr(r.Right, nil)
- if !r.Right.IsAutoTmp() {
- r.Right = o.copyExpr(r.Right, r.Right.Type, false)
+ r.SetRight(o.expr(r.Right(), nil))
+ if !ir.IsAutoTmp(r.Right()) {
+ r.SetRight(o.copyExpr(r.Right(), r.Right().Type(), false))
}
}
}
// Now that we have accumulated all the temporaries, clean them.
// Also insert any ninit queued during the previous loop.
// (The temporary cleaning must follow that ninit work.)
- for _, n3 := range n.List.Slice() {
- orderBlock(&n3.Nbody, o.free)
- n3.Nbody.Prepend(o.cleanTempNoPop(t)...)
+ for _, n3 := range n.List().Slice() {
+ orderBlock(n3.PtrBody(), o.free)
+ n3.PtrBody().Prepend(o.cleanTempNoPop(t)...)
// TODO(mdempsky): Is this actually necessary?
// walkselect appears to walk Ninit.
- n3.Nbody.Prepend(n3.Ninit.Slice()...)
- n3.Ninit.Set(nil)
+ n3.PtrBody().Prepend(n3.Init().Slice()...)
+ n3.PtrInit().Set(nil)
}
o.out = append(o.out, n)
o.popTemp(t)
// Special: value being sent is passed as a pointer; make it addressable.
- case OSEND:
+ case ir.OSEND:
t := o.markTemp()
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, nil)
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), nil))
if instrumenting {
// Force copying to the stack so that (chan T)(nil) <- x
// is still instrumented as a read of x.
- n.Right = o.copyExpr(n.Right, n.Right.Type, false)
+ n.SetRight(o.copyExpr(n.Right(), n.Right().Type(), false))
} else {
- n.Right = o.addrTemp(n.Right)
+ n.SetRight(o.addrTemp(n.Right()))
}
o.out = append(o.out, n)
o.cleanTemp(t)
@@ -997,35 +999,35 @@ func (o *Order) stmt(n *Node) {
// the if-else chain instead.)
// For now just clean all the temporaries at the end.
// In practice that's fine.
- case OSWITCH:
- if Debug_libfuzzer != 0 && !hasDefaultCase(n) {
+ case ir.OSWITCH:
+ if base.Debug.Libfuzzer != 0 && !hasDefaultCase(n) {
// Add empty "default:" case for instrumentation.
- n.List.Append(nod(OCASE, nil, nil))
+ n.PtrList().Append(ir.Nod(ir.OCASE, nil, nil))
}
t := o.markTemp()
- n.Left = o.expr(n.Left, nil)
- for _, ncas := range n.List.Slice() {
- if ncas.Op != OCASE {
- Fatalf("order switch case %v", ncas.Op)
+ n.SetLeft(o.expr(n.Left(), nil))
+ for _, ncas := range n.List().Slice() {
+ if ncas.Op() != ir.OCASE {
+ base.Fatalf("order switch case %v", ncas.Op())
}
- o.exprListInPlace(ncas.List)
- orderBlock(&ncas.Nbody, o.free)
+ o.exprListInPlace(ncas.List())
+ orderBlock(ncas.PtrBody(), o.free)
}
o.out = append(o.out, n)
o.cleanTemp(t)
}
- lineno = lno
+ base.Pos = lno
}
-func hasDefaultCase(n *Node) bool {
- for _, ncas := range n.List.Slice() {
- if ncas.Op != OCASE {
- Fatalf("expected case, found %v", ncas.Op)
+func hasDefaultCase(n ir.Node) bool {
+ for _, ncas := range n.List().Slice() {
+ if ncas.Op() != ir.OCASE {
+ base.Fatalf("expected case, found %v", ncas.Op())
}
- if ncas.List.Len() == 0 {
+ if ncas.List().Len() == 0 {
return true
}
}
@@ -1033,7 +1035,7 @@ func hasDefaultCase(n *Node) bool {
}
// exprList orders the expression list l into o.
-func (o *Order) exprList(l Nodes) {
+func (o *Order) exprList(l ir.Nodes) {
s := l.Slice()
for i := range s {
s[i] = o.expr(s[i], nil)
@@ -1042,7 +1044,7 @@ func (o *Order) exprList(l Nodes) {
// exprListInPlace orders the expression list l but saves
// the side effects on the individual expression ninit lists.
-func (o *Order) exprListInPlace(l Nodes) {
+func (o *Order) exprListInPlace(l ir.Nodes) {
s := l.Slice()
for i := range s {
s[i] = o.exprInPlace(s[i])
@@ -1050,7 +1052,7 @@ func (o *Order) exprListInPlace(l Nodes) {
}
// prealloc[x] records the allocation to use for x.
-var prealloc = map[*Node]*Node{}
+var prealloc = map[ir.Node]ir.Node{}
// expr orders a single expression, appending side
// effects to o.out as needed.
@@ -1059,7 +1061,7 @@ var prealloc = map[*Node]*Node{}
// to avoid copying the result of the expression to a temporary.)
// The result of expr MUST be assigned back to n, e.g.
// n.Left = o.expr(n.Left, lhs)
-func (o *Order) expr(n, lhs *Node) *Node {
+func (o *Order) expr(n, lhs ir.Node) ir.Node {
if n == nil {
return n
}
@@ -1067,21 +1069,21 @@ func (o *Order) expr(n, lhs *Node) *Node {
lno := setlineno(n)
o.init(n)
- switch n.Op {
+ switch n.Op() {
default:
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, nil)
- o.exprList(n.List)
- o.exprList(n.Rlist)
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), nil))
+ o.exprList(n.List())
+ o.exprList(n.Rlist())
// Addition of strings turns into a function call.
// Allocate a temporary to hold the strings.
// Fewer than 5 strings use direct runtime helpers.
- case OADDSTR:
- o.exprList(n.List)
+ case ir.OADDSTR:
+ o.exprList(n.List())
- if n.List.Len() > 5 {
- t := types.NewArray(types.Types[TSTRING], int64(n.List.Len()))
+ if n.List().Len() > 5 {
+ t := types.NewArray(types.Types[types.TSTRING], int64(n.List().Len()))
prealloc[n] = o.newTemp(t, false)
}
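A hedged sketch of what the temporary buys (variable names are hypothetical; the fixed-arity and generic concatenation helpers live in the runtime):

    package main

    import "fmt"

    func main() {
            a, b, c, d, e, f := "1", "2", "3", "4", "5", "6"
            // Six operands, so n.List().Len() > 5: order preallocates a
            // [6]string array temporary for n, and walk later lowers the
            // addition to the generic runtime concatenation helper with
            // that array backing the operand slice. With 5 or fewer
            // operands a fixed-arity helper (concatstring2..concatstring5)
            // is called and no array temporary is needed.
            s := a + b + c + d + e + f
            fmt.Println(s) // 123456
    }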
@@ -1095,22 +1097,22 @@ func (o *Order) expr(n, lhs *Node) *Node {
hasbyte := false
haslit := false
- for _, n1 := range n.List.Slice() {
- hasbyte = hasbyte || n1.Op == OBYTES2STR
- haslit = haslit || n1.Op == OLITERAL && len(n1.StringVal()) != 0
+ for _, n1 := range n.List().Slice() {
+ hasbyte = hasbyte || n1.Op() == ir.OBYTES2STR
+ haslit = haslit || n1.Op() == ir.OLITERAL && len(n1.StringVal()) != 0
}
if haslit && hasbyte {
- for _, n2 := range n.List.Slice() {
- if n2.Op == OBYTES2STR {
- n2.Op = OBYTES2STRTMP
+ for _, n2 := range n.List().Slice() {
+ if n2.Op() == ir.OBYTES2STR {
+ n2.SetOp(ir.OBYTES2STRTMP)
}
}
}
- case OINDEXMAP:
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, nil)
+ case ir.OINDEXMAP:
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), nil))
needCopy := false
if !n.IndexMapLValue() {
@@ -1118,7 +1120,7 @@ func (o *Order) expr(n, lhs *Node) *Node {
// cannot be changed before the map index by forcing
// the map index to happen immediately following the
// conversions. See copyExpr a few lines below.
- needCopy = mapKeyReplaceStrConv(n.Right)
+ needCopy = mapKeyReplaceStrConv(n.Right())
if instrumenting {
// Race detector needs the copy so it can
@@ -1128,40 +1130,40 @@ func (o *Order) expr(n, lhs *Node) *Node {
}
// key must be addressable
- n.Right = o.mapKeyTemp(n.Left.Type, n.Right)
+ n.SetRight(o.mapKeyTemp(n.Left().Type(), n.Right()))
if needCopy {
- n = o.copyExpr(n, n.Type, false)
+ n = o.copyExpr(n, n.Type(), false)
}
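For illustration, the aliasing this enables in the common read-only case (a minimal sketch):

    package main

    import "fmt"

    func main() {
            m := map[string]int{"key": 1}
            b := []byte("key")
            // In a map read, string(b) may alias b's backing array rather
            // than allocate, because the key is only read for the duration
            // of the lookup. The needCopy logic above guards the cases
            // where that aliasing would be observable (string conversions
            // rewritten inside the key, or the race detector needing a
            // stable copy).
            fmt.Println(m[string(b)]) // 1
    }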
// concrete type (not interface) argument might need an addressable
// temporary to pass to the runtime conversion routine.
- case OCONVIFACE:
- n.Left = o.expr(n.Left, nil)
- if n.Left.Type.IsInterface() {
+ case ir.OCONVIFACE:
+ n.SetLeft(o.expr(n.Left(), nil))
+ if n.Left().Type().IsInterface() {
break
}
- if _, needsaddr := convFuncName(n.Left.Type, n.Type); needsaddr || isStaticCompositeLiteral(n.Left) {
+ if _, needsaddr := convFuncName(n.Left().Type(), n.Type()); needsaddr || isStaticCompositeLiteral(n.Left()) {
// Need a temp if we need to pass the address to the conversion function.
// We also process static composite literal nodes here, making a named static global
// whose address we can put directly in an interface (see OCONVIFACE case in walk).
- n.Left = o.addrTemp(n.Left)
+ n.SetLeft(o.addrTemp(n.Left()))
}
- case OCONVNOP:
- if n.Type.IsKind(TUNSAFEPTR) && n.Left.Type.IsKind(TUINTPTR) && (n.Left.Op == OCALLFUNC || n.Left.Op == OCALLINTER || n.Left.Op == OCALLMETH) {
+ case ir.OCONVNOP:
+ if n.Type().IsKind(types.TUNSAFEPTR) && n.Left().Type().IsKind(types.TUINTPTR) && (n.Left().Op() == ir.OCALLFUNC || n.Left().Op() == ir.OCALLINTER || n.Left().Op() == ir.OCALLMETH) {
// When reordering unsafe.Pointer(f()) into a separate
// statement, the conversion and function call must stay
// together. See golang.org/issue/15329.
- o.init(n.Left)
- o.call(n.Left)
- if lhs == nil || lhs.Op != ONAME || instrumenting {
- n = o.copyExpr(n, n.Type, false)
+ o.init(n.Left())
+ o.call(n.Left())
+ if lhs == nil || lhs.Op() != ir.ONAME || instrumenting {
+ n = o.copyExpr(n, n.Type(), false)
}
} else {
- n.Left = o.expr(n.Left, nil)
+ n.SetLeft(o.expr(n.Left(), nil))
}
- case OANDAND, OOROR:
+ case ir.OANDAND, ir.OOROR:
// ... = LHS && RHS
//
// var r bool
@@ -1171,78 +1173,78 @@ func (o *Order) expr(n, lhs *Node) *Node {
// }
// ... = r
- r := o.newTemp(n.Type, false)
+ r := o.newTemp(n.Type(), false)
// Evaluate left-hand side.
- lhs := o.expr(n.Left, nil)
- o.out = append(o.out, typecheck(nod(OAS, r, lhs), ctxStmt))
+ lhs := o.expr(n.Left(), nil)
+ o.out = append(o.out, typecheck(ir.Nod(ir.OAS, r, lhs), ctxStmt))
// Evaluate right-hand side, save generated code.
saveout := o.out
o.out = nil
t := o.markTemp()
o.edge()
- rhs := o.expr(n.Right, nil)
- o.out = append(o.out, typecheck(nod(OAS, r, rhs), ctxStmt))
+ rhs := o.expr(n.Right(), nil)
+ o.out = append(o.out, typecheck(ir.Nod(ir.OAS, r, rhs), ctxStmt))
o.cleanTemp(t)
gen := o.out
o.out = saveout
// If left-hand side doesn't cause a short-circuit, issue right-hand side.
- nif := nod(OIF, r, nil)
- if n.Op == OANDAND {
- nif.Nbody.Set(gen)
+ nif := ir.Nod(ir.OIF, r, nil)
+ if n.Op() == ir.OANDAND {
+ nif.PtrBody().Set(gen)
} else {
- nif.Rlist.Set(gen)
+ nif.PtrRlist().Set(gen)
}
o.out = append(o.out, nif)
n = r
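A source-level rendering of this rewrite (lhs and rhs are hypothetical functions standing in for the operand expressions):

    package main

    import "fmt"

    func lhs() bool { fmt.Println("eval lhs"); return false }
    func rhs() bool { fmt.Println("eval rhs"); return true }

    func main() {
            // ... = lhs() && rhs() is ordered as:
            r := lhs() // r is the temporary from o.newTemp
            if r {     // for OOROR, gen goes in the else branch instead
                    r = rhs()
            }
            fmt.Println(r) // prints only "eval lhs": short-circuiting preserved
    }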
- case OCALLFUNC,
- OCALLINTER,
- OCALLMETH,
- OCAP,
- OCOMPLEX,
- OCOPY,
- OIMAG,
- OLEN,
- OMAKECHAN,
- OMAKEMAP,
- OMAKESLICE,
- OMAKESLICECOPY,
- ONEW,
- OREAL,
- ORECOVER,
- OSTR2BYTES,
- OSTR2BYTESTMP,
- OSTR2RUNES:
+ case ir.OCALLFUNC,
+ ir.OCALLINTER,
+ ir.OCALLMETH,
+ ir.OCAP,
+ ir.OCOMPLEX,
+ ir.OCOPY,
+ ir.OIMAG,
+ ir.OLEN,
+ ir.OMAKECHAN,
+ ir.OMAKEMAP,
+ ir.OMAKESLICE,
+ ir.OMAKESLICECOPY,
+ ir.ONEW,
+ ir.OREAL,
+ ir.ORECOVER,
+ ir.OSTR2BYTES,
+ ir.OSTR2BYTESTMP,
+ ir.OSTR2RUNES:
if isRuneCount(n) {
// len([]rune(s)) is rewritten to runtime.countrunes(s) later.
- n.Left.Left = o.expr(n.Left.Left, nil)
+ n.Left().SetLeft(o.expr(n.Left().Left(), nil))
} else {
o.call(n)
}
- if lhs == nil || lhs.Op != ONAME || instrumenting {
- n = o.copyExpr(n, n.Type, false)
+ if lhs == nil || lhs.Op() != ir.ONAME || instrumenting {
+ n = o.copyExpr(n, n.Type(), false)
}
- case OAPPEND:
+ case ir.OAPPEND:
// Check for append(x, make([]T, y)...).
if isAppendOfMake(n) {
- n.List.SetFirst(o.expr(n.List.First(), nil)) // order x
- n.List.Second().Left = o.expr(n.List.Second().Left, nil) // order y
+ n.List().SetFirst(o.expr(n.List().First(), nil)) // order x
+ n.List().Second().SetLeft(o.expr(n.List().Second().Left(), nil)) // order y
} else {
- o.exprList(n.List)
+ o.exprList(n.List())
}
- if lhs == nil || lhs.Op != ONAME && !samesafeexpr(lhs, n.List.First()) {
- n = o.copyExpr(n, n.Type, false)
+ if lhs == nil || lhs.Op() != ir.ONAME && !samesafeexpr(lhs, n.List().First()) {
+ n = o.copyExpr(n, n.Type(), false)
}
- case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
- n.Left = o.expr(n.Left, nil)
+ case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
+ n.SetLeft(o.expr(n.Left(), nil))
low, high, max := n.SliceBounds()
low = o.expr(low, nil)
low = o.cheapExpr(low)
@@ -1251,65 +1253,65 @@ func (o *Order) expr(n, lhs *Node) *Node {
max = o.expr(max, nil)
max = o.cheapExpr(max)
n.SetSliceBounds(low, high, max)
- if lhs == nil || lhs.Op != ONAME && !samesafeexpr(lhs, n.Left) {
- n = o.copyExpr(n, n.Type, false)
+ if lhs == nil || lhs.Op() != ir.ONAME && !samesafeexpr(lhs, n.Left()) {
+ n = o.copyExpr(n, n.Type(), false)
}
- case OCLOSURE:
- if n.Transient() && n.Func.Closure.Func.Cvars.Len() > 0 {
+ case ir.OCLOSURE:
+ if n.Transient() && n.Func().ClosureVars.Len() > 0 {
prealloc[n] = o.newTemp(closureType(n), false)
}
- case OSLICELIT, OCALLPART:
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, nil)
- o.exprList(n.List)
- o.exprList(n.Rlist)
+ case ir.OSLICELIT, ir.OCALLPART:
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), nil))
+ o.exprList(n.List())
+ o.exprList(n.Rlist())
if n.Transient() {
var t *types.Type
- switch n.Op {
- case OSLICELIT:
- t = types.NewArray(n.Type.Elem(), n.Right.Int64Val())
- case OCALLPART:
+ switch n.Op() {
+ case ir.OSLICELIT:
+ t = types.NewArray(n.Type().Elem(), n.Right().Int64Val())
+ case ir.OCALLPART:
t = partialCallType(n)
}
prealloc[n] = o.newTemp(t, false)
}
- case ODOTTYPE, ODOTTYPE2:
- n.Left = o.expr(n.Left, nil)
- if !isdirectiface(n.Type) || instrumenting {
- n = o.copyExpr(n, n.Type, true)
+ case ir.ODOTTYPE, ir.ODOTTYPE2:
+ n.SetLeft(o.expr(n.Left(), nil))
+ if !isdirectiface(n.Type()) || instrumenting {
+ n = o.copyExpr(n, n.Type(), true)
}
- case ORECV:
- n.Left = o.expr(n.Left, nil)
- n = o.copyExpr(n, n.Type, true)
+ case ir.ORECV:
+ n.SetLeft(o.expr(n.Left(), nil))
+ n = o.copyExpr(n, n.Type(), true)
- case OEQ, ONE, OLT, OLE, OGT, OGE:
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, nil)
+ case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), nil))
- t := n.Left.Type
+ t := n.Left().Type()
switch {
case t.IsString():
// Mark string(byteSlice) arguments to reuse byteSlice backing
// buffer during conversion. String comparison does not
// retain the strings for later use, so it is safe.
- if n.Left.Op == OBYTES2STR {
- n.Left.Op = OBYTES2STRTMP
+ if n.Left().Op() == ir.OBYTES2STR {
+ n.Left().SetOp(ir.OBYTES2STRTMP)
}
- if n.Right.Op == OBYTES2STR {
- n.Right.Op = OBYTES2STRTMP
+ if n.Right().Op() == ir.OBYTES2STR {
+ n.Right().SetOp(ir.OBYTES2STRTMP)
}
case t.IsStruct() || t.IsArray():
// for complex comparisons, we need both args to be
// addressable so we can pass them to the runtime.
- n.Left = o.addrTemp(n.Left)
- n.Right = o.addrTemp(n.Right)
+ n.SetLeft(o.addrTemp(n.Left()))
+ n.SetRight(o.addrTemp(n.Right()))
}
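A minimal sketch of the string-comparison case (the byte slice and literal are illustrative):

    package main

    import "fmt"

    func main() {
            b := []byte("hello")
            // The conversion is marked OBYTES2STRTMP: string(b) may reuse
            // b's backing array, since == only reads the bytes and the
            // converted string does not outlive the comparison.
            if string(b) == "hello" {
                    fmt.Println("equal, without copying b")
            }
    }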
- case OMAPLIT:
+ case ir.OMAPLIT:
// Order map by converting:
// map[int]int{
// a(): b(),
@@ -1325,15 +1327,15 @@ func (o *Order) expr(n, lhs *Node) *Node {
// Without this special case, order would otherwise compute all
// the keys and values before storing any of them to the map.
// See issue 26552.
- entries := n.List.Slice()
+ entries := n.List().Slice()
statics := entries[:0]
- var dynamics []*Node
+ var dynamics []ir.Node
for _, r := range entries {
- if r.Op != OKEY {
- Fatalf("OMAPLIT entry not OKEY: %v\n", r)
+ if r.Op() != ir.OKEY {
+ base.Fatalf("OMAPLIT entry not OKEY: %v\n", r)
}
- if !isStaticCompositeLiteral(r.Left) || !isStaticCompositeLiteral(r.Right) {
+ if !isStaticCompositeLiteral(r.Left()) || !isStaticCompositeLiteral(r.Right()) {
dynamics = append(dynamics, r)
continue
}
@@ -1341,45 +1343,45 @@ func (o *Order) expr(n, lhs *Node) *Node {
// Recursively ordering some static entries can change them to dynamic;
// e.g., OCONVIFACE nodes. See #31777.
r = o.expr(r, nil)
- if !isStaticCompositeLiteral(r.Left) || !isStaticCompositeLiteral(r.Right) {
+ if !isStaticCompositeLiteral(r.Left()) || !isStaticCompositeLiteral(r.Right()) {
dynamics = append(dynamics, r)
continue
}
statics = append(statics, r)
}
- n.List.Set(statics)
+ n.PtrList().Set(statics)
if len(dynamics) == 0 {
break
}
// Emit the creation of the map (with all its static entries).
- m := o.newTemp(n.Type, false)
- as := nod(OAS, m, n)
+ m := o.newTemp(n.Type(), false)
+ as := ir.Nod(ir.OAS, m, n)
typecheck(as, ctxStmt)
o.stmt(as)
n = m
// Emit eval+insert of dynamic entries, one at a time.
for _, r := range dynamics {
- as := nod(OAS, nod(OINDEX, n, r.Left), r.Right)
+ as := ir.Nod(ir.OAS, ir.Nod(ir.OINDEX, n, r.Left()), r.Right())
typecheck(as, ctxStmt) // Note: this converts the OINDEX to an OINDEXMAP
o.stmt(as)
}
}
- lineno = lno
+ base.Pos = lno
return n
}
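A source-level sketch of the OMAPLIT split (a and b are hypothetical functions with observable side effects):

    package main

    import "fmt"

    func a() int { fmt.Println("key"); return 1 }
    func b() int { fmt.Println("value"); return 2 }

    func main() {
            // m := map[int]int{0: 0, a(): b()} is ordered as if written:
            m := map[int]int{0: 0} // static entries stay in the literal
            m[a()] = b()           // each dynamic pair is evaluated and
                                   // inserted before the next pair starts
            fmt.Println(m)
    }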
// okas creates and returns an assignment of val to ok,
// including an explicit conversion if necessary.
-func okas(ok, val *Node) *Node {
- if !ok.isBlank() {
- val = conv(val, ok.Type)
+func okas(ok, val ir.Node) ir.Node {
+ if !ir.IsBlank(ok) {
+ val = conv(val, ok.Type())
}
- return nod(OAS, ok, val)
+ return ir.Nod(ir.OAS, ok, val)
}
// as2 orders OAS2XXXX nodes. It creates temporaries to ensure left-to-right assignment.
@@ -1390,13 +1392,13 @@ func okas(ok, val *Node) *Node {
// tmp1, tmp2, tmp3 = ...
// a, b, a = tmp1, tmp2, tmp3
// This is necessary to ensure left to right assignment order.
-func (o *Order) as2(n *Node) {
- tmplist := []*Node{}
- left := []*Node{}
- for ni, l := range n.List.Slice() {
- if !l.isBlank() {
- tmp := o.newTemp(l.Type, l.Type.HasPointers())
- n.List.SetIndex(ni, tmp)
+func (o *Order) as2(n ir.Node) {
+ tmplist := []ir.Node{}
+ left := []ir.Node{}
+ for ni, l := range n.List().Slice() {
+ if !ir.IsBlank(l) {
+ tmp := o.newTemp(l.Type(), l.Type().HasPointers())
+ n.List().SetIndex(ni, tmp)
tmplist = append(tmplist, tmp)
left = append(left, l)
}
@@ -1404,38 +1406,38 @@ func (o *Order) as2(n *Node) {
o.out = append(o.out, n)
- as := nod(OAS2, nil, nil)
- as.List.Set(left)
- as.Rlist.Set(tmplist)
+ as := ir.Nod(ir.OAS2, nil, nil)
+ as.PtrList().Set(left)
+ as.PtrRlist().Set(tmplist)
as = typecheck(as, ctxStmt)
o.stmt(as)
}
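The transformation in the comment above, written out as ordinary Go (a minimal sketch; tmp1..tmp3 correspond to the temporaries from o.newTemp):

    package main

    import "fmt"

    func main() {
            a, b := 0, 0
            // For "a, b, a = 1, 2, 3" the ordering is equivalent to:
            tmp1, tmp2, tmp3 := 1, 2, 3 // all RHS values are computed first
            a, b, a = tmp1, tmp2, tmp3  // then stored left to right
            fmt.Println(a, b)           // 3 2: the rightmost store to a wins
    }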
// okAs2 orders OAS2XXX with ok.
// Just like as2, this also adds temporaries to ensure left-to-right assignment.
-func (o *Order) okAs2(n *Node) {
- var tmp1, tmp2 *Node
- if !n.List.First().isBlank() {
- typ := n.Right.Type
+func (o *Order) okAs2(n ir.Node) {
+ var tmp1, tmp2 ir.Node
+ if !ir.IsBlank(n.List().First()) {
+ typ := n.Right().Type()
tmp1 = o.newTemp(typ, typ.HasPointers())
}
- if !n.List.Second().isBlank() {
- tmp2 = o.newTemp(types.Types[TBOOL], false)
+ if !ir.IsBlank(n.List().Second()) {
+ tmp2 = o.newTemp(types.Types[types.TBOOL], false)
}
o.out = append(o.out, n)
if tmp1 != nil {
- r := nod(OAS, n.List.First(), tmp1)
+ r := ir.Nod(ir.OAS, n.List().First(), tmp1)
r = typecheck(r, ctxStmt)
o.mapAssign(r)
- n.List.SetFirst(tmp1)
+ n.List().SetFirst(tmp1)
}
if tmp2 != nil {
- r := okas(n.List.Second(), tmp2)
+ r := okas(n.List().Second(), tmp2)
r = typecheck(r, ctxStmt)
o.mapAssign(r)
- n.List.SetSecond(tmp2)
+ n.List().SetSecond(tmp2)
}
}
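A sketch of the comma-ok case (hypothetical map and key; tmp1/tmp2 mirror the temporaries created above):

    package main

    import "fmt"

    func main() {
            m := map[string]int{"x": 1}
            var v int
            var ok bool
            // For "v, ok = m[k]", okAs2 has the comma-ok assignment target
            // temporaries first and then copies out, roughly:
            tmp1, tmp2 := m["x"] // the OAS2MAPR lands in the temporaries
            v = tmp1             // copied to the real destinations
            ok = tmp2            // left to right, via mapAssign
            fmt.Println(v, ok)   // 1 true
    }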
diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go
index 353f4b08c9..221b733a07 100644
--- a/src/cmd/compile/internal/gc/pgen.go
+++ b/src/cmd/compile/internal/gc/pgen.go
@@ -5,6 +5,8 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/dwarf"
@@ -22,35 +24,34 @@ import (
// "Portable" code generation.
var (
- nBackendWorkers int // number of concurrent backend workers, set by a compiler flag
- compilequeue []*Node // functions waiting to be compiled
+ compilequeue []ir.Node // functions waiting to be compiled
)
-func emitptrargsmap(fn *Node) {
- if fn.funcname() == "_" || fn.Func.Nname.Sym.Linkname != "" {
+func emitptrargsmap(fn ir.Node) {
+ if ir.FuncName(fn) == "_" || fn.Func().Nname.Sym().Linkname != "" {
return
}
- lsym := Ctxt.Lookup(fn.Func.lsym.Name + ".args_stackmap")
+ lsym := base.Ctxt.Lookup(fn.Func().LSym.Name + ".args_stackmap")
- nptr := int(fn.Type.ArgWidth() / int64(Widthptr))
+ nptr := int(fn.Type().ArgWidth() / int64(Widthptr))
bv := bvalloc(int32(nptr) * 2)
nbitmap := 1
- if fn.Type.NumResults() > 0 {
+ if fn.Type().NumResults() > 0 {
nbitmap = 2
}
off := duint32(lsym, 0, uint32(nbitmap))
off = duint32(lsym, off, uint32(bv.n))
- if fn.IsMethod() {
- onebitwalktype1(fn.Type.Recvs(), 0, bv)
+ if ir.IsMethod(fn) {
+ onebitwalktype1(fn.Type().Recvs(), 0, bv)
}
- if fn.Type.NumParams() > 0 {
- onebitwalktype1(fn.Type.Params(), 0, bv)
+ if fn.Type().NumParams() > 0 {
+ onebitwalktype1(fn.Type().Params(), 0, bv)
}
off = dbvec(lsym, off, bv)
- if fn.Type.NumResults() > 0 {
- onebitwalktype1(fn.Type.Results(), 0, bv)
+ if fn.Type().NumResults() > 0 {
+ onebitwalktype1(fn.Type().Results(), 0, bv)
off = dbvec(lsym, off, bv)
}
@@ -67,40 +68,40 @@ func emitptrargsmap(fn *Node) {
// really means, in memory, things with pointers needing zeroing at
// the top of the stack and increasing in size.
// Non-autos sort on offset.
-func cmpstackvarlt(a, b *Node) bool {
- if (a.Class() == PAUTO) != (b.Class() == PAUTO) {
- return b.Class() == PAUTO
+func cmpstackvarlt(a, b ir.Node) bool {
+ if (a.Class() == ir.PAUTO) != (b.Class() == ir.PAUTO) {
+ return b.Class() == ir.PAUTO
}
- if a.Class() != PAUTO {
- return a.Xoffset < b.Xoffset
+ if a.Class() != ir.PAUTO {
+ return a.Offset() < b.Offset()
}
- if a.Name.Used() != b.Name.Used() {
- return a.Name.Used()
+ if a.Name().Used() != b.Name().Used() {
+ return a.Name().Used()
}
- ap := a.Type.HasPointers()
- bp := b.Type.HasPointers()
+ ap := a.Type().HasPointers()
+ bp := b.Type().HasPointers()
if ap != bp {
return ap
}
- ap = a.Name.Needzero()
- bp = b.Name.Needzero()
+ ap = a.Name().Needzero()
+ bp = b.Name().Needzero()
if ap != bp {
return ap
}
- if a.Type.Width != b.Type.Width {
- return a.Type.Width > b.Type.Width
+ if a.Type().Width != b.Type().Width {
+ return a.Type().Width > b.Type().Width
}
- return a.Sym.Name < b.Sym.Name
+ return a.Sym().Name < b.Sym().Name
}
// byStackvar implements sort.Interface for []*Node using cmpstackvarlt.
-type byStackVar []*Node
+type byStackVar []ir.Node
func (s byStackVar) Len() int { return len(s) }
func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
@@ -109,33 +110,33 @@ func (s byStackVar) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s *ssafn) AllocFrame(f *ssa.Func) {
s.stksize = 0
s.stkptrsize = 0
- fn := s.curfn.Func
+ fn := s.curfn.Func()
// Mark the PAUTOs unused.
for _, ln := range fn.Dcl {
- if ln.Class() == PAUTO {
- ln.Name.SetUsed(false)
+ if ln.Class() == ir.PAUTO {
+ ln.Name().SetUsed(false)
}
}
for _, l := range f.RegAlloc {
if ls, ok := l.(ssa.LocalSlot); ok {
- ls.N.(*Node).Name.SetUsed(true)
+ ls.N.Name().SetUsed(true)
}
}
scratchUsed := false
for _, b := range f.Blocks {
for _, v := range b.Values {
- if n, ok := v.Aux.(*Node); ok {
+ if n, ok := v.Aux.(ir.Node); ok {
switch n.Class() {
- case PPARAM, PPARAMOUT:
+ case ir.PPARAM, ir.PPARAMOUT:
// Don't modify nodfp; it is a global.
if n != nodfp {
- n.Name.SetUsed(true)
+ n.Name().SetUsed(true)
}
- case PAUTO:
- n.Name.SetUsed(true)
+ case ir.PAUTO:
+ n.Name().SetUsed(true)
}
}
if !scratchUsed {
@@ -146,7 +147,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) {
}
if f.Config.NeedsFpScratch && scratchUsed {
- s.scratchFpMem = tempAt(src.NoXPos, s.curfn, types.Types[TUINT64])
+ s.scratchFpMem = tempAt(src.NoXPos, s.curfn, types.Types[types.TUINT64])
}
sort.Sort(byStackVar(fn.Dcl))
@@ -154,18 +155,18 @@ func (s *ssafn) AllocFrame(f *ssa.Func) {
// Reassign stack offsets of the locals that are used.
lastHasPtr := false
for i, n := range fn.Dcl {
- if n.Op != ONAME || n.Class() != PAUTO {
+ if n.Op() != ir.ONAME || n.Class() != ir.PAUTO {
continue
}
- if !n.Name.Used() {
+ if !n.Name().Used() {
fn.Dcl = fn.Dcl[:i]
break
}
- dowidth(n.Type)
- w := n.Type.Width
+ dowidth(n.Type())
+ w := n.Type().Width
if w >= thearch.MAXWIDTH || w < 0 {
- Fatalf("bad width")
+ base.Fatalf("bad width")
}
if w == 0 && lastHasPtr {
// Pad between a pointer-containing object and a zero-sized object.
@@ -175,8 +176,8 @@ func (s *ssafn) AllocFrame(f *ssa.Func) {
w = 1
}
s.stksize += w
- s.stksize = Rnd(s.stksize, int64(n.Type.Align))
- if n.Type.HasPointers() {
+ s.stksize = Rnd(s.stksize, int64(n.Type().Align))
+ if n.Type().HasPointers() {
s.stkptrsize = s.stksize
lastHasPtr = true
} else {
@@ -185,59 +186,58 @@ func (s *ssafn) AllocFrame(f *ssa.Func) {
if thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
s.stksize = Rnd(s.stksize, int64(Widthptr))
}
- n.Xoffset = -s.stksize
+ n.SetOffset(-s.stksize)
}
s.stksize = Rnd(s.stksize, int64(Widthreg))
s.stkptrsize = Rnd(s.stkptrsize, int64(Widthreg))
}
-func funccompile(fn *Node) {
+func funccompile(fn ir.Node) {
if Curfn != nil {
- Fatalf("funccompile %v inside %v", fn.Func.Nname.Sym, Curfn.Func.Nname.Sym)
+ base.Fatalf("funccompile %v inside %v", fn.Func().Nname.Sym(), Curfn.Func().Nname.Sym())
}
- if fn.Type == nil {
- if nerrors == 0 {
- Fatalf("funccompile missing type")
+ if fn.Type() == nil {
+ if base.Errors() == 0 {
+ base.Fatalf("funccompile missing type")
}
return
}
// assign parameter offsets
- dowidth(fn.Type)
+ dowidth(fn.Type())
- if fn.Nbody.Len() == 0 {
+ if fn.Body().Len() == 0 {
// Initialize ABI wrappers if necessary.
- fn.Func.initLSym(false)
+ initLSym(fn.Func(), false)
emitptrargsmap(fn)
return
}
- dclcontext = PAUTO
+ dclcontext = ir.PAUTO
Curfn = fn
compile(fn)
Curfn = nil
- dclcontext = PEXTERN
+ dclcontext = ir.PEXTERN
}
-func compile(fn *Node) {
- saveerrors()
-
+func compile(fn ir.Node) {
+ errorsBefore := base.Errors()
order(fn)
- if nerrors != 0 {
+ if base.Errors() > errorsBefore {
return
}
// Set up the function's LSym early to avoid data races with the assemblers.
// Do this before walk, as walk needs the LSym to set attributes/relocations
// (e.g. in markTypeUsedInInterface).
- fn.Func.initLSym(true)
+ initLSym(fn.Func(), true)
walk(fn)
- if nerrors != 0 {
+ if base.Errors() > errorsBefore {
return
}
if instrumenting {
@@ -247,7 +247,7 @@ func compile(fn *Node) {
// From this point, there should be no uses of Curfn. Enforce that.
Curfn = nil
- if fn.funcname() == "_" {
+ if ir.FuncName(fn) == "_" {
// We don't need to generate code for this function, just report errors in its body.
// At this point we've generated any errors needed.
// (Beyond here we generate only non-spec errors, like "stack frame too large".)
@@ -259,15 +259,15 @@ func compile(fn *Node) {
// be types of stack objects. We need to do this here
// because symbols must be allocated before the parallel
// phase of the compiler.
- for _, n := range fn.Func.Dcl {
+ for _, n := range fn.Func().Dcl {
switch n.Class() {
- case PPARAM, PPARAMOUT, PAUTO:
- if livenessShouldTrack(n) && n.Name.Addrtaken() {
- dtypesym(n.Type)
+ case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO:
+ if livenessShouldTrack(n) && n.Name().Addrtaken() {
+ dtypesym(n.Type())
// Also make sure we allocate a linker symbol
// for the stack object data, for the same reason.
- if fn.Func.lsym.Func().StackObjects == nil {
- fn.Func.lsym.Func().StackObjects = Ctxt.Lookup(fn.Func.lsym.Name + ".stkobj")
+ if fn.Func().LSym.Func().StackObjects == nil {
+ fn.Func().LSym.Func().StackObjects = base.Ctxt.Lookup(fn.Func().LSym.Name + ".stkobj")
}
}
}
@@ -284,29 +284,29 @@ func compile(fn *Node) {
// If functions are not compiled immediately,
// they are enqueued in compilequeue,
// which is drained by compileFunctions.
-func compilenow(fn *Node) bool {
+func compilenow(fn ir.Node) bool {
// Issue 38068: if this function is a method AND an inline
// candidate AND was not inlined (yet), put it onto the compile
// queue instead of compiling it immediately. This is in case we
// wind up inlining it into a method wrapper that is generated by
// compiling a function later on in the xtop list.
- if fn.IsMethod() && isInlinableButNotInlined(fn) {
+ if ir.IsMethod(fn) && isInlinableButNotInlined(fn) {
return false
}
- return nBackendWorkers == 1 && Debug_compilelater == 0
+ return base.Flag.LowerC == 1 && base.Debug.CompileLater == 0
}
// isInlinableButNotInlined returns true if 'fn' was marked as an
// inline candidate but then never inlined (presumably because we
// found no call sites).
-func isInlinableButNotInlined(fn *Node) bool {
- if fn.Func.Nname.Func.Inl == nil {
+func isInlinableButNotInlined(fn ir.Node) bool {
+ if fn.Func().Nname.Func().Inl == nil {
return false
}
- if fn.Sym == nil {
+ if fn.Sym() == nil {
return true
}
- return !fn.Sym.Linksym().WasInlined()
+ return !fn.Sym().Linksym().WasInlined()
}
const maxStackSize = 1 << 30
@@ -315,12 +315,12 @@ const maxStackSize = 1 << 30
// uses it to generate a plist,
// and flushes that plist to machine code.
// worker indicates which of the backend workers is doing the processing.
-func compileSSA(fn *Node, worker int) {
+func compileSSA(fn ir.Node, worker int) {
f := buildssa(fn, worker)
// Note: check arg size to fix issue 25507.
- if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type.ArgWidth() >= maxStackSize {
+ if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type().ArgWidth() >= maxStackSize {
largeStackFramesMu.Lock()
- largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type.ArgWidth(), pos: fn.Pos})
+ largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type().ArgWidth(), pos: fn.Pos()})
largeStackFramesMu.Unlock()
return
}
@@ -336,14 +336,14 @@ func compileSSA(fn *Node, worker int) {
if pp.Text.To.Offset >= maxStackSize {
largeStackFramesMu.Lock()
locals := f.Frontend().(*ssafn).stksize
- largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type.ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos})
+ largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type().ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos()})
largeStackFramesMu.Unlock()
return
}
pp.Flush() // assemble, fill in boilerplate, etc.
// fieldtrack must be called after pp.Flush. See issue 20014.
- fieldtrack(pp.Text.From.Sym, fn.Func.FieldTrack)
+ fieldtrack(pp.Text.From.Sym, fn.Func().FieldTrack)
}
func init() {
@@ -360,7 +360,7 @@ func compileFunctions() {
sizeCalculationDisabled = true // not safe to calculate sizes concurrently
if race.Enabled {
// Randomize compilation order to try to shake out races.
- tmp := make([]*Node, len(compilequeue))
+ tmp := make([]ir.Node, len(compilequeue))
perm := rand.Perm(len(compilequeue))
for i, v := range perm {
tmp[v] = compilequeue[i]
@@ -371,13 +371,13 @@ func compileFunctions() {
// since they're most likely to be the slowest.
// This helps avoid stragglers.
sort.Slice(compilequeue, func(i, j int) bool {
- return compilequeue[i].Nbody.Len() > compilequeue[j].Nbody.Len()
+ return compilequeue[i].Body().Len() > compilequeue[j].Body().Len()
})
}
var wg sync.WaitGroup
- Ctxt.InParallel = true
- c := make(chan *Node, nBackendWorkers)
- for i := 0; i < nBackendWorkers; i++ {
+ base.Ctxt.InParallel = true
+ c := make(chan ir.Node, base.Flag.LowerC)
+ for i := 0; i < base.Flag.LowerC; i++ {
wg.Add(1)
go func(worker int) {
for fn := range c {
@@ -392,46 +392,75 @@ func compileFunctions() {
close(c)
compilequeue = nil
wg.Wait()
- Ctxt.InParallel = false
+ base.Ctxt.InParallel = false
sizeCalculationDisabled = false
}
}
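The queue-draining pattern above, reduced to a standalone sketch (plain ints stand in for the queued functions; workers plays the role of base.Flag.LowerC):

    package main

    import (
            "fmt"
            "sync"
    )

    func main() {
            queue := []int{1, 2, 3, 4, 5} // stands in for compilequeue
            const workers = 2

            var wg sync.WaitGroup
            c := make(chan int, workers)
            for i := 0; i < workers; i++ {
                    wg.Add(1)
                    go func(worker int) {
                            defer wg.Done()
                            for fn := range c {
                                    fmt.Println("worker", worker, "got", fn)
                            }
                    }(i)
            }
            for _, fn := range queue {
                    c <- fn
            }
            close(c) // lets the range loops terminate
            wg.Wait()
    }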
func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
- fn := curfn.(*Node)
- if fn.Func.Nname != nil {
- if expect := fn.Func.Nname.Sym.Linksym(); fnsym != expect {
- Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
- }
- }
-
- var apdecls []*Node
+ fn := curfn.(ir.Node)
+ if fn.Func().Nname != nil {
+ if expect := fn.Func().Nname.Sym().Linksym(); fnsym != expect {
+ base.Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
+ }
+ }
+
+ // Back when there were two different *Funcs for a function, this code
+ // was not consistent about whether a particular *Node being processed
+ // was an ODCLFUNC or ONAME node. Partly this is because inlined function
+ // bodies have no ODCLFUNC node, which was its own inconsistency.
+ // In any event, the handling of the two different nodes for DWARF purposes
+ // was subtly different, likely in unintended ways. CL 272253 merged the
+ // two nodes' Func fields, so that code sees the same *Func whether it is
+ // holding the ODCLFUNC or the ONAME. This resulted in changes in the
+ // DWARF output. To preserve the existing DWARF output and leave an
+ // intentional change for a future CL, this code does the following when
+ // fn.Op == ONAME:
+ //
+ // 1. Disallow use of createComplexVars in createDwarfVars.
+ // It was not possible to reach that code for an ONAME before,
+ // because the DebugInfo was set only on the ODCLFUNC Func.
+ // Calling into it in the ONAME case causes an index out of bounds panic.
+ //
+ // 2. Do not populate apdecls. fn.Func.Dcl was in the ODCLFUNC Func,
+ // not the ONAME Func. Populating apdecls for the ONAME case results
+ // in selected being populated after createSimpleVars is called in
+ // createDwarfVars, and then that causes the loop to skip all the entries
+ // in dcl, meaning that the RecordAutoType calls don't happen.
+ //
+ // These two adjustments keep toolstash -cmp working for now.
+ // Deciding the right answer is, as they say, future work.
+ isODCLFUNC := fn.Op() == ir.ODCLFUNC
+
+ var apdecls []ir.Node
// Populate decls for fn.
- for _, n := range fn.Func.Dcl {
- if n.Op != ONAME { // might be OTYPE or OLITERAL
- continue
- }
- switch n.Class() {
- case PAUTO:
- if !n.Name.Used() {
- // Text == nil -> generating abstract function
- if fnsym.Func().Text != nil {
- Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
+ if isODCLFUNC {
+ for _, n := range fn.Func().Dcl {
+ if n.Op() != ir.ONAME { // might be OTYPE or OLITERAL
+ continue
+ }
+ switch n.Class() {
+ case ir.PAUTO:
+ if !n.Name().Used() {
+ // Text == nil -> generating abstract function
+ if fnsym.Func().Text != nil {
+ base.Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
+ }
+ continue
}
+ case ir.PPARAM, ir.PPARAMOUT:
+ default:
continue
}
- case PPARAM, PPARAMOUT:
- default:
- continue
+ apdecls = append(apdecls, n)
+ fnsym.Func().RecordAutoType(ngotype(n).Linksym())
}
- apdecls = append(apdecls, n)
- fnsym.Func().RecordAutoType(ngotype(n).Linksym())
}
- decls, dwarfVars := createDwarfVars(fnsym, fn.Func, apdecls)
+ decls, dwarfVars := createDwarfVars(fnsym, isODCLFUNC, fn.Func(), apdecls)
// For each type referenced by the function's auto vars but not
- // already referenced by a dwarf var, attach a dummy relocation to
+ // already referenced by a dwarf var, attach an R_USETYPE relocation to
// the function symbol to ensure that the type is included in DWARF
// processing during linking.
typesyms := []*obj.LSym{}
@@ -446,22 +475,22 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S
}
fnsym.Func().Autot = nil
- var varScopes []ScopeID
+ var varScopes []ir.ScopeID
for _, decl := range decls {
pos := declPos(decl)
- varScopes = append(varScopes, findScope(fn.Func.Marks, pos))
+ varScopes = append(varScopes, findScope(fn.Func().Marks, pos))
}
scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes)
var inlcalls dwarf.InlCalls
- if genDwarfInline > 0 {
+ if base.Flag.GenDwarfInl > 0 {
inlcalls = assembleInlines(fnsym, dwarfVars)
}
return scopes, inlcalls
}
-func declPos(decl *Node) src.XPos {
- if decl.Name.Defn != nil && (decl.Name.Captured() || decl.Name.Byval()) {
+func declPos(decl ir.Node) src.XPos {
+ if decl.Name().Defn != nil && (decl.Name().Captured() || decl.Name().Byval()) {
// It's not clear which position is correct for captured variables here:
// * decl.Pos is the wrong position for captured variables, in the inner
// function, but it is the right position in the outer function.
@@ -476,19 +505,19 @@ func declPos(decl *Node) src.XPos {
// case statement.
// This code is probably wrong for type switch variables that are also
// captured.
- return decl.Name.Defn.Pos
+ return decl.Name().Defn.Pos()
}
- return decl.Pos
+ return decl.Pos()
}
// createSimpleVars creates a DWARF entry for every variable declared in the
// function, claiming that they are permanently on the stack.
-func createSimpleVars(fnsym *obj.LSym, apDecls []*Node) ([]*Node, []*dwarf.Var, map[*Node]bool) {
+func createSimpleVars(fnsym *obj.LSym, apDecls []ir.Node) ([]ir.Node, []*dwarf.Var, map[ir.Node]bool) {
var vars []*dwarf.Var
- var decls []*Node
- selected := make(map[*Node]bool)
+ var decls []ir.Node
+ selected := make(map[ir.Node]bool)
for _, n := range apDecls {
- if n.IsAutoTmp() {
+ if ir.IsAutoTmp(n) {
continue
}
@@ -499,14 +528,14 @@ func createSimpleVars(fnsym *obj.LSym, apDecls []*Node) ([]*Node, []*dwarf.Var,
return decls, vars, selected
}
-func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var {
+func createSimpleVar(fnsym *obj.LSym, n ir.Node) *dwarf.Var {
var abbrev int
- offs := n.Xoffset
+ offs := n.Offset()
switch n.Class() {
- case PAUTO:
+ case ir.PAUTO:
abbrev = dwarf.DW_ABRV_AUTO
- if Ctxt.FixedFrameSize() == 0 {
+ if base.Ctxt.FixedFrameSize() == 0 {
offs -= int64(Widthptr)
}
if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
@@ -514,32 +543,32 @@ func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var {
offs -= int64(Widthptr)
}
- case PPARAM, PPARAMOUT:
+ case ir.PPARAM, ir.PPARAMOUT:
abbrev = dwarf.DW_ABRV_PARAM
- offs += Ctxt.FixedFrameSize()
+ offs += base.Ctxt.FixedFrameSize()
default:
- Fatalf("createSimpleVar unexpected class %v for node %v", n.Class(), n)
+ base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class(), n)
}
- typename := dwarf.InfoPrefix + typesymname(n.Type)
+ typename := dwarf.InfoPrefix + typesymname(n.Type())
delete(fnsym.Func().Autot, ngotype(n).Linksym())
inlIndex := 0
- if genDwarfInline > 1 {
- if n.Name.InlFormal() || n.Name.InlLocal() {
- inlIndex = posInlIndex(n.Pos) + 1
- if n.Name.InlFormal() {
+ if base.Flag.GenDwarfInl > 1 {
+ if n.Name().InlFormal() || n.Name().InlLocal() {
+ inlIndex = posInlIndex(n.Pos()) + 1
+ if n.Name().InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM
}
}
}
- declpos := Ctxt.InnermostPos(declPos(n))
+ declpos := base.Ctxt.InnermostPos(declPos(n))
return &dwarf.Var{
- Name: n.Sym.Name,
- IsReturnValue: n.Class() == PPARAMOUT,
- IsInlFormal: n.Name.InlFormal(),
+ Name: n.Sym().Name,
+ IsReturnValue: n.Class() == ir.PPARAMOUT,
+ IsInlFormal: n.Name().InlFormal(),
Abbrev: abbrev,
StackOffset: int32(offs),
- Type: Ctxt.Lookup(typename),
+ Type: base.Ctxt.Lookup(typename),
DeclFile: declpos.RelFilename(),
DeclLine: declpos.RelLine(),
DeclCol: declpos.Col(),
@@ -550,19 +579,19 @@ func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var {
// createComplexVars creates recomposed DWARF vars with location lists,
// suitable for describing optimized code.
-func createComplexVars(fnsym *obj.LSym, fn *Func) ([]*Node, []*dwarf.Var, map[*Node]bool) {
- debugInfo := fn.DebugInfo
+func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]ir.Node, []*dwarf.Var, map[ir.Node]bool) {
+ debugInfo := fn.DebugInfo.(*ssa.FuncDebug)
// Produce a DWARF variable entry for each user variable.
- var decls []*Node
+ var decls []ir.Node
var vars []*dwarf.Var
- ssaVars := make(map[*Node]bool)
+ ssaVars := make(map[ir.Node]bool)
for varID, dvar := range debugInfo.Vars {
- n := dvar.(*Node)
+ n := dvar
ssaVars[n] = true
for _, slot := range debugInfo.VarSlots[varID] {
- ssaVars[debugInfo.Slots[slot].N.(*Node)] = true
+ ssaVars[debugInfo.Slots[slot].N] = true
}
if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil {
@@ -576,12 +605,12 @@ func createComplexVars(fnsym *obj.LSym, fn *Func) ([]*Node, []*dwarf.Var, map[*N
// createDwarfVars processes fn, returning a list of DWARF variables and the
// Nodes they represent.
-func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dwarf.Var) {
+func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []ir.Node) ([]ir.Node, []*dwarf.Var) {
// Collect a raw list of DWARF vars.
var vars []*dwarf.Var
- var decls []*Node
- var selected map[*Node]bool
- if Ctxt.Flag_locationlists && Ctxt.Flag_optimize && fn.DebugInfo != nil {
+ var decls []ir.Node
+ var selected map[ir.Node]bool
+ if base.Ctxt.Flag_locationlists && base.Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK {
decls, vars, selected = createComplexVars(fnsym, fn)
} else {
decls, vars, selected = createSimpleVars(fnsym, apDecls)
@@ -608,11 +637,11 @@ func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dw
if _, found := selected[n]; found {
continue
}
- c := n.Sym.Name[0]
- if c == '.' || n.Type.IsUntyped() {
+ c := n.Sym().Name[0]
+ if c == '.' || n.Type().IsUntyped() {
continue
}
- if n.Class() == PPARAM && !canSSAType(n.Type) {
+ if n.Class() == ir.PPARAM && !canSSAType(n.Type()) {
// SSA-able args get location lists, and may move in and
// out of registers, so those are handled elsewhere.
// Autos and named output params seem to get handled
@@ -624,13 +653,13 @@ func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dw
decls = append(decls, n)
continue
}
- typename := dwarf.InfoPrefix + typesymname(n.Type)
+ typename := dwarf.InfoPrefix + typesymname(n.Type())
decls = append(decls, n)
abbrev := dwarf.DW_ABRV_AUTO_LOCLIST
- isReturnValue := (n.Class() == PPARAMOUT)
- if n.Class() == PPARAM || n.Class() == PPARAMOUT {
+ isReturnValue := (n.Class() == ir.PPARAMOUT)
+ if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
- } else if n.Class() == PAUTOHEAP {
+ } else if n.Class() == ir.PAUTOHEAP {
// If dcl in question has been promoted to heap, do a bit
// of extra work to recover original class (auto or param);
// see issue 30908. This ensures that we get the proper
@@ -638,28 +667,28 @@ func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dw
// misleading location for the param (we want pointer-to-heap
// and not stack).
// TODO(thanm): generate a better location expression
- stackcopy := n.Name.Param.Stackcopy
- if stackcopy != nil && (stackcopy.Class() == PPARAM || stackcopy.Class() == PPARAMOUT) {
+ stackcopy := n.Name().Param.Stackcopy
+ if stackcopy != nil && (stackcopy.Class() == ir.PPARAM || stackcopy.Class() == ir.PPARAMOUT) {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
- isReturnValue = (stackcopy.Class() == PPARAMOUT)
+ isReturnValue = (stackcopy.Class() == ir.PPARAMOUT)
}
}
inlIndex := 0
- if genDwarfInline > 1 {
- if n.Name.InlFormal() || n.Name.InlLocal() {
- inlIndex = posInlIndex(n.Pos) + 1
- if n.Name.InlFormal() {
+ if base.Flag.GenDwarfInl > 1 {
+ if n.Name().InlFormal() || n.Name().InlLocal() {
+ inlIndex = posInlIndex(n.Pos()) + 1
+ if n.Name().InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
}
}
}
- declpos := Ctxt.InnermostPos(n.Pos)
+ declpos := base.Ctxt.InnermostPos(n.Pos())
vars = append(vars, &dwarf.Var{
- Name: n.Sym.Name,
+ Name: n.Sym().Name,
IsReturnValue: isReturnValue,
Abbrev: abbrev,
- StackOffset: int32(n.Xoffset),
- Type: Ctxt.Lookup(typename),
+ StackOffset: int32(n.Offset()),
+ Type: base.Ctxt.Lookup(typename),
DeclFile: declpos.RelFilename(),
DeclLine: declpos.RelLine(),
DeclCol: declpos.Col(),
@@ -679,14 +708,14 @@ func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dw
// function that is not local to the package being compiled, then the
// names of the variables may have been "versioned" to avoid conflicts
// with local vars; disregard this versioning when sorting.
-func preInliningDcls(fnsym *obj.LSym) []*Node {
- fn := Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*Node)
- var rdcl []*Node
- for _, n := range fn.Func.Inl.Dcl {
- c := n.Sym.Name[0]
+func preInliningDcls(fnsym *obj.LSym) []ir.Node {
+ fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(ir.Node)
+ var rdcl []ir.Node
+ for _, n := range fn.Func().Inl.Dcl {
+ c := n.Sym().Name[0]
// Avoid reporting "_" parameters, since if there are more than
// one, it can result in a collision later on, as in #23179.
- if unversion(n.Sym.Name) == "_" || c == '.' || n.Type.IsUntyped() {
+ if unversion(n.Sym().Name) == "_" || c == '.' || n.Type().IsUntyped() {
continue
}
rdcl = append(rdcl, n)
@@ -698,33 +727,33 @@ func preInliningDcls(fnsym *obj.LSym) []*Node {
// stack pointer, suitable for use in a DWARF location entry. This has nothing
// to do with its offset in the user variable.
func stackOffset(slot ssa.LocalSlot) int32 {
- n := slot.N.(*Node)
- var base int64
+ n := slot.N
+ var off int64
switch n.Class() {
- case PAUTO:
- if Ctxt.FixedFrameSize() == 0 {
- base -= int64(Widthptr)
+ case ir.PAUTO:
+ if base.Ctxt.FixedFrameSize() == 0 {
+ off -= int64(Widthptr)
}
if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
// There is a word of space for FP on ARM64 even if the frame pointer is disabled
- base -= int64(Widthptr)
+ off -= int64(Widthptr)
}
- case PPARAM, PPARAMOUT:
- base += Ctxt.FixedFrameSize()
+ case ir.PPARAM, ir.PPARAMOUT:
+ off += base.Ctxt.FixedFrameSize()
}
- return int32(base + n.Xoffset + slot.Off)
+ return int32(off + n.Offset() + slot.Off)
}
// createComplexVar builds a single DWARF variable entry and location list.
-func createComplexVar(fnsym *obj.LSym, fn *Func, varID ssa.VarID) *dwarf.Var {
- debug := fn.DebugInfo
- n := debug.Vars[varID].(*Node)
+func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var {
+ debug := fn.DebugInfo.(*ssa.FuncDebug)
+ n := debug.Vars[varID]
var abbrev int
switch n.Class() {
- case PAUTO:
+ case ir.PAUTO:
abbrev = dwarf.DW_ABRV_AUTO_LOCLIST
- case PPARAM, PPARAMOUT:
+ case ir.PPARAM, ir.PPARAMOUT:
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
default:
return nil
@@ -734,21 +763,21 @@ func createComplexVar(fnsym *obj.LSym, fn *Func, varID ssa.VarID) *dwarf.Var {
delete(fnsym.Func().Autot, gotype)
typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
inlIndex := 0
- if genDwarfInline > 1 {
- if n.Name.InlFormal() || n.Name.InlLocal() {
- inlIndex = posInlIndex(n.Pos) + 1
- if n.Name.InlFormal() {
+ if base.Flag.GenDwarfInl > 1 {
+ if n.Name().InlFormal() || n.Name().InlLocal() {
+ inlIndex = posInlIndex(n.Pos()) + 1
+ if n.Name().InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
}
}
}
- declpos := Ctxt.InnermostPos(n.Pos)
+ declpos := base.Ctxt.InnermostPos(n.Pos())
dvar := &dwarf.Var{
- Name: n.Sym.Name,
- IsReturnValue: n.Class() == PPARAMOUT,
- IsInlFormal: n.Name.InlFormal(),
+ Name: n.Sym().Name,
+ IsReturnValue: n.Class() == ir.PPARAMOUT,
+ IsInlFormal: n.Name().InlFormal(),
Abbrev: abbrev,
- Type: Ctxt.Lookup(typename),
+ Type: base.Ctxt.Lookup(typename),
// The stack offset is used as a sorting key, so for decomposed
// variables just give it the first one. It's not used otherwise.
// This won't work well if the first slot hasn't been assigned a stack
@@ -763,7 +792,7 @@ func createComplexVar(fnsym *obj.LSym, fn *Func, varID ssa.VarID) *dwarf.Var {
list := debug.LocationLists[varID]
if len(list) != 0 {
dvar.PutLocationList = func(listSym, startPC dwarf.Sym) {
- debug.PutLocationList(list, Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym))
+ debug.PutLocationList(list, base.Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym))
}
}
return dvar
diff --git a/src/cmd/compile/internal/gc/pgen_test.go b/src/cmd/compile/internal/gc/pgen_test.go
index b1db29825c..1984f9aa08 100644
--- a/src/cmd/compile/internal/gc/pgen_test.go
+++ b/src/cmd/compile/internal/gc/pgen_test.go
@@ -5,6 +5,7 @@
package gc
import (
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"reflect"
"sort"
@@ -12,129 +13,133 @@ import (
)
func typeWithoutPointers() *types.Type {
- t := types.New(TSTRUCT)
- f := &types.Field{Type: types.New(TINT)}
+ t := types.New(types.TSTRUCT)
+ f := &types.Field{Type: types.New(types.TINT)}
t.SetFields([]*types.Field{f})
return t
}
func typeWithPointers() *types.Type {
- t := types.New(TSTRUCT)
- f := &types.Field{Type: types.NewPtr(types.New(TINT))}
+ t := types.New(types.TSTRUCT)
+ f := &types.Field{Type: types.NewPtr(types.New(types.TINT))}
t.SetFields([]*types.Field{f})
return t
}
-func markUsed(n *Node) *Node {
- n.Name.SetUsed(true)
+func markUsed(n ir.Node) ir.Node {
+ n.Name().SetUsed(true)
return n
}
-func markNeedZero(n *Node) *Node {
- n.Name.SetNeedzero(true)
+func markNeedZero(n ir.Node) ir.Node {
+ n.Name().SetNeedzero(true)
return n
}
-func nodeWithClass(n Node, c Class) *Node {
- n.SetClass(c)
- n.Name = new(Name)
- return &n
-}
-
// Test all code paths for cmpstackvarlt.
func TestCmpstackvar(t *testing.T) {
+ nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) ir.Node {
+ if s == nil {
+ s = &types.Sym{Name: "."}
+ }
+ n := NewName(s)
+ n.SetType(t)
+ n.SetOffset(xoffset)
+ n.SetClass(cl)
+ return n
+ }
testdata := []struct {
- a, b *Node
+ a, b ir.Node
lt bool
}{
{
- nodeWithClass(Node{}, PAUTO),
- nodeWithClass(Node{}, PFUNC),
+ nod(0, nil, nil, ir.PAUTO),
+ nod(0, nil, nil, ir.PFUNC),
false,
},
{
- nodeWithClass(Node{}, PFUNC),
- nodeWithClass(Node{}, PAUTO),
+ nod(0, nil, nil, ir.PFUNC),
+ nod(0, nil, nil, ir.PAUTO),
true,
},
{
- nodeWithClass(Node{Xoffset: 0}, PFUNC),
- nodeWithClass(Node{Xoffset: 10}, PFUNC),
+ nod(0, nil, nil, ir.PFUNC),
+ nod(10, nil, nil, ir.PFUNC),
true,
},
{
- nodeWithClass(Node{Xoffset: 20}, PFUNC),
- nodeWithClass(Node{Xoffset: 10}, PFUNC),
+ nod(20, nil, nil, ir.PFUNC),
+ nod(10, nil, nil, ir.PFUNC),
false,
},
{
- nodeWithClass(Node{Xoffset: 10}, PFUNC),
- nodeWithClass(Node{Xoffset: 10}, PFUNC),
+ nod(10, nil, nil, ir.PFUNC),
+ nod(10, nil, nil, ir.PFUNC),
false,
},
{
- nodeWithClass(Node{Xoffset: 10}, PPARAM),
- nodeWithClass(Node{Xoffset: 20}, PPARAMOUT),
+ nod(10, nil, nil, ir.PPARAM),
+ nod(20, nil, nil, ir.PPARAMOUT),
true,
},
{
- nodeWithClass(Node{Xoffset: 10}, PPARAMOUT),
- nodeWithClass(Node{Xoffset: 20}, PPARAM),
+ nod(10, nil, nil, ir.PPARAMOUT),
+ nod(20, nil, nil, ir.PPARAM),
true,
},
{
- markUsed(nodeWithClass(Node{}, PAUTO)),
- nodeWithClass(Node{}, PAUTO),
+ markUsed(nod(0, nil, nil, ir.PAUTO)),
+ nod(0, nil, nil, ir.PAUTO),
true,
},
{
- nodeWithClass(Node{}, PAUTO),
- markUsed(nodeWithClass(Node{}, PAUTO)),
+ nod(0, nil, nil, ir.PAUTO),
+ markUsed(nod(0, nil, nil, ir.PAUTO)),
false,
},
{
- nodeWithClass(Node{Type: typeWithoutPointers()}, PAUTO),
- nodeWithClass(Node{Type: typeWithPointers()}, PAUTO),
+ nod(0, typeWithoutPointers(), nil, ir.PAUTO),
+ nod(0, typeWithPointers(), nil, ir.PAUTO),
false,
},
{
- nodeWithClass(Node{Type: typeWithPointers()}, PAUTO),
- nodeWithClass(Node{Type: typeWithoutPointers()}, PAUTO),
+ nod(0, typeWithPointers(), nil, ir.PAUTO),
+ nod(0, typeWithoutPointers(), nil, ir.PAUTO),
true,
},
{
- markNeedZero(nodeWithClass(Node{Type: &types.Type{}}, PAUTO)),
- nodeWithClass(Node{Type: &types.Type{}, Name: &Name{}}, PAUTO),
+ markNeedZero(nod(0, &types.Type{}, nil, ir.PAUTO)),
+ nod(0, &types.Type{}, nil, ir.PAUTO),
true,
},
{
- nodeWithClass(Node{Type: &types.Type{}, Name: &Name{}}, PAUTO),
- markNeedZero(nodeWithClass(Node{Type: &types.Type{}}, PAUTO)),
+ nod(0, &types.Type{}, nil, ir.PAUTO),
+ markNeedZero(nod(0, &types.Type{}, nil, ir.PAUTO)),
false,
},
{
- nodeWithClass(Node{Type: &types.Type{Width: 1}, Name: &Name{}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{Width: 2}, Name: &Name{}}, PAUTO),
+ nod(0, &types.Type{Width: 1}, nil, ir.PAUTO),
+ nod(0, &types.Type{Width: 2}, nil, ir.PAUTO),
false,
},
{
- nodeWithClass(Node{Type: &types.Type{Width: 2}, Name: &Name{}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{Width: 1}, Name: &Name{}}, PAUTO),
+ nod(0, &types.Type{Width: 2}, nil, ir.PAUTO),
+ nod(0, &types.Type{Width: 1}, nil, ir.PAUTO),
true,
},
{
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
+ nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
+ nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, ir.PAUTO),
true,
},
{
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
+ nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
+ nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
false,
},
{
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
+ nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, ir.PAUTO),
+ nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
false,
},
}
@@ -151,35 +156,42 @@ func TestCmpstackvar(t *testing.T) {
}
func TestStackvarSort(t *testing.T) {
- inp := []*Node{
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
- nodeWithClass(Node{Xoffset: 0, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
- nodeWithClass(Node{Xoffset: 10, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
- nodeWithClass(Node{Xoffset: 20, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
- markUsed(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
- nodeWithClass(Node{Type: typeWithoutPointers(), Sym: &types.Sym{}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
- markNeedZero(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
- nodeWithClass(Node{Type: &types.Type{Width: 1}, Sym: &types.Sym{}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{Width: 2}, Sym: &types.Sym{}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
+ nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) ir.Node {
+ n := NewName(s)
+ n.SetType(t)
+ n.SetOffset(xoffset)
+ n.SetClass(cl)
+ return n
+ }
+ inp := []ir.Node{
+ nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
+ nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO),
+ nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
+ nod(10, &types.Type{}, &types.Sym{}, ir.PFUNC),
+ nod(20, &types.Type{}, &types.Sym{}, ir.PFUNC),
+ markUsed(nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO)),
+ nod(0, typeWithoutPointers(), &types.Sym{}, ir.PAUTO),
+ nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO),
+ markNeedZero(nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO)),
+ nod(0, &types.Type{Width: 1}, &types.Sym{}, ir.PAUTO),
+ nod(0, &types.Type{Width: 2}, &types.Sym{}, ir.PAUTO),
+ nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
+ nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, ir.PAUTO),
}
- want := []*Node{
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
- nodeWithClass(Node{Xoffset: 0, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
- nodeWithClass(Node{Xoffset: 10, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
- nodeWithClass(Node{Xoffset: 20, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
- markUsed(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
- markNeedZero(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
- nodeWithClass(Node{Type: &types.Type{Width: 2}, Sym: &types.Sym{}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{Width: 1}, Sym: &types.Sym{}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
- nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
- nodeWithClass(Node{Type: typeWithoutPointers(), Sym: &types.Sym{}}, PAUTO),
+ want := []ir.Node{
+ nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
+ nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
+ nod(10, &types.Type{}, &types.Sym{}, ir.PFUNC),
+ nod(20, &types.Type{}, &types.Sym{}, ir.PFUNC),
+ markUsed(nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO)),
+ markNeedZero(nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO)),
+ nod(0, &types.Type{Width: 2}, &types.Sym{}, ir.PAUTO),
+ nod(0, &types.Type{Width: 1}, &types.Sym{}, ir.PAUTO),
+ nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO),
+ nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO),
+ nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
+ nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, ir.PAUTO),
+ nod(0, typeWithoutPointers(), &types.Sym{}, ir.PAUTO),
}
sort.Sort(byStackVar(inp))
if !reflect.DeepEqual(want, inp) {
diff --git a/src/cmd/compile/internal/gc/phi.go b/src/cmd/compile/internal/gc/phi.go
index 5218cd0ef3..677bfc92df 100644
--- a/src/cmd/compile/internal/gc/phi.go
+++ b/src/cmd/compile/internal/gc/phi.go
@@ -5,6 +5,7 @@
package gc
import (
+ "cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/src"
@@ -40,11 +41,11 @@ func (s *state) insertPhis() {
}
type phiState struct {
- s *state // SSA state
- f *ssa.Func // function to work on
- defvars []map[*Node]*ssa.Value // defined variables at end of each block
+ s *state // SSA state
+ f *ssa.Func // function to work on
+ defvars []map[ir.Node]*ssa.Value // defined variables at end of each block
- varnum map[*Node]int32 // variable numbering
+ varnum map[ir.Node]int32 // variable numbering
// properties of the dominator tree
idom []*ssa.Block // dominator parents
@@ -59,7 +60,7 @@ type phiState struct {
hasDef *sparseSet // has a write of the variable we're processing
// miscellaneous
- placeholder *ssa.Value // dummy value to use as a "not set yet" placeholder.
+ placeholder *ssa.Value // value to use as a "not set yet" placeholder.
}
func (s *phiState) insertPhis() {
@@ -70,15 +71,15 @@ func (s *phiState) insertPhis() {
// Find all the variables for which we need to match up reads & writes.
// This step prunes any basic-block-only variables from consideration.
// Generate a numbering for these variables.
- s.varnum = map[*Node]int32{}
- var vars []*Node
+ s.varnum = map[ir.Node]int32{}
+ var vars []ir.Node
var vartypes []*types.Type
for _, b := range s.f.Blocks {
for _, v := range b.Values {
if v.Op != ssa.OpFwdRef {
continue
}
- var_ := v.Aux.(*Node)
+ var_ := v.Aux.(ir.Node)
// Optimization: look back 1 block for the definition.
if len(b.Preds) == 1 {
@@ -183,7 +184,7 @@ levels:
}
}
-func (s *phiState) insertVarPhis(n int, var_ *Node, defs []*ssa.Block, typ *types.Type) {
+func (s *phiState) insertVarPhis(n int, var_ ir.Node, defs []*ssa.Block, typ *types.Type) {
priq := &s.priq
q := s.q
queued := s.queued
@@ -318,7 +319,7 @@ func (s *phiState) resolveFwdRefs() {
if v.Op != ssa.OpFwdRef {
continue
}
- n := s.varnum[v.Aux.(*Node)]
+ n := s.varnum[v.Aux.(ir.Node)]
v.Op = ssa.OpCopy
v.Aux = nil
v.AddArg(values[n])
@@ -432,11 +433,11 @@ func (s *sparseSet) clear() {
// Variant to use for small functions.
type simplePhiState struct {
- s *state // SSA state
- f *ssa.Func // function to work on
- fwdrefs []*ssa.Value // list of FwdRefs to be processed
- defvars []map[*Node]*ssa.Value // defined variables at end of each block
- reachable []bool // which blocks are reachable
+ s *state // SSA state
+ f *ssa.Func // function to work on
+ fwdrefs []*ssa.Value // list of FwdRefs to be processed
+ defvars []map[ir.Node]*ssa.Value // defined variables at end of each block
+ reachable []bool // which blocks are reachable
}
func (s *simplePhiState) insertPhis() {
@@ -449,7 +450,7 @@ func (s *simplePhiState) insertPhis() {
continue
}
s.fwdrefs = append(s.fwdrefs, v)
- var_ := v.Aux.(*Node)
+ var_ := v.Aux.(ir.Node)
if _, ok := s.defvars[b.ID][var_]; !ok {
s.defvars[b.ID][var_] = v // treat FwdRefs as definitions.
}
@@ -463,7 +464,7 @@ loop:
v := s.fwdrefs[len(s.fwdrefs)-1]
s.fwdrefs = s.fwdrefs[:len(s.fwdrefs)-1]
b := v.Block
- var_ := v.Aux.(*Node)
+ var_ := v.Aux.(ir.Node)
if b == s.f.Entry {
// No variable should be live at entry.
s.s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, var_, v)
@@ -511,7 +512,7 @@ loop:
}
// lookupVarOutgoing finds the variable's value at the end of block b.
-func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ *Node, line src.XPos) *ssa.Value {
+func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ ir.Node, line src.XPos) *ssa.Value {
for {
if v := s.defvars[b.ID][var_]; v != nil {
return v
diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go
index a48173e0d6..bd7696d859 100644
--- a/src/cmd/compile/internal/gc/plive.go
+++ b/src/cmd/compile/internal/gc/plive.go
@@ -15,6 +15,8 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
@@ -99,10 +101,10 @@ type BlockEffects struct {
// A collection of global state used by liveness analysis.
type Liveness struct {
- fn *Node
+ fn ir.Node
f *ssa.Func
- vars []*Node
- idx map[*Node]int32
+ vars []ir.Node
+ idx map[ir.Node]int32
stkptrsize int64
be []BlockEffects
@@ -204,20 +206,20 @@ type progeffectscache struct {
// nor do we care about non-local variables,
// nor do we care about empty structs (handled by the pointer check),
// nor do we care about the fake PAUTOHEAP variables.
-func livenessShouldTrack(n *Node) bool {
- return n.Op == ONAME && (n.Class() == PAUTO || n.Class() == PPARAM || n.Class() == PPARAMOUT) && n.Type.HasPointers()
+func livenessShouldTrack(n ir.Node) bool {
+ return n.Op() == ir.ONAME && (n.Class() == ir.PAUTO || n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Type().HasPointers()
}
// getvariables returns the list of on-stack variables that we need to track
// and a map for looking up indices by *Node.
-func getvariables(fn *Node) ([]*Node, map[*Node]int32) {
- var vars []*Node
- for _, n := range fn.Func.Dcl {
+func getvariables(fn ir.Node) ([]ir.Node, map[ir.Node]int32) {
+ var vars []ir.Node
+ for _, n := range fn.Func().Dcl {
if livenessShouldTrack(n) {
vars = append(vars, n)
}
}
- idx := make(map[*Node]int32, len(vars))
+ idx := make(map[ir.Node]int32, len(vars))
for i, n := range vars {
idx[n] = int32(i)
}
@@ -226,14 +228,14 @@ func getvariables(fn *Node) ([]*Node, map[*Node]int32) {
func (lv *Liveness) initcache() {
if lv.cache.initialized {
- Fatalf("liveness cache initialized twice")
+ base.Fatalf("liveness cache initialized twice")
return
}
lv.cache.initialized = true
for i, node := range lv.vars {
switch node.Class() {
- case PPARAM:
+ case ir.PPARAM:
// A return instruction with a p.to is a tail return, which brings
// the stack pointer back up (if it ever went down) and then jumps
// to a new function entirely. That form of instruction must read
@@ -242,7 +244,7 @@ func (lv *Liveness) initcache() {
// function runs.
lv.cache.tailuevar = append(lv.cache.tailuevar, int32(i))
- case PPARAMOUT:
+ case ir.PPARAMOUT:
// All results are live at every return point.
// Note that this point is after escaping return values
// are copied back to the stack using their PAUTOHEAP references.
@@ -270,7 +272,7 @@ const (
// If v does not affect any tracked variables, it returns -1, 0.
func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
n, e := affectedNode(v)
- if e == 0 || n == nil || n.Op != ONAME { // cheapest checks first
+ if e == 0 || n == nil || n.Op() != ir.ONAME { // cheapest checks first
return -1, 0
}
@@ -280,7 +282,7 @@ func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
// variable" ICEs (issue 19632).
switch v.Op {
case ssa.OpVarDef, ssa.OpVarKill, ssa.OpVarLive, ssa.OpKeepAlive:
- if !n.Name.Used() {
+ if !n.Name().Used() {
return -1, 0
}
}
@@ -295,7 +297,7 @@ func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
if e&(ssa.SymRead|ssa.SymAddr) != 0 {
effect |= uevar
}
- if e&ssa.SymWrite != 0 && (!isfat(n.Type) || v.Op == ssa.OpVarDef) {
+ if e&ssa.SymWrite != 0 && (!isfat(n.Type()) || v.Op == ssa.OpVarDef) {
effect |= varkill
}
@@ -310,7 +312,7 @@ func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
}
// affectedNode returns the *Node affected by v
-func affectedNode(v *ssa.Value) (*Node, ssa.SymEffect) {
+func affectedNode(v *ssa.Value) (ir.Node, ssa.SymEffect) {
// Special cases.
switch v.Op {
case ssa.OpLoadReg:
@@ -321,9 +323,9 @@ func affectedNode(v *ssa.Value) (*Node, ssa.SymEffect) {
return n, ssa.SymWrite
case ssa.OpVarLive:
- return v.Aux.(*Node), ssa.SymRead
+ return v.Aux.(ir.Node), ssa.SymRead
case ssa.OpVarDef, ssa.OpVarKill:
- return v.Aux.(*Node), ssa.SymWrite
+ return v.Aux.(ir.Node), ssa.SymWrite
case ssa.OpKeepAlive:
n, _ := AutoVar(v.Args[0])
return n, ssa.SymRead
@@ -338,10 +340,10 @@ func affectedNode(v *ssa.Value) (*Node, ssa.SymEffect) {
case nil, *obj.LSym:
// ok, but no node
return nil, e
- case *Node:
+ case ir.Node:
return a, e
default:
- Fatalf("weird aux: %s", v.LongString())
+ base.Fatalf("weird aux: %s", v.LongString())
return nil, e
}
}
@@ -354,7 +356,7 @@ type livenessFuncCache struct {
// Constructs a new liveness structure used to hold the global state of the
// liveness computation. The cfg argument is a slice of *BasicBlocks and the
// vars argument is a slice of *Nodes.
-func newliveness(fn *Node, f *ssa.Func, vars []*Node, idx map[*Node]int32, stkptrsize int64) *Liveness {
+func newliveness(fn ir.Node, f *ssa.Func, vars []ir.Node, idx map[ir.Node]int32, stkptrsize int64) *Liveness {
lv := &Liveness{
fn: fn,
f: f,
@@ -406,7 +408,7 @@ func (lv *Liveness) blockEffects(b *ssa.Block) *BlockEffects {
// on future calls with the same type t.
func onebitwalktype1(t *types.Type, off int64, bv bvec) {
if t.Align > 0 && off&int64(t.Align-1) != 0 {
- Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off)
+ base.Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off)
}
if !t.HasPointers() {
// Note: this case ensures that pointers to go:notinheap types
@@ -415,25 +417,25 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) {
}
switch t.Etype {
- case TPTR, TUNSAFEPTR, TFUNC, TCHAN, TMAP:
+ case types.TPTR, types.TUNSAFEPTR, types.TFUNC, types.TCHAN, types.TMAP:
if off&int64(Widthptr-1) != 0 {
- Fatalf("onebitwalktype1: invalid alignment, %v", t)
+ base.Fatalf("onebitwalktype1: invalid alignment, %v", t)
}
bv.Set(int32(off / int64(Widthptr))) // pointer
- case TSTRING:
+ case types.TSTRING:
// struct { byte *str; intgo len; }
if off&int64(Widthptr-1) != 0 {
- Fatalf("onebitwalktype1: invalid alignment, %v", t)
+ base.Fatalf("onebitwalktype1: invalid alignment, %v", t)
}
bv.Set(int32(off / int64(Widthptr))) // pointer in first slot
- case TINTER:
+ case types.TINTER:
// struct { Itab *tab; void *data; }
// or, when isnilinter(t)==true:
// struct { Type *type; void *data; }
if off&int64(Widthptr-1) != 0 {
- Fatalf("onebitwalktype1: invalid alignment, %v", t)
+ base.Fatalf("onebitwalktype1: invalid alignment, %v", t)
}
// The first word of an interface is a pointer, but we don't
// treat it as such.
@@ -449,14 +451,14 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) {
// well as scan itabs to update their itab._type fields).
bv.Set(int32(off/int64(Widthptr) + 1)) // pointer in second slot
- case TSLICE:
+ case types.TSLICE:
// struct { byte *array; uintgo len; uintgo cap; }
if off&int64(Widthptr-1) != 0 {
- Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t)
+ base.Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t)
}
bv.Set(int32(off / int64(Widthptr))) // pointer in first slot (BitsPointer)
- case TARRAY:
+ case types.TARRAY:
elt := t.Elem()
if elt.Width == 0 {
// Short-circuit for #20739.
@@ -467,20 +469,20 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) {
off += elt.Width
}
- case TSTRUCT:
+ case types.TSTRUCT:
for _, f := range t.Fields().Slice() {
onebitwalktype1(f.Type, off+f.Offset, bv)
}
default:
- Fatalf("onebitwalktype1: unexpected type, %v", t)
+ base.Fatalf("onebitwalktype1: unexpected type, %v", t)
}
}
// Generates live pointer value maps for arguments and local variables. The
// this argument and the in arguments are always assumed live. The vars
// argument is a slice of *Nodes.
-func (lv *Liveness) pointerMap(liveout bvec, vars []*Node, args, locals bvec) {
+func (lv *Liveness) pointerMap(liveout bvec, vars []ir.Node, args, locals bvec) {
for i := int32(0); ; i++ {
i = liveout.Next(i)
if i < 0 {
@@ -488,11 +490,11 @@ func (lv *Liveness) pointerMap(liveout bvec, vars []*Node, args, locals bvec) {
}
node := vars[i]
switch node.Class() {
- case PAUTO:
- onebitwalktype1(node.Type, node.Xoffset+lv.stkptrsize, locals)
+ case ir.PAUTO:
+ onebitwalktype1(node.Type(), node.Offset()+lv.stkptrsize, locals)
- case PPARAM, PPARAMOUT:
- onebitwalktype1(node.Type, node.Xoffset, args)
+ case ir.PPARAM, ir.PPARAMOUT:
+ onebitwalktype1(node.Type(), node.Offset(), args)
}
}
}
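pointerMap and onebitwalktype1 together encode liveness as one bit per pointer-sized word: a pointer at byte offset off within a slot sets bit off/Widthptr, and the alignment Fatalfs above guard that division. A sketch of the arithmetic, assuming a 64-bit target:

	const widthPtr = 8 // stand-in for Widthptr on 64-bit targets

	// pointerBit returns the bitmap index for a pointer word at byte
	// offset off, mirroring bv.Set(int32(off / int64(Widthptr))) above.
	func pointerBit(off int64) int32 {
		if off%widthPtr != 0 {
			panic("onebitwalktype1: misaligned pointer offset")
		}
		return int32(off / widthPtr)
	}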
@@ -509,7 +511,7 @@ func allUnsafe(f *ssa.Func) bool {
// go:nosplit functions are similar. Since safe points used to
// be coupled with stack checks, go:nosplit often actually
// means "no safe points in this function".
- return compiling_runtime || f.NoSplit
+ return base.Flag.CompilingRuntime || f.NoSplit
}
// markUnsafePoints finds unsafe points and computes lv.unsafePoints.
@@ -786,14 +788,14 @@ func (lv *Liveness) epilogue() {
// pointers to copy values back to the stack).
// TODO: if the output parameter is heap-allocated, then we
// don't need to keep the stack copy live?
- if lv.fn.Func.HasDefer() {
+ if lv.fn.Func().HasDefer() {
for i, n := range lv.vars {
- if n.Class() == PPARAMOUT {
- if n.Name.IsOutputParamHeapAddr() {
+ if n.Class() == ir.PPARAMOUT {
+ if n.Name().IsOutputParamHeapAddr() {
// Just to be paranoid. Heap addresses are PAUTOs.
- Fatalf("variable %v both output param and heap output param", n)
+ base.Fatalf("variable %v both output param and heap output param", n)
}
- if n.Name.Param.Heapaddr != nil {
+ if n.Name().Param.Heapaddr != nil {
// If this variable moved to the heap, then
// its stack copy is not live.
continue
@@ -801,22 +803,22 @@ func (lv *Liveness) epilogue() {
// Note: zeroing is handled by zeroResults in walk.go.
livedefer.Set(int32(i))
}
- if n.Name.IsOutputParamHeapAddr() {
+ if n.Name().IsOutputParamHeapAddr() {
// This variable will be overwritten early in the function
// prologue (from the result of a mallocgc) but we need to
// zero it in case that malloc causes a stack scan.
- n.Name.SetNeedzero(true)
+ n.Name().SetNeedzero(true)
livedefer.Set(int32(i))
}
- if n.Name.OpenDeferSlot() {
+ if n.Name().OpenDeferSlot() {
// Open-coded defer args slots must be live
// everywhere in a function, since a panic can
// occur (almost) anywhere. Because it is live
// everywhere, it must be zeroed on entry.
livedefer.Set(int32(i))
// It was already marked as Needzero when created.
- if !n.Name.Needzero() {
- Fatalf("all pointer-containing defer arg slots should have Needzero set")
+ if !n.Name().Needzero() {
+ base.Fatalf("all pointer-containing defer arg slots should have Needzero set")
}
}
}
@@ -878,7 +880,7 @@ func (lv *Liveness) epilogue() {
if b == lv.f.Entry {
if index != 0 {
- Fatalf("bad index for entry point: %v", index)
+ base.Fatalf("bad index for entry point: %v", index)
}
// Check to make sure only input variables are live.
@@ -886,10 +888,10 @@ func (lv *Liveness) epilogue() {
if !liveout.Get(int32(i)) {
continue
}
- if n.Class() == PPARAM {
+ if n.Class() == ir.PPARAM {
continue // ok
}
- Fatalf("bad live variable at entry of %v: %L", lv.fn.Func.Nname, n)
+ base.Fatalf("bad live variable at entry of %v: %L", lv.fn.Func().Nname, n)
}
// Record live variables.
@@ -902,7 +904,7 @@ func (lv *Liveness) epilogue() {
}
// If we have an open-coded deferreturn call, make a liveness map for it.
- if lv.fn.Func.OpenCodedDeferDisallowed() {
+ if lv.fn.Func().OpenCodedDeferDisallowed() {
lv.livenessMap.deferreturn = LivenessDontCare
} else {
lv.livenessMap.deferreturn = LivenessIndex{
@@ -919,8 +921,8 @@ func (lv *Liveness) epilogue() {
// the only things that can possibly be live are the
// input parameters.
for j, n := range lv.vars {
- if n.Class() != PPARAM && lv.stackMaps[0].Get(int32(j)) {
- lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Func.Nname, n)
+ if n.Class() != ir.PPARAM && lv.stackMaps[0].Get(int32(j)) {
+ lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Func().Nname, n)
}
}
}
@@ -966,7 +968,7 @@ func (lv *Liveness) compact(b *ssa.Block) {
}
func (lv *Liveness) showlive(v *ssa.Value, live bvec) {
- if debuglive == 0 || lv.fn.funcname() == "init" || strings.HasPrefix(lv.fn.funcname(), ".") {
+ if base.Flag.Live == 0 || ir.FuncName(lv.fn) == "init" || strings.HasPrefix(ir.FuncName(lv.fn), ".") {
return
}
if !(v == nil || v.Op.IsCall()) {
@@ -978,14 +980,14 @@ func (lv *Liveness) showlive(v *ssa.Value, live bvec) {
return
}
- pos := lv.fn.Func.Nname.Pos
+ pos := lv.fn.Func().Nname.Pos()
if v != nil {
pos = v.Pos
}
s := "live at "
if v == nil {
- s += fmt.Sprintf("entry to %s:", lv.fn.funcname())
+ s += fmt.Sprintf("entry to %s:", ir.FuncName(lv.fn))
} else if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil {
fn := sym.Fn.Name
if pos := strings.Index(fn, "."); pos >= 0 {
@@ -1002,7 +1004,7 @@ func (lv *Liveness) showlive(v *ssa.Value, live bvec) {
}
}
- Warnl(pos, s)
+ base.WarnfAt(pos, s)
}
func (lv *Liveness) printbvec(printed bool, name string, live bvec) bool {
@@ -1022,7 +1024,7 @@ func (lv *Liveness) printbvec(printed bool, name string, live bvec) bool {
if !live.Get(int32(i)) {
continue
}
- fmt.Printf("%s%s", comma, n.Sym.Name)
+ fmt.Printf("%s%s", comma, n.Sym().Name)
comma = ","
}
return true
@@ -1040,7 +1042,7 @@ func (lv *Liveness) printeffect(printed bool, name string, pos int32, x bool) bo
}
fmt.Printf("%s=", name)
if x {
- fmt.Printf("%s", lv.vars[pos].Sym.Name)
+ fmt.Printf("%s", lv.vars[pos].Sym().Name)
}
return true
@@ -1050,7 +1052,7 @@ func (lv *Liveness) printeffect(printed bool, name string, pos int32, x bool) bo
// This format synthesizes the information used during the multiple passes
// into a single presentation.
func (lv *Liveness) printDebug() {
- fmt.Printf("liveness: %s\n", lv.fn.funcname())
+ fmt.Printf("liveness: %s\n", ir.FuncName(lv.fn))
for i, b := range lv.f.Blocks {
if i > 0 {
@@ -1088,7 +1090,7 @@ func (lv *Liveness) printDebug() {
if b == lv.f.Entry {
live := lv.stackMaps[0]
- fmt.Printf("(%s) function entry\n", linestr(lv.fn.Func.Nname.Pos))
+ fmt.Printf("(%s) function entry\n", base.FmtPos(lv.fn.Func().Nname.Pos()))
fmt.Printf("\tlive=")
printed = false
for j, n := range lv.vars {
@@ -1105,7 +1107,7 @@ func (lv *Liveness) printDebug() {
}
for _, v := range b.Values {
- fmt.Printf("(%s) %v\n", linestr(v.Pos), v.LongString())
+ fmt.Printf("(%s) %v\n", base.FmtPos(v.Pos), v.LongString())
pcdata := lv.livenessMap.Get(v)
@@ -1162,11 +1164,11 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) {
// Size args bitmaps to be just large enough to hold the largest pointer.
// First, find the largest Xoffset node we care about.
// (Nodes without pointers aren't in lv.vars; see livenessShouldTrack.)
- var maxArgNode *Node
+ var maxArgNode ir.Node
for _, n := range lv.vars {
switch n.Class() {
- case PPARAM, PPARAMOUT:
- if maxArgNode == nil || n.Xoffset > maxArgNode.Xoffset {
+ case ir.PPARAM, ir.PPARAMOUT:
+ if maxArgNode == nil || n.Offset() > maxArgNode.Offset() {
maxArgNode = n
}
}
@@ -1174,7 +1176,7 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) {
// Next, find the offset of the largest pointer in the largest node.
var maxArgs int64
if maxArgNode != nil {
- maxArgs = maxArgNode.Xoffset + typeptrdata(maxArgNode.Type)
+ maxArgs = maxArgNode.Offset() + typeptrdata(maxArgNode.Type())
}
// Size locals bitmaps to be stkptrsize sized.
@@ -1214,7 +1216,7 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) {
// These symbols will be added to Ctxt.Data by addGCLocals
// after parallel compilation is done.
makeSym := func(tmpSym *obj.LSym) *obj.LSym {
- return Ctxt.LookupInit(fmt.Sprintf("gclocals·%x", md5.Sum(tmpSym.P)), func(lsym *obj.LSym) {
+ return base.Ctxt.LookupInit(fmt.Sprintf("gclocals·%x", md5.Sum(tmpSym.P)), func(lsym *obj.LSym) {
lsym.P = tmpSym.P
lsym.Set(obj.AttrContentAddressable, true)
})
@@ -1235,7 +1237,7 @@ func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap {
lv.prologue()
lv.solve()
lv.epilogue()
- if debuglive > 0 {
+ if base.Flag.Live > 0 {
lv.showlive(nil, lv.stackMaps[0])
for _, b := range f.Blocks {
for _, val := range b.Values {
@@ -1245,7 +1247,7 @@ func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap {
}
}
}
- if debuglive >= 2 {
+ if base.Flag.Live >= 2 {
lv.printDebug()
}
@@ -1264,7 +1266,7 @@ func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap {
}
// Emit the live pointer map data structures
- ls := e.curfn.Func.lsym
+ ls := e.curfn.Func().LSym
fninfo := ls.Func()
fninfo.GCArgs, fninfo.GCLocals = lv.emit()
@@ -1299,16 +1301,16 @@ func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap {
func isfat(t *types.Type) bool {
if t != nil {
switch t.Etype {
- case TSLICE, TSTRING,
- TINTER: // maybe remove later
+ case types.TSLICE, types.TSTRING,
+ types.TINTER: // maybe remove later
return true
- case TARRAY:
+ case types.TARRAY:
// Array of 1 element, check if element is fat
if t.NumElem() == 1 {
return isfat(t.Elem())
}
return true
- case TSTRUCT:
+ case types.TSTRUCT:
// Struct with 1 field, check if field is fat
if t.NumFields() == 1 {
return isfat(t.Field(0).Type)
diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go
index 3552617401..c41d923f78 100644
--- a/src/cmd/compile/internal/gc/racewalk.go
+++ b/src/cmd/compile/internal/gc/racewalk.go
@@ -5,6 +5,8 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
"cmd/internal/sys"
@@ -47,9 +49,9 @@ var omit_pkgs = []string{
var norace_inst_pkgs = []string{"sync", "sync/atomic"}
func ispkgin(pkgs []string) bool {
- if myimportpath != "" {
+ if base.Ctxt.Pkgpath != "" {
for _, p := range pkgs {
- if myimportpath == p {
+ if base.Ctxt.Pkgpath == p {
return true
}
}
@@ -58,22 +60,22 @@ func ispkgin(pkgs []string) bool {
return false
}
-func instrument(fn *Node) {
- if fn.Func.Pragma&Norace != 0 {
+func instrument(fn ir.Node) {
+ if fn.Func().Pragma&ir.Norace != 0 {
return
}
- if !flag_race || !ispkgin(norace_inst_pkgs) {
- fn.Func.SetInstrumentBody(true)
+ if !base.Flag.Race || !ispkgin(norace_inst_pkgs) {
+ fn.Func().SetInstrumentBody(true)
}
- if flag_race {
- lno := lineno
- lineno = src.NoXPos
+ if base.Flag.Race {
+ lno := base.Pos
+ base.Pos = src.NoXPos
if thearch.LinkArch.Arch.Family != sys.AMD64 {
- fn.Func.Enter.Prepend(mkcall("racefuncenterfp", nil, nil))
- fn.Func.Exit.Append(mkcall("racefuncexit", nil, nil))
+ fn.Func().Enter.Prepend(mkcall("racefuncenterfp", nil, nil))
+ fn.Func().Exit.Append(mkcall("racefuncexit", nil, nil))
} else {
// nodpc is the PC of the caller as extracted by
@@ -81,13 +83,13 @@ func instrument(fn *Node) {
// This only works for amd64. This will not
// work on arm or others that might support
// race in the future.
- nodpc := nodfp.copy()
- nodpc.Type = types.Types[TUINTPTR]
- nodpc.Xoffset = int64(-Widthptr)
- fn.Func.Dcl = append(fn.Func.Dcl, nodpc)
- fn.Func.Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc))
- fn.Func.Exit.Append(mkcall("racefuncexit", nil, nil))
+ nodpc := ir.Copy(nodfp)
+ nodpc.SetType(types.Types[types.TUINTPTR])
+ nodpc.SetOffset(int64(-Widthptr))
+ fn.Func().Dcl = append(fn.Func().Dcl, nodpc)
+ fn.Func().Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc))
+ fn.Func().Exit.Append(mkcall("racefuncexit", nil, nil))
}
- lineno = lno
+ base.Pos = lno
}
}
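The effect of instrument is easiest to see as source. For a function compiled with -race, the pass brackets the body with runtime entry/exit hooks (racefuncenterfp on most architectures; on amd64, racefuncenter with the caller's PC read from -Widthptr(FP)), while the walk phase later wraps individual loads and stores. A sketch of the inserted IR, not literal compiler output:

	func f(p *int) {
		// inserted by instrument:
		//   runtime.racefuncenter(callerpc)   (amd64)
		//   runtime.racefuncenterfp()         (other architectures)
		*p = 1 // rewritten during walk to include a racewrite call
		// inserted by instrument:
		//   runtime.racefuncexit()
	}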
diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go
index 1b4d765d42..0ff00cca44 100644
--- a/src/cmd/compile/internal/gc/range.go
+++ b/src/cmd/compile/internal/gc/range.go
@@ -5,13 +5,15 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/sys"
"unicode/utf8"
)
// range
-func typecheckrange(n *Node) {
+func typecheckrange(n ir.Node) {
// Typechecking order is important here:
// 0. first typecheck range expression (slice/map/chan),
// it is evaluated only once and so logically it is not part of the loop.
@@ -25,7 +27,7 @@ func typecheckrange(n *Node) {
// second half of dance, the first half being typecheckrangeExpr
n.SetTypecheck(1)
- ls := n.List.Slice()
+ ls := n.List().Slice()
for i1, n1 := range ls {
if n1.Typecheck() == 0 {
ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
@@ -33,21 +35,21 @@ func typecheckrange(n *Node) {
}
decldepth++
- typecheckslice(n.Nbody.Slice(), ctxStmt)
+ typecheckslice(n.Body().Slice(), ctxStmt)
decldepth--
}
-func typecheckrangeExpr(n *Node) {
- n.Right = typecheck(n.Right, ctxExpr)
+func typecheckrangeExpr(n ir.Node) {
+ n.SetRight(typecheck(n.Right(), ctxExpr))
- t := n.Right.Type
+ t := n.Right().Type()
if t == nil {
return
}
// delicate little dance. see typecheckas2
- ls := n.List.Slice()
+ ls := n.List().Slice()
for i1, n1 := range ls {
- if n1.Name == nil || n1.Name.Defn != n {
+ if n1.Name() == nil || n1.Name().Defn != n {
ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
}
}
@@ -55,80 +57,80 @@ func typecheckrangeExpr(n *Node) {
if t.IsPtr() && t.Elem().IsArray() {
t = t.Elem()
}
- n.Type = t
+ n.SetType(t)
var t1, t2 *types.Type
toomany := false
switch t.Etype {
default:
- yyerrorl(n.Pos, "cannot range over %L", n.Right)
+ base.ErrorfAt(n.Pos(), "cannot range over %L", n.Right())
return
- case TARRAY, TSLICE:
- t1 = types.Types[TINT]
+ case types.TARRAY, types.TSLICE:
+ t1 = types.Types[types.TINT]
t2 = t.Elem()
- case TMAP:
+ case types.TMAP:
t1 = t.Key()
t2 = t.Elem()
- case TCHAN:
+ case types.TCHAN:
if !t.ChanDir().CanRecv() {
- yyerrorl(n.Pos, "invalid operation: range %v (receive from send-only type %v)", n.Right, n.Right.Type)
+ base.ErrorfAt(n.Pos(), "invalid operation: range %v (receive from send-only type %v)", n.Right(), n.Right().Type())
return
}
t1 = t.Elem()
t2 = nil
- if n.List.Len() == 2 {
+ if n.List().Len() == 2 {
toomany = true
}
- case TSTRING:
- t1 = types.Types[TINT]
+ case types.TSTRING:
+ t1 = types.Types[types.TINT]
t2 = types.Runetype
}
- if n.List.Len() > 2 || toomany {
- yyerrorl(n.Pos, "too many variables in range")
+ if n.List().Len() > 2 || toomany {
+ base.ErrorfAt(n.Pos(), "too many variables in range")
}
- var v1, v2 *Node
- if n.List.Len() != 0 {
- v1 = n.List.First()
+ var v1, v2 ir.Node
+ if n.List().Len() != 0 {
+ v1 = n.List().First()
}
- if n.List.Len() > 1 {
- v2 = n.List.Second()
+ if n.List().Len() > 1 {
+ v2 = n.List().Second()
}
// this is not only an optimization but also a requirement in the spec.
// "if the second iteration variable is the blank identifier, the range
// clause is equivalent to the same clause with only the first variable
// present."
- if v2.isBlank() {
+ if ir.IsBlank(v2) {
if v1 != nil {
- n.List.Set1(v1)
+ n.PtrList().Set1(v1)
}
v2 = nil
}
if v1 != nil {
- if v1.Name != nil && v1.Name.Defn == n {
- v1.Type = t1
- } else if v1.Type != nil {
- if op, why := assignop(t1, v1.Type); op == OXXX {
- yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t1, v1, why)
+ if v1.Name() != nil && v1.Name().Defn == n {
+ v1.SetType(t1)
+ } else if v1.Type() != nil {
+ if op, why := assignop(t1, v1.Type()); op == ir.OXXX {
+ base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t1, v1, why)
}
}
checkassign(n, v1)
}
if v2 != nil {
- if v2.Name != nil && v2.Name.Defn == n {
- v2.Type = t2
- } else if v2.Type != nil {
- if op, why := assignop(t2, v2.Type); op == OXXX {
- yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t2, v2, why)
+ if v2.Name() != nil && v2.Name().Defn == n {
+ v2.SetType(t2)
+ } else if v2.Type() != nil {
+ if op, why := assignop(t2, v2.Type()); op == ir.OXXX {
+ base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t2, v2, why)
}
}
checkassign(n, v2)
@@ -155,12 +157,12 @@ func cheapComputableIndex(width int64) bool {
// simpler forms. The result must be assigned back to n.
// Node n may also be modified in place, and may also be
// the returned node.
-func walkrange(n *Node) *Node {
+func walkrange(n ir.Node) ir.Node {
if isMapClear(n) {
- m := n.Right
+ m := n.Right()
lno := setlineno(m)
n = mapClear(m)
- lineno = lno
+ base.Pos = lno
return n
}
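This fast path fires for the map-clear idiom that isMapClear (later in this file) recognizes:

	for k := range m {
		delete(m, k)
	}

The whole loop is replaced by a single call to the runtime's mapclear(typ, m), built by mapClear below. The key type must have reflexive equality for this to be sound; see isreflexive in reflect.go further down this diff.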
@@ -171,65 +173,65 @@ func walkrange(n *Node) *Node {
// hb: hidden bool
// a, v1, v2: not hidden aggregate, val 1, 2
- t := n.Type
+ t := n.Type()
- a := n.Right
+ a := n.Right()
lno := setlineno(a)
- n.Right = nil
+ n.SetRight(nil)
- var v1, v2 *Node
- l := n.List.Len()
+ var v1, v2 ir.Node
+ l := n.List().Len()
if l > 0 {
- v1 = n.List.First()
+ v1 = n.List().First()
}
if l > 1 {
- v2 = n.List.Second()
+ v2 = n.List().Second()
}
- if v2.isBlank() {
+ if ir.IsBlank(v2) {
v2 = nil
}
- if v1.isBlank() && v2 == nil {
+ if ir.IsBlank(v1) && v2 == nil {
v1 = nil
}
if v1 == nil && v2 != nil {
- Fatalf("walkrange: v2 != nil while v1 == nil")
+ base.Fatalf("walkrange: v2 != nil while v1 == nil")
}
// n.List has no meaning anymore, clear it
// to avoid erroneous processing by racewalk.
- n.List.Set(nil)
+ n.PtrList().Set(nil)
- var ifGuard *Node
+ var ifGuard ir.Node
- translatedLoopOp := OFOR
+ translatedLoopOp := ir.OFOR
- var body []*Node
- var init []*Node
+ var body []ir.Node
+ var init []ir.Node
switch t.Etype {
default:
- Fatalf("walkrange")
+ base.Fatalf("walkrange")
- case TARRAY, TSLICE:
+ case types.TARRAY, types.TSLICE:
if arrayClear(n, v1, v2, a) {
- lineno = lno
+ base.Pos = lno
return n
}
// order.stmt arranged for a copy of the array/slice variable if needed.
ha := a
- hv1 := temp(types.Types[TINT])
- hn := temp(types.Types[TINT])
+ hv1 := temp(types.Types[types.TINT])
+ hn := temp(types.Types[types.TINT])
- init = append(init, nod(OAS, hv1, nil))
- init = append(init, nod(OAS, hn, nod(OLEN, ha, nil)))
+ init = append(init, ir.Nod(ir.OAS, hv1, nil))
+ init = append(init, ir.Nod(ir.OAS, hn, ir.Nod(ir.OLEN, ha, nil)))
- n.Left = nod(OLT, hv1, hn)
- n.Right = nod(OAS, hv1, nod(OADD, hv1, nodintconst(1)))
+ n.SetLeft(ir.Nod(ir.OLT, hv1, hn))
+ n.SetRight(ir.Nod(ir.OAS, hv1, ir.Nod(ir.OADD, hv1, nodintconst(1))))
// for range ha { body }
if v1 == nil {
@@ -238,21 +240,21 @@ func walkrange(n *Node) *Node {
// for v1 := range ha { body }
if v2 == nil {
- body = []*Node{nod(OAS, v1, hv1)}
+ body = []ir.Node{ir.Nod(ir.OAS, v1, hv1)}
break
}
// for v1, v2 := range ha { body }
- if cheapComputableIndex(n.Type.Elem().Width) {
+ if cheapComputableIndex(n.Type().Elem().Width) {
// v1, v2 = hv1, ha[hv1]
- tmp := nod(OINDEX, ha, hv1)
+ tmp := ir.Nod(ir.OINDEX, ha, hv1)
tmp.SetBounded(true)
// Use OAS2 to correctly handle assignments
// of the form "v1, a[v1] := range".
- a := nod(OAS2, nil, nil)
- a.List.Set2(v1, v2)
- a.Rlist.Set2(hv1, tmp)
- body = []*Node{a}
+ a := ir.Nod(ir.OAS2, nil, nil)
+ a.PtrList().Set2(v1, v2)
+ a.PtrRlist().Set2(hv1, tmp)
+ body = []ir.Node{a}
break
}
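Written out as source, the TARRAY/TSLICE lowering assembled above turns "for v1, v2 := range a" with a cheaply indexable element into roughly:

	ha := a               // copy arranged by order.stmt
	hv1, hn := 0, len(ha) // the two OAS nodes appended to init
	for hv1 < hn {        // n.Left
		v1, v2 = hv1, ha[hv1] // the OAS2, with the bounds check elided
		// ... body ...
		hv1++ // n.Right
	}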
@@ -268,20 +270,20 @@ func walkrange(n *Node) *Node {
// TODO(austin): OFORUNTIL inhibits bounds-check
// elimination on the index variable (see #20711).
// Enhance the prove pass to understand this.
- ifGuard = nod(OIF, nil, nil)
- ifGuard.Left = nod(OLT, hv1, hn)
- translatedLoopOp = OFORUNTIL
+ ifGuard = ir.Nod(ir.OIF, nil, nil)
+ ifGuard.SetLeft(ir.Nod(ir.OLT, hv1, hn))
+ translatedLoopOp = ir.OFORUNTIL
- hp := temp(types.NewPtr(n.Type.Elem()))
- tmp := nod(OINDEX, ha, nodintconst(0))
+ hp := temp(types.NewPtr(n.Type().Elem()))
+ tmp := ir.Nod(ir.OINDEX, ha, nodintconst(0))
tmp.SetBounded(true)
- init = append(init, nod(OAS, hp, nod(OADDR, tmp, nil)))
+ init = append(init, ir.Nod(ir.OAS, hp, ir.Nod(ir.OADDR, tmp, nil)))
// Use OAS2 to correctly handle assignments
// of the form "v1, a[v1] := range".
- a := nod(OAS2, nil, nil)
- a.List.Set2(v1, v2)
- a.Rlist.Set2(hv1, nod(ODEREF, hp, nil))
+ a := ir.Nod(ir.OAS2, nil, nil)
+ a.PtrList().Set2(v1, v2)
+ a.PtrRlist().Set2(hv1, ir.Nod(ir.ODEREF, hp, nil))
body = append(body, a)
// Advance pointer as part of the late increment.
@@ -289,76 +291,76 @@ func walkrange(n *Node) *Node {
// This runs *after* the condition check, so we know
// advancing the pointer is safe and won't go past the
// end of the allocation.
- a = nod(OAS, hp, addptr(hp, t.Elem().Width))
+ a = ir.Nod(ir.OAS, hp, addptr(hp, t.Elem().Width))
a = typecheck(a, ctxStmt)
- n.List.Set1(a)
+ n.PtrList().Set1(a)
- case TMAP:
+ case types.TMAP:
// order.stmt allocated the iterator for us.
// we only use a once, so no copy needed.
ha := a
hit := prealloc[n]
- th := hit.Type
- n.Left = nil
+ th := hit.Type()
+ n.SetLeft(nil)
keysym := th.Field(0).Sym // depends on layout of iterator struct. See reflect.go:hiter
elemsym := th.Field(1).Sym // ditto
fn := syslook("mapiterinit")
fn = substArgTypes(fn, t.Key(), t.Elem(), th)
- init = append(init, mkcall1(fn, nil, nil, typename(t), ha, nod(OADDR, hit, nil)))
- n.Left = nod(ONE, nodSym(ODOT, hit, keysym), nodnil())
+ init = append(init, mkcall1(fn, nil, nil, typename(t), ha, ir.Nod(ir.OADDR, hit, nil)))
+ n.SetLeft(ir.Nod(ir.ONE, nodSym(ir.ODOT, hit, keysym), nodnil()))
fn = syslook("mapiternext")
fn = substArgTypes(fn, th)
- n.Right = mkcall1(fn, nil, nil, nod(OADDR, hit, nil))
+ n.SetRight(mkcall1(fn, nil, nil, ir.Nod(ir.OADDR, hit, nil)))
- key := nodSym(ODOT, hit, keysym)
- key = nod(ODEREF, key, nil)
+ key := nodSym(ir.ODOT, hit, keysym)
+ key = ir.Nod(ir.ODEREF, key, nil)
if v1 == nil {
body = nil
} else if v2 == nil {
- body = []*Node{nod(OAS, v1, key)}
+ body = []ir.Node{ir.Nod(ir.OAS, v1, key)}
} else {
- elem := nodSym(ODOT, hit, elemsym)
- elem = nod(ODEREF, elem, nil)
- a := nod(OAS2, nil, nil)
- a.List.Set2(v1, v2)
- a.Rlist.Set2(key, elem)
- body = []*Node{a}
+ elem := nodSym(ir.ODOT, hit, elemsym)
+ elem = ir.Nod(ir.ODEREF, elem, nil)
+ a := ir.Nod(ir.OAS2, nil, nil)
+ a.PtrList().Set2(v1, v2)
+ a.PtrRlist().Set2(key, elem)
+ body = []ir.Node{a}
}
- case TCHAN:
+ case types.TCHAN:
// order.stmt arranged for a copy of the channel variable.
ha := a
- n.Left = nil
+ n.SetLeft(nil)
hv1 := temp(t.Elem())
hv1.SetTypecheck(1)
if t.Elem().HasPointers() {
- init = append(init, nod(OAS, hv1, nil))
+ init = append(init, ir.Nod(ir.OAS, hv1, nil))
}
- hb := temp(types.Types[TBOOL])
+ hb := temp(types.Types[types.TBOOL])
- n.Left = nod(ONE, hb, nodbool(false))
- a := nod(OAS2RECV, nil, nil)
+ n.SetLeft(ir.Nod(ir.ONE, hb, nodbool(false)))
+ a := ir.Nod(ir.OAS2RECV, nil, nil)
a.SetTypecheck(1)
- a.List.Set2(hv1, hb)
- a.Right = nod(ORECV, ha, nil)
- n.Left.Ninit.Set1(a)
+ a.PtrList().Set2(hv1, hb)
+ a.SetRight(ir.Nod(ir.ORECV, ha, nil))
+ n.Left().PtrInit().Set1(a)
if v1 == nil {
body = nil
} else {
- body = []*Node{nod(OAS, v1, hv1)}
+ body = []ir.Node{ir.Nod(ir.OAS, v1, hv1)}
}
// Zero hv1. This prevents hv1 from being the sole, inaccessible
// reference to an otherwise GC-able value during the next channel receive.
// See issue 15281.
- body = append(body, nod(OAS, hv1, nil))
+ body = append(body, ir.Nod(ir.OAS, hv1, nil))
- case TSTRING:
+ case types.TSTRING:
// Transform string range statements like "for v1, v2 = range a" into
//
// ha := a
@@ -377,84 +379,84 @@ func walkrange(n *Node) *Node {
// order.stmt arranged for a copy of the string variable.
ha := a
- hv1 := temp(types.Types[TINT])
- hv1t := temp(types.Types[TINT])
+ hv1 := temp(types.Types[types.TINT])
+ hv1t := temp(types.Types[types.TINT])
hv2 := temp(types.Runetype)
// hv1 := 0
- init = append(init, nod(OAS, hv1, nil))
+ init = append(init, ir.Nod(ir.OAS, hv1, nil))
// hv1 < len(ha)
- n.Left = nod(OLT, hv1, nod(OLEN, ha, nil))
+ n.SetLeft(ir.Nod(ir.OLT, hv1, ir.Nod(ir.OLEN, ha, nil)))
if v1 != nil {
// hv1t = hv1
- body = append(body, nod(OAS, hv1t, hv1))
+ body = append(body, ir.Nod(ir.OAS, hv1t, hv1))
}
// hv2 := rune(ha[hv1])
- nind := nod(OINDEX, ha, hv1)
+ nind := ir.Nod(ir.OINDEX, ha, hv1)
nind.SetBounded(true)
- body = append(body, nod(OAS, hv2, conv(nind, types.Runetype)))
+ body = append(body, ir.Nod(ir.OAS, hv2, conv(nind, types.Runetype)))
// if hv2 < utf8.RuneSelf
- nif := nod(OIF, nil, nil)
- nif.Left = nod(OLT, hv2, nodintconst(utf8.RuneSelf))
+ nif := ir.Nod(ir.OIF, nil, nil)
+ nif.SetLeft(ir.Nod(ir.OLT, hv2, nodintconst(utf8.RuneSelf)))
// hv1++
- nif.Nbody.Set1(nod(OAS, hv1, nod(OADD, hv1, nodintconst(1))))
+ nif.PtrBody().Set1(ir.Nod(ir.OAS, hv1, ir.Nod(ir.OADD, hv1, nodintconst(1))))
// } else {
- eif := nod(OAS2, nil, nil)
- nif.Rlist.Set1(eif)
+ eif := ir.Nod(ir.OAS2, nil, nil)
+ nif.PtrRlist().Set1(eif)
// hv2, hv1 = decoderune(ha, hv1)
- eif.List.Set2(hv2, hv1)
+ eif.PtrList().Set2(hv2, hv1)
fn := syslook("decoderune")
- eif.Rlist.Set1(mkcall1(fn, fn.Type.Results(), nil, ha, hv1))
+ eif.PtrRlist().Set1(mkcall1(fn, fn.Type().Results(), nil, ha, hv1))
body = append(body, nif)
if v1 != nil {
if v2 != nil {
// v1, v2 = hv1t, hv2
- a := nod(OAS2, nil, nil)
- a.List.Set2(v1, v2)
- a.Rlist.Set2(hv1t, hv2)
+ a := ir.Nod(ir.OAS2, nil, nil)
+ a.PtrList().Set2(v1, v2)
+ a.PtrRlist().Set2(hv1t, hv2)
body = append(body, a)
} else {
// v1 = hv1t
- body = append(body, nod(OAS, v1, hv1t))
+ body = append(body, ir.Nod(ir.OAS, v1, hv1t))
}
}
}
- n.Op = translatedLoopOp
+ n.SetOp(translatedLoopOp)
typecheckslice(init, ctxStmt)
if ifGuard != nil {
- ifGuard.Ninit.Append(init...)
+ ifGuard.PtrInit().Append(init...)
ifGuard = typecheck(ifGuard, ctxStmt)
} else {
- n.Ninit.Append(init...)
+ n.PtrInit().Append(init...)
}
- typecheckslice(n.Left.Ninit.Slice(), ctxStmt)
+ typecheckslice(n.Left().Init().Slice(), ctxStmt)
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- n.Right = typecheck(n.Right, ctxStmt)
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ n.SetRight(typecheck(n.Right(), ctxStmt))
typecheckslice(body, ctxStmt)
- n.Nbody.Prepend(body...)
+ n.PtrBody().Prepend(body...)
if ifGuard != nil {
- ifGuard.Nbody.Set1(n)
+ ifGuard.PtrBody().Set1(n)
n = ifGuard
}
n = walkstmt(n)
- lineno = lno
+ base.Pos = lno
return n
}
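For the TSTRING case, the transformation built above expands, in source form, to approximately:

	ha := a
	var hv1, hv1t int
	var hv2 rune
	for hv1 < len(ha) {
		hv1t = hv1
		hv2 = rune(ha[hv1]) // bounds check elided via SetBounded
		if hv2 < utf8.RuneSelf {
			hv1++ // single-byte rune: advance directly
		} else {
			hv2, hv1 = decoderune(ha, hv1) // runtime helper for multi-byte runes
		}
		v1, v2 = hv1t, hv2
		// ... body ...
	}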
@@ -465,41 +467,41 @@ func walkrange(n *Node) *Node {
// }
//
// where == for keys of map m is reflexive.
-func isMapClear(n *Node) bool {
- if Debug.N != 0 || instrumenting {
+func isMapClear(n ir.Node) bool {
+ if base.Flag.N != 0 || instrumenting {
return false
}
- if n.Op != ORANGE || n.Type.Etype != TMAP || n.List.Len() != 1 {
+ if n.Op() != ir.ORANGE || n.Type().Etype != types.TMAP || n.List().Len() != 1 {
return false
}
- k := n.List.First()
- if k == nil || k.isBlank() {
+ k := n.List().First()
+ if k == nil || ir.IsBlank(k) {
return false
}
// Require k to be a new variable name.
- if k.Name == nil || k.Name.Defn != n {
+ if k.Name() == nil || k.Name().Defn != n {
return false
}
- if n.Nbody.Len() != 1 {
+ if n.Body().Len() != 1 {
return false
}
- stmt := n.Nbody.First() // only stmt in body
- if stmt == nil || stmt.Op != ODELETE {
+ stmt := n.Body().First() // only stmt in body
+ if stmt == nil || stmt.Op() != ir.ODELETE {
return false
}
- m := n.Right
- if !samesafeexpr(stmt.List.First(), m) || !samesafeexpr(stmt.List.Second(), k) {
+ m := n.Right()
+ if !samesafeexpr(stmt.List().First(), m) || !samesafeexpr(stmt.List().Second(), k) {
return false
}
// Keys where equality is not reflexive cannot be deleted from maps.
- if !isreflexive(m.Type.Key()) {
+ if !isreflexive(m.Type().Key()) {
return false
}
@@ -507,8 +509,8 @@ func isMapClear(n *Node) bool {
}
// mapClear constructs a call to runtime.mapclear for the map m.
-func mapClear(m *Node) *Node {
- t := m.Type
+func mapClear(m ir.Node) ir.Node {
+ t := m.Type()
// instantiate mapclear(typ *type, hmap map[any]any)
fn := syslook("mapclear")
@@ -532,8 +534,8 @@ func mapClear(m *Node) *Node {
// in which the evaluation of a is side-effect-free.
//
// Parameters are as in walkrange: "for v1, v2 = range a".
-func arrayClear(n, v1, v2, a *Node) bool {
- if Debug.N != 0 || instrumenting {
+func arrayClear(n, v1, v2, a ir.Node) bool {
+ if base.Flag.N != 0 || instrumenting {
return false
}
@@ -541,21 +543,21 @@ func arrayClear(n, v1, v2, a *Node) bool {
return false
}
- if n.Nbody.Len() != 1 || n.Nbody.First() == nil {
+ if n.Body().Len() != 1 || n.Body().First() == nil {
return false
}
- stmt := n.Nbody.First() // only stmt in body
- if stmt.Op != OAS || stmt.Left.Op != OINDEX {
+ stmt := n.Body().First() // only stmt in body
+ if stmt.Op() != ir.OAS || stmt.Left().Op() != ir.OINDEX {
return false
}
- if !samesafeexpr(stmt.Left.Left, a) || !samesafeexpr(stmt.Left.Right, v1) {
+ if !samesafeexpr(stmt.Left().Left(), a) || !samesafeexpr(stmt.Left().Right(), v1) {
return false
}
- elemsize := n.Type.Elem().Width
- if elemsize <= 0 || !isZero(stmt.Right) {
+ elemsize := n.Type().Elem().Width
+ if elemsize <= 0 || !isZero(stmt.Right()) {
return false
}
@@ -566,63 +568,63 @@ func arrayClear(n, v1, v2, a *Node) bool {
// memclr{NoHeap,Has}Pointers(hp, hn)
// i = len(a) - 1
// }
- n.Op = OIF
+ n.SetOp(ir.OIF)
- n.Nbody.Set(nil)
- n.Left = nod(ONE, nod(OLEN, a, nil), nodintconst(0))
+ n.PtrBody().Set(nil)
+ n.SetLeft(ir.Nod(ir.ONE, ir.Nod(ir.OLEN, a, nil), nodintconst(0)))
// hp = &a[0]
- hp := temp(types.Types[TUNSAFEPTR])
+ hp := temp(types.Types[types.TUNSAFEPTR])
- tmp := nod(OINDEX, a, nodintconst(0))
+ tmp := ir.Nod(ir.OINDEX, a, nodintconst(0))
tmp.SetBounded(true)
- tmp = nod(OADDR, tmp, nil)
- tmp = convnop(tmp, types.Types[TUNSAFEPTR])
- n.Nbody.Append(nod(OAS, hp, tmp))
+ tmp = ir.Nod(ir.OADDR, tmp, nil)
+ tmp = convnop(tmp, types.Types[types.TUNSAFEPTR])
+ n.PtrBody().Append(ir.Nod(ir.OAS, hp, tmp))
// hn = len(a) * sizeof(elem(a))
- hn := temp(types.Types[TUINTPTR])
+ hn := temp(types.Types[types.TUINTPTR])
- tmp = nod(OLEN, a, nil)
- tmp = nod(OMUL, tmp, nodintconst(elemsize))
- tmp = conv(tmp, types.Types[TUINTPTR])
- n.Nbody.Append(nod(OAS, hn, tmp))
+ tmp = ir.Nod(ir.OLEN, a, nil)
+ tmp = ir.Nod(ir.OMUL, tmp, nodintconst(elemsize))
+ tmp = conv(tmp, types.Types[types.TUINTPTR])
+ n.PtrBody().Append(ir.Nod(ir.OAS, hn, tmp))
- var fn *Node
- if a.Type.Elem().HasPointers() {
+ var fn ir.Node
+ if a.Type().Elem().HasPointers() {
// memclrHasPointers(hp, hn)
- Curfn.Func.setWBPos(stmt.Pos)
+ Curfn.Func().SetWBPos(stmt.Pos())
fn = mkcall("memclrHasPointers", nil, nil, hp, hn)
} else {
// memclrNoHeapPointers(hp, hn)
fn = mkcall("memclrNoHeapPointers", nil, nil, hp, hn)
}
- n.Nbody.Append(fn)
+ n.PtrBody().Append(fn)
// i = len(a) - 1
- v1 = nod(OAS, v1, nod(OSUB, nod(OLEN, a, nil), nodintconst(1)))
+ v1 = ir.Nod(ir.OAS, v1, ir.Nod(ir.OSUB, ir.Nod(ir.OLEN, a, nil), nodintconst(1)))
- n.Nbody.Append(v1)
+ n.PtrBody().Append(v1)
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- typecheckslice(n.Nbody.Slice(), ctxStmt)
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ typecheckslice(n.Body().Slice(), ctxStmt)
n = walkstmt(n)
return true
}
// addptr returns (*T)(uintptr(p) + n).
-func addptr(p *Node, n int64) *Node {
- t := p.Type
+func addptr(p ir.Node, n int64) ir.Node {
+ t := p.Type()
- p = nod(OCONVNOP, p, nil)
- p.Type = types.Types[TUINTPTR]
+ p = ir.Nod(ir.OCONVNOP, p, nil)
+ p.SetType(types.Types[types.TUINTPTR])
- p = nod(OADD, p, nodintconst(n))
+ p = ir.Nod(ir.OADD, p, nodintconst(n))
- p = nod(OCONVNOP, p, nil)
- p.Type = t
+ p = ir.Nod(ir.OCONVNOP, p, nil)
+ p.SetType(t)
return p
}
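As the doc comment says, addptr computes (*T)(uintptr(p) + n); the OCONVNOP pair lets the compiler do the addition without an intermediate pointer-typed temporary that the write barrier or GC would have to see. The closest spelling in ordinary Go requires unsafe:

	p = (*T)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + uintptr(n)))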
diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go
index 9401eba7a5..dc9efc07fe 100644
--- a/src/cmd/compile/internal/gc/reflect.go
+++ b/src/cmd/compile/internal/gc/reflect.go
@@ -5,6 +5,8 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/gcprog"
"cmd/internal/obj"
@@ -73,10 +75,8 @@ func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{})
}
func makefield(name string, t *types.Type) *types.Field {
- f := types.NewField()
- f.Type = t
- f.Sym = (*types.Pkg)(nil).Lookup(name)
- return f
+ sym := (*types.Pkg)(nil).Lookup(name)
+ return types.NewField(src.NoXPos, sym, t)
}
// bmap makes the map bucket type given the type of the map.
@@ -85,7 +85,7 @@ func bmap(t *types.Type) *types.Type {
return t.MapType().Bucket
}
- bucket := types.New(TSTRUCT)
+ bucket := types.New(types.TSTRUCT)
keytype := t.Key()
elemtype := t.Elem()
dowidth(keytype)
@@ -100,7 +100,7 @@ func bmap(t *types.Type) *types.Type {
field := make([]*types.Field, 0, 5)
// The first field is: uint8 topbits[BUCKETSIZE].
- arr := types.NewArray(types.Types[TUINT8], BUCKETSIZE)
+ arr := types.NewArray(types.Types[types.TUINT8], BUCKETSIZE)
field = append(field, makefield("topbits", arr))
arr = types.NewArray(keytype, BUCKETSIZE)
@@ -121,7 +121,7 @@ func bmap(t *types.Type) *types.Type {
// See comment on hmap.overflow in runtime/map.go.
otyp := types.NewPtr(bucket)
if !elemtype.HasPointers() && !keytype.HasPointers() {
- otyp = types.Types[TUINTPTR]
+ otyp = types.Types[types.TUINTPTR]
}
overflow := makefield("overflow", otyp)
field = append(field, overflow)
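The bucket type assembled here mirrors runtime/map.go's bmap layout. For map[K]V it is, in effect (BUCKETSIZE is 8; keys or elems exceeding MAXKEYSIZE/MAXELEMSIZE are stored indirectly as pointers, per the checks below):

	type bucket struct {
		topbits  [8]uint8 // top hash bits, one per slot
		keys     [8]K
		elems    [8]V
		overflow *bucket // uintptr instead when K and V are pointer-free
	}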
@@ -133,52 +133,52 @@ func bmap(t *types.Type) *types.Type {
// Check invariants that map code depends on.
if !IsComparable(t.Key()) {
- Fatalf("unsupported map key type for %v", t)
+ base.Fatalf("unsupported map key type for %v", t)
}
if BUCKETSIZE < 8 {
- Fatalf("bucket size too small for proper alignment")
+ base.Fatalf("bucket size too small for proper alignment")
}
if keytype.Align > BUCKETSIZE {
- Fatalf("key align too big for %v", t)
+ base.Fatalf("key align too big for %v", t)
}
if elemtype.Align > BUCKETSIZE {
- Fatalf("elem align too big for %v", t)
+ base.Fatalf("elem align too big for %v", t)
}
if keytype.Width > MAXKEYSIZE {
- Fatalf("key size to large for %v", t)
+ base.Fatalf("key size to large for %v", t)
}
if elemtype.Width > MAXELEMSIZE {
- Fatalf("elem size to large for %v", t)
+ base.Fatalf("elem size to large for %v", t)
}
if t.Key().Width > MAXKEYSIZE && !keytype.IsPtr() {
- Fatalf("key indirect incorrect for %v", t)
+ base.Fatalf("key indirect incorrect for %v", t)
}
if t.Elem().Width > MAXELEMSIZE && !elemtype.IsPtr() {
- Fatalf("elem indirect incorrect for %v", t)
+ base.Fatalf("elem indirect incorrect for %v", t)
}
if keytype.Width%int64(keytype.Align) != 0 {
- Fatalf("key size not a multiple of key align for %v", t)
+ base.Fatalf("key size not a multiple of key align for %v", t)
}
if elemtype.Width%int64(elemtype.Align) != 0 {
- Fatalf("elem size not a multiple of elem align for %v", t)
+ base.Fatalf("elem size not a multiple of elem align for %v", t)
}
if bucket.Align%keytype.Align != 0 {
- Fatalf("bucket align not multiple of key align %v", t)
+ base.Fatalf("bucket align not multiple of key align %v", t)
}
if bucket.Align%elemtype.Align != 0 {
- Fatalf("bucket align not multiple of elem align %v", t)
+ base.Fatalf("bucket align not multiple of elem align %v", t)
}
if keys.Offset%int64(keytype.Align) != 0 {
- Fatalf("bad alignment of keys in bmap for %v", t)
+ base.Fatalf("bad alignment of keys in bmap for %v", t)
}
if elems.Offset%int64(elemtype.Align) != 0 {
- Fatalf("bad alignment of elems in bmap for %v", t)
+ base.Fatalf("bad alignment of elems in bmap for %v", t)
}
// Double-check that overflow field is final memory in struct,
// with no padding at end.
if overflow.Offset != bucket.Width-int64(Widthptr) {
- Fatalf("bad offset of overflow in bmap for %v", t)
+ base.Fatalf("bad offset of overflow in bmap for %v", t)
}
t.MapType().Bucket = bucket
@@ -210,18 +210,18 @@ func hmap(t *types.Type) *types.Type {
// }
// must match runtime/map.go:hmap.
fields := []*types.Field{
- makefield("count", types.Types[TINT]),
- makefield("flags", types.Types[TUINT8]),
- makefield("B", types.Types[TUINT8]),
- makefield("noverflow", types.Types[TUINT16]),
- makefield("hash0", types.Types[TUINT32]), // Used in walk.go for OMAKEMAP.
- makefield("buckets", types.NewPtr(bmap)), // Used in walk.go for OMAKEMAP.
+ makefield("count", types.Types[types.TINT]),
+ makefield("flags", types.Types[types.TUINT8]),
+ makefield("B", types.Types[types.TUINT8]),
+ makefield("noverflow", types.Types[types.TUINT16]),
+ makefield("hash0", types.Types[types.TUINT32]), // Used in walk.go for OMAKEMAP.
+ makefield("buckets", types.NewPtr(bmap)), // Used in walk.go for OMAKEMAP.
makefield("oldbuckets", types.NewPtr(bmap)),
- makefield("nevacuate", types.Types[TUINTPTR]),
- makefield("extra", types.Types[TUNSAFEPTR]),
+ makefield("nevacuate", types.Types[types.TUINTPTR]),
+ makefield("extra", types.Types[types.TUNSAFEPTR]),
}
- hmap := types.New(TSTRUCT)
+ hmap := types.New(types.TSTRUCT)
hmap.SetNoalg(true)
hmap.SetFields(fields)
dowidth(hmap)
@@ -229,7 +229,7 @@ func hmap(t *types.Type) *types.Type {
// The size of hmap should be 48 bytes on 64 bit
// and 28 bytes on 32 bit platforms.
if size := int64(8 + 5*Widthptr); hmap.Width != size {
- Fatalf("hmap size not correct: got %d, want %d", hmap.Width, size)
+ base.Fatalf("hmap size not correct: got %d, want %d", hmap.Width, size)
}
t.MapType().Hmap = hmap
@@ -269,28 +269,28 @@ func hiter(t *types.Type) *types.Type {
fields := []*types.Field{
makefield("key", types.NewPtr(t.Key())), // Used in range.go for TMAP.
makefield("elem", types.NewPtr(t.Elem())), // Used in range.go for TMAP.
- makefield("t", types.Types[TUNSAFEPTR]),
+ makefield("t", types.Types[types.TUNSAFEPTR]),
makefield("h", types.NewPtr(hmap)),
makefield("buckets", types.NewPtr(bmap)),
makefield("bptr", types.NewPtr(bmap)),
- makefield("overflow", types.Types[TUNSAFEPTR]),
- makefield("oldoverflow", types.Types[TUNSAFEPTR]),
- makefield("startBucket", types.Types[TUINTPTR]),
- makefield("offset", types.Types[TUINT8]),
- makefield("wrapped", types.Types[TBOOL]),
- makefield("B", types.Types[TUINT8]),
- makefield("i", types.Types[TUINT8]),
- makefield("bucket", types.Types[TUINTPTR]),
- makefield("checkBucket", types.Types[TUINTPTR]),
+ makefield("overflow", types.Types[types.TUNSAFEPTR]),
+ makefield("oldoverflow", types.Types[types.TUNSAFEPTR]),
+ makefield("startBucket", types.Types[types.TUINTPTR]),
+ makefield("offset", types.Types[types.TUINT8]),
+ makefield("wrapped", types.Types[types.TBOOL]),
+ makefield("B", types.Types[types.TUINT8]),
+ makefield("i", types.Types[types.TUINT8]),
+ makefield("bucket", types.Types[types.TUINTPTR]),
+ makefield("checkBucket", types.Types[types.TUINTPTR]),
}
// build iterator struct holding the above fields
- hiter := types.New(TSTRUCT)
+ hiter := types.New(types.TSTRUCT)
hiter.SetNoalg(true)
hiter.SetFields(fields)
dowidth(hiter)
if hiter.Width != int64(12*Widthptr) {
- Fatalf("hash_iter size not correct %d %d", hiter.Width, 12*Widthptr)
+ base.Fatalf("hash_iter size not correct %d %d", hiter.Width, 12*Widthptr)
}
t.MapType().Hiter = hiter
hiter.StructType().Map = t
@@ -301,40 +301,38 @@ func hiter(t *types.Type) *types.Type {
// stksize bytes of args.
func deferstruct(stksize int64) *types.Type {
makefield := func(name string, typ *types.Type) *types.Field {
- f := types.NewField()
- f.Type = typ
// Unlike the global makefield function, this one needs to set Pkg
// because these types might be compared (in SSA CSE sorting).
// TODO: unify this makefield and the global one above.
- f.Sym = &types.Sym{Name: name, Pkg: localpkg}
- return f
+ sym := &types.Sym{Name: name, Pkg: ir.LocalPkg}
+ return types.NewField(src.NoXPos, sym, typ)
}
- argtype := types.NewArray(types.Types[TUINT8], stksize)
+ argtype := types.NewArray(types.Types[types.TUINT8], stksize)
argtype.Width = stksize
argtype.Align = 1
// These fields must match the ones in runtime/runtime2.go:_defer and
// cmd/compile/internal/gc/ssa.go:(*state).call.
fields := []*types.Field{
- makefield("siz", types.Types[TUINT32]),
- makefield("started", types.Types[TBOOL]),
- makefield("heap", types.Types[TBOOL]),
- makefield("openDefer", types.Types[TBOOL]),
- makefield("sp", types.Types[TUINTPTR]),
- makefield("pc", types.Types[TUINTPTR]),
+ makefield("siz", types.Types[types.TUINT32]),
+ makefield("started", types.Types[types.TBOOL]),
+ makefield("heap", types.Types[types.TBOOL]),
+ makefield("openDefer", types.Types[types.TBOOL]),
+ makefield("sp", types.Types[types.TUINTPTR]),
+ makefield("pc", types.Types[types.TUINTPTR]),
// Note: the types here don't really matter. Defer structures
// are always scanned explicitly during stack copying and GC,
// so we make them uintptr type even though they are real pointers.
- makefield("fn", types.Types[TUINTPTR]),
- makefield("_panic", types.Types[TUINTPTR]),
- makefield("link", types.Types[TUINTPTR]),
- makefield("framepc", types.Types[TUINTPTR]),
- makefield("varp", types.Types[TUINTPTR]),
- makefield("fd", types.Types[TUINTPTR]),
+ makefield("fn", types.Types[types.TUINTPTR]),
+ makefield("_panic", types.Types[types.TUINTPTR]),
+ makefield("link", types.Types[types.TUINTPTR]),
+ makefield("framepc", types.Types[types.TUINTPTR]),
+ makefield("varp", types.Types[types.TUINTPTR]),
+ makefield("fd", types.Types[types.TUINTPTR]),
makefield("args", argtype),
}
// build struct holding the above fields
- s := types.New(TSTRUCT)
+ s := types.New(types.TSTRUCT)
s.SetNoalg(true)
s.SetFields(fields)
s.Width = widstruct(s, s, 0, 1)
@@ -349,7 +347,7 @@ func methodfunc(f *types.Type, receiver *types.Type) *types.Type {
if receiver != nil {
inLen++
}
- in := make([]*Node, 0, inLen)
+ in := make([]ir.Node, 0, inLen)
if receiver != nil {
d := anonfield(receiver)
@@ -363,19 +361,13 @@ func methodfunc(f *types.Type, receiver *types.Type) *types.Type {
}
outLen := f.Results().Fields().Len()
- out := make([]*Node, 0, outLen)
+ out := make([]ir.Node, 0, outLen)
for _, t := range f.Results().Fields().Slice() {
d := anonfield(t.Type)
out = append(out, d)
}
- t := functype(nil, in, out)
- if f.Nname() != nil {
- // Link to name of original method function.
- t.SetNname(f.Nname())
- }
-
- return t
+ return functype(nil, in, out)
}
// methods returns the methods of the non-interface type t, sorted by name.
@@ -401,10 +393,10 @@ func methods(t *types.Type) []*Sig {
var ms []*Sig
for _, f := range mt.AllMethods().Slice() {
if !f.IsMethod() {
- Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f)
+ base.Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f)
}
if f.Type.Recv() == nil {
- Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f)
+ base.Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f)
}
if f.Nointerface() {
continue
@@ -456,16 +448,16 @@ func methods(t *types.Type) []*Sig {
func imethods(t *types.Type) []*Sig {
var methods []*Sig
for _, f := range t.Fields().Slice() {
- if f.Type.Etype != TFUNC || f.Sym == nil {
+ if f.Type.Etype != types.TFUNC || f.Sym == nil {
continue
}
if f.Sym.IsBlank() {
- Fatalf("unexpected blank symbol in interface method set")
+ base.Fatalf("unexpected blank symbol in interface method set")
}
if n := len(methods); n > 0 {
last := methods[n-1]
if !last.name.Less(f.Sym) {
- Fatalf("sigcmp vs sortinter %v %v", last.name, f.Sym)
+ base.Fatalf("sigcmp vs sortinter %v %v", last.name, f.Sym)
}
}
@@ -498,17 +490,17 @@ func dimportpath(p *types.Pkg) {
// If we are compiling the runtime package, there are two runtime packages around
// -- localpkg and Runtimepkg. We don't want to produce import path symbols for
// both of them, so just produce one for localpkg.
- if myimportpath == "runtime" && p == Runtimepkg {
+ if base.Ctxt.Pkgpath == "runtime" && p == Runtimepkg {
return
}
str := p.Path
- if p == localpkg {
+ if p == ir.LocalPkg {
// Note: myimportpath != "", or else dgopkgpath won't call dimportpath.
- str = myimportpath
+ str = base.Ctxt.Pkgpath
}
- s := Ctxt.Lookup("type..importpath." + p.Prefix + ".")
+ s := base.Ctxt.Lookup("type..importpath." + p.Prefix + ".")
ot := dnameData(s, 0, str, "", nil, false)
ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
s.Set(obj.AttrContentAddressable, true)
@@ -520,13 +512,13 @@ func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int {
return duintptr(s, ot, 0)
}
- if pkg == localpkg && myimportpath == "" {
+ if pkg == ir.LocalPkg && base.Ctxt.Pkgpath == "" {
// If we don't know the full import path of the package being compiled
// (i.e. -p was not passed on the compiler command line), emit a reference to
// type..importpath.""., which the linker will rewrite using the correct import path.
// Every package that imports this one directly defines the symbol.
// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
- ns := Ctxt.Lookup(`type..importpath."".`)
+ ns := base.Ctxt.Lookup(`type..importpath."".`)
return dsymptr(s, ot, ns, 0)
}
@@ -539,13 +531,13 @@ func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int {
if pkg == nil {
return duint32(s, ot, 0)
}
- if pkg == localpkg && myimportpath == "" {
+ if pkg == ir.LocalPkg && base.Ctxt.Pkgpath == "" {
// If we don't know the full import path of the package being compiled
// (i.e. -p was not passed on the compiler command line), emit a reference to
// type..importpath.""., which the linker will rewrite using the correct import path.
// Every package that imports this one directly defines the symbol.
// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
- ns := Ctxt.Lookup(`type..importpath."".`)
+ ns := base.Ctxt.Lookup(`type..importpath."".`)
return dsymptrOff(s, ot, ns)
}
@@ -556,7 +548,7 @@ func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int {
// dnameField dumps a reflect.name for a struct field.
func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int {
if !types.IsExported(ft.Sym.Name) && ft.Sym.Pkg != spkg {
- Fatalf("package mismatch for %v", ft.Sym)
+ base.Fatalf("package mismatch for %v", ft.Sym)
}
nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name))
return dsymptr(lsym, ot, nsym, 0)
@@ -565,10 +557,10 @@ func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int {
// dnameData writes the contents of a reflect.name into s at offset ot.
func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported bool) int {
if len(name) > 1<<16-1 {
- Fatalf("name too long: %s", name)
+ base.Fatalf("name too long: %s", name)
}
if len(tag) > 1<<16-1 {
- Fatalf("tag too long: %s", tag)
+ base.Fatalf("tag too long: %s", tag)
}
// Encode name and tag. See reflect/type.go for details.
@@ -596,7 +588,7 @@ func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported b
copy(tb[2:], tag)
}
- ot = int(s.WriteBytes(Ctxt, int64(ot), b))
+ ot = int(s.WriteBytes(base.Ctxt, int64(ot), b))
if pkg != nil {
ot = dgopkgpathOff(s, ot, pkg)
@@ -633,7 +625,7 @@ func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym {
sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount)
dnameCount++
}
- s := Ctxt.Lookup(sname)
+ s := base.Ctxt.Lookup(sname)
if len(s.P) > 0 {
return s
}
@@ -653,7 +645,7 @@ func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int {
}
noff := int(Rnd(int64(ot), int64(Widthptr)))
if noff != ot {
- Fatalf("unexpected alignment in dextratype for %v", t)
+ base.Fatalf("unexpected alignment in dextratype for %v", t)
}
for _, a := range m {
@@ -665,11 +657,11 @@ func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int {
dataAdd += uncommonSize(t)
mcount := len(m)
if mcount != int(uint16(mcount)) {
- Fatalf("too many methods on %v: %d", t, mcount)
+ base.Fatalf("too many methods on %v: %d", t, mcount)
}
xcount := sort.Search(mcount, func(i int) bool { return !types.IsExported(m[i].name.Name) })
if dataAdd != int(uint32(dataAdd)) {
- Fatalf("methods are too far away on %v: %d", t, dataAdd)
+ base.Fatalf("methods are too far away on %v: %d", t, dataAdd)
}
ot = duint16(lsym, ot, uint16(mcount))
@@ -683,7 +675,7 @@ func typePkg(t *types.Type) *types.Pkg {
tsym := t.Sym
if tsym == nil {
switch t.Etype {
- case TARRAY, TSLICE, TPTR, TCHAN:
+ case types.TARRAY, types.TSLICE, types.TPTR, types.TCHAN:
if t.Elem() != nil {
tsym = t.Elem().Sym
}
@@ -726,32 +718,32 @@ func dmethodptrOff(s *obj.LSym, ot int, x *obj.LSym) int {
}
var kinds = []int{
- TINT: objabi.KindInt,
- TUINT: objabi.KindUint,
- TINT8: objabi.KindInt8,
- TUINT8: objabi.KindUint8,
- TINT16: objabi.KindInt16,
- TUINT16: objabi.KindUint16,
- TINT32: objabi.KindInt32,
- TUINT32: objabi.KindUint32,
- TINT64: objabi.KindInt64,
- TUINT64: objabi.KindUint64,
- TUINTPTR: objabi.KindUintptr,
- TFLOAT32: objabi.KindFloat32,
- TFLOAT64: objabi.KindFloat64,
- TBOOL: objabi.KindBool,
- TSTRING: objabi.KindString,
- TPTR: objabi.KindPtr,
- TSTRUCT: objabi.KindStruct,
- TINTER: objabi.KindInterface,
- TCHAN: objabi.KindChan,
- TMAP: objabi.KindMap,
- TARRAY: objabi.KindArray,
- TSLICE: objabi.KindSlice,
- TFUNC: objabi.KindFunc,
- TCOMPLEX64: objabi.KindComplex64,
- TCOMPLEX128: objabi.KindComplex128,
- TUNSAFEPTR: objabi.KindUnsafePointer,
+ types.TINT: objabi.KindInt,
+ types.TUINT: objabi.KindUint,
+ types.TINT8: objabi.KindInt8,
+ types.TUINT8: objabi.KindUint8,
+ types.TINT16: objabi.KindInt16,
+ types.TUINT16: objabi.KindUint16,
+ types.TINT32: objabi.KindInt32,
+ types.TUINT32: objabi.KindUint32,
+ types.TINT64: objabi.KindInt64,
+ types.TUINT64: objabi.KindUint64,
+ types.TUINTPTR: objabi.KindUintptr,
+ types.TFLOAT32: objabi.KindFloat32,
+ types.TFLOAT64: objabi.KindFloat64,
+ types.TBOOL: objabi.KindBool,
+ types.TSTRING: objabi.KindString,
+ types.TPTR: objabi.KindPtr,
+ types.TSTRUCT: objabi.KindStruct,
+ types.TINTER: objabi.KindInterface,
+ types.TCHAN: objabi.KindChan,
+ types.TMAP: objabi.KindMap,
+ types.TARRAY: objabi.KindArray,
+ types.TSLICE: objabi.KindSlice,
+ types.TFUNC: objabi.KindFunc,
+ types.TCOMPLEX64: objabi.KindComplex64,
+ types.TCOMPLEX128: objabi.KindComplex128,
+ types.TUNSAFEPTR: objabi.KindUnsafePointer,
}
// typeptrdata returns the length in bytes of the prefix of t
@@ -762,32 +754,32 @@ func typeptrdata(t *types.Type) int64 {
}
switch t.Etype {
- case TPTR,
- TUNSAFEPTR,
- TFUNC,
- TCHAN,
- TMAP:
+ case types.TPTR,
+ types.TUNSAFEPTR,
+ types.TFUNC,
+ types.TCHAN,
+ types.TMAP:
return int64(Widthptr)
- case TSTRING:
+ case types.TSTRING:
// struct { byte *str; intgo len; }
return int64(Widthptr)
- case TINTER:
+ case types.TINTER:
// struct { Itab *tab; void *data; } or
// struct { Type *type; void *data; }
// Note: see comment in plive.go:onebitwalktype1.
return 2 * int64(Widthptr)
- case TSLICE:
+ case types.TSLICE:
// struct { byte *array; uintgo len; uintgo cap; }
return int64(Widthptr)
- case TARRAY:
+ case types.TARRAY:
// haspointers already eliminated t.NumElem() == 0.
return (t.NumElem()-1)*t.Elem().Width + typeptrdata(t.Elem())
- case TSTRUCT:
+ case types.TSTRUCT:
// Find the last field that has pointers.
var lastPtrField *types.Field
for _, t1 := range t.Fields().Slice() {
@@ -798,7 +790,7 @@ func typeptrdata(t *types.Type) int64 {
return lastPtrField.Offset + typeptrdata(lastPtrField.Type)
default:
- Fatalf("typeptrdata: unexpected type, %v", t)
+ base.Fatalf("typeptrdata: unexpected type, %v", t)
return 0
}
}
@@ -898,7 +890,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
i = 1
}
if i&(i-1) != 0 {
- Fatalf("invalid alignment %d for %v", t.Align, t)
+ base.Fatalf("invalid alignment %d for %v", t.Align, t)
}
ot = duint8(lsym, ot, t.Align) // align
ot = duint8(lsym, ot, t.Align) // fieldAlign
@@ -989,7 +981,7 @@ func typesymprefix(prefix string, t *types.Type) *types.Sym {
func typenamesym(t *types.Type) *types.Sym {
if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
- Fatalf("typenamesym %v", t)
+ base.Fatalf("typenamesym %v", t)
}
s := typesym(t)
signatmu.Lock()
@@ -998,38 +990,38 @@ func typenamesym(t *types.Type) *types.Sym {
return s
}
-func typename(t *types.Type) *Node {
+func typename(t *types.Type) ir.Node {
s := typenamesym(t)
if s.Def == nil {
- n := newnamel(src.NoXPos, s)
- n.Type = types.Types[TUINT8]
- n.SetClass(PEXTERN)
+ n := ir.NewNameAt(src.NoXPos, s)
+ n.SetType(types.Types[types.TUINT8])
+ n.SetClass(ir.PEXTERN)
n.SetTypecheck(1)
- s.Def = asTypesNode(n)
+ s.Def = n
}
- n := nod(OADDR, asNode(s.Def), nil)
- n.Type = types.NewPtr(asNode(s.Def).Type)
+ n := ir.Nod(ir.OADDR, ir.AsNode(s.Def), nil)
+ n.SetType(types.NewPtr(ir.AsNode(s.Def).Type()))
n.SetTypecheck(1)
return n
}
-func itabname(t, itype *types.Type) *Node {
+func itabname(t, itype *types.Type) ir.Node {
if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
- Fatalf("itabname(%v, %v)", t, itype)
+ base.Fatalf("itabname(%v, %v)", t, itype)
}
s := itabpkg.Lookup(t.ShortString() + "," + itype.ShortString())
if s.Def == nil {
- n := newname(s)
- n.Type = types.Types[TUINT8]
- n.SetClass(PEXTERN)
+ n := NewName(s)
+ n.SetType(types.Types[types.TUINT8])
+ n.SetClass(ir.PEXTERN)
n.SetTypecheck(1)
- s.Def = asTypesNode(n)
+ s.Def = n
itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()})
}
- n := nod(OADDR, asNode(s.Def), nil)
- n.Type = types.NewPtr(asNode(s.Def).Type)
+ n := ir.Nod(ir.OADDR, ir.AsNode(s.Def), nil)
+ n.SetType(types.NewPtr(ir.AsNode(s.Def).Type()))
n.SetTypecheck(1)
return n
}
@@ -1038,35 +1030,35 @@ func itabname(t, itype *types.Type) *Node {
// That is, if x==x for all x of type t.
func isreflexive(t *types.Type) bool {
switch t.Etype {
- case TBOOL,
- TINT,
- TUINT,
- TINT8,
- TUINT8,
- TINT16,
- TUINT16,
- TINT32,
- TUINT32,
- TINT64,
- TUINT64,
- TUINTPTR,
- TPTR,
- TUNSAFEPTR,
- TSTRING,
- TCHAN:
+ case types.TBOOL,
+ types.TINT,
+ types.TUINT,
+ types.TINT8,
+ types.TUINT8,
+ types.TINT16,
+ types.TUINT16,
+ types.TINT32,
+ types.TUINT32,
+ types.TINT64,
+ types.TUINT64,
+ types.TUINTPTR,
+ types.TPTR,
+ types.TUNSAFEPTR,
+ types.TSTRING,
+ types.TCHAN:
return true
- case TFLOAT32,
- TFLOAT64,
- TCOMPLEX64,
- TCOMPLEX128,
- TINTER:
+ case types.TFLOAT32,
+ types.TFLOAT64,
+ types.TCOMPLEX64,
+ types.TCOMPLEX128,
+ types.TINTER:
return false
- case TARRAY:
+ case types.TARRAY:
return isreflexive(t.Elem())
- case TSTRUCT:
+ case types.TSTRUCT:
for _, t1 := range t.Fields().Slice() {
if !isreflexive(t1.Type) {
return false
@@ -1075,7 +1067,7 @@ func isreflexive(t *types.Type) bool {
return true
default:
- Fatalf("bad type for map key: %v", t)
+ base.Fatalf("bad type for map key: %v", t)
return false
}
}
@@ -1084,19 +1076,19 @@ func isreflexive(t *types.Type) bool {
// need the key to be updated.
func needkeyupdate(t *types.Type) bool {
switch t.Etype {
- case TBOOL, TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32,
- TINT64, TUINT64, TUINTPTR, TPTR, TUNSAFEPTR, TCHAN:
+ case types.TBOOL, types.TINT, types.TUINT, types.TINT8, types.TUINT8, types.TINT16, types.TUINT16, types.TINT32, types.TUINT32,
+ types.TINT64, types.TUINT64, types.TUINTPTR, types.TPTR, types.TUNSAFEPTR, types.TCHAN:
return false
- case TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, // floats and complex can be +0/-0
- TINTER,
- TSTRING: // strings might have smaller backing stores
+ case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128, // floats and complex can be +0/-0
+ types.TINTER,
+ types.TSTRING: // strings might have smaller backing stores
return true
- case TARRAY:
+ case types.TARRAY:
return needkeyupdate(t.Elem())
- case TSTRUCT:
+ case types.TSTRUCT:
for _, t1 := range t.Fields().Slice() {
if needkeyupdate(t1.Type) {
return true
@@ -1105,7 +1097,7 @@ func needkeyupdate(t *types.Type) bool {
return false
default:
- Fatalf("bad type for map key: %v", t)
+ base.Fatalf("bad type for map key: %v", t)
return true
}
}
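
The two predicates above encode the subtle parts of map-key semantics: isreflexive guards against keys where x != x (NaN), and needkeyupdate marks key types whose stored bits must be overwritten on reassignment, since +0.0 == -0.0 and equal strings may have different backing stores. A short, runnable illustration of both behaviors at the language level; this is ordinary Go, not compiler code:

package main

import (
	"fmt"
	"math"
)

func main() {
	// needkeyupdate: +0.0 == -0.0, so the second assignment hits the
	// same entry, and the runtime overwrites the stored key with +0.0.
	m := map[float64]string{}
	m[math.Copysign(0, -1)] = "under -0.0"
	m[0.0] = "updated"
	for k := range m {
		fmt.Println(math.Signbit(k), m[k]) // false updated
	}

	// isreflexive: NaN != NaN, so a NaN key can never be found again.
	nan := math.NaN()
	mm := map[float64]bool{nan: true}
	fmt.Println(mm[nan], len(mm)) // false 1
}

With needkeyupdate set for float64, mapassign overwrites the stored key on a matching insert, which is why Signbit reports false above.
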
@@ -1113,13 +1105,13 @@ func needkeyupdate(t *types.Type) bool {
// hashMightPanic reports whether the hash of a map key of type t might panic.
func hashMightPanic(t *types.Type) bool {
switch t.Etype {
- case TINTER:
+ case types.TINTER:
return true
- case TARRAY:
+ case types.TARRAY:
return hashMightPanic(t.Elem())
- case TSTRUCT:
+ case types.TSTRUCT:
for _, t1 := range t.Fields().Slice() {
if hashMightPanic(t1.Type) {
return true
@@ -1145,7 +1137,7 @@ func formalType(t *types.Type) *types.Type {
func dtypesym(t *types.Type) *obj.LSym {
t = formalType(t)
if t.IsUntyped() {
- Fatalf("dtypesym %v", t)
+ base.Fatalf("dtypesym %v", t)
}
s := typesym(t)
@@ -1168,9 +1160,9 @@ func dtypesym(t *types.Type) *obj.LSym {
dupok = obj.DUPOK
}
- if myimportpath != "runtime" || (tbase != types.Types[tbase.Etype] && tbase != types.Bytetype && tbase != types.Runetype && tbase != types.Errortype) { // int, float, etc
+ if base.Ctxt.Pkgpath != "runtime" || (tbase != types.Types[tbase.Etype] && tbase != types.Bytetype && tbase != types.Runetype && tbase != types.Errortype) { // int, float, etc
// named types from other files are defined only by those files
- if tbase.Sym != nil && tbase.Sym.Pkg != localpkg {
+ if tbase.Sym != nil && tbase.Sym.Pkg != ir.LocalPkg {
if i, ok := typeSymIdx[tbase]; ok {
lsym.Pkg = tbase.Sym.Pkg.Prefix
if t != tbase {
@@ -1183,7 +1175,7 @@ func dtypesym(t *types.Type) *obj.LSym {
return lsym
}
// TODO(mdempsky): Investigate whether this can happen.
- if tbase.Etype == TFORW {
+ if tbase.Etype == types.TFORW {
return lsym
}
}
@@ -1194,7 +1186,7 @@ func dtypesym(t *types.Type) *obj.LSym {
ot = dcommontype(lsym, t)
ot = dextratype(lsym, ot, t, 0)
- case TARRAY:
+ case types.TARRAY:
// ../../../../runtime/type.go:/arrayType
s1 := dtypesym(t.Elem())
t2 := types.NewSlice(t.Elem())
@@ -1205,14 +1197,14 @@ func dtypesym(t *types.Type) *obj.LSym {
ot = duintptr(lsym, ot, uint64(t.NumElem()))
ot = dextratype(lsym, ot, t, 0)
- case TSLICE:
+ case types.TSLICE:
// ../../../../runtime/type.go:/sliceType
s1 := dtypesym(t.Elem())
ot = dcommontype(lsym, t)
ot = dsymptr(lsym, ot, s1, 0)
ot = dextratype(lsym, ot, t, 0)
- case TCHAN:
+ case types.TCHAN:
// ../../../../runtime/type.go:/chanType
s1 := dtypesym(t.Elem())
ot = dcommontype(lsym, t)
@@ -1220,7 +1212,7 @@ func dtypesym(t *types.Type) *obj.LSym {
ot = duintptr(lsym, ot, uint64(t.ChanDir()))
ot = dextratype(lsym, ot, t, 0)
- case TFUNC:
+ case types.TFUNC:
for _, t1 := range t.Recvs().Fields().Slice() {
dtypesym(t1.Type)
}
@@ -1259,7 +1251,7 @@ func dtypesym(t *types.Type) *obj.LSym {
ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
}
- case TINTER:
+ case types.TINTER:
m := imethods(t)
n := len(m)
for _, a := range m {
@@ -1295,7 +1287,7 @@ func dtypesym(t *types.Type) *obj.LSym {
}
// ../../../../runtime/type.go:/mapType
- case TMAP:
+ case types.TMAP:
s1 := dtypesym(t.Key())
s2 := dtypesym(t.Elem())
s3 := dtypesym(bmap(t))
@@ -1335,8 +1327,8 @@ func dtypesym(t *types.Type) *obj.LSym {
ot = duint32(lsym, ot, flags)
ot = dextratype(lsym, ot, t, 0)
- case TPTR:
- if t.Elem().Etype == TANY {
+ case types.TPTR:
+ if t.Elem().Etype == types.TANY {
// ../../../../runtime/type.go:/UnsafePointerType
ot = dcommontype(lsym, t)
ot = dextratype(lsym, ot, t, 0)
@@ -1353,7 +1345,7 @@ func dtypesym(t *types.Type) *obj.LSym {
// ../../../../runtime/type.go:/structType
// for security, only the exported fields.
- case TSTRUCT:
+ case types.TSTRUCT:
fields := t.Fields().Slice()
for _, t1 := range fields {
dtypesym(t1.Type)
@@ -1387,7 +1379,7 @@ func dtypesym(t *types.Type) *obj.LSym {
ot = dsymptr(lsym, ot, dtypesym(f.Type), 0)
offsetAnon := uint64(f.Offset) << 1
if offsetAnon>>1 != uint64(f.Offset) {
- Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
+ base.Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
}
if f.Embedded != 0 {
offsetAnon |= 1
@@ -1404,7 +1396,7 @@ func dtypesym(t *types.Type) *obj.LSym {
//
// When buildmode=shared, all types are in typelinks so the
// runtime can deduplicate type pointers.
- keep := Ctxt.Flag_dynlink
+ keep := base.Ctxt.Flag_dynlink
if !keep && t.Sym == nil {
// For an unnamed type, we only need the link if the type can
// be created at run time by reflect.PtrTo and similar
@@ -1412,7 +1404,7 @@ func dtypesym(t *types.Type) *obj.LSym {
// functions must return the existing type structure rather
// than creating a new one.
switch t.Etype {
- case TPTR, TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRUCT:
+ case types.TPTR, types.TARRAY, types.TCHAN, types.TFUNC, types.TMAP, types.TSLICE, types.TSTRUCT:
keep = true
}
}
@@ -1481,7 +1473,7 @@ func genfun(t, it *types.Type) []*obj.LSym {
}
if len(sigs) != 0 {
- Fatalf("incomplete itab")
+ base.Fatalf("incomplete itab")
}
return out
@@ -1524,11 +1516,11 @@ func addsignat(t *types.Type) {
}
}
-func addsignats(dcls []*Node) {
+func addsignats(dcls []ir.Node) {
// copy types from dcl list to signatset
for _, n := range dcls {
- if n.Op == OTYPE {
- addsignat(n.Type)
+ if n.Op() == ir.OTYPE {
+ addsignat(n.Type())
}
}
}
@@ -1580,9 +1572,9 @@ func dumptabs() {
}
// process ptabs
- if localpkg.Name == "main" && len(ptabs) > 0 {
+ if ir.LocalPkg.Name == "main" && len(ptabs) > 0 {
ot := 0
- s := Ctxt.Lookup("go.plugin.tabs")
+ s := base.Ctxt.Lookup("go.plugin.tabs")
for _, p := range ptabs {
// Dump ptab symbol into go.pluginsym package.
//
@@ -1601,7 +1593,7 @@ func dumptabs() {
ggloblsym(s, int32(ot), int16(obj.RODATA))
ot = 0
- s = Ctxt.Lookup("go.plugin.exports")
+ s = base.Ctxt.Lookup("go.plugin.exports")
for _, p := range ptabs {
ot = dsymptr(s, ot, p.s.Linksym(), 0)
}
@@ -1623,26 +1615,26 @@ func dumpbasictypes() {
// so this is as good as any.
// another possible choice would be package main,
// but using runtime means fewer copies in object files.
- if myimportpath == "runtime" {
- for i := types.EType(1); i <= TBOOL; i++ {
+ if base.Ctxt.Pkgpath == "runtime" {
+ for i := types.EType(1); i <= types.TBOOL; i++ {
dtypesym(types.NewPtr(types.Types[i]))
}
- dtypesym(types.NewPtr(types.Types[TSTRING]))
- dtypesym(types.NewPtr(types.Types[TUNSAFEPTR]))
+ dtypesym(types.NewPtr(types.Types[types.TSTRING]))
+ dtypesym(types.NewPtr(types.Types[types.TUNSAFEPTR]))
// emit type structs for error and func(error) string.
// The latter is the type of an auto-generated wrapper.
dtypesym(types.NewPtr(types.Errortype))
- dtypesym(functype(nil, []*Node{anonfield(types.Errortype)}, []*Node{anonfield(types.Types[TSTRING])}))
+ dtypesym(functype(nil, []ir.Node{anonfield(types.Errortype)}, []ir.Node{anonfield(types.Types[types.TSTRING])}))
// add paths for runtime and main, which 6l imports implicitly.
dimportpath(Runtimepkg)
- if flag_race {
+ if base.Flag.Race {
dimportpath(racepkg)
}
- if flag_msan {
+ if base.Flag.MSan {
dimportpath(msanpkg)
}
dimportpath(types.NewPkg("main", ""))
@@ -1776,8 +1768,8 @@ func fillptrmask(t *types.Type, ptrmask []byte) {
// For non-trivial arrays, the program describes the full t.Width size.
func dgcprog(t *types.Type) (*obj.LSym, int64) {
dowidth(t)
- if t.Width == BADWIDTH {
- Fatalf("dgcprog: %v badwidth", t)
+ if t.Width == types.BADWIDTH {
+ base.Fatalf("dgcprog: %v badwidth", t)
}
lsym := typesymprefix(".gcprog", t).Linksym()
var p GCProg
@@ -1786,7 +1778,7 @@ func dgcprog(t *types.Type) (*obj.LSym, int64) {
offset := p.w.BitIndex() * int64(Widthptr)
p.end()
if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width {
- Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
+ base.Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
}
return lsym, offset
}
@@ -1797,13 +1789,11 @@ type GCProg struct {
w gcprog.Writer
}
-var Debug_gcprog int // set by -d gcprog
-
func (p *GCProg) init(lsym *obj.LSym) {
p.lsym = lsym
p.symoff = 4 // first 4 bytes hold program length
p.w.Init(p.writeByte)
- if Debug_gcprog > 0 {
+ if base.Debug.GCProg > 0 {
fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", lsym)
p.w.Debug(os.Stderr)
}
@@ -1817,7 +1807,7 @@ func (p *GCProg) end() {
p.w.End()
duint32(p.lsym, 0, uint32(p.symoff-4))
ggloblsym(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
- if Debug_gcprog > 0 {
+ if base.Debug.GCProg > 0 {
fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym)
}
}
@@ -1833,22 +1823,22 @@ func (p *GCProg) emit(t *types.Type, offset int64) {
}
switch t.Etype {
default:
- Fatalf("GCProg.emit: unexpected type %v", t)
+ base.Fatalf("GCProg.emit: unexpected type %v", t)
- case TSTRING:
+ case types.TSTRING:
p.w.Ptr(offset / int64(Widthptr))
- case TINTER:
+ case types.TINTER:
// Note: the first word isn't a pointer. See comment in plive.go:onebitwalktype1.
p.w.Ptr(offset/int64(Widthptr) + 1)
- case TSLICE:
+ case types.TSLICE:
p.w.Ptr(offset / int64(Widthptr))
- case TARRAY:
+ case types.TARRAY:
if t.NumElem() == 0 {
// should have been handled by haspointers check above
- Fatalf("GCProg.emit: empty array")
+ base.Fatalf("GCProg.emit: empty array")
}
// Flatten array-of-array-of-array to just a big array by multiplying counts.
@@ -1870,7 +1860,7 @@ func (p *GCProg) emit(t *types.Type, offset int64) {
p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr))
p.w.Repeat(elem.Width/int64(Widthptr), count-1)
- case TSTRUCT:
+ case types.TSTRUCT:
for _, t1 := range t.Fields().Slice() {
p.emit(t1.Type, offset+t1.Offset)
}
@@ -1879,23 +1869,23 @@ func (p *GCProg) emit(t *types.Type, offset int64) {
// zeroaddr returns the address of a symbol with at least
// size bytes of zeros.
-func zeroaddr(size int64) *Node {
+func zeroaddr(size int64) ir.Node {
if size >= 1<<31 {
- Fatalf("map elem too big %d", size)
+ base.Fatalf("map elem too big %d", size)
}
if zerosize < size {
zerosize = size
}
s := mappkg.Lookup("zero")
if s.Def == nil {
- x := newname(s)
- x.Type = types.Types[TUINT8]
- x.SetClass(PEXTERN)
+ x := NewName(s)
+ x.SetType(types.Types[types.TUINT8])
+ x.SetClass(ir.PEXTERN)
x.SetTypecheck(1)
- s.Def = asTypesNode(x)
+ s.Def = x
}
- z := nod(OADDR, asNode(s.Def), nil)
- z.Type = types.NewPtr(types.Types[TUINT8])
+ z := ir.Nod(ir.OADDR, ir.AsNode(s.Def), nil)
+ z.SetType(types.NewPtr(types.Types[types.TUINT8]))
z.SetTypecheck(1)
return z
}
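
The rewrites in reflect.go above are typical of the whole merge: the old concrete *Node with exported fields becomes the ir.Node interface, so every field read turns into a getter (n.Op, n.Type, n.Left become n.Op(), n.Type(), n.Left()), every field write into a setter (n.SetType(t), n.SetClass(ir.PEXTERN)), and constructors like ir.Nod and ir.NewNameAt replace nod and newnamel. A minimal sketch of that shape, not the real interface — cmd/compile/internal/ir's Node has many more methods:

package main

import "fmt"

type Op uint8

const (
	ONAME Op = iota
	OADDR
)

// Node is a trimmed-down analogue of ir.Node: state is reached only
// through getters and setters, never through exported struct fields.
type Node interface {
	Op() Op
	SetOp(Op)
	Left() Node
	SetLeft(Node)
	Typecheck() int
	SetTypecheck(int)
}

// node is one concrete implementation hidden behind the interface.
type node struct {
	op        Op
	left      Node
	typecheck int
}

func (n *node) Op() Op             { return n.op }
func (n *node) SetOp(op Op)        { n.op = op }
func (n *node) Left() Node         { return n.left }
func (n *node) SetLeft(l Node)     { n.left = l }
func (n *node) Typecheck() int     { return n.typecheck }
func (n *node) SetTypecheck(x int) { n.typecheck = x }

// nod mirrors the ir.Nod calls in the diff: allocate and wire a node.
func nod(op Op, left Node) Node {
	n := &node{}
	n.SetOp(op)
	n.SetLeft(left)
	return n
}

func main() {
	name := nod(ONAME, nil)
	addr := nod(OADDR, name) // was: n := nod(OADDR, ...); fields set directly
	addr.SetTypecheck(1)     // was: n.SetTypecheck(1)
	fmt.Println(addr.Op() == OADDR, addr.Left().Op() == ONAME)
}
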
diff --git a/src/cmd/compile/internal/gc/scc.go b/src/cmd/compile/internal/gc/scc.go
index 5c7935aa87..fe7956d5d5 100644
--- a/src/cmd/compile/internal/gc/scc.go
+++ b/src/cmd/compile/internal/gc/scc.go
@@ -4,6 +4,8 @@
package gc
+import "cmd/compile/internal/ir"
+
// Strongly connected components.
//
// Run analysis on minimal sets of mutually recursive functions
@@ -30,10 +32,10 @@ package gc
// when analyzing a set of mutually recursive functions.
type bottomUpVisitor struct {
- analyze func([]*Node, bool)
+ analyze func([]ir.Node, bool)
visitgen uint32
- nodeID map[*Node]uint32
- stack []*Node
+ nodeID map[ir.Node]uint32
+ stack []ir.Node
}
// visitBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
@@ -49,18 +51,18 @@ type bottomUpVisitor struct {
// If recursive is false, the list consists of only a single function and its closures.
// If recursive is true, the list may still contain only a single function,
// if that function is itself recursive.
-func visitBottomUp(list []*Node, analyze func(list []*Node, recursive bool)) {
+func visitBottomUp(list []ir.Node, analyze func(list []ir.Node, recursive bool)) {
var v bottomUpVisitor
v.analyze = analyze
- v.nodeID = make(map[*Node]uint32)
+ v.nodeID = make(map[ir.Node]uint32)
for _, n := range list {
- if n.Op == ODCLFUNC && !n.Func.IsHiddenClosure() {
+ if n.Op() == ir.ODCLFUNC && !n.Func().IsHiddenClosure() {
v.visit(n)
}
}
}
-func (v *bottomUpVisitor) visit(n *Node) uint32 {
+func (v *bottomUpVisitor) visit(n ir.Node) uint32 {
if id := v.nodeID[n]; id > 0 {
// already visited
return id
@@ -73,42 +75,46 @@ func (v *bottomUpVisitor) visit(n *Node) uint32 {
min := v.visitgen
v.stack = append(v.stack, n)
- inspectList(n.Nbody, func(n *Node) bool {
- switch n.Op {
- case ONAME:
- if n.Class() == PFUNC {
- if n.isMethodExpression() {
- n = asNode(n.Type.Nname())
- }
- if n != nil && n.Name.Defn != nil {
- if m := v.visit(n.Name.Defn); m < min {
+ ir.InspectList(n.Body(), func(n ir.Node) bool {
+ switch n.Op() {
+ case ir.ONAME:
+ if n.Class() == ir.PFUNC {
+ if n != nil && n.Name().Defn != nil {
+ if m := v.visit(n.Name().Defn); m < min {
min = m
}
}
}
- case ODOTMETH:
- fn := asNode(n.Type.Nname())
- if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil {
- if m := v.visit(fn.Name.Defn); m < min {
+ case ir.OMETHEXPR:
+ fn := methodExprName(n)
+ if fn != nil && fn.Name().Defn != nil {
+ if m := v.visit(fn.Name().Defn); m < min {
+ min = m
+ }
+ }
+ case ir.ODOTMETH:
+ fn := methodExprName(n)
+ if fn != nil && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC && fn.Name().Defn != nil {
+ if m := v.visit(fn.Name().Defn); m < min {
min = m
}
}
- case OCALLPART:
- fn := asNode(callpartMethod(n).Type.Nname())
- if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil {
- if m := v.visit(fn.Name.Defn); m < min {
+ case ir.OCALLPART:
+ fn := ir.AsNode(callpartMethod(n).Nname)
+ if fn != nil && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC && fn.Name().Defn != nil {
+ if m := v.visit(fn.Name().Defn); m < min {
min = m
}
}
- case OCLOSURE:
- if m := v.visit(n.Func.Closure); m < min {
+ case ir.OCLOSURE:
+ if m := v.visit(n.Func().Decl); m < min {
min = m
}
}
return true
})
- if (min == id || min == id+1) && !n.Func.IsHiddenClosure() {
+ if (min == id || min == id+1) && !n.Func().IsHiddenClosure() {
// This node is the root of a strongly connected component.
// The original min passed to visitcodelist was v.nodeID[n]+1.
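
Apart from the interface migration, scc.go keeps its variant of Tarjan's strongly-connected-components walk: each function receives an increasing id on first visit, the minimum id reachable through callees propagates back up, and a node whose min never drops below its own id roots a component, which is popped off the stack and handed to analyze. A self-contained sketch of the same min-propagation idea over a toy string-keyed call graph (hypothetical; the real visitor walks ir.Node function bodies):

package main

import "fmt"

type visitor struct {
	analyze  func(block []string, recursive bool)
	visitgen uint32
	nodeID   map[string]uint32
	stack    []string
	calls    map[string][]string // toy call graph: caller -> callees
}

func (v *visitor) visit(fn string) uint32 {
	if id := v.nodeID[fn]; id > 0 {
		return id // already visited (or retired with a huge id)
	}
	v.visitgen++
	id := v.visitgen
	v.nodeID[fn] = id
	v.visitgen++
	min := v.visitgen
	v.stack = append(v.stack, fn)

	// Propagate the smallest id reachable through any callee.
	for _, callee := range v.calls[fn] {
		if m := v.visit(callee); m < min {
			min = m
		}
	}

	if min == id || min == id+1 {
		// fn roots a component; min == id means a callee cycled back,
		// so the popped block is mutually recursive.
		recursive := min == id
		i := len(v.stack) - 1
		for v.stack[i] != fn {
			v.nodeID[v.stack[i]] = ^uint32(0)
			i--
		}
		v.nodeID[fn] = ^uint32(0)
		block := v.stack[i:]
		v.stack = v.stack[:i]
		v.analyze(block, recursive)
	}
	return min
}

func main() {
	v := &visitor{
		nodeID: map[string]uint32{},
		calls:  map[string][]string{"a": {"b"}, "b": {"a"}, "c": {"a"}},
		analyze: func(block []string, recursive bool) {
			fmt.Println(block, "recursive:", recursive)
		},
	}
	for _, fn := range []string{"a", "b", "c"} {
		v.visit(fn)
	}
	// Output: [a b] recursive: true
	//         [c] recursive: false
}
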
diff --git a/src/cmd/compile/internal/gc/scope.go b/src/cmd/compile/internal/gc/scope.go
index e66b859e10..fe4e1d185a 100644
--- a/src/cmd/compile/internal/gc/scope.go
+++ b/src/cmd/compile/internal/gc/scope.go
@@ -5,6 +5,8 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/internal/dwarf"
"cmd/internal/obj"
"cmd/internal/src"
@@ -13,10 +15,10 @@ import (
// See golang.org/issue/20390.
func xposBefore(p, q src.XPos) bool {
- return Ctxt.PosTable.Pos(p).Before(Ctxt.PosTable.Pos(q))
+ return base.Ctxt.PosTable.Pos(p).Before(base.Ctxt.PosTable.Pos(q))
}
-func findScope(marks []Mark, pos src.XPos) ScopeID {
+func findScope(marks []ir.Mark, pos src.XPos) ir.ScopeID {
i := sort.Search(len(marks), func(i int) bool {
return xposBefore(pos, marks[i].Pos)
})
@@ -26,20 +28,20 @@ func findScope(marks []Mark, pos src.XPos) ScopeID {
return marks[i-1].Scope
}
-func assembleScopes(fnsym *obj.LSym, fn *Node, dwarfVars []*dwarf.Var, varScopes []ScopeID) []dwarf.Scope {
+func assembleScopes(fnsym *obj.LSym, fn ir.Node, dwarfVars []*dwarf.Var, varScopes []ir.ScopeID) []dwarf.Scope {
// Initialize the DWARF scope tree based on lexical scopes.
- dwarfScopes := make([]dwarf.Scope, 1+len(fn.Func.Parents))
- for i, parent := range fn.Func.Parents {
+ dwarfScopes := make([]dwarf.Scope, 1+len(fn.Func().Parents))
+ for i, parent := range fn.Func().Parents {
dwarfScopes[i+1].Parent = int32(parent)
}
scopeVariables(dwarfVars, varScopes, dwarfScopes)
- scopePCs(fnsym, fn.Func.Marks, dwarfScopes)
+ scopePCs(fnsym, fn.Func().Marks, dwarfScopes)
return compactScopes(dwarfScopes)
}
// scopeVariables assigns DWARF variable records to their scopes.
-func scopeVariables(dwarfVars []*dwarf.Var, varScopes []ScopeID, dwarfScopes []dwarf.Scope) {
+func scopeVariables(dwarfVars []*dwarf.Var, varScopes []ir.ScopeID, dwarfScopes []dwarf.Scope) {
sort.Stable(varsByScopeAndOffset{dwarfVars, varScopes})
i0 := 0
@@ -56,7 +58,7 @@ func scopeVariables(dwarfVars []*dwarf.Var, varScopes []ScopeID, dwarfScopes []d
}
// scopePCs assigns PC ranges to their scopes.
-func scopePCs(fnsym *obj.LSym, marks []Mark, dwarfScopes []dwarf.Scope) {
+func scopePCs(fnsym *obj.LSym, marks []ir.Mark, dwarfScopes []dwarf.Scope) {
// If there aren't any child scopes (in particular, when scope
// tracking is disabled), we can skip a whole lot of work.
if len(marks) == 0 {
@@ -89,7 +91,7 @@ func compactScopes(dwarfScopes []dwarf.Scope) []dwarf.Scope {
type varsByScopeAndOffset struct {
vars []*dwarf.Var
- scopes []ScopeID
+ scopes []ir.ScopeID
}
func (v varsByScopeAndOffset) Len() int {
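
findScope above is a plain sort.Search over marks ordered by position: the scope in effect at pos is the one opened by the last mark at or before pos. A minimal sketch with an int standing in for src.XPos, and Mark/ScopeID simplified from their ir counterparts:

package main

import (
	"fmt"
	"sort"
)

type ScopeID int32

type Mark struct {
	Pos   int // stand-in for src.XPos
	Scope ScopeID
}

// findScope returns the scope opened by the last mark at or before
// pos, or 0 if pos precedes every mark.
func findScope(marks []Mark, pos int) ScopeID {
	i := sort.Search(len(marks), func(i int) bool {
		return pos < marks[i].Pos // analogue of xposBefore(pos, marks[i].Pos)
	})
	if i == 0 {
		return 0
	}
	return marks[i-1].Scope
}

func main() {
	marks := []Mark{{10, 1}, {20, 2}, {30, 3}}
	fmt.Println(findScope(marks, 5), findScope(marks, 25)) // 0 2
}
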
diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go
index 97e0424ce0..116b6f5b6e 100644
--- a/src/cmd/compile/internal/gc/select.go
+++ b/src/cmd/compile/internal/gc/select.go
@@ -4,152 +4,156 @@
package gc
-import "cmd/compile/internal/types"
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+)
// select
-func typecheckselect(sel *Node) {
- var def *Node
+func typecheckselect(sel ir.Node) {
+ var def ir.Node
lno := setlineno(sel)
- typecheckslice(sel.Ninit.Slice(), ctxStmt)
- for _, ncase := range sel.List.Slice() {
- if ncase.Op != OCASE {
+ typecheckslice(sel.Init().Slice(), ctxStmt)
+ for _, ncase := range sel.List().Slice() {
+ if ncase.Op() != ir.OCASE {
setlineno(ncase)
- Fatalf("typecheckselect %v", ncase.Op)
+ base.Fatalf("typecheckselect %v", ncase.Op())
}
- if ncase.List.Len() == 0 {
+ if ncase.List().Len() == 0 {
// default
if def != nil {
- yyerrorl(ncase.Pos, "multiple defaults in select (first at %v)", def.Line())
+ base.ErrorfAt(ncase.Pos(), "multiple defaults in select (first at %v)", ir.Line(def))
} else {
def = ncase
}
- } else if ncase.List.Len() > 1 {
- yyerrorl(ncase.Pos, "select cases cannot be lists")
+ } else if ncase.List().Len() > 1 {
+ base.ErrorfAt(ncase.Pos(), "select cases cannot be lists")
} else {
- ncase.List.SetFirst(typecheck(ncase.List.First(), ctxStmt))
- n := ncase.List.First()
- ncase.Left = n
- ncase.List.Set(nil)
- switch n.Op {
+ ncase.List().SetFirst(typecheck(ncase.List().First(), ctxStmt))
+ n := ncase.List().First()
+ ncase.SetLeft(n)
+ ncase.PtrList().Set(nil)
+ switch n.Op() {
default:
- pos := n.Pos
- if n.Op == ONAME {
+ pos := n.Pos()
+ if n.Op() == ir.ONAME {
// We don't have the right position for ONAME nodes (see #15459 and
// others). Using ncase.Pos for now as it will provide the correct
// line number (assuming the expression follows the "case" keyword
// on the same line). This matches the approach before 1.10.
- pos = ncase.Pos
+ pos = ncase.Pos()
}
- yyerrorl(pos, "select case must be receive, send or assign recv")
+ base.ErrorfAt(pos, "select case must be receive, send or assign recv")
// convert x = <-c into OSELRECV(x, <-c).
// remove implicit conversions; the eventual assignment
// will reintroduce them.
- case OAS:
- if (n.Right.Op == OCONVNOP || n.Right.Op == OCONVIFACE) && n.Right.Implicit() {
- n.Right = n.Right.Left
+ case ir.OAS:
+ if (n.Right().Op() == ir.OCONVNOP || n.Right().Op() == ir.OCONVIFACE) && n.Right().Implicit() {
+ n.SetRight(n.Right().Left())
}
- if n.Right.Op != ORECV {
- yyerrorl(n.Pos, "select assignment must have receive on right hand side")
+ if n.Right().Op() != ir.ORECV {
+ base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side")
break
}
- n.Op = OSELRECV
+ n.SetOp(ir.OSELRECV)
// convert x, ok = <-c into OSELRECV2(x, <-c) with ntest=ok
- case OAS2RECV:
- if n.Right.Op != ORECV {
- yyerrorl(n.Pos, "select assignment must have receive on right hand side")
+ case ir.OAS2RECV:
+ if n.Right().Op() != ir.ORECV {
+ base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side")
break
}
- n.Op = OSELRECV2
- n.Left = n.List.First()
- n.List.Set1(n.List.Second())
+ n.SetOp(ir.OSELRECV2)
+ n.SetLeft(n.List().First())
+ n.PtrList().Set1(n.List().Second())
// convert <-c into OSELRECV(N, <-c)
- case ORECV:
- n = nodl(n.Pos, OSELRECV, nil, n)
+ case ir.ORECV:
+ n = ir.NodAt(n.Pos(), ir.OSELRECV, nil, n)
n.SetTypecheck(1)
- ncase.Left = n
+ ncase.SetLeft(n)
- case OSEND:
+ case ir.OSEND:
break
}
}
- typecheckslice(ncase.Nbody.Slice(), ctxStmt)
+ typecheckslice(ncase.Body().Slice(), ctxStmt)
}
- lineno = lno
+ base.Pos = lno
}
-func walkselect(sel *Node) {
+func walkselect(sel ir.Node) {
lno := setlineno(sel)
- if sel.Nbody.Len() != 0 {
- Fatalf("double walkselect")
+ if sel.Body().Len() != 0 {
+ base.Fatalf("double walkselect")
}
- init := sel.Ninit.Slice()
- sel.Ninit.Set(nil)
+ init := sel.Init().Slice()
+ sel.PtrInit().Set(nil)
- init = append(init, walkselectcases(&sel.List)...)
- sel.List.Set(nil)
+ init = append(init, walkselectcases(sel.PtrList())...)
+ sel.PtrList().Set(nil)
- sel.Nbody.Set(init)
- walkstmtlist(sel.Nbody.Slice())
+ sel.PtrBody().Set(init)
+ walkstmtlist(sel.Body().Slice())
- lineno = lno
+ base.Pos = lno
}
-func walkselectcases(cases *Nodes) []*Node {
+func walkselectcases(cases *ir.Nodes) []ir.Node {
ncas := cases.Len()
- sellineno := lineno
+ sellineno := base.Pos
// optimization: zero-case select
if ncas == 0 {
- return []*Node{mkcall("block", nil, nil)}
+ return []ir.Node{mkcall("block", nil, nil)}
}
// optimization: one-case select: single op.
if ncas == 1 {
cas := cases.First()
setlineno(cas)
- l := cas.Ninit.Slice()
- if cas.Left != nil { // not default:
- n := cas.Left
- l = append(l, n.Ninit.Slice()...)
- n.Ninit.Set(nil)
- switch n.Op {
+ l := cas.Init().Slice()
+ if cas.Left() != nil { // not default:
+ n := cas.Left()
+ l = append(l, n.Init().Slice()...)
+ n.PtrInit().Set(nil)
+ switch n.Op() {
default:
- Fatalf("select %v", n.Op)
+ base.Fatalf("select %v", n.Op())
- case OSEND:
+ case ir.OSEND:
// already ok
- case OSELRECV, OSELRECV2:
- if n.Op == OSELRECV || n.List.Len() == 0 {
- if n.Left == nil {
- n = n.Right
+ case ir.OSELRECV, ir.OSELRECV2:
+ if n.Op() == ir.OSELRECV || n.List().Len() == 0 {
+ if n.Left() == nil {
+ n = n.Right()
} else {
- n.Op = OAS
+ n.SetOp(ir.OAS)
}
break
}
- if n.Left == nil {
- nblank = typecheck(nblank, ctxExpr|ctxAssign)
- n.Left = nblank
+ if n.Left() == nil {
+ ir.BlankNode = typecheck(ir.BlankNode, ctxExpr|ctxAssign)
+ n.SetLeft(ir.BlankNode)
}
- n.Op = OAS2
- n.List.Prepend(n.Left)
- n.Rlist.Set1(n.Right)
- n.Right = nil
- n.Left = nil
+ n.SetOp(ir.OAS2)
+ n.PtrList().Prepend(n.Left())
+ n.PtrRlist().Set1(n.Right())
+ n.SetRight(nil)
+ n.SetLeft(nil)
n.SetTypecheck(0)
n = typecheck(n, ctxStmt)
}
@@ -157,34 +161,34 @@ func walkselectcases(cases *Nodes) []*Node {
l = append(l, n)
}
- l = append(l, cas.Nbody.Slice()...)
- l = append(l, nod(OBREAK, nil, nil))
+ l = append(l, cas.Body().Slice()...)
+ l = append(l, ir.Nod(ir.OBREAK, nil, nil))
return l
}
// convert case value arguments to addresses.
// this rewrite is used by both the general code and the next optimization.
- var dflt *Node
+ var dflt ir.Node
for _, cas := range cases.Slice() {
setlineno(cas)
- n := cas.Left
+ n := cas.Left()
if n == nil {
dflt = cas
continue
}
- switch n.Op {
- case OSEND:
- n.Right = nod(OADDR, n.Right, nil)
- n.Right = typecheck(n.Right, ctxExpr)
-
- case OSELRECV, OSELRECV2:
- if n.Op == OSELRECV2 && n.List.Len() == 0 {
- n.Op = OSELRECV
+ switch n.Op() {
+ case ir.OSEND:
+ n.SetRight(ir.Nod(ir.OADDR, n.Right(), nil))
+ n.SetRight(typecheck(n.Right(), ctxExpr))
+
+ case ir.OSELRECV, ir.OSELRECV2:
+ if n.Op() == ir.OSELRECV2 && n.List().Len() == 0 {
+ n.SetOp(ir.OSELRECV)
}
- if n.Left != nil {
- n.Left = nod(OADDR, n.Left, nil)
- n.Left = typecheck(n.Left, ctxExpr)
+ if n.Left() != nil {
+ n.SetLeft(ir.Nod(ir.OADDR, n.Left(), nil))
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
}
}
}
@@ -196,68 +200,68 @@ func walkselectcases(cases *Nodes) []*Node {
cas = cases.Second()
}
- n := cas.Left
+ n := cas.Left()
setlineno(n)
- r := nod(OIF, nil, nil)
- r.Ninit.Set(cas.Ninit.Slice())
- switch n.Op {
+ r := ir.Nod(ir.OIF, nil, nil)
+ r.PtrInit().Set(cas.Init().Slice())
+ switch n.Op() {
default:
- Fatalf("select %v", n.Op)
+ base.Fatalf("select %v", n.Op())
- case OSEND:
+ case ir.OSEND:
// if selectnbsend(c, v) { body } else { default body }
- ch := n.Left
- r.Left = mkcall1(chanfn("selectnbsend", 2, ch.Type), types.Types[TBOOL], &r.Ninit, ch, n.Right)
+ ch := n.Left()
+ r.SetLeft(mkcall1(chanfn("selectnbsend", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), ch, n.Right()))
- case OSELRECV:
+ case ir.OSELRECV:
// if selectnbrecv(&v, c) { body } else { default body }
- ch := n.Right.Left
- elem := n.Left
+ ch := n.Right().Left()
+ elem := n.Left()
if elem == nil {
elem = nodnil()
}
- r.Left = mkcall1(chanfn("selectnbrecv", 2, ch.Type), types.Types[TBOOL], &r.Ninit, elem, ch)
+ r.SetLeft(mkcall1(chanfn("selectnbrecv", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, ch))
- case OSELRECV2:
+ case ir.OSELRECV2:
// if selectnbrecv2(&v, &received, c) { body } else { default body }
- ch := n.Right.Left
- elem := n.Left
+ ch := n.Right().Left()
+ elem := n.Left()
if elem == nil {
elem = nodnil()
}
- receivedp := nod(OADDR, n.List.First(), nil)
+ receivedp := ir.Nod(ir.OADDR, n.List().First(), nil)
receivedp = typecheck(receivedp, ctxExpr)
- r.Left = mkcall1(chanfn("selectnbrecv2", 2, ch.Type), types.Types[TBOOL], &r.Ninit, elem, receivedp, ch)
+ r.SetLeft(mkcall1(chanfn("selectnbrecv2", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, receivedp, ch))
}
- r.Left = typecheck(r.Left, ctxExpr)
- r.Nbody.Set(cas.Nbody.Slice())
- r.Rlist.Set(append(dflt.Ninit.Slice(), dflt.Nbody.Slice()...))
- return []*Node{r, nod(OBREAK, nil, nil)}
+ r.SetLeft(typecheck(r.Left(), ctxExpr))
+ r.PtrBody().Set(cas.Body().Slice())
+ r.PtrRlist().Set(append(dflt.Init().Slice(), dflt.Body().Slice()...))
+ return []ir.Node{r, ir.Nod(ir.OBREAK, nil, nil)}
}
if dflt != nil {
ncas--
}
- casorder := make([]*Node, ncas)
+ casorder := make([]ir.Node, ncas)
nsends, nrecvs := 0, 0
- var init []*Node
+ var init []ir.Node
// generate sel-struct
- lineno = sellineno
+ base.Pos = sellineno
selv := temp(types.NewArray(scasetype(), int64(ncas)))
- r := nod(OAS, selv, nil)
+ r := ir.Nod(ir.OAS, selv, nil)
r = typecheck(r, ctxStmt)
init = append(init, r)
// No initialization for order; runtime.selectgo is responsible for that.
- order := temp(types.NewArray(types.Types[TUINT16], 2*int64(ncas)))
+ order := temp(types.NewArray(types.Types[types.TUINT16], 2*int64(ncas)))
- var pc0, pcs *Node
- if flag_race {
- pcs = temp(types.NewArray(types.Types[TUINTPTR], int64(ncas)))
- pc0 = typecheck(nod(OADDR, nod(OINDEX, pcs, nodintconst(0)), nil), ctxExpr)
+ var pc0, pcs ir.Node
+ if base.Flag.Race {
+ pcs = temp(types.NewArray(types.Types[types.TUINTPTR], int64(ncas)))
+ pc0 = typecheck(ir.Nod(ir.OADDR, ir.Nod(ir.OINDEX, pcs, nodintconst(0)), nil), ctxExpr)
} else {
pc0 = nodnil()
}
@@ -266,109 +270,109 @@ func walkselectcases(cases *Nodes) []*Node {
for _, cas := range cases.Slice() {
setlineno(cas)
- init = append(init, cas.Ninit.Slice()...)
- cas.Ninit.Set(nil)
+ init = append(init, cas.Init().Slice()...)
+ cas.PtrInit().Set(nil)
- n := cas.Left
+ n := cas.Left()
if n == nil { // default:
continue
}
var i int
- var c, elem *Node
- switch n.Op {
+ var c, elem ir.Node
+ switch n.Op() {
default:
- Fatalf("select %v", n.Op)
- case OSEND:
+ base.Fatalf("select %v", n.Op())
+ case ir.OSEND:
i = nsends
nsends++
- c = n.Left
- elem = n.Right
- case OSELRECV, OSELRECV2:
+ c = n.Left()
+ elem = n.Right()
+ case ir.OSELRECV, ir.OSELRECV2:
nrecvs++
i = ncas - nrecvs
- c = n.Right.Left
- elem = n.Left
+ c = n.Right().Left()
+ elem = n.Left()
}
casorder[i] = cas
- setField := func(f string, val *Node) {
- r := nod(OAS, nodSym(ODOT, nod(OINDEX, selv, nodintconst(int64(i))), lookup(f)), val)
+ setField := func(f string, val ir.Node) {
+ r := ir.Nod(ir.OAS, nodSym(ir.ODOT, ir.Nod(ir.OINDEX, selv, nodintconst(int64(i))), lookup(f)), val)
r = typecheck(r, ctxStmt)
init = append(init, r)
}
- c = convnop(c, types.Types[TUNSAFEPTR])
+ c = convnop(c, types.Types[types.TUNSAFEPTR])
setField("c", c)
if elem != nil {
- elem = convnop(elem, types.Types[TUNSAFEPTR])
+ elem = convnop(elem, types.Types[types.TUNSAFEPTR])
setField("elem", elem)
}
// TODO(mdempsky): There should be a cleaner way to
// handle this.
- if flag_race {
- r = mkcall("selectsetpc", nil, nil, nod(OADDR, nod(OINDEX, pcs, nodintconst(int64(i))), nil))
+ if base.Flag.Race {
+ r = mkcall("selectsetpc", nil, nil, ir.Nod(ir.OADDR, ir.Nod(ir.OINDEX, pcs, nodintconst(int64(i))), nil))
init = append(init, r)
}
}
if nsends+nrecvs != ncas {
- Fatalf("walkselectcases: miscount: %v + %v != %v", nsends, nrecvs, ncas)
+ base.Fatalf("walkselectcases: miscount: %v + %v != %v", nsends, nrecvs, ncas)
}
// run the select
- lineno = sellineno
- chosen := temp(types.Types[TINT])
- recvOK := temp(types.Types[TBOOL])
- r = nod(OAS2, nil, nil)
- r.List.Set2(chosen, recvOK)
+ base.Pos = sellineno
+ chosen := temp(types.Types[types.TINT])
+ recvOK := temp(types.Types[types.TBOOL])
+ r = ir.Nod(ir.OAS2, nil, nil)
+ r.PtrList().Set2(chosen, recvOK)
fn := syslook("selectgo")
- r.Rlist.Set1(mkcall1(fn, fn.Type.Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, nodintconst(int64(nsends)), nodintconst(int64(nrecvs)), nodbool(dflt == nil)))
+ r.PtrRlist().Set1(mkcall1(fn, fn.Type().Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, nodintconst(int64(nsends)), nodintconst(int64(nrecvs)), nodbool(dflt == nil)))
r = typecheck(r, ctxStmt)
init = append(init, r)
// selv and order are no longer alive after selectgo.
- init = append(init, nod(OVARKILL, selv, nil))
- init = append(init, nod(OVARKILL, order, nil))
- if flag_race {
- init = append(init, nod(OVARKILL, pcs, nil))
+ init = append(init, ir.Nod(ir.OVARKILL, selv, nil))
+ init = append(init, ir.Nod(ir.OVARKILL, order, nil))
+ if base.Flag.Race {
+ init = append(init, ir.Nod(ir.OVARKILL, pcs, nil))
}
// dispatch cases
- dispatch := func(cond, cas *Node) {
+ dispatch := func(cond, cas ir.Node) {
cond = typecheck(cond, ctxExpr)
cond = defaultlit(cond, nil)
- r := nod(OIF, cond, nil)
+ r := ir.Nod(ir.OIF, cond, nil)
- if n := cas.Left; n != nil && n.Op == OSELRECV2 {
- x := nod(OAS, n.List.First(), recvOK)
+ if n := cas.Left(); n != nil && n.Op() == ir.OSELRECV2 {
+ x := ir.Nod(ir.OAS, n.List().First(), recvOK)
x = typecheck(x, ctxStmt)
- r.Nbody.Append(x)
+ r.PtrBody().Append(x)
}
- r.Nbody.AppendNodes(&cas.Nbody)
- r.Nbody.Append(nod(OBREAK, nil, nil))
+ r.PtrBody().AppendNodes(cas.PtrBody())
+ r.PtrBody().Append(ir.Nod(ir.OBREAK, nil, nil))
init = append(init, r)
}
if dflt != nil {
setlineno(dflt)
- dispatch(nod(OLT, chosen, nodintconst(0)), dflt)
+ dispatch(ir.Nod(ir.OLT, chosen, nodintconst(0)), dflt)
}
for i, cas := range casorder {
setlineno(cas)
- dispatch(nod(OEQ, chosen, nodintconst(int64(i))), cas)
+ dispatch(ir.Nod(ir.OEQ, chosen, nodintconst(int64(i))), cas)
}
return init
}
// bytePtrToIndex returns a Node representing "(*byte)(&n[i])".
-func bytePtrToIndex(n *Node, i int64) *Node {
- s := nod(OADDR, nod(OINDEX, n, nodintconst(i)), nil)
- t := types.NewPtr(types.Types[TUINT8])
+func bytePtrToIndex(n ir.Node, i int64) ir.Node {
+ s := ir.Nod(ir.OADDR, ir.Nod(ir.OINDEX, n, nodintconst(i)), nil)
+ t := types.NewPtr(types.Types[types.TUINT8])
return convnop(s, t)
}
@@ -377,9 +381,9 @@ var scase *types.Type
// Keep in sync with src/runtime/select.go.
func scasetype() *types.Type {
if scase == nil {
- scase = tostruct([]*Node{
- namedfield("c", types.Types[TUNSAFEPTR]),
- namedfield("elem", types.Types[TUNSAFEPTR]),
+ scase = tostruct([]ir.Node{
+ namedfield("c", types.Types[types.TUNSAFEPTR]),
+ namedfield("elem", types.Types[types.TUNSAFEPTR]),
})
scase.SetNoalg(true)
}
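
For orientation, the three fast paths that walkselectcases carves out above correspond to these ordinary Go shapes; the diff's comments name the lowered forms (a runtime block call, the bare channel operation plus an OBREAK, and an if around selectnbsend/selectnbrecv), with everything else going through the general selectgo path:

package main

func zeroCase() {
	// Lowered to a single runtime block call; blocks forever.
	select {}
}

func oneCase(c chan int) int {
	// A single non-default case loses the select machinery entirely:
	// it becomes the plain receive followed by a break.
	select {
	case v := <-c:
		return v
	}
}

func twoCaseDefault(c chan int, v int) bool {
	// One case plus default becomes a conditional runtime call, per the
	// "if selectnbsend(c, v) { body } else { default body }" comment.
	select {
	case c <- v:
		return true
	default:
		return false
	}
}

func main() {
	c := make(chan int, 1)
	_ = twoCaseDefault(c, 42)
	_ = oneCase(c)
}
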
diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go
index 212fcc022d..e30663cfbb 100644
--- a/src/cmd/compile/internal/gc/sinit.go
+++ b/src/cmd/compile/internal/gc/sinit.go
@@ -5,14 +5,17 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
"fmt"
+ "go/constant"
)
type InitEntry struct {
- Xoffset int64 // struct, array only
- Expr *Node // bytes of run-time computed expressions
+ Xoffset int64 // struct, array only
+ Expr ir.Node // bytes of run-time computed expressions
}
type InitPlan struct {
@@ -26,21 +29,21 @@ type InitPlan struct {
type InitSchedule struct {
// out is the ordered list of dynamic initialization
// statements.
- out []*Node
+ out []ir.Node
- initplans map[*Node]*InitPlan
- inittemps map[*Node]*Node
+ initplans map[ir.Node]*InitPlan
+ inittemps map[ir.Node]ir.Node
}
-func (s *InitSchedule) append(n *Node) {
+func (s *InitSchedule) append(n ir.Node) {
s.out = append(s.out, n)
}
// staticInit adds an initialization statement n to the schedule.
-func (s *InitSchedule) staticInit(n *Node) {
+func (s *InitSchedule) staticInit(n ir.Node) {
if !s.tryStaticInit(n) {
- if Debug.P != 0 {
- Dump("nonstatic", n)
+ if base.Flag.Percent != 0 {
+ ir.Dump("nonstatic", n)
}
s.append(n)
}
@@ -48,112 +51,115 @@ func (s *InitSchedule) staticInit(n *Node) {
// tryStaticInit attempts to statically execute an initialization
// statement and reports whether it succeeded.
-func (s *InitSchedule) tryStaticInit(n *Node) bool {
+func (s *InitSchedule) tryStaticInit(n ir.Node) bool {
// Only worry about simple "l = r" assignments. Multiple
// variable/expression OAS2 assignments have already been
// replaced by multiple simple OAS assignments, and the other
// OAS2* assignments mostly necessitate dynamic execution
// anyway.
- if n.Op != OAS {
+ if n.Op() != ir.OAS {
return false
}
- if n.Left.isBlank() && candiscard(n.Right) {
+ if ir.IsBlank(n.Left()) && candiscard(n.Right()) {
return true
}
lno := setlineno(n)
- defer func() { lineno = lno }()
- return s.staticassign(n.Left, n.Right)
+ defer func() { base.Pos = lno }()
+ return s.staticassign(n.Left(), n.Right())
}
// like staticassign but we are copying an already
// initialized value r.
-func (s *InitSchedule) staticcopy(l *Node, r *Node) bool {
- if r.Op != ONAME {
+func (s *InitSchedule) staticcopy(l ir.Node, r ir.Node) bool {
+ if r.Op() != ir.ONAME && r.Op() != ir.OMETHEXPR {
return false
}
- if r.Class() == PFUNC {
+ if r.Class() == ir.PFUNC {
pfuncsym(l, r)
return true
}
- if r.Class() != PEXTERN || r.Sym.Pkg != localpkg {
+ if r.Class() != ir.PEXTERN || r.Sym().Pkg != ir.LocalPkg {
return false
}
- if r.Name.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value
+ if r.Name().Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value
return false
}
- if r.Name.Defn.Op != OAS {
+ if r.Name().Defn.Op() != ir.OAS {
return false
}
- if r.Type.IsString() { // perhaps overwritten by cmd/link -X (#34675)
+ if r.Type().IsString() { // perhaps overwritten by cmd/link -X (#34675)
return false
}
orig := r
- r = r.Name.Defn.Right
+ r = r.Name().Defn.Right()
- for r.Op == OCONVNOP && !types.Identical(r.Type, l.Type) {
- r = r.Left
+ for r.Op() == ir.OCONVNOP && !types.Identical(r.Type(), l.Type()) {
+ r = r.Left()
}
- switch r.Op {
- case ONAME:
+ switch r.Op() {
+ case ir.ONAME, ir.OMETHEXPR:
if s.staticcopy(l, r) {
return true
}
// We may have skipped past one or more OCONVNOPs, so
// use conv to ensure r is assignable to l (#13263).
- s.append(nod(OAS, l, conv(r, l.Type)))
+ s.append(ir.Nod(ir.OAS, l, conv(r, l.Type())))
return true
- case OLITERAL:
+ case ir.ONIL:
+ return true
+
+ case ir.OLITERAL:
if isZero(r) {
return true
}
- litsym(l, r, int(l.Type.Width))
+ litsym(l, r, int(l.Type().Width))
return true
- case OADDR:
- if a := r.Left; a.Op == ONAME {
+ case ir.OADDR:
+ if a := r.Left(); a.Op() == ir.ONAME {
addrsym(l, a)
return true
}
- case OPTRLIT:
- switch r.Left.Op {
- case OARRAYLIT, OSLICELIT, OSTRUCTLIT, OMAPLIT:
+ case ir.OPTRLIT:
+ switch r.Left().Op() {
+ case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT, ir.OMAPLIT:
// copy pointer
addrsym(l, s.inittemps[r])
return true
}
- case OSLICELIT:
+ case ir.OSLICELIT:
// copy slice
a := s.inittemps[r]
- slicesym(l, a, r.Right.Int64Val())
+ slicesym(l, a, r.Right().Int64Val())
return true
- case OARRAYLIT, OSTRUCTLIT:
+ case ir.OARRAYLIT, ir.OSTRUCTLIT:
p := s.initplans[r]
- n := l.copy()
+ n := ir.Copy(l)
for i := range p.E {
e := &p.E[i]
- n.Xoffset = l.Xoffset + e.Xoffset
- n.Type = e.Expr.Type
- if e.Expr.Op == OLITERAL {
- litsym(n, e.Expr, int(n.Type.Width))
+ n.SetOffset(l.Offset() + e.Xoffset)
+ n.SetType(e.Expr.Type())
+ if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL {
+ litsym(n, e.Expr, int(n.Type().Width))
continue
}
- ll := n.sepcopy()
+ ll := ir.SepCopy(n)
if s.staticcopy(ll, e.Expr) {
continue
}
// Requires computation, but we're
// copying someone else's computation.
- rr := orig.sepcopy()
- rr.Type = ll.Type
- rr.Xoffset += e.Xoffset
+ rr := ir.SepCopy(orig)
+ rr.SetType(ll.Type())
+ rr.SetOffset(rr.Offset() + e.Xoffset)
setlineno(rr)
- s.append(nod(OAS, ll, rr))
+ s.append(ir.Nod(ir.OAS, ll, rr))
}
return true
@@ -162,59 +168,61 @@ func (s *InitSchedule) staticcopy(l *Node, r *Node) bool {
return false
}
-func (s *InitSchedule) staticassign(l *Node, r *Node) bool {
- for r.Op == OCONVNOP {
- r = r.Left
+func (s *InitSchedule) staticassign(l ir.Node, r ir.Node) bool {
+ for r.Op() == ir.OCONVNOP {
+ r = r.Left()
}
- switch r.Op {
- case ONAME:
+ switch r.Op() {
+ case ir.ONAME, ir.OMETHEXPR:
return s.staticcopy(l, r)
- case OLITERAL:
+ case ir.ONIL:
+ return true
+
+ case ir.OLITERAL:
if isZero(r) {
return true
}
- litsym(l, r, int(l.Type.Width))
+ litsym(l, r, int(l.Type().Width))
return true
- case OADDR:
- var nam Node
- if stataddr(&nam, r.Left) {
- addrsym(l, &nam)
+ case ir.OADDR:
+ if nam := stataddr(r.Left()); nam != nil {
+ addrsym(l, nam)
return true
}
fallthrough
- case OPTRLIT:
- switch r.Left.Op {
- case OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT:
+ case ir.OPTRLIT:
+ switch r.Left().Op() {
+ case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT:
// Init pointer.
- a := staticname(r.Left.Type)
+ a := staticname(r.Left().Type())
s.inittemps[r] = a
addrsym(l, a)
// Init underlying literal.
- if !s.staticassign(a, r.Left) {
- s.append(nod(OAS, a, r.Left))
+ if !s.staticassign(a, r.Left()) {
+ s.append(ir.Nod(ir.OAS, a, r.Left()))
}
return true
}
//dump("not static ptrlit", r);
- case OSTR2BYTES:
- if l.Class() == PEXTERN && r.Left.Op == OLITERAL {
- sval := r.Left.StringVal()
+ case ir.OSTR2BYTES:
+ if l.Class() == ir.PEXTERN && r.Left().Op() == ir.OLITERAL {
+ sval := r.Left().StringVal()
slicebytes(l, sval)
return true
}
- case OSLICELIT:
+ case ir.OSLICELIT:
s.initplan(r)
// Init slice.
- bound := r.Right.Int64Val()
- ta := types.NewArray(r.Type.Elem(), bound)
+ bound := r.Right().Int64Val()
+ ta := types.NewArray(r.Type().Elem(), bound)
ta.SetNoalg(true)
a := staticname(ta)
s.inittemps[r] = a
@@ -223,96 +231,97 @@ func (s *InitSchedule) staticassign(l *Node, r *Node) bool {
l = a
fallthrough
- case OARRAYLIT, OSTRUCTLIT:
+ case ir.OARRAYLIT, ir.OSTRUCTLIT:
s.initplan(r)
p := s.initplans[r]
- n := l.copy()
+ n := ir.Copy(l)
for i := range p.E {
e := &p.E[i]
- n.Xoffset = l.Xoffset + e.Xoffset
- n.Type = e.Expr.Type
- if e.Expr.Op == OLITERAL {
- litsym(n, e.Expr, int(n.Type.Width))
+ n.SetOffset(l.Offset() + e.Xoffset)
+ n.SetType(e.Expr.Type())
+ if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL {
+ litsym(n, e.Expr, int(n.Type().Width))
continue
}
setlineno(e.Expr)
- a := n.sepcopy()
+ a := ir.SepCopy(n)
if !s.staticassign(a, e.Expr) {
- s.append(nod(OAS, a, e.Expr))
+ s.append(ir.Nod(ir.OAS, a, e.Expr))
}
}
return true
- case OMAPLIT:
+ case ir.OMAPLIT:
break
- case OCLOSURE:
+ case ir.OCLOSURE:
if hasemptycvars(r) {
- if Debug_closure > 0 {
- Warnl(r.Pos, "closure converted to global")
+ if base.Debug.Closure > 0 {
+ base.WarnfAt(r.Pos(), "closure converted to global")
}
// Closures with no captured variables are globals,
// so the assignment can be done at link time.
- pfuncsym(l, r.Func.Closure.Func.Nname)
+ pfuncsym(l, r.Func().Nname)
return true
}
closuredebugruntimecheck(r)
- case OCONVIFACE:
+ case ir.OCONVIFACE:
// This logic is mirrored in isStaticCompositeLiteral.
// If you change something here, change it there, and vice versa.
// Determine the underlying concrete type and value we are converting from.
val := r
- for val.Op == OCONVIFACE {
- val = val.Left
+ for val.Op() == ir.OCONVIFACE {
+ val = val.Left()
}
- if val.Type.IsInterface() {
+
+ if val.Type().IsInterface() {
// val is an interface type.
// If val is nil, we can statically initialize l;
// both words are zero and so there is no work to do, so report success.
// If val is non-nil, we have no concrete type to record,
// and we won't be able to statically initialize its value, so report failure.
- return Isconst(val, CTNIL)
+ return val.Op() == ir.ONIL
}
- markTypeUsedInInterface(val.Type, l.Sym.Linksym())
+ markTypeUsedInInterface(val.Type(), l.Sym().Linksym())
- var itab *Node
- if l.Type.IsEmptyInterface() {
- itab = typename(val.Type)
+ var itab ir.Node
+ if l.Type().IsEmptyInterface() {
+ itab = typename(val.Type())
} else {
- itab = itabname(val.Type, l.Type)
+ itab = itabname(val.Type(), l.Type())
}
// Create a copy of l to modify while we emit data.
- n := l.copy()
+ n := ir.Copy(l)
// Emit itab, advance offset.
- addrsym(n, itab.Left) // itab is an OADDR node
- n.Xoffset += int64(Widthptr)
+ addrsym(n, itab.Left()) // itab is an OADDR node
+ n.SetOffset(n.Offset() + int64(Widthptr))
// Emit data.
- if isdirectiface(val.Type) {
- if Isconst(val, CTNIL) {
+ if isdirectiface(val.Type()) {
+ if val.Op() == ir.ONIL {
// Nil is zero, nothing to do.
return true
}
// Copy val directly into n.
- n.Type = val.Type
+ n.SetType(val.Type())
setlineno(val)
- a := n.sepcopy()
+ a := ir.SepCopy(n)
if !s.staticassign(a, val) {
- s.append(nod(OAS, a, val))
+ s.append(ir.Nod(ir.OAS, a, val))
}
} else {
// Construct temp to hold val, write pointer to temp into n.
- a := staticname(val.Type)
+ a := staticname(val.Type())
s.inittemps[val] = a
if !s.staticassign(a, val) {
- s.append(nod(OAS, a, val))
+ s.append(ir.Nod(ir.OAS, a, val))
}
addrsym(n, a)
}
@@ -358,29 +367,29 @@ var statuniqgen int // name generator for static temps
// staticname returns a name backed by a (writable) static data symbol.
// Use readonlystaticname for read-only node.
-func staticname(t *types.Type) *Node {
+func staticname(t *types.Type) ir.Node {
// Don't use lookupN; it interns the resulting string, but these are all unique.
- n := newname(lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen)))
+ n := NewName(lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen)))
statuniqgen++
- addvar(n, t, PEXTERN)
- n.Sym.Linksym().Set(obj.AttrLocal, true)
+ addvar(n, t, ir.PEXTERN)
+ n.Sym().Linksym().Set(obj.AttrLocal, true)
return n
}
// readonlystaticname returns a name backed by a (read-only) static data symbol.
-func readonlystaticname(t *types.Type) *Node {
+func readonlystaticname(t *types.Type) ir.Node {
n := staticname(t)
n.MarkReadonly()
- n.Sym.Linksym().Set(obj.AttrContentAddressable, true)
+ n.Sym().Linksym().Set(obj.AttrContentAddressable, true)
return n
}
-func (n *Node) isSimpleName() bool {
- return n.Op == ONAME && n.Class() != PAUTOHEAP && n.Class() != PEXTERN
+func isSimpleName(n ir.Node) bool {
+ return (n.Op() == ir.ONAME || n.Op() == ir.OMETHEXPR) && n.Class() != ir.PAUTOHEAP && n.Class() != ir.PEXTERN
}
-func litas(l *Node, r *Node, init *Nodes) {
- a := nod(OAS, l, r)
+func litas(l ir.Node, r ir.Node, init *ir.Nodes) {
+ a := ir.Nod(ir.OAS, l, r)
a = typecheck(a, ctxStmt)
a = walkexpr(a, init)
init.Append(a)
@@ -396,19 +405,19 @@ const (
// getdyn calculates the initGenType for n.
// If top is false, getdyn is recursing.
-func getdyn(n *Node, top bool) initGenType {
- switch n.Op {
+func getdyn(n ir.Node, top bool) initGenType {
+ switch n.Op() {
default:
- if n.isGoConst() {
+ if isGoConst(n) {
return initConst
}
return initDynamic
- case OSLICELIT:
+ case ir.OSLICELIT:
if !top {
return initDynamic
}
- if n.Right.Int64Val()/4 > int64(n.List.Len()) {
+ if n.Right().Int64Val()/4 > int64(n.List().Len()) {
// <25% of entries have explicit values.
// Very rough estimation, it takes 4 bytes of instructions
// to initialize 1 byte of result. So don't use a static
@@ -418,16 +427,16 @@ func getdyn(n *Node, top bool) initGenType {
return initDynamic
}
- case OARRAYLIT, OSTRUCTLIT:
+ case ir.OARRAYLIT, ir.OSTRUCTLIT:
}
var mode initGenType
- for _, n1 := range n.List.Slice() {
- switch n1.Op {
- case OKEY:
- n1 = n1.Right
- case OSTRUCTKEY:
- n1 = n1.Left
+ for _, n1 := range n.List().Slice() {
+ switch n1.Op() {
+ case ir.OKEY:
+ n1 = n1.Right()
+ case ir.OSTRUCTKEY:
+ n1 = n1.Left()
}
mode |= getdyn(n1, false)
if mode == initDynamic|initConst {
@@ -438,42 +447,42 @@ func getdyn(n *Node, top bool) initGenType {
}
// isStaticCompositeLiteral reports whether n is a compile-time constant.
-func isStaticCompositeLiteral(n *Node) bool {
- switch n.Op {
- case OSLICELIT:
+func isStaticCompositeLiteral(n ir.Node) bool {
+ switch n.Op() {
+ case ir.OSLICELIT:
return false
- case OARRAYLIT:
- for _, r := range n.List.Slice() {
- if r.Op == OKEY {
- r = r.Right
+ case ir.OARRAYLIT:
+ for _, r := range n.List().Slice() {
+ if r.Op() == ir.OKEY {
+ r = r.Right()
}
if !isStaticCompositeLiteral(r) {
return false
}
}
return true
- case OSTRUCTLIT:
- for _, r := range n.List.Slice() {
- if r.Op != OSTRUCTKEY {
- Fatalf("isStaticCompositeLiteral: rhs not OSTRUCTKEY: %v", r)
+ case ir.OSTRUCTLIT:
+ for _, r := range n.List().Slice() {
+ if r.Op() != ir.OSTRUCTKEY {
+ base.Fatalf("isStaticCompositeLiteral: rhs not OSTRUCTKEY: %v", r)
}
- if !isStaticCompositeLiteral(r.Left) {
+ if !isStaticCompositeLiteral(r.Left()) {
return false
}
}
return true
- case OLITERAL:
+ case ir.OLITERAL, ir.ONIL:
return true
- case OCONVIFACE:
+ case ir.OCONVIFACE:
// See staticassign's OCONVIFACE case for comments.
val := n
- for val.Op == OCONVIFACE {
- val = val.Left
+ for val.Op() == ir.OCONVIFACE {
+ val = val.Left()
}
- if val.Type.IsInterface() {
- return Isconst(val, CTNIL)
+ if val.Type().IsInterface() {
+ return val.Op() == ir.ONIL
}
- if isdirectiface(val.Type) && Isconst(val, CTNIL) {
+ if isdirectiface(val.Type()) && val.Op() == ir.ONIL {
return true
}
return isStaticCompositeLiteral(val)
@@ -500,96 +509,96 @@ const (
// fixedlit handles struct, array, and slice literals.
// TODO: expand documentation.
-func fixedlit(ctxt initContext, kind initKind, n *Node, var_ *Node, init *Nodes) {
- isBlank := var_ == nblank
- var splitnode func(*Node) (a *Node, value *Node)
- switch n.Op {
- case OARRAYLIT, OSLICELIT:
+func fixedlit(ctxt initContext, kind initKind, n ir.Node, var_ ir.Node, init *ir.Nodes) {
+ isBlank := var_ == ir.BlankNode
+ var splitnode func(ir.Node) (a ir.Node, value ir.Node)
+ switch n.Op() {
+ case ir.OARRAYLIT, ir.OSLICELIT:
var k int64
- splitnode = func(r *Node) (*Node, *Node) {
- if r.Op == OKEY {
- k = indexconst(r.Left)
+ splitnode = func(r ir.Node) (ir.Node, ir.Node) {
+ if r.Op() == ir.OKEY {
+ k = indexconst(r.Left())
if k < 0 {
- Fatalf("fixedlit: invalid index %v", r.Left)
+ base.Fatalf("fixedlit: invalid index %v", r.Left())
}
- r = r.Right
+ r = r.Right()
}
- a := nod(OINDEX, var_, nodintconst(k))
+ a := ir.Nod(ir.OINDEX, var_, nodintconst(k))
k++
if isBlank {
- a = nblank
+ a = ir.BlankNode
}
return a, r
}
- case OSTRUCTLIT:
- splitnode = func(r *Node) (*Node, *Node) {
- if r.Op != OSTRUCTKEY {
- Fatalf("fixedlit: rhs not OSTRUCTKEY: %v", r)
+ case ir.OSTRUCTLIT:
+ splitnode = func(r ir.Node) (ir.Node, ir.Node) {
+ if r.Op() != ir.OSTRUCTKEY {
+ base.Fatalf("fixedlit: rhs not OSTRUCTKEY: %v", r)
}
- if r.Sym.IsBlank() || isBlank {
- return nblank, r.Left
+ if r.Sym().IsBlank() || isBlank {
+ return ir.BlankNode, r.Left()
}
setlineno(r)
- return nodSym(ODOT, var_, r.Sym), r.Left
+ return nodSym(ir.ODOT, var_, r.Sym()), r.Left()
}
default:
- Fatalf("fixedlit bad op: %v", n.Op)
+ base.Fatalf("fixedlit bad op: %v", n.Op())
}
- for _, r := range n.List.Slice() {
+ for _, r := range n.List().Slice() {
a, value := splitnode(r)
- if a == nblank && candiscard(value) {
+ if a == ir.BlankNode && candiscard(value) {
continue
}
- switch value.Op {
- case OSLICELIT:
+ switch value.Op() {
+ case ir.OSLICELIT:
if (kind == initKindStatic && ctxt == inNonInitFunction) || (kind == initKindDynamic && ctxt == inInitFunction) {
slicelit(ctxt, value, a, init)
continue
}
- case OARRAYLIT, OSTRUCTLIT:
+ case ir.OARRAYLIT, ir.OSTRUCTLIT:
fixedlit(ctxt, kind, value, a, init)
continue
}
- islit := value.isGoConst()
+ islit := isGoConst(value)
if (kind == initKindStatic && !islit) || (kind == initKindDynamic && islit) {
continue
}
// build list of assignments: var[index] = expr
setlineno(a)
- a = nod(OAS, a, value)
+ a = ir.Nod(ir.OAS, a, value)
a = typecheck(a, ctxStmt)
switch kind {
case initKindStatic:
genAsStatic(a)
case initKindDynamic, initKindLocalCode:
- a = orderStmtInPlace(a, map[string][]*Node{})
+ a = orderStmtInPlace(a, map[string][]ir.Node{})
a = walkstmt(a)
init.Append(a)
default:
- Fatalf("fixedlit: bad kind %d", kind)
+ base.Fatalf("fixedlit: bad kind %d", kind)
}
}
}
-func isSmallSliceLit(n *Node) bool {
- if n.Op != OSLICELIT {
+func isSmallSliceLit(n ir.Node) bool {
+ if n.Op() != ir.OSLICELIT {
return false
}
- r := n.Right
+ r := n.Right()
- return smallintconst(r) && (n.Type.Elem().Width == 0 || r.Int64Val() <= smallArrayBytes/n.Type.Elem().Width)
+ return smallintconst(r) && (n.Type().Elem().Width == 0 || r.Int64Val() <= smallArrayBytes/n.Type().Elem().Width)
}
-func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
+func slicelit(ctxt initContext, n ir.Node, var_ ir.Node, init *ir.Nodes) {
// make an array type corresponding the number of elements we have
- t := types.NewArray(n.Type.Elem(), n.Right.Int64Val())
+ t := types.NewArray(n.Type().Elem(), n.Right().Int64Val())
dowidth(t)
if ctxt == inNonInitFunction {
@@ -601,11 +610,11 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
// copy static to slice
var_ = typecheck(var_, ctxExpr|ctxAssign)
- var nam Node
- if !stataddr(&nam, var_) || nam.Class() != PEXTERN {
- Fatalf("slicelit: %v", var_)
+ nam := stataddr(var_)
+ if nam == nil || nam.Class() != ir.PEXTERN {
+ base.Fatalf("slicelit: %v", var_)
}
- slicesym(&nam, vstat, t.NumElem())
+ slicesym(nam, vstat, t.NumElem())
return
}
@@ -630,7 +639,7 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
// if the literal contains constants,
// make static initialized array (1),(2)
- var vstat *Node
+ var vstat ir.Node
mode := getdyn(n, true)
if mode&initConst != 0 && !isSmallSliceLit(n) {
@@ -646,51 +655,51 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
vauto := temp(types.NewPtr(t))
// set auto to point at new temp or heap (3 assign)
- var a *Node
+ var a ir.Node
if x := prealloc[n]; x != nil {
// temp allocated during order.go for dddarg
- if !types.Identical(t, x.Type) {
+ if !types.Identical(t, x.Type()) {
panic("dotdotdot base type does not match order's assigned type")
}
if vstat == nil {
- a = nod(OAS, x, nil)
+ a = ir.Nod(ir.OAS, x, nil)
a = typecheck(a, ctxStmt)
init.Append(a) // zero new temp
} else {
// Declare that we're about to initialize all of x.
// (Which happens at the *vauto = vstat below.)
- init.Append(nod(OVARDEF, x, nil))
+ init.Append(ir.Nod(ir.OVARDEF, x, nil))
}
- a = nod(OADDR, x, nil)
- } else if n.Esc == EscNone {
+ a = ir.Nod(ir.OADDR, x, nil)
+ } else if n.Esc() == EscNone {
a = temp(t)
if vstat == nil {
- a = nod(OAS, temp(t), nil)
+ a = ir.Nod(ir.OAS, temp(t), nil)
a = typecheck(a, ctxStmt)
init.Append(a) // zero new temp
- a = a.Left
+ a = a.Left()
} else {
- init.Append(nod(OVARDEF, a, nil))
+ init.Append(ir.Nod(ir.OVARDEF, a, nil))
}
- a = nod(OADDR, a, nil)
+ a = ir.Nod(ir.OADDR, a, nil)
} else {
- a = nod(ONEW, nil, nil)
- a.List.Set1(typenod(t))
+ a = ir.Nod(ir.ONEW, nil, nil)
+ a.PtrList().Set1(typenod(t))
}
- a = nod(OAS, vauto, a)
+ a = ir.Nod(ir.OAS, vauto, a)
a = typecheck(a, ctxStmt)
a = walkexpr(a, init)
init.Append(a)
if vstat != nil {
// copy static to heap (4)
- a = nod(ODEREF, vauto, nil)
+ a = ir.Nod(ir.ODEREF, vauto, nil)
- a = nod(OAS, a, vstat)
+ a = ir.Nod(ir.OAS, a, vstat)
a = typecheck(a, ctxStmt)
a = walkexpr(a, init)
init.Append(a)
@@ -698,25 +707,25 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
// put dynamics into array (5)
var index int64
- for _, value := range n.List.Slice() {
- if value.Op == OKEY {
- index = indexconst(value.Left)
+ for _, value := range n.List().Slice() {
+ if value.Op() == ir.OKEY {
+ index = indexconst(value.Left())
if index < 0 {
- Fatalf("slicelit: invalid index %v", value.Left)
+ base.Fatalf("slicelit: invalid index %v", value.Left())
}
- value = value.Right
+ value = value.Right()
}
- a := nod(OINDEX, vauto, nodintconst(index))
+ a := ir.Nod(ir.OINDEX, vauto, nodintconst(index))
a.SetBounded(true)
index++
// TODO need to check bounds?
- switch value.Op {
- case OSLICELIT:
+ switch value.Op() {
+ case ir.OSLICELIT:
break
- case OARRAYLIT, OSTRUCTLIT:
+ case ir.OARRAYLIT, ir.OSTRUCTLIT:
k := initKindDynamic
if vstat == nil {
// Generate both static and dynamic initializations.
@@ -727,43 +736,43 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
continue
}
- if vstat != nil && value.isGoConst() { // already set by copy from static value
+ if vstat != nil && isGoConst(value) { // already set by copy from static value
continue
}
// build list of vauto[c] = expr
setlineno(value)
- a = nod(OAS, a, value)
+ a = ir.Nod(ir.OAS, a, value)
a = typecheck(a, ctxStmt)
- a = orderStmtInPlace(a, map[string][]*Node{})
+ a = orderStmtInPlace(a, map[string][]ir.Node{})
a = walkstmt(a)
init.Append(a)
}
// make slice out of heap (6)
- a = nod(OAS, var_, nod(OSLICE, vauto, nil))
+ a = ir.Nod(ir.OAS, var_, ir.Nod(ir.OSLICE, vauto, nil))
a = typecheck(a, ctxStmt)
- a = orderStmtInPlace(a, map[string][]*Node{})
+ a = orderStmtInPlace(a, map[string][]ir.Node{})
a = walkstmt(a)
init.Append(a)
}
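
Taken with the numbered comments, the non-init-function path above lowers a slice literal roughly like this (a sketch for s := []int{1, 2, x}, reusing the vstat/vauto names from the code):

    vstat := [3]int{1, 2, 0} // static array holding the constant part (1),(2)
    vauto := new([3]int)     // auto pointer to new temp or heap (3)
    *vauto = vstat           // copy static to heap (4)
    vauto[2] = x             // put dynamics into array (5)
    s = vauto[:]             // make slice out of heap (6)
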
-func maplit(n *Node, m *Node, init *Nodes) {
+func maplit(n ir.Node, m ir.Node, init *ir.Nodes) {
// make the map var
- a := nod(OMAKE, nil, nil)
- a.Esc = n.Esc
- a.List.Set2(typenod(n.Type), nodintconst(int64(n.List.Len())))
+ a := ir.Nod(ir.OMAKE, nil, nil)
+ a.SetEsc(n.Esc())
+ a.PtrList().Set2(typenod(n.Type()), nodintconst(int64(n.List().Len())))
litas(m, a, init)
- entries := n.List.Slice()
+ entries := n.List().Slice()
// The order pass already removed any dynamic (runtime-computed) entries.
// All remaining entries are static. Double-check that.
for _, r := range entries {
- if !isStaticCompositeLiteral(r.Left) || !isStaticCompositeLiteral(r.Right) {
- Fatalf("maplit: entry is not a literal: %v", r)
+ if !isStaticCompositeLiteral(r.Left()) || !isStaticCompositeLiteral(r.Right()) {
+ base.Fatalf("maplit: entry is not a literal: %v", r)
}
}
@@ -771,8 +780,8 @@ func maplit(n *Node, m *Node, init *Nodes) {
// For a large number of entries, put them in an array and loop.
// build types [count]Tindex and [count]Tvalue
- tk := types.NewArray(n.Type.Key(), int64(len(entries)))
- te := types.NewArray(n.Type.Elem(), int64(len(entries)))
+ tk := types.NewArray(n.Type().Key(), int64(len(entries)))
+ te := types.NewArray(n.Type().Elem(), int64(len(entries)))
tk.SetNoalg(true)
te.SetNoalg(true)
@@ -784,11 +793,11 @@ func maplit(n *Node, m *Node, init *Nodes) {
vstatk := readonlystaticname(tk)
vstate := readonlystaticname(te)
- datak := nod(OARRAYLIT, nil, nil)
- datae := nod(OARRAYLIT, nil, nil)
+ datak := ir.Nod(ir.OARRAYLIT, nil, nil)
+ datae := ir.Nod(ir.OARRAYLIT, nil, nil)
for _, r := range entries {
- datak.List.Append(r.Left)
- datae.List.Append(r.Right)
+ datak.PtrList().Append(r.Left())
+ datae.PtrList().Append(r.Right())
}
fixedlit(inInitFunction, initKindStatic, datak, vstatk, init)
fixedlit(inInitFunction, initKindStatic, datae, vstate, init)
@@ -797,22 +806,22 @@ func maplit(n *Node, m *Node, init *Nodes) {
// for i = 0; i < len(vstatk); i++ {
// map[vstatk[i]] = vstate[i]
// }
- i := temp(types.Types[TINT])
- rhs := nod(OINDEX, vstate, i)
+ i := temp(types.Types[types.TINT])
+ rhs := ir.Nod(ir.OINDEX, vstate, i)
rhs.SetBounded(true)
- kidx := nod(OINDEX, vstatk, i)
+ kidx := ir.Nod(ir.OINDEX, vstatk, i)
kidx.SetBounded(true)
- lhs := nod(OINDEX, m, kidx)
+ lhs := ir.Nod(ir.OINDEX, m, kidx)
- zero := nod(OAS, i, nodintconst(0))
- cond := nod(OLT, i, nodintconst(tk.NumElem()))
- incr := nod(OAS, i, nod(OADD, i, nodintconst(1)))
- body := nod(OAS, lhs, rhs)
+ zero := ir.Nod(ir.OAS, i, nodintconst(0))
+ cond := ir.Nod(ir.OLT, i, nodintconst(tk.NumElem()))
+ incr := ir.Nod(ir.OAS, i, ir.Nod(ir.OADD, i, nodintconst(1)))
+ body := ir.Nod(ir.OAS, lhs, rhs)
- loop := nod(OFOR, cond, incr)
- loop.Nbody.Set1(body)
- loop.Ninit.Set1(zero)
+ loop := ir.Nod(ir.OFOR, cond, incr)
+ loop.PtrBody().Set1(body)
+ loop.PtrInit().Set1(zero)
loop = typecheck(loop, ctxStmt)
loop = walkstmt(loop)
@@ -824,95 +833,95 @@ func maplit(n *Node, m *Node, init *Nodes) {
// Build list of var[c] = expr.
// Use temporaries so that mapassign1 can have addressable key, elem.
// TODO(josharian): avoid map key temporaries for mapfast_* assignments with literal keys.
- tmpkey := temp(m.Type.Key())
- tmpelem := temp(m.Type.Elem())
+ tmpkey := temp(m.Type().Key())
+ tmpelem := temp(m.Type().Elem())
for _, r := range entries {
- index, elem := r.Left, r.Right
+ index, elem := r.Left(), r.Right()
setlineno(index)
- a := nod(OAS, tmpkey, index)
+ a := ir.Nod(ir.OAS, tmpkey, index)
a = typecheck(a, ctxStmt)
a = walkstmt(a)
init.Append(a)
setlineno(elem)
- a = nod(OAS, tmpelem, elem)
+ a = ir.Nod(ir.OAS, tmpelem, elem)
a = typecheck(a, ctxStmt)
a = walkstmt(a)
init.Append(a)
setlineno(tmpelem)
- a = nod(OAS, nod(OINDEX, m, tmpkey), tmpelem)
+ a = ir.Nod(ir.OAS, ir.Nod(ir.OINDEX, m, tmpkey), tmpelem)
a = typecheck(a, ctxStmt)
a = walkstmt(a)
init.Append(a)
}
- a = nod(OVARKILL, tmpkey, nil)
+ a = ir.Nod(ir.OVARKILL, tmpkey, nil)
a = typecheck(a, ctxStmt)
init.Append(a)
- a = nod(OVARKILL, tmpelem, nil)
+ a = ir.Nod(ir.OVARKILL, tmpelem, nil)
a = typecheck(a, ctxStmt)
init.Append(a)
}
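
maplit therefore chooses between two lowerings. Above the size threshold, the entries land in two parallel read-only arrays and are inserted with the loop from the comment; smaller literals are expanded entry by entry through the tmpkey/tmpelem temporaries so that mapassign sees addressable operands. Sketch of the large-literal shape:

    for i := 0; i < len(vstatk); i++ {
            m[vstatk[i]] = vstate[i]
    }
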
-func anylit(n *Node, var_ *Node, init *Nodes) {
- t := n.Type
- switch n.Op {
+func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) {
+ t := n.Type()
+ switch n.Op() {
default:
- Fatalf("anylit: not lit, op=%v node=%v", n.Op, n)
+ base.Fatalf("anylit: not lit, op=%v node=%v", n.Op(), n)
- case ONAME:
- a := nod(OAS, var_, n)
+ case ir.ONAME, ir.OMETHEXPR:
+ a := ir.Nod(ir.OAS, var_, n)
a = typecheck(a, ctxStmt)
init.Append(a)
- case OPTRLIT:
+ case ir.OPTRLIT:
if !t.IsPtr() {
- Fatalf("anylit: not ptr")
+ base.Fatalf("anylit: not ptr")
}
- var r *Node
- if n.Right != nil {
+ var r ir.Node
+ if n.Right() != nil {
// n.Right is a stack temporary used as the backing store.
- init.Append(nod(OAS, n.Right, nil)) // zero backing store, just in case (#18410)
- r = nod(OADDR, n.Right, nil)
+ init.Append(ir.Nod(ir.OAS, n.Right(), nil)) // zero backing store, just in case (#18410)
+ r = ir.Nod(ir.OADDR, n.Right(), nil)
r = typecheck(r, ctxExpr)
} else {
- r = nod(ONEW, nil, nil)
+ r = ir.Nod(ir.ONEW, nil, nil)
r.SetTypecheck(1)
- r.Type = t
- r.Esc = n.Esc
+ r.SetType(t)
+ r.SetEsc(n.Esc())
}
r = walkexpr(r, init)
- a := nod(OAS, var_, r)
+ a := ir.Nod(ir.OAS, var_, r)
a = typecheck(a, ctxStmt)
init.Append(a)
- var_ = nod(ODEREF, var_, nil)
+ var_ = ir.Nod(ir.ODEREF, var_, nil)
var_ = typecheck(var_, ctxExpr|ctxAssign)
- anylit(n.Left, var_, init)
+ anylit(n.Left(), var_, init)
- case OSTRUCTLIT, OARRAYLIT:
+ case ir.OSTRUCTLIT, ir.OARRAYLIT:
if !t.IsStruct() && !t.IsArray() {
- Fatalf("anylit: not struct/array")
+ base.Fatalf("anylit: not struct/array")
}
- if var_.isSimpleName() && n.List.Len() > 4 {
+ if isSimpleName(var_) && n.List().Len() > 4 {
// lay out static data
vstat := readonlystaticname(t)
ctxt := inInitFunction
- if n.Op == OARRAYLIT {
+ if n.Op() == ir.OARRAYLIT {
ctxt = inNonInitFunction
}
fixedlit(ctxt, initKindStatic, n, vstat, init)
// copy static to var
- a := nod(OAS, var_, vstat)
+ a := ir.Nod(ir.OAS, var_, vstat)
a = typecheck(a, ctxStmt)
a = walkexpr(a, init)
@@ -924,14 +933,14 @@ func anylit(n *Node, var_ *Node, init *Nodes) {
}
var components int64
- if n.Op == OARRAYLIT {
+ if n.Op() == ir.OARRAYLIT {
components = t.NumElem()
} else {
components = int64(t.NumFields())
}
// initialization of an array or struct with unspecified components (missing fields or arrays)
- if var_.isSimpleName() || int64(n.List.Len()) < components {
- a := nod(OAS, var_, nil)
+ if isSimpleName(var_) || int64(n.List().Len()) < components {
+ a := ir.Nod(ir.OAS, var_, nil)
a = typecheck(a, ctxStmt)
a = walkexpr(a, init)
init.Append(a)
@@ -939,149 +948,150 @@ func anylit(n *Node, var_ *Node, init *Nodes) {
fixedlit(inInitFunction, initKindLocalCode, n, var_, init)
- case OSLICELIT:
+ case ir.OSLICELIT:
slicelit(inInitFunction, n, var_, init)
- case OMAPLIT:
+ case ir.OMAPLIT:
if !t.IsMap() {
- Fatalf("anylit: not map")
+ base.Fatalf("anylit: not map")
}
maplit(n, var_, init)
}
}
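
For orientation, the OPTRLIT arm amounts to roughly this rewrite for p := &T{f: 1} (a sketch; the stack-temporary variant zeroes n.Right() first, per #18410):

    r := new(T)  // or the address of the zeroed stack temp
    p = r
    *p = T{f: 1} // handled by the recursive anylit(n.Left(), ...) call
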
-func oaslit(n *Node, init *Nodes) bool {
- if n.Left == nil || n.Right == nil {
+func oaslit(n ir.Node, init *ir.Nodes) bool {
+ if n.Left() == nil || n.Right() == nil {
// not a special composite literal assignment
return false
}
- if n.Left.Type == nil || n.Right.Type == nil {
+ if n.Left().Type() == nil || n.Right().Type() == nil {
// not a special composite literal assignment
return false
}
- if !n.Left.isSimpleName() {
+ if !isSimpleName(n.Left()) {
// not a special composite literal assignment
return false
}
- if !types.Identical(n.Left.Type, n.Right.Type) {
+ if !types.Identical(n.Left().Type(), n.Right().Type()) {
// not a special composite literal assignment
return false
}
- switch n.Right.Op {
+ switch n.Right().Op() {
default:
// not a special composite literal assignment
return false
- case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
- if vmatch1(n.Left, n.Right) {
+ case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT:
+ if vmatch1(n.Left(), n.Right()) {
// not a special composite literal assignment
return false
}
- anylit(n.Right, n.Left, init)
+ anylit(n.Right(), n.Left(), init)
}
- n.Op = OEMPTY
- n.Right = nil
+ n.SetOp(ir.OEMPTY)
+ n.SetRight(nil)
return true
}
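
So oaslit fires only on a narrow fast path: a simple-name LHS of exactly the literal's type that does not appear on the RHS. When it matches, the assignment is expanded in place and the OAS node itself is neutered (sketch):

    x = T{a: 1, b: 2} // becomes field-by-field stores via anylit;
                      // the original node ends up as OEMPTY with a nil RHS
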
-func getlit(lit *Node) int {
+func getlit(lit ir.Node) int {
if smallintconst(lit) {
return int(lit.Int64Val())
}
return -1
}
-// stataddr sets nam to the static address of n and reports whether it succeeded.
-func stataddr(nam *Node, n *Node) bool {
+// stataddr returns the static address of n, if n has one, or else nil.
+func stataddr(n ir.Node) ir.Node {
if n == nil {
- return false
+ return nil
}
- switch n.Op {
- case ONAME:
- *nam = *n
- return true
+ switch n.Op() {
+ case ir.ONAME, ir.OMETHEXPR:
+ return ir.SepCopy(n)
- case ODOT:
- if !stataddr(nam, n.Left) {
+ case ir.ODOT:
+ nam := stataddr(n.Left())
+ if nam == nil {
break
}
- nam.Xoffset += n.Xoffset
- nam.Type = n.Type
- return true
+ nam.SetOffset(nam.Offset() + n.Offset())
+ nam.SetType(n.Type())
+ return nam
- case OINDEX:
- if n.Left.Type.IsSlice() {
+ case ir.OINDEX:
+ if n.Left().Type().IsSlice() {
break
}
- if !stataddr(nam, n.Left) {
+ nam := stataddr(n.Left())
+ if nam == nil {
break
}
- l := getlit(n.Right)
+ l := getlit(n.Right())
if l < 0 {
break
}
// Check for overflow.
- if n.Type.Width != 0 && thearch.MAXWIDTH/n.Type.Width <= int64(l) {
+ if n.Type().Width != 0 && thearch.MAXWIDTH/n.Type().Width <= int64(l) {
break
}
- nam.Xoffset += int64(l) * n.Type.Width
- nam.Type = n.Type
- return true
+ nam.SetOffset(nam.Offset() + int64(l)*n.Type().Width)
+ nam.SetType(n.Type())
+ return nam
}
- return false
+ return nil
}
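
The signature change here is worth noting: stataddr used to fill an out-parameter and report success, and now returns a fresh node or nil. The caller pattern, as updated in slicelit above and genAsStatic below (sketch):

    // old:                          // new:
    var nam Node                     nam := stataddr(n)
    if !stataddr(&nam, n) { ... }    if nam == nil { ... }
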
-func (s *InitSchedule) initplan(n *Node) {
+func (s *InitSchedule) initplan(n ir.Node) {
if s.initplans[n] != nil {
return
}
p := new(InitPlan)
s.initplans[n] = p
- switch n.Op {
+ switch n.Op() {
default:
- Fatalf("initplan")
+ base.Fatalf("initplan")
- case OARRAYLIT, OSLICELIT:
+ case ir.OARRAYLIT, ir.OSLICELIT:
var k int64
- for _, a := range n.List.Slice() {
- if a.Op == OKEY {
- k = indexconst(a.Left)
+ for _, a := range n.List().Slice() {
+ if a.Op() == ir.OKEY {
+ k = indexconst(a.Left())
if k < 0 {
- Fatalf("initplan arraylit: invalid index %v", a.Left)
+ base.Fatalf("initplan arraylit: invalid index %v", a.Left())
}
- a = a.Right
+ a = a.Right()
}
- s.addvalue(p, k*n.Type.Elem().Width, a)
+ s.addvalue(p, k*n.Type().Elem().Width, a)
k++
}
- case OSTRUCTLIT:
- for _, a := range n.List.Slice() {
- if a.Op != OSTRUCTKEY {
- Fatalf("initplan structlit")
+ case ir.OSTRUCTLIT:
+ for _, a := range n.List().Slice() {
+ if a.Op() != ir.OSTRUCTKEY {
+ base.Fatalf("initplan structlit")
}
- if a.Sym.IsBlank() {
+ if a.Sym().IsBlank() {
continue
}
- s.addvalue(p, a.Xoffset, a.Left)
+ s.addvalue(p, a.Offset(), a.Left())
}
- case OMAPLIT:
- for _, a := range n.List.Slice() {
- if a.Op != OKEY {
- Fatalf("initplan maplit")
+ case ir.OMAPLIT:
+ for _, a := range n.List().Slice() {
+ if a.Op() != ir.OKEY {
+ base.Fatalf("initplan maplit")
}
- s.addvalue(p, -1, a.Right)
+ s.addvalue(p, -1, a.Right())
}
}
}
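
The three arms record InitPlan offsets differently, summarized from the code above (sketch):

    // OARRAYLIT/OSLICELIT: k * n.Type().Elem().Width, k the running index
    // OSTRUCTLIT:          a.Offset(), the field offset
    // OMAPLIT:             -1, since a map entry has no static offset
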
-func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n *Node) {
+func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n ir.Node) {
// special case: zero can be dropped entirely
if isZero(n) {
return
@@ -1103,31 +1113,25 @@ func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n *Node) {
p.E = append(p.E, InitEntry{Xoffset: xoffset, Expr: n})
}
-func isZero(n *Node) bool {
- switch n.Op {
- case OLITERAL:
- switch u := n.Val().U.(type) {
+func isZero(n ir.Node) bool {
+ switch n.Op() {
+ case ir.ONIL:
+ return true
+
+ case ir.OLITERAL:
+ switch u := n.Val(); u.Kind() {
+ case constant.String:
+ return constant.StringVal(u) == ""
+ case constant.Bool:
+ return !constant.BoolVal(u)
default:
- Dump("unexpected literal", n)
- Fatalf("isZero")
- case *NilVal:
- return true
- case string:
- return u == ""
- case bool:
- return !u
- case *Mpint:
- return u.CmpInt64(0) == 0
- case *Mpflt:
- return u.CmpFloat64(0) == 0
- case *Mpcplx:
- return u.Real.CmpFloat64(0) == 0 && u.Imag.CmpFloat64(0) == 0
+ return constant.Sign(u) == 0
}
- case OARRAYLIT:
- for _, n1 := range n.List.Slice() {
- if n1.Op == OKEY {
- n1 = n1.Right
+ case ir.OARRAYLIT:
+ for _, n1 := range n.List().Slice() {
+ if n1.Op() == ir.OKEY {
+ n1 = n1.Right()
}
if !isZero(n1) {
return false
@@ -1135,9 +1139,9 @@ func isZero(n *Node) bool {
}
return true
- case OSTRUCTLIT:
- for _, n1 := range n.List.Slice() {
- if !isZero(n1.Left) {
+ case ir.OSTRUCTLIT:
+ for _, n1 := range n.List().Slice() {
+ if !isZero(n1.Left()) {
return false
}
}
@@ -1147,26 +1151,26 @@ func isZero(n *Node) bool {
return false
}
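
The new isZero leans on the standard go/constant package instead of the old Mpint/Mpflt wrappers, and nil is now its own op (ONIL) rather than a *NilVal literal. The zero checks reduce to plain library calls (runnable sketch, independent of the compiler):

    package main

    import (
            "fmt"
            "go/constant"
    )

    func main() {
            fmt.Println(constant.StringVal(constant.MakeString("")) == "") // string zero
            fmt.Println(!constant.BoolVal(constant.MakeBool(false)))       // bool zero
            fmt.Println(constant.Sign(constant.MakeInt64(0)) == 0)         // numeric zero
    }
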
-func isvaluelit(n *Node) bool {
- return n.Op == OARRAYLIT || n.Op == OSTRUCTLIT
+func isvaluelit(n ir.Node) bool {
+ return n.Op() == ir.OARRAYLIT || n.Op() == ir.OSTRUCTLIT
}
-func genAsStatic(as *Node) {
- if as.Left.Type == nil {
- Fatalf("genAsStatic as.Left not typechecked")
+func genAsStatic(as ir.Node) {
+ if as.Left().Type() == nil {
+ base.Fatalf("genAsStatic as.Left not typechecked")
}
- var nam Node
- if !stataddr(&nam, as.Left) || (nam.Class() != PEXTERN && as.Left != nblank) {
- Fatalf("genAsStatic: lhs %v", as.Left)
+ nam := stataddr(as.Left())
+ if nam == nil || (nam.Class() != ir.PEXTERN && as.Left() != ir.BlankNode) {
+ base.Fatalf("genAsStatic: lhs %v", as.Left())
}
switch {
- case as.Right.Op == OLITERAL:
- litsym(&nam, as.Right, int(as.Right.Type.Width))
- case as.Right.Op == ONAME && as.Right.Class() == PFUNC:
- pfuncsym(&nam, as.Right)
+ case as.Right().Op() == ir.OLITERAL:
+ litsym(nam, as.Right(), int(as.Right().Type().Width))
+ case (as.Right().Op() == ir.ONAME || as.Right().Op() == ir.OMETHEXPR) && as.Right().Class() == ir.PFUNC:
+ pfuncsym(nam, as.Right())
default:
- Fatalf("genAsStatic: rhs %v", as.Right)
+ base.Fatalf("genAsStatic: rhs %v", as.Right())
}
}
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index 0b38e70cd2..cb73532b48 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -7,6 +7,7 @@ package gc
import (
"encoding/binary"
"fmt"
+ "go/constant"
"html"
"os"
"path/filepath"
@@ -14,6 +15,8 @@ import (
"bufio"
"bytes"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
@@ -37,7 +40,7 @@ const ssaDumpFile = "ssa.html"
const maxOpenDefers = 8
// ssaDumpInlined holds all inlined functions when ssaDump contains a function name.
-var ssaDumpInlined []*Node
+var ssaDumpInlined []ir.Node
func initssaconfig() {
types_ := ssa.NewTypes()
@@ -48,21 +51,21 @@ func initssaconfig() {
// Generate a few pointer types that are uncommon in the frontend but common in the backend.
// Caching is disabled in the backend, so generating these here avoids allocations.
- _ = types.NewPtr(types.Types[TINTER]) // *interface{}
- _ = types.NewPtr(types.NewPtr(types.Types[TSTRING])) // **string
- _ = types.NewPtr(types.NewSlice(types.Types[TINTER])) // *[]interface{}
- _ = types.NewPtr(types.NewPtr(types.Bytetype)) // **byte
- _ = types.NewPtr(types.NewSlice(types.Bytetype)) // *[]byte
- _ = types.NewPtr(types.NewSlice(types.Types[TSTRING])) // *[]string
- _ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[TUINT8]))) // ***uint8
- _ = types.NewPtr(types.Types[TINT16]) // *int16
- _ = types.NewPtr(types.Types[TINT64]) // *int64
- _ = types.NewPtr(types.Errortype) // *error
+ _ = types.NewPtr(types.Types[types.TINTER]) // *interface{}
+ _ = types.NewPtr(types.NewPtr(types.Types[types.TSTRING])) // **string
+ _ = types.NewPtr(types.NewSlice(types.Types[types.TINTER])) // *[]interface{}
+ _ = types.NewPtr(types.NewPtr(types.Bytetype)) // **byte
+ _ = types.NewPtr(types.NewSlice(types.Bytetype)) // *[]byte
+ _ = types.NewPtr(types.NewSlice(types.Types[types.TSTRING])) // *[]string
+ _ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[types.TUINT8]))) // ***uint8
+ _ = types.NewPtr(types.Types[types.TINT16]) // *int16
+ _ = types.NewPtr(types.Types[types.TINT64]) // *int64
+ _ = types.NewPtr(types.Errortype) // *error
types.NewPtrCacheEnabled = false
- ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, Ctxt, Debug.N == 0)
+ ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, base.Ctxt, base.Flag.N == 0)
ssaConfig.SoftFloat = thearch.SoftFloat
- ssaConfig.Race = flag_race
- ssaCaches = make([]ssa.Cache, nBackendWorkers)
+ ssaConfig.Race = base.Flag.Race
+ ssaCaches = make([]ssa.Cache, base.Flag.LowerC)
// Set up some runtime functions we'll need to call.
assertE2I = sysfunc("assertE2I")
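
The global-state renames in this hunk follow a single table applied across the file; every pair below appears somewhere in this diff:

    Ctxt              -> base.Ctxt
    Debug.N           -> base.Flag.N
    flag_race         -> base.Flag.Race
    flag_msan         -> base.Flag.MSan
    Debug_defer       -> base.Debug.Defer
    Debug_append      -> base.Debug.Append
    compiling_runtime -> base.Flag.CompilingRuntime
    nBackendWorkers   -> base.Flag.LowerC
    myimportpath      -> base.Ctxt.Pkgpath
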
@@ -183,9 +186,9 @@ func initssaconfig() {
// function/method/interface call), where the receiver of a method call is
// considered as the 0th parameter. This does not include the receiver of an
// interface call.
-func getParam(n *Node, i int) *types.Field {
- t := n.Left.Type
- if n.Op == OCALLMETH {
+func getParam(n ir.Node, i int) *types.Field {
+ t := n.Left().Type()
+ if n.Op() == ir.OCALLMETH {
if i == 0 {
return t.Recv()
}
@@ -239,8 +242,8 @@ func dvarint(x *obj.LSym, off int, v int64) int {
// - Size of the argument
// - Offset of where argument should be placed in the args frame when making call
func (s *state) emitOpenDeferInfo() {
- x := Ctxt.Lookup(s.curfn.Func.lsym.Name + ".opendefer")
- s.curfn.Func.lsym.Func().OpenCodedDeferInfo = x
+ x := base.Ctxt.Lookup(s.curfn.Func().LSym.Name + ".opendefer")
+ s.curfn.Func().LSym.Func().OpenCodedDeferInfo = x
off := 0
// Compute maxargsize (max size of arguments for all defers)
@@ -248,20 +251,20 @@ func (s *state) emitOpenDeferInfo() {
var maxargsize int64
for i := len(s.openDefers) - 1; i >= 0; i-- {
r := s.openDefers[i]
- argsize := r.n.Left.Type.ArgWidth()
+ argsize := r.n.Left().Type().ArgWidth()
if argsize > maxargsize {
maxargsize = argsize
}
}
off = dvarint(x, off, maxargsize)
- off = dvarint(x, off, -s.deferBitsTemp.Xoffset)
+ off = dvarint(x, off, -s.deferBitsTemp.Offset())
off = dvarint(x, off, int64(len(s.openDefers)))
// Write in reverse-order, for ease of running in that order at runtime
for i := len(s.openDefers) - 1; i >= 0; i-- {
r := s.openDefers[i]
- off = dvarint(x, off, r.n.Left.Type.ArgWidth())
- off = dvarint(x, off, -r.closureNode.Xoffset)
+ off = dvarint(x, off, r.n.Left().Type().ArgWidth())
+ off = dvarint(x, off, -r.closureNode.Offset())
numArgs := len(r.argNodes)
if r.rcvrNode != nil {
// If there's an interface receiver, treat/place it as the first
@@ -271,13 +274,13 @@ func (s *state) emitOpenDeferInfo() {
}
off = dvarint(x, off, int64(numArgs))
if r.rcvrNode != nil {
- off = dvarint(x, off, -r.rcvrNode.Xoffset)
+ off = dvarint(x, off, -r.rcvrNode.Offset())
off = dvarint(x, off, s.config.PtrSize)
off = dvarint(x, off, 0)
}
for j, arg := range r.argNodes {
f := getParam(r.n, j)
- off = dvarint(x, off, -arg.Xoffset)
+ off = dvarint(x, off, -arg.Offset())
off = dvarint(x, off, f.Type.Size())
off = dvarint(x, off, f.Offset)
}
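
For reference, the record written above is a flat varint stream; the layout implied by the dvarint calls is (sketch):

    maxargsize
    -deferBitsTemp offset
    ndefers
    per defer, in reverse source order:
            total arg size, -closure argtmp offset, nargs
            per arg (receiver first, if any): -argtmp offset, size, frame offset
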
@@ -286,18 +289,18 @@ func (s *state) emitOpenDeferInfo() {
// buildssa builds an SSA function for fn.
// worker indicates which of the backend workers is doing the processing.
-func buildssa(fn *Node, worker int) *ssa.Func {
- name := fn.funcname()
+func buildssa(fn ir.Node, worker int) *ssa.Func {
+ name := ir.FuncName(fn)
printssa := false
if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", or a package.name e.g. "compress/gzip.(*Reader).Reset"
- printssa = name == ssaDump || myimportpath+"."+name == ssaDump
+ printssa = name == ssaDump || base.Ctxt.Pkgpath+"."+name == ssaDump
}
var astBuf *bytes.Buffer
if printssa {
astBuf = &bytes.Buffer{}
- fdumplist(astBuf, "buildssa-enter", fn.Func.Enter)
- fdumplist(astBuf, "buildssa-body", fn.Nbody)
- fdumplist(astBuf, "buildssa-exit", fn.Func.Exit)
+ ir.FDumpList(astBuf, "buildssa-enter", fn.Func().Enter)
+ ir.FDumpList(astBuf, "buildssa-body", fn.Body())
+ ir.FDumpList(astBuf, "buildssa-exit", fn.Func().Exit)
if ssaDumpStdout {
fmt.Println("generating SSA for", name)
fmt.Print(astBuf.String())
@@ -305,11 +308,11 @@ func buildssa(fn *Node, worker int) *ssa.Func {
}
var s state
- s.pushLine(fn.Pos)
+ s.pushLine(fn.Pos())
defer s.popLine()
- s.hasdefer = fn.Func.HasDefer()
- if fn.Func.Pragma&CgoUnsafeArgs != 0 {
+ s.hasdefer = fn.Func().HasDefer()
+ if fn.Func().Pragma&ir.CgoUnsafeArgs != 0 {
s.cgoUnsafeArgs = true
}
@@ -321,14 +324,14 @@ func buildssa(fn *Node, worker int) *ssa.Func {
s.f = ssa.NewFunc(&fe)
s.config = ssaConfig
- s.f.Type = fn.Type
+ s.f.Type = fn.Type()
s.f.Config = ssaConfig
s.f.Cache = &ssaCaches[worker]
s.f.Cache.Reset()
s.f.Name = name
s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH")
s.f.PrintOrHtmlSSA = printssa
- if fn.Func.Pragma&Nosplit != 0 {
+ if fn.Func().Pragma&ir.Nosplit != 0 {
s.f.NoSplit = true
}
s.panics = map[funcLine]*ssa.Block{}
@@ -336,12 +339,12 @@ func buildssa(fn *Node, worker int) *ssa.Func {
// Allocate starting block
s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
- s.f.Entry.Pos = fn.Pos
+ s.f.Entry.Pos = fn.Pos()
if printssa {
ssaDF := ssaDumpFile
if ssaDir != "" {
- ssaDF = filepath.Join(ssaDir, myimportpath+"."+name+".html")
+ ssaDF = filepath.Join(ssaDir, base.Ctxt.Pkgpath+"."+name+".html")
ssaD := filepath.Dir(ssaDF)
os.MkdirAll(ssaD, 0755)
}
@@ -353,20 +356,20 @@ func buildssa(fn *Node, worker int) *ssa.Func {
// Allocate starting values
s.labels = map[string]*ssaLabel{}
- s.labeledNodes = map[*Node]*ssaLabel{}
- s.fwdVars = map[*Node]*ssa.Value{}
+ s.labeledNodes = map[ir.Node]*ssaLabel{}
+ s.fwdVars = map[ir.Node]*ssa.Value{}
s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
- s.hasOpenDefers = Debug.N == 0 && s.hasdefer && !s.curfn.Func.OpenCodedDeferDisallowed()
+ s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.Func().OpenCodedDeferDisallowed()
switch {
- case s.hasOpenDefers && (Ctxt.Flag_shared || Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386":
+ case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386":
// Don't support open-coded defers for 386 ONLY when using shared
// libraries, because there is extra code (added by rewriteToUseGot())
// preceding the deferreturn/ret code that is generated by gencallret()
// that we don't track correctly.
s.hasOpenDefers = false
}
- if s.hasOpenDefers && s.curfn.Func.Exit.Len() > 0 {
+ if s.hasOpenDefers && s.curfn.Func().Exit.Len() > 0 {
// Skip doing open defers if there is any extra exit code (likely
// copying heap-allocated return values or race detection), since
// we will not generate that code in the case of the extra
@@ -374,7 +377,7 @@ func buildssa(fn *Node, worker int) *ssa.Func {
s.hasOpenDefers = false
}
if s.hasOpenDefers &&
- s.curfn.Func.numReturns*s.curfn.Func.numDefers > 15 {
+ s.curfn.Func().NumReturns*s.curfn.Func().NumDefers > 15 {
// Since we are generating defer calls at every exit for
// open-coded defers, skip doing open-coded defers if there are
// too many returns (especially if there are multiple defers).
@@ -383,54 +386,54 @@ func buildssa(fn *Node, worker int) *ssa.Func {
s.hasOpenDefers = false
}
- s.sp = s.entryNewValue0(ssa.OpSP, types.Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
- s.sb = s.entryNewValue0(ssa.OpSB, types.Types[TUINTPTR])
+ s.sp = s.entryNewValue0(ssa.OpSP, types.Types[types.TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
+ s.sb = s.entryNewValue0(ssa.OpSB, types.Types[types.TUINTPTR])
s.startBlock(s.f.Entry)
- s.vars[&memVar] = s.startmem
+ s.vars[memVar] = s.startmem
if s.hasOpenDefers {
// Create the deferBits variable and stack slot. deferBits is a
// bitmask showing which of the open-coded defers in this function
// have been activated.
- deferBitsTemp := tempAt(src.NoXPos, s.curfn, types.Types[TUINT8])
+ deferBitsTemp := tempAt(src.NoXPos, s.curfn, types.Types[types.TUINT8])
s.deferBitsTemp = deferBitsTemp
// For this value, AuxInt is initialized to zero by default
- startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[TUINT8])
- s.vars[&deferBitsVar] = startDeferBits
+ startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[types.TUINT8])
+ s.vars[deferBitsVar] = startDeferBits
s.deferBitsAddr = s.addr(deferBitsTemp)
- s.store(types.Types[TUINT8], s.deferBitsAddr, startDeferBits)
+ s.store(types.Types[types.TUINT8], s.deferBitsAddr, startDeferBits)
// Make sure that the deferBits stack slot is kept alive (for use
// by panics) and stores to deferBits are not eliminated, even if
// all checking code on deferBits in the function exit can be
// eliminated, because the defer statements were all
// unconditional.
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false)
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false)
}
// Generate addresses of local declarations
- s.decladdrs = map[*Node]*ssa.Value{}
+ s.decladdrs = map[ir.Node]*ssa.Value{}
var args []ssa.Param
var results []ssa.Param
- for _, n := range fn.Func.Dcl {
+ for _, n := range fn.Func().Dcl {
switch n.Class() {
- case PPARAM:
- s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type), n, s.sp, s.startmem)
- args = append(args, ssa.Param{Type: n.Type, Offset: int32(n.Xoffset)})
- case PPARAMOUT:
- s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type), n, s.sp, s.startmem)
- results = append(results, ssa.Param{Type: n.Type, Offset: int32(n.Xoffset)})
+ case ir.PPARAM:
+ s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
+ args = append(args, ssa.Param{Type: n.Type(), Offset: int32(n.Offset())})
+ case ir.PPARAMOUT:
+ s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
+ results = append(results, ssa.Param{Type: n.Type(), Offset: int32(n.Offset())})
if s.canSSA(n) {
// Save ssa-able PPARAMOUT variables so we can
// store them back to the stack at the end of
// the function.
s.returns = append(s.returns, n)
}
- case PAUTO:
+ case ir.PAUTO:
// processed at each use, to prevent Addr coming
// before the decl.
- case PAUTOHEAP:
+ case ir.PAUTOHEAP:
// moved to heap - already handled by frontend
- case PFUNC:
+ case ir.PFUNC:
// local function - already handled by frontend
default:
s.Fatalf("local variable with class %v unimplemented", n.Class())
@@ -438,21 +441,21 @@ func buildssa(fn *Node, worker int) *ssa.Func {
}
// Populate SSAable arguments.
- for _, n := range fn.Func.Dcl {
- if n.Class() == PPARAM && s.canSSA(n) {
- v := s.newValue0A(ssa.OpArg, n.Type, n)
+ for _, n := range fn.Func().Dcl {
+ if n.Class() == ir.PPARAM && s.canSSA(n) {
+ v := s.newValue0A(ssa.OpArg, n.Type(), n)
s.vars[n] = v
s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself.
}
}
// Convert the AST-based IR to the SSA-based IR
- s.stmtList(fn.Func.Enter)
- s.stmtList(fn.Nbody)
+ s.stmtList(fn.Func().Enter)
+ s.stmtList(fn.Body())
// fallthrough to exit
if s.curBlock != nil {
- s.pushLine(fn.Func.Endlineno)
+ s.pushLine(fn.Func().Endlineno)
s.exit()
s.popLine()
}
@@ -475,10 +478,10 @@ func buildssa(fn *Node, worker int) *ssa.Func {
return s.f
}
-func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *Node) {
+func dumpSourcesColumn(writer *ssa.HTMLWriter, fn ir.Node) {
// Read sources of target function fn.
- fname := Ctxt.PosTable.Pos(fn.Pos).Filename()
- targetFn, err := readFuncLines(fname, fn.Pos.Line(), fn.Func.Endlineno.Line())
+ fname := base.Ctxt.PosTable.Pos(fn.Pos()).Filename()
+ targetFn, err := readFuncLines(fname, fn.Pos().Line(), fn.Func().Endlineno.Line())
if err != nil {
writer.Logf("cannot read sources for function %v: %v", fn, err)
}
@@ -487,14 +490,14 @@ func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *Node) {
var inlFns []*ssa.FuncLines
for _, fi := range ssaDumpInlined {
var elno src.XPos
- if fi.Name.Defn == nil {
+ if fi.Name().Defn == nil {
// Endlineno is filled from exported data.
- elno = fi.Func.Endlineno
+ elno = fi.Func().Endlineno
} else {
- elno = fi.Name.Defn.Func.Endlineno
+ elno = fi.Name().Defn.Func().Endlineno
}
- fname := Ctxt.PosTable.Pos(fi.Pos).Filename()
- fnLines, err := readFuncLines(fname, fi.Pos.Line(), elno.Line())
+ fname := base.Ctxt.PosTable.Pos(fi.Pos()).Filename()
+ fnLines, err := readFuncLines(fname, fi.Pos().Line(), elno.Line())
if err != nil {
writer.Logf("cannot read sources for inlined function %v: %v", fi, err)
continue
@@ -563,24 +566,24 @@ func (s *state) updateUnsetPredPos(b *ssa.Block) {
// Information about each open-coded defer.
type openDeferInfo struct {
// The ODEFER node representing the function call of the defer
- n *Node
+ n ir.Node
// If defer call is closure call, the address of the argtmp where the
// closure is stored.
closure *ssa.Value
// The node representing the argtmp where the closure is stored - used for
// function, method, or interface call, to store a closure that panic
// processing can use for this defer.
- closureNode *Node
+ closureNode ir.Node
// If defer call is interface call, the address of the argtmp where the
// receiver is stored
rcvr *ssa.Value
// The node representing the argtmp where the receiver is stored
- rcvrNode *Node
+ rcvrNode ir.Node
// The addresses of the argtmps where the evaluated arguments of the defer
// function call are stored.
argVals []*ssa.Value
// The nodes representing the argtmps where the args of the defer are stored
- argNodes []*Node
+ argNodes []ir.Node
}
type state struct {
@@ -591,11 +594,11 @@ type state struct {
f *ssa.Func
// Node for function
- curfn *Node
+ curfn ir.Node
// labels and labeled control flow nodes (OFOR, OFORUNTIL, OSWITCH, OSELECT) in f
labels map[string]*ssaLabel
- labeledNodes map[*Node]*ssaLabel
+ labeledNodes map[ir.Node]*ssaLabel
// unlabeled break and continue statement tracking
breakTo *ssa.Block // current target for plain break statement
@@ -607,18 +610,18 @@ type state struct {
// variable assignments in the current block (map from variable symbol to ssa value)
// *Node is the unique identifier (an ONAME Node) for the variable.
// TODO: keep a single varnum map, then make all of these maps slices instead?
- vars map[*Node]*ssa.Value
+ vars map[ir.Node]*ssa.Value
// fwdVars are variables that are used before they are defined in the current block.
// This map exists just to coalesce multiple references into a single FwdRef op.
// *Node is the unique identifier (an ONAME Node) for the variable.
- fwdVars map[*Node]*ssa.Value
+ fwdVars map[ir.Node]*ssa.Value
// all defined variables at the end of each block. Indexed by block ID.
- defvars []map[*Node]*ssa.Value
+ defvars []map[ir.Node]*ssa.Value
// addresses of PPARAM and PPARAMOUT variables.
- decladdrs map[*Node]*ssa.Value
+ decladdrs map[ir.Node]*ssa.Value
// starting values. Memory, stack pointer, and globals pointer
startmem *ssa.Value
@@ -626,7 +629,7 @@ type state struct {
sb *ssa.Value
// value representing address of where deferBits autotmp is stored
deferBitsAddr *ssa.Value
- deferBitsTemp *Node
+ deferBitsTemp ir.Node
// line number stack. The current line number is top of stack
line []src.XPos
@@ -638,7 +641,7 @@ type state struct {
panics map[funcLine]*ssa.Block
// list of PPARAMOUT (return) variables.
- returns []*Node
+ returns []ir.Node
cgoUnsafeArgs bool
hasdefer bool // whether the function contains a defer statement
@@ -690,18 +693,22 @@ func (s *state) Fatalf(msg string, args ...interface{}) {
func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() }
+func ssaMarker(name string) ir.Node {
+ return NewName(&types.Sym{Name: name})
+}
+
var (
- // dummy node for the memory variable
- memVar = Node{Op: ONAME, Sym: &types.Sym{Name: "mem"}}
-
- // dummy nodes for temporary variables
- ptrVar = Node{Op: ONAME, Sym: &types.Sym{Name: "ptr"}}
- lenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "len"}}
- newlenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "newlen"}}
- capVar = Node{Op: ONAME, Sym: &types.Sym{Name: "cap"}}
- typVar = Node{Op: ONAME, Sym: &types.Sym{Name: "typ"}}
- okVar = Node{Op: ONAME, Sym: &types.Sym{Name: "ok"}}
- deferBitsVar = Node{Op: ONAME, Sym: &types.Sym{Name: "deferBits"}}
+ // marker node for the memory variable
+ memVar = ssaMarker("mem")
+
+ // marker nodes for temporary variables
+ ptrVar = ssaMarker("ptr")
+ lenVar = ssaMarker("len")
+ newlenVar = ssaMarker("newlen")
+ capVar = ssaMarker("cap")
+ typVar = ssaMarker("typ")
+ okVar = ssaMarker("ok")
+ deferBitsVar = ssaMarker("deferBits")
)
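
This is one spot where the interface change is more than renaming: the old dummy Node structs were keyed by address, while ssaMarker returns an ir.Node interface value that is itself the map key (sketch):

    memVar := ssaMarker("mem")  // a NewName node used only as a key
    s.vars[memVar] = s.startmem // was: s.vars[&memVar] = s.startmem
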
// startBlock sets the current block we're generating code in to b.
@@ -710,7 +717,7 @@ func (s *state) startBlock(b *ssa.Block) {
s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
}
s.curBlock = b
- s.vars = map[*Node]*ssa.Value{}
+ s.vars = map[ir.Node]*ssa.Value{}
for n := range s.fwdVars {
delete(s.fwdVars, n)
}
@@ -747,8 +754,8 @@ func (s *state) pushLine(line src.XPos) {
// the frontend may emit a node with its line number missing,
// use the parent line number in this case.
line = s.peekPos()
- if Debug.K != 0 {
- Warn("buildssa: unknown position (line 0)")
+ if base.Flag.K != 0 {
+ base.Warn("buildssa: unknown position (line 0)")
}
} else {
s.lastPos = line
@@ -914,7 +921,7 @@ func (s *state) constEmptyString(t *types.Type) *ssa.Value {
return s.f.ConstEmptyString(t)
}
func (s *state) constBool(c bool) *ssa.Value {
- return s.f.ConstBool(types.Types[TBOOL], c)
+ return s.f.ConstBool(types.Types[types.TBOOL], c)
}
func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
return s.f.ConstInt8(t, c)
@@ -967,7 +974,7 @@ func (s *state) newValueOrSfCall2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Valu
}
func (s *state) instrument(t *types.Type, addr *ssa.Value, wr bool) {
- if !s.curfn.Func.InstrumentBody() {
+ if !s.curfn.Func().InstrumentBody() {
return
}
@@ -983,13 +990,13 @@ func (s *state) instrument(t *types.Type, addr *ssa.Value, wr bool) {
var fn *obj.LSym
needWidth := false
- if flag_msan {
+ if base.Flag.MSan {
fn = msanread
if wr {
fn = msanwrite
}
needWidth = true
- } else if flag_race && t.NumComponents(types.CountBlankFields) > 1 {
+ } else if base.Flag.Race && t.NumComponents(types.CountBlankFields) > 1 {
// for composite objects we have to write every address
// because a write might happen to any subobject.
// composites with only one element don't have subobjects, though.
@@ -998,7 +1005,7 @@ func (s *state) instrument(t *types.Type, addr *ssa.Value, wr bool) {
fn = racewriterange
}
needWidth = true
- } else if flag_race {
+ } else if base.Flag.Race {
// for non-composite objects we can write just the start
// address, as any write must write the first byte.
fn = raceread
@@ -1011,7 +1018,7 @@ func (s *state) instrument(t *types.Type, addr *ssa.Value, wr bool) {
args := []*ssa.Value{addr}
if needWidth {
- args = append(args, s.constInt(types.Types[TUINTPTR], w))
+ args = append(args, s.constInt(types.Types[types.TUINTPTR], w))
}
s.rtcall(fn, true, nil, args...)
}
@@ -1026,14 +1033,14 @@ func (s *state) rawLoad(t *types.Type, src *ssa.Value) *ssa.Value {
}
func (s *state) store(t *types.Type, dst, val *ssa.Value) {
- s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem())
+ s.vars[memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem())
}
func (s *state) zero(t *types.Type, dst *ssa.Value) {
s.instrument(t, dst, true)
store := s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), dst, s.mem())
store.Aux = t
- s.vars[&memVar] = store
+ s.vars[memVar] = store
}
func (s *state) move(t *types.Type, dst, src *ssa.Value) {
@@ -1041,52 +1048,52 @@ func (s *state) move(t *types.Type, dst, src *ssa.Value) {
s.instrument(t, dst, true)
store := s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), dst, src, s.mem())
store.Aux = t
- s.vars[&memVar] = store
+ s.vars[memVar] = store
}
// stmtList converts the statement list n to SSA and adds it to s.
-func (s *state) stmtList(l Nodes) {
+func (s *state) stmtList(l ir.Nodes) {
for _, n := range l.Slice() {
s.stmt(n)
}
}
// stmt converts the statement n to SSA and adds it to s.
-func (s *state) stmt(n *Node) {
- if !(n.Op == OVARKILL || n.Op == OVARLIVE || n.Op == OVARDEF) {
+func (s *state) stmt(n ir.Node) {
+ if !(n.Op() == ir.OVARKILL || n.Op() == ir.OVARLIVE || n.Op() == ir.OVARDEF) {
// OVARKILL, OVARLIVE, and OVARDEF are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging.
- s.pushLine(n.Pos)
+ s.pushLine(n.Pos())
defer s.popLine()
}
// If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
// then this code is dead. Stop here.
- if s.curBlock == nil && n.Op != OLABEL {
+ if s.curBlock == nil && n.Op() != ir.OLABEL {
return
}
- s.stmtList(n.Ninit)
- switch n.Op {
+ s.stmtList(n.Init())
+ switch n.Op() {
- case OBLOCK:
- s.stmtList(n.List)
+ case ir.OBLOCK:
+ s.stmtList(n.List())
// No-ops
- case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:
+ case ir.OEMPTY, ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL:
// Expression statements
- case OCALLFUNC:
+ case ir.OCALLFUNC:
if isIntrinsicCall(n) {
s.intrinsicCall(n)
return
}
fallthrough
- case OCALLMETH, OCALLINTER:
+ case ir.OCALLMETH, ir.OCALLINTER:
s.callResult(n, callNormal)
- if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class() == PFUNC {
- if fn := n.Left.Sym.Name; compiling_runtime && fn == "throw" ||
- n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") {
+ if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.ONAME && n.Left().Class() == ir.PFUNC {
+ if fn := n.Left().Sym().Name; base.Flag.CompilingRuntime && fn == "throw" ||
+ n.Left().Sym().Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") {
m := s.mem()
b := s.endBlock()
b.Kind = ssa.BlockExit
@@ -1096,34 +1103,34 @@ func (s *state) stmt(n *Node) {
// go through SSA.
}
}
- case ODEFER:
- if Debug_defer > 0 {
+ case ir.ODEFER:
+ if base.Debug.Defer > 0 {
var defertype string
if s.hasOpenDefers {
defertype = "open-coded"
- } else if n.Esc == EscNever {
+ } else if n.Esc() == EscNever {
defertype = "stack-allocated"
} else {
defertype = "heap-allocated"
}
- Warnl(n.Pos, "%s defer", defertype)
+ base.WarnfAt(n.Pos(), "%s defer", defertype)
}
if s.hasOpenDefers {
- s.openDeferRecord(n.Left)
+ s.openDeferRecord(n.Left())
} else {
d := callDefer
- if n.Esc == EscNever {
+ if n.Esc() == EscNever {
d = callDeferStack
}
- s.callResult(n.Left, d)
+ s.callResult(n.Left(), d)
}
- case OGO:
- s.callResult(n.Left, callGo)
+ case ir.OGO:
+ s.callResult(n.Left(), callGo)
- case OAS2DOTTYPE:
- res, resok := s.dottype(n.Right, true)
+ case ir.OAS2DOTTYPE:
+ res, resok := s.dottype(n.Right(), true)
deref := false
- if !canSSAType(n.Right.Type) {
+ if !canSSAType(n.Right().Type()) {
if res.Op != ssa.OpLoad {
s.Fatalf("dottype of non-load")
}
@@ -1137,33 +1144,33 @@ func (s *state) stmt(n *Node) {
deref = true
res = res.Args[0]
}
- s.assign(n.List.First(), res, deref, 0)
- s.assign(n.List.Second(), resok, false, 0)
+ s.assign(n.List().First(), res, deref, 0)
+ s.assign(n.List().Second(), resok, false, 0)
return
- case OAS2FUNC:
+ case ir.OAS2FUNC:
// We come here only when it is an intrinsic call returning two values.
- if !isIntrinsicCall(n.Right) {
- s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Right)
- }
- v := s.intrinsicCall(n.Right)
- v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v)
- v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v)
- s.assign(n.List.First(), v1, false, 0)
- s.assign(n.List.Second(), v2, false, 0)
+ if !isIntrinsicCall(n.Right()) {
+ s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Right())
+ }
+ v := s.intrinsicCall(n.Right())
+ v1 := s.newValue1(ssa.OpSelect0, n.List().First().Type(), v)
+ v2 := s.newValue1(ssa.OpSelect1, n.List().Second().Type(), v)
+ s.assign(n.List().First(), v1, false, 0)
+ s.assign(n.List().Second(), v2, false, 0)
return
- case ODCL:
- if n.Left.Class() == PAUTOHEAP {
+ case ir.ODCL:
+ if n.Left().Class() == ir.PAUTOHEAP {
s.Fatalf("DCL %v", n)
}
- case OLABEL:
- sym := n.Sym
+ case ir.OLABEL:
+ sym := n.Sym()
lab := s.label(sym)
// Associate label with its control flow node, if any
- if ctl := n.labeledControl(); ctl != nil {
+ if ctl := labeledControl(n); ctl != nil {
s.labeledNodes[ctl] = lab
}
@@ -1180,8 +1187,8 @@ func (s *state) stmt(n *Node) {
}
s.startBlock(lab.target)
- case OGOTO:
- sym := n.Sym
+ case ir.OGOTO:
+ sym := n.Sym()
lab := s.label(sym)
if lab.target == nil {
@@ -1192,8 +1199,8 @@ func (s *state) stmt(n *Node) {
b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
b.AddEdgeTo(lab.target)
- case OAS:
- if n.Left == n.Right && n.Left.Op == ONAME {
+ case ir.OAS:
+ if n.Left() == n.Right() && n.Left().Op() == ir.ONAME {
// An x=x assignment. No point in doing anything
// here. In addition, skipping this assignment
// prevents generating:
@@ -1205,10 +1212,10 @@ func (s *state) stmt(n *Node) {
}
// Evaluate RHS.
- rhs := n.Right
+ rhs := n.Right()
if rhs != nil {
- switch rhs.Op {
- case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
+ switch rhs.Op() {
+ case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
@@ -1216,31 +1223,31 @@ func (s *state) stmt(n *Node) {
s.Fatalf("literal with nonzero value in SSA: %v", rhs)
}
rhs = nil
- case OAPPEND:
+ case ir.OAPPEND:
// Check whether we're writing the result of an append back to the same slice.
// If so, we handle it specially to avoid write barriers on the fast
// (non-growth) path.
- if !samesafeexpr(n.Left, rhs.List.First()) || Debug.N != 0 {
+ if !samesafeexpr(n.Left(), rhs.List().First()) || base.Flag.N != 0 {
break
}
// If the slice can be SSA'd, it'll be on the stack,
// so there will be no write barriers,
// so there's no need to attempt to prevent them.
- if s.canSSA(n.Left) {
- if Debug_append > 0 { // replicating old diagnostic message
- Warnl(n.Pos, "append: len-only update (in local slice)")
+ if s.canSSA(n.Left()) {
+ if base.Debug.Append > 0 { // replicating old diagnostic message
+ base.WarnfAt(n.Pos(), "append: len-only update (in local slice)")
}
break
}
- if Debug_append > 0 {
- Warnl(n.Pos, "append: len-only update")
+ if base.Debug.Append > 0 {
+ base.WarnfAt(n.Pos(), "append: len-only update")
}
s.append(rhs, true)
return
}
}
- if n.Left.isBlank() {
+ if ir.IsBlank(n.Left()) {
// _ = rhs
// Just evaluate rhs for side-effects.
if rhs != nil {
@@ -1250,10 +1257,10 @@ func (s *state) stmt(n *Node) {
}
var t *types.Type
- if n.Right != nil {
- t = n.Right.Type
+ if n.Right() != nil {
+ t = n.Right().Type()
} else {
- t = n.Left.Type
+ t = n.Left().Type()
}
var r *ssa.Value
@@ -1273,11 +1280,11 @@ func (s *state) stmt(n *Node) {
}
var skip skipMask
- if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
+ if rhs != nil && (rhs.Op() == ir.OSLICE || rhs.Op() == ir.OSLICE3 || rhs.Op() == ir.OSLICESTR) && samesafeexpr(rhs.Left(), n.Left()) {
// We're assigning a slicing operation back to its source.
// Don't write back fields we aren't changing. See issue #14855.
i, j, k := rhs.SliceBounds()
- if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64Val() == 0) {
+ if i != nil && (i.Op() == ir.OLITERAL && i.Val().Kind() == constant.Int && i.Int64Val() == 0) {
// [0:...] is the same as [:...]
i = nil
}
@@ -1302,15 +1309,15 @@ func (s *state) stmt(n *Node) {
}
}
- s.assign(n.Left, r, deref, skip)
+ s.assign(n.Left(), r, deref, skip)
- case OIF:
- if Isconst(n.Left, CTBOOL) {
- s.stmtList(n.Left.Ninit)
- if n.Left.BoolVal() {
- s.stmtList(n.Nbody)
+ case ir.OIF:
+ if ir.IsConst(n.Left(), constant.Bool) {
+ s.stmtList(n.Left().Init())
+ if n.Left().BoolVal() {
+ s.stmtList(n.Body())
} else {
- s.stmtList(n.Rlist)
+ s.stmtList(n.Rlist())
}
break
}
@@ -1321,64 +1328,64 @@ func (s *state) stmt(n *Node) {
likely = 1
}
var bThen *ssa.Block
- if n.Nbody.Len() != 0 {
+ if n.Body().Len() != 0 {
bThen = s.f.NewBlock(ssa.BlockPlain)
} else {
bThen = bEnd
}
var bElse *ssa.Block
- if n.Rlist.Len() != 0 {
+ if n.Rlist().Len() != 0 {
bElse = s.f.NewBlock(ssa.BlockPlain)
} else {
bElse = bEnd
}
- s.condBranch(n.Left, bThen, bElse, likely)
+ s.condBranch(n.Left(), bThen, bElse, likely)
- if n.Nbody.Len() != 0 {
+ if n.Body().Len() != 0 {
s.startBlock(bThen)
- s.stmtList(n.Nbody)
+ s.stmtList(n.Body())
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bEnd)
}
}
- if n.Rlist.Len() != 0 {
+ if n.Rlist().Len() != 0 {
s.startBlock(bElse)
- s.stmtList(n.Rlist)
+ s.stmtList(n.Rlist())
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bEnd)
}
}
s.startBlock(bEnd)
- case ORETURN:
- s.stmtList(n.List)
+ case ir.ORETURN:
+ s.stmtList(n.List())
b := s.exit()
b.Pos = s.lastPos.WithIsStmt()
- case ORETJMP:
- s.stmtList(n.List)
+ case ir.ORETJMP:
+ s.stmtList(n.List())
b := s.exit()
b.Kind = ssa.BlockRetJmp // override BlockRet
- b.Aux = n.Sym.Linksym()
+ b.Aux = n.Sym().Linksym()
- case OCONTINUE, OBREAK:
+ case ir.OCONTINUE, ir.OBREAK:
var to *ssa.Block
- if n.Sym == nil {
+ if n.Sym() == nil {
// plain break/continue
- switch n.Op {
- case OCONTINUE:
+ switch n.Op() {
+ case ir.OCONTINUE:
to = s.continueTo
- case OBREAK:
+ case ir.OBREAK:
to = s.breakTo
}
} else {
// labeled break/continue; look up the target
- sym := n.Sym
+ sym := n.Sym()
lab := s.label(sym)
- switch n.Op {
- case OCONTINUE:
+ switch n.Op() {
+ case ir.OCONTINUE:
to = lab.continueTarget
- case OBREAK:
+ case ir.OBREAK:
to = lab.breakTarget
}
}
@@ -1387,7 +1394,7 @@ func (s *state) stmt(n *Node) {
b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
b.AddEdgeTo(to)
- case OFOR, OFORUNTIL:
+ case ir.OFOR, ir.OFORUNTIL:
// OFOR: for Ninit; Left; Right { Nbody }
// cond (Left); body (Nbody); incr (Right)
//
@@ -1399,16 +1406,16 @@ func (s *state) stmt(n *Node) {
bEnd := s.f.NewBlock(ssa.BlockPlain)
// ensure empty for loops have correct position; issue #30167
- bBody.Pos = n.Pos
+ bBody.Pos = n.Pos()
// first, jump to condition test (OFOR) or body (OFORUNTIL)
b := s.endBlock()
- if n.Op == OFOR {
+ if n.Op() == ir.OFOR {
b.AddEdgeTo(bCond)
// generate code to test condition
s.startBlock(bCond)
- if n.Left != nil {
- s.condBranch(n.Left, bBody, bEnd, 1)
+ if n.Left() != nil {
+ s.condBranch(n.Left(), bBody, bEnd, 1)
} else {
b := s.endBlock()
b.Kind = ssa.BlockPlain
@@ -1433,7 +1440,7 @@ func (s *state) stmt(n *Node) {
// generate body
s.startBlock(bBody)
- s.stmtList(n.Nbody)
+ s.stmtList(n.Body())
// tear down continue/break
s.continueTo = prevContinue
@@ -1450,15 +1457,15 @@ func (s *state) stmt(n *Node) {
// generate incr (and, for OFORUNTIL, condition)
s.startBlock(bIncr)
- if n.Right != nil {
- s.stmt(n.Right)
+ if n.Right() != nil {
+ s.stmt(n.Right())
}
- if n.Op == OFOR {
+ if n.Op() == ir.OFOR {
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bCond)
// It can happen that bIncr ends in a block containing only VARKILL,
// and that muddles the debugging experience.
- if n.Op != OFORUNTIL && b.Pos == src.NoXPos {
+ if n.Op() != ir.OFORUNTIL && b.Pos == src.NoXPos {
b.Pos = bCond.Pos
}
}
@@ -1466,16 +1473,16 @@ func (s *state) stmt(n *Node) {
// bCond is unused in OFORUNTIL, so repurpose it.
bLateIncr := bCond
// test condition
- s.condBranch(n.Left, bLateIncr, bEnd, 1)
+ s.condBranch(n.Left(), bLateIncr, bEnd, 1)
// generate late increment
s.startBlock(bLateIncr)
- s.stmtList(n.List)
+ s.stmtList(n.List())
s.endBlock().AddEdgeTo(bBody)
}
s.startBlock(bEnd)
- case OSWITCH, OSELECT:
+ case ir.OSWITCH, ir.OSELECT:
// These have been mostly rewritten by the front end into their Nbody fields.
// Our main task is to correctly hook up any break statements.
bEnd := s.f.NewBlock(ssa.BlockPlain)
@@ -1489,7 +1496,7 @@ func (s *state) stmt(n *Node) {
}
// generate body code
- s.stmtList(n.Nbody)
+ s.stmtList(n.Body())
s.breakTo = prevBreak
if lab != nil {
@@ -1506,40 +1513,40 @@ func (s *state) stmt(n *Node) {
}
s.startBlock(bEnd)
- case OVARDEF:
- if !s.canSSA(n.Left) {
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left, s.mem(), false)
+ case ir.OVARDEF:
+ if !s.canSSA(n.Left()) {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left(), s.mem(), false)
}
- case OVARKILL:
+ case ir.OVARKILL:
// Insert a varkill op to record that a variable is no longer live.
// We only care about liveness info at call sites, so putting the
// varkill in the store chain is enough to keep it correctly ordered
// with respect to call ops.
- if !s.canSSA(n.Left) {
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.Left, s.mem(), false)
+ if !s.canSSA(n.Left()) {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.Left(), s.mem(), false)
}
- case OVARLIVE:
+ case ir.OVARLIVE:
// Insert a varlive op to record that a variable is still live.
- if !n.Left.Name.Addrtaken() {
- s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
+ if !n.Left().Name().Addrtaken() {
+ s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left())
}
- switch n.Left.Class() {
- case PAUTO, PPARAM, PPARAMOUT:
+ switch n.Left().Class() {
+ case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
default:
- s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left)
+ s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left())
}
- s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left, s.mem())
+ s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left(), s.mem())
- case OCHECKNIL:
- p := s.expr(n.Left)
+ case ir.OCHECKNIL:
+ p := s.expr(n.Left())
s.nilCheck(p)
- case OINLMARK:
- s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Xoffset, s.mem())
+ case ir.OINLMARK:
+ s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Offset(), s.mem())
default:
- s.Fatalf("unhandled stmt %v", n.Op)
+ s.Fatalf("unhandled stmt %v", n.Op())
}
}
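
Alongside the accessor renames, the constant tests in the OAS and OIF arms above move from the old Ctype constants to go/constant kinds (sketch):

    // old: Isconst(n.Left, CTBOOL)             i.Val().Ctype() == CTINT
    // new: ir.IsConst(n.Left(), constant.Bool) i.Val().Kind() == constant.Int
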
@@ -1569,14 +1576,14 @@ func (s *state) exit() *ssa.Block {
// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
// variables back to the stack.
- s.stmtList(s.curfn.Func.Exit)
+ s.stmtList(s.curfn.Func().Exit)
// Store SSAable PPARAMOUT variables back to stack locations.
for _, n := range s.returns {
addr := s.decladdrs[n]
- val := s.variable(n, n.Type)
- s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
- s.store(n.Type, addr, val)
+ val := s.variable(n, n.Type())
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
+ s.store(n.Type(), addr, val)
// TODO: if val is ever spilled, we'd like to use the
// PPARAMOUT slot for spilling it. That won't happen
// currently.
@@ -1594,180 +1601,180 @@ func (s *state) exit() *ssa.Block {
}
type opAndType struct {
- op Op
+ op ir.Op
etype types.EType
}
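
The opToSSA table that follows changes only its key qualifiers; the ssa.Op values are untouched (sketch of one entry):

    // old: opAndType{OADD, TINT8}:          ssa.OpAdd8,
    // new: opAndType{ir.OADD, types.TINT8}: ssa.OpAdd8,
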
var opToSSA = map[opAndType]ssa.Op{
- opAndType{OADD, TINT8}: ssa.OpAdd8,
- opAndType{OADD, TUINT8}: ssa.OpAdd8,
- opAndType{OADD, TINT16}: ssa.OpAdd16,
- opAndType{OADD, TUINT16}: ssa.OpAdd16,
- opAndType{OADD, TINT32}: ssa.OpAdd32,
- opAndType{OADD, TUINT32}: ssa.OpAdd32,
- opAndType{OADD, TINT64}: ssa.OpAdd64,
- opAndType{OADD, TUINT64}: ssa.OpAdd64,
- opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
- opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,
-
- opAndType{OSUB, TINT8}: ssa.OpSub8,
- opAndType{OSUB, TUINT8}: ssa.OpSub8,
- opAndType{OSUB, TINT16}: ssa.OpSub16,
- opAndType{OSUB, TUINT16}: ssa.OpSub16,
- opAndType{OSUB, TINT32}: ssa.OpSub32,
- opAndType{OSUB, TUINT32}: ssa.OpSub32,
- opAndType{OSUB, TINT64}: ssa.OpSub64,
- opAndType{OSUB, TUINT64}: ssa.OpSub64,
- opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
- opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,
-
- opAndType{ONOT, TBOOL}: ssa.OpNot,
-
- opAndType{ONEG, TINT8}: ssa.OpNeg8,
- opAndType{ONEG, TUINT8}: ssa.OpNeg8,
- opAndType{ONEG, TINT16}: ssa.OpNeg16,
- opAndType{ONEG, TUINT16}: ssa.OpNeg16,
- opAndType{ONEG, TINT32}: ssa.OpNeg32,
- opAndType{ONEG, TUINT32}: ssa.OpNeg32,
- opAndType{ONEG, TINT64}: ssa.OpNeg64,
- opAndType{ONEG, TUINT64}: ssa.OpNeg64,
- opAndType{ONEG, TFLOAT32}: ssa.OpNeg32F,
- opAndType{ONEG, TFLOAT64}: ssa.OpNeg64F,
-
- opAndType{OBITNOT, TINT8}: ssa.OpCom8,
- opAndType{OBITNOT, TUINT8}: ssa.OpCom8,
- opAndType{OBITNOT, TINT16}: ssa.OpCom16,
- opAndType{OBITNOT, TUINT16}: ssa.OpCom16,
- opAndType{OBITNOT, TINT32}: ssa.OpCom32,
- opAndType{OBITNOT, TUINT32}: ssa.OpCom32,
- opAndType{OBITNOT, TINT64}: ssa.OpCom64,
- opAndType{OBITNOT, TUINT64}: ssa.OpCom64,
-
- opAndType{OIMAG, TCOMPLEX64}: ssa.OpComplexImag,
- opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
- opAndType{OREAL, TCOMPLEX64}: ssa.OpComplexReal,
- opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal,
-
- opAndType{OMUL, TINT8}: ssa.OpMul8,
- opAndType{OMUL, TUINT8}: ssa.OpMul8,
- opAndType{OMUL, TINT16}: ssa.OpMul16,
- opAndType{OMUL, TUINT16}: ssa.OpMul16,
- opAndType{OMUL, TINT32}: ssa.OpMul32,
- opAndType{OMUL, TUINT32}: ssa.OpMul32,
- opAndType{OMUL, TINT64}: ssa.OpMul64,
- opAndType{OMUL, TUINT64}: ssa.OpMul64,
- opAndType{OMUL, TFLOAT32}: ssa.OpMul32F,
- opAndType{OMUL, TFLOAT64}: ssa.OpMul64F,
-
- opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F,
- opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F,
-
- opAndType{ODIV, TINT8}: ssa.OpDiv8,
- opAndType{ODIV, TUINT8}: ssa.OpDiv8u,
- opAndType{ODIV, TINT16}: ssa.OpDiv16,
- opAndType{ODIV, TUINT16}: ssa.OpDiv16u,
- opAndType{ODIV, TINT32}: ssa.OpDiv32,
- opAndType{ODIV, TUINT32}: ssa.OpDiv32u,
- opAndType{ODIV, TINT64}: ssa.OpDiv64,
- opAndType{ODIV, TUINT64}: ssa.OpDiv64u,
-
- opAndType{OMOD, TINT8}: ssa.OpMod8,
- opAndType{OMOD, TUINT8}: ssa.OpMod8u,
- opAndType{OMOD, TINT16}: ssa.OpMod16,
- opAndType{OMOD, TUINT16}: ssa.OpMod16u,
- opAndType{OMOD, TINT32}: ssa.OpMod32,
- opAndType{OMOD, TUINT32}: ssa.OpMod32u,
- opAndType{OMOD, TINT64}: ssa.OpMod64,
- opAndType{OMOD, TUINT64}: ssa.OpMod64u,
-
- opAndType{OAND, TINT8}: ssa.OpAnd8,
- opAndType{OAND, TUINT8}: ssa.OpAnd8,
- opAndType{OAND, TINT16}: ssa.OpAnd16,
- opAndType{OAND, TUINT16}: ssa.OpAnd16,
- opAndType{OAND, TINT32}: ssa.OpAnd32,
- opAndType{OAND, TUINT32}: ssa.OpAnd32,
- opAndType{OAND, TINT64}: ssa.OpAnd64,
- opAndType{OAND, TUINT64}: ssa.OpAnd64,
-
- opAndType{OOR, TINT8}: ssa.OpOr8,
- opAndType{OOR, TUINT8}: ssa.OpOr8,
- opAndType{OOR, TINT16}: ssa.OpOr16,
- opAndType{OOR, TUINT16}: ssa.OpOr16,
- opAndType{OOR, TINT32}: ssa.OpOr32,
- opAndType{OOR, TUINT32}: ssa.OpOr32,
- opAndType{OOR, TINT64}: ssa.OpOr64,
- opAndType{OOR, TUINT64}: ssa.OpOr64,
-
- opAndType{OXOR, TINT8}: ssa.OpXor8,
- opAndType{OXOR, TUINT8}: ssa.OpXor8,
- opAndType{OXOR, TINT16}: ssa.OpXor16,
- opAndType{OXOR, TUINT16}: ssa.OpXor16,
- opAndType{OXOR, TINT32}: ssa.OpXor32,
- opAndType{OXOR, TUINT32}: ssa.OpXor32,
- opAndType{OXOR, TINT64}: ssa.OpXor64,
- opAndType{OXOR, TUINT64}: ssa.OpXor64,
-
- opAndType{OEQ, TBOOL}: ssa.OpEqB,
- opAndType{OEQ, TINT8}: ssa.OpEq8,
- opAndType{OEQ, TUINT8}: ssa.OpEq8,
- opAndType{OEQ, TINT16}: ssa.OpEq16,
- opAndType{OEQ, TUINT16}: ssa.OpEq16,
- opAndType{OEQ, TINT32}: ssa.OpEq32,
- opAndType{OEQ, TUINT32}: ssa.OpEq32,
- opAndType{OEQ, TINT64}: ssa.OpEq64,
- opAndType{OEQ, TUINT64}: ssa.OpEq64,
- opAndType{OEQ, TINTER}: ssa.OpEqInter,
- opAndType{OEQ, TSLICE}: ssa.OpEqSlice,
- opAndType{OEQ, TFUNC}: ssa.OpEqPtr,
- opAndType{OEQ, TMAP}: ssa.OpEqPtr,
- opAndType{OEQ, TCHAN}: ssa.OpEqPtr,
- opAndType{OEQ, TPTR}: ssa.OpEqPtr,
- opAndType{OEQ, TUINTPTR}: ssa.OpEqPtr,
- opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
- opAndType{OEQ, TFLOAT64}: ssa.OpEq64F,
- opAndType{OEQ, TFLOAT32}: ssa.OpEq32F,
-
- opAndType{ONE, TBOOL}: ssa.OpNeqB,
- opAndType{ONE, TINT8}: ssa.OpNeq8,
- opAndType{ONE, TUINT8}: ssa.OpNeq8,
- opAndType{ONE, TINT16}: ssa.OpNeq16,
- opAndType{ONE, TUINT16}: ssa.OpNeq16,
- opAndType{ONE, TINT32}: ssa.OpNeq32,
- opAndType{ONE, TUINT32}: ssa.OpNeq32,
- opAndType{ONE, TINT64}: ssa.OpNeq64,
- opAndType{ONE, TUINT64}: ssa.OpNeq64,
- opAndType{ONE, TINTER}: ssa.OpNeqInter,
- opAndType{ONE, TSLICE}: ssa.OpNeqSlice,
- opAndType{ONE, TFUNC}: ssa.OpNeqPtr,
- opAndType{ONE, TMAP}: ssa.OpNeqPtr,
- opAndType{ONE, TCHAN}: ssa.OpNeqPtr,
- opAndType{ONE, TPTR}: ssa.OpNeqPtr,
- opAndType{ONE, TUINTPTR}: ssa.OpNeqPtr,
- opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
- opAndType{ONE, TFLOAT64}: ssa.OpNeq64F,
- opAndType{ONE, TFLOAT32}: ssa.OpNeq32F,
-
- opAndType{OLT, TINT8}: ssa.OpLess8,
- opAndType{OLT, TUINT8}: ssa.OpLess8U,
- opAndType{OLT, TINT16}: ssa.OpLess16,
- opAndType{OLT, TUINT16}: ssa.OpLess16U,
- opAndType{OLT, TINT32}: ssa.OpLess32,
- opAndType{OLT, TUINT32}: ssa.OpLess32U,
- opAndType{OLT, TINT64}: ssa.OpLess64,
- opAndType{OLT, TUINT64}: ssa.OpLess64U,
- opAndType{OLT, TFLOAT64}: ssa.OpLess64F,
- opAndType{OLT, TFLOAT32}: ssa.OpLess32F,
-
- opAndType{OLE, TINT8}: ssa.OpLeq8,
- opAndType{OLE, TUINT8}: ssa.OpLeq8U,
- opAndType{OLE, TINT16}: ssa.OpLeq16,
- opAndType{OLE, TUINT16}: ssa.OpLeq16U,
- opAndType{OLE, TINT32}: ssa.OpLeq32,
- opAndType{OLE, TUINT32}: ssa.OpLeq32U,
- opAndType{OLE, TINT64}: ssa.OpLeq64,
- opAndType{OLE, TUINT64}: ssa.OpLeq64U,
- opAndType{OLE, TFLOAT64}: ssa.OpLeq64F,
- opAndType{OLE, TFLOAT32}: ssa.OpLeq32F,
+ opAndType{ir.OADD, types.TINT8}: ssa.OpAdd8,
+ opAndType{ir.OADD, types.TUINT8}: ssa.OpAdd8,
+ opAndType{ir.OADD, types.TINT16}: ssa.OpAdd16,
+ opAndType{ir.OADD, types.TUINT16}: ssa.OpAdd16,
+ opAndType{ir.OADD, types.TINT32}: ssa.OpAdd32,
+ opAndType{ir.OADD, types.TUINT32}: ssa.OpAdd32,
+ opAndType{ir.OADD, types.TINT64}: ssa.OpAdd64,
+ opAndType{ir.OADD, types.TUINT64}: ssa.OpAdd64,
+ opAndType{ir.OADD, types.TFLOAT32}: ssa.OpAdd32F,
+ opAndType{ir.OADD, types.TFLOAT64}: ssa.OpAdd64F,
+
+ opAndType{ir.OSUB, types.TINT8}: ssa.OpSub8,
+ opAndType{ir.OSUB, types.TUINT8}: ssa.OpSub8,
+ opAndType{ir.OSUB, types.TINT16}: ssa.OpSub16,
+ opAndType{ir.OSUB, types.TUINT16}: ssa.OpSub16,
+ opAndType{ir.OSUB, types.TINT32}: ssa.OpSub32,
+ opAndType{ir.OSUB, types.TUINT32}: ssa.OpSub32,
+ opAndType{ir.OSUB, types.TINT64}: ssa.OpSub64,
+ opAndType{ir.OSUB, types.TUINT64}: ssa.OpSub64,
+ opAndType{ir.OSUB, types.TFLOAT32}: ssa.OpSub32F,
+ opAndType{ir.OSUB, types.TFLOAT64}: ssa.OpSub64F,
+
+ opAndType{ir.ONOT, types.TBOOL}: ssa.OpNot,
+
+ opAndType{ir.ONEG, types.TINT8}: ssa.OpNeg8,
+ opAndType{ir.ONEG, types.TUINT8}: ssa.OpNeg8,
+ opAndType{ir.ONEG, types.TINT16}: ssa.OpNeg16,
+ opAndType{ir.ONEG, types.TUINT16}: ssa.OpNeg16,
+ opAndType{ir.ONEG, types.TINT32}: ssa.OpNeg32,
+ opAndType{ir.ONEG, types.TUINT32}: ssa.OpNeg32,
+ opAndType{ir.ONEG, types.TINT64}: ssa.OpNeg64,
+ opAndType{ir.ONEG, types.TUINT64}: ssa.OpNeg64,
+ opAndType{ir.ONEG, types.TFLOAT32}: ssa.OpNeg32F,
+ opAndType{ir.ONEG, types.TFLOAT64}: ssa.OpNeg64F,
+
+ opAndType{ir.OBITNOT, types.TINT8}: ssa.OpCom8,
+ opAndType{ir.OBITNOT, types.TUINT8}: ssa.OpCom8,
+ opAndType{ir.OBITNOT, types.TINT16}: ssa.OpCom16,
+ opAndType{ir.OBITNOT, types.TUINT16}: ssa.OpCom16,
+ opAndType{ir.OBITNOT, types.TINT32}: ssa.OpCom32,
+ opAndType{ir.OBITNOT, types.TUINT32}: ssa.OpCom32,
+ opAndType{ir.OBITNOT, types.TINT64}: ssa.OpCom64,
+ opAndType{ir.OBITNOT, types.TUINT64}: ssa.OpCom64,
+
+ opAndType{ir.OIMAG, types.TCOMPLEX64}: ssa.OpComplexImag,
+ opAndType{ir.OIMAG, types.TCOMPLEX128}: ssa.OpComplexImag,
+ opAndType{ir.OREAL, types.TCOMPLEX64}: ssa.OpComplexReal,
+ opAndType{ir.OREAL, types.TCOMPLEX128}: ssa.OpComplexReal,
+
+ opAndType{ir.OMUL, types.TINT8}: ssa.OpMul8,
+ opAndType{ir.OMUL, types.TUINT8}: ssa.OpMul8,
+ opAndType{ir.OMUL, types.TINT16}: ssa.OpMul16,
+ opAndType{ir.OMUL, types.TUINT16}: ssa.OpMul16,
+ opAndType{ir.OMUL, types.TINT32}: ssa.OpMul32,
+ opAndType{ir.OMUL, types.TUINT32}: ssa.OpMul32,
+ opAndType{ir.OMUL, types.TINT64}: ssa.OpMul64,
+ opAndType{ir.OMUL, types.TUINT64}: ssa.OpMul64,
+ opAndType{ir.OMUL, types.TFLOAT32}: ssa.OpMul32F,
+ opAndType{ir.OMUL, types.TFLOAT64}: ssa.OpMul64F,
+
+ opAndType{ir.ODIV, types.TFLOAT32}: ssa.OpDiv32F,
+ opAndType{ir.ODIV, types.TFLOAT64}: ssa.OpDiv64F,
+
+ opAndType{ir.ODIV, types.TINT8}: ssa.OpDiv8,
+ opAndType{ir.ODIV, types.TUINT8}: ssa.OpDiv8u,
+ opAndType{ir.ODIV, types.TINT16}: ssa.OpDiv16,
+ opAndType{ir.ODIV, types.TUINT16}: ssa.OpDiv16u,
+ opAndType{ir.ODIV, types.TINT32}: ssa.OpDiv32,
+ opAndType{ir.ODIV, types.TUINT32}: ssa.OpDiv32u,
+ opAndType{ir.ODIV, types.TINT64}: ssa.OpDiv64,
+ opAndType{ir.ODIV, types.TUINT64}: ssa.OpDiv64u,
+
+ opAndType{ir.OMOD, types.TINT8}: ssa.OpMod8,
+ opAndType{ir.OMOD, types.TUINT8}: ssa.OpMod8u,
+ opAndType{ir.OMOD, types.TINT16}: ssa.OpMod16,
+ opAndType{ir.OMOD, types.TUINT16}: ssa.OpMod16u,
+ opAndType{ir.OMOD, types.TINT32}: ssa.OpMod32,
+ opAndType{ir.OMOD, types.TUINT32}: ssa.OpMod32u,
+ opAndType{ir.OMOD, types.TINT64}: ssa.OpMod64,
+ opAndType{ir.OMOD, types.TUINT64}: ssa.OpMod64u,
+
+ opAndType{ir.OAND, types.TINT8}: ssa.OpAnd8,
+ opAndType{ir.OAND, types.TUINT8}: ssa.OpAnd8,
+ opAndType{ir.OAND, types.TINT16}: ssa.OpAnd16,
+ opAndType{ir.OAND, types.TUINT16}: ssa.OpAnd16,
+ opAndType{ir.OAND, types.TINT32}: ssa.OpAnd32,
+ opAndType{ir.OAND, types.TUINT32}: ssa.OpAnd32,
+ opAndType{ir.OAND, types.TINT64}: ssa.OpAnd64,
+ opAndType{ir.OAND, types.TUINT64}: ssa.OpAnd64,
+
+ opAndType{ir.OOR, types.TINT8}: ssa.OpOr8,
+ opAndType{ir.OOR, types.TUINT8}: ssa.OpOr8,
+ opAndType{ir.OOR, types.TINT16}: ssa.OpOr16,
+ opAndType{ir.OOR, types.TUINT16}: ssa.OpOr16,
+ opAndType{ir.OOR, types.TINT32}: ssa.OpOr32,
+ opAndType{ir.OOR, types.TUINT32}: ssa.OpOr32,
+ opAndType{ir.OOR, types.TINT64}: ssa.OpOr64,
+ opAndType{ir.OOR, types.TUINT64}: ssa.OpOr64,
+
+ opAndType{ir.OXOR, types.TINT8}: ssa.OpXor8,
+ opAndType{ir.OXOR, types.TUINT8}: ssa.OpXor8,
+ opAndType{ir.OXOR, types.TINT16}: ssa.OpXor16,
+ opAndType{ir.OXOR, types.TUINT16}: ssa.OpXor16,
+ opAndType{ir.OXOR, types.TINT32}: ssa.OpXor32,
+ opAndType{ir.OXOR, types.TUINT32}: ssa.OpXor32,
+ opAndType{ir.OXOR, types.TINT64}: ssa.OpXor64,
+ opAndType{ir.OXOR, types.TUINT64}: ssa.OpXor64,
+
+ opAndType{ir.OEQ, types.TBOOL}: ssa.OpEqB,
+ opAndType{ir.OEQ, types.TINT8}: ssa.OpEq8,
+ opAndType{ir.OEQ, types.TUINT8}: ssa.OpEq8,
+ opAndType{ir.OEQ, types.TINT16}: ssa.OpEq16,
+ opAndType{ir.OEQ, types.TUINT16}: ssa.OpEq16,
+ opAndType{ir.OEQ, types.TINT32}: ssa.OpEq32,
+ opAndType{ir.OEQ, types.TUINT32}: ssa.OpEq32,
+ opAndType{ir.OEQ, types.TINT64}: ssa.OpEq64,
+ opAndType{ir.OEQ, types.TUINT64}: ssa.OpEq64,
+ opAndType{ir.OEQ, types.TINTER}: ssa.OpEqInter,
+ opAndType{ir.OEQ, types.TSLICE}: ssa.OpEqSlice,
+ opAndType{ir.OEQ, types.TFUNC}: ssa.OpEqPtr,
+ opAndType{ir.OEQ, types.TMAP}: ssa.OpEqPtr,
+ opAndType{ir.OEQ, types.TCHAN}: ssa.OpEqPtr,
+ opAndType{ir.OEQ, types.TPTR}: ssa.OpEqPtr,
+ opAndType{ir.OEQ, types.TUINTPTR}: ssa.OpEqPtr,
+ opAndType{ir.OEQ, types.TUNSAFEPTR}: ssa.OpEqPtr,
+ opAndType{ir.OEQ, types.TFLOAT64}: ssa.OpEq64F,
+ opAndType{ir.OEQ, types.TFLOAT32}: ssa.OpEq32F,
+
+ opAndType{ir.ONE, types.TBOOL}: ssa.OpNeqB,
+ opAndType{ir.ONE, types.TINT8}: ssa.OpNeq8,
+ opAndType{ir.ONE, types.TUINT8}: ssa.OpNeq8,
+ opAndType{ir.ONE, types.TINT16}: ssa.OpNeq16,
+ opAndType{ir.ONE, types.TUINT16}: ssa.OpNeq16,
+ opAndType{ir.ONE, types.TINT32}: ssa.OpNeq32,
+ opAndType{ir.ONE, types.TUINT32}: ssa.OpNeq32,
+ opAndType{ir.ONE, types.TINT64}: ssa.OpNeq64,
+ opAndType{ir.ONE, types.TUINT64}: ssa.OpNeq64,
+ opAndType{ir.ONE, types.TINTER}: ssa.OpNeqInter,
+ opAndType{ir.ONE, types.TSLICE}: ssa.OpNeqSlice,
+ opAndType{ir.ONE, types.TFUNC}: ssa.OpNeqPtr,
+ opAndType{ir.ONE, types.TMAP}: ssa.OpNeqPtr,
+ opAndType{ir.ONE, types.TCHAN}: ssa.OpNeqPtr,
+ opAndType{ir.ONE, types.TPTR}: ssa.OpNeqPtr,
+ opAndType{ir.ONE, types.TUINTPTR}: ssa.OpNeqPtr,
+ opAndType{ir.ONE, types.TUNSAFEPTR}: ssa.OpNeqPtr,
+ opAndType{ir.ONE, types.TFLOAT64}: ssa.OpNeq64F,
+ opAndType{ir.ONE, types.TFLOAT32}: ssa.OpNeq32F,
+
+ opAndType{ir.OLT, types.TINT8}: ssa.OpLess8,
+ opAndType{ir.OLT, types.TUINT8}: ssa.OpLess8U,
+ opAndType{ir.OLT, types.TINT16}: ssa.OpLess16,
+ opAndType{ir.OLT, types.TUINT16}: ssa.OpLess16U,
+ opAndType{ir.OLT, types.TINT32}: ssa.OpLess32,
+ opAndType{ir.OLT, types.TUINT32}: ssa.OpLess32U,
+ opAndType{ir.OLT, types.TINT64}: ssa.OpLess64,
+ opAndType{ir.OLT, types.TUINT64}: ssa.OpLess64U,
+ opAndType{ir.OLT, types.TFLOAT64}: ssa.OpLess64F,
+ opAndType{ir.OLT, types.TFLOAT32}: ssa.OpLess32F,
+
+ opAndType{ir.OLE, types.TINT8}: ssa.OpLeq8,
+ opAndType{ir.OLE, types.TUINT8}: ssa.OpLeq8U,
+ opAndType{ir.OLE, types.TINT16}: ssa.OpLeq16,
+ opAndType{ir.OLE, types.TUINT16}: ssa.OpLeq16U,
+ opAndType{ir.OLE, types.TINT32}: ssa.OpLeq32,
+ opAndType{ir.OLE, types.TUINT32}: ssa.OpLeq32U,
+ opAndType{ir.OLE, types.TINT64}: ssa.OpLeq64,
+ opAndType{ir.OLE, types.TUINT64}: ssa.OpLeq64U,
+ opAndType{ir.OLE, types.TFLOAT64}: ssa.OpLeq64F,
+ opAndType{ir.OLE, types.TFLOAT32}: ssa.OpLeq32F,
}
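
A note on the table above: for the two's-complement operations (add, sub, mul, and, or, xor) the signed and unsigned rows intentionally map to the same ssa.Op, since the result bits are identical either way; only sign-sensitive operations (div, mod, shifts, ordered comparisons) get distinct signed/unsigned entries. A minimal standalone sketch in ordinary Go (not compiler code) of why the split falls where it does:

    package main

    import "fmt"

    func main() {
    	a, b := int8(-8), int8(3)
    	ua, ub := uint8(a), uint8(b)
    	// Addition is bit-identical for signed and unsigned operands,
    	// so OADD needs only one ssa.Op per width.
    	fmt.Println(a+b, int8(ua+ub)) // -5 -5
    	// Division is sign-dependent, hence the OpDiv8 / OpDiv8u pair.
    	fmt.Println(a/b, int8(ua/ub)) // -2 82
    }
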
func (s *state) concreteEtype(t *types.Type) types.EType {
@@ -1775,25 +1782,25 @@ func (s *state) concreteEtype(t *types.Type) types.EType {
switch e {
default:
return e
- case TINT:
+ case types.TINT:
if s.config.PtrSize == 8 {
- return TINT64
+ return types.TINT64
}
- return TINT32
- case TUINT:
+ return types.TINT32
+ case types.TUINT:
if s.config.PtrSize == 8 {
- return TUINT64
+ return types.TUINT64
}
- return TUINT32
- case TUINTPTR:
+ return types.TUINT32
+ case types.TUINTPTR:
if s.config.PtrSize == 8 {
- return TUINT64
+ return types.TUINT64
}
- return TUINT32
+ return types.TUINT32
}
}
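
concreteEtype and ssaOp together form a two-step lookup: platform-sized kinds (TINT, TUINT, TUINTPTR) are first pinned to a fixed width via s.config.PtrSize, and the resulting (op, width) pair then indexes opToSSA. A minimal sketch of the same pattern, using hypothetical stand-in names rather than the compiler's real types:

    package main

    import "fmt"

    type op int
    type etype int

    const oADD op = 0

    const (
    	tINT etype = iota // platform-sized, like types.TINT
    	tINT32
    	tINT64
    )

    type opAndType struct {
    	op    op
    	etype etype
    }

    var opToSSA = map[opAndType]string{
    	{oADD, tINT32}: "Add32",
    	{oADD, tINT64}: "Add64",
    }

    // concreteEtype pins a platform-sized kind to a fixed width,
    // mirroring the PtrSize switch in the diff above.
    func concreteEtype(e etype, ptrSize int) etype {
    	if e != tINT {
    		return e
    	}
    	if ptrSize == 8 {
    		return tINT64
    	}
    	return tINT32
    }

    func main() {
    	fmt.Println(opToSSA[opAndType{oADD, concreteEtype(tINT, 8)}]) // Add64
    	fmt.Println(opToSSA[opAndType{oADD, concreteEtype(tINT, 4)}]) // Add32
    }
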
-func (s *state) ssaOp(op Op, t *types.Type) ssa.Op {
+func (s *state) ssaOp(op ir.Op, t *types.Type) ssa.Op {
etype := s.concreteEtype(t)
x, ok := opToSSA[opAndType{op, etype}]
if !ok {
@@ -1804,28 +1811,28 @@ func (s *state) ssaOp(op Op, t *types.Type) ssa.Op {
func floatForComplex(t *types.Type) *types.Type {
switch t.Etype {
- case TCOMPLEX64:
- return types.Types[TFLOAT32]
- case TCOMPLEX128:
- return types.Types[TFLOAT64]
+ case types.TCOMPLEX64:
+ return types.Types[types.TFLOAT32]
+ case types.TCOMPLEX128:
+ return types.Types[types.TFLOAT64]
}
- Fatalf("unexpected type: %v", t)
+ base.Fatalf("unexpected type: %v", t)
return nil
}
func complexForFloat(t *types.Type) *types.Type {
switch t.Etype {
- case TFLOAT32:
- return types.Types[TCOMPLEX64]
- case TFLOAT64:
- return types.Types[TCOMPLEX128]
+ case types.TFLOAT32:
+ return types.Types[types.TCOMPLEX64]
+ case types.TFLOAT64:
+ return types.Types[types.TCOMPLEX128]
}
- Fatalf("unexpected type: %v", t)
+ base.Fatalf("unexpected type: %v", t)
return nil
}
type opAndTwoTypes struct {
- op Op
+ op ir.Op
etype1 types.EType
etype2 types.EType
}
@@ -1843,145 +1850,145 @@ type twoOpsAndType struct {
var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
- twoTypes{TINT8, TFLOAT32}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32},
- twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32},
- twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32},
- twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64},
-
- twoTypes{TINT8, TFLOAT64}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32},
- twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32},
- twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32},
- twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64},
-
- twoTypes{TFLOAT32, TINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
- twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
- twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32},
- twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64},
-
- twoTypes{TFLOAT64, TINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
- twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
- twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32},
- twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64},
+ twoTypes{types.TINT8, types.TFLOAT32}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, types.TINT32},
+ twoTypes{types.TINT16, types.TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, types.TINT32},
+ twoTypes{types.TINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, types.TINT32},
+ twoTypes{types.TINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, types.TINT64},
+
+ twoTypes{types.TINT8, types.TFLOAT64}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, types.TINT32},
+ twoTypes{types.TINT16, types.TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, types.TINT32},
+ twoTypes{types.TINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, types.TINT32},
+ twoTypes{types.TINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, types.TINT64},
+
+ twoTypes{types.TFLOAT32, types.TINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
+ twoTypes{types.TFLOAT32, types.TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
+ twoTypes{types.TFLOAT32, types.TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, types.TINT32},
+ twoTypes{types.TFLOAT32, types.TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, types.TINT64},
+
+ twoTypes{types.TFLOAT64, types.TINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
+ twoTypes{types.TFLOAT64, types.TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
+ twoTypes{types.TFLOAT64, types.TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, types.TINT32},
+ twoTypes{types.TFLOAT64, types.TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, types.TINT64},
// unsigned
- twoTypes{TUINT8, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32},
- twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32},
- twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned
- twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto32F, branchy code expansion instead
-
- twoTypes{TUINT8, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32},
- twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32},
- twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned
- twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto64F, branchy code expansion instead
-
- twoTypes{TFLOAT32, TUINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
- twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
- twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
- twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt32Fto64U, branchy code expansion instead
-
- twoTypes{TFLOAT64, TUINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
- twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
- twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
- twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt64Fto64U, branchy code expansion instead
+ twoTypes{types.TUINT8, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, types.TINT32},
+ twoTypes{types.TUINT16, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, types.TINT32},
+ twoTypes{types.TUINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, types.TINT64}, // go wide to dodge unsigned
+ twoTypes{types.TUINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto32F, branchy code expansion instead
+
+ twoTypes{types.TUINT8, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, types.TINT32},
+ twoTypes{types.TUINT16, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, types.TINT32},
+ twoTypes{types.TUINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, types.TINT64}, // go wide to dodge unsigned
+ twoTypes{types.TUINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto64F, branchy code expansion instead
+
+ twoTypes{types.TFLOAT32, types.TUINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
+ twoTypes{types.TFLOAT32, types.TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
+ twoTypes{types.TFLOAT32, types.TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
+ twoTypes{types.TFLOAT32, types.TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt32Fto64U, branchy code expansion instead
+
+ twoTypes{types.TFLOAT64, types.TUINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
+ twoTypes{types.TFLOAT64, types.TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
+ twoTypes{types.TFLOAT64, types.TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
+ twoTypes{types.TFLOAT64, types.TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt64Fto64U, branchy code expansion instead
// float
- twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32},
- twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, TFLOAT64},
- twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, TFLOAT32},
- twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
+ twoTypes{types.TFLOAT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, types.TFLOAT32},
+ twoTypes{types.TFLOAT64, types.TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, types.TFLOAT64},
+ twoTypes{types.TFLOAT32, types.TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, types.TFLOAT32},
+ twoTypes{types.TFLOAT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, types.TFLOAT64},
}
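
Each fpConvOpToSSA entry is a (widening op, converting op, intermediate type) triple. The "go wide to dodge unsigned" entries exploit the fact that zero-extending a uint32 into a 64-bit register makes an ordinary signed convert exact, because every uint32 value fits in the non-negative range of int64. A sketch of the same trick in plain Go (not the compiler's emitted code):

    package main

    import "fmt"

    // uint32ToFloat32 mirrors the TUINT32 -> TFLOAT32 entry:
    // OpZeroExt32to64 followed by OpCvt64to32F.
    func uint32ToFloat32(u uint32) float32 {
    	wide := int64(u)     // zero-extend: every uint32 fits in int64 >= 0
    	return float32(wide) // the signed 64-bit convert is now safe
    }

    func main() {
    	fmt.Println(uint32ToFloat32(0xFFFFFFFF)) // 4.2949673e+09
    }
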
// this map is used only on 32-bit archs, and includes only the entries that differ:
// on 32-bit archs, don't use int64<->float conversions for uint32
var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
- twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32},
- twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32},
- twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32},
- twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32},
+ twoTypes{types.TUINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, types.TUINT32},
+ twoTypes{types.TUINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, types.TUINT32},
+ twoTypes{types.TFLOAT32, types.TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, types.TUINT32},
+ twoTypes{types.TFLOAT64, types.TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, types.TUINT32},
}
// uint64<->float conversions, only on machines that have instructions for them
var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
- twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, TUINT64},
- twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, TUINT64},
- twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, TUINT64},
- twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, TUINT64},
+ twoTypes{types.TUINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, types.TUINT64},
+ twoTypes{types.TUINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, types.TUINT64},
+ twoTypes{types.TFLOAT32, types.TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, types.TUINT64},
+ twoTypes{types.TFLOAT64, types.TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, types.TUINT64},
}
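
Where the 64-bit-unsigned tables above contain ssa.OpInvalid, the "branchy code expansion instead" comments refer to an open-coded conversion for targets without a native uint64<->float instruction. A sketch of the usual expansion (an assumption about the shape of the generated code, not a quote of it):

    package main

    import "fmt"

    // uint64ToFloat64 is the classic branchy expansion: small values
    // convert directly as int64; large values are halved with round-to-odd,
    // converted, then doubled, so the final rounding is still correct.
    func uint64ToFloat64(u uint64) float64 {
    	if int64(u) >= 0 {
    		return float64(int64(u))
    	}
    	half := u>>1 | u&1 // keep the low bit so the doubling rounds right
    	return 2 * float64(int64(half))
    }

    func main() {
    	fmt.Println(uint64ToFloat64(1<<64 - 1)) // 1.8446744073709552e+19
    }
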
var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
- opAndTwoTypes{OLSH, TINT8, TUINT8}: ssa.OpLsh8x8,
- opAndTwoTypes{OLSH, TUINT8, TUINT8}: ssa.OpLsh8x8,
- opAndTwoTypes{OLSH, TINT8, TUINT16}: ssa.OpLsh8x16,
- opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
- opAndTwoTypes{OLSH, TINT8, TUINT32}: ssa.OpLsh8x32,
- opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
- opAndTwoTypes{OLSH, TINT8, TUINT64}: ssa.OpLsh8x64,
- opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,
-
- opAndTwoTypes{OLSH, TINT16, TUINT8}: ssa.OpLsh16x8,
- opAndTwoTypes{OLSH, TUINT16, TUINT8}: ssa.OpLsh16x8,
- opAndTwoTypes{OLSH, TINT16, TUINT16}: ssa.OpLsh16x16,
- opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
- opAndTwoTypes{OLSH, TINT16, TUINT32}: ssa.OpLsh16x32,
- opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
- opAndTwoTypes{OLSH, TINT16, TUINT64}: ssa.OpLsh16x64,
- opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,
-
- opAndTwoTypes{OLSH, TINT32, TUINT8}: ssa.OpLsh32x8,
- opAndTwoTypes{OLSH, TUINT32, TUINT8}: ssa.OpLsh32x8,
- opAndTwoTypes{OLSH, TINT32, TUINT16}: ssa.OpLsh32x16,
- opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
- opAndTwoTypes{OLSH, TINT32, TUINT32}: ssa.OpLsh32x32,
- opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
- opAndTwoTypes{OLSH, TINT32, TUINT64}: ssa.OpLsh32x64,
- opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,
-
- opAndTwoTypes{OLSH, TINT64, TUINT8}: ssa.OpLsh64x8,
- opAndTwoTypes{OLSH, TUINT64, TUINT8}: ssa.OpLsh64x8,
- opAndTwoTypes{OLSH, TINT64, TUINT16}: ssa.OpLsh64x16,
- opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
- opAndTwoTypes{OLSH, TINT64, TUINT32}: ssa.OpLsh64x32,
- opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
- opAndTwoTypes{OLSH, TINT64, TUINT64}: ssa.OpLsh64x64,
- opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,
-
- opAndTwoTypes{ORSH, TINT8, TUINT8}: ssa.OpRsh8x8,
- opAndTwoTypes{ORSH, TUINT8, TUINT8}: ssa.OpRsh8Ux8,
- opAndTwoTypes{ORSH, TINT8, TUINT16}: ssa.OpRsh8x16,
- opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
- opAndTwoTypes{ORSH, TINT8, TUINT32}: ssa.OpRsh8x32,
- opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
- opAndTwoTypes{ORSH, TINT8, TUINT64}: ssa.OpRsh8x64,
- opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,
-
- opAndTwoTypes{ORSH, TINT16, TUINT8}: ssa.OpRsh16x8,
- opAndTwoTypes{ORSH, TUINT16, TUINT8}: ssa.OpRsh16Ux8,
- opAndTwoTypes{ORSH, TINT16, TUINT16}: ssa.OpRsh16x16,
- opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
- opAndTwoTypes{ORSH, TINT16, TUINT32}: ssa.OpRsh16x32,
- opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
- opAndTwoTypes{ORSH, TINT16, TUINT64}: ssa.OpRsh16x64,
- opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,
-
- opAndTwoTypes{ORSH, TINT32, TUINT8}: ssa.OpRsh32x8,
- opAndTwoTypes{ORSH, TUINT32, TUINT8}: ssa.OpRsh32Ux8,
- opAndTwoTypes{ORSH, TINT32, TUINT16}: ssa.OpRsh32x16,
- opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
- opAndTwoTypes{ORSH, TINT32, TUINT32}: ssa.OpRsh32x32,
- opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
- opAndTwoTypes{ORSH, TINT32, TUINT64}: ssa.OpRsh32x64,
- opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,
-
- opAndTwoTypes{ORSH, TINT64, TUINT8}: ssa.OpRsh64x8,
- opAndTwoTypes{ORSH, TUINT64, TUINT8}: ssa.OpRsh64Ux8,
- opAndTwoTypes{ORSH, TINT64, TUINT16}: ssa.OpRsh64x16,
- opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
- opAndTwoTypes{ORSH, TINT64, TUINT32}: ssa.OpRsh64x32,
- opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
- opAndTwoTypes{ORSH, TINT64, TUINT64}: ssa.OpRsh64x64,
- opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
-}
-
-func (s *state) ssaShiftOp(op Op, t *types.Type, u *types.Type) ssa.Op {
+ opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT8}: ssa.OpLsh8x8,
+ opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT8}: ssa.OpLsh8x8,
+ opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT16}: ssa.OpLsh8x16,
+ opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT16}: ssa.OpLsh8x16,
+ opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT32}: ssa.OpLsh8x32,
+ opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT32}: ssa.OpLsh8x32,
+ opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT64}: ssa.OpLsh8x64,
+ opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT64}: ssa.OpLsh8x64,
+
+ opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT8}: ssa.OpLsh16x8,
+ opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT8}: ssa.OpLsh16x8,
+ opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT16}: ssa.OpLsh16x16,
+ opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT16}: ssa.OpLsh16x16,
+ opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT32}: ssa.OpLsh16x32,
+ opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT32}: ssa.OpLsh16x32,
+ opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT64}: ssa.OpLsh16x64,
+ opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT64}: ssa.OpLsh16x64,
+
+ opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT8}: ssa.OpLsh32x8,
+ opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT8}: ssa.OpLsh32x8,
+ opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT16}: ssa.OpLsh32x16,
+ opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT16}: ssa.OpLsh32x16,
+ opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT32}: ssa.OpLsh32x32,
+ opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT32}: ssa.OpLsh32x32,
+ opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT64}: ssa.OpLsh32x64,
+ opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT64}: ssa.OpLsh32x64,
+
+ opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT8}: ssa.OpLsh64x8,
+ opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT8}: ssa.OpLsh64x8,
+ opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT16}: ssa.OpLsh64x16,
+ opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT16}: ssa.OpLsh64x16,
+ opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT32}: ssa.OpLsh64x32,
+ opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT32}: ssa.OpLsh64x32,
+ opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT64}: ssa.OpLsh64x64,
+ opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT64}: ssa.OpLsh64x64,
+
+ opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT8}: ssa.OpRsh8x8,
+ opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT8}: ssa.OpRsh8Ux8,
+ opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT16}: ssa.OpRsh8x16,
+ opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT16}: ssa.OpRsh8Ux16,
+ opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT32}: ssa.OpRsh8x32,
+ opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT32}: ssa.OpRsh8Ux32,
+ opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT64}: ssa.OpRsh8x64,
+ opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT64}: ssa.OpRsh8Ux64,
+
+ opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT8}: ssa.OpRsh16x8,
+ opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT8}: ssa.OpRsh16Ux8,
+ opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT16}: ssa.OpRsh16x16,
+ opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT16}: ssa.OpRsh16Ux16,
+ opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT32}: ssa.OpRsh16x32,
+ opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT32}: ssa.OpRsh16Ux32,
+ opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT64}: ssa.OpRsh16x64,
+ opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT64}: ssa.OpRsh16Ux64,
+
+ opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT8}: ssa.OpRsh32x8,
+ opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT8}: ssa.OpRsh32Ux8,
+ opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT16}: ssa.OpRsh32x16,
+ opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT16}: ssa.OpRsh32Ux16,
+ opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT32}: ssa.OpRsh32x32,
+ opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT32}: ssa.OpRsh32Ux32,
+ opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT64}: ssa.OpRsh32x64,
+ opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT64}: ssa.OpRsh32Ux64,
+
+ opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT8}: ssa.OpRsh64x8,
+ opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT8}: ssa.OpRsh64Ux8,
+ opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT16}: ssa.OpRsh64x16,
+ opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT16}: ssa.OpRsh64Ux16,
+ opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT32}: ssa.OpRsh64x32,
+ opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT32}: ssa.OpRsh64Ux32,
+ opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT64}: ssa.OpRsh64x64,
+ opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT64}: ssa.OpRsh64Ux64,
+}
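
The shift table is keyed on two types because Go's shift semantics depend on the count's value, not just the operand's width: x << k must be 0 once k reaches the operand's bit width, whereas most hardware masks the count. Picking, say, OpLsh8x64 records both widths so later lowering can insert the required compare-and-zero logic. A small illustration of the semantic gap:

    package main

    import "fmt"

    func main() {
    	var x uint8 = 1
    	var k uint64 = 9
    	// Go semantics: the result is 0, not the 1<<(9&7) == 2 that a
    	// count-masking shift instruction would produce on its own.
    	fmt.Println(x << k) // 0
    }
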
+
+func (s *state) ssaShiftOp(op ir.Op, t *types.Type, u *types.Type) ssa.Op {
etype1 := s.concreteEtype(t)
etype2 := s.concreteEtype(u)
x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
@@ -1992,117 +1999,121 @@ func (s *state) ssaShiftOp(op Op, t *types.Type, u *types.Type) ssa.Op {
}
// expr converts the expression n to ssa, adds it to s and returns the ssa result.
-func (s *state) expr(n *Node) *ssa.Value {
- if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) {
+func (s *state) expr(n ir.Node) *ssa.Value {
+ if hasUniquePos(n) {
// ONAMEs and named OLITERALs have the line number
// of the decl, not the use. See issue 14742.
- s.pushLine(n.Pos)
+ s.pushLine(n.Pos())
defer s.popLine()
}
- s.stmtList(n.Ninit)
- switch n.Op {
- case OBYTES2STRTMP:
- slice := s.expr(n.Left)
+ s.stmtList(n.Init())
+ switch n.Op() {
+ case ir.OBYTES2STRTMP:
+ slice := s.expr(n.Left())
ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
- len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
- return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
- case OSTR2BYTESTMP:
- str := s.expr(n.Left)
+ len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
+ return s.newValue2(ssa.OpStringMake, n.Type(), ptr, len)
+ case ir.OSTR2BYTESTMP:
+ str := s.expr(n.Left())
ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
- len := s.newValue1(ssa.OpStringLen, types.Types[TINT], str)
- return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len)
- case OCFUNC:
- aux := n.Left.Sym.Linksym()
- return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
- case ONAME:
- if n.Class() == PFUNC {
+ len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], str)
+ return s.newValue3(ssa.OpSliceMake, n.Type(), ptr, len, len)
+ case ir.OCFUNC:
+ aux := n.Left().Sym().Linksym()
+ return s.entryNewValue1A(ssa.OpAddr, n.Type(), aux, s.sb)
+ case ir.OMETHEXPR:
+ sym := funcsym(n.Sym()).Linksym()
+ return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb)
+ case ir.ONAME:
+ if n.Class() == ir.PFUNC {
// "value" of a function is the address of the function's closure
- sym := funcsym(n.Sym).Linksym()
- return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), sym, s.sb)
+ sym := funcsym(n.Sym()).Linksym()
+ return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb)
}
if s.canSSA(n) {
- return s.variable(n, n.Type)
+ return s.variable(n, n.Type())
}
addr := s.addr(n)
- return s.load(n.Type, addr)
- case OCLOSUREVAR:
+ return s.load(n.Type(), addr)
+ case ir.OCLOSUREVAR:
addr := s.addr(n)
- return s.load(n.Type, addr)
- case OLITERAL:
- switch u := n.Val().U.(type) {
- case *Mpint:
- i := u.Int64()
- switch n.Type.Size() {
+ return s.load(n.Type(), addr)
+ case ir.ONIL:
+ t := n.Type()
+ switch {
+ case t.IsSlice():
+ return s.constSlice(t)
+ case t.IsInterface():
+ return s.constInterface(t)
+ default:
+ return s.constNil(t)
+ }
+ case ir.OLITERAL:
+ switch u := n.Val(); u.Kind() {
+ case constant.Int:
+ i := ir.Int64Val(n.Type(), u)
+ switch n.Type().Size() {
case 1:
- return s.constInt8(n.Type, int8(i))
+ return s.constInt8(n.Type(), int8(i))
case 2:
- return s.constInt16(n.Type, int16(i))
+ return s.constInt16(n.Type(), int16(i))
case 4:
- return s.constInt32(n.Type, int32(i))
+ return s.constInt32(n.Type(), int32(i))
case 8:
- return s.constInt64(n.Type, i)
+ return s.constInt64(n.Type(), i)
default:
- s.Fatalf("bad integer size %d", n.Type.Size())
+ s.Fatalf("bad integer size %d", n.Type().Size())
return nil
}
- case string:
- if u == "" {
- return s.constEmptyString(n.Type)
+ case constant.String:
+ i := constant.StringVal(u)
+ if i == "" {
+ return s.constEmptyString(n.Type())
}
- return s.entryNewValue0A(ssa.OpConstString, n.Type, u)
- case bool:
- return s.constBool(u)
- case *NilVal:
- t := n.Type
- switch {
- case t.IsSlice():
- return s.constSlice(t)
- case t.IsInterface():
- return s.constInterface(t)
- default:
- return s.constNil(t)
- }
- case *Mpflt:
- switch n.Type.Size() {
+ return s.entryNewValue0A(ssa.OpConstString, n.Type(), i)
+ case constant.Bool:
+ return s.constBool(constant.BoolVal(u))
+ case constant.Float:
+ f, _ := constant.Float64Val(u)
+ switch n.Type().Size() {
case 4:
- return s.constFloat32(n.Type, u.Float32())
+ return s.constFloat32(n.Type(), f)
case 8:
- return s.constFloat64(n.Type, u.Float64())
+ return s.constFloat64(n.Type(), f)
default:
- s.Fatalf("bad float size %d", n.Type.Size())
+ s.Fatalf("bad float size %d", n.Type().Size())
return nil
}
- case *Mpcplx:
- r := &u.Real
- i := &u.Imag
- switch n.Type.Size() {
+ case constant.Complex:
+ re, _ := constant.Float64Val(constant.Real(u))
+ im, _ := constant.Float64Val(constant.Imag(u))
+ switch n.Type().Size() {
case 8:
- pt := types.Types[TFLOAT32]
- return s.newValue2(ssa.OpComplexMake, n.Type,
- s.constFloat32(pt, r.Float32()),
- s.constFloat32(pt, i.Float32()))
+ pt := types.Types[types.TFLOAT32]
+ return s.newValue2(ssa.OpComplexMake, n.Type(),
+ s.constFloat32(pt, re),
+ s.constFloat32(pt, im))
case 16:
- pt := types.Types[TFLOAT64]
- return s.newValue2(ssa.OpComplexMake, n.Type,
- s.constFloat64(pt, r.Float64()),
- s.constFloat64(pt, i.Float64()))
+ pt := types.Types[types.TFLOAT64]
+ return s.newValue2(ssa.OpComplexMake, n.Type(),
+ s.constFloat64(pt, re),
+ s.constFloat64(pt, im))
default:
- s.Fatalf("bad float size %d", n.Type.Size())
+ s.Fatalf("bad complex size %d", n.Type().Size())
return nil
}
-
default:
- s.Fatalf("unhandled OLITERAL %v", n.Val().Ctype())
+ s.Fatalf("unhandled OLITERAL %v", u.Kind())
return nil
}
- case OCONVNOP:
- to := n.Type
- from := n.Left.Type
+ case ir.OCONVNOP:
+ to := n.Type()
+ from := n.Left().Type()
// Assume everything will work out, so set up our return value.
// Anything interesting that happens from here is a fatal.
- x := s.expr(n.Left)
+ x := s.expr(n.Left())
// Special case for not confusing GC and liveness.
// We don't want pointers accidentally classified
@@ -2115,7 +2126,7 @@ func (s *state) expr(n *Node) *ssa.Value {
v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type
// CONVNOP closure
- if to.Etype == TFUNC && from.IsPtrShaped() {
+ if to.Etype == types.TFUNC && from.IsPtrShaped() {
return v
}
@@ -2130,7 +2141,7 @@ func (s *state) expr(n *Node) *ssa.Value {
}
// map <--> *hmap
- if to.Etype == TMAP && from.IsPtr() &&
+ if to.Etype == types.TMAP && from.IsPtr() &&
to.MapType().Hmap == from.Elem() {
return v
}
@@ -2161,13 +2172,13 @@ func (s *state) expr(n *Node) *ssa.Value {
// integer, same width, same sign
return v
- case OCONV:
- x := s.expr(n.Left)
- ft := n.Left.Type // from type
- tt := n.Type // to type
- if ft.IsBoolean() && tt.IsKind(TUINT8) {
+ case ir.OCONV:
+ x := s.expr(n.Left())
+ ft := n.Left().Type() // from type
+ tt := n.Type() // to type
+ if ft.IsBoolean() && tt.IsKind(types.TUINT8) {
// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
- return s.newValue1(ssa.OpCopy, n.Type, x)
+ return s.newValue1(ssa.OpCopy, n.Type(), x)
}
if ft.IsInteger() && tt.IsInteger() {
var op ssa.Op
@@ -2228,7 +2239,7 @@ func (s *state) expr(n *Node) *ssa.Value {
s.Fatalf("weird integer sign extension %v -> %v", ft, tt)
}
}
- return s.newValue1(op, n.Type, x)
+ return s.newValue1(op, n.Type(), x)
}
if ft.IsFloat() || tt.IsFloat() {
@@ -2275,12 +2286,12 @@ func (s *state) expr(n *Node) *ssa.Value {
if op2 == ssa.OpCopy {
return x
}
- return s.newValueOrSfCall1(op2, n.Type, x)
+ return s.newValueOrSfCall1(op2, n.Type(), x)
}
if op2 == ssa.OpCopy {
- return s.newValueOrSfCall1(op1, n.Type, x)
+ return s.newValueOrSfCall1(op1, n.Type(), x)
}
- return s.newValueOrSfCall1(op2, n.Type, s.newValueOrSfCall1(op1, types.Types[it], x))
+ return s.newValueOrSfCall1(op2, n.Type(), s.newValueOrSfCall1(op1, types.Types[it], x))
}
// Tricky 64-bit unsigned cases.
if ft.IsInteger() {
@@ -2329,56 +2340,56 @@ func (s *state) expr(n *Node) *ssa.Value {
s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
}
- s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype)
+ s.Fatalf("unhandled OCONV %s -> %s", n.Left().Type().Etype, n.Type().Etype)
return nil
- case ODOTTYPE:
+ case ir.ODOTTYPE:
res, _ := s.dottype(n, false)
return res
// binary ops
- case OLT, OEQ, ONE, OLE, OGE, OGT:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- if n.Left.Type.IsComplex() {
- pt := floatForComplex(n.Left.Type)
- op := s.ssaOp(OEQ, pt)
- r := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
- i := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
- c := s.newValue2(ssa.OpAndB, types.Types[TBOOL], r, i)
- switch n.Op {
- case OEQ:
+ case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
+ if n.Left().Type().IsComplex() {
+ pt := floatForComplex(n.Left().Type())
+ op := s.ssaOp(ir.OEQ, pt)
+ r := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
+ i := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
+ c := s.newValue2(ssa.OpAndB, types.Types[types.TBOOL], r, i)
+ switch n.Op() {
+ case ir.OEQ:
return c
- case ONE:
- return s.newValue1(ssa.OpNot, types.Types[TBOOL], c)
+ case ir.ONE:
+ return s.newValue1(ssa.OpNot, types.Types[types.TBOOL], c)
default:
- s.Fatalf("ordered complex compare %v", n.Op)
+ s.Fatalf("ordered complex compare %v", n.Op())
}
}
// Convert OGE and OGT into OLE and OLT.
- op := n.Op
+ op := n.Op()
switch op {
- case OGE:
- op, a, b = OLE, b, a
- case OGT:
- op, a, b = OLT, b, a
+ case ir.OGE:
+ op, a, b = ir.OLE, b, a
+ case ir.OGT:
+ op, a, b = ir.OLT, b, a
}
- if n.Left.Type.IsFloat() {
+ if n.Left().Type().IsFloat() {
// float comparison
- return s.newValueOrSfCall2(s.ssaOp(op, n.Left.Type), types.Types[TBOOL], a, b)
+ return s.newValueOrSfCall2(s.ssaOp(op, n.Left().Type()), types.Types[types.TBOOL], a, b)
}
// integer comparison
- return s.newValue2(s.ssaOp(op, n.Left.Type), types.Types[TBOOL], a, b)
- case OMUL:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- if n.Type.IsComplex() {
+ return s.newValue2(s.ssaOp(op, n.Left().Type()), types.Types[types.TBOOL], a, b)
+ case ir.OMUL:
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
+ if n.Type().IsComplex() {
mulop := ssa.OpMul64F
addop := ssa.OpAdd64F
subop := ssa.OpSub64F
- pt := floatForComplex(n.Type) // Could be Float32 or Float64
- wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancellation error
+ pt := floatForComplex(n.Type()) // Could be Float32 or Float64
+ wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
areal := s.newValue1(ssa.OpComplexReal, pt, a)
breal := s.newValue1(ssa.OpComplexReal, pt, b)
@@ -2400,19 +2411,19 @@ func (s *state) expr(n *Node) *ssa.Value {
ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
}
- return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
+ return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
}
- if n.Type.IsFloat() {
- return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ if n.Type().IsFloat() {
+ return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
- return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
- case ODIV:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- if n.Type.IsComplex() {
+ case ir.ODIV:
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
+ if n.Type().IsComplex() {
// TODO this is not executed because the front-end substitutes a runtime call.
// That probably ought to change; with modest optimization the widen/narrow
// conversions could all be elided in larger expression trees.
@@ -2420,8 +2431,8 @@ func (s *state) expr(n *Node) *ssa.Value {
addop := ssa.OpAdd64F
subop := ssa.OpSub64F
divop := ssa.OpDiv64F
- pt := floatForComplex(n.Type) // Could be Float32 or Float64
- wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancellation error
+ pt := floatForComplex(n.Type()) // Could be Float32 or Float64
+ wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
areal := s.newValue1(ssa.OpComplexReal, pt, a)
breal := s.newValue1(ssa.OpComplexReal, pt, b)
@@ -2450,50 +2461,50 @@ func (s *state) expr(n *Node) *ssa.Value {
xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
}
- return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
+ return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
}
- if n.Type.IsFloat() {
- return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ if n.Type().IsFloat() {
+ return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
return s.intDivide(n, a, b)
- case OMOD:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
+ case ir.OMOD:
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
return s.intDivide(n, a, b)
- case OADD, OSUB:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- if n.Type.IsComplex() {
- pt := floatForComplex(n.Type)
- op := s.ssaOp(n.Op, pt)
- return s.newValue2(ssa.OpComplexMake, n.Type,
+ case ir.OADD, ir.OSUB:
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
+ if n.Type().IsComplex() {
+ pt := floatForComplex(n.Type())
+ op := s.ssaOp(n.Op(), pt)
+ return s.newValue2(ssa.OpComplexMake, n.Type(),
s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
}
- if n.Type.IsFloat() {
- return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
- }
- return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
- case OAND, OOR, OXOR:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
- case OANDNOT:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- b = s.newValue1(s.ssaOp(OBITNOT, b.Type), b.Type, b)
- return s.newValue2(s.ssaOp(OAND, n.Type), a.Type, a, b)
- case OLSH, ORSH:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
+ if n.Type().IsFloat() {
+ return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+ }
+ return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+ case ir.OAND, ir.OOR, ir.OXOR:
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
+ return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+ case ir.OANDNOT:
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
+ b = s.newValue1(s.ssaOp(ir.OBITNOT, b.Type), b.Type, b)
+ return s.newValue2(s.ssaOp(ir.OAND, n.Type()), a.Type, a, b)
+ case ir.OLSH, ir.ORSH:
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
bt := b.Type
if bt.IsSigned() {
- cmp := s.newValue2(s.ssaOp(OLE, bt), types.Types[TBOOL], s.zeroVal(bt), b)
+ cmp := s.newValue2(s.ssaOp(ir.OLE, bt), types.Types[types.TBOOL], s.zeroVal(bt), b)
s.check(cmp, panicshift)
bt = bt.ToUnsigned()
}
- return s.newValue2(s.ssaShiftOp(n.Op, n.Type, bt), a.Type, a, b)
- case OANDAND, OOROR:
+ return s.newValue2(s.ssaShiftOp(n.Op(), n.Type(), bt), a.Type, a, b)
+ case ir.OANDAND, ir.OOROR:
// To implement OANDAND (and OOROR), we introduce a
// new temporary variable to hold the result. The
// variable is associated with the OANDAND node in the
@@ -2507,7 +2518,7 @@ func (s *state) expr(n *Node) *ssa.Value {
// }
// Using var in the subsequent block introduces the
// necessary phi variable.
- el := s.expr(n.Left)
+ el := s.expr(n.Left())
s.vars[n] = el
b := s.endBlock()
@@ -2520,83 +2531,83 @@ func (s *state) expr(n *Node) *ssa.Value {
bRight := s.f.NewBlock(ssa.BlockPlain)
bResult := s.f.NewBlock(ssa.BlockPlain)
- if n.Op == OANDAND {
+ if n.Op() == ir.OANDAND {
b.AddEdgeTo(bRight)
b.AddEdgeTo(bResult)
- } else if n.Op == OOROR {
+ } else if n.Op() == ir.OOROR {
b.AddEdgeTo(bResult)
b.AddEdgeTo(bRight)
}
s.startBlock(bRight)
- er := s.expr(n.Right)
+ er := s.expr(n.Right())
s.vars[n] = er
b = s.endBlock()
b.AddEdgeTo(bResult)
s.startBlock(bResult)
- return s.variable(n, types.Types[TBOOL])
- case OCOMPLEX:
- r := s.expr(n.Left)
- i := s.expr(n.Right)
- return s.newValue2(ssa.OpComplexMake, n.Type, r, i)
+ return s.variable(n, types.Types[types.TBOOL])
+ case ir.OCOMPLEX:
+ r := s.expr(n.Left())
+ i := s.expr(n.Right())
+ return s.newValue2(ssa.OpComplexMake, n.Type(), r, i)
// unary ops
- case ONEG:
- a := s.expr(n.Left)
- if n.Type.IsComplex() {
- tp := floatForComplex(n.Type)
- negop := s.ssaOp(n.Op, tp)
- return s.newValue2(ssa.OpComplexMake, n.Type,
+ case ir.ONEG:
+ a := s.expr(n.Left())
+ if n.Type().IsComplex() {
+ tp := floatForComplex(n.Type())
+ negop := s.ssaOp(n.Op(), tp)
+ return s.newValue2(ssa.OpComplexMake, n.Type(),
s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
}
- return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
- case ONOT, OBITNOT:
- a := s.expr(n.Left)
- return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
- case OIMAG, OREAL:
- a := s.expr(n.Left)
- return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a)
- case OPLUS:
- return s.expr(n.Left)
+ return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
+ case ir.ONOT, ir.OBITNOT:
+ a := s.expr(n.Left())
+ return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
+ case ir.OIMAG, ir.OREAL:
+ a := s.expr(n.Left())
+ return s.newValue1(s.ssaOp(n.Op(), n.Left().Type()), n.Type(), a)
+ case ir.OPLUS:
+ return s.expr(n.Left())
- case OADDR:
- return s.addr(n.Left)
+ case ir.OADDR:
+ return s.addr(n.Left())
- case ORESULT:
+ case ir.ORESULT:
if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
// Do the old thing
- addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
- return s.rawLoad(n.Type, addr)
+ addr := s.constOffPtrSP(types.NewPtr(n.Type()), n.Offset())
+ return s.rawLoad(n.Type(), addr)
}
- which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Xoffset)
+ which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Offset())
if which == -1 {
// Do the old thing // TODO: Panic instead.
- addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
- return s.rawLoad(n.Type, addr)
+ addr := s.constOffPtrSP(types.NewPtr(n.Type()), n.Offset())
+ return s.rawLoad(n.Type(), addr)
}
- if canSSAType(n.Type) {
- return s.newValue1I(ssa.OpSelectN, n.Type, which, s.prevCall)
+ if canSSAType(n.Type()) {
+ return s.newValue1I(ssa.OpSelectN, n.Type(), which, s.prevCall)
} else {
- addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(n.Type), which, s.prevCall)
- return s.rawLoad(n.Type, addr)
+ addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(n.Type()), which, s.prevCall)
+ return s.rawLoad(n.Type(), addr)
}
- case ODEREF:
- p := s.exprPtr(n.Left, n.Bounded(), n.Pos)
- return s.load(n.Type, p)
+ case ir.ODEREF:
+ p := s.exprPtr(n.Left(), n.Bounded(), n.Pos())
+ return s.load(n.Type(), p)
- case ODOT:
- if n.Left.Op == OSTRUCTLIT {
+ case ir.ODOT:
+ if n.Left().Op() == ir.OSTRUCTLIT {
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
- if !isZero(n.Left) {
- s.Fatalf("literal with nonzero value in SSA: %v", n.Left)
+ if !isZero(n.Left()) {
+ s.Fatalf("literal with nonzero value in SSA: %v", n.Left())
}
- return s.zeroVal(n.Type)
+ return s.zeroVal(n.Type())
}
// If n is addressable and can't be represented in
// SSA, then load just the selected field. This
@@ -2604,110 +2615,110 @@ func (s *state) expr(n *Node) *ssa.Value {
// instrumentation.
if islvalue(n) && !s.canSSA(n) {
p := s.addr(n)
- return s.load(n.Type, p)
+ return s.load(n.Type(), p)
}
- v := s.expr(n.Left)
- return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v)
+ v := s.expr(n.Left())
+ return s.newValue1I(ssa.OpStructSelect, n.Type(), int64(fieldIdx(n)), v)
- case ODOTPTR:
- p := s.exprPtr(n.Left, n.Bounded(), n.Pos)
- p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type), n.Xoffset, p)
- return s.load(n.Type, p)
+ case ir.ODOTPTR:
+ p := s.exprPtr(n.Left(), n.Bounded(), n.Pos())
+ p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type()), n.Offset(), p)
+ return s.load(n.Type(), p)
- case OINDEX:
+ case ir.OINDEX:
switch {
- case n.Left.Type.IsString():
- if n.Bounded() && Isconst(n.Left, CTSTR) && Isconst(n.Right, CTINT) {
+ case n.Left().Type().IsString():
+ if n.Bounded() && ir.IsConst(n.Left(), constant.String) && ir.IsConst(n.Right(), constant.Int) {
// Replace "abc"[1] with 'b'.
// Delayed until now because "abc"[1] is not an ideal constant.
// See test/fixedbugs/issue11370.go.
- return s.newValue0I(ssa.OpConst8, types.Types[TUINT8], int64(int8(n.Left.StringVal()[n.Right.Int64Val()])))
+ return s.newValue0I(ssa.OpConst8, types.Types[types.TUINT8], int64(int8(n.Left().StringVal()[n.Right().Int64Val()])))
}
- a := s.expr(n.Left)
- i := s.expr(n.Right)
- len := s.newValue1(ssa.OpStringLen, types.Types[TINT], a)
+ a := s.expr(n.Left())
+ i := s.expr(n.Right())
+ len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], a)
i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
ptrtyp := s.f.Config.Types.BytePtr
ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
- if Isconst(n.Right, CTINT) {
- ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64Val(), ptr)
+ if ir.IsConst(n.Right(), constant.Int) {
+ ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right().Int64Val(), ptr)
} else {
ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
}
- return s.load(types.Types[TUINT8], ptr)
- case n.Left.Type.IsSlice():
+ return s.load(types.Types[types.TUINT8], ptr)
+ case n.Left().Type().IsSlice():
p := s.addr(n)
- return s.load(n.Left.Type.Elem(), p)
- case n.Left.Type.IsArray():
- if canSSAType(n.Left.Type) {
+ return s.load(n.Left().Type().Elem(), p)
+ case n.Left().Type().IsArray():
+ if canSSAType(n.Left().Type()) {
// SSA can handle arrays of length at most 1.
- bound := n.Left.Type.NumElem()
- a := s.expr(n.Left)
- i := s.expr(n.Right)
+ bound := n.Left().Type().NumElem()
+ a := s.expr(n.Left())
+ i := s.expr(n.Right())
if bound == 0 {
// Bounds check will never succeed. Might as well
// use constants for the bounds check.
- z := s.constInt(types.Types[TINT], 0)
+ z := s.constInt(types.Types[types.TINT], 0)
s.boundsCheck(z, z, ssa.BoundsIndex, false)
// The return value won't be live, return junk.
- return s.newValue0(ssa.OpUnknown, n.Type)
+ return s.newValue0(ssa.OpUnknown, n.Type())
}
- len := s.constInt(types.Types[TINT], bound)
+ len := s.constInt(types.Types[types.TINT], bound)
s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) // checks i == 0
- return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a)
+ return s.newValue1I(ssa.OpArraySelect, n.Type(), 0, a)
}
p := s.addr(n)
- return s.load(n.Left.Type.Elem(), p)
+ return s.load(n.Left().Type().Elem(), p)
default:
- s.Fatalf("bad type for index %v", n.Left.Type)
+ s.Fatalf("bad type for index %v", n.Left().Type())
return nil
}
- case OLEN, OCAP:
+ case ir.OLEN, ir.OCAP:
switch {
- case n.Left.Type.IsSlice():
+ case n.Left().Type().IsSlice():
op := ssa.OpSliceLen
- if n.Op == OCAP {
+ if n.Op() == ir.OCAP {
op = ssa.OpSliceCap
}
- return s.newValue1(op, types.Types[TINT], s.expr(n.Left))
- case n.Left.Type.IsString(): // string; not reachable for OCAP
- return s.newValue1(ssa.OpStringLen, types.Types[TINT], s.expr(n.Left))
- case n.Left.Type.IsMap(), n.Left.Type.IsChan():
- return s.referenceTypeBuiltin(n, s.expr(n.Left))
+ return s.newValue1(op, types.Types[types.TINT], s.expr(n.Left()))
+ case n.Left().Type().IsString(): // string; not reachable for OCAP
+ return s.newValue1(ssa.OpStringLen, types.Types[types.TINT], s.expr(n.Left()))
+ case n.Left().Type().IsMap(), n.Left().Type().IsChan():
+ return s.referenceTypeBuiltin(n, s.expr(n.Left()))
default: // array
- return s.constInt(types.Types[TINT], n.Left.Type.NumElem())
+ return s.constInt(types.Types[types.TINT], n.Left().Type().NumElem())
}
- case OSPTR:
- a := s.expr(n.Left)
- if n.Left.Type.IsSlice() {
- return s.newValue1(ssa.OpSlicePtr, n.Type, a)
+ case ir.OSPTR:
+ a := s.expr(n.Left())
+ if n.Left().Type().IsSlice() {
+ return s.newValue1(ssa.OpSlicePtr, n.Type(), a)
} else {
- return s.newValue1(ssa.OpStringPtr, n.Type, a)
+ return s.newValue1(ssa.OpStringPtr, n.Type(), a)
}
- case OITAB:
- a := s.expr(n.Left)
- return s.newValue1(ssa.OpITab, n.Type, a)
+ case ir.OITAB:
+ a := s.expr(n.Left())
+ return s.newValue1(ssa.OpITab, n.Type(), a)
- case OIDATA:
- a := s.expr(n.Left)
- return s.newValue1(ssa.OpIData, n.Type, a)
+ case ir.OIDATA:
+ a := s.expr(n.Left())
+ return s.newValue1(ssa.OpIData, n.Type(), a)
- case OEFACE:
- tab := s.expr(n.Left)
- data := s.expr(n.Right)
- return s.newValue2(ssa.OpIMake, n.Type, tab, data)
+ case ir.OEFACE:
+ tab := s.expr(n.Left())
+ data := s.expr(n.Right())
+ return s.newValue2(ssa.OpIMake, n.Type(), tab, data)
- case OSLICEHEADER:
- p := s.expr(n.Left)
- l := s.expr(n.List.First())
- c := s.expr(n.List.Second())
- return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
+ case ir.OSLICEHEADER:
+ p := s.expr(n.Left())
+ l := s.expr(n.List().First())
+ c := s.expr(n.List().Second())
+ return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
- case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR:
- v := s.expr(n.Left)
+ case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR:
+ v := s.expr(n.Left())
var i, j, k *ssa.Value
low, high, max := n.SliceBounds()
if low != nil {
@@ -2720,10 +2731,10 @@ func (s *state) expr(n *Node) *ssa.Value {
k = s.expr(max)
}
p, l, c := s.slice(v, i, j, k, n.Bounded())
- return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
+ return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
- case OSLICESTR:
- v := s.expr(n.Left)
+ case ir.OSLICESTR:
+ v := s.expr(n.Left())
var i, j *ssa.Value
low, high, _ := n.SliceBounds()
if low != nil {
@@ -2733,42 +2744,42 @@ func (s *state) expr(n *Node) *ssa.Value {
j = s.expr(high)
}
p, l, _ := s.slice(v, i, j, nil, n.Bounded())
- return s.newValue2(ssa.OpStringMake, n.Type, p, l)
+ return s.newValue2(ssa.OpStringMake, n.Type(), p, l)
- case OCALLFUNC:
+ case ir.OCALLFUNC:
if isIntrinsicCall(n) {
return s.intrinsicCall(n)
}
fallthrough
- case OCALLINTER, OCALLMETH:
+ case ir.OCALLINTER, ir.OCALLMETH:
return s.callResult(n, callNormal)
- case OGETG:
- return s.newValue1(ssa.OpGetG, n.Type, s.mem())
+ case ir.OGETG:
+ return s.newValue1(ssa.OpGetG, n.Type(), s.mem())
- case OAPPEND:
+ case ir.OAPPEND:
return s.append(n, false)
- case OSTRUCTLIT, OARRAYLIT:
+ case ir.OSTRUCTLIT, ir.OARRAYLIT:
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
if !isZero(n) {
s.Fatalf("literal with nonzero value in SSA: %v", n)
}
- return s.zeroVal(n.Type)
+ return s.zeroVal(n.Type())
- case ONEWOBJ:
- if n.Type.Elem().Size() == 0 {
- return s.newValue1A(ssa.OpAddr, n.Type, zerobaseSym, s.sb)
+ case ir.ONEWOBJ:
+ if n.Type().Elem().Size() == 0 {
+ return s.newValue1A(ssa.OpAddr, n.Type(), zerobaseSym, s.sb)
}
- typ := s.expr(n.Left)
- vv := s.rtcall(newobject, true, []*types.Type{n.Type}, typ)
+ typ := s.expr(n.Left())
+ vv := s.rtcall(newobject, true, []*types.Type{n.Type()}, typ)
return vv[0]
default:
- s.Fatalf("unhandled expr %v", n.Op)
+ s.Fatalf("unhandled expr %v", n.Op())
return nil
}
}
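
For reference, the OANDAND/OOROR lowering above, which routes both assignments through the same temporary so a phi forms at the join block, corresponds to this source-level shape (a sketch, not the compiler's output):

    package main

    import "fmt"

    // andand mirrors the block structure in the OANDAND case: the value
    // assigned on both paths (s.vars[n] = el / er) becomes the phi that
    // s.variable materializes in bResult.
    func andand(left, right func() bool) bool {
    	result := left() // entry block: s.vars[n] = el
    	if result {      // edges to bRight and bResult
    		result = right() // bRight: s.vars[n] = er
    	}
    	return result // bResult: phi(el, er)
    }

    func main() {
    	fmt.Println(andand(
    		func() bool { return false },
    		func() bool { panic("unreachable: short-circuited") },
    	)) // false
    }
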
@@ -2779,7 +2790,7 @@ func (s *state) expr(n *Node) *ssa.Value {
// If inplace is true, it writes the result of the OAPPEND expression n
// back to the slice being appended to, and returns nil.
// inplace MUST be set to false if the slice can be SSA'd.
-func (s *state) append(n *Node, inplace bool) *ssa.Value {
+func (s *state) append(n ir.Node, inplace bool) *ssa.Value {
// If inplace is false, process as expression "append(s, e1, e2, e3)":
//
// ptr, len, cap := s
@@ -2813,16 +2824,16 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
// *(ptr+len+1) = e2
// *(ptr+len+2) = e3
- et := n.Type.Elem()
+ et := n.Type().Elem()
pt := types.NewPtr(et)
// Evaluate slice
- sn := n.List.First() // the slice node is the first in the list
+ sn := n.List().First() // the slice node is the first in the list
var slice, addr *ssa.Value
if inplace {
addr = s.addr(sn)
- slice = s.load(n.Type, addr)
+ slice = s.load(n.Type(), addr)
} else {
slice = s.expr(sn)
}
@@ -2832,20 +2843,20 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
assign := s.f.NewBlock(ssa.BlockPlain)
// Decide if we need to grow
- nargs := int64(n.List.Len() - 1)
+ nargs := int64(n.List().Len() - 1)
p := s.newValue1(ssa.OpSlicePtr, pt, slice)
- l := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
- c := s.newValue1(ssa.OpSliceCap, types.Types[TINT], slice)
- nl := s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
+ l := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
+ c := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], slice)
+ nl := s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, s.constInt(types.Types[types.TINT], nargs))
- cmp := s.newValue2(s.ssaOp(OLT, types.Types[TUINT]), types.Types[TBOOL], c, nl)
- s.vars[&ptrVar] = p
+ cmp := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT]), types.Types[types.TBOOL], c, nl)
+ s.vars[ptrVar] = p
if !inplace {
- s.vars[&newlenVar] = nl
- s.vars[&capVar] = c
+ s.vars[newlenVar] = nl
+ s.vars[capVar] = c
} else {
- s.vars[&lenVar] = l
+ s.vars[lenVar] = l
}
b := s.endBlock()
@@ -2857,24 +2868,24 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
// Call growslice
s.startBlock(grow)
- taddr := s.expr(n.Left)
- r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[TINT], types.Types[TINT]}, taddr, p, l, c, nl)
+ taddr := s.expr(n.Left())
+ r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[types.TINT], types.Types[types.TINT]}, taddr, p, l, c, nl)
if inplace {
- if sn.Op == ONAME && sn.Class() != PEXTERN {
+ if sn.Op() == ir.ONAME && sn.Class() != ir.PEXTERN {
// Tell liveness we're about to build a new slice
- s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
}
capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, sliceCapOffset, addr)
- s.store(types.Types[TINT], capaddr, r[2])
+ s.store(types.Types[types.TINT], capaddr, r[2])
s.store(pt, addr, r[0])
// load the value we just stored to avoid having to spill it
- s.vars[&ptrVar] = s.load(pt, addr)
- s.vars[&lenVar] = r[1] // avoid a spill in the fast path
+ s.vars[ptrVar] = s.load(pt, addr)
+ s.vars[lenVar] = r[1] // avoid a spill in the fast path
} else {
- s.vars[&ptrVar] = r[0]
- s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], r[1], s.constInt(types.Types[TINT], nargs))
- s.vars[&capVar] = r[2]
+ s.vars[ptrVar] = r[0]
+ s.vars[newlenVar] = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], r[1], s.constInt(types.Types[types.TINT], nargs))
+ s.vars[capVar] = r[2]
}
b = s.endBlock()
@@ -2884,10 +2895,10 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
s.startBlock(assign)
if inplace {
- l = s.variable(&lenVar, types.Types[TINT]) // generates phi for len
- nl = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
+ l = s.variable(lenVar, types.Types[types.TINT]) // generates phi for len
+ nl = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, s.constInt(types.Types[types.TINT], nargs))
lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, sliceLenOffset, addr)
- s.store(types.Types[TINT], lenaddr, nl)
+ s.store(types.Types[types.TINT], lenaddr, nl)
}
// Evaluate args
@@ -2898,8 +2909,8 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
store bool
}
args := make([]argRec, 0, nargs)
- for _, n := range n.List.Slice()[1:] {
- if canSSAType(n.Type) {
+ for _, n := range n.List().Slice()[1:] {
+ if canSSAType(n.Type()) {
args = append(args, argRec{v: s.expr(n), store: true})
} else {
v := s.addr(n)
@@ -2907,14 +2918,14 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
}
}
- p = s.variable(&ptrVar, pt) // generates phi for ptr
+ p = s.variable(ptrVar, pt) // generates phi for ptr
if !inplace {
- nl = s.variable(&newlenVar, types.Types[TINT]) // generates phi for nl
- c = s.variable(&capVar, types.Types[TINT]) // generates phi for cap
+ nl = s.variable(newlenVar, types.Types[types.TINT]) // generates phi for nl
+ c = s.variable(capVar, types.Types[types.TINT]) // generates phi for cap
}
p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
for i, arg := range args {
- addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[TINT], int64(i)))
+ addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[types.TINT], int64(i)))
if arg.store {
s.storeType(et, addr, arg.v, 0, true)
} else {
@@ -2922,29 +2933,29 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value {
}
}
- delete(s.vars, &ptrVar)
+ delete(s.vars, ptrVar)
if inplace {
- delete(s.vars, &lenVar)
+ delete(s.vars, lenVar)
return nil
}
- delete(s.vars, &newlenVar)
- delete(s.vars, &capVar)
+ delete(s.vars, newlenVar)
+ delete(s.vars, capVar)
// make result
- return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c)
+ return s.newValue3(ssa.OpSliceMake, n.Type(), p, nl, c)
}
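// A condensed sketch of the inplace variant handled above, for a statement
// like s = append(s, e) (names illustrative; compare the pseudocode comment
// at the top of this function):
//
//     if len(s)+1 > cap(s) {
//         s.ptr, _, s.cap = growslice(...)   // cap is stored before ptr
//     }
//     s.len = len(s) + 1
//     s.ptr[s.len-1] = e
//
// The slice is updated through memory rather than rebuilt as an SSA value,
// which is why inplace must be false whenever the slice can be SSA'd.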
// condBranch evaluates the boolean expression cond and branches to yes
// if cond is true and no if cond is false.
// This function is intended to handle && and || better than just calling
// s.expr(cond) and branching on the result.
-func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
- switch cond.Op {
- case OANDAND:
+func (s *state) condBranch(cond ir.Node, yes, no *ssa.Block, likely int8) {
+ switch cond.Op() {
+ case ir.OANDAND:
mid := s.f.NewBlock(ssa.BlockPlain)
- s.stmtList(cond.Ninit)
- s.condBranch(cond.Left, mid, no, max8(likely, 0))
+ s.stmtList(cond.Init())
+ s.condBranch(cond.Left(), mid, no, max8(likely, 0))
s.startBlock(mid)
- s.condBranch(cond.Right, yes, no, likely)
+ s.condBranch(cond.Right(), yes, no, likely)
return
// Note: if likely==1, then both recursive calls pass 1.
// If likely==-1, then we don't have enough information to decide
@@ -2952,19 +2963,19 @@ func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
	// the likelihood of the first branch.
// TODO: have the frontend give us branch prediction hints for
// OANDAND and OOROR nodes (if it ever has such info).
- case OOROR:
+ case ir.OOROR:
mid := s.f.NewBlock(ssa.BlockPlain)
- s.stmtList(cond.Ninit)
- s.condBranch(cond.Left, yes, mid, min8(likely, 0))
+ s.stmtList(cond.Init())
+ s.condBranch(cond.Left(), yes, mid, min8(likely, 0))
s.startBlock(mid)
- s.condBranch(cond.Right, yes, no, likely)
+ s.condBranch(cond.Right(), yes, no, likely)
return
// Note: if likely==-1, then both recursive calls pass -1.
// If likely==1, then we don't have enough info to decide
// the likelihood of the first branch.
- case ONOT:
- s.stmtList(cond.Ninit)
- s.condBranch(cond.Left, no, yes, -likely)
+ case ir.ONOT:
+ s.stmtList(cond.Init())
+ s.condBranch(cond.Left(), no, yes, -likely)
return
}
c := s.expr(cond)
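// A sketch of the short-circuit lowering above: for a condition `a && b`,
// condBranch emits, roughly,
//
//     if a { goto mid } else { goto no }
//     mid: if b { goto yes } else { goto no }
//
// so b is evaluated only when a is true. `||` mirrors this with the first
// test branching straight to yes, and `!x` simply swaps the yes/no targets.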
@@ -2989,17 +3000,17 @@ const (
// If deref is true, then we do left = *right instead (and right has already been nil-checked).
// If deref is true and right == nil, just do left = 0.
// skip indicates assignments (at the top level) that can be avoided.
-func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) {
- if left.Op == ONAME && left.isBlank() {
+func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask) {
+ if left.Op() == ir.ONAME && ir.IsBlank(left) {
return
}
- t := left.Type
+ t := left.Type()
dowidth(t)
if s.canSSA(left) {
if deref {
s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
}
- if left.Op == ODOT {
+ if left.Op() == ir.ODOT {
// We're assigning to a field of an ssa-able value.
// We need to build a new structure with the new value for the
// field we're assigning and the old values for the other fields.
@@ -3010,12 +3021,12 @@ func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask)
// For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
// Grab information about the structure type.
- t := left.Left.Type
+ t := left.Left().Type()
nf := t.NumFields()
idx := fieldIdx(left)
// Grab old value of structure.
- old := s.expr(left.Left)
+ old := s.expr(left.Left())
// Make new structure.
new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
@@ -3030,23 +3041,23 @@ func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask)
}
// Recursively assign the new value we've made to the base of the dot op.
- s.assign(left.Left, new, false, 0)
+ s.assign(left.Left(), new, false, 0)
// TODO: do we need to update named values here?
return
}
- if left.Op == OINDEX && left.Left.Type.IsArray() {
- s.pushLine(left.Pos)
+ if left.Op() == ir.OINDEX && left.Left().Type().IsArray() {
+ s.pushLine(left.Pos())
defer s.popLine()
// We're assigning to an element of an ssa-able array.
// a[i] = v
- t := left.Left.Type
+ t := left.Left().Type()
n := t.NumElem()
- i := s.expr(left.Right) // index
+ i := s.expr(left.Right()) // index
if n == 0 {
// The bounds check must fail. Might as well
// ignore the actual index and just use zeros.
- z := s.constInt(types.Types[TINT], 0)
+ z := s.constInt(types.Types[types.TINT], 0)
s.boundsCheck(z, z, ssa.BoundsIndex, false)
return
}
@@ -3054,10 +3065,10 @@ func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask)
s.Fatalf("assigning to non-1-length array")
}
// Rewrite to a = [1]{v}
- len := s.constInt(types.Types[TINT], 1)
+ len := s.constInt(types.Types[types.TINT], 1)
s.boundsCheck(i, len, ssa.BoundsIndex, false) // checks i == 0
v := s.newValue1(ssa.OpArrayMake1, t, right)
- s.assign(left.Left, v, false, 0)
+ s.assign(left.Left(), v, false, 0)
return
}
// Update variable assignment.
@@ -3068,8 +3079,8 @@ func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask)
// If this assignment clobbers an entire local variable, then emit
// OpVarDef so liveness analysis knows the variable is redefined.
- if base := clobberBase(left); base.Op == ONAME && base.Class() != PEXTERN && skip == 0 {
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !base.IsAutoTmp())
+ if base := clobberBase(left); base.Op() == ir.ONAME && base.Class() != ir.PEXTERN && skip == 0 {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !ir.IsAutoTmp(base))
}
// Left is not ssa-able. Compute its address.
@@ -3080,7 +3091,7 @@ func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask)
// is valid, even though they have type uintptr (#19168).
// Mark it pointer type to signal the writebarrier pass to
// insert a write barrier.
- t = types.Types[TUNSAFEPTR]
+ t = types.Types[types.TUNSAFEPTR]
}
if deref {
// Treat as a mem->mem move.
@@ -3092,7 +3103,7 @@ func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask)
return
}
// Treat as a store.
- s.storeType(t, addr, right, skip, !left.IsAutoTmp())
+ s.storeType(t, addr, right, skip, !ir.IsAutoTmp(left))
}
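// Two worked examples of the SSA-able rewrites above, following the comments
// inside assign:
//
//     var x struct{ a, b, c int }
//     x.b = 5    // rebuilt as x = struct{...}{x.a, 5, x.c}, recursing on x
//
//     var a [1]int
//     a[i] = v   // bounds-check i == 0, then a = [1]int{v}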
// zeroVal returns the zero value for type t.
@@ -3123,10 +3134,10 @@ func (s *state) zeroVal(t *types.Type) *ssa.Value {
case t.IsComplex():
switch t.Size() {
case 8:
- z := s.constFloat32(types.Types[TFLOAT32], 0)
+ z := s.constFloat32(types.Types[types.TFLOAT32], 0)
return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
case 16:
- z := s.constFloat64(types.Types[TFLOAT64], 0)
+ z := s.constFloat64(types.Types[types.TFLOAT64], 0)
return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
default:
s.Fatalf("bad sized complex type %v", t)
@@ -3180,38 +3191,38 @@ var softFloatOps map[ssa.Op]sfRtCallDef
func softfloatInit() {
// Some of these operations get transformed by sfcall.
softFloatOps = map[ssa.Op]sfRtCallDef{
- ssa.OpAdd32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32},
- ssa.OpAdd64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64},
- ssa.OpSub32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32},
- ssa.OpSub64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64},
- ssa.OpMul32F: sfRtCallDef{sysfunc("fmul32"), TFLOAT32},
- ssa.OpMul64F: sfRtCallDef{sysfunc("fmul64"), TFLOAT64},
- ssa.OpDiv32F: sfRtCallDef{sysfunc("fdiv32"), TFLOAT32},
- ssa.OpDiv64F: sfRtCallDef{sysfunc("fdiv64"), TFLOAT64},
-
- ssa.OpEq64F: sfRtCallDef{sysfunc("feq64"), TBOOL},
- ssa.OpEq32F: sfRtCallDef{sysfunc("feq32"), TBOOL},
- ssa.OpNeq64F: sfRtCallDef{sysfunc("feq64"), TBOOL},
- ssa.OpNeq32F: sfRtCallDef{sysfunc("feq32"), TBOOL},
- ssa.OpLess64F: sfRtCallDef{sysfunc("fgt64"), TBOOL},
- ssa.OpLess32F: sfRtCallDef{sysfunc("fgt32"), TBOOL},
- ssa.OpLeq64F: sfRtCallDef{sysfunc("fge64"), TBOOL},
- ssa.OpLeq32F: sfRtCallDef{sysfunc("fge32"), TBOOL},
-
- ssa.OpCvt32to32F: sfRtCallDef{sysfunc("fint32to32"), TFLOAT32},
- ssa.OpCvt32Fto32: sfRtCallDef{sysfunc("f32toint32"), TINT32},
- ssa.OpCvt64to32F: sfRtCallDef{sysfunc("fint64to32"), TFLOAT32},
- ssa.OpCvt32Fto64: sfRtCallDef{sysfunc("f32toint64"), TINT64},
- ssa.OpCvt64Uto32F: sfRtCallDef{sysfunc("fuint64to32"), TFLOAT32},
- ssa.OpCvt32Fto64U: sfRtCallDef{sysfunc("f32touint64"), TUINT64},
- ssa.OpCvt32to64F: sfRtCallDef{sysfunc("fint32to64"), TFLOAT64},
- ssa.OpCvt64Fto32: sfRtCallDef{sysfunc("f64toint32"), TINT32},
- ssa.OpCvt64to64F: sfRtCallDef{sysfunc("fint64to64"), TFLOAT64},
- ssa.OpCvt64Fto64: sfRtCallDef{sysfunc("f64toint64"), TINT64},
- ssa.OpCvt64Uto64F: sfRtCallDef{sysfunc("fuint64to64"), TFLOAT64},
- ssa.OpCvt64Fto64U: sfRtCallDef{sysfunc("f64touint64"), TUINT64},
- ssa.OpCvt32Fto64F: sfRtCallDef{sysfunc("f32to64"), TFLOAT64},
- ssa.OpCvt64Fto32F: sfRtCallDef{sysfunc("f64to32"), TFLOAT32},
+ ssa.OpAdd32F: sfRtCallDef{sysfunc("fadd32"), types.TFLOAT32},
+ ssa.OpAdd64F: sfRtCallDef{sysfunc("fadd64"), types.TFLOAT64},
+ ssa.OpSub32F: sfRtCallDef{sysfunc("fadd32"), types.TFLOAT32},
+ ssa.OpSub64F: sfRtCallDef{sysfunc("fadd64"), types.TFLOAT64},
+ ssa.OpMul32F: sfRtCallDef{sysfunc("fmul32"), types.TFLOAT32},
+ ssa.OpMul64F: sfRtCallDef{sysfunc("fmul64"), types.TFLOAT64},
+ ssa.OpDiv32F: sfRtCallDef{sysfunc("fdiv32"), types.TFLOAT32},
+ ssa.OpDiv64F: sfRtCallDef{sysfunc("fdiv64"), types.TFLOAT64},
+
+ ssa.OpEq64F: sfRtCallDef{sysfunc("feq64"), types.TBOOL},
+ ssa.OpEq32F: sfRtCallDef{sysfunc("feq32"), types.TBOOL},
+ ssa.OpNeq64F: sfRtCallDef{sysfunc("feq64"), types.TBOOL},
+ ssa.OpNeq32F: sfRtCallDef{sysfunc("feq32"), types.TBOOL},
+ ssa.OpLess64F: sfRtCallDef{sysfunc("fgt64"), types.TBOOL},
+ ssa.OpLess32F: sfRtCallDef{sysfunc("fgt32"), types.TBOOL},
+ ssa.OpLeq64F: sfRtCallDef{sysfunc("fge64"), types.TBOOL},
+ ssa.OpLeq32F: sfRtCallDef{sysfunc("fge32"), types.TBOOL},
+
+ ssa.OpCvt32to32F: sfRtCallDef{sysfunc("fint32to32"), types.TFLOAT32},
+ ssa.OpCvt32Fto32: sfRtCallDef{sysfunc("f32toint32"), types.TINT32},
+ ssa.OpCvt64to32F: sfRtCallDef{sysfunc("fint64to32"), types.TFLOAT32},
+ ssa.OpCvt32Fto64: sfRtCallDef{sysfunc("f32toint64"), types.TINT64},
+ ssa.OpCvt64Uto32F: sfRtCallDef{sysfunc("fuint64to32"), types.TFLOAT32},
+ ssa.OpCvt32Fto64U: sfRtCallDef{sysfunc("f32touint64"), types.TUINT64},
+ ssa.OpCvt32to64F: sfRtCallDef{sysfunc("fint32to64"), types.TFLOAT64},
+ ssa.OpCvt64Fto32: sfRtCallDef{sysfunc("f64toint32"), types.TINT32},
+ ssa.OpCvt64to64F: sfRtCallDef{sysfunc("fint64to64"), types.TFLOAT64},
+ ssa.OpCvt64Fto64: sfRtCallDef{sysfunc("f64toint64"), types.TINT64},
+ ssa.OpCvt64Uto64F: sfRtCallDef{sysfunc("fuint64to64"), types.TFLOAT64},
+ ssa.OpCvt64Fto64U: sfRtCallDef{sysfunc("f64touint64"), types.TUINT64},
+ ssa.OpCvt32Fto64F: sfRtCallDef{sysfunc("f32to64"), types.TFLOAT64},
+ ssa.OpCvt64Fto32F: sfRtCallDef{sysfunc("f64to32"), types.TFLOAT32},
}
}
@@ -3227,7 +3238,7 @@ func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) {
args[0], args[1] = args[1], args[0]
case ssa.OpSub32F,
ssa.OpSub64F:
- args[1] = s.newValue1(s.ssaOp(ONEG, types.Types[callDef.rtype]), args[1].Type, args[1])
+ args[1] = s.newValue1(s.ssaOp(ir.ONEG, types.Types[callDef.rtype]), args[1].Type, args[1])
}
result := s.rtcall(callDef.rtfn, true, []*types.Type{types.Types[callDef.rtype]}, args...)[0]
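// A sketch of why Sub32F/Sub64F map to fadd in the table above: sfcall
// rewrites a - b as a + (-b) before making the runtime call, so one
// softfloat add routine serves both operations:
//
//     args[1] = -args[1]               // the ONEG emitted just above
//     result  = fadd(args[0], args[1]) // illustrative pseudocode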
@@ -3243,7 +3254,7 @@ var intrinsics map[intrinsicKey]intrinsicBuilder
// An intrinsicBuilder converts a call node n into an ssa value that
// implements that call as an intrinsic. args is a list of arguments to the func.
-type intrinsicBuilder func(s *state, n *Node, args []*ssa.Value) *ssa.Value
+type intrinsicBuilder func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value
type intrinsicKey struct {
arch *sys.Arch
@@ -3308,173 +3319,173 @@ func init() {
/******** runtime ********/
if !instrumenting {
add("runtime", "slicebytetostringtmp",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
// Compiler frontend optimizations emit OBYTES2STRTMP nodes
// for the backend instead of slicebytetostringtmp calls
// when not instrumenting.
- return s.newValue2(ssa.OpStringMake, n.Type, args[0], args[1])
+ return s.newValue2(ssa.OpStringMake, n.Type(), args[0], args[1])
},
all...)
}
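// A hedged note on the intrinsic above: a string and a []byte share the same
// (ptr, len) prefix, and OBYTES2STRTMP marks conversions where it is safe to
// alias the bytes rather than copy them, so the "conversion" needs no call:
//
//     s := string(b) // when temporary: StringMake(ptr(b), len(b)), no copy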
addF("runtime/internal/math", "MulUintptr",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
- return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[TUINT], types.Types[TUINT]), args[0], args[1])
+ return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
}
- return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[TUINT], types.Types[TUINT]), args[0], args[1])
+ return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
},
sys.AMD64, sys.I386, sys.MIPS64)
add("runtime", "KeepAlive",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
- s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
+ s.vars[memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
return nil
},
all...)
add("runtime", "getclosureptr",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr)
},
all...)
add("runtime", "getcallerpc",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr)
},
all...)
add("runtime", "getcallersp",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr)
},
all...)
/******** runtime/internal/sys ********/
addF("runtime/internal/sys", "Ctz32",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/sys", "Ctz64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/sys", "Bswap32",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBswap32, types.Types[TUINT32], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBswap32, types.Types[types.TUINT32], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
addF("runtime/internal/sys", "Bswap64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBswap64, types.Types[TUINT64], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBswap64, types.Types[types.TUINT64], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
/******** runtime/internal/atomic ********/
addF("runtime/internal/atomic", "Load",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
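// A sketch of the tuple pattern shared by the atomic intrinsics here: each
// atomic op returns (value, mem); Select1 threads the new memory state
// through s.vars[memVar] and Select0 extracts the result, keeping the
// ordering constraint explicit in SSA:
//
//     v := AtomicLoad32(addr, mem) // tuple-typed value
//     mem = Select1(v)
//     r   = Select0(v)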
addF("runtime/internal/atomic", "Load8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[TUINT8], types.TypeMem), args[0], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT8], v)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[types.TUINT8], types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT8], v)
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Load64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "LoadAcq",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "LoadAcq64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.PPC64)
addF("runtime/internal/atomic", "Loadp",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "StorepNoWB",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "StoreRel",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "StoreRel64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.PPC64)
addF("runtime/internal/atomic", "Xchg",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Xchg64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
- type atomicOpEmitter func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType)
+ type atomicOpEmitter func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType)
makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.EType, emit atomicOpEmitter) intrinsicBuilder {
- return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
			// The target's atomics support is detected dynamically at startup.
- addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), arm64HasATOMICS, s.sb)
- v := s.load(types.Types[TBOOL], addr)
+ addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), arm64HasATOMICS, s.sb)
+ v := s.load(types.Types[types.TBOOL], addr)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
@@ -3497,7 +3508,7 @@ func init() {
// Merge results.
s.startBlock(bEnd)
- if rtyp == TNIL {
+ if rtyp == types.TNIL {
return nil
} else {
return s.variable(n, types.Types[rtyp])
@@ -3505,115 +3516,115 @@ func init() {
}
}
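// A rough sketch of the guarded intrinsic built above: op0 is the baseline
// implementation and op1 the ...Variant form (the LSE atomic instructions on
// arm64), chosen by a flag probed once at startup, with both arms merging so
// the result is a single phi:
//
//     if arm64HasATOMICS { v = op1(args...) } else { v = op0(args...) }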
- atomicXchgXaddEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
+ atomicXchgXaddEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
}
addF("runtime/internal/atomic", "Xchg",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange32, ssa.OpAtomicExchange32Variant, TUINT32, TUINT32, atomicXchgXaddEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange32, ssa.OpAtomicExchange32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Xchg64",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange64, ssa.OpAtomicExchange64Variant, TUINT64, TUINT64, atomicXchgXaddEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange64, ssa.OpAtomicExchange64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Xadd",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Xadd64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Xadd",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, TUINT32, TUINT32, atomicXchgXaddEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Xadd64",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, TUINT64, TUINT64, atomicXchgXaddEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Cas",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
},
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Cas64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
},
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "CasRel",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
- return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
},
sys.PPC64)
- atomicCasEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
- v := s.newValue4(op, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
- s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ atomicCasEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
+ v := s.newValue4(op, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
}
addF("runtime/internal/atomic", "Cas",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap32, ssa.OpAtomicCompareAndSwap32Variant, TUINT32, TBOOL, atomicCasEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap32, ssa.OpAtomicCompareAndSwap32Variant, types.TUINT32, types.TBOOL, atomicCasEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Cas64",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap64, ssa.OpAtomicCompareAndSwap64Variant, TUINT64, TBOOL, atomicCasEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap64, ssa.OpAtomicCompareAndSwap64Variant, types.TUINT64, types.TBOOL, atomicCasEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "And8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "And",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "Or8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "Or",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem())
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
- atomicAndOrEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
- s.vars[&memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem())
+ atomicAndOrEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
+ s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem())
}
addF("runtime/internal/atomic", "And8",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd8, ssa.OpAtomicAnd8Variant, TNIL, TNIL, atomicAndOrEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd8, ssa.OpAtomicAnd8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "And",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, TNIL, TNIL, atomicAndOrEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Or8",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr8, ssa.OpAtomicOr8Variant, TNIL, TNIL, atomicAndOrEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr8, ssa.OpAtomicOr8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
addF("runtime/internal/atomic", "Or",
- makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, TNIL, TNIL, atomicAndOrEmitterARM64),
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
sys.ARM64)
alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
@@ -3648,57 +3659,57 @@ func init() {
/******** math ********/
addF("math", "Sqrt",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpSqrt, types.Types[TFLOAT64], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpSqrt, types.Types[types.TFLOAT64], args[0])
},
sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm)
addF("math", "Trunc",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpTrunc, types.Types[TFLOAT64], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpTrunc, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
addF("math", "Ceil",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpCeil, types.Types[TFLOAT64], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCeil, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
addF("math", "Floor",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpFloor, types.Types[TFLOAT64], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpFloor, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
addF("math", "Round",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpRound, types.Types[TFLOAT64], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpRound, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X)
addF("math", "RoundToEven",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpRoundToEven, types.Types[TFLOAT64], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpRoundToEven, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.S390X, sys.Wasm)
addF("math", "Abs",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpAbs, types.Types[TFLOAT64], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpAbs, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.ARM, sys.PPC64, sys.Wasm)
addF("math", "Copysign",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue2(ssa.OpCopysign, types.Types[TFLOAT64], args[0], args[1])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpCopysign, types.Types[types.TFLOAT64], args[0], args[1])
},
sys.PPC64, sys.Wasm)
addF("math", "FMA",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue3(ssa.OpFMA, types.Types[TFLOAT64], args[0], args[1], args[2])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
},
sys.ARM64, sys.PPC64, sys.S390X)
addF("math", "FMA",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if !s.config.UseFMA {
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
- return s.variable(n, types.Types[TFLOAT64])
+ return s.variable(n, types.Types[types.TFLOAT64])
}
- v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[TBOOL], x86HasFMA)
+ v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasFMA)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
@@ -3711,7 +3722,7 @@ func init() {
// We have the intrinsic - use it directly.
s.startBlock(bTrue)
- s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[TFLOAT64], args[0], args[1], args[2])
+ s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
@@ -3721,17 +3732,17 @@ func init() {
// Merge results.
s.startBlock(bEnd)
- return s.variable(n, types.Types[TFLOAT64])
+ return s.variable(n, types.Types[types.TFLOAT64])
},
sys.AMD64)
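// A sketch of the guard assembled above (names as in this file): the
// CPU-feature flag is checked once in the entry block, each arm assigns to
// s.vars[n], and the merge block reads it back as a phi:
//
//     if x86HasFMA {
//         r = FMA(a, b, c)      // ssa.OpFMA, single rounding
//     } else {
//         r = math.FMA(a, b, c) // pure-Go fallback via callResult
//     }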
addF("math", "FMA",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if !s.config.UseFMA {
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
- return s.variable(n, types.Types[TFLOAT64])
+ return s.variable(n, types.Types[types.TFLOAT64])
}
- addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), armHasVFPv4, s.sb)
- v := s.load(types.Types[TBOOL], addr)
+ addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), armHasVFPv4, s.sb)
+ v := s.load(types.Types[types.TBOOL], addr)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
@@ -3744,7 +3755,7 @@ func init() {
// We have the intrinsic - use it directly.
s.startBlock(bTrue)
- s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[TFLOAT64], args[0], args[1], args[2])
+ s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
@@ -3754,13 +3765,13 @@ func init() {
// Merge results.
s.startBlock(bEnd)
- return s.variable(n, types.Types[TFLOAT64])
+ return s.variable(n, types.Types[types.TFLOAT64])
},
sys.ARM)
- makeRoundAMD64 := func(op ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[TBOOL], x86HasSSE41)
+ makeRoundAMD64 := func(op ssa.Op) func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasSSE41)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
@@ -3773,7 +3784,7 @@ func init() {
// We have the intrinsic - use it directly.
s.startBlock(bTrue)
- s.vars[n] = s.newValue1(op, types.Types[TFLOAT64], args[0])
+ s.vars[n] = s.newValue1(op, types.Types[types.TFLOAT64], args[0])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
@@ -3783,7 +3794,7 @@ func init() {
// Merge results.
s.startBlock(bEnd)
- return s.variable(n, types.Types[TFLOAT64])
+ return s.variable(n, types.Types[types.TFLOAT64])
}
}
addF("math", "RoundToEven",
@@ -3801,55 +3812,55 @@ func init() {
/******** math/bits ********/
addF("math/bits", "TrailingZeros64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "TrailingZeros32",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "TrailingZeros16",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0])
- c := s.constInt32(types.Types[TUINT32], 1<<16)
- y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c)
- return s.newValue1(ssa.OpCtz32, types.Types[TINT], y)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
+ c := s.constInt32(types.Types[types.TUINT32], 1<<16)
+ y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
+ return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
},
sys.MIPS)
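// A sketch of the widening trick above for targets with only a 32-bit ctz:
// OR-ing in 1<<16 plants a sentinel bit just past the low 16 bits, so
//
//     ctz32(uint32(x) | 1<<16)
//
// equals ctz16(x) for x != 0 and correctly yields 16 when x == 0; the 8-bit
// variants below use the same idea with 1<<8.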
addF("math/bits", "TrailingZeros16",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpCtz16, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz16, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.I386, sys.ARM, sys.ARM64, sys.Wasm)
addF("math/bits", "TrailingZeros16",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0])
- c := s.constInt64(types.Types[TUINT64], 1<<16)
- y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c)
- return s.newValue1(ssa.OpCtz64, types.Types[TINT], y)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
+ c := s.constInt64(types.Types[types.TUINT64], 1<<16)
+ y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
+ return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
},
sys.S390X, sys.PPC64)
addF("math/bits", "TrailingZeros8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0])
- c := s.constInt32(types.Types[TUINT32], 1<<8)
- y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c)
- return s.newValue1(ssa.OpCtz32, types.Types[TINT], y)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
+ c := s.constInt32(types.Types[types.TUINT32], 1<<8)
+ y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
+ return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
},
sys.MIPS)
addF("math/bits", "TrailingZeros8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpCtz8, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz8, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM, sys.ARM64, sys.Wasm)
addF("math/bits", "TrailingZeros8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0])
- c := s.constInt64(types.Types[TUINT64], 1<<8)
- y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c)
- return s.newValue1(ssa.OpCtz64, types.Types[TINT], y)
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
+ c := s.constInt64(types.Types[types.TUINT64], 1<<8)
+ y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
+ return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
},
sys.S390X)
alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...)
@@ -3857,116 +3868,116 @@ func init() {
// ReverseBytes inlines correctly, no need to intrinsify it.
// ReverseBytes16 lowers to a rotate, no need for anything special here.
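// (For reference: swapping the two bytes of a 16-bit value is an 8-bit
// rotate, i.e. bits.ReverseBytes16(x) == bits.RotateLeft16(x, 8).)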
addF("math/bits", "Len64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len32",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64)
addF("math/bits", "Len32",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
- return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
+ return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
}
- x := s.newValue1(ssa.OpZeroExt32to64, types.Types[TUINT64], args[0])
- return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
+ x := s.newValue1(ssa.OpZeroExt32to64, types.Types[types.TUINT64], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
},
sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len16",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
- x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0])
- return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x)
+ x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
+ return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
}
- x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0])
- return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
+ x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
},
sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len16",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitLen16, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitLen16, types.Types[types.TINT], args[0])
},
sys.AMD64)
addF("math/bits", "Len8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
- x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0])
- return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x)
+ x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
+ return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
}
- x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0])
- return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
+ x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
},
sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitLen8, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitLen8, types.Types[types.TINT], args[0])
},
sys.AMD64)
addF("math/bits", "Len",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
- return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
+ return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
}
- return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
// LeadingZeros is handled because it trivially calls Len.
addF("math/bits", "Reverse64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse32",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse16",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitRev16, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitRev16, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpBitRev8, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitRev8, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
- return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0])
+ return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
}
- return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0])
+ return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "RotateLeft8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue2(ssa.OpRotateLeft8, types.Types[TUINT8], args[0], args[1])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpRotateLeft8, types.Types[types.TUINT8], args[0], args[1])
},
sys.AMD64)
addF("math/bits", "RotateLeft16",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue2(ssa.OpRotateLeft16, types.Types[TUINT16], args[0], args[1])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpRotateLeft16, types.Types[types.TUINT16], args[0], args[1])
},
sys.AMD64)
addF("math/bits", "RotateLeft32",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue2(ssa.OpRotateLeft32, types.Types[TUINT32], args[0], args[1])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpRotateLeft32, types.Types[types.TUINT32], args[0], args[1])
},
sys.AMD64, sys.ARM, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
addF("math/bits", "RotateLeft64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue2(ssa.OpRotateLeft64, types.Types[TUINT64], args[0], args[1])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpRotateLeft64, types.Types[types.TUINT64], args[0], args[1])
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...)
- makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[TBOOL], x86HasPOPCNT)
+ makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasPOPCNT)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
@@ -3983,7 +3994,7 @@ func init() {
if s.config.PtrSize == 4 {
op = op32
}
- s.vars[n] = s.newValue1(op, types.Types[TINT], args[0])
+ s.vars[n] = s.newValue1(op, types.Types[types.TINT], args[0])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
@@ -3993,67 +4004,67 @@ func init() {
// Merge results.
s.startBlock(bEnd)
- return s.variable(n, types.Types[TINT])
+ return s.variable(n, types.Types[types.TINT])
}
}
addF("math/bits", "OnesCount64",
makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount64),
sys.AMD64)
addF("math/bits", "OnesCount64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpPopCount64, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpPopCount64, types.Types[types.TINT], args[0])
},
sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
addF("math/bits", "OnesCount32",
makeOnesCountAMD64(ssa.OpPopCount32, ssa.OpPopCount32),
sys.AMD64)
addF("math/bits", "OnesCount32",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpPopCount32, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpPopCount32, types.Types[types.TINT], args[0])
},
sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
addF("math/bits", "OnesCount16",
makeOnesCountAMD64(ssa.OpPopCount16, ssa.OpPopCount16),
sys.AMD64)
addF("math/bits", "OnesCount16",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpPopCount16, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpPopCount16, types.Types[types.TINT], args[0])
},
sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
addF("math/bits", "OnesCount8",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue1(ssa.OpPopCount8, types.Types[TINT], args[0])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpPopCount8, types.Types[types.TINT], args[0])
},
sys.S390X, sys.PPC64, sys.Wasm)
addF("math/bits", "OnesCount",
makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32),
sys.AMD64)
addF("math/bits", "Mul64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
},
sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64)
alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE)
addF("math/bits", "Add64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
},
sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X)
alias("math/bits", "Add", "math/bits", "Add64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X)
addF("math/bits", "Sub64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
},
sys.AMD64, sys.ARM64, sys.S390X)
alias("math/bits", "Sub", "math/bits", "Sub64", sys.ArchAMD64, sys.ArchARM64, sys.ArchS390X)
addF("math/bits", "Div64",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
// check for divide-by-zero/overflow and panic with appropriate message
- cmpZero := s.newValue2(s.ssaOp(ONE, types.Types[TUINT64]), types.Types[TBOOL], args[2], s.zeroVal(types.Types[TUINT64]))
+ cmpZero := s.newValue2(s.ssaOp(ir.ONE, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[2], s.zeroVal(types.Types[types.TUINT64]))
s.check(cmpZero, panicdivide)
- cmpOverflow := s.newValue2(s.ssaOp(OLT, types.Types[TUINT64]), types.Types[TBOOL], args[0], args[2])
+ cmpOverflow := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[0], args[2])
s.check(cmpOverflow, panicoverflow)
- return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
+ return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
},
sys.AMD64)
alias("math/bits", "Div", "math/bits", "Div64", sys.ArchAMD64)
@@ -4107,8 +4118,8 @@ func init() {
/******** math/big ********/
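// math/big's word multiply lowers to the same OpMul64uhilo tuple as
// bits.Mul64 above, so the compiler can replace the mulWW call outright.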
add("math/big", "mulWW",
- func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
- return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1])
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
},
sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64LE, sys.ArchPPC64, sys.ArchS390X)
}
@@ -4120,10 +4131,10 @@ func findIntrinsic(sym *types.Sym) intrinsicBuilder {
return nil
}
pkg := sym.Pkg.Path
- if sym.Pkg == localpkg {
- pkg = myimportpath
+ if sym.Pkg == ir.LocalPkg {
+ pkg = base.Ctxt.Pkgpath
}
- if flag_race && pkg == "sync/atomic" {
+ if base.Flag.Race && pkg == "sync/atomic" {
// The race detector needs to be able to intercept these calls.
// We can't intrinsify them.
return nil
@@ -4145,16 +4156,16 @@ func findIntrinsic(sym *types.Sym) intrinsicBuilder {
return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}]
}
-func isIntrinsicCall(n *Node) bool {
- if n == nil || n.Left == nil {
+func isIntrinsicCall(n ir.Node) bool {
+ if n == nil || n.Left() == nil {
return false
}
- return findIntrinsic(n.Left.Sym) != nil
+ return findIntrinsic(n.Left().Sym()) != nil
}
// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
-func (s *state) intrinsicCall(n *Node) *ssa.Value {
- v := findIntrinsic(n.Left.Sym)(s, n, s.intrinsicArgs(n))
+func (s *state) intrinsicCall(n ir.Node) *ssa.Value {
+ v := findIntrinsic(n.Left().Sym())(s, n, s.intrinsicArgs(n))
if ssa.IntrinsicsDebug > 0 {
x := v
if x == nil {
@@ -4163,29 +4174,29 @@ func (s *state) intrinsicCall(n *Node) *ssa.Value {
if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
x = x.Args[0]
}
- Warnl(n.Pos, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString())
+ base.WarnfAt(n.Pos(), "intrinsic substitution for %v with %s", n.Left().Sym().Name, x.LongString())
}
return v
}
// intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
-func (s *state) intrinsicArgs(n *Node) []*ssa.Value {
+func (s *state) intrinsicArgs(n ir.Node) []*ssa.Value {
// Construct map of temps; see comments in s.call about the structure of n.
- temps := map[*Node]*ssa.Value{}
- for _, a := range n.List.Slice() {
- if a.Op != OAS {
- s.Fatalf("non-assignment as a temp function argument %v", a.Op)
+ temps := map[ir.Node]*ssa.Value{}
+ for _, a := range n.List().Slice() {
+ if a.Op() != ir.OAS {
+ s.Fatalf("non-assignment as a temp function argument %v", a.Op())
}
- l, r := a.Left, a.Right
- if l.Op != ONAME {
- s.Fatalf("non-ONAME temp function argument %v", a.Op)
+ l, r := a.Left(), a.Right()
+ if l.Op() != ir.ONAME {
+ s.Fatalf("non-ONAME temp function argument %v", a.Op())
}
// Evaluate and store to "temporary".
// Walk ensures these temporaries are dead outside of n.
temps[l] = s.expr(r)
}
- args := make([]*ssa.Value, n.Rlist.Len())
- for i, n := range n.Rlist.Slice() {
+ args := make([]*ssa.Value, n.Rlist().Len())
+ for i, n := range n.Rlist().Slice() {
// Store a value to an argument slot.
if x, ok := temps[n]; ok {
// This is a previously computed temporary.
@@ -4204,62 +4215,62 @@ func (s *state) intrinsicArgs(n *Node) []*ssa.Value {
// call. We will also record funcdata information on where the args are stored
// (as well as the deferBits variable), and this will enable us to run the proper
// defer calls during panics.
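// Open-coded defers are expanded inline at function exit instead of being
// pushed onto the runtime defer chain; openDeferExit below consumes the
// records built here.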
-func (s *state) openDeferRecord(n *Node) {
+func (s *state) openDeferRecord(n ir.Node) {
// Do any needed expression evaluation for the args (including the
// receiver, if any). This may be evaluating something like 'autotmp_3 =
// once.mutex'. Such a statement will create a mapping in s.vars[] from
// the autotmp name to the evaluated SSA arg value, but won't do any
// stores to the stack.
- s.stmtList(n.List)
+ s.stmtList(n.List())
var args []*ssa.Value
- var argNodes []*Node
+ var argNodes []ir.Node
opendefer := &openDeferInfo{
n: n,
}
- fn := n.Left
- if n.Op == OCALLFUNC {
+ fn := n.Left()
+ if n.Op() == ir.OCALLFUNC {
// We must always store the function value in a stack slot for the
// runtime panic code to use. But in the defer exit code, we will
// call the function directly if it is a static function.
closureVal := s.expr(fn)
- closure := s.openDeferSave(nil, fn.Type, closureVal)
- opendefer.closureNode = closure.Aux.(*Node)
- if !(fn.Op == ONAME && fn.Class() == PFUNC) {
+ closure := s.openDeferSave(nil, fn.Type(), closureVal)
+ opendefer.closureNode = closure.Aux.(ir.Node)
+ if !(fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC) {
opendefer.closure = closure
}
- } else if n.Op == OCALLMETH {
- if fn.Op != ODOTMETH {
- Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
+ } else if n.Op() == ir.OCALLMETH {
+ if fn.Op() != ir.ODOTMETH {
+ base.Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
}
closureVal := s.getMethodClosure(fn)
// We must always store the function value in a stack slot for the
// runtime panic code to use. But in the defer exit code, we will
// call the method directly.
- closure := s.openDeferSave(nil, fn.Type, closureVal)
- opendefer.closureNode = closure.Aux.(*Node)
+ closure := s.openDeferSave(nil, fn.Type(), closureVal)
+ opendefer.closureNode = closure.Aux.(ir.Node)
} else {
- if fn.Op != ODOTINTER {
- Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
+ if fn.Op() != ir.ODOTINTER {
+ base.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
}
closure, rcvr := s.getClosureAndRcvr(fn)
opendefer.closure = s.openDeferSave(nil, closure.Type, closure)
// Important to get the receiver type correct, so it is recognized
// as a pointer for GC purposes.
- opendefer.rcvr = s.openDeferSave(nil, fn.Type.Recv().Type, rcvr)
- opendefer.closureNode = opendefer.closure.Aux.(*Node)
- opendefer.rcvrNode = opendefer.rcvr.Aux.(*Node)
+ opendefer.rcvr = s.openDeferSave(nil, fn.Type().Recv().Type, rcvr)
+ opendefer.closureNode = opendefer.closure.Aux.(ir.Node)
+ opendefer.rcvrNode = opendefer.rcvr.Aux.(ir.Node)
}
- for _, argn := range n.Rlist.Slice() {
+ for _, argn := range n.Rlist().Slice() {
var v *ssa.Value
- if canSSAType(argn.Type) {
- v = s.openDeferSave(nil, argn.Type, s.expr(argn))
+ if canSSAType(argn.Type()) {
+ v = s.openDeferSave(nil, argn.Type(), s.expr(argn))
} else {
- v = s.openDeferSave(argn, argn.Type, nil)
+ v = s.openDeferSave(argn, argn.Type(), nil)
}
args = append(args, v)
- argNodes = append(argNodes, v.Aux.(*Node))
+ argNodes = append(argNodes, v.Aux.(ir.Node))
}
opendefer.argVals = args
opendefer.argNodes = argNodes
@@ -4268,10 +4279,10 @@ func (s *state) openDeferRecord(n *Node) {
// Update deferBits only after evaluation and storage to stack of
// args/receiver/interface is successful.
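// deferBits holds one bit per open-coded defer in the frame: bit index is
// set only after the defer's arguments are safely stored, and openDeferExit
// tests the bits in reverse (LIFO) order.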
- bitvalue := s.constInt8(types.Types[TUINT8], 1<<uint(index))
- newDeferBits := s.newValue2(ssa.OpOr8, types.Types[TUINT8], s.variable(&deferBitsVar, types.Types[TUINT8]), bitvalue)
- s.vars[&deferBitsVar] = newDeferBits
- s.store(types.Types[TUINT8], s.deferBitsAddr, newDeferBits)
+ bitvalue := s.constInt8(types.Types[types.TUINT8], 1<<uint(index))
+ newDeferBits := s.newValue2(ssa.OpOr8, types.Types[types.TUINT8], s.variable(deferBitsVar, types.Types[types.TUINT8]), bitvalue)
+ s.vars[deferBitsVar] = newDeferBits
+ s.store(types.Types[types.TUINT8], s.deferBitsAddr, newDeferBits)
}
// openDeferSave generates SSA nodes to store a value (with type t) for an
@@ -4281,16 +4292,16 @@ func (s *state) openDeferRecord(n *Node) {
// type t is non-SSAable, then n must be non-nil (and val should be nil) and n is
// evaluated (via s.addr() below) to get the value that is to be stored. The
// function returns an SSA value representing a pointer to the autotmp location.
-func (s *state) openDeferSave(n *Node, t *types.Type, val *ssa.Value) *ssa.Value {
+func (s *state) openDeferSave(n ir.Node, t *types.Type, val *ssa.Value) *ssa.Value {
canSSA := canSSAType(t)
var pos src.XPos
if canSSA {
pos = val.Pos
} else {
- pos = n.Pos
+ pos = n.Pos()
}
argTemp := tempAt(pos.WithNotStmt(), s.curfn, t)
- argTemp.Name.SetOpenDeferSlot(true)
+ argTemp.Name().SetOpenDeferSlot(true)
var addrArgTemp *ssa.Value
// Use OpVarLive to make sure stack slots for the args, etc. are not
// removed by dead-store elimination
@@ -4299,16 +4310,16 @@ func (s *state) openDeferSave(n *Node, t *types.Type, val *ssa.Value) *ssa.Value
// declared in the entry block, so that it will be live for the
// defer exit code (which will actually access it only if the
// associated defer call has been activated).
- s.defvars[s.f.Entry.ID][&memVar] = s.entryNewValue1A(ssa.OpVarDef, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][&memVar])
- s.defvars[s.f.Entry.ID][&memVar] = s.entryNewValue1A(ssa.OpVarLive, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][&memVar])
- addrArgTemp = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(argTemp.Type), argTemp, s.sp, s.defvars[s.f.Entry.ID][&memVar])
+ s.defvars[s.f.Entry.ID][memVar] = s.entryNewValue1A(ssa.OpVarDef, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar])
+ s.defvars[s.f.Entry.ID][memVar] = s.entryNewValue1A(ssa.OpVarLive, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar])
+ addrArgTemp = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.defvars[s.f.Entry.ID][memVar])
} else {
// Special case if we're still in the entry block. We can't use
// the above code, since s.defvars[s.f.Entry.ID] isn't defined
// until we end the entry block with s.endBlock().
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, argTemp, s.mem(), false)
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argTemp, s.mem(), false)
- addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type), argTemp, s.sp, s.mem(), false)
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, argTemp, s.mem(), false)
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argTemp, s.mem(), false)
+ addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.mem(), false)
}
if t.HasPointers() {
// Since we may use this argTemp during exit depending on the
@@ -4316,7 +4327,7 @@ func (s *state) openDeferSave(n *Node, t *types.Type, val *ssa.Value) *ssa.Value
// Therefore, we must make sure it is zeroed out in the entry
// block if it contains pointers, else GC may wrongly follow an
// uninitialized pointer value.
- argTemp.Name.SetNeedzero(true)
+ argTemp.Name().SetNeedzero(true)
}
if !canSSA {
a := s.addr(n)
@@ -4339,7 +4350,7 @@ func (s *state) openDeferExit() {
s.startBlock(deferExit)
s.lastDeferExit = deferExit
s.lastDeferCount = len(s.openDefers)
- zeroval := s.constInt8(types.Types[TUINT8], 0)
+ zeroval := s.constInt8(types.Types[types.TUINT8], 0)
testLateExpansion := ssa.LateCallExpansionEnabledWithin(s.f)
// Test for and run defers in reverse order
for i := len(s.openDefers) - 1; i >= 0; i-- {
@@ -4347,12 +4358,12 @@ func (s *state) openDeferExit() {
bCond := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
- deferBits := s.variable(&deferBitsVar, types.Types[TUINT8])
+ deferBits := s.variable(deferBitsVar, types.Types[types.TUINT8])
// Generate code to check if the bit associated with the current
// defer is set.
- bitval := s.constInt8(types.Types[TUINT8], 1<<uint(i))
- andval := s.newValue2(ssa.OpAnd8, types.Types[TUINT8], deferBits, bitval)
- eqVal := s.newValue2(ssa.OpEq8, types.Types[TBOOL], andval, zeroval)
+ bitval := s.constInt8(types.Types[types.TUINT8], 1<<uint(i))
+ andval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, bitval)
+ eqVal := s.newValue2(ssa.OpEq8, types.Types[types.TBOOL], andval, zeroval)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(eqVal)
@@ -4363,19 +4374,19 @@ func (s *state) openDeferExit() {
// Clear this bit in deferBits and force store back to stack, so
// we will not try to re-run this defer call if this defer call panics.
- nbitval := s.newValue1(ssa.OpCom8, types.Types[TUINT8], bitval)
- maskedval := s.newValue2(ssa.OpAnd8, types.Types[TUINT8], deferBits, nbitval)
- s.store(types.Types[TUINT8], s.deferBitsAddr, maskedval)
+ nbitval := s.newValue1(ssa.OpCom8, types.Types[types.TUINT8], bitval)
+ maskedval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, nbitval)
+ s.store(types.Types[types.TUINT8], s.deferBitsAddr, maskedval)
// Use this value for following tests, so we keep previous
// bits cleared.
- s.vars[&deferBitsVar] = maskedval
+ s.vars[deferBitsVar] = maskedval
// Generate code to call the function call of the defer, using the
// closure/receiver/args that were stored in argtmps at the point
// of the defer statement.
- argStart := Ctxt.FixedFrameSize()
- fn := r.n.Left
- stksize := fn.Type.ArgWidth()
+ argStart := base.Ctxt.FixedFrameSize()
+ fn := r.n.Left()
+ stksize := fn.Type().ArgWidth()
var ACArgs []ssa.Param
var ACResults []ssa.Param
var callArgs []*ssa.Value
@@ -4383,11 +4394,11 @@ func (s *state) openDeferExit() {
// rcvr in case of OCALLINTER
v := s.load(r.rcvr.Type.Elem(), r.rcvr)
addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
- ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart)})
+ ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(argStart)})
if testLateExpansion {
callArgs = append(callArgs, v)
} else {
- s.store(types.Types[TUINTPTR], addr, v)
+ s.store(types.Types[types.TUINTPTR], addr, v)
}
}
for j, argAddrVal := range r.argVals {
@@ -4416,7 +4427,7 @@ func (s *state) openDeferExit() {
if r.closure != nil {
v := s.load(r.closure.Type.Elem(), r.closure)
s.maybeNilCheckClosure(v, callDefer)
- codeptr := s.rawLoad(types.Types[TUINTPTR], v)
+ codeptr := s.rawLoad(types.Types[types.TUINTPTR], v)
aux := ssa.ClosureAuxCall(ACArgs, ACResults)
if testLateExpansion {
callArgs = append(callArgs, s.mem())
@@ -4426,7 +4437,7 @@ func (s *state) openDeferExit() {
call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, aux, codeptr, v, s.mem())
}
} else {
- aux := ssa.StaticAuxCall(fn.Sym.Linksym(), ACArgs, ACResults)
+ aux := ssa.StaticAuxCall(fn.Sym().Linksym(), ACArgs, ACResults)
if testLateExpansion {
callArgs = append(callArgs, s.mem())
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
@@ -4438,25 +4449,25 @@ func (s *state) openDeferExit() {
}
call.AuxInt = stksize
if testLateExpansion {
- s.vars[&memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
+ s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
} else {
- s.vars[&memVar] = call
+ s.vars[memVar] = call
}
// Make sure that the stack slots with pointers are kept live
// through the call (which is a pre-emption point). Also, we will
// use the first call of the last defer exit to compute liveness
// for the deferreturn, so we want all stack slots to be live.
if r.closureNode != nil {
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
}
if r.rcvrNode != nil {
- if r.rcvrNode.Type.HasPointers() {
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false)
+ if r.rcvrNode.Type().HasPointers() {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false)
}
}
for _, argNode := range r.argNodes {
- if argNode.Type.HasPointers() {
- s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false)
+ if argNode.Type().HasPointers() {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false)
}
}
@@ -4465,42 +4476,42 @@ func (s *state) openDeferExit() {
}
}
-func (s *state) callResult(n *Node, k callKind) *ssa.Value {
+func (s *state) callResult(n ir.Node, k callKind) *ssa.Value {
return s.call(n, k, false)
}
-func (s *state) callAddr(n *Node, k callKind) *ssa.Value {
+func (s *state) callAddr(n ir.Node, k callKind) *ssa.Value {
return s.call(n, k, true)
}
// Calls the function n using the specified call type.
// Returns the address of the return value (or nil if none).
-func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
+func (s *state) call(n ir.Node, k callKind, returnResultAddr bool) *ssa.Value {
s.prevCall = nil
var sym *types.Sym // target symbol (if static)
var closure *ssa.Value // ptr to closure to run (if dynamic)
var codeptr *ssa.Value // ptr to target code (if dynamic)
var rcvr *ssa.Value // receiver to set
- fn := n.Left
+ fn := n.Left()
var ACArgs []ssa.Param
var ACResults []ssa.Param
var callArgs []*ssa.Value
- res := n.Left.Type.Results()
+ res := n.Left().Type().Results()
if k == callNormal {
nf := res.NumFields()
for i := 0; i < nf; i++ {
fp := res.Field(i)
- ACResults = append(ACResults, ssa.Param{Type: fp.Type, Offset: int32(fp.Offset + Ctxt.FixedFrameSize())})
+ ACResults = append(ACResults, ssa.Param{Type: fp.Type, Offset: int32(fp.Offset + base.Ctxt.FixedFrameSize())})
}
}
testLateExpansion := false
- switch n.Op {
- case OCALLFUNC:
+ switch n.Op() {
+ case ir.OCALLFUNC:
testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
- if k == callNormal && fn.Op == ONAME && fn.Class() == PFUNC {
- sym = fn.Sym
+ if k == callNormal && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC {
+ sym = fn.Sym()
break
}
closure = s.expr(fn)
@@ -4509,54 +4520,54 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
// not the point of defer statement.
s.maybeNilCheckClosure(closure, k)
}
- case OCALLMETH:
- if fn.Op != ODOTMETH {
+ case ir.OCALLMETH:
+ if fn.Op() != ir.ODOTMETH {
s.Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
}
testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
if k == callNormal {
- sym = fn.Sym
+ sym = fn.Sym()
break
}
closure = s.getMethodClosure(fn)
// Note: receiver is already present in n.Rlist, so we don't
// want to set it here.
- case OCALLINTER:
- if fn.Op != ODOTINTER {
- s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
+ case ir.OCALLINTER:
+ if fn.Op() != ir.ODOTINTER {
+ s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
}
testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
var iclosure *ssa.Value
iclosure, rcvr = s.getClosureAndRcvr(fn)
if k == callNormal {
- codeptr = s.load(types.Types[TUINTPTR], iclosure)
+ codeptr = s.load(types.Types[types.TUINTPTR], iclosure)
} else {
closure = iclosure
}
}
- dowidth(fn.Type)
- stksize := fn.Type.ArgWidth() // includes receiver, args, and results
+ dowidth(fn.Type())
+ stksize := fn.Type().ArgWidth() // includes receiver, args, and results
// Run all assignments of temps.
// The temps are introduced to avoid overwriting argument
// slots when arguments themselves require function calls.
- s.stmtList(n.List)
+ s.stmtList(n.List())
var call *ssa.Value
if k == callDeferStack {
testLateExpansion = ssa.LateCallExpansionEnabledWithin(s.f)
// Make a defer struct d on the stack.
t := deferstruct(stksize)
- d := tempAt(n.Pos, s.curfn, t)
+ d := tempAt(n.Pos(), s.curfn, t)
- s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
addr := s.addr(d)
// Must match reflect.go:deferstruct and src/runtime/runtime2.go:_defer.
// 0: siz
- s.store(types.Types[TUINT32],
- s.newValue1I(ssa.OpOffPtr, types.Types[TUINT32].PtrTo(), t.FieldOff(0), addr),
- s.constInt32(types.Types[TUINT32], int32(stksize)))
+ s.store(types.Types[types.TUINT32],
+ s.newValue1I(ssa.OpOffPtr, types.Types[types.TUINT32].PtrTo(), t.FieldOff(0), addr),
+ s.constInt32(types.Types[types.TUINT32], int32(stksize)))
// 1: started, set in deferprocStack
// 2: heap, set in deferprocStack
// 3: openDefer
@@ -4573,17 +4584,17 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
// 11: fd
// Then, store all the arguments of the defer call.
- ft := fn.Type
+ ft := fn.Type()
off := t.FieldOff(12)
- args := n.Rlist.Slice()
+ args := n.Rlist().Slice()
// Set receiver (for interface calls). Always a pointer.
if rcvr != nil {
p := s.newValue1I(ssa.OpOffPtr, ft.Recv().Type.PtrTo(), off, addr)
- s.store(types.Types[TUINTPTR], p, rcvr)
+ s.store(types.Types[types.TUINTPTR], p, rcvr)
}
// Set receiver (for method calls).
- if n.Op == OCALLMETH {
+ if n.Op() == ir.OCALLMETH {
f := ft.Recv()
s.storeArgWithBase(args[0], f.Type, addr, off+f.Offset)
args = args[1:]
@@ -4595,15 +4606,15 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
}
// Call runtime.deferprocStack with pointer to _defer record.
- ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(Ctxt.FixedFrameSize())})
+ ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(base.Ctxt.FixedFrameSize())})
aux := ssa.StaticAuxCall(deferprocStack, ACArgs, ACResults)
if testLateExpansion {
callArgs = append(callArgs, addr, s.mem())
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...)
} else {
- arg0 := s.constOffPtrSP(types.Types[TUINTPTR], Ctxt.FixedFrameSize())
- s.store(types.Types[TUINTPTR], arg0, addr)
+ arg0 := s.constOffPtrSP(types.Types[types.TUINTPTR], base.Ctxt.FixedFrameSize())
+ s.store(types.Types[types.TUINTPTR], arg0, addr)
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
}
if stksize < int64(Widthptr) {
@@ -4616,24 +4627,24 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
} else {
// Store arguments to stack, including defer/go arguments and receiver for method calls.
// These are written in SP-offset order.
- argStart := Ctxt.FixedFrameSize()
+ argStart := base.Ctxt.FixedFrameSize()
// Defer/go args.
if k != callNormal {
// Write argsize and closure (args to newproc/deferproc).
- argsize := s.constInt32(types.Types[TUINT32], int32(stksize))
- ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINT32], Offset: int32(argStart)})
+ argsize := s.constInt32(types.Types[types.TUINT32], int32(stksize))
+ ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINT32], Offset: int32(argStart)})
if testLateExpansion {
callArgs = append(callArgs, argsize)
} else {
addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart)
- s.store(types.Types[TUINT32], addr, argsize)
+ s.store(types.Types[types.TUINT32], addr, argsize)
}
- ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart) + int32(Widthptr)})
+ ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(argStart) + int32(Widthptr)})
if testLateExpansion {
callArgs = append(callArgs, closure)
} else {
addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr))
- s.store(types.Types[TUINTPTR], addr, closure)
+ s.store(types.Types[types.TUINTPTR], addr, closure)
}
stksize += 2 * int64(Widthptr)
argStart += 2 * int64(Widthptr)
@@ -4642,18 +4653,18 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
// Set receiver (for interface calls).
if rcvr != nil {
addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
- ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart)})
+ ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(argStart)})
if testLateExpansion {
callArgs = append(callArgs, rcvr)
} else {
- s.store(types.Types[TUINTPTR], addr, rcvr)
+ s.store(types.Types[types.TUINTPTR], addr, rcvr)
}
}
// Write args.
- t := n.Left.Type
- args := n.Rlist.Slice()
- if n.Op == OCALLMETH {
+ t := n.Left().Type()
+ args := n.Rlist().Slice()
+ if n.Op() == ir.OCALLMETH {
f := t.Recv()
ACArg, arg := s.putArg(args[0], f.Type, argStart+f.Offset, testLateExpansion)
ACArgs = append(ACArgs, ACArg)
@@ -4693,7 +4704,7 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
// can't always figure that out currently, and it's
// critical that we not clobber any arguments already
// stored onto the stack.
- codeptr = s.rawLoad(types.Types[TUINTPTR], closure)
+ codeptr = s.rawLoad(types.Types[types.TUINTPTR], closure)
if testLateExpansion {
aux := ssa.ClosureAuxCall(ACArgs, ACResults)
call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure)
@@ -4718,18 +4729,18 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(sym.Linksym(), ACArgs, ACResults), s.mem())
}
default:
- s.Fatalf("bad call type %v %v", n.Op, n)
+ s.Fatalf("bad call type %v %v", n.Op(), n)
}
call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
}
if testLateExpansion {
s.prevCall = call
- s.vars[&memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
+ s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
} else {
- s.vars[&memVar] = call
+ s.vars[memVar] = call
}
// Insert OVARLIVE nodes
- s.stmtList(n.Nbody)
+ s.stmtList(n.Body())
// Finish block for defers
if k == callDefer || k == callDeferStack {
@@ -4757,13 +4768,13 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
if testLateExpansion {
return s.newValue1I(ssa.OpSelectNAddr, pt, 0, call)
}
- return s.constOffPtrSP(pt, fp.Offset+Ctxt.FixedFrameSize())
+ return s.constOffPtrSP(pt, fp.Offset+base.Ctxt.FixedFrameSize())
}
if testLateExpansion {
return s.newValue1I(ssa.OpSelectN, fp.Type, 0, call)
}
- return s.load(n.Type, s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+Ctxt.FixedFrameSize()))
+ return s.load(n.Type(), s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+base.Ctxt.FixedFrameSize()))
}
// maybeNilCheckClosure checks if a nil check of a closure is needed in some
@@ -4777,28 +4788,28 @@ func (s *state) maybeNilCheckClosure(closure *ssa.Value, k callKind) {
}
// getMethodClosure returns a value representing the closure for a method call
-func (s *state) getMethodClosure(fn *Node) *ssa.Value {
+func (s *state) getMethodClosure(fn ir.Node) *ssa.Value {
// Make a name n2 for the function.
// fn.Sym might be sync.(*Mutex).Unlock.
// Make a PFUNC node out of that, then evaluate it.
// We get back an SSA value representing &sync.(*Mutex).Unlock·f.
// We can then pass that to defer or go.
- n2 := newnamel(fn.Pos, fn.Sym)
- n2.Name.Curfn = s.curfn
- n2.SetClass(PFUNC)
+ n2 := ir.NewNameAt(fn.Pos(), fn.Sym())
+ n2.Name().Curfn = s.curfn
+ n2.SetClass(ir.PFUNC)
// n2.Sym already existed, so it's already marked as a function.
- n2.Pos = fn.Pos
- n2.Type = types.Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it.
+ n2.SetPos(fn.Pos())
+ n2.SetType(types.Types[types.TUINT8]) // fake type for a static closure. Could use runtime.funcval if we had it.
return s.expr(n2)
}
// getClosureAndRcvr returns values for the appropriate closure and receiver of an
// interface call
-func (s *state) getClosureAndRcvr(fn *Node) (*ssa.Value, *ssa.Value) {
- i := s.expr(fn.Left)
- itab := s.newValue1(ssa.OpITab, types.Types[TUINTPTR], i)
+func (s *state) getClosureAndRcvr(fn ir.Node) (*ssa.Value, *ssa.Value) {
+ i := s.expr(fn.Left())
+ itab := s.newValue1(ssa.OpITab, types.Types[types.TUINTPTR], i)
s.nilCheck(itab)
- itabidx := fn.Xoffset + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab
+ itabidx := fn.Offset() + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab
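// (runtime.itab starts with two pointers and a uint32 hash padded to 8
// bytes, so fun begins at 2*Widthptr+8; fn.Offset() then selects the
// method's slot within fun.)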
closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i)
return closure, rcvr
@@ -4808,9 +4819,9 @@ func (s *state) getClosureAndRcvr(fn *Node) (*ssa.Value, *ssa.Value) {
// -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
func etypesign(e types.EType) int8 {
switch e {
- case TINT8, TINT16, TINT32, TINT64, TINT:
+ case types.TINT8, types.TINT16, types.TINT32, types.TINT64, types.TINT:
return -1
- case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR:
+ case types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINT, types.TUINTPTR, types.TUNSAFEPTR:
return +1
}
return 0
@@ -4818,25 +4829,25 @@ func etypesign(e types.EType) int8 {
// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
// The value that the returned Value represents is guaranteed to be non-nil.
-func (s *state) addr(n *Node) *ssa.Value {
- if n.Op != ONAME {
- s.pushLine(n.Pos)
+func (s *state) addr(n ir.Node) *ssa.Value {
+ if n.Op() != ir.ONAME {
+ s.pushLine(n.Pos())
defer s.popLine()
}
- t := types.NewPtr(n.Type)
- switch n.Op {
- case ONAME:
+ t := types.NewPtr(n.Type())
+ switch n.Op() {
+ case ir.ONAME:
switch n.Class() {
- case PEXTERN:
+ case ir.PEXTERN:
// global variable
- v := s.entryNewValue1A(ssa.OpAddr, t, n.Sym.Linksym(), s.sb)
+ v := s.entryNewValue1A(ssa.OpAddr, t, n.Sym().Linksym(), s.sb)
// TODO: Make OpAddr use AuxInt as well as Aux.
- if n.Xoffset != 0 {
- v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v)
+ if n.Offset() != 0 {
+ v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Offset(), v)
}
return v
- case PPARAM:
+ case ir.PPARAM:
// parameter slot
v := s.decladdrs[n]
if v != nil {
@@ -4848,10 +4859,10 @@ func (s *state) addr(n *Node) *ssa.Value {
}
s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
return nil
- case PAUTO:
- return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), !n.IsAutoTmp())
+ case ir.PAUTO:
+ return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), !ir.IsAutoTmp(n))
- case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
+ case ir.PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
// ensure that we reuse symbols for out parameters so
// that cse works on their addresses
return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true)
@@ -4859,51 +4870,51 @@ func (s *state) addr(n *Node) *ssa.Value {
s.Fatalf("variable address class %v not implemented", n.Class())
return nil
}
- case ORESULT:
+ case ir.ORESULT:
// load return from callee
if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
- return s.constOffPtrSP(t, n.Xoffset)
+ return s.constOffPtrSP(t, n.Offset())
}
- which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Xoffset)
+ which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Offset())
if which == -1 {
// Do the old thing // TODO: Panic instead.
- return s.constOffPtrSP(t, n.Xoffset)
+ return s.constOffPtrSP(t, n.Offset())
}
x := s.newValue1I(ssa.OpSelectNAddr, t, which, s.prevCall)
return x
- case OINDEX:
- if n.Left.Type.IsSlice() {
- a := s.expr(n.Left)
- i := s.expr(n.Right)
- len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], a)
+ case ir.OINDEX:
+ if n.Left().Type().IsSlice() {
+ a := s.expr(n.Left())
+ i := s.expr(n.Right())
+ len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], a)
i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
p := s.newValue1(ssa.OpSlicePtr, t, a)
return s.newValue2(ssa.OpPtrIndex, t, p, i)
} else { // array
- a := s.addr(n.Left)
- i := s.expr(n.Right)
- len := s.constInt(types.Types[TINT], n.Left.Type.NumElem())
+ a := s.addr(n.Left())
+ i := s.expr(n.Right())
+ len := s.constInt(types.Types[types.TINT], n.Left().Type().NumElem())
i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
- return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left.Type.Elem()), a, i)
- }
- case ODEREF:
- return s.exprPtr(n.Left, n.Bounded(), n.Pos)
- case ODOT:
- p := s.addr(n.Left)
- return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
- case ODOTPTR:
- p := s.exprPtr(n.Left, n.Bounded(), n.Pos)
- return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
- case OCLOSUREVAR:
- return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset,
+ return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left().Type().Elem()), a, i)
+ }
+ case ir.ODEREF:
+ return s.exprPtr(n.Left(), n.Bounded(), n.Pos())
+ case ir.ODOT:
+ p := s.addr(n.Left())
+ return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
+ case ir.ODOTPTR:
+ p := s.exprPtr(n.Left(), n.Bounded(), n.Pos())
+ return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
+ case ir.OCLOSUREVAR:
+ return s.newValue1I(ssa.OpOffPtr, t, n.Offset(),
s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr))
- case OCONVNOP:
- addr := s.addr(n.Left)
+ case ir.OCONVNOP:
+ addr := s.addr(n.Left())
return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
- case OCALLFUNC, OCALLINTER, OCALLMETH:
+ case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH:
return s.callAddr(n, callNormal)
- case ODOTTYPE:
+ case ir.ODOTTYPE:
v, _ := s.dottype(n, false)
if v.Op != ssa.OpLoad {
s.Fatalf("dottype of non-load")
@@ -4913,36 +4924,36 @@ func (s *state) addr(n *Node) *ssa.Value {
}
return v.Args[0]
default:
- s.Fatalf("unhandled addr %v", n.Op)
+ s.Fatalf("unhandled addr %v", n.Op())
return nil
}
}
// canSSA reports whether n is SSA-able.
// n must be an ONAME (or an ODOT sequence with an ONAME base).
-func (s *state) canSSA(n *Node) bool {
- if Debug.N != 0 {
+func (s *state) canSSA(n ir.Node) bool {
+ if base.Flag.N != 0 {
return false
}
- for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) {
- n = n.Left
+ for n.Op() == ir.ODOT || (n.Op() == ir.OINDEX && n.Left().Type().IsArray()) {
+ n = n.Left()
}
- if n.Op != ONAME {
+ if n.Op() != ir.ONAME {
return false
}
- if n.Name.Addrtaken() {
+ if n.Name().Addrtaken() {
return false
}
- if n.isParamHeapCopy() {
+ if isParamHeapCopy(n) {
return false
}
- if n.Class() == PAUTOHEAP {
+ if n.Class() == ir.PAUTOHEAP {
s.Fatalf("canSSA of PAUTOHEAP %v", n)
}
switch n.Class() {
- case PEXTERN:
+ case ir.PEXTERN:
return false
- case PPARAMOUT:
+ case ir.PPARAMOUT:
if s.hasdefer {
// TODO: handle this case? Named return values must be
// in memory so that the deferred function can see them.
@@ -4957,13 +4968,13 @@ func (s *state) canSSA(n *Node) bool {
return false
}
}
- if n.Class() == PPARAM && n.Sym != nil && n.Sym.Name == ".this" {
+ if n.Class() == ir.PPARAM && n.Sym() != nil && n.Sym().Name == ".this" {
// wrappers generated by genwrapper need to update
// the .this pointer in place.
// TODO: treat as a PPARAMOUT?
return false
}
- return canSSAType(n.Type)
+ return canSSAType(n.Type())
// TODO: try to make more variables SSAable?
}
@@ -4977,7 +4988,7 @@ func canSSAType(t *types.Type) bool {
return false
}
switch t.Etype {
- case TARRAY:
+ case types.TARRAY:
// We can't do larger arrays because dynamic indexing is
// not supported on SSA variables.
// TODO: allow if all indexes are constant.
@@ -4985,7 +4996,7 @@ func canSSAType(t *types.Type) bool {
return canSSAType(t.Elem())
}
return false
- case TSTRUCT:
+ case types.TSTRUCT:
if t.NumFields() > ssa.MaxStruct {
return false
}
@@ -5001,7 +5012,7 @@ func canSSAType(t *types.Type) bool {
}
// exprPtr evaluates n to a pointer and nil-checks it.
-func (s *state) exprPtr(n *Node, bounded bool, lineno src.XPos) *ssa.Value {
+func (s *state) exprPtr(n ir.Node, bounded bool, lineno src.XPos) *ssa.Value {
p := s.expr(n)
if bounded || n.NonNil() {
if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 {
@@ -5017,7 +5028,7 @@ func (s *state) exprPtr(n *Node, bounded bool, lineno src.XPos) *ssa.Value {
// Used only for automatically inserted nil checks,
// not for user code like 'x != nil'.
func (s *state) nilCheck(ptr *ssa.Value) {
- if disable_checknil != 0 || s.curfn.Func.NilCheckDisabled() {
+ if base.Debug.DisableNil != 0 || s.curfn.Func().NilCheckDisabled() {
return
}
s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
@@ -5032,7 +5043,7 @@ func (s *state) nilCheck(ptr *ssa.Value) {
func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
idx = s.extendIndex(idx, len, kind, bounded)
- if bounded || Debug.B != 0 {
+ if bounded || base.Flag.B != 0 {
// If bounded or bounds checking is flag-disabled, then no check necessary,
// just return the extended index.
//
@@ -5082,9 +5093,9 @@ func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bo
var cmp *ssa.Value
if kind == ssa.BoundsIndex || kind == ssa.BoundsIndexU {
- cmp = s.newValue2(ssa.OpIsInBounds, types.Types[TBOOL], idx, len)
+ cmp = s.newValue2(ssa.OpIsInBounds, types.Types[types.TBOOL], idx, len)
} else {
- cmp = s.newValue2(ssa.OpIsSliceInBounds, types.Types[TBOOL], idx, len)
+ cmp = s.newValue2(ssa.OpIsSliceInBounds, types.Types[types.TBOOL], idx, len)
}
b := s.endBlock()
b.Kind = ssa.BlockIf
@@ -5105,12 +5116,12 @@ func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bo
s.startBlock(bNext)
// In Spectre index mode, apply an appropriate mask to avoid speculative out-of-bounds accesses.
- if spectreIndex {
+ if base.Flag.Cfg.SpectreIndex {
op := ssa.OpSpectreIndex
if kind != ssa.BoundsIndex && kind != ssa.BoundsIndexU {
op = ssa.OpSpectreSliceIndex
}
- idx = s.newValue2(op, types.Types[TINT], idx, len)
+ idx = s.newValue2(op, types.Types[types.TINT], idx, len)
}
return idx
@@ -5124,7 +5135,7 @@ func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
b.Likely = ssa.BranchLikely
bNext := s.f.NewBlock(ssa.BlockPlain)
line := s.peekPos()
- pos := Ctxt.PosTable.Pos(line)
+ pos := base.Ctxt.PosTable.Pos(line)
fl := funcLine{f: fn, base: pos.Base(), line: pos.Line()}
bPanic := s.panics[fl]
if bPanic == nil {
@@ -5140,7 +5151,7 @@ func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
s.startBlock(bNext)
}
-func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value {
+func (s *state) intDivide(n ir.Node, a, b *ssa.Value) *ssa.Value {
needcheck := true
switch b.Op {
case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
@@ -5150,10 +5161,10 @@ func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value {
}
if needcheck {
// do a size-appropriate check for zero
- cmp := s.newValue2(s.ssaOp(ONE, n.Type), types.Types[TBOOL], b, s.zeroVal(n.Type))
+ cmp := s.newValue2(s.ssaOp(ir.ONE, n.Type()), types.Types[types.TBOOL], b, s.zeroVal(n.Type()))
s.check(cmp, panicdivide)
}
- return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
// rtcall issues a call to the given runtime function fn with the listed args.
@@ -5163,7 +5174,7 @@ func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value {
func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value {
s.prevCall = nil
// Write args to the stack
- off := Ctxt.FixedFrameSize()
+ off := base.Ctxt.FixedFrameSize()
testLateExpansion := ssa.LateCallExpansionEnabledWithin(s.f)
var ACArgs []ssa.Param
var ACResults []ssa.Param
@@ -5199,10 +5210,10 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
callArgs = append(callArgs, s.mem())
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...)
- s.vars[&memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
+ s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
} else {
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
- s.vars[&memVar] = call
+ s.vars[memVar] = call
}
if !returns {
@@ -5210,7 +5221,7 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
b := s.endBlock()
b.Kind = ssa.BlockExit
b.SetControl(call)
- call.AuxInt = off - Ctxt.FixedFrameSize()
+ call.AuxInt = off - base.Ctxt.FixedFrameSize()
if len(results) > 0 {
s.Fatalf("panic call can't have results")
}
@@ -5252,7 +5263,7 @@ func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask,
if skip == 0 && (!t.HasPointers() || ssa.IsStackAddr(left)) {
// Known to not have write barrier. Store the whole type.
- s.vars[&memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt)
+ s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt)
return
}
@@ -5281,24 +5292,24 @@ func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip ski
if skip&skipLen != 0 {
return
}
- len := s.newValue1(ssa.OpStringLen, types.Types[TINT], right)
+ len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], right)
lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
- s.store(types.Types[TINT], lenAddr, len)
+ s.store(types.Types[types.TINT], lenAddr, len)
case t.IsSlice():
if skip&skipLen == 0 {
- len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], right)
+ len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], right)
lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
- s.store(types.Types[TINT], lenAddr, len)
+ s.store(types.Types[types.TINT], lenAddr, len)
}
if skip&skipCap == 0 {
- cap := s.newValue1(ssa.OpSliceCap, types.Types[TINT], right)
+ cap := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], right)
capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left)
- s.store(types.Types[TINT], capAddr, cap)
+ s.store(types.Types[types.TINT], capAddr, cap)
}
case t.IsInterface():
// itab field doesn't need a write barrier (even though it is a pointer).
itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
- s.store(types.Types[TUINTPTR], left, itab)
+ s.store(types.Types[types.TUINTPTR], left, itab)
case t.IsStruct():
n := t.NumFields()
for i := 0; i < n; i++ {
@@ -5359,7 +5370,7 @@ func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
// putArg evaluates n for the purpose of passing it as an argument to a function and returns the corresponding Param for the call.
// If forLateExpandedCall is true, it returns the argument value to pass to the call operation.
// If forLateExpandedCall is false, then the value is stored at the specified stack offset, and the returned value is nil.
-func (s *state) putArg(n *Node, t *types.Type, off int64, forLateExpandedCall bool) (ssa.Param, *ssa.Value) {
+func (s *state) putArg(n ir.Node, t *types.Type, off int64, forLateExpandedCall bool) (ssa.Param, *ssa.Value) {
var a *ssa.Value
if forLateExpandedCall {
if !canSSAType(t) {
@@ -5373,7 +5384,7 @@ func (s *state) putArg(n *Node, t *types.Type, off int64, forLateExpandedCall bo
return ssa.Param{Type: t, Offset: int32(off)}, a
}
-func (s *state) storeArgWithBase(n *Node, t *types.Type, base *ssa.Value, off int64) {
+func (s *state) storeArgWithBase(n ir.Node, t *types.Type, base *ssa.Value, off int64) {
pt := types.NewPtr(t)
var addr *ssa.Value
if base == s.sp {
@@ -5402,11 +5413,11 @@ func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value)
switch {
case t.IsSlice():
ptr = s.newValue1(ssa.OpSlicePtr, types.NewPtr(t.Elem()), v)
- len = s.newValue1(ssa.OpSliceLen, types.Types[TINT], v)
- cap = s.newValue1(ssa.OpSliceCap, types.Types[TINT], v)
+ len = s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], v)
+ cap = s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], v)
case t.IsString():
- ptr = s.newValue1(ssa.OpStringPtr, types.NewPtr(types.Types[TUINT8]), v)
- len = s.newValue1(ssa.OpStringLen, types.Types[TINT], v)
+ ptr = s.newValue1(ssa.OpStringPtr, types.NewPtr(types.Types[types.TUINT8]), v)
+ len = s.newValue1(ssa.OpStringLen, types.Types[types.TINT], v)
cap = len
case t.IsPtr():
if !t.Elem().IsArray() {
@@ -5414,7 +5425,7 @@ func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value)
}
s.nilCheck(v)
ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), v)
- len = s.constInt(types.Types[TINT], t.Elem().NumElem())
+ len = s.constInt(types.Types[types.TINT], t.Elem().NumElem())
cap = len
default:
s.Fatalf("bad type in slice %v\n", t)
@@ -5422,7 +5433,7 @@ func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value)
// Set default values
if i == nil {
- i = s.constInt(types.Types[TINT], 0)
+ i = s.constInt(types.Types[types.TINT], 0)
}
if j == nil {
j = len
@@ -5460,18 +5471,18 @@ func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value)
}
// Word-sized integer operations.
- subOp := s.ssaOp(OSUB, types.Types[TINT])
- mulOp := s.ssaOp(OMUL, types.Types[TINT])
- andOp := s.ssaOp(OAND, types.Types[TINT])
+ subOp := s.ssaOp(ir.OSUB, types.Types[types.TINT])
+ mulOp := s.ssaOp(ir.OMUL, types.Types[types.TINT])
+ andOp := s.ssaOp(ir.OAND, types.Types[types.TINT])
// Calculate the length (rlen) and capacity (rcap) of the new slice.
// For strings the capacity of the result is unimportant. However,
// we use rcap to test if we've generated a zero-length slice.
// Use length of strings for that.
- rlen := s.newValue2(subOp, types.Types[TINT], j, i)
+ rlen := s.newValue2(subOp, types.Types[types.TINT], j, i)
rcap := rlen
if j != k && !t.IsString() {
- rcap = s.newValue2(subOp, types.Types[TINT], k, i)
+ rcap = s.newValue2(subOp, types.Types[types.TINT], k, i)
}
if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 {
@@ -5493,15 +5504,15 @@ func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value)
//
// Where mask(x) is 0 if x==0 and -1 if x>0 and stride is the width
// of the element type.
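// For example, a zero-capacity result such as s[len(s):] gets its delta
// masked to 0, so rptr equals ptr rather than pointing one past the end of
// the backing array.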
- stride := s.constInt(types.Types[TINT], ptr.Type.Elem().Width)
+ stride := s.constInt(types.Types[types.TINT], ptr.Type.Elem().Width)
// The delta is the number of bytes to offset ptr by.
- delta := s.newValue2(mulOp, types.Types[TINT], i, stride)
+ delta := s.newValue2(mulOp, types.Types[types.TINT], i, stride)
// If we're slicing to the point where the capacity is zero,
// zero out the delta.
- mask := s.newValue1(ssa.OpSlicemask, types.Types[TINT], rcap)
- delta = s.newValue2(andOp, types.Types[TINT], delta, mask)
+ mask := s.newValue1(ssa.OpSlicemask, types.Types[types.TINT], rcap)
+ delta = s.newValue2(andOp, types.Types[types.TINT], delta, mask)
// Compute rptr = ptr + delta.
rptr := s.newValue2(ssa.OpAddPtr, ptr.Type, ptr, delta)
@@ -5534,15 +5545,15 @@ var u64_f32 = u642fcvtTab{
one: (*state).constInt64,
}
-func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint64Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
}
-func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint64Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
}
-func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// if x >= 0 {
// result = (floatY) x
// } else {
@@ -5568,7 +5579,7 @@ func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt
// equal to 10000000001; that rounds up, and the 1 cannot
// be lost else it would round down if the LSB of the
// candidate mantissa is 0.
- cmp := s.newValue2(cvttab.leq, types.Types[TBOOL], s.zeroVal(ft), x)
+ cmp := s.newValue2(cvttab.leq, types.Types[types.TBOOL], s.zeroVal(ft), x)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
@@ -5598,7 +5609,7 @@ func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
- return s.variable(n, n.Type)
+ return s.variable(n, n.Type())
}
type u322fcvtTab struct {
@@ -5615,21 +5626,21 @@ var u32_f32 = u322fcvtTab{
cvtF2F: ssa.OpCvt64Fto32F,
}
-func (s *state) uint32Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint32Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
}
-func (s *state) uint32Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint32Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
}
-func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// if x >= 0 {
// result = floatY(x)
// } else {
// result = floatY(float64(x) + (1<<32))
// }
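// For example, x = 0xFFFFFFFF reads as -1 in the signed comparison, so the
// else branch computes float64(-1) + 1<<32 = 4294967295.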
- cmp := s.newValue2(ssa.OpLeq32, types.Types[TBOOL], s.zeroVal(ft), x)
+ cmp := s.newValue2(ssa.OpLeq32, types.Types[types.TBOOL], s.zeroVal(ft), x)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
@@ -5648,9 +5659,9 @@ func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt
b.AddEdgeTo(bElse)
s.startBlock(bElse)
- a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[TFLOAT64], x)
- twoToThe32 := s.constFloat64(types.Types[TFLOAT64], float64(1<<32))
- a2 := s.newValue2(ssa.OpAdd64F, types.Types[TFLOAT64], a1, twoToThe32)
+ a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[types.TFLOAT64], x)
+ twoToThe32 := s.constFloat64(types.Types[types.TFLOAT64], float64(1<<32))
+ a2 := s.newValue2(ssa.OpAdd64F, types.Types[types.TFLOAT64], a1, twoToThe32)
a3 := s.newValue1(cvttab.cvtF2F, tt, a2)
s.vars[n] = a3
@@ -5658,12 +5669,12 @@ func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
- return s.variable(n, n.Type)
+ return s.variable(n, n.Type())
}
// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
-func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
- if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() {
+func (s *state) referenceTypeBuiltin(n ir.Node, x *ssa.Value) *ssa.Value {
+ if !n.Left().Type().IsMap() && !n.Left().Type().IsChan() {
s.Fatalf("node must be a map or a channel")
}
// if n == nil {
@@ -5674,9 +5685,9 @@ func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
// // cap
// return *(((*int)n)+1)
// }
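// This leans on the runtime representation: maps and channels are both
// pointers to a header whose first word is the element count, and for
// channels the second word is the buffer capacity.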
- lenType := n.Type
- nilValue := s.constNil(types.Types[TUINTPTR])
- cmp := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], x, nilValue)
+ lenType := n.Type()
+ nilValue := s.constNil(types.Types[types.TUINTPTR])
+ cmp := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], x, nilValue)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
@@ -5695,11 +5706,11 @@ func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
b.AddEdgeTo(bElse)
s.startBlock(bElse)
- switch n.Op {
- case OLEN:
+ switch n.Op() {
+ case ir.OLEN:
// length is stored in the first word for map/chan
s.vars[n] = s.load(lenType, x)
- case OCAP:
+ case ir.OCAP:
// capacity is stored in the second word for chan
sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x)
s.vars[n] = s.load(lenType, sw)
@@ -5760,22 +5771,22 @@ var f64_u32 = f2uCvtTab{
cutoff: 1 << 31,
}
-func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) float32ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f32_u64, n, x, ft, tt)
}
-func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) float64ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f64_u64, n, x, ft, tt)
}
-func (s *state) float32ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) float32ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f32_u32, n, x, ft, tt)
}
-func (s *state) float64ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) float64ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f64_u32, n, x, ft, tt)
}
-func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// cutoff:=1<<(intY_Size-1)
// if x < floatX(cutoff) {
// result = uintY(x)
@@ -5785,7 +5796,7 @@ func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *ty
// result = z | -(cutoff)
// }
cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
- cmp := s.newValue2(cvttab.ltf, types.Types[TBOOL], x, cutoff)
+ cmp := s.newValue2(cvttab.ltf, types.Types[types.TBOOL], x, cutoff)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
@@ -5813,31 +5824,31 @@ func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *ty
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
- return s.variable(n, n.Type)
+ return s.variable(n, n.Type())
}
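
The cutoff trick in the comment above, written as plain Go for the float64 to uint64 case (a sketch; NaN and out-of-range inputs are left undefined here, as in the generated code):

package main

import "fmt"

// float64ToUint64 mirrors the emitted branch: values below 2^63 fit a
// signed convert; otherwise subtract the cutoff, convert, and OR the
// high bit back in (at this width, |cutoff equals |-(cutoff)).
func float64ToUint64(f float64) uint64 {
	const cutoff = 1 << 63
	if f < cutoff {
		return uint64(int64(f))
	}
	y := uint64(int64(f - cutoff)) // z = uintY(x - cutoff)
	return y | cutoff              // result = z | -(cutoff)
}

func main() {
	fmt.Println(float64ToUint64(3.9))     // 3
	fmt.Println(float64ToUint64(1 << 63)) // 9223372036854775808
}
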
// dottype generates SSA for a type assertion node.
// commaok indicates whether to panic or return a bool.
// If commaok is false, resok will be nil.
-func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
- iface := s.expr(n.Left) // input interface
- target := s.expr(n.Right) // target type
+func (s *state) dottype(n ir.Node, commaok bool) (res, resok *ssa.Value) {
+ iface := s.expr(n.Left()) // input interface
+ target := s.expr(n.Right()) // target type
byteptr := s.f.Config.Types.BytePtr
- if n.Type.IsInterface() {
- if n.Type.IsEmptyInterface() {
+ if n.Type().IsInterface() {
+ if n.Type().IsEmptyInterface() {
// Converting to an empty interface.
// Input could be an empty or nonempty interface.
- if Debug_typeassert > 0 {
- Warnl(n.Pos, "type assertion inlined")
+ if base.Debug.TypeAssert > 0 {
+ base.WarnfAt(n.Pos(), "type assertion inlined")
}
// Get itab/type field from input.
itab := s.newValue1(ssa.OpITab, byteptr, iface)
// Conversion succeeds iff that field is not nil.
- cond := s.newValue2(ssa.OpNeqPtr, types.Types[TBOOL], itab, s.constNil(byteptr))
+ cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr))
- if n.Left.Type.IsEmptyInterface() && commaok {
+ if n.Left().Type().IsEmptyInterface() && commaok {
// Converting empty interface to empty interface with ,ok is just a nil check.
return iface, cond
}
@@ -5859,15 +5870,15 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
// On success, return (perhaps modified) input interface.
s.startBlock(bOk)
- if n.Left.Type.IsEmptyInterface() {
+ if n.Left().Type().IsEmptyInterface() {
res = iface // Use input interface unchanged.
return
}
// Load type out of itab, build interface with existing idata.
off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
typ := s.load(byteptr, off)
- idata := s.newValue1(ssa.OpIData, n.Type, iface)
- res = s.newValue2(ssa.OpIMake, n.Type, typ, idata)
+ idata := s.newValue1(ssa.OpIData, n.Type(), iface)
+ res = s.newValue2(ssa.OpIMake, n.Type(), typ, idata)
return
}
@@ -5875,12 +5886,12 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
// nonempty -> empty
// Need to load type from itab
off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
- s.vars[&typVar] = s.load(byteptr, off)
+ s.vars[typVar] = s.load(byteptr, off)
s.endBlock()
// itab is nil, might as well use that as the nil result.
s.startBlock(bFail)
- s.vars[&typVar] = itab
+ s.vars[typVar] = itab
s.endBlock()
// Merge point.
@@ -5888,60 +5899,60 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
bOk.AddEdgeTo(bEnd)
bFail.AddEdgeTo(bEnd)
s.startBlock(bEnd)
- idata := s.newValue1(ssa.OpIData, n.Type, iface)
- res = s.newValue2(ssa.OpIMake, n.Type, s.variable(&typVar, byteptr), idata)
+ idata := s.newValue1(ssa.OpIData, n.Type(), iface)
+ res = s.newValue2(ssa.OpIMake, n.Type(), s.variable(typVar, byteptr), idata)
resok = cond
- delete(s.vars, &typVar)
+ delete(s.vars, typVar)
return
}
// converting to a nonempty interface needs a runtime call.
- if Debug_typeassert > 0 {
- Warnl(n.Pos, "type assertion not inlined")
+ if base.Debug.TypeAssert > 0 {
+ base.WarnfAt(n.Pos(), "type assertion not inlined")
}
- if n.Left.Type.IsEmptyInterface() {
+ if n.Left().Type().IsEmptyInterface() {
if commaok {
- call := s.rtcall(assertE2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
+ call := s.rtcall(assertE2I2, true, []*types.Type{n.Type(), types.Types[types.TBOOL]}, target, iface)
return call[0], call[1]
}
- return s.rtcall(assertE2I, true, []*types.Type{n.Type}, target, iface)[0], nil
+ return s.rtcall(assertE2I, true, []*types.Type{n.Type()}, target, iface)[0], nil
}
if commaok {
- call := s.rtcall(assertI2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
+ call := s.rtcall(assertI2I2, true, []*types.Type{n.Type(), types.Types[types.TBOOL]}, target, iface)
return call[0], call[1]
}
- return s.rtcall(assertI2I, true, []*types.Type{n.Type}, target, iface)[0], nil
+ return s.rtcall(assertI2I, true, []*types.Type{n.Type()}, target, iface)[0], nil
}
- if Debug_typeassert > 0 {
- Warnl(n.Pos, "type assertion inlined")
+ if base.Debug.TypeAssert > 0 {
+ base.WarnfAt(n.Pos(), "type assertion inlined")
}
// Converting to a concrete type.
- direct := isdirectiface(n.Type)
+ direct := isdirectiface(n.Type())
itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
- if Debug_typeassert > 0 {
- Warnl(n.Pos, "type assertion inlined")
+ if base.Debug.TypeAssert > 0 {
+ base.WarnfAt(n.Pos(), "type assertion inlined")
}
var targetITab *ssa.Value
- if n.Left.Type.IsEmptyInterface() {
+ if n.Left().Type().IsEmptyInterface() {
// Looking for pointer to target type.
targetITab = target
} else {
// Looking for pointer to itab for target type and source interface.
- targetITab = s.expr(n.List.First())
+ targetITab = s.expr(n.List().First())
}
- var tmp *Node // temporary for use with large types
+ var tmp ir.Node // temporary for use with large types
var addr *ssa.Value // address of tmp
- if commaok && !canSSAType(n.Type) {
+ if commaok && !canSSAType(n.Type()) {
// unSSAable type, use temporary.
// TODO: get rid of some of these temporaries.
- tmp = tempAt(n.Pos, s.curfn, n.Type)
- s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem())
+ tmp = tempAt(n.Pos(), s.curfn, n.Type())
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem())
addr = s.addr(tmp)
}
- cond := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], itab, targetITab)
+ cond := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], itab, targetITab)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cond)
@@ -5955,8 +5966,8 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
if !commaok {
// on failure, panic by calling panicdottype
s.startBlock(bFail)
- taddr := s.expr(n.Right.Right)
- if n.Left.Type.IsEmptyInterface() {
+ taddr := s.expr(n.Right().Right())
+ if n.Left().Type().IsEmptyInterface() {
s.rtcall(panicdottypeE, false, nil, itab, target, taddr)
} else {
s.rtcall(panicdottypeI, false, nil, itab, target, taddr)
@@ -5965,10 +5976,10 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
// on success, return data from interface
s.startBlock(bOk)
if direct {
- return s.newValue1(ssa.OpIData, n.Type, iface), nil
+ return s.newValue1(ssa.OpIData, n.Type(), iface), nil
}
- p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
- return s.load(n.Type, p), nil
+ p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
+ return s.load(n.Type(), p), nil
}
// commaok is the more complicated case because we have
@@ -5976,52 +5987,52 @@ func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
bEnd := s.f.NewBlock(ssa.BlockPlain)
// Note that we need a new valVar each time (unlike okVar where we can
// reuse the variable) because it might have a different type every time.
- valVar := &Node{Op: ONAME, Sym: &types.Sym{Name: "val"}}
+ valVar := ssaMarker("val")
// type assertion succeeded
s.startBlock(bOk)
if tmp == nil {
if direct {
- s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type, iface)
+ s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type(), iface)
} else {
- p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
- s.vars[valVar] = s.load(n.Type, p)
+ p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
+ s.vars[valVar] = s.load(n.Type(), p)
}
} else {
- p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
- s.move(n.Type, addr, p)
+ p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
+ s.move(n.Type(), addr, p)
}
- s.vars[&okVar] = s.constBool(true)
+ s.vars[okVar] = s.constBool(true)
s.endBlock()
bOk.AddEdgeTo(bEnd)
// type assertion failed
s.startBlock(bFail)
if tmp == nil {
- s.vars[valVar] = s.zeroVal(n.Type)
+ s.vars[valVar] = s.zeroVal(n.Type())
} else {
- s.zero(n.Type, addr)
+ s.zero(n.Type(), addr)
}
- s.vars[&okVar] = s.constBool(false)
+ s.vars[okVar] = s.constBool(false)
s.endBlock()
bFail.AddEdgeTo(bEnd)
// merge point
s.startBlock(bEnd)
if tmp == nil {
- res = s.variable(valVar, n.Type)
+ res = s.variable(valVar, n.Type())
delete(s.vars, valVar)
} else {
- res = s.load(n.Type, addr)
- s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp, s.mem())
+ res = s.load(n.Type(), addr)
+ s.vars[memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp, s.mem())
}
- resok = s.variable(&okVar, types.Types[TBOOL])
- delete(s.vars, &okVar)
+ resok = s.variable(okVar, types.Types[types.TBOOL])
+ delete(s.vars, okVar)
return res, resok
}
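
For reference, the two source forms this function compiles (a usage example, not compiler API): the comma-ok assertion takes the commaok path and never panics, while the single-result form reaches the panicdottype call on mismatch.

package main

import "fmt"

func main() {
	var i interface{} = "hello"

	s, ok := i.(string) // commaok=true: zero value and false on failure, no panic
	fmt.Println(s, ok)  // hello true

	_, ok = i.(int) // mismatched comma-ok assertion: just false
	fmt.Println(ok) // false

	n := i.(int) // commaok=false: this line panics via the panicdottype path
	fmt.Println(n)
}
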
// variable returns the value of a variable at the current location.
-func (s *state) variable(name *Node, t *types.Type) *ssa.Value {
+func (s *state) variable(name ir.Node, t *types.Type) *ssa.Value {
v := s.vars[name]
if v != nil {
return v
@@ -6044,27 +6055,27 @@ func (s *state) variable(name *Node, t *types.Type) *ssa.Value {
}
func (s *state) mem() *ssa.Value {
- return s.variable(&memVar, types.TypeMem)
+ return s.variable(memVar, types.TypeMem)
}
-func (s *state) addNamedValue(n *Node, v *ssa.Value) {
- if n.Class() == Pxxx {
- // Don't track our dummy nodes (&memVar etc.).
+func (s *state) addNamedValue(n ir.Node, v *ssa.Value) {
+ if n.Class() == ir.Pxxx {
+ // Don't track our marker nodes (memVar etc.).
return
}
- if n.IsAutoTmp() {
+ if ir.IsAutoTmp(n) {
// Don't track temporary variables.
return
}
- if n.Class() == PPARAMOUT {
+ if n.Class() == ir.PPARAMOUT {
// Don't track named output values. This prevents return values
// from being assigned too early. See #14591 and #14762. TODO: allow this.
return
}
- if n.Class() == PAUTO && n.Xoffset != 0 {
- s.Fatalf("AUTO var with offset %v %d", n, n.Xoffset)
+ if n.Class() == ir.PAUTO && n.Offset() != 0 {
+ s.Fatalf("AUTO var with offset %v %d", n, n.Offset())
}
- loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0}
+ loc := ssa.LocalSlot{N: n, Type: n.Type(), Off: 0}
values, ok := s.f.NamedValues[loc]
if !ok {
s.f.Names = append(s.f.Names, loc)
@@ -6100,7 +6111,7 @@ type SSAGenState struct {
bstart []*obj.Prog
// Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include PPC and Sparc V8.
- ScratchFpMem *Node
+ ScratchFpMem ir.Node
maxarg int64 // largest frame size for arguments to calls made by the function
@@ -6183,16 +6194,16 @@ func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) {
}
-// byXoffset implements sort.Interface for []*Node using Xoffset as the ordering.
+// byXoffset implements sort.Interface for []ir.Node using Offset as the ordering.
-type byXoffset []*Node
+type byXoffset []ir.Node
func (s byXoffset) Len() int { return len(s) }
-func (s byXoffset) Less(i, j int) bool { return s[i].Xoffset < s[j].Xoffset }
+func (s byXoffset) Less(i, j int) bool { return s[i].Offset() < s[j].Offset() }
func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func emitStackObjects(e *ssafn, pp *Progs) {
- var vars []*Node
- for _, n := range e.curfn.Func.Dcl {
- if livenessShouldTrack(n) && n.Name.Addrtaken() {
+ var vars []ir.Node
+ for _, n := range e.curfn.Func().Dcl {
+ if livenessShouldTrack(n) && n.Name().Addrtaken() {
vars = append(vars, n)
}
}
@@ -6205,18 +6216,18 @@ func emitStackObjects(e *ssafn, pp *Progs) {
// Populate the stack object data.
// Format must match runtime/stack.go:stackObjectRecord.
- x := e.curfn.Func.lsym.Func().StackObjects
+ x := e.curfn.Func().LSym.Func().StackObjects
off := 0
off = duintptr(x, off, uint64(len(vars)))
for _, v := range vars {
// Note: arguments and return values have non-negative Xoffset,
// in which case the offset is relative to argp.
// Locals have a negative Xoffset, in which case the offset is relative to varp.
- off = duintptr(x, off, uint64(v.Xoffset))
- if !typesym(v.Type).Siggen() {
- e.Fatalf(v.Pos, "stack object's type symbol not generated for type %s", v.Type)
+ off = duintptr(x, off, uint64(v.Offset()))
+ if !typesym(v.Type()).Siggen() {
+ e.Fatalf(v.Pos(), "stack object's type symbol not generated for type %s", v.Type())
}
- off = dsymptr(x, off, dtypesym(v.Type), 0)
+ off = dsymptr(x, off, dtypesym(v.Type()), 0)
}
// Emit a funcdata pointing at the stack object data.
@@ -6226,9 +6237,9 @@ func emitStackObjects(e *ssafn, pp *Progs) {
p.To.Name = obj.NAME_EXTERN
p.To.Sym = x
- if debuglive != 0 {
+ if base.Flag.Live != 0 {
for _, v := range vars {
- Warnl(v.Pos, "stack object %v %s", v, v.Type.String())
+ base.WarnfAt(v.Pos(), "stack object %v %s", v, v.Type().String())
}
}
}
@@ -6242,7 +6253,7 @@ func genssa(f *ssa.Func, pp *Progs) {
s.livenessMap = liveness(e, f, pp)
emitStackObjects(e, pp)
- openDeferInfo := e.curfn.Func.lsym.Func().OpenCodedDeferInfo
+ openDeferInfo := e.curfn.Func().LSym.Func().OpenCodedDeferInfo
if openDeferInfo != nil {
// This function uses open-coded defers -- write out the funcdata
// info that we computed at the end of genssa.
@@ -6268,7 +6279,7 @@ func genssa(f *ssa.Func, pp *Progs) {
s.ScratchFpMem = e.scratchFpMem
- if Ctxt.Flag_locationlists {
+ if base.Ctxt.Flag_locationlists {
if cap(f.Cache.ValueToProgAfter) < f.NumValues() {
f.Cache.ValueToProgAfter = make([]*obj.Prog, f.NumValues())
}
@@ -6364,7 +6375,7 @@ func genssa(f *ssa.Func, pp *Progs) {
thearch.SSAGenValue(&s, v)
}
- if Ctxt.Flag_locationlists {
+ if base.Ctxt.Flag_locationlists {
valueToProgAfter[v.ID] = s.pp.next
}
@@ -6388,7 +6399,7 @@ func genssa(f *ssa.Func, pp *Progs) {
}
// Emit control flow instructions for block
var next *ssa.Block
- if i < len(f.Blocks)-1 && Debug.N == 0 {
+ if i < len(f.Blocks)-1 && base.Flag.N == 0 {
// If -N, leave next==nil so every block with successors
// ends in a JMP (except call blocks - plive doesn't like
// select{send,recv} followed by a JMP call). Helps keep
@@ -6447,7 +6458,7 @@ func genssa(f *ssa.Func, pp *Progs) {
// some of the inline marks.
// Use this instruction instead.
p.Pos = p.Pos.WithIsStmt() // promote position to a statement
- pp.curfn.Func.lsym.Func().AddInlMark(p, inlMarks[m])
+ pp.curfn.Func().LSym.Func().AddInlMark(p, inlMarks[m])
// Make the inline mark a real nop, so it doesn't generate any code.
m.As = obj.ANOP
m.Pos = src.NoXPos
@@ -6459,18 +6470,19 @@ func genssa(f *ssa.Func, pp *Progs) {
// Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction).
for _, p := range inlMarkList {
if p.As != obj.ANOP {
- pp.curfn.Func.lsym.Func().AddInlMark(p, inlMarks[p])
+ pp.curfn.Func().LSym.Func().AddInlMark(p, inlMarks[p])
}
}
}
- if Ctxt.Flag_locationlists {
- e.curfn.Func.DebugInfo = ssa.BuildFuncDebug(Ctxt, f, Debug_locationlist > 1, stackOffset)
+ if base.Ctxt.Flag_locationlists {
+ debugInfo := ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, stackOffset)
+ e.curfn.Func().DebugInfo = debugInfo
bstart := s.bstart
// Note that at this moment, Prog.Pc is a sequence number; it's
// not a real PC until after assembly, so this mapping has to
// be done later.
- e.curfn.Func.DebugInfo.GetPC = func(b, v ssa.ID) int64 {
+ debugInfo.GetPC = func(b, v ssa.ID) int64 {
switch v {
case ssa.BlockStart.ID:
if b == f.Entry.ID {
@@ -6479,7 +6491,7 @@ func genssa(f *ssa.Func, pp *Progs) {
}
return bstart[b].Pc
case ssa.BlockEnd.ID:
- return e.curfn.Func.lsym.Size
+ return e.curfn.Func().LSym.Size
default:
return valueToProgAfter[v].Pc
}
@@ -6563,7 +6575,7 @@ func defframe(s *SSAGenState, e *ssafn) {
// Fill in argument and frame size.
pp.Text.To.Type = obj.TYPE_TEXTSIZE
- pp.Text.To.Val = int32(Rnd(e.curfn.Type.ArgWidth(), int64(Widthreg)))
+ pp.Text.To.Val = int32(Rnd(e.curfn.Type().ArgWidth(), int64(Widthreg)))
pp.Text.To.Offset = frame
// Insert code to zero ambiguously live variables so that the
@@ -6577,20 +6589,20 @@ func defframe(s *SSAGenState, e *ssafn) {
var state uint32
// Iterate through declarations. They are sorted in decreasing Xoffset order.
- for _, n := range e.curfn.Func.Dcl {
- if !n.Name.Needzero() {
+ for _, n := range e.curfn.Func().Dcl {
+ if !n.Name().Needzero() {
continue
}
- if n.Class() != PAUTO {
- e.Fatalf(n.Pos, "needzero class %d", n.Class())
+ if n.Class() != ir.PAUTO {
+ e.Fatalf(n.Pos(), "needzero class %d", n.Class())
}
- if n.Type.Size()%int64(Widthptr) != 0 || n.Xoffset%int64(Widthptr) != 0 || n.Type.Size() == 0 {
- e.Fatalf(n.Pos, "var %L has size %d offset %d", n, n.Type.Size(), n.Xoffset)
+ if n.Type().Size()%int64(Widthptr) != 0 || n.Offset()%int64(Widthptr) != 0 || n.Type().Size() == 0 {
+ e.Fatalf(n.Pos(), "var %L has size %d offset %d", n, n.Type().Size(), n.Offset())
}
- if lo != hi && n.Xoffset+n.Type.Size() >= lo-int64(2*Widthreg) {
+ if lo != hi && n.Offset()+n.Type().Size() >= lo-int64(2*Widthreg) {
// Merge with range we already have.
- lo = n.Xoffset
+ lo = n.Offset()
continue
}
@@ -6598,8 +6610,8 @@ func defframe(s *SSAGenState, e *ssafn) {
p = thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
// Set new range.
- lo = n.Xoffset
- hi = lo + n.Type.Size()
+ lo = n.Offset()
+ hi = lo + n.Type().Size()
}
// Zero final range.
@@ -6665,16 +6677,16 @@ func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
case *obj.LSym:
a.Name = obj.NAME_EXTERN
a.Sym = n
- case *Node:
- if n.Class() == PPARAM || n.Class() == PPARAMOUT {
+ case ir.Node:
+ if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
a.Name = obj.NAME_PARAM
- a.Sym = n.Orig.Sym.Linksym()
- a.Offset += n.Xoffset
+ a.Sym = n.Orig().Sym().Linksym()
+ a.Offset += n.Offset()
break
}
a.Name = obj.NAME_AUTO
- a.Sym = n.Sym.Linksym()
- a.Offset += n.Xoffset
+ a.Sym = n.Sym().Linksym()
+ a.Offset += n.Offset()
default:
v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
}
@@ -6692,17 +6704,17 @@ func (s *state) extendIndex(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bo
// high word and branch to out-of-bounds failure if it is not 0.
var lo *ssa.Value
if idx.Type.IsSigned() {
- lo = s.newValue1(ssa.OpInt64Lo, types.Types[TINT], idx)
+ lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TINT], idx)
} else {
- lo = s.newValue1(ssa.OpInt64Lo, types.Types[TUINT], idx)
+ lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TUINT], idx)
}
- if bounded || Debug.B != 0 {
+ if bounded || base.Flag.B != 0 {
return lo
}
bNext := s.f.NewBlock(ssa.BlockPlain)
bPanic := s.f.NewBlock(ssa.BlockExit)
- hi := s.newValue1(ssa.OpInt64Hi, types.Types[TUINT32], idx)
- cmp := s.newValue2(ssa.OpEq32, types.Types[TBOOL], hi, s.constInt32(types.Types[TUINT32], 0))
+ hi := s.newValue1(ssa.OpInt64Hi, types.Types[types.TUINT32], idx)
+ cmp := s.newValue2(ssa.OpEq32, types.Types[types.TBOOL], hi, s.constInt32(types.Types[types.TUINT32], 0))
if !idx.Type.IsSigned() {
switch kind {
case ssa.BoundsIndex:
@@ -6771,7 +6783,7 @@ func (s *state) extendIndex(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bo
s.Fatalf("bad unsigned index extension %s", idx.Type)
}
}
- return s.newValue1(op, types.Types[TINT], idx)
+ return s.newValue1(op, types.Types[types.TINT], idx)
}
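
On 32-bit targets the code above splits a 64-bit index into lo/hi words and requires the hi word to be zero (a negative signed index sets it). A plain-Go sketch of the same test, with illustrative names:

package main

import "fmt"

// index32 mirrors extendIndex on a 32-bit target: take the low word,
// require the high word to be zero, then do the usual bounds check.
func index32(s []byte, idx int64) byte {
	lo := uint32(idx)       // OpInt64Lo
	hi := uint32(idx >> 32) // OpInt64Hi
	if hi != 0 {
		panic("index out of range") // stands in for the Bounds* panic call
	}
	if uint64(lo) >= uint64(len(s)) {
		panic("index out of range")
	}
	return s[lo]
}

func main() {
	fmt.Println(index32([]byte("abc"), 1)) // 98
}
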
// CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
@@ -6798,27 +6810,27 @@ func CheckLoweredPhi(v *ssa.Value) {
func CheckLoweredGetClosurePtr(v *ssa.Value) {
entry := v.Block.Func.Entry
if entry != v.Block || entry.Values[0] != v {
- Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
+ base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
}
}
-// AutoVar returns a *Node and int64 representing the auto variable and offset within it
+// AutoVar returns an ir.Node and int64 representing the auto variable and offset within it
// where v should be spilled.
-func AutoVar(v *ssa.Value) (*Node, int64) {
+func AutoVar(v *ssa.Value) (ir.Node, int64) {
loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot)
if v.Type.Size() > loc.Type.Size() {
v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
}
- return loc.N.(*Node), loc.Off
+ return loc.N, loc.Off
}
func AddrAuto(a *obj.Addr, v *ssa.Value) {
n, off := AutoVar(v)
a.Type = obj.TYPE_MEM
- a.Sym = n.Sym.Linksym()
+ a.Sym = n.Sym().Linksym()
a.Reg = int16(thearch.REGSP)
- a.Offset = n.Xoffset + off
- if n.Class() == PPARAM || n.Class() == PPARAMOUT {
+ a.Offset = n.Offset() + off
+ if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
a.Name = obj.NAME_PARAM
} else {
a.Name = obj.NAME_AUTO
@@ -6831,9 +6843,9 @@ func (s *SSAGenState) AddrScratch(a *obj.Addr) {
}
a.Type = obj.TYPE_MEM
a.Name = obj.NAME_AUTO
- a.Sym = s.ScratchFpMem.Sym.Linksym()
+ a.Sym = s.ScratchFpMem.Sym().Linksym()
a.Reg = int16(thearch.REGSP)
- a.Offset = s.ScratchFpMem.Xoffset
+ a.Offset = s.ScratchFpMem.Offset()
}
// Call returns a new CALL instruction for the SSA value v.
@@ -6860,7 +6872,7 @@ func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
p.To.Type = obj.TYPE_MEM
default:
- Fatalf("unknown indirect call family")
+ base.Fatalf("unknown indirect call family")
}
p.To.Reg = v.Args[0].Reg()
}
@@ -6875,7 +6887,7 @@ func (s *SSAGenState) PrepareCall(v *ssa.Value) {
if !idx.StackMapValid() {
// See Liveness.hasStackMap.
if sym, ok := v.Aux.(*ssa.AuxCall); !ok || !(sym.Fn == typedmemclr || sym.Fn == typedmemmove) {
- Fatalf("missing stack map index for %v", v.LongString())
+ base.Fatalf("missing stack map index for %v", v.LongString())
}
}
@@ -6915,9 +6927,9 @@ func (s *SSAGenState) UseArgs(n int64) {
}
// fieldIdx finds the index of the field referred to by the ODOT node n.
-func fieldIdx(n *Node) int {
- t := n.Left.Type
- f := n.Sym
+func fieldIdx(n ir.Node) int {
+ t := n.Left().Type()
+ f := n.Sym()
if !t.IsStruct() {
panic("ODOT's LHS is not a struct")
}
@@ -6928,7 +6940,7 @@ func fieldIdx(n *Node) int {
i++
continue
}
- if t1.Offset != n.Xoffset {
+ if t1.Offset != n.Offset() {
panic("field offset doesn't match")
}
return i
@@ -6942,9 +6954,9 @@ func fieldIdx(n *Node) int {
// ssafn holds frontend information about a function that the backend is processing.
// It also exports a bunch of compiler services for the ssa backend.
type ssafn struct {
- curfn *Node
+ curfn ir.Node
strings map[string]*obj.LSym // map from constant string to data symbols
- scratchFpMem *Node // temp for floating point register / memory moves on some architectures
+ scratchFpMem ir.Node // temp for floating point register / memory moves on some architectures
stksize int64 // stack size for current frame
stkptrsize int64 // prefix of stack containing pointers
log bool // print ssa debug to the stdout
@@ -6959,19 +6971,19 @@ func (e *ssafn) StringData(s string) *obj.LSym {
if e.strings == nil {
e.strings = make(map[string]*obj.LSym)
}
- data := stringsym(e.curfn.Pos, s)
+ data := stringsym(e.curfn.Pos(), s)
e.strings[s] = data
return data
}
-func (e *ssafn) Auto(pos src.XPos, t *types.Type) ssa.GCNode {
+func (e *ssafn) Auto(pos src.XPos, t *types.Type) ir.Node {
- n := tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
+ n := tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func().Dcl list
return n
}
func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
- ptrType := types.NewPtr(types.Types[TUINT8])
- lenType := types.Types[TINT]
+ ptrType := types.NewPtr(types.Types[types.TUINT8])
+ lenType := types.Types[types.TINT]
// Split this string up into two separate variables.
p := e.SplitSlot(&name, ".ptr", 0, ptrType)
l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType)
@@ -6979,12 +6991,12 @@ func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
}
func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
- n := name.N.(*Node)
- u := types.Types[TUINTPTR]
- t := types.NewPtr(types.Types[TUINT8])
+ n := name.N
+ u := types.Types[types.TUINTPTR]
+ t := types.NewPtr(types.Types[types.TUINT8])
// Split this interface up into two separate variables.
f := ".itab"
- if n.Type.IsEmptyInterface() {
+ if n.Type().IsEmptyInterface() {
f = ".type"
}
c := e.SplitSlot(&name, f, 0, u) // see comment in plive.go:onebitwalktype1.
@@ -6994,7 +7006,7 @@ func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot
func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
ptrType := types.NewPtr(name.Type.Elem())
- lenType := types.Types[TINT]
+ lenType := types.Types[types.TINT]
p := e.SplitSlot(&name, ".ptr", 0, ptrType)
l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType)
c := e.SplitSlot(&name, ".cap", ptrType.Size()+lenType.Size(), lenType)
@@ -7005,9 +7017,9 @@ func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot)
s := name.Type.Size() / 2
var t *types.Type
if s == 8 {
- t = types.Types[TFLOAT64]
+ t = types.Types[types.TFLOAT64]
} else {
- t = types.Types[TFLOAT32]
+ t = types.Types[types.TFLOAT32]
}
r := e.SplitSlot(&name, ".real", 0, t)
i := e.SplitSlot(&name, ".imag", t.Size(), t)
@@ -7017,14 +7029,14 @@ func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot)
func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
var t *types.Type
if name.Type.IsSigned() {
- t = types.Types[TINT32]
+ t = types.Types[types.TINT32]
} else {
- t = types.Types[TUINT32]
+ t = types.Types[types.TUINT32]
}
if thearch.LinkArch.ByteOrder == binary.BigEndian {
- return e.SplitSlot(&name, ".hi", 0, t), e.SplitSlot(&name, ".lo", t.Size(), types.Types[TUINT32])
+ return e.SplitSlot(&name, ".hi", 0, t), e.SplitSlot(&name, ".lo", t.Size(), types.Types[types.TUINT32])
}
- return e.SplitSlot(&name, ".hi", t.Size(), t), e.SplitSlot(&name, ".lo", 0, types.Types[TUINT32])
+ return e.SplitSlot(&name, ".hi", t.Size(), t), e.SplitSlot(&name, ".lo", 0, types.Types[types.TUINT32])
}
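
The byte-order dependence above only decides which half sits at the lower offset. A tiny sketch of the resulting slot offsets on a 32-bit target (illustrative, not the compiler's API):

package main

import "fmt"

// int64HalfOffsets reports where the .hi and .lo 32-bit halves of a
// split int64 slot land relative to the slot's start, matching the
// SplitSlot calls above.
func int64HalfOffsets(bigEndian bool) (hiOff, loOff int64) {
	if bigEndian {
		return 0, 4 // high word first in memory
	}
	return 4, 0 // little-endian: low word first
}

func main() {
	fmt.Println(int64HalfOffsets(false)) // 4 0
	fmt.Println(int64HalfOffsets(true))  // 0 4
}
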
func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
@@ -7036,10 +7048,10 @@ func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
}
func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
- n := name.N.(*Node)
+ n := name.N
at := name.Type
if at.NumElem() != 1 {
- e.Fatalf(n.Pos, "bad array size")
+ e.Fatalf(n.Pos(), "bad array size")
}
et := at.Elem()
return e.SplitSlot(&name, "[0]", 0, et)
@@ -7051,30 +7063,22 @@ func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
// SplitSlot returns a slot representing the data of parent starting at offset.
func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
- node := parent.N.(*Node)
+ node := parent.N
- if node.Class() != PAUTO || node.Name.Addrtaken() {
+ if node.Class() != ir.PAUTO || node.Name().Addrtaken() {
// addressed things and non-autos retain their parents (i.e., cannot truly be split)
return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset}
}
- s := &types.Sym{Name: node.Sym.Name + suffix, Pkg: localpkg}
-
- n := &Node{
- Name: new(Name),
- Op: ONAME,
- Pos: parent.N.(*Node).Pos,
- }
- n.Orig = n
-
- s.Def = asTypesNode(n)
- asNode(s.Def).Name.SetUsed(true)
- n.Sym = s
- n.Type = t
- n.SetClass(PAUTO)
- n.Esc = EscNever
- n.Name.Curfn = e.curfn
- e.curfn.Func.Dcl = append(e.curfn.Func.Dcl, n)
+ s := &types.Sym{Name: node.Sym().Name + suffix, Pkg: ir.LocalPkg}
+ n := ir.NewNameAt(parent.N.Pos(), s)
+ s.Def = n
+ ir.AsNode(s.Def).Name().SetUsed(true)
+ n.SetType(t)
+ n.SetClass(ir.PAUTO)
+ n.SetEsc(EscNever)
+ n.Name().Curfn = e.curfn
+ e.curfn.Func().Dcl = append(e.curfn.Func().Dcl, n)
dowidth(t)
return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
}
@@ -7084,7 +7088,7 @@ func (e *ssafn) CanSSA(t *types.Type) bool {
}
func (e *ssafn) Line(pos src.XPos) string {
- return linestr(pos)
+ return base.FmtPos(pos)
}
// Log logs a message from the compiler.
@@ -7100,23 +7104,23 @@ func (e *ssafn) Log() bool {
// Fatal reports a compiler error and exits.
func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) {
- lineno = pos
- nargs := append([]interface{}{e.curfn.funcname()}, args...)
- Fatalf("'%s': "+msg, nargs...)
+ base.Pos = pos
+ nargs := append([]interface{}{ir.FuncName(e.curfn)}, args...)
+ base.Fatalf("'%s': "+msg, nargs...)
}
// Warnl reports a "warning", which is usually flag-triggered
// logging output for the benefit of tests.
func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
- Warnl(pos, fmt_, args...)
+ base.WarnfAt(pos, fmt_, args...)
}
func (e *ssafn) Debug_checknil() bool {
- return Debug_checknil != 0
+ return base.Debug.Nil != 0
}
func (e *ssafn) UseWriteBarrier() bool {
- return use_writebarrier
+ return base.Flag.WB
}
func (e *ssafn) Syslook(name string) *obj.LSym {
@@ -7137,36 +7141,19 @@ func (e *ssafn) Syslook(name string) *obj.LSym {
}
func (e *ssafn) SetWBPos(pos src.XPos) {
- e.curfn.Func.setWBPos(pos)
+ e.curfn.Func().SetWBPos(pos)
}
func (e *ssafn) MyImportPath() string {
- return myimportpath
-}
-
-func (n *Node) Typ() *types.Type {
- return n.Type
-}
-func (n *Node) StorageClass() ssa.StorageClass {
- switch n.Class() {
- case PPARAM:
- return ssa.ClassParam
- case PPARAMOUT:
- return ssa.ClassParamOut
- case PAUTO:
- return ssa.ClassAuto
- default:
- Fatalf("untranslatable storage class for %v: %s", n, n.Class())
- return 0
- }
+ return base.Ctxt.Pkgpath
}
-func clobberBase(n *Node) *Node {
- if n.Op == ODOT && n.Left.Type.NumFields() == 1 {
- return clobberBase(n.Left)
+func clobberBase(n ir.Node) ir.Node {
+ if n.Op() == ir.ODOT && n.Left().Type().NumFields() == 1 {
+ return clobberBase(n.Left())
}
- if n.Op == OINDEX && n.Left.Type.IsArray() && n.Left.Type.NumElem() == 1 {
- return clobberBase(n.Left)
+ if n.Op() == ir.OINDEX && n.Left().Type().IsArray() && n.Left().Type().NumElem() == 1 {
+ return clobberBase(n.Left())
}
return n
}
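
Why peeling these wrappers is sound: a single-field struct and a one-element array occupy exactly the storage of their payload, so clobbering the base clobbers the member. A small demonstration (not compiler code):

package main

import (
	"fmt"
	"unsafe"
)

type one struct{ f int }

func main() {
	var v one
	var a [1]int
	// The outer object and its only member share an address.
	fmt.Println(unsafe.Pointer(&v) == unsafe.Pointer(&v.f))  // true
	fmt.Println(unsafe.Pointer(&a) == unsafe.Pointer(&a[0])) // true
}
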
diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go
index defefd76b3..fcda219737 100644
--- a/src/cmd/compile/internal/gc/subr.go
+++ b/src/cmd/compile/internal/gc/subr.go
@@ -5,14 +5,14 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
- "cmd/internal/objabi"
"cmd/internal/src"
"crypto/md5"
"encoding/binary"
"fmt"
- "os"
- "runtime/debug"
+ "go/constant"
"sort"
"strconv"
"strings"
@@ -21,13 +21,6 @@ import (
"unicode/utf8"
)
-type Error struct {
- pos src.XPos
- msg string
-}
-
-var errors []Error
-
// largeStack is info about a function whose stack frame is too large (rare).
type largeStack struct {
locals int64
@@ -41,189 +34,25 @@ var (
largeStackFrames []largeStack
)
-func errorexit() {
- flusherrors()
- if outfile != "" {
- os.Remove(outfile)
- }
- os.Exit(2)
-}
-
-func adderrorname(n *Node) {
- if n.Op != ODOT {
- return
- }
- old := fmt.Sprintf("%v: undefined: %v\n", n.Line(), n.Left)
- if len(errors) > 0 && errors[len(errors)-1].pos.Line() == n.Pos.Line() && errors[len(errors)-1].msg == old {
- errors[len(errors)-1].msg = fmt.Sprintf("%v: undefined: %v in %v\n", n.Line(), n.Left, n)
- }
-}
-
-func adderr(pos src.XPos, format string, args ...interface{}) {
- msg := fmt.Sprintf(format, args...)
- // Only add the position if we know the position.
- // See issue golang.org/issue/11361.
- if pos.IsKnown() {
- msg = fmt.Sprintf("%v: %s", linestr(pos), msg)
- }
- errors = append(errors, Error{
- pos: pos,
- msg: msg + "\n",
- })
-}
-
-// byPos sorts errors by source position.
-type byPos []Error
-
-func (x byPos) Len() int { return len(x) }
-func (x byPos) Less(i, j int) bool { return x[i].pos.Before(x[j].pos) }
-func (x byPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-// flusherrors sorts errors seen so far by line number, prints them to stdout,
-// and empties the errors array.
-func flusherrors() {
- Ctxt.Bso.Flush()
- if len(errors) == 0 {
- return
- }
- sort.Stable(byPos(errors))
- for i, err := range errors {
- if i == 0 || err.msg != errors[i-1].msg {
- fmt.Printf("%s", err.msg)
- }
- }
- errors = errors[:0]
-}
-
-func hcrash() {
- if Debug.h != 0 {
- flusherrors()
- if outfile != "" {
- os.Remove(outfile)
- }
- var x *int
- *x = 0
- }
-}
-
-func linestr(pos src.XPos) string {
- return Ctxt.OutermostPos(pos).Format(Debug.C == 0, Debug.L == 1)
-}
-
-// lasterror keeps track of the most recently issued error.
-// It is used to avoid multiple error messages on the same
-// line.
-var lasterror struct {
- syntax src.XPos // source position of last syntax error
- other src.XPos // source position of last non-syntax error
- msg string // error message of last non-syntax error
-}
-
-// sameline reports whether two positions a, b are on the same line.
-func sameline(a, b src.XPos) bool {
- p := Ctxt.PosTable.Pos(a)
- q := Ctxt.PosTable.Pos(b)
- return p.Base() == q.Base() && p.Line() == q.Line()
-}
-
-func yyerrorl(pos src.XPos, format string, args ...interface{}) {
- msg := fmt.Sprintf(format, args...)
-
- if strings.HasPrefix(msg, "syntax error") {
- nsyntaxerrors++
- // only one syntax error per line, no matter what error
- if sameline(lasterror.syntax, pos) {
- return
- }
- lasterror.syntax = pos
- } else {
- // only one of multiple equal non-syntax errors per line
- // (flusherrors shows only one of them, so we filter them
- // here as best as we can (they may not appear in order)
- // so that we don't count them here and exit early, and
- // then have nothing to show for it.)
- if sameline(lasterror.other, pos) && lasterror.msg == msg {
- return
- }
- lasterror.other = pos
- lasterror.msg = msg
- }
-
- adderr(pos, "%s", msg)
-
- hcrash()
- nerrors++
- if nsavederrors+nerrors >= 10 && Debug.e == 0 {
- flusherrors()
- fmt.Printf("%v: too many errors\n", linestr(pos))
- errorexit()
- }
-}
-
-func yyerrorv(lang string, format string, args ...interface{}) {
- what := fmt.Sprintf(format, args...)
- yyerrorl(lineno, "%s requires %s or later (-lang was set to %s; check go.mod)", what, lang, flag_lang)
-}
-
-func yyerror(format string, args ...interface{}) {
- yyerrorl(lineno, format, args...)
-}
-
-func Warn(fmt_ string, args ...interface{}) {
- Warnl(lineno, fmt_, args...)
-}
-
-func Warnl(line src.XPos, fmt_ string, args ...interface{}) {
- adderr(line, fmt_, args...)
- if Debug.m != 0 {
- flusherrors()
- }
-}
-
-func Fatalf(fmt_ string, args ...interface{}) {
- flusherrors()
-
- if Debug_panic != 0 || nsavederrors+nerrors == 0 {
- fmt.Printf("%v: internal compiler error: ", linestr(lineno))
- fmt.Printf(fmt_, args...)
- fmt.Printf("\n")
-
- // If this is a released compiler version, ask for a bug report.
- if strings.HasPrefix(objabi.Version, "go") {
- fmt.Printf("\n")
- fmt.Printf("Please file a bug report including a short program that triggers the error.\n")
- fmt.Printf("https://golang.org/issue/new\n")
- } else {
- // Not a release; dump a stack trace, too.
- fmt.Println()
- os.Stdout.Write(debug.Stack())
- fmt.Println()
- }
- }
-
- hcrash()
- errorexit()
-}
-
// hasUniquePos reports whether n has a unique position that can be
// used for reporting error messages.
//
// It's primarily used to distinguish references to named objects,
// whose Pos will point back to their declaration position rather than
// their usage position.
-func hasUniquePos(n *Node) bool {
- switch n.Op {
- case ONAME, OPACK:
+func hasUniquePos(n ir.Node) bool {
+ switch n.Op() {
+ case ir.ONAME, ir.OPACK:
return false
- case OLITERAL, OTYPE:
- if n.Sym != nil {
+ case ir.OLITERAL, ir.ONIL, ir.OTYPE:
+ if n.Sym() != nil {
return false
}
}
- if !n.Pos.IsKnown() {
- if Debug.K != 0 {
- Warn("setlineno: unknown position (line 0)")
+ if !n.Pos().IsKnown() {
+ if base.Flag.K != 0 {
+ base.Warn("setlineno: unknown position (line 0)")
}
return false
}
@@ -231,16 +60,16 @@ func hasUniquePos(n *Node) bool {
return true
}
-func setlineno(n *Node) src.XPos {
- lno := lineno
+func setlineno(n ir.Node) src.XPos {
+ lno := base.Pos
if n != nil && hasUniquePos(n) {
- lineno = n.Pos
+ base.Pos = n.Pos()
}
return lno
}
func lookup(name string) *types.Sym {
- return localpkg.Lookup(name)
+ return ir.LocalPkg.Lookup(name)
}
// lookupN looks up the symbol starting with prefix and ending with
@@ -249,7 +78,7 @@ func lookupN(prefix string, n int) *types.Sym {
var buf [20]byte // plenty long enough for all current users
copy(buf[:], prefix)
b := strconv.AppendInt(buf[:len(prefix)], int64(n), 10)
- return localpkg.LookupBytes(b)
+ return ir.LocalPkg.LookupBytes(b)
}
// autolabel generates a new Name node for use with
@@ -260,20 +89,20 @@ func lookupN(prefix string, n int) *types.Sym {
// user labels.
func autolabel(prefix string) *types.Sym {
if prefix[0] != '.' {
- Fatalf("autolabel prefix must start with '.', have %q", prefix)
+ base.Fatalf("autolabel prefix must start with '.', have %q", prefix)
}
fn := Curfn
if Curfn == nil {
- Fatalf("autolabel outside function")
+ base.Fatalf("autolabel outside function")
}
- n := fn.Func.Label
- fn.Func.Label++
+ n := fn.Func().Label
+ fn.Func().Label++
return lookupN(prefix, int(n))
}
// find all the exported symbols in package opkg
// and make them available in the current package
-func importdot(opkg *types.Pkg, pack *Node) {
+func importdot(opkg *types.Pkg, pack ir.Node) {
n := 0
for _, s := range opkg.Syms {
if s.Def == nil {
@@ -285,138 +114,48 @@ func importdot(opkg *types.Pkg, pack *Node) {
s1 := lookup(s.Name)
if s1.Def != nil {
pkgerror := fmt.Sprintf("during import %q", opkg.Path)
- redeclare(lineno, s1, pkgerror)
+ redeclare(base.Pos, s1, pkgerror)
continue
}
s1.Def = s.Def
s1.Block = s.Block
- if asNode(s1.Def).Name == nil {
- Dump("s1def", asNode(s1.Def))
- Fatalf("missing Name")
+ if ir.AsNode(s1.Def).Name() == nil {
+ ir.Dump("s1def", ir.AsNode(s1.Def))
+ base.Fatalf("missing Name")
}
- asNode(s1.Def).Name.Pack = pack
+ ir.AsNode(s1.Def).Name().Pack = pack
s1.Origpkg = opkg
n++
}
if n == 0 {
// can't possibly be used - there were no symbols
- yyerrorl(pack.Pos, "imported and not used: %q", opkg.Path)
+ base.ErrorfAt(pack.Pos(), "imported and not used: %q", opkg.Path)
}
}
-func nod(op Op, nleft, nright *Node) *Node {
- return nodl(lineno, op, nleft, nright)
-}
-
-func nodl(pos src.XPos, op Op, nleft, nright *Node) *Node {
- var n *Node
- switch op {
- case OCLOSURE, ODCLFUNC:
- var x struct {
- n Node
- f Func
- }
- n = &x.n
- n.Func = &x.f
- case ONAME:
- Fatalf("use newname instead")
- case OLABEL, OPACK:
- var x struct {
- n Node
- m Name
- }
- n = &x.n
- n.Name = &x.m
- default:
- n = new(Node)
- }
- n.Op = op
- n.Left = nleft
- n.Right = nright
- n.Pos = pos
- n.Xoffset = BADWIDTH
- n.Orig = n
- return n
-}
-
// newname returns a new ONAME Node associated with symbol s.
-func newname(s *types.Sym) *Node {
- n := newnamel(lineno, s)
- n.Name.Curfn = Curfn
- return n
-}
-
-// newnamel returns a new ONAME Node associated with symbol s at position pos.
-// The caller is responsible for setting n.Name.Curfn.
-func newnamel(pos src.XPos, s *types.Sym) *Node {
- if s == nil {
- Fatalf("newnamel nil")
- }
-
- var x struct {
- n Node
- m Name
- p Param
- }
- n := &x.n
- n.Name = &x.m
- n.Name.Param = &x.p
-
- n.Op = ONAME
- n.Pos = pos
- n.Orig = n
-
- n.Sym = s
+func NewName(s *types.Sym) ir.Node {
+ n := ir.NewNameAt(base.Pos, s)
+ n.Name().Curfn = Curfn
return n
}
// nodSym makes a Node with Op op and with the Left field set to left
// and the Sym field set to sym. This is for ODOT and friends.
-func nodSym(op Op, left *Node, sym *types.Sym) *Node {
- return nodlSym(lineno, op, left, sym)
+func nodSym(op ir.Op, left ir.Node, sym *types.Sym) ir.Node {
+ return nodlSym(base.Pos, op, left, sym)
}
// nodlSym makes a Node with position Pos, with Op op, and with the Left field set to left
// and the Sym field set to sym. This is for ODOT and friends.
-func nodlSym(pos src.XPos, op Op, left *Node, sym *types.Sym) *Node {
- n := nodl(pos, op, left, nil)
- n.Sym = sym
+func nodlSym(pos src.XPos, op ir.Op, left ir.Node, sym *types.Sym) ir.Node {
+ n := ir.NodAt(pos, op, left, nil)
+ n.SetSym(sym)
return n
}
-// rawcopy returns a shallow copy of n.
-// Note: copy or sepcopy (rather than rawcopy) is usually the
-// correct choice (see comment with Node.copy, below).
-func (n *Node) rawcopy() *Node {
- copy := *n
- return &copy
-}
-
-// sepcopy returns a separate shallow copy of n, with the copy's
-// Orig pointing to itself.
-func (n *Node) sepcopy() *Node {
- copy := *n
- copy.Orig = &copy
- return &copy
-}
-
-// copy returns shallow copy of n and adjusts the copy's Orig if
-// necessary: In general, if n.Orig points to itself, the copy's
-// Orig should point to itself as well. Otherwise, if n is modified,
-// the copy's Orig node appears modified, too, and then doesn't
-// represent the original node anymore.
-// (This caused the wrong complit Op to be used when printing error
-// messages; see issues #26855, #27765).
-func (n *Node) copy() *Node {
- copy := *n
- if n.Orig == n {
- copy.Orig = &copy
- }
- return &copy
-}
-
// methcmp sorts methods by symbol.
type methcmp []*types.Field
@@ -424,67 +163,60 @@ func (x methcmp) Len() int { return len(x) }
func (x methcmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x methcmp) Less(i, j int) bool { return x[i].Sym.Less(x[j].Sym) }
-func nodintconst(v int64) *Node {
- u := new(Mpint)
- u.SetInt64(v)
- return nodlit(Val{u})
+func nodintconst(v int64) ir.Node {
+ return ir.NewLiteral(constant.MakeInt64(v))
}
-func nodnil() *Node {
- return nodlit(Val{new(NilVal)})
+func nodnil() ir.Node {
+ n := ir.Nod(ir.ONIL, nil, nil)
+ n.SetType(types.Types[types.TNIL])
+ return n
}
-func nodbool(b bool) *Node {
- return nodlit(Val{b})
+func nodbool(b bool) ir.Node {
+ return ir.NewLiteral(constant.MakeBool(b))
}
-func nodstr(s string) *Node {
- return nodlit(Val{s})
+func nodstr(s string) ir.Node {
+ return ir.NewLiteral(constant.MakeString(s))
}
// treecopy recursively copies n, with the exception of
// ONAME, OLITERAL, OTYPE, and ONONAME leaves.
// If pos.IsKnown(), it sets the source position of newly
// allocated nodes to pos.
-func treecopy(n *Node, pos src.XPos) *Node {
+func treecopy(n ir.Node, pos src.XPos) ir.Node {
if n == nil {
return nil
}
- switch n.Op {
+ switch n.Op() {
default:
- m := n.sepcopy()
- m.Left = treecopy(n.Left, pos)
- m.Right = treecopy(n.Right, pos)
- m.List.Set(listtreecopy(n.List.Slice(), pos))
+ m := ir.SepCopy(n)
+ m.SetLeft(treecopy(n.Left(), pos))
+ m.SetRight(treecopy(n.Right(), pos))
+ m.PtrList().Set(listtreecopy(n.List().Slice(), pos))
if pos.IsKnown() {
- m.Pos = pos
+ m.SetPos(pos)
}
- if m.Name != nil && n.Op != ODCLFIELD {
- Dump("treecopy", n)
- Fatalf("treecopy Name")
+ if m.Name() != nil && n.Op() != ir.ODCLFIELD {
+ ir.Dump("treecopy", n)
+ base.Fatalf("treecopy Name")
}
return m
- case OPACK:
+ case ir.OPACK:
// OPACK nodes are never valid in const value declarations,
// but allow them like any other declared symbol to avoid
// crashing (golang.org/issue/11361).
fallthrough
- case ONAME, ONONAME, OLITERAL, OTYPE:
+ case ir.ONAME, ir.ONONAME, ir.OLITERAL, ir.ONIL, ir.OTYPE:
return n
}
}
-// isNil reports whether n represents the universal untyped zero value "nil".
-func (n *Node) isNil() bool {
- // Check n.Orig because constant propagation may produce typed nil constants,
- // which don't exist in the Go spec.
- return Isconst(n.Orig, CTNIL)
-}
-
func isptrto(t *types.Type, et types.EType) bool {
if t == nil {
return false
@@ -502,13 +234,6 @@ func isptrto(t *types.Type, et types.EType) bool {
return true
}
-func (n *Node) isBlank() bool {
- if n == nil {
- return false
- }
- return n.Sym.IsBlank()
-}
-
// methtype returns the underlying type, if any,
// that owns methods with receiver parameter t.
// The result is either a named type or an anonymous struct.
@@ -538,7 +263,7 @@ func methtype(t *types.Type) *types.Type {
return t
}
switch t.Etype {
- case TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRING, TSTRUCT:
+ case types.TARRAY, types.TCHAN, types.TFUNC, types.TMAP, types.TSLICE, types.TSTRING, types.TSTRUCT:
return t
}
return nil
@@ -548,17 +273,17 @@ func methtype(t *types.Type) *types.Type {
// If so, return op code to use in conversion.
// If not, return OXXX. In this case, the string return parameter may
// hold a reason why. In all other cases, it'll be the empty string.
-func assignop(src, dst *types.Type) (Op, string) {
+func assignop(src, dst *types.Type) (ir.Op, string) {
if src == dst {
- return OCONVNOP, ""
+ return ir.OCONVNOP, ""
}
- if src == nil || dst == nil || src.Etype == TFORW || dst.Etype == TFORW || src.Orig == nil || dst.Orig == nil {
- return OXXX, ""
+ if src == nil || dst == nil || src.Etype == types.TFORW || dst.Etype == types.TFORW || src.Orig == nil || dst.Orig == nil {
+ return ir.OXXX, ""
}
// 1. src type is identical to dst.
if types.Identical(src, dst) {
- return OCONVNOP, ""
+ return ir.OCONVNOP, ""
}
// 2. src and dst have identical underlying types
@@ -572,31 +297,31 @@ func assignop(src, dst *types.Type) (Op, string) {
if src.IsEmptyInterface() {
// Conversion between two empty interfaces
// requires no code.
- return OCONVNOP, ""
+ return ir.OCONVNOP, ""
}
if (src.Sym == nil || dst.Sym == nil) && !src.IsInterface() {
// Conversion between two types, at least one unnamed,
// needs no conversion. The exception is nonempty interfaces
// which need to have their itab updated.
- return OCONVNOP, ""
+ return ir.OCONVNOP, ""
}
}
// 3. dst is an interface type and src implements dst.
- if dst.IsInterface() && src.Etype != TNIL {
+ if dst.IsInterface() && src.Etype != types.TNIL {
var missing, have *types.Field
var ptr int
if implements(src, dst, &missing, &have, &ptr) {
- return OCONVIFACE, ""
+ return ir.OCONVIFACE, ""
}
// we'll have complained about this method anyway, suppress spurious messages.
if have != nil && have.Sym == missing.Sym && (have.Type.Broke() || missing.Type.Broke()) {
- return OCONVIFACE, ""
+ return ir.OCONVIFACE, ""
}
var why string
- if isptrto(src, TINTER) {
+ if isptrto(src, types.TINTER) {
why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", src)
} else if have != nil && have.Sym == missing.Sym && have.Nointerface() {
why = fmt.Sprintf(":\n\t%v does not implement %v (%v method is marked 'nointerface')", src, dst, missing.Sym)
@@ -612,22 +337,22 @@ func assignop(src, dst *types.Type) (Op, string) {
why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)", src, dst, missing.Sym)
}
- return OXXX, why
+ return ir.OXXX, why
}
- if isptrto(dst, TINTER) {
+ if isptrto(dst, types.TINTER) {
why := fmt.Sprintf(":\n\t%v is pointer to interface, not interface", dst)
- return OXXX, why
+ return ir.OXXX, why
}
- if src.IsInterface() && dst.Etype != TBLANK {
+ if src.IsInterface() && dst.Etype != types.TBLANK {
var missing, have *types.Field
var ptr int
var why string
if implements(dst, src, &missing, &have, &ptr) {
why = ": need type assertion"
}
- return OXXX, why
+ return ir.OXXX, why
}
// 4. src is a bidirectional channel value, dst is a channel type,
@@ -635,31 +360,31 @@ func assignop(src, dst *types.Type) (Op, string) {
// either src or dst is not a named type.
if src.IsChan() && src.ChanDir() == types.Cboth && dst.IsChan() {
if types.Identical(src.Elem(), dst.Elem()) && (src.Sym == nil || dst.Sym == nil) {
- return OCONVNOP, ""
+ return ir.OCONVNOP, ""
}
}
// 5. src is the predeclared identifier nil and dst is a nillable type.
- if src.Etype == TNIL {
+ if src.Etype == types.TNIL {
switch dst.Etype {
- case TPTR,
- TFUNC,
- TMAP,
- TCHAN,
- TINTER,
- TSLICE:
- return OCONVNOP, ""
+ case types.TPTR,
+ types.TFUNC,
+ types.TMAP,
+ types.TCHAN,
+ types.TINTER,
+ types.TSLICE:
+ return ir.OCONVNOP, ""
}
}
// 6. rule about untyped constants - already converted by defaultlit.
// 7. Any typed value can be assigned to the blank identifier.
- if dst.Etype == TBLANK {
- return OCONVNOP, ""
+ if dst.Etype == types.TBLANK {
+ return ir.OCONVNOP, ""
}
- return OXXX, ""
+ return ir.OXXX, ""
}
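
Source-level examples of which assignop rule fires for a few common assignments (a hedged illustration; the OCONV* names are the ops the function returns):

package main

import "fmt"

func main() {
	c := make(chan int, 1)
	var r <-chan int = c // rule 4: bidirectional channel value -> channel type (OCONVNOP)
	var p *int = nil     // rule 5: predeclared nil -> nillable type (OCONVNOP)
	x := 42
	var e interface{} = x // rule 3: dst is an interface that src implements (OCONVIFACE)
	_ = e                 // rule 7: any typed value -> blank identifier (OCONVNOP)
	fmt.Println(r == c, p == nil)
}
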
// Can we convert a value of type src to a value of type dst?
@@ -667,12 +392,12 @@ func assignop(src, dst *types.Type) (Op, string) {
// If not, return OXXX. In this case, the string return parameter may
// hold a reason why. In all other cases, it'll be the empty string.
// srcConstant indicates whether the value of type src is a constant.
-func convertop(srcConstant bool, src, dst *types.Type) (Op, string) {
+func convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) {
if src == dst {
- return OCONVNOP, ""
+ return ir.OCONVNOP, ""
}
if src == nil || dst == nil {
- return OXXX, ""
+ return ir.OXXX, ""
}
// Conversions from regular to go:notinheap are not allowed
@@ -681,17 +406,17 @@ func convertop(srcConstant bool, src, dst *types.Type) (Op, string) {
// (a) Disallow (*T) to (*U) where T is go:notinheap but U isn't.
if src.IsPtr() && dst.IsPtr() && dst.Elem().NotInHeap() && !src.Elem().NotInHeap() {
why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable), but %v is not", dst.Elem(), src.Elem())
- return OXXX, why
+ return ir.OXXX, why
}
// (b) Disallow string to []T where T is go:notinheap.
if src.IsString() && dst.IsSlice() && dst.Elem().NotInHeap() && (dst.Elem().Etype == types.Bytetype.Etype || dst.Elem().Etype == types.Runetype.Etype) {
why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable)", dst.Elem())
- return OXXX, why
+ return ir.OXXX, why
}
// 1. src can be assigned to dst.
op, why := assignop(src, dst)
- if op != OXXX {
+ if op != ir.OXXX {
return op, why
}
@@ -700,57 +425,57 @@ func convertop(srcConstant bool, src, dst *types.Type) (Op, string) {
// with the good message from assignop.
// Otherwise clear the error.
if src.IsInterface() || dst.IsInterface() {
- return OXXX, why
+ return ir.OXXX, why
}
// 2. Ignoring struct tags, src and dst have identical underlying types.
if types.IdenticalIgnoreTags(src.Orig, dst.Orig) {
- return OCONVNOP, ""
+ return ir.OCONVNOP, ""
}
// 3. src and dst are unnamed pointer types and, ignoring struct tags,
// their base types have identical underlying types.
if src.IsPtr() && dst.IsPtr() && src.Sym == nil && dst.Sym == nil {
if types.IdenticalIgnoreTags(src.Elem().Orig, dst.Elem().Orig) {
- return OCONVNOP, ""
+ return ir.OCONVNOP, ""
}
}
// 4. src and dst are both integer or floating point types.
if (src.IsInteger() || src.IsFloat()) && (dst.IsInteger() || dst.IsFloat()) {
if simtype[src.Etype] == simtype[dst.Etype] {
- return OCONVNOP, ""
+ return ir.OCONVNOP, ""
}
- return OCONV, ""
+ return ir.OCONV, ""
}
// 5. src and dst are both complex types.
if src.IsComplex() && dst.IsComplex() {
if simtype[src.Etype] == simtype[dst.Etype] {
- return OCONVNOP, ""
+ return ir.OCONVNOP, ""
}
- return OCONV, ""
+ return ir.OCONV, ""
}
// Special case for constant conversions: any numeric
// conversion is potentially okay. We'll validate further
// within evconst. See #38117.
if srcConstant && (src.IsInteger() || src.IsFloat() || src.IsComplex()) && (dst.IsInteger() || dst.IsFloat() || dst.IsComplex()) {
- return OCONV, ""
+ return ir.OCONV, ""
}
// 6. src is an integer or has type []byte or []rune
// and dst is a string type.
if src.IsInteger() && dst.IsString() {
- return ORUNESTR, ""
+ return ir.ORUNESTR, ""
}
if src.IsSlice() && dst.IsString() {
if src.Elem().Etype == types.Bytetype.Etype {
- return OBYTES2STR, ""
+ return ir.OBYTES2STR, ""
}
if src.Elem().Etype == types.Runetype.Etype {
- return ORUNES2STR, ""
+ return ir.ORUNES2STR, ""
}
}
@@ -758,202 +483,128 @@ func convertop(srcConstant bool, src, dst *types.Type) (Op, string) {
// String to slice.
if src.IsString() && dst.IsSlice() {
if dst.Elem().Etype == types.Bytetype.Etype {
- return OSTR2BYTES, ""
+ return ir.OSTR2BYTES, ""
}
if dst.Elem().Etype == types.Runetype.Etype {
- return OSTR2RUNES, ""
+ return ir.OSTR2RUNES, ""
}
}
// 8. src is a pointer or uintptr and dst is unsafe.Pointer.
if (src.IsPtr() || src.IsUintptr()) && dst.IsUnsafePtr() {
- return OCONVNOP, ""
+ return ir.OCONVNOP, ""
}
// 9. src is unsafe.Pointer and dst is a pointer or uintptr.
if src.IsUnsafePtr() && (dst.IsPtr() || dst.IsUintptr()) {
- return OCONVNOP, ""
+ return ir.OCONVNOP, ""
}
// src is map and dst is a pointer to corresponding hmap.
// This rule is needed for the implementation detail that
// go gc maps are implemented as a pointer to a hmap struct.
- if src.Etype == TMAP && dst.IsPtr() &&
+ if src.Etype == types.TMAP && dst.IsPtr() &&
src.MapType().Hmap == dst.Elem() {
- return OCONVNOP, ""
+ return ir.OCONVNOP, ""
}
- return OXXX, ""
+ return ir.OXXX, ""
}
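
And the string/slice conversion rules at the source level, annotated with the op convertop selects for each (illustrative usage example):

package main

import "fmt"

func main() {
	b := []byte("go")
	s := string(b)         // rule 6: []byte -> string (OBYTES2STR)
	g := string(rune(103)) // rule 6: integer -> string (ORUNESTR)
	u := []rune(s)         // rule 7: string -> []rune (OSTR2RUNES)
	t := []byte(s)         // rule 7: string -> []byte (OSTR2BYTES)
	fmt.Println(s, g, u, t)
}
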
-func assignconv(n *Node, t *types.Type, context string) *Node {
+func assignconv(n ir.Node, t *types.Type, context string) ir.Node {
return assignconvfn(n, t, func() string { return context })
}
// Convert node n for assignment to type t.
-func assignconvfn(n *Node, t *types.Type, context func() string) *Node {
- if n == nil || n.Type == nil || n.Type.Broke() {
+func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node {
+ if n == nil || n.Type() == nil || n.Type().Broke() {
return n
}
- if t.Etype == TBLANK && n.Type.Etype == TNIL {
- yyerror("use of untyped nil")
+ if t.Etype == types.TBLANK && n.Type().Etype == types.TNIL {
+ base.Errorf("use of untyped nil")
}
n = convlit1(n, t, false, context)
- if n.Type == nil {
+ if n.Type() == nil {
return n
}
- if t.Etype == TBLANK {
+ if t.Etype == types.TBLANK {
return n
}
// Convert ideal bool from comparison to plain bool
// if the next step is non-bool (like interface{}).
- if n.Type == types.UntypedBool && !t.IsBoolean() {
- if n.Op == ONAME || n.Op == OLITERAL {
- r := nod(OCONVNOP, n, nil)
- r.Type = types.Types[TBOOL]
+ if n.Type() == types.UntypedBool && !t.IsBoolean() {
+ if n.Op() == ir.ONAME || n.Op() == ir.OLITERAL {
+ r := ir.Nod(ir.OCONVNOP, n, nil)
+ r.SetType(types.Types[types.TBOOL])
r.SetTypecheck(1)
r.SetImplicit(true)
n = r
}
}
- if types.Identical(n.Type, t) {
+ if types.Identical(n.Type(), t) {
return n
}
- op, why := assignop(n.Type, t)
- if op == OXXX {
- yyerror("cannot use %L as type %v in %s%s", n, t, context(), why)
- op = OCONV
+ op, why := assignop(n.Type(), t)
+ if op == ir.OXXX {
+ base.Errorf("cannot use %L as type %v in %s%s", n, t, context(), why)
+ op = ir.OCONV
}
- r := nod(op, n, nil)
- r.Type = t
+ r := ir.Nod(op, n, nil)
+ r.SetType(t)
r.SetTypecheck(1)
r.SetImplicit(true)
- r.Orig = n.Orig
+ r.SetOrig(n.Orig())
return r
}
-// IsMethod reports whether n is a method.
-// n must be a function or a method.
-func (n *Node) IsMethod() bool {
- return n.Type.Recv() != nil
-}
-
-// SliceBounds returns n's slice bounds: low, high, and max in expr[low:high:max].
-// n must be a slice expression. max is nil if n is a simple slice expression.
-func (n *Node) SliceBounds() (low, high, max *Node) {
- if n.List.Len() == 0 {
- return nil, nil, nil
- }
-
- switch n.Op {
- case OSLICE, OSLICEARR, OSLICESTR:
- s := n.List.Slice()
- return s[0], s[1], nil
- case OSLICE3, OSLICE3ARR:
- s := n.List.Slice()
- return s[0], s[1], s[2]
- }
- Fatalf("SliceBounds op %v: %v", n.Op, n)
- return nil, nil, nil
-}
-
-// SetSliceBounds sets n's slice bounds, where n is a slice expression.
-// n must be a slice expression. If max is non-nil, n must be a full slice expression.
-func (n *Node) SetSliceBounds(low, high, max *Node) {
- switch n.Op {
- case OSLICE, OSLICEARR, OSLICESTR:
- if max != nil {
- Fatalf("SetSliceBounds %v given three bounds", n.Op)
- }
- s := n.List.Slice()
- if s == nil {
- if low == nil && high == nil {
- return
- }
- n.List.Set2(low, high)
- return
- }
- s[0] = low
- s[1] = high
- return
- case OSLICE3, OSLICE3ARR:
- s := n.List.Slice()
- if s == nil {
- if low == nil && high == nil && max == nil {
- return
- }
- n.List.Set3(low, high, max)
- return
- }
- s[0] = low
- s[1] = high
- s[2] = max
- return
- }
- Fatalf("SetSliceBounds op %v: %v", n.Op, n)
-}
-
-// IsSlice3 reports whether o is a slice3 op (OSLICE3, OSLICE3ARR).
-// o must be a slicing op.
-func (o Op) IsSlice3() bool {
- switch o {
- case OSLICE, OSLICEARR, OSLICESTR:
- return false
- case OSLICE3, OSLICE3ARR:
- return true
- }
- Fatalf("IsSlice3 op %v", o)
- return false
-}
-
// backingArrayPtrLen extracts the pointer and length from a slice or string.
// This constructs two nodes referring to n, so n must be a cheapexpr.
-func (n *Node) backingArrayPtrLen() (ptr, len *Node) {
- var init Nodes
+func backingArrayPtrLen(n ir.Node) (ptr, len ir.Node) {
+ var init ir.Nodes
c := cheapexpr(n, &init)
if c != n || init.Len() != 0 {
- Fatalf("backingArrayPtrLen not cheap: %v", n)
+ base.Fatalf("backingArrayPtrLen not cheap: %v", n)
}
- ptr = nod(OSPTR, n, nil)
- if n.Type.IsString() {
- ptr.Type = types.Types[TUINT8].PtrTo()
+ ptr = ir.Nod(ir.OSPTR, n, nil)
+ if n.Type().IsString() {
+ ptr.SetType(types.Types[types.TUINT8].PtrTo())
} else {
- ptr.Type = n.Type.Elem().PtrTo()
+ ptr.SetType(n.Type().Elem().PtrTo())
}
- len = nod(OLEN, n, nil)
- len.Type = types.Types[TINT]
+ len = ir.Nod(ir.OLEN, n, nil)
+ len.SetType(types.Types[types.TINT])
return ptr, len
}
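
A rough user-level analogue of the two nodes built here, assuming the usual slice header layout; this sketch goes through package unsafe, whereas the compiler emits OSPTR/OLEN directly:

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        s := []byte("hello")
        ptr := unsafe.Pointer(&s[0]) // the data word that OSPTR denotes
        n := len(s)                  // the length word that OLEN denotes
        fmt.Println(ptr, n)
    }
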
// labeledControl returns the control flow Node (for, switch, select)
// associated with the label n, if any.
-func (n *Node) labeledControl() *Node {
- if n.Op != OLABEL {
- Fatalf("labeledControl %v", n.Op)
+func labeledControl(n ir.Node) ir.Node {
+ if n.Op() != ir.OLABEL {
+ base.Fatalf("labeledControl %v", n.Op())
}
- ctl := n.Name.Defn
+ ctl := n.Name().Defn
if ctl == nil {
return nil
}
- switch ctl.Op {
- case OFOR, OFORUNTIL, OSWITCH, OSELECT:
+ switch ctl.Op() {
+ case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OSELECT:
return ctl
}
return nil
}
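
The source shape involved: the OLABEL node's Name().Defn points at the statement it labels, which is the mapping labeledControl recovers. For example:

    package main

    import "fmt"

    func main() {
    loop: // OLABEL whose Defn is the OFOR statement below
        for i := 0; i < 5; i++ {
            if i == 3 {
                break loop
            }
            fmt.Println(i)
        }
    }
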
-func syslook(name string) *Node {
+func syslook(name string) ir.Node {
s := Runtimepkg.Lookup(name)
if s == nil || s.Def == nil {
- Fatalf("syslook: can't find runtime.%s", name)
+ base.Fatalf("syslook: can't find runtime.%s", name)
}
- return asNode(s.Def)
+ return ir.AsNode(s.Def)
}
// typehash computes a hash value for type t to use in type switch statements.
@@ -967,64 +618,64 @@ func typehash(t *types.Type) uint32 {
// updateHasCall checks whether expression n contains any function
// calls and sets the n.HasCall flag if so.
-func updateHasCall(n *Node) {
+func updateHasCall(n ir.Node) {
if n == nil {
return
}
n.SetHasCall(calcHasCall(n))
}
-func calcHasCall(n *Node) bool {
- if n.Ninit.Len() != 0 {
+func calcHasCall(n ir.Node) bool {
+ if n.Init().Len() != 0 {
// TODO(mdempsky): This seems overly conservative.
return true
}
- switch n.Op {
- case OLITERAL, ONAME, OTYPE:
+ switch n.Op() {
+ case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OTYPE:
if n.HasCall() {
- Fatalf("OLITERAL/ONAME/OTYPE should never have calls: %+v", n)
+ base.Fatalf("OLITERAL/ONAME/OTYPE should never have calls: %+v", n)
}
return false
- case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER:
+ case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
return true
- case OANDAND, OOROR:
+ case ir.OANDAND, ir.OOROR:
// hard with instrumented code
if instrumenting {
return true
}
- case OINDEX, OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR,
- ODEREF, ODOTPTR, ODOTTYPE, ODIV, OMOD:
+ case ir.OINDEX, ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR,
+ ir.ODEREF, ir.ODOTPTR, ir.ODOTTYPE, ir.ODIV, ir.OMOD:
// These ops might panic; make sure they are done
// before we start marshaling args for a call. See issue 16760.
return true
// When using soft-float, these ops might be rewritten to function calls,
// so we ensure they are evaluated first.
- case OADD, OSUB, ONEG, OMUL:
- if thearch.SoftFloat && (isFloat[n.Type.Etype] || isComplex[n.Type.Etype]) {
+ case ir.OADD, ir.OSUB, ir.ONEG, ir.OMUL:
+ if thearch.SoftFloat && (isFloat[n.Type().Etype] || isComplex[n.Type().Etype]) {
return true
}
- case OLT, OEQ, ONE, OLE, OGE, OGT:
- if thearch.SoftFloat && (isFloat[n.Left.Type.Etype] || isComplex[n.Left.Type.Etype]) {
+ case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
+ if thearch.SoftFloat && (isFloat[n.Left().Type().Etype] || isComplex[n.Left().Type().Etype]) {
return true
}
- case OCONV:
- if thearch.SoftFloat && ((isFloat[n.Type.Etype] || isComplex[n.Type.Etype]) || (isFloat[n.Left.Type.Etype] || isComplex[n.Left.Type.Etype])) {
+ case ir.OCONV:
+ if thearch.SoftFloat && ((isFloat[n.Type().Etype] || isComplex[n.Type().Etype]) || (isFloat[n.Left().Type().Etype] || isComplex[n.Left().Type().Etype])) {
return true
}
}
- if n.Left != nil && n.Left.HasCall() {
+ if n.Left() != nil && n.Left().HasCall() {
return true
}
- if n.Right != nil && n.Right.HasCall() {
+ if n.Right() != nil && n.Right().HasCall() {
return true
}
return false
}
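
The issue 16760 class above is easiest to see at the source level: a possibly-panicking operand must be fully evaluated before argument marshaling for a later call begins, so the panic is observed at the right point in left-to-right order. A hypothetical illustration:

    package main

    import "fmt"

    func g() int { fmt.Println("g called"); return 1 }

    func f(x, y int) int { return x + y }

    func main() {
        defer func() { fmt.Println("recovered:", recover()) }()
        a, b := 10, 0
        // a/b must panic before g runs; treating ODIV as "has call"
        // keeps that ordering, so "g called" is never printed.
        fmt.Println(f(a/b, g()))
    }
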
-func badtype(op Op, tl, tr *types.Type) {
+func badtype(op ir.Op, tl, tr *types.Type) {
var s string
if tl != nil {
s += fmt.Sprintf("\n\t%v", tl)
@@ -1042,101 +693,101 @@ func badtype(op Op, tl, tr *types.Type) {
}
}
- yyerror("illegal types for operand: %v%s", op, s)
+ base.Errorf("illegal types for operand: %v%s", op, s)
}
// brcom returns !(op).
// For example, brcom(==) is !=.
-func brcom(op Op) Op {
+func brcom(op ir.Op) ir.Op {
switch op {
- case OEQ:
- return ONE
- case ONE:
- return OEQ
- case OLT:
- return OGE
- case OGT:
- return OLE
- case OLE:
- return OGT
- case OGE:
- return OLT
- }
- Fatalf("brcom: no com for %v\n", op)
+ case ir.OEQ:
+ return ir.ONE
+ case ir.ONE:
+ return ir.OEQ
+ case ir.OLT:
+ return ir.OGE
+ case ir.OGT:
+ return ir.OLE
+ case ir.OLE:
+ return ir.OGT
+ case ir.OGE:
+ return ir.OLT
+ }
+ base.Fatalf("brcom: no com for %v\n", op)
return op
}
// brrev returns reverse(op).
// For example, brrev(<) is >.
-func brrev(op Op) Op {
+func brrev(op ir.Op) ir.Op {
switch op {
- case OEQ:
- return OEQ
- case ONE:
- return ONE
- case OLT:
- return OGT
- case OGT:
- return OLT
- case OLE:
- return OGE
- case OGE:
- return OLE
- }
- Fatalf("brrev: no rev for %v\n", op)
+ case ir.OEQ:
+ return ir.OEQ
+ case ir.ONE:
+ return ir.ONE
+ case ir.OLT:
+ return ir.OGT
+ case ir.OGT:
+ return ir.OLT
+ case ir.OLE:
+ return ir.OGE
+ case ir.OGE:
+ return ir.OLE
+ }
+ base.Fatalf("brrev: no rev for %v\n", op)
return op
}
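
The two helpers differ in what they preserve: brcom negates the comparison with the operands fixed, while brrev keeps the result but swaps the operands. Stated as source-level identities:

    package main

    import "fmt"

    func main() {
        a, b := 1, 2
        fmt.Println((a < b) == !(a >= b)) // brcom: !(<) is >=
        fmt.Println((a < b) == (b > a))   // brrev: reverse(<) is >
    }
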
// return a side-effect-free n, appending side effects to init.
// the result is assignable if n is.
-func safeexpr(n *Node, init *Nodes) *Node {
+func safeexpr(n ir.Node, init *ir.Nodes) ir.Node {
if n == nil {
return nil
}
- if n.Ninit.Len() != 0 {
- walkstmtlist(n.Ninit.Slice())
- init.AppendNodes(&n.Ninit)
+ if n.Init().Len() != 0 {
+ walkstmtlist(n.Init().Slice())
+ init.AppendNodes(n.PtrInit())
}
- switch n.Op {
- case ONAME, OLITERAL:
+ switch n.Op() {
+ case ir.ONAME, ir.OLITERAL, ir.ONIL:
return n
- case ODOT, OLEN, OCAP:
- l := safeexpr(n.Left, init)
- if l == n.Left {
+ case ir.ODOT, ir.OLEN, ir.OCAP:
+ l := safeexpr(n.Left(), init)
+ if l == n.Left() {
return n
}
- r := n.copy()
- r.Left = l
+ r := ir.Copy(n)
+ r.SetLeft(l)
r = typecheck(r, ctxExpr)
r = walkexpr(r, init)
return r
- case ODOTPTR, ODEREF:
- l := safeexpr(n.Left, init)
- if l == n.Left {
+ case ir.ODOTPTR, ir.ODEREF:
+ l := safeexpr(n.Left(), init)
+ if l == n.Left() {
return n
}
- a := n.copy()
- a.Left = l
+ a := ir.Copy(n)
+ a.SetLeft(l)
a = walkexpr(a, init)
return a
- case OINDEX, OINDEXMAP:
- l := safeexpr(n.Left, init)
- r := safeexpr(n.Right, init)
- if l == n.Left && r == n.Right {
+ case ir.OINDEX, ir.OINDEXMAP:
+ l := safeexpr(n.Left(), init)
+ r := safeexpr(n.Right(), init)
+ if l == n.Left() && r == n.Right() {
return n
}
- a := n.copy()
- a.Left = l
- a.Right = r
+ a := ir.Copy(n)
+ a.SetLeft(l)
+ a.SetRight(r)
a = walkexpr(a, init)
return a
- case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
+ case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
if isStaticCompositeLiteral(n) {
return n
}
@@ -1144,14 +795,14 @@ func safeexpr(n *Node, init *Nodes) *Node {
// make a copy; must not be used as an lvalue
if islvalue(n) {
- Fatalf("missing lvalue case in safeexpr: %v", n)
+ base.Fatalf("missing lvalue case in safeexpr: %v", n)
}
return cheapexpr(n, init)
}
-func copyexpr(n *Node, t *types.Type, init *Nodes) *Node {
+func copyexpr(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
l := temp(t)
- a := nod(OAS, l, n)
+ a := ir.Nod(ir.OAS, l, n)
a = typecheck(a, ctxStmt)
a = walkexpr(a, init)
init.Append(a)
@@ -1160,13 +811,13 @@ func copyexpr(n *Node, t *types.Type, init *Nodes) *Node {
// return a side-effect-free and cheap n, appending side effects to init.
// the result may not be assignable.
-func cheapexpr(n *Node, init *Nodes) *Node {
- switch n.Op {
- case ONAME, OLITERAL:
+func cheapexpr(n ir.Node, init *ir.Nodes) ir.Node {
+ switch n.Op() {
+ case ir.ONAME, ir.OLITERAL, ir.ONIL:
return n
}
- return copyexpr(n, n.Type, init)
+ return copyexpr(n, n.Type(), init)
}
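
What copyexpr (and hence cheapexpr) buys at the source level: the expression is evaluated once into a temporary, and every later reference is a side-effect-free read of that temp. A hand-written equivalent:

    package main

    import "fmt"

    var calls int

    func idx() int { calls++; return 0 }

    func main() {
        a := []int{10}
        // Roughly how a[idx()] += 5 must behave: the index expression
        // is hoisted into a temp and evaluated exactly once.
        tmp := idx()
        a[tmp] = a[tmp] + 5
        fmt.Println(a[0], calls) // 15 1
    }
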
// Code to resolve elided DOTs in embedded types.
@@ -1306,21 +957,21 @@ func dotpath(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) (
// find the missing fields that
// will give the shortest unique addressing.
// modify the tree with the missing type names.
-func adddot(n *Node) *Node {
- n.Left = typecheck(n.Left, ctxType|ctxExpr)
- if n.Left.Diag() {
+func adddot(n ir.Node) ir.Node {
+ n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr))
+ if n.Left().Diag() {
n.SetDiag(true)
}
- t := n.Left.Type
+ t := n.Left().Type()
if t == nil {
return n
}
- if n.Left.Op == OTYPE {
+ if n.Left().Op() == ir.OTYPE {
return n
}
- s := n.Sym
+ s := n.Sym()
if s == nil {
return n
}
@@ -1329,12 +980,12 @@ func adddot(n *Node) *Node {
case path != nil:
// rebuild elided dots
for c := len(path) - 1; c >= 0; c-- {
- n.Left = nodSym(ODOT, n.Left, path[c].field.Sym)
- n.Left.SetImplicit(true)
+ n.SetLeft(nodSym(ir.ODOT, n.Left(), path[c].field.Sym))
+ n.Left().SetImplicit(true)
}
case ambig:
- yyerror("ambiguous selector %v", n)
- n.Left = nil
+ base.Errorf("ambiguous selector %v", n)
+ n.SetLeft(nil)
}
return n
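
"Rebuilding elided dots" means a selector through an embedded field is expanded to its full path, with the inserted ODOT nodes marked implicit. At the source level:

    package main

    import "fmt"

    type Inner struct{ N int }
    type Outer struct{ Inner }

    func main() {
        var v Outer
        v.N = 7 // adddot rewrites this selector to v.Inner.N internally
        fmt.Println(v.Inner.N)
    }
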
@@ -1465,8 +1116,8 @@ func expandmeth(t *types.Type) {
}
// Given funarg struct list, return list of ODCLFIELD Node fn args.
-func structargs(tl *types.Type, mustname bool) []*Node {
- var args []*Node
+func structargs(tl *types.Type, mustname bool) []ir.Node {
+ var args []ir.Node
gen := 0
for _, t := range tl.Fields().Slice() {
s := t.Sym
@@ -1476,7 +1127,7 @@ func structargs(tl *types.Type, mustname bool) []*Node {
gen++
}
a := symfield(s, t.Type)
- a.Pos = t.Pos
+ a.SetPos(t.Pos)
a.SetIsDDD(t.IsDDD())
args = append(args, a)
}
@@ -1506,48 +1157,48 @@ func structargs(tl *types.Type, mustname bool) []*Node {
// method - M func (t T)(), a TFIELD type struct
// newnam - the eventual mangled name of this function
func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
- if false && Debug.r != 0 {
+ if false && base.Flag.LowerR != 0 {
fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", rcvr, method, newnam)
}
// Only generate (*T).M wrappers for T.M in T's own package.
if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type &&
- rcvr.Elem().Sym != nil && rcvr.Elem().Sym.Pkg != localpkg {
+ rcvr.Elem().Sym != nil && rcvr.Elem().Sym.Pkg != ir.LocalPkg {
return
}
// Only generate I.M wrappers for I in I's own package,
// but keep doing it for error.Error (was issue #29304).
- if rcvr.IsInterface() && rcvr.Sym != nil && rcvr.Sym.Pkg != localpkg && rcvr != types.Errortype {
+ if rcvr.IsInterface() && rcvr.Sym != nil && rcvr.Sym.Pkg != ir.LocalPkg && rcvr != types.Errortype {
return
}
- lineno = autogeneratedPos
- dclcontext = PEXTERN
+ base.Pos = autogeneratedPos
+ dclcontext = ir.PEXTERN
- tfn := nod(OTFUNC, nil, nil)
- tfn.Left = namedfield(".this", rcvr)
- tfn.List.Set(structargs(method.Type.Params(), true))
- tfn.Rlist.Set(structargs(method.Type.Results(), false))
+ tfn := ir.Nod(ir.OTFUNC, nil, nil)
+ tfn.SetLeft(namedfield(".this", rcvr))
+ tfn.PtrList().Set(structargs(method.Type.Params(), true))
+ tfn.PtrRlist().Set(structargs(method.Type.Results(), false))
fn := dclfunc(newnam, tfn)
- fn.Func.SetDupok(true)
+ fn.Func().SetDupok(true)
- nthis := asNode(tfn.Type.Recv().Nname)
+ nthis := ir.AsNode(tfn.Type().Recv().Nname)
methodrcvr := method.Type.Recv().Type
// generate a nil pointer check for a better error message
if rcvr.IsPtr() && rcvr.Elem() == methodrcvr {
// generating wrapper from *T to T.
- n := nod(OIF, nil, nil)
- n.Left = nod(OEQ, nthis, nodnil())
- call := nod(OCALL, syslook("panicwrap"), nil)
- n.Nbody.Set1(call)
- fn.Nbody.Append(n)
+ n := ir.Nod(ir.OIF, nil, nil)
+ n.SetLeft(ir.Nod(ir.OEQ, nthis, nodnil()))
+ call := ir.Nod(ir.OCALL, syslook("panicwrap"), nil)
+ n.PtrBody().Set1(call)
+ fn.PtrBody().Append(n)
}
- dot := adddot(nodSym(OXDOT, nthis, method.Sym))
+ dot := adddot(nodSym(ir.OXDOT, nthis, method.Sym))
// generate call
// It's not possible to use a tail call when dynamic linking on ppc64le. The
@@ -1556,42 +1207,42 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
// the TOC to the appropriate value for that module. But if it returns
// directly to the wrapper's caller, nothing will reset it to the correct
// value for that function.
- if !instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !isifacemethod(method.Type) && !(thearch.LinkArch.Name == "ppc64le" && Ctxt.Flag_dynlink) {
+ if !instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !isifacemethod(method.Type) && !(thearch.LinkArch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) {
// generate tail call: adjust pointer receiver and jump to embedded method.
- dot = dot.Left // skip final .M
+ dot = dot.Left() // skip final .M
// TODO(mdempsky): Remove dependency on dotlist.
if !dotlist[0].field.Type.IsPtr() {
- dot = nod(OADDR, dot, nil)
+ dot = ir.Nod(ir.OADDR, dot, nil)
}
- as := nod(OAS, nthis, convnop(dot, rcvr))
- fn.Nbody.Append(as)
- fn.Nbody.Append(nodSym(ORETJMP, nil, methodSym(methodrcvr, method.Sym)))
+ as := ir.Nod(ir.OAS, nthis, convnop(dot, rcvr))
+ fn.PtrBody().Append(as)
+ fn.PtrBody().Append(nodSym(ir.ORETJMP, nil, methodSym(methodrcvr, method.Sym)))
} else {
- fn.Func.SetWrapper(true) // ignore frame for panic+recover matching
- call := nod(OCALL, dot, nil)
- call.List.Set(paramNnames(tfn.Type))
- call.SetIsDDD(tfn.Type.IsVariadic())
+ fn.Func().SetWrapper(true) // ignore frame for panic+recover matching
+ call := ir.Nod(ir.OCALL, dot, nil)
+ call.PtrList().Set(paramNnames(tfn.Type()))
+ call.SetIsDDD(tfn.Type().IsVariadic())
if method.Type.NumResults() > 0 {
- n := nod(ORETURN, nil, nil)
- n.List.Set1(call)
+ n := ir.Nod(ir.ORETURN, nil, nil)
+ n.PtrList().Set1(call)
call = n
}
- fn.Nbody.Append(call)
+ fn.PtrBody().Append(call)
}
- if false && Debug.r != 0 {
- dumplist("genwrapper body", fn.Nbody)
+ if false && base.Flag.LowerR != 0 {
+ ir.DumpList("genwrapper body", fn.Body())
}
funcbody()
- if debug_dclstack != 0 {
+ if base.Debug.DclStack != 0 {
testdclstack()
}
fn = typecheck(fn, ctxStmt)
Curfn = fn
- typecheckslice(fn.Nbody.Slice(), ctxStmt)
+ typecheckslice(fn.Body().Slice(), ctxStmt)
// Inline calls within (*T).M wrappers. This is safe because we only
// generate those wrappers within the same compilation unit as (T).M.
@@ -1599,32 +1250,32 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym != nil {
inlcalls(fn)
}
- escapeFuncs([]*Node{fn}, false)
+ escapeFuncs([]ir.Node{fn}, false)
Curfn = nil
xtop = append(xtop, fn)
}
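
The nil check inserted above is observable from user code: calling a promoted value method through a nil pointer via an interface dispatches to the generated (*T).M wrapper, whose panicwrap call produces the descriptive runtime error. A sketch:

    package main

    import "fmt"

    type T struct{ n int }

    func (t T) M() int { return t.n } // the compiler also generates a (*T).M wrapper

    type I interface{ M() int }

    func main() {
        defer func() { fmt.Println("recovered:", recover()) }()
        var p *T
        var i I = p
        _ = i.M() // hits the wrapper's nil check, which calls panicwrap
    }
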
-func paramNnames(ft *types.Type) []*Node {
- args := make([]*Node, ft.NumParams())
+func paramNnames(ft *types.Type) []ir.Node {
+ args := make([]ir.Node, ft.NumParams())
for i, f := range ft.Params().FieldSlice() {
- args[i] = asNode(f.Nname)
+ args[i] = ir.AsNode(f.Nname)
}
return args
}
-func hashmem(t *types.Type) *Node {
+func hashmem(t *types.Type) ir.Node {
sym := Runtimepkg.Lookup("memhash")
- n := newname(sym)
+ n := NewName(sym)
setNodeNameFunc(n)
- n.Type = functype(nil, []*Node{
+ n.SetType(functype(nil, []ir.Node{
anonfield(types.NewPtr(t)),
- anonfield(types.Types[TUINTPTR]),
- anonfield(types.Types[TUINTPTR]),
- }, []*Node{
- anonfield(types.Types[TUINTPTR]),
- })
+ anonfield(types.Types[types.TUINTPTR]),
+ anonfield(types.Types[types.TUINTPTR]),
+ }, []ir.Node{
+ anonfield(types.Types[types.TUINTPTR]),
+ }))
return n
}
@@ -1636,7 +1287,7 @@ func ifacelookdot(s *types.Sym, t *types.Type, ignorecase bool) (m *types.Field,
path, ambig := dotpath(s, t, &m, ignorecase)
if path == nil {
if ambig {
- yyerror("%v.%v is ambiguous", t, s)
+ base.Errorf("%v.%v is ambiguous", t, s)
}
return nil, false
}
@@ -1649,7 +1300,7 @@ func ifacelookdot(s *types.Sym, t *types.Type, ignorecase bool) (m *types.Field,
}
if !m.IsMethod() {
- yyerror("%v.%v is a field, not a method", t, s)
+ base.Errorf("%v.%v is a field, not a method", t, s)
return nil, followptr
}
@@ -1720,8 +1371,8 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool
// the method does not exist for value types.
rcvr := tm.Type.Recv().Type
if rcvr.IsPtr() && !t0.IsPtr() && !followptr && !isifacemethod(tm.Type) {
- if false && Debug.r != 0 {
- yyerror("interface pointer mismatch")
+ if false && base.Flag.LowerR != 0 {
+ base.Errorf("interface pointer mismatch")
}
*m = im
@@ -1742,53 +1393,44 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool
return true
}
-func listtreecopy(l []*Node, pos src.XPos) []*Node {
- var out []*Node
+func listtreecopy(l []ir.Node, pos src.XPos) []ir.Node {
+ var out []ir.Node
for _, n := range l {
out = append(out, treecopy(n, pos))
}
return out
}
-func liststmt(l []*Node) *Node {
- n := nod(OBLOCK, nil, nil)
- n.List.Set(l)
+func liststmt(l []ir.Node) ir.Node {
+ n := ir.Nod(ir.OBLOCK, nil, nil)
+ n.PtrList().Set(l)
if len(l) != 0 {
- n.Pos = l[0].Pos
- }
- return n
-}
-
-func (l Nodes) asblock() *Node {
- n := nod(OBLOCK, nil, nil)
- n.List = l
- if l.Len() != 0 {
- n.Pos = l.First().Pos
+ n.SetPos(l[0].Pos())
}
return n
}
-func ngotype(n *Node) *types.Sym {
- if n.Type != nil {
- return typenamesym(n.Type)
+func ngotype(n ir.Node) *types.Sym {
+ if n.Type() != nil {
+ return typenamesym(n.Type())
}
return nil
}
// The result of addinit MUST be assigned back to n, e.g.
// n.Left = addinit(n.Left, init)
-func addinit(n *Node, init []*Node) *Node {
+func addinit(n ir.Node, init []ir.Node) ir.Node {
if len(init) == 0 {
return n
}
- if n.mayBeShared() {
+ if ir.MayBeShared(n) {
// Introduce OCONVNOP to hold init list.
- n = nod(OCONVNOP, n, nil)
- n.Type = n.Left.Type
+ n = ir.Nod(ir.OCONVNOP, n, nil)
+ n.SetType(n.Left().Type())
n.SetTypecheck(1)
}
- n.Ninit.Prepend(init...)
+ n.PtrInit().Prepend(init...)
n.SetHasCall(true)
return n
}
@@ -1805,40 +1447,40 @@ var reservedimports = []string{
func isbadimport(path string, allowSpace bool) bool {
if strings.Contains(path, "\x00") {
- yyerror("import path contains NUL")
+ base.Errorf("import path contains NUL")
return true
}
for _, ri := range reservedimports {
if path == ri {
- yyerror("import path %q is reserved and cannot be used", path)
+ base.Errorf("import path %q is reserved and cannot be used", path)
return true
}
}
for _, r := range path {
if r == utf8.RuneError {
- yyerror("import path contains invalid UTF-8 sequence: %q", path)
+ base.Errorf("import path contains invalid UTF-8 sequence: %q", path)
return true
}
if r < 0x20 || r == 0x7f {
- yyerror("import path contains control character: %q", path)
+ base.Errorf("import path contains control character: %q", path)
return true
}
if r == '\\' {
- yyerror("import path contains backslash; use slash: %q", path)
+ base.Errorf("import path contains backslash; use slash: %q", path)
return true
}
if !allowSpace && unicode.IsSpace(r) {
- yyerror("import path contains space character: %q", path)
+ base.Errorf("import path contains space character: %q", path)
return true
}
if strings.ContainsRune("!\"#$%&'()*,:;<=>?[]^`{|}", r) {
- yyerror("import path contains invalid character '%c': %q", r, path)
+ base.Errorf("import path contains invalid character '%c': %q", r, path)
return true
}
}
@@ -1854,20 +1496,20 @@ func isdirectiface(t *types.Type) bool {
}
switch t.Etype {
- case TPTR:
+ case types.TPTR:
// Pointers to notinheap types must be stored indirectly. See issue 42076.
return !t.Elem().NotInHeap()
- case TCHAN,
- TMAP,
- TFUNC,
- TUNSAFEPTR:
+ case types.TCHAN,
+ types.TMAP,
+ types.TFUNC,
+ types.TUNSAFEPTR:
return true
- case TARRAY:
+ case types.TARRAY:
// Array of 1 direct iface type can be direct.
return t.NumElem() == 1 && isdirectiface(t.Elem())
- case TSTRUCT:
+ case types.TSTRUCT:
// Struct with 1 field of direct iface type can be direct.
return t.NumFields() == 1 && isdirectiface(t.Field(0).Type)
}
@@ -1876,32 +1518,32 @@ func isdirectiface(t *types.Type) bool {
}
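
Concretely, the classification above admits exactly the pointer-shaped types; any other value stored in an interface is heap-allocated and the data word points at the copy. Some examples (an illustration, not from this CL):

    package main

    import "fmt"

    type wrap struct{ p *int } // TSTRUCT with one direct field: direct
    type one [1]*int           // TARRAY with one direct element: direct

    func main() {
        n := 42
        var a interface{} = &n       // *int: stored directly in the data word
        var b interface{} = wrap{&n} // direct per the TSTRUCT case
        var c interface{} = one{&n}  // direct per the TARRAY case
        var d interface{} = n        // int: indirect; data word points at a copy
        fmt.Println(a, b, c, d)
    }
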
// itabType loads the _type field from a runtime.itab struct.
-func itabType(itab *Node) *Node {
- typ := nodSym(ODOTPTR, itab, nil)
- typ.Type = types.NewPtr(types.Types[TUINT8])
+func itabType(itab ir.Node) ir.Node {
+ typ := nodSym(ir.ODOTPTR, itab, nil)
+ typ.SetType(types.NewPtr(types.Types[types.TUINT8]))
typ.SetTypecheck(1)
- typ.Xoffset = int64(Widthptr) // offset of _type in runtime.itab
- typ.SetBounded(true) // guaranteed not to fault
+ typ.SetOffset(int64(Widthptr)) // offset of _type in runtime.itab
+ typ.SetBounded(true) // guaranteed not to fault
return typ
}
// ifaceData loads the data field from an interface.
// The concrete type must be known to have type t.
// It follows the pointer if !isdirectiface(t).
-func ifaceData(pos src.XPos, n *Node, t *types.Type) *Node {
+func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node {
if t.IsInterface() {
- Fatalf("ifaceData interface: %v", t)
+ base.Fatalf("ifaceData interface: %v", t)
}
- ptr := nodlSym(pos, OIDATA, n, nil)
+ ptr := nodlSym(pos, ir.OIDATA, n, nil)
if isdirectiface(t) {
- ptr.Type = t
+ ptr.SetType(t)
ptr.SetTypecheck(1)
return ptr
}
- ptr.Type = types.NewPtr(t)
+ ptr.SetType(types.NewPtr(t))
ptr.SetTypecheck(1)
- ind := nodl(pos, ODEREF, ptr, nil)
- ind.Type = t
+ ind := ir.NodAt(pos, ir.ODEREF, ptr, nil)
+ ind.SetType(t)
ind.SetTypecheck(1)
ind.SetBounded(true)
return ind
@@ -1910,9 +1552,9 @@ func ifaceData(pos src.XPos, n *Node, t *types.Type) *Node {
// typePos returns the position associated with t.
// This is where t was declared or where it appeared as a type expression.
func typePos(t *types.Type) src.XPos {
- n := asNode(t.Nod)
- if n == nil || !n.Pos.IsKnown() {
- Fatalf("bad type: %v", t)
+ n := ir.AsNode(t.Nod)
+ if n == nil || !n.Pos().IsKnown() {
+ base.Fatalf("bad type: %v", t)
}
- return n.Pos
+ return n.Pos()
}
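
The offsets hard-coded in itabType above (Widthptr for _type) and in walkTypeSwitch below (2*Widthptr for the hash, in both runtime._type and runtime.itab) assume the runtime layouts of this era. A compilable mirror of the itab shape, with pointer field types simplified (an illustration, not the runtime's actual declaration):

    package itabshape

    import "unsafe"

    type itab struct {
        inter unsafe.Pointer // *interfacetype, offset 0
        _type unsafe.Pointer // *_type, offset Widthptr: what itabType loads
        hash  uint32         // offset 2*Widthptr: what walkTypeSwitch loads
        _     [4]byte
        fun   [1]uintptr // variable-sized method table follows
    }
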
diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go
index 8d9fbe300e..02d38ac4b1 100644
--- a/src/cmd/compile/internal/gc/swt.go
+++ b/src/cmd/compile/internal/gc/swt.go
@@ -5,43 +5,47 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
+ "go/constant"
+ "go/token"
"sort"
)
// typecheckswitch typechecks a switch statement.
-func typecheckswitch(n *Node) {
- typecheckslice(n.Ninit.Slice(), ctxStmt)
- if n.Left != nil && n.Left.Op == OTYPESW {
+func typecheckswitch(n ir.Node) {
+ typecheckslice(n.Init().Slice(), ctxStmt)
+ if n.Left() != nil && n.Left().Op() == ir.OTYPESW {
typecheckTypeSwitch(n)
} else {
typecheckExprSwitch(n)
}
}
-func typecheckTypeSwitch(n *Node) {
- n.Left.Right = typecheck(n.Left.Right, ctxExpr)
- t := n.Left.Right.Type
+func typecheckTypeSwitch(n ir.Node) {
+ n.Left().SetRight(typecheck(n.Left().Right(), ctxExpr))
+ t := n.Left().Right().Type()
if t != nil && !t.IsInterface() {
- yyerrorl(n.Pos, "cannot type switch on non-interface value %L", n.Left.Right)
+ base.ErrorfAt(n.Pos(), "cannot type switch on non-interface value %L", n.Left().Right())
t = nil
}
// We don't actually declare the type switch's guarded
// declaration itself. So if there are no cases, we won't
// notice that it went unused.
- if v := n.Left.Left; v != nil && !v.isBlank() && n.List.Len() == 0 {
- yyerrorl(v.Pos, "%v declared but not used", v.Sym)
+ if v := n.Left().Left(); v != nil && !ir.IsBlank(v) && n.List().Len() == 0 {
+ base.ErrorfAt(v.Pos(), "%v declared but not used", v.Sym())
}
- var defCase, nilCase *Node
+ var defCase, nilCase ir.Node
var ts typeSet
- for _, ncase := range n.List.Slice() {
- ls := ncase.List.Slice()
+ for _, ncase := range n.List().Slice() {
+ ls := ncase.List().Slice()
if len(ls) == 0 { // default:
if defCase != nil {
- yyerrorl(ncase.Pos, "multiple defaults in switch (first at %v)", defCase.Line())
+ base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase))
} else {
defCase = ncase
}
@@ -50,65 +54,65 @@ func typecheckTypeSwitch(n *Node) {
for i := range ls {
ls[i] = typecheck(ls[i], ctxExpr|ctxType)
n1 := ls[i]
- if t == nil || n1.Type == nil {
+ if t == nil || n1.Type() == nil {
continue
}
var missing, have *types.Field
var ptr int
switch {
- case n1.isNil(): // case nil:
+ case ir.IsNil(n1): // case nil:
if nilCase != nil {
- yyerrorl(ncase.Pos, "multiple nil cases in type switch (first at %v)", nilCase.Line())
+ base.ErrorfAt(ncase.Pos(), "multiple nil cases in type switch (first at %v)", ir.Line(nilCase))
} else {
nilCase = ncase
}
- case n1.Op != OTYPE:
- yyerrorl(ncase.Pos, "%L is not a type", n1)
- case !n1.Type.IsInterface() && !implements(n1.Type, t, &missing, &have, &ptr) && !missing.Broke():
+ case n1.Op() != ir.OTYPE:
+ base.ErrorfAt(ncase.Pos(), "%L is not a type", n1)
+ case !n1.Type().IsInterface() && !implements(n1.Type(), t, &missing, &have, &ptr) && !missing.Broke():
if have != nil && !have.Broke() {
- yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
- " (wrong type for %v method)\n\thave %v%S\n\twant %v%S", n.Left.Right, n1.Type, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
+ " (wrong type for %v method)\n\thave %v%S\n\twant %v%S", n.Left().Right(), n1.Type(), missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
} else if ptr != 0 {
- yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
- " (%v method has pointer receiver)", n.Left.Right, n1.Type, missing.Sym)
+ base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
+ " (%v method has pointer receiver)", n.Left().Right(), n1.Type(), missing.Sym)
} else {
- yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
- " (missing %v method)", n.Left.Right, n1.Type, missing.Sym)
+ base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
+ " (missing %v method)", n.Left().Right(), n1.Type(), missing.Sym)
}
}
- if n1.Op == OTYPE {
- ts.add(ncase.Pos, n1.Type)
+ if n1.Op() == ir.OTYPE {
+ ts.add(ncase.Pos(), n1.Type())
}
}
- if ncase.Rlist.Len() != 0 {
+ if ncase.Rlist().Len() != 0 {
// Assign the clause variable's type.
vt := t
if len(ls) == 1 {
- if ls[0].Op == OTYPE {
- vt = ls[0].Type
- } else if ls[0].Op != OLITERAL { // TODO(mdempsky): Should be !ls[0].isNil()
+ if ls[0].Op() == ir.OTYPE {
+ vt = ls[0].Type()
+ } else if !ir.IsNil(ls[0]) {
// Invalid single-type case;
// mark variable as broken.
vt = nil
}
}
- // TODO(mdempsky): It should be possible to
- // still typecheck the case body.
- if vt == nil {
- continue
+ nvar := ncase.Rlist().First()
+ nvar.SetType(vt)
+ if vt != nil {
+ nvar = typecheck(nvar, ctxExpr|ctxAssign)
+ } else {
+ // Clause variable is broken; prevent typechecking.
+ nvar.SetTypecheck(1)
+ nvar.SetWalkdef(1)
}
-
- nvar := ncase.Rlist.First()
- nvar.Type = vt
- nvar = typecheck(nvar, ctxExpr|ctxAssign)
- ncase.Rlist.SetFirst(nvar)
+ ncase.Rlist().SetFirst(nvar)
}
- typecheckslice(ncase.Nbody.Slice(), ctxStmt)
+ typecheckslice(ncase.Body().Slice(), ctxStmt)
}
}
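
A source-level view of the clause-variable typing above: with a single type in the case list the variable takes that type; with several (or none) it keeps the interface type of the switched expression. For example:

    package main

    import "fmt"

    func describe(x interface{}) {
        switch v := x.(type) {
        case int: // one type in ls: v has type int here
            fmt.Println("int:", v+1)
        case string, error: // several types: v keeps type interface{}
            fmt.Println("string or error:", v)
        default:
            fmt.Println("other:", v)
        }
    }

    func main() {
        describe(3)
        describe("hi")
        describe(3.5)
    }
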
@@ -133,19 +137,19 @@ func (s *typeSet) add(pos src.XPos, typ *types.Type) {
prevs := s.m[ls]
for _, prev := range prevs {
if types.Identical(typ, prev.typ) {
- yyerrorl(pos, "duplicate case %v in type switch\n\tprevious case at %s", typ, linestr(prev.pos))
+ base.ErrorfAt(pos, "duplicate case %v in type switch\n\tprevious case at %s", typ, base.FmtPos(prev.pos))
return
}
}
s.m[ls] = append(prevs, typeSetEntry{pos, typ})
}
-func typecheckExprSwitch(n *Node) {
- t := types.Types[TBOOL]
- if n.Left != nil {
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- t = n.Left.Type
+func typecheckExprSwitch(n ir.Node) {
+ t := types.Types[types.TBOOL]
+ if n.Left() != nil {
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ t = n.Left().Type()
}
var nilonly string
@@ -153,28 +157,28 @@ func typecheckExprSwitch(n *Node) {
switch {
case t.IsMap():
nilonly = "map"
- case t.Etype == TFUNC:
+ case t.Etype == types.TFUNC:
nilonly = "func"
case t.IsSlice():
nilonly = "slice"
case !IsComparable(t):
if t.IsStruct() {
- yyerrorl(n.Pos, "cannot switch on %L (struct containing %v cannot be compared)", n.Left, IncomparableField(t).Type)
+ base.ErrorfAt(n.Pos(), "cannot switch on %L (struct containing %v cannot be compared)", n.Left(), IncomparableField(t).Type)
} else {
- yyerrorl(n.Pos, "cannot switch on %L", n.Left)
+ base.ErrorfAt(n.Pos(), "cannot switch on %L", n.Left())
}
t = nil
}
}
- var defCase *Node
+ var defCase ir.Node
var cs constSet
- for _, ncase := range n.List.Slice() {
- ls := ncase.List.Slice()
+ for _, ncase := range n.List().Slice() {
+ ls := ncase.List().Slice()
if len(ls) == 0 { // default:
if defCase != nil {
- yyerrorl(ncase.Pos, "multiple defaults in switch (first at %v)", defCase.Line())
+ base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase))
} else {
defCase = ncase
}
@@ -185,22 +189,22 @@ func typecheckExprSwitch(n *Node) {
ls[i] = typecheck(ls[i], ctxExpr)
ls[i] = defaultlit(ls[i], t)
n1 := ls[i]
- if t == nil || n1.Type == nil {
+ if t == nil || n1.Type() == nil {
continue
}
- if nilonly != "" && !n1.isNil() {
- yyerrorl(ncase.Pos, "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Left)
- } else if t.IsInterface() && !n1.Type.IsInterface() && !IsComparable(n1.Type) {
- yyerrorl(ncase.Pos, "invalid case %L in switch (incomparable type)", n1)
+ if nilonly != "" && !ir.IsNil(n1) {
+ base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Left())
+ } else if t.IsInterface() && !n1.Type().IsInterface() && !IsComparable(n1.Type()) {
+ base.ErrorfAt(ncase.Pos(), "invalid case %L in switch (incomparable type)", n1)
} else {
- op1, _ := assignop(n1.Type, t)
- op2, _ := assignop(t, n1.Type)
- if op1 == OXXX && op2 == OXXX {
- if n.Left != nil {
- yyerrorl(ncase.Pos, "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left, n1.Type, t)
+ op1, _ := assignop(n1.Type(), t)
+ op2, _ := assignop(t, n1.Type())
+ if op1 == ir.OXXX && op2 == ir.OXXX {
+ if n.Left() != nil {
+ base.ErrorfAt(ncase.Pos(), "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left(), n1.Type(), t)
} else {
- yyerrorl(ncase.Pos, "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type)
+ base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type())
}
}
}
@@ -211,23 +215,23 @@ func typecheckExprSwitch(n *Node) {
// case GOARCH == "arm" && GOARM == "5":
// case GOARCH == "arm":
// which would both evaluate to false for non-ARM compiles.
- if !n1.Type.IsBoolean() {
- cs.add(ncase.Pos, n1, "case", "switch")
+ if !n1.Type().IsBoolean() {
+ cs.add(ncase.Pos(), n1, "case", "switch")
}
}
- typecheckslice(ncase.Nbody.Slice(), ctxStmt)
+ typecheckslice(ncase.Body().Slice(), ctxStmt)
}
}
// walkswitch walks a switch statement.
-func walkswitch(sw *Node) {
+func walkswitch(sw ir.Node) {
// Guard against double walk; see #25776.
- if sw.List.Len() == 0 && sw.Nbody.Len() > 0 {
+ if sw.List().Len() == 0 && sw.Body().Len() > 0 {
return // Was fatal, but eliminating every possible source of double-walking is hard
}
- if sw.Left != nil && sw.Left.Op == OTYPESW {
+ if sw.Left() != nil && sw.Left().Op() == ir.OTYPESW {
walkTypeSwitch(sw)
} else {
walkExprSwitch(sw)
@@ -236,11 +240,11 @@ func walkswitch(sw *Node) {
// walkExprSwitch generates an AST implementing sw. sw is an
// expression switch.
-func walkExprSwitch(sw *Node) {
+func walkExprSwitch(sw ir.Node) {
lno := setlineno(sw)
- cond := sw.Left
- sw.Left = nil
+ cond := sw.Left()
+ sw.SetLeft(nil)
// convert switch {...} to switch true {...}
if cond == nil {
@@ -256,79 +260,79 @@ func walkExprSwitch(sw *Node) {
// because walkexpr will lower the string
// conversion into a runtime call.
// See issue 24937 for more discussion.
- if cond.Op == OBYTES2STR && allCaseExprsAreSideEffectFree(sw) {
- cond.Op = OBYTES2STRTMP
+ if cond.Op() == ir.OBYTES2STR && allCaseExprsAreSideEffectFree(sw) {
+ cond.SetOp(ir.OBYTES2STRTMP)
}
- cond = walkexpr(cond, &sw.Ninit)
- if cond.Op != OLITERAL {
- cond = copyexpr(cond, cond.Type, &sw.Nbody)
+ cond = walkexpr(cond, sw.PtrInit())
+ if cond.Op() != ir.OLITERAL && cond.Op() != ir.ONIL {
+ cond = copyexpr(cond, cond.Type(), sw.PtrBody())
}
- lineno = lno
+ base.Pos = lno
s := exprSwitch{
exprname: cond,
}
- var defaultGoto *Node
- var body Nodes
- for _, ncase := range sw.List.Slice() {
+ var defaultGoto ir.Node
+ var body ir.Nodes
+ for _, ncase := range sw.List().Slice() {
label := autolabel(".s")
- jmp := npos(ncase.Pos, nodSym(OGOTO, nil, label))
+ jmp := npos(ncase.Pos(), nodSym(ir.OGOTO, nil, label))
// Process case dispatch.
- if ncase.List.Len() == 0 {
+ if ncase.List().Len() == 0 {
if defaultGoto != nil {
- Fatalf("duplicate default case not detected during typechecking")
+ base.Fatalf("duplicate default case not detected during typechecking")
}
defaultGoto = jmp
}
- for _, n1 := range ncase.List.Slice() {
- s.Add(ncase.Pos, n1, jmp)
+ for _, n1 := range ncase.List().Slice() {
+ s.Add(ncase.Pos(), n1, jmp)
}
// Process body.
- body.Append(npos(ncase.Pos, nodSym(OLABEL, nil, label)))
- body.Append(ncase.Nbody.Slice()...)
- if fall, pos := hasFall(ncase.Nbody.Slice()); !fall {
- br := nod(OBREAK, nil, nil)
- br.Pos = pos
+ body.Append(npos(ncase.Pos(), nodSym(ir.OLABEL, nil, label)))
+ body.Append(ncase.Body().Slice()...)
+ if fall, pos := hasFall(ncase.Body().Slice()); !fall {
+ br := ir.Nod(ir.OBREAK, nil, nil)
+ br.SetPos(pos)
body.Append(br)
}
}
- sw.List.Set(nil)
+ sw.PtrList().Set(nil)
if defaultGoto == nil {
- br := nod(OBREAK, nil, nil)
- br.Pos = br.Pos.WithNotStmt()
+ br := ir.Nod(ir.OBREAK, nil, nil)
+ br.SetPos(br.Pos().WithNotStmt())
defaultGoto = br
}
- s.Emit(&sw.Nbody)
- sw.Nbody.Append(defaultGoto)
- sw.Nbody.AppendNodes(&body)
- walkstmtlist(sw.Nbody.Slice())
+ s.Emit(sw.PtrBody())
+ sw.PtrBody().Append(defaultGoto)
+ sw.PtrBody().AppendNodes(&body)
+ walkstmtlist(sw.Body().Slice())
}
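
Written out by hand, the lowering above turns a three-way switch into conditional gotos followed by labeled bodies, each terminated like the appended OBREAK. A sketch of the generated shape, not actual compiler output:

    package main

    import "fmt"

    func main() {
        x := 2
        // Dispatch emitted via s.Emit:
        if x == 1 {
            goto s1
        }
        if x == 2 {
            goto s2
        }
        goto sd // defaultGoto
        // Labeled bodies appended afterwards:
    s1:
        fmt.Println("A")
        goto done
    s2:
        fmt.Println("B")
        goto done
    sd:
        fmt.Println("C")
    done:
        fmt.Println("after switch")
    }
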
// An exprSwitch walks an expression switch.
type exprSwitch struct {
- exprname *Node // value being switched on
+ exprname ir.Node // value being switched on
- done Nodes
+ done ir.Nodes
clauses []exprClause
}
type exprClause struct {
pos src.XPos
- lo, hi *Node
- jmp *Node
+ lo, hi ir.Node
+ jmp ir.Node
}
-func (s *exprSwitch) Add(pos src.XPos, expr, jmp *Node) {
+func (s *exprSwitch) Add(pos src.XPos, expr, jmp ir.Node) {
c := exprClause{pos: pos, lo: expr, hi: expr, jmp: jmp}
- if okforcmp[s.exprname.Type.Etype] && expr.Op == OLITERAL {
+ if okforcmp[s.exprname.Type().Etype] && expr.Op() == ir.OLITERAL {
s.clauses = append(s.clauses, c)
return
}
@@ -338,7 +342,7 @@ func (s *exprSwitch) Add(pos src.XPos, expr, jmp *Node) {
s.flush()
}
-func (s *exprSwitch) Emit(out *Nodes) {
+func (s *exprSwitch) Emit(out *ir.Nodes) {
s.flush()
out.AppendNodes(&s.done)
}
@@ -355,7 +359,7 @@ func (s *exprSwitch) flush() {
// (e.g., sort.Slice doesn't need to invoke the less function
// when there's only a single slice element).
- if s.exprname.Type.IsString() && len(cc) >= 2 {
+ if s.exprname.Type().IsString() && len(cc) >= 2 {
// Sort strings by length and then by value. It is
// much cheaper to compare lengths than values, and
// all we need here is consistency. We respect this
@@ -385,26 +389,25 @@ func (s *exprSwitch) flush() {
runs = append(runs, cc[start:])
// Perform two-level binary search.
- nlen := nod(OLEN, s.exprname, nil)
binarySearch(len(runs), &s.done,
- func(i int) *Node {
- return nod(OLE, nlen, nodintconst(runLen(runs[i-1])))
+ func(i int) ir.Node {
+ return ir.Nod(ir.OLE, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(runs[i-1])))
},
- func(i int, nif *Node) {
+ func(i int, nif ir.Node) {
run := runs[i]
- nif.Left = nod(OEQ, nlen, nodintconst(runLen(run)))
- s.search(run, &nif.Nbody)
+ nif.SetLeft(ir.Nod(ir.OEQ, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(run))))
+ s.search(run, nif.PtrBody())
},
)
return
}
sort.Slice(cc, func(i, j int) bool {
- return compareOp(cc[i].lo.Val(), OLT, cc[j].lo.Val())
+ return constant.Compare(cc[i].lo.Val(), token.LSS, cc[j].lo.Val())
})
// Merge consecutive integer cases.
- if s.exprname.Type.IsInteger() {
+ if s.exprname.Type().IsInteger() {
merged := cc[:1]
for _, c := range cc[1:] {
last := &merged[len(merged)-1]
@@ -420,40 +423,40 @@ func (s *exprSwitch) flush() {
s.search(cc, &s.done)
}
-func (s *exprSwitch) search(cc []exprClause, out *Nodes) {
+func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) {
binarySearch(len(cc), out,
- func(i int) *Node {
- return nod(OLE, s.exprname, cc[i-1].hi)
+ func(i int) ir.Node {
+ return ir.Nod(ir.OLE, s.exprname, cc[i-1].hi)
},
- func(i int, nif *Node) {
+ func(i int, nif ir.Node) {
c := &cc[i]
- nif.Left = c.test(s.exprname)
- nif.Nbody.Set1(c.jmp)
+ nif.SetLeft(c.test(s.exprname))
+ nif.PtrBody().Set1(c.jmp)
},
)
}
-func (c *exprClause) test(exprname *Node) *Node {
+func (c *exprClause) test(exprname ir.Node) ir.Node {
// Integer range.
if c.hi != c.lo {
- low := nodl(c.pos, OGE, exprname, c.lo)
- high := nodl(c.pos, OLE, exprname, c.hi)
- return nodl(c.pos, OANDAND, low, high)
+ low := ir.NodAt(c.pos, ir.OGE, exprname, c.lo)
+ high := ir.NodAt(c.pos, ir.OLE, exprname, c.hi)
+ return ir.NodAt(c.pos, ir.OANDAND, low, high)
}
// Optimize "switch true { ...}" and "switch false { ... }".
- if Isconst(exprname, CTBOOL) && !c.lo.Type.IsInterface() {
+ if ir.IsConst(exprname, constant.Bool) && !c.lo.Type().IsInterface() {
if exprname.BoolVal() {
return c.lo
} else {
- return nodl(c.pos, ONOT, c.lo, nil)
+ return ir.NodAt(c.pos, ir.ONOT, c.lo, nil)
}
}
- return nodl(c.pos, OEQ, exprname, c.lo)
+ return ir.NodAt(c.pos, ir.OEQ, exprname, c.lo)
}
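
After the integer merging in flush, test emits a single range check for a run of consecutive values. At the source level, a hypothetical merged clause with lo = 1 and hi = 3 behaves like:

    package main

    import "fmt"

    func main() {
        x := 2
        // What c.test builds for "case 1, 2, 3:" after merging:
        if 1 <= x && x <= 3 {
            fmt.Println("in merged case")
        }
    }
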
-func allCaseExprsAreSideEffectFree(sw *Node) bool {
+func allCaseExprsAreSideEffectFree(sw ir.Node) bool {
// In theory, we could be more aggressive, allowing any
// side-effect-free expressions in cases, but it's a bit
// tricky because some of that information is unavailable due
@@ -461,12 +464,12 @@ func allCaseExprsAreSideEffectFree(sw *Node) bool {
// Restricting to constants is simple and probably powerful
// enough.
- for _, ncase := range sw.List.Slice() {
- if ncase.Op != OCASE {
- Fatalf("switch string(byteslice) bad op: %v", ncase.Op)
+ for _, ncase := range sw.List().Slice() {
+ if ncase.Op() != ir.OCASE {
+ base.Fatalf("switch string(byteslice) bad op: %v", ncase.Op())
}
- for _, v := range ncase.List.Slice() {
- if v.Op != OLITERAL {
+ for _, v := range ncase.List().Slice() {
+ if v.Op() != ir.OLITERAL {
return false
}
}
@@ -475,7 +478,7 @@ func allCaseExprsAreSideEffectFree(sw *Node) bool {
}
// hasFall reports whether stmts ends with a "fallthrough" statement.
-func hasFall(stmts []*Node) (bool, src.XPos) {
+func hasFall(stmts []ir.Node) (bool, src.XPos) {
// Search backwards for the index of the fallthrough
// statement. Do not assume it'll be in the last
// position, since in some cases (e.g. when the statement
@@ -483,30 +486,30 @@ func hasFall(stmts []*Node) (bool, src.XPos) {
// nodes will be at the end of the list.
i := len(stmts) - 1
- for i >= 0 && stmts[i].Op == OVARKILL {
+ for i >= 0 && stmts[i].Op() == ir.OVARKILL {
i--
}
if i < 0 {
return false, src.NoXPos
}
- return stmts[i].Op == OFALL, stmts[i].Pos
+ return stmts[i].Op() == ir.OFALL, stmts[i].Pos()
}
// walkTypeSwitch generates an AST that implements sw, where sw is a
// type switch.
-func walkTypeSwitch(sw *Node) {
+func walkTypeSwitch(sw ir.Node) {
var s typeSwitch
- s.facename = sw.Left.Right
- sw.Left = nil
+ s.facename = sw.Left().Right()
+ sw.SetLeft(nil)
- s.facename = walkexpr(s.facename, &sw.Ninit)
- s.facename = copyexpr(s.facename, s.facename.Type, &sw.Nbody)
- s.okname = temp(types.Types[TBOOL])
+ s.facename = walkexpr(s.facename, sw.PtrInit())
+ s.facename = copyexpr(s.facename, s.facename.Type(), sw.PtrBody())
+ s.okname = temp(types.Types[types.TBOOL])
// Get interface descriptor word.
// For empty interfaces this will be the type.
// For non-empty interfaces this will be the itab.
- itab := nod(OITAB, s.facename, nil)
+ itab := ir.Nod(ir.OITAB, s.facename, nil)
// For empty interfaces, do:
// if e._type == nil {
@@ -514,92 +517,92 @@ func walkTypeSwitch(sw *Node) {
// }
// h := e._type.hash
// Use a similar strategy for non-empty interfaces.
- ifNil := nod(OIF, nil, nil)
- ifNil.Left = nod(OEQ, itab, nodnil())
- lineno = lineno.WithNotStmt() // disable statement marks after the first check.
- ifNil.Left = typecheck(ifNil.Left, ctxExpr)
- ifNil.Left = defaultlit(ifNil.Left, nil)
+ ifNil := ir.Nod(ir.OIF, nil, nil)
+ ifNil.SetLeft(ir.Nod(ir.OEQ, itab, nodnil()))
+ base.Pos = base.Pos.WithNotStmt() // disable statement marks after the first check.
+ ifNil.SetLeft(typecheck(ifNil.Left(), ctxExpr))
+ ifNil.SetLeft(defaultlit(ifNil.Left(), nil))
// ifNil.Nbody assigned at end.
- sw.Nbody.Append(ifNil)
+ sw.PtrBody().Append(ifNil)
// Load hash from type or itab.
- dotHash := nodSym(ODOTPTR, itab, nil)
- dotHash.Type = types.Types[TUINT32]
+ dotHash := nodSym(ir.ODOTPTR, itab, nil)
+ dotHash.SetType(types.Types[types.TUINT32])
dotHash.SetTypecheck(1)
- if s.facename.Type.IsEmptyInterface() {
- dotHash.Xoffset = int64(2 * Widthptr) // offset of hash in runtime._type
+ if s.facename.Type().IsEmptyInterface() {
+ dotHash.SetOffset(int64(2 * Widthptr)) // offset of hash in runtime._type
} else {
- dotHash.Xoffset = int64(2 * Widthptr) // offset of hash in runtime.itab
+ dotHash.SetOffset(int64(2 * Widthptr)) // offset of hash in runtime.itab
}
dotHash.SetBounded(true) // guaranteed not to fault
- s.hashname = copyexpr(dotHash, dotHash.Type, &sw.Nbody)
-
- br := nod(OBREAK, nil, nil)
- var defaultGoto, nilGoto *Node
- var body Nodes
- for _, ncase := range sw.List.Slice() {
- var caseVar *Node
- if ncase.Rlist.Len() != 0 {
- caseVar = ncase.Rlist.First()
+ s.hashname = copyexpr(dotHash, dotHash.Type(), sw.PtrBody())
+
+ br := ir.Nod(ir.OBREAK, nil, nil)
+ var defaultGoto, nilGoto ir.Node
+ var body ir.Nodes
+ for _, ncase := range sw.List().Slice() {
+ var caseVar ir.Node
+ if ncase.Rlist().Len() != 0 {
+ caseVar = ncase.Rlist().First()
}
// For single-type cases with an interface type,
// we initialize the case variable as part of the type assertion.
// In other cases, we initialize it in the body.
var singleType *types.Type
- if ncase.List.Len() == 1 && ncase.List.First().Op == OTYPE {
- singleType = ncase.List.First().Type
+ if ncase.List().Len() == 1 && ncase.List().First().Op() == ir.OTYPE {
+ singleType = ncase.List().First().Type()
}
caseVarInitialized := false
label := autolabel(".s")
- jmp := npos(ncase.Pos, nodSym(OGOTO, nil, label))
+ jmp := npos(ncase.Pos(), nodSym(ir.OGOTO, nil, label))
- if ncase.List.Len() == 0 { // default:
+ if ncase.List().Len() == 0 { // default:
if defaultGoto != nil {
- Fatalf("duplicate default case not detected during typechecking")
+ base.Fatalf("duplicate default case not detected during typechecking")
}
defaultGoto = jmp
}
- for _, n1 := range ncase.List.Slice() {
- if n1.isNil() { // case nil:
+ for _, n1 := range ncase.List().Slice() {
+ if ir.IsNil(n1) { // case nil:
if nilGoto != nil {
- Fatalf("duplicate nil case not detected during typechecking")
+ base.Fatalf("duplicate nil case not detected during typechecking")
}
nilGoto = jmp
continue
}
if singleType != nil && singleType.IsInterface() {
- s.Add(ncase.Pos, n1.Type, caseVar, jmp)
+ s.Add(ncase.Pos(), n1.Type(), caseVar, jmp)
caseVarInitialized = true
} else {
- s.Add(ncase.Pos, n1.Type, nil, jmp)
+ s.Add(ncase.Pos(), n1.Type(), nil, jmp)
}
}
- body.Append(npos(ncase.Pos, nodSym(OLABEL, nil, label)))
+ body.Append(npos(ncase.Pos(), nodSym(ir.OLABEL, nil, label)))
if caseVar != nil && !caseVarInitialized {
val := s.facename
if singleType != nil {
// We have a single concrete type. Extract the data.
if singleType.IsInterface() {
- Fatalf("singleType interface should have been handled in Add")
+ base.Fatalf("singleType interface should have been handled in Add")
}
- val = ifaceData(ncase.Pos, s.facename, singleType)
+ val = ifaceData(ncase.Pos(), s.facename, singleType)
}
- l := []*Node{
- nodl(ncase.Pos, ODCL, caseVar, nil),
- nodl(ncase.Pos, OAS, caseVar, val),
+ l := []ir.Node{
+ ir.NodAt(ncase.Pos(), ir.ODCL, caseVar, nil),
+ ir.NodAt(ncase.Pos(), ir.OAS, caseVar, val),
}
typecheckslice(l, ctxStmt)
body.Append(l...)
}
- body.Append(ncase.Nbody.Slice()...)
+ body.Append(ncase.Body().Slice()...)
body.Append(br)
}
- sw.List.Set(nil)
+ sw.PtrList().Set(nil)
if defaultGoto == nil {
defaultGoto = br
@@ -607,58 +610,58 @@ func walkTypeSwitch(sw *Node) {
if nilGoto == nil {
nilGoto = defaultGoto
}
- ifNil.Nbody.Set1(nilGoto)
+ ifNil.PtrBody().Set1(nilGoto)
- s.Emit(&sw.Nbody)
- sw.Nbody.Append(defaultGoto)
- sw.Nbody.AppendNodes(&body)
+ s.Emit(sw.PtrBody())
+ sw.PtrBody().Append(defaultGoto)
+ sw.PtrBody().AppendNodes(&body)
- walkstmtlist(sw.Nbody.Slice())
+ walkstmtlist(sw.Body().Slice())
}
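
Putting the pieces together, walkTypeSwitch emits roughly: a nil check on the descriptor word, a hash load at offset 2*Widthptr, a binary search over the case hashes, and a confirming comma-ok assertion per candidate type. A hand-written approximation of one dispatch path (a sketch; the real code tests the hash before asserting):

    package main

    import "fmt"

    func lowered(x interface{}) string {
        if x == nil { // the ifNil check on the itab/_type word
            return "nil case"
        }
        // After a case's type hash matches, confirm with a comma-ok
        // assertion and jump to that case's body on success:
        if v, ok := x.(int); ok {
            return fmt.Sprintf("int case: %d", v)
        }
        return "default case"
    }

    func main() {
        fmt.Println(lowered(7))
        fmt.Println(lowered(nil))
        fmt.Println(lowered("s"))
    }
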
// A typeSwitch walks a type switch.
type typeSwitch struct {
// Temporary variables (i.e., ONAMEs) used by type switch dispatch logic:
- facename *Node // value being type-switched on
- hashname *Node // type hash of the value being type-switched on
- okname *Node // boolean used for comma-ok type assertions
+ facename ir.Node // value being type-switched on
+ hashname ir.Node // type hash of the value being type-switched on
+ okname ir.Node // boolean used for comma-ok type assertions
- done Nodes
+ done ir.Nodes
clauses []typeClause
}
type typeClause struct {
hash uint32
- body Nodes
+ body ir.Nodes
}
-func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp *Node) {
- var body Nodes
+func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp ir.Node) {
+ var body ir.Nodes
if caseVar != nil {
- l := []*Node{
- nodl(pos, ODCL, caseVar, nil),
- nodl(pos, OAS, caseVar, nil),
+ l := []ir.Node{
+ ir.NodAt(pos, ir.ODCL, caseVar, nil),
+ ir.NodAt(pos, ir.OAS, caseVar, nil),
}
typecheckslice(l, ctxStmt)
body.Append(l...)
} else {
- caseVar = nblank
+ caseVar = ir.BlankNode
}
// cv, ok = iface.(type)
- as := nodl(pos, OAS2, nil, nil)
- as.List.Set2(caseVar, s.okname) // cv, ok =
- dot := nodl(pos, ODOTTYPE, s.facename, nil)
- dot.Type = typ // iface.(type)
- as.Rlist.Set1(dot)
+ as := ir.NodAt(pos, ir.OAS2, nil, nil)
+ as.PtrList().Set2(caseVar, s.okname) // cv, ok =
+ dot := ir.NodAt(pos, ir.ODOTTYPE, s.facename, nil)
+ dot.SetType(typ) // iface.(type)
+ as.PtrRlist().Set1(dot)
as = typecheck(as, ctxStmt)
as = walkexpr(as, &body)
body.Append(as)
// if ok { goto label }
- nif := nodl(pos, OIF, nil, nil)
- nif.Left = s.okname
- nif.Nbody.Set1(jmp)
+ nif := ir.NodAt(pos, ir.OIF, nil, nil)
+ nif.SetLeft(s.okname)
+ nif.PtrBody().Set1(jmp)
body.Append(nif)
if !typ.IsInterface() {
@@ -673,7 +676,7 @@ func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp *Node) {
s.done.AppendNodes(&body)
}
-func (s *typeSwitch) Emit(out *Nodes) {
+func (s *typeSwitch) Emit(out *ir.Nodes) {
s.flush()
out.AppendNodes(&s.done)
}
@@ -700,15 +703,15 @@ func (s *typeSwitch) flush() {
cc = merged
binarySearch(len(cc), &s.done,
- func(i int) *Node {
- return nod(OLE, s.hashname, nodintconst(int64(cc[i-1].hash)))
+ func(i int) ir.Node {
+ return ir.Nod(ir.OLE, s.hashname, nodintconst(int64(cc[i-1].hash)))
},
- func(i int, nif *Node) {
+ func(i int, nif ir.Node) {
// TODO(mdempsky): Omit hash equality check if
// there's only one type.
c := cc[i]
- nif.Left = nod(OEQ, s.hashname, nodintconst(int64(c.hash)))
- nif.Nbody.AppendNodes(&c.body)
+ nif.SetLeft(ir.Nod(ir.OEQ, s.hashname, nodintconst(int64(c.hash))))
+ nif.PtrBody().AppendNodes(&c.body)
},
)
}
@@ -720,35 +723,35 @@ func (s *typeSwitch) flush() {
// less(i) should return a boolean expression. If it evaluates to true,
// then cases before i will be tested; otherwise, cases i and later.
//
-// base(i, nif) should setup nif (an OIF node) to test case i. In
+// leaf(i, nif) should set up nif (an OIF node) to test case i. In
// particular, it should set nif.Left and nif.Nbody.
-func binarySearch(n int, out *Nodes, less func(i int) *Node, base func(i int, nif *Node)) {
+func binarySearch(n int, out *ir.Nodes, less func(i int) ir.Node, leaf func(i int, nif ir.Node)) {
const binarySearchMin = 4 // minimum number of cases for binary search
- var do func(lo, hi int, out *Nodes)
- do = func(lo, hi int, out *Nodes) {
+ var do func(lo, hi int, out *ir.Nodes)
+ do = func(lo, hi int, out *ir.Nodes) {
n := hi - lo
if n < binarySearchMin {
for i := lo; i < hi; i++ {
- nif := nod(OIF, nil, nil)
- base(i, nif)
- lineno = lineno.WithNotStmt()
- nif.Left = typecheck(nif.Left, ctxExpr)
- nif.Left = defaultlit(nif.Left, nil)
+ nif := ir.Nod(ir.OIF, nil, nil)
+ leaf(i, nif)
+ base.Pos = base.Pos.WithNotStmt()
+ nif.SetLeft(typecheck(nif.Left(), ctxExpr))
+ nif.SetLeft(defaultlit(nif.Left(), nil))
out.Append(nif)
- out = &nif.Rlist
+ out = nif.PtrRlist()
}
return
}
half := lo + n/2
- nif := nod(OIF, nil, nil)
- nif.Left = less(half)
- lineno = lineno.WithNotStmt()
- nif.Left = typecheck(nif.Left, ctxExpr)
- nif.Left = defaultlit(nif.Left, nil)
- do(lo, half, &nif.Nbody)
- do(half, hi, &nif.Rlist)
+ nif := ir.Nod(ir.OIF, nil, nil)
+ nif.SetLeft(less(half))
+ base.Pos = base.Pos.WithNotStmt()
+ nif.SetLeft(typecheck(nif.Left(), ctxExpr))
+ nif.SetLeft(defaultlit(nif.Left(), nil))
+ do(lo, half, nif.PtrBody())
+ do(half, hi, nif.PtrRlist())
out.Append(nif)
}
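
For six clauses and binarySearchMin = 4, the recursion above emits one less(3) split and two linear chains of three tests, each chain linked through the Rlist (else) branch. Hand-written, with jmp standing in for the per-case goto:

    package main

    import "fmt"

    func dispatch(x int, jmp func(i int)) {
        if x <= 2 { // less(3) splits the six clauses
            if x == 0 {
                jmp(0)
            } else if x == 1 {
                jmp(1)
            } else if x == 2 {
                jmp(2)
            }
        } else {
            if x == 3 {
                jmp(3)
            } else if x == 4 {
                jmp(4)
            } else if x == 5 {
                jmp(5)
            }
        }
    }

    func main() {
        dispatch(4, func(i int) { fmt.Println("case", i) })
    }
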
diff --git a/src/cmd/compile/internal/gc/trace.go b/src/cmd/compile/internal/gc/trace.go
index ed4b5a268d..c6eb23a090 100644
--- a/src/cmd/compile/internal/gc/trace.go
+++ b/src/cmd/compile/internal/gc/trace.go
@@ -9,6 +9,8 @@ package gc
import (
"os"
tracepkg "runtime/trace"
+
+ "cmd/compile/internal/base"
)
func init() {
@@ -18,10 +20,10 @@ func init() {
func traceHandlerGo17(traceprofile string) {
f, err := os.Create(traceprofile)
if err != nil {
- Fatalf("%v", err)
+ base.Fatalf("%v", err)
}
if err := tracepkg.Start(f); err != nil {
- Fatalf("%v", err)
+ base.Fatalf("%v", err)
}
- atExit(tracepkg.Stop)
+ base.AtExit(tracepkg.Stop)
}
diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go
index c0b05035f0..4e2f205312 100644
--- a/src/cmd/compile/internal/gc/typecheck.go
+++ b/src/cmd/compile/internal/gc/typecheck.go
@@ -5,27 +5,30 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"fmt"
+ "go/constant"
+ "go/token"
"strings"
)
// To enable tracing support (-t flag), set enableTrace to true.
const enableTrace = false
-var trace bool
var traceIndent []byte
var skipDowidthForTracing bool
-func tracePrint(title string, n *Node) func(np **Node) {
+func tracePrint(title string, n ir.Node) func(np *ir.Node) {
indent := traceIndent
// guard against nil
var pos, op string
var tc uint8
if n != nil {
- pos = linestr(n.Pos)
- op = n.Op.String()
+ pos = base.FmtPos(n.Pos())
+ op = n.Op().String()
tc = n.Typecheck()
}
@@ -34,7 +37,7 @@ func tracePrint(title string, n *Node) func(np **Node) {
fmt.Printf("%s: %s%s %p %s %v tc=%d\n", pos, indent, title, n, op, n, tc)
traceIndent = append(traceIndent, ". "...)
- return func(np **Node) {
+ return func(np *ir.Node) {
traceIndent = traceIndent[:len(traceIndent)-2]
// if we have a result, use that
@@ -47,10 +50,10 @@ func tracePrint(title string, n *Node) func(np **Node) {
var tc uint8
var typ *types.Type
if n != nil {
- pos = linestr(n.Pos)
- op = n.Op.String()
+ pos = base.FmtPos(n.Pos())
+ op = n.Op().String()
tc = n.Typecheck()
- typ = n.Type
+ typ = n.Type()
}
skipDowidthForTracing = true
@@ -74,22 +77,22 @@ const (
// marks variables that escape the local frame.
// rewrites n.Op to be more specific in some cases.
-var typecheckdefstack []*Node
+var typecheckdefstack []ir.Node
// resolve ONONAME to definition, if any.
-func resolve(n *Node) (res *Node) {
- if n == nil || n.Op != ONONAME {
+func resolve(n ir.Node) (res ir.Node) {
+ if n == nil || n.Op() != ir.ONONAME {
return n
}
// only trace if there's work to do
- if enableTrace && trace {
+ if enableTrace && base.Flag.LowerT {
defer tracePrint("resolve", n)(&res)
}
- if n.Sym.Pkg != localpkg {
+ if n.Sym().Pkg != ir.LocalPkg {
if inimport {
- Fatalf("recursive inimport")
+ base.Fatalf("recursive inimport")
}
inimport = true
expandDecl(n)
@@ -97,12 +100,12 @@ func resolve(n *Node) (res *Node) {
return n
}
- r := asNode(n.Sym.Def)
+ r := ir.AsNode(n.Sym().Def)
if r == nil {
return n
}
- if r.Op == OIOTA {
+ if r.Op() == ir.OIOTA {
if x := getIotaValue(); x >= 0 {
return nodintconst(x)
}
@@ -112,41 +115,41 @@ func resolve(n *Node) (res *Node) {
return r
}
-func typecheckslice(l []*Node, top int) {
+func typecheckslice(l []ir.Node, top int) {
for i := range l {
l[i] = typecheck(l[i], top)
}
}
var _typekind = []string{
- TINT: "int",
- TUINT: "uint",
- TINT8: "int8",
- TUINT8: "uint8",
- TINT16: "int16",
- TUINT16: "uint16",
- TINT32: "int32",
- TUINT32: "uint32",
- TINT64: "int64",
- TUINT64: "uint64",
- TUINTPTR: "uintptr",
- TCOMPLEX64: "complex64",
- TCOMPLEX128: "complex128",
- TFLOAT32: "float32",
- TFLOAT64: "float64",
- TBOOL: "bool",
- TSTRING: "string",
- TPTR: "pointer",
- TUNSAFEPTR: "unsafe.Pointer",
- TSTRUCT: "struct",
- TINTER: "interface",
- TCHAN: "chan",
- TMAP: "map",
- TARRAY: "array",
- TSLICE: "slice",
- TFUNC: "func",
- TNIL: "nil",
- TIDEAL: "untyped number",
+ types.TINT: "int",
+ types.TUINT: "uint",
+ types.TINT8: "int8",
+ types.TUINT8: "uint8",
+ types.TINT16: "int16",
+ types.TUINT16: "uint16",
+ types.TINT32: "int32",
+ types.TUINT32: "uint32",
+ types.TINT64: "int64",
+ types.TUINT64: "uint64",
+ types.TUINTPTR: "uintptr",
+ types.TCOMPLEX64: "complex64",
+ types.TCOMPLEX128: "complex128",
+ types.TFLOAT32: "float32",
+ types.TFLOAT64: "float64",
+ types.TBOOL: "bool",
+ types.TSTRING: "string",
+ types.TPTR: "pointer",
+ types.TUNSAFEPTR: "unsafe.Pointer",
+ types.TSTRUCT: "struct",
+ types.TINTER: "interface",
+ types.TCHAN: "chan",
+ types.TMAP: "map",
+ types.TARRAY: "array",
+ types.TSLICE: "slice",
+ types.TFUNC: "func",
+ types.TNIL: "nil",
+ types.TIDEAL: "untyped number",
}
func typekind(t *types.Type) string {
@@ -163,7 +166,7 @@ func typekind(t *types.Type) string {
return fmt.Sprintf("etype=%d", et)
}
-func cycleFor(start *Node) []*Node {
+func cycleFor(start ir.Node) []ir.Node {
// Find the start node in typecheck_tcstack.
// We know that it must exist because each time we mark
// a node with n.SetTypecheck(2) we push it on the stack,
@@ -176,9 +179,9 @@ func cycleFor(start *Node) []*Node {
}
// collect all nodes with same Op
- var cycle []*Node
+ var cycle []ir.Node
for _, n := range typecheck_tcstack[i:] {
- if n.Op == start.Op {
+ if n.Op() == start.Op() {
cycle = append(cycle, n)
}
}
@@ -186,23 +189,23 @@ func cycleFor(start *Node) []*Node {
return cycle
}
-func cycleTrace(cycle []*Node) string {
+func cycleTrace(cycle []ir.Node) string {
var s string
for i, n := range cycle {
- s += fmt.Sprintf("\n\t%v: %v uses %v", n.Line(), n, cycle[(i+1)%len(cycle)])
+ s += fmt.Sprintf("\n\t%v: %v uses %v", ir.Line(n), n, cycle[(i+1)%len(cycle)])
}
return s
}
-var typecheck_tcstack []*Node
+var typecheck_tcstack []ir.Node
// typecheck type checks node n.
// The result of typecheck MUST be assigned back to n, e.g.
// n.Left = typecheck(n.Left, top)
-func typecheck(n *Node, top int) (res *Node) {
+func typecheck(n ir.Node, top int) (res ir.Node) {
// cannot type check until all the source has been parsed
if !typecheckok {
- Fatalf("early typecheck")
+ base.Fatalf("early typecheck")
}
if n == nil {
@@ -210,15 +213,15 @@ func typecheck(n *Node, top int) (res *Node) {
}
// only trace if there's work to do
- if enableTrace && trace {
+ if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheck", n)(&res)
}
lno := setlineno(n)
// Skip over parens.
- for n.Op == OPAREN {
- n = n.Left
+ for n.Op() == ir.OPAREN {
+ n = n.Left()
}
// Resolve definition of name and value of iota lazily.
@@ -227,12 +230,12 @@ func typecheck(n *Node, top int) (res *Node) {
// Skip typecheck if already done.
// But re-typecheck ONAME/OTYPE/OLITERAL/OPACK node in case context has changed.
if n.Typecheck() == 1 {
- switch n.Op {
- case ONAME, OTYPE, OLITERAL, OPACK:
+ switch n.Op() {
+ case ir.ONAME, ir.OTYPE, ir.OLITERAL, ir.OPACK:
break
default:
- lineno = lno
+ base.Pos = lno
return n
}
}
@@ -240,14 +243,14 @@ func typecheck(n *Node, top int) (res *Node) {
if n.Typecheck() == 2 {
// Typechecking loop. Try printing a meaningful message,
// otherwise a stack trace of typechecking.
- switch n.Op {
+ switch n.Op() {
// We can already diagnose variables used as types.
- case ONAME:
+ case ir.ONAME:
if top&(ctxExpr|ctxType) == ctxType {
- yyerror("%v is not a type", n)
+ base.Errorf("%v is not a type", n)
}
- case OTYPE:
+ case ir.OTYPE:
// Only report a type cycle if we are expecting a type.
// Otherwise let other code report an error.
if top&ctxType == ctxType {
@@ -256,40 +259,40 @@ func typecheck(n *Node, top int) (res *Node) {
// are substituted.
cycle := cycleFor(n)
for _, n1 := range cycle {
- if n1.Name != nil && !n1.Name.Param.Alias() {
+ if n1.Name() != nil && !n1.Name().Param.Alias() {
// Cycle is ok. But if n is an alias type and doesn't
// have a type yet, we have a recursive type declaration
// with aliases that we can't handle properly yet.
// Report an error rather than crashing later.
- if n.Name != nil && n.Name.Param.Alias() && n.Type == nil {
- lineno = n.Pos
- Fatalf("cannot handle alias type declaration (issue #25838): %v", n)
+ if n.Name() != nil && n.Name().Param.Alias() && n.Type() == nil {
+ base.Pos = n.Pos()
+ base.Fatalf("cannot handle alias type declaration (issue #25838): %v", n)
}
- lineno = lno
+ base.Pos = lno
return n
}
}
- yyerrorl(n.Pos, "invalid recursive type alias %v%s", n, cycleTrace(cycle))
+ base.ErrorfAt(n.Pos(), "invalid recursive type alias %v%s", n, cycleTrace(cycle))
}
- case OLITERAL:
+ case ir.OLITERAL:
if top&(ctxExpr|ctxType) == ctxType {
- yyerror("%v is not a type", n)
+ base.Errorf("%v is not a type", n)
break
}
- yyerrorl(n.Pos, "constant definition loop%s", cycleTrace(cycleFor(n)))
+ base.ErrorfAt(n.Pos(), "constant definition loop%s", cycleTrace(cycleFor(n)))
}
- if nsavederrors+nerrors == 0 {
+ if base.Errors() == 0 {
var trace string
for i := len(typecheck_tcstack) - 1; i >= 0; i-- {
x := typecheck_tcstack[i]
- trace += fmt.Sprintf("\n\t%v %v", x.Line(), x)
+ trace += fmt.Sprintf("\n\t%v %v", ir.Line(x), x)
}
- yyerror("typechecking loop involving %v%s", n, trace)
+ base.Errorf("typechecking loop involving %v%s", n, trace)
}
- lineno = lno
+ base.Pos = lno
return n
}
@@ -304,7 +307,7 @@ func typecheck(n *Node, top int) (res *Node) {
typecheck_tcstack[last] = nil
typecheck_tcstack = typecheck_tcstack[:last]
- lineno = lno
+ base.Pos = lno
return n
}
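As the doc comment above notes, typecheck may return a different node than it was given (later in this patch, for example, OCALL of a type becomes OCONV), so callers must store the result back. A minimal sketch of the calling convention in the accessor style this change introduces (illustrative fragment, not code from the patch):

    // Correct: keep the returned node; any rewrite is preserved.
    n.SetLeft(typecheck(n.Left(), ctxExpr))

    // Incorrect: a replacement node computed by typecheck is silently dropped.
    typecheck(n.Left(), ctxExpr)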
@@ -314,61 +317,61 @@ func typecheck(n *Node, top int) (res *Node) {
// value of type int (see also checkmake for comparison).
// The result of indexlit MUST be assigned back to n, e.g.
// n.Left = indexlit(n.Left)
-func indexlit(n *Node) *Node {
- if n != nil && n.Type != nil && n.Type.Etype == TIDEAL {
- return defaultlit(n, types.Types[TINT])
+func indexlit(n ir.Node) ir.Node {
+ if n != nil && n.Type() != nil && n.Type().Etype == types.TIDEAL {
+ return defaultlit(n, types.Types[types.TINT])
}
return n
}
// The result of typecheck1 MUST be assigned back to n, e.g.
// n.Left = typecheck1(n.Left, top)
-func typecheck1(n *Node, top int) (res *Node) {
- if enableTrace && trace {
+func typecheck1(n ir.Node, top int) (res ir.Node) {
+ if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheck1", n)(&res)
}
- switch n.Op {
- case OLITERAL, ONAME, ONONAME, OTYPE:
- if n.Sym == nil {
+ switch n.Op() {
+ case ir.OLITERAL, ir.ONAME, ir.ONONAME, ir.OTYPE:
+ if n.Sym() == nil {
break
}
- if n.Op == ONAME && n.SubOp() != 0 && top&ctxCallee == 0 {
- yyerror("use of builtin %v not in function call", n.Sym)
- n.Type = nil
+ if n.Op() == ir.ONAME && n.SubOp() != 0 && top&ctxCallee == 0 {
+ base.Errorf("use of builtin %v not in function call", n.Sym())
+ n.SetType(nil)
return n
}
typecheckdef(n)
- if n.Op == ONONAME {
- n.Type = nil
+ if n.Op() == ir.ONONAME {
+ n.SetType(nil)
return n
}
}
ok := 0
- switch n.Op {
+ switch n.Op() {
// until typecheck is complete, do nothing.
default:
- Dump("typecheck", n)
+ ir.Dump("typecheck", n)
- Fatalf("typecheck %v", n.Op)
+ base.Fatalf("typecheck %v", n.Op())
// names
- case OLITERAL:
+ case ir.OLITERAL:
ok |= ctxExpr
- if n.Type == nil && n.Val().Ctype() == CTSTR {
- n.Type = types.UntypedString
+ if n.Type() == nil && n.Val().Kind() == constant.String {
+ base.Fatalf("string literal missing type")
}
- case ONONAME:
+ case ir.ONIL, ir.ONONAME:
ok |= ctxExpr
- case ONAME:
- if n.Name.Decldepth == 0 {
- n.Name.Decldepth = decldepth
+ case ir.ONAME:
+ if n.Name().Decldepth == 0 {
+ n.Name().Decldepth = decldepth
}
if n.SubOp() != 0 {
ok |= ctxCallee
@@ -377,164 +380,165 @@ func typecheck1(n *Node, top int) (res *Node) {
if top&ctxAssign == 0 {
// not a write to the variable
- if n.isBlank() {
- yyerror("cannot use _ as value")
- n.Type = nil
+ if ir.IsBlank(n) {
+ base.Errorf("cannot use _ as value")
+ n.SetType(nil)
return n
}
- n.Name.SetUsed(true)
+ n.Name().SetUsed(true)
}
ok |= ctxExpr
- case OPACK:
- yyerror("use of package %v without selector", n.Sym)
- n.Type = nil
+ case ir.OPACK:
+ base.Errorf("use of package %v without selector", n.Sym())
+ n.SetType(nil)
return n
- case ODDD:
+ case ir.ODDD:
break
// types (ODEREF is with exprs)
- case OTYPE:
+ case ir.OTYPE:
ok |= ctxType
- if n.Type == nil {
+ if n.Type() == nil {
return n
}
- case OTARRAY:
+ case ir.OTARRAY:
ok |= ctxType
- r := typecheck(n.Right, ctxType)
- if r.Type == nil {
- n.Type = nil
+ r := typecheck(n.Right(), ctxType)
+ if r.Type() == nil {
+ n.SetType(nil)
return n
}
var t *types.Type
- if n.Left == nil {
- t = types.NewSlice(r.Type)
- } else if n.Left.Op == ODDD {
+ if n.Left() == nil {
+ t = types.NewSlice(r.Type())
+ } else if n.Left().Op() == ir.ODDD {
if !n.Diag() {
n.SetDiag(true)
- yyerror("use of [...] array outside of array literal")
+ base.Errorf("use of [...] array outside of array literal")
}
- n.Type = nil
+ n.SetType(nil)
return n
} else {
- n.Left = indexlit(typecheck(n.Left, ctxExpr))
- l := n.Left
- if consttype(l) != CTINT {
+ n.SetLeft(indexlit(typecheck(n.Left(), ctxExpr)))
+ l := n.Left()
+ if ir.ConstType(l) != constant.Int {
switch {
- case l.Type == nil:
+ case l.Type() == nil:
// Error already reported elsewhere.
- case l.Type.IsInteger() && l.Op != OLITERAL:
- yyerror("non-constant array bound %v", l)
+ case l.Type().IsInteger() && l.Op() != ir.OLITERAL:
+ base.Errorf("non-constant array bound %v", l)
default:
- yyerror("invalid array bound %v", l)
+ base.Errorf("invalid array bound %v", l)
}
- n.Type = nil
+ n.SetType(nil)
return n
}
v := l.Val()
- if doesoverflow(v, types.Types[TINT]) {
- yyerror("array bound is too large")
- n.Type = nil
+ if doesoverflow(v, types.Types[types.TINT]) {
+ base.Errorf("array bound is too large")
+ n.SetType(nil)
return n
}
- bound := v.U.(*Mpint).Int64()
- if bound < 0 {
- yyerror("array bound must be non-negative")
- n.Type = nil
+ if constant.Sign(v) < 0 {
+ base.Errorf("array bound must be non-negative")
+ n.SetType(nil)
return n
}
- t = types.NewArray(r.Type, bound)
+
+ bound, _ := constant.Int64Val(v)
+ t = types.NewArray(r.Type(), bound)
}
setTypeNode(n, t)
- n.Left = nil
- n.Right = nil
+ n.SetLeft(nil)
+ n.SetRight(nil)
checkwidth(t)
- case OTMAP:
+ case ir.OTMAP:
ok |= ctxType
- n.Left = typecheck(n.Left, ctxType)
- n.Right = typecheck(n.Right, ctxType)
- l := n.Left
- r := n.Right
- if l.Type == nil || r.Type == nil {
- n.Type = nil
+ n.SetLeft(typecheck(n.Left(), ctxType))
+ n.SetRight(typecheck(n.Right(), ctxType))
+ l := n.Left()
+ r := n.Right()
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
return n
}
- if l.Type.NotInHeap() {
- yyerror("incomplete (or unallocatable) map key not allowed")
+ if l.Type().NotInHeap() {
+ base.Errorf("incomplete (or unallocatable) map key not allowed")
}
- if r.Type.NotInHeap() {
- yyerror("incomplete (or unallocatable) map value not allowed")
+ if r.Type().NotInHeap() {
+ base.Errorf("incomplete (or unallocatable) map value not allowed")
}
- setTypeNode(n, types.NewMap(l.Type, r.Type))
+ setTypeNode(n, types.NewMap(l.Type(), r.Type()))
mapqueue = append(mapqueue, n) // check map keys when all types are settled
- n.Left = nil
- n.Right = nil
+ n.SetLeft(nil)
+ n.SetRight(nil)
- case OTCHAN:
+ case ir.OTCHAN:
ok |= ctxType
- n.Left = typecheck(n.Left, ctxType)
- l := n.Left
- if l.Type == nil {
- n.Type = nil
+ n.SetLeft(typecheck(n.Left(), ctxType))
+ l := n.Left()
+ if l.Type() == nil {
+ n.SetType(nil)
return n
}
- if l.Type.NotInHeap() {
- yyerror("chan of incomplete (or unallocatable) type not allowed")
+ if l.Type().NotInHeap() {
+ base.Errorf("chan of incomplete (or unallocatable) type not allowed")
}
- setTypeNode(n, types.NewChan(l.Type, n.TChanDir()))
- n.Left = nil
+ setTypeNode(n, types.NewChan(l.Type(), n.TChanDir()))
+ n.SetLeft(nil)
n.ResetAux()
- case OTSTRUCT:
+ case ir.OTSTRUCT:
ok |= ctxType
- setTypeNode(n, tostruct(n.List.Slice()))
- n.List.Set(nil)
+ setTypeNode(n, tostruct(n.List().Slice()))
+ n.PtrList().Set(nil)
- case OTINTER:
+ case ir.OTINTER:
ok |= ctxType
- setTypeNode(n, tointerface(n.List.Slice()))
+ setTypeNode(n, tointerface(n.List().Slice()))
- case OTFUNC:
+ case ir.OTFUNC:
ok |= ctxType
- setTypeNode(n, functype(n.Left, n.List.Slice(), n.Rlist.Slice()))
- n.Left = nil
- n.List.Set(nil)
- n.Rlist.Set(nil)
+ setTypeNode(n, functype(n.Left(), n.List().Slice(), n.Rlist().Slice()))
+ n.SetLeft(nil)
+ n.PtrList().Set(nil)
+ n.PtrRlist().Set(nil)
// type or expr
- case ODEREF:
- n.Left = typecheck(n.Left, ctxExpr|ctxType)
- l := n.Left
- t := l.Type
+ case ir.ODEREF:
+ n.SetLeft(typecheck(n.Left(), ctxExpr|ctxType))
+ l := n.Left()
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
- if l.Op == OTYPE {
+ if l.Op() == ir.OTYPE {
ok |= ctxType
- setTypeNode(n, types.NewPtr(l.Type))
- n.Left = nil
+ setTypeNode(n, types.NewPtr(l.Type()))
+ n.SetLeft(nil)
// Ensure l.Type gets dowidth'd for the backend. Issue 20174.
- checkwidth(l.Type)
+ checkwidth(l.Type())
break
}
if !t.IsPtr() {
if top&(ctxExpr|ctxStmt) != 0 {
- yyerror("invalid indirect of %L", n.Left)
- n.Type = nil
+ base.Errorf("invalid indirect of %L", n.Left())
+ n.SetType(nil)
return n
}
@@ -542,88 +546,88 @@ func typecheck1(n *Node, top int) (res *Node) {
}
ok |= ctxExpr
- n.Type = t.Elem()
+ n.SetType(t.Elem())
// arithmetic exprs
- case OASOP,
- OADD,
- OAND,
- OANDAND,
- OANDNOT,
- ODIV,
- OEQ,
- OGE,
- OGT,
- OLE,
- OLT,
- OLSH,
- ORSH,
- OMOD,
- OMUL,
- ONE,
- OOR,
- OOROR,
- OSUB,
- OXOR:
- var l *Node
- var op Op
- var r *Node
- if n.Op == OASOP {
+ case ir.OASOP,
+ ir.OADD,
+ ir.OAND,
+ ir.OANDAND,
+ ir.OANDNOT,
+ ir.ODIV,
+ ir.OEQ,
+ ir.OGE,
+ ir.OGT,
+ ir.OLE,
+ ir.OLT,
+ ir.OLSH,
+ ir.ORSH,
+ ir.OMOD,
+ ir.OMUL,
+ ir.ONE,
+ ir.OOR,
+ ir.OOROR,
+ ir.OSUB,
+ ir.OXOR:
+ var l ir.Node
+ var op ir.Op
+ var r ir.Node
+ if n.Op() == ir.OASOP {
ok |= ctxStmt
- n.Left = typecheck(n.Left, ctxExpr)
- n.Right = typecheck(n.Right, ctxExpr)
- l = n.Left
- r = n.Right
- checkassign(n, n.Left)
- if l.Type == nil || r.Type == nil {
- n.Type = nil
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetRight(typecheck(n.Right(), ctxExpr))
+ l = n.Left()
+ r = n.Right()
+ checkassign(n, n.Left())
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
return n
}
- if n.Implicit() && !okforarith[l.Type.Etype] {
- yyerror("invalid operation: %v (non-numeric type %v)", n, l.Type)
- n.Type = nil
+ if n.Implicit() && !okforarith[l.Type().Etype] {
+ base.Errorf("invalid operation: %v (non-numeric type %v)", n, l.Type())
+ n.SetType(nil)
return n
}
// TODO(marvin): Fix Node.EType type union.
op = n.SubOp()
} else {
ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- n.Right = typecheck(n.Right, ctxExpr)
- l = n.Left
- r = n.Right
- if l.Type == nil || r.Type == nil {
- n.Type = nil
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetRight(typecheck(n.Right(), ctxExpr))
+ l = n.Left()
+ r = n.Right()
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
return n
}
- op = n.Op
+ op = n.Op()
}
- if op == OLSH || op == ORSH {
- r = defaultlit(r, types.Types[TUINT])
- n.Right = r
- t := r.Type
+ if op == ir.OLSH || op == ir.ORSH {
+ r = defaultlit(r, types.Types[types.TUINT])
+ n.SetRight(r)
+ t := r.Type()
if !t.IsInteger() {
- yyerror("invalid operation: %v (shift count type %v, must be integer)", n, r.Type)
- n.Type = nil
+ base.Errorf("invalid operation: %v (shift count type %v, must be integer)", n, r.Type())
+ n.SetType(nil)
return n
}
if t.IsSigned() && !langSupported(1, 13, curpkg()) {
- yyerrorv("go1.13", "invalid operation: %v (signed shift count type %v)", n, r.Type)
- n.Type = nil
+ base.ErrorfVers("go1.13", "invalid operation: %v (signed shift count type %v)", n, r.Type())
+ n.SetType(nil)
return n
}
- t = l.Type
- if t != nil && t.Etype != TIDEAL && !t.IsInteger() {
- yyerror("invalid operation: %v (shift of type %v)", n, t)
- n.Type = nil
+ t = l.Type()
+ if t != nil && t.Etype != types.TIDEAL && !t.IsInteger() {
+ base.Errorf("invalid operation: %v (shift of type %v)", n, t)
+ n.SetType(nil)
return n
}
// no defaultlit for left
// the outer context gives the type
- n.Type = l.Type
- if (l.Type == types.UntypedFloat || l.Type == types.UntypedComplex) && r.Op == OLITERAL {
- n.Type = types.UntypedInt
+ n.SetType(l.Type())
+ if (l.Type() == types.UntypedFloat || l.Type() == types.UntypedComplex) && r.Op() == ir.OLITERAL {
+ n.SetType(types.UntypedInt)
}
break
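The untyped-shift handling above (no defaultlit for the left operand, and an untyped float or complex left operand with a constant shift yielding untyped int) is what makes ordinary Go like the following legal; a small sketch assuming standard language semantics, not code from this patch:

    package main

    func main() {
        var f float64 = 1 << 2   // untyped 1 takes float64 from the context
        var g float64 = 1.0 << 2 // untyped float with integer value: also legal
        _ = f + g
    }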
@@ -632,15 +636,15 @@ func typecheck1(n *Node, top int) (res *Node) {
// For "x == x && len(s)", it's better to report that "len(s)" (type int)
// can't be used with "&&" than to report that "x == x" (type untyped bool)
// can't be converted to int (see issue #41500).
- if n.Op == OANDAND || n.Op == OOROR {
- if !n.Left.Type.IsBoolean() {
- yyerror("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(n.Left.Type))
- n.Type = nil
+ if n.Op() == ir.OANDAND || n.Op() == ir.OOROR {
+ if !n.Left().Type().IsBoolean() {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.Left().Type()))
+ n.SetType(nil)
return n
}
- if !n.Right.Type.IsBoolean() {
- yyerror("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(n.Right.Type))
- n.Type = nil
+ if !n.Right().Type().IsBoolean() {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.Right().Type()))
+ n.SetType(nil)
return n
}
}
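The operand-by-operand boolean check above is what yields the friendlier diagnostic described in issue #41500. For illustration, under standard Go semantics this program is rejected at the int operand of `&&` rather than at the comparison:

    package main

    func main() {
        var s string
        x := 0
        _ = x == x && len(s) // error: operator && not defined on int
    }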
@@ -648,22 +652,22 @@ func typecheck1(n *Node, top int) (res *Node) {
// ideal mixed with non-ideal
l, r = defaultlit2(l, r, false)
- n.Left = l
- n.Right = r
- if l.Type == nil || r.Type == nil {
- n.Type = nil
+ n.SetLeft(l)
+ n.SetRight(r)
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
return n
}
- t := l.Type
- if t.Etype == TIDEAL {
- t = r.Type
+ t := l.Type()
+ if t.Etype == types.TIDEAL {
+ t = r.Type()
}
et := t.Etype
- if et == TIDEAL {
- et = TINT
+ if et == types.TIDEAL {
+ et = types.TINT
}
- aop := OXXX
- if iscmp[n.Op] && t.Etype != TIDEAL && !types.Identical(l.Type, r.Type) {
+ aop := ir.OXXX
+ if iscmp[n.Op()] && t.Etype != types.TIDEAL && !types.Identical(l.Type(), r.Type()) {
// comparison is okay as long as one side is
// assignable to the other. convert so they have
// the same type.
@@ -672,235 +676,235 @@ func typecheck1(n *Node, top int) (res *Node) {
// in that case, check comparability of the concrete type.
// The conversion allocates, so only do it if the concrete type is huge.
converted := false
- if r.Type.Etype != TBLANK {
- aop, _ = assignop(l.Type, r.Type)
- if aop != OXXX {
- if r.Type.IsInterface() && !l.Type.IsInterface() && !IsComparable(l.Type) {
- yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type))
- n.Type = nil
+ if r.Type().Etype != types.TBLANK {
+ aop, _ = assignop(l.Type(), r.Type())
+ if aop != ir.OXXX {
+ if r.Type().IsInterface() && !l.Type().IsInterface() && !IsComparable(l.Type()) {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type()))
+ n.SetType(nil)
return n
}
- dowidth(l.Type)
- if r.Type.IsInterface() == l.Type.IsInterface() || l.Type.Width >= 1<<16 {
- l = nod(aop, l, nil)
- l.Type = r.Type
+ dowidth(l.Type())
+ if r.Type().IsInterface() == l.Type().IsInterface() || l.Type().Width >= 1<<16 {
+ l = ir.Nod(aop, l, nil)
+ l.SetType(r.Type())
l.SetTypecheck(1)
- n.Left = l
+ n.SetLeft(l)
}
- t = r.Type
+ t = r.Type()
converted = true
}
}
- if !converted && l.Type.Etype != TBLANK {
- aop, _ = assignop(r.Type, l.Type)
- if aop != OXXX {
- if l.Type.IsInterface() && !r.Type.IsInterface() && !IsComparable(r.Type) {
- yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type))
- n.Type = nil
+ if !converted && l.Type().Etype != types.TBLANK {
+ aop, _ = assignop(r.Type(), l.Type())
+ if aop != ir.OXXX {
+ if l.Type().IsInterface() && !r.Type().IsInterface() && !IsComparable(r.Type()) {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type()))
+ n.SetType(nil)
return n
}
- dowidth(r.Type)
- if r.Type.IsInterface() == l.Type.IsInterface() || r.Type.Width >= 1<<16 {
- r = nod(aop, r, nil)
- r.Type = l.Type
+ dowidth(r.Type())
+ if r.Type().IsInterface() == l.Type().IsInterface() || r.Type().Width >= 1<<16 {
+ r = ir.Nod(aop, r, nil)
+ r.SetType(l.Type())
r.SetTypecheck(1)
- n.Right = r
+ n.SetRight(r)
}
- t = l.Type
+ t = l.Type()
}
}
et = t.Etype
}
- if t.Etype != TIDEAL && !types.Identical(l.Type, r.Type) {
+ if t.Etype != types.TIDEAL && !types.Identical(l.Type(), r.Type()) {
l, r = defaultlit2(l, r, true)
- if l.Type == nil || r.Type == nil {
- n.Type = nil
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
return n
}
- if l.Type.IsInterface() == r.Type.IsInterface() || aop == 0 {
- yyerror("invalid operation: %v (mismatched types %v and %v)", n, l.Type, r.Type)
- n.Type = nil
+ if l.Type().IsInterface() == r.Type().IsInterface() || aop == 0 {
+ base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type())
+ n.SetType(nil)
return n
}
}
- if t.Etype == TIDEAL {
- t = mixUntyped(l.Type, r.Type)
+ if t.Etype == types.TIDEAL {
+ t = mixUntyped(l.Type(), r.Type())
}
if dt := defaultType(t); !okfor[op][dt.Etype] {
- yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(t))
- n.Type = nil
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(t))
+ n.SetType(nil)
return n
}
// okfor allows any array == array, map == map, func == func.
// restrict to slice/map/func == nil and nil == slice/map/func.
- if l.Type.IsArray() && !IsComparable(l.Type) {
- yyerror("invalid operation: %v (%v cannot be compared)", n, l.Type)
- n.Type = nil
+ if l.Type().IsArray() && !IsComparable(l.Type()) {
+ base.Errorf("invalid operation: %v (%v cannot be compared)", n, l.Type())
+ n.SetType(nil)
return n
}
- if l.Type.IsSlice() && !l.isNil() && !r.isNil() {
- yyerror("invalid operation: %v (slice can only be compared to nil)", n)
- n.Type = nil
+ if l.Type().IsSlice() && !ir.IsNil(l) && !ir.IsNil(r) {
+ base.Errorf("invalid operation: %v (slice can only be compared to nil)", n)
+ n.SetType(nil)
return n
}
- if l.Type.IsMap() && !l.isNil() && !r.isNil() {
- yyerror("invalid operation: %v (map can only be compared to nil)", n)
- n.Type = nil
+ if l.Type().IsMap() && !ir.IsNil(l) && !ir.IsNil(r) {
+ base.Errorf("invalid operation: %v (map can only be compared to nil)", n)
+ n.SetType(nil)
return n
}
- if l.Type.Etype == TFUNC && !l.isNil() && !r.isNil() {
- yyerror("invalid operation: %v (func can only be compared to nil)", n)
- n.Type = nil
+ if l.Type().Etype == types.TFUNC && !ir.IsNil(l) && !ir.IsNil(r) {
+ base.Errorf("invalid operation: %v (func can only be compared to nil)", n)
+ n.SetType(nil)
return n
}
- if l.Type.IsStruct() {
- if f := IncomparableField(l.Type); f != nil {
- yyerror("invalid operation: %v (struct containing %v cannot be compared)", n, f.Type)
- n.Type = nil
+ if l.Type().IsStruct() {
+ if f := IncomparableField(l.Type()); f != nil {
+ base.Errorf("invalid operation: %v (struct containing %v cannot be compared)", n, f.Type)
+ n.SetType(nil)
return n
}
}
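Taken together, these checks enforce the rule that slices, maps, and funcs compare only against nil. A quick illustration in plain Go (standard semantics, not code from this patch):

    package main

    func main() {
        var a, b []int
        _ = a == nil // ok: comparison against nil
        _ = a == b   // error: slice can only be compared to nil
    }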
- if iscmp[n.Op] {
- evconst(n)
+ if iscmp[n.Op()] {
t = types.UntypedBool
- if n.Op != OLITERAL {
+ n.SetType(t)
+ n = evalConst(n)
+ if n.Op() != ir.OLITERAL {
l, r = defaultlit2(l, r, true)
- n.Left = l
- n.Right = r
+ n.SetLeft(l)
+ n.SetRight(r)
}
}
- if et == TSTRING && n.Op == OADD {
- // create OADDSTR node with list of strings in x + y + z + (w + v) + ...
- n.Op = OADDSTR
-
- if l.Op == OADDSTR {
- n.List.Set(l.List.Slice())
+ if et == types.TSTRING && n.Op() == ir.OADD {
+ // create or update OADDSTR node with list of strings in x + y + z + (w + v) + ...
+ if l.Op() == ir.OADDSTR {
+ orig := n
+ n = l
+ n.SetPos(orig.Pos())
} else {
- n.List.Set1(l)
+ n = ir.NodAt(n.Pos(), ir.OADDSTR, nil, nil)
+ n.PtrList().Set1(l)
}
- if r.Op == OADDSTR {
- n.List.AppendNodes(&r.List)
+ if r.Op() == ir.OADDSTR {
+ n.PtrList().AppendNodes(r.PtrList())
} else {
- n.List.Append(r)
+ n.PtrList().Append(r)
}
- n.Left = nil
- n.Right = nil
}
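Conceptually, the rewrite above flattens a left-leaning tree of string additions into one node so that walk can later lower the whole chain to a single runtime concatenation instead of a cascade of binary adds. A sketch of the shape only (illustrative, not real compiler output):

    // ("a" + "b") + "c"  =>  OADDSTR with list ["a", "b", "c"]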
- if (op == ODIV || op == OMOD) && Isconst(r, CTINT) {
- if r.Val().U.(*Mpint).CmpInt64(0) == 0 {
- yyerror("division by zero")
- n.Type = nil
+ if (op == ir.ODIV || op == ir.OMOD) && ir.IsConst(r, constant.Int) {
+ if constant.Sign(r.Val()) == 0 {
+ base.Errorf("division by zero")
+ n.SetType(nil)
return n
}
}
- n.Type = t
+ n.SetType(t)
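The constant-divisor check just above rejects code like the following at compile time (plain Go, standard semantics):

    package main

    func main() {
        x := 10
        _ = x / 0 // error: division by zero
    }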
- case OBITNOT, ONEG, ONOT, OPLUS:
+ case ir.OBITNOT, ir.ONEG, ir.ONOT, ir.OPLUS:
ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- l := n.Left
- t := l.Type
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ l := n.Left()
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
- if !okfor[n.Op][defaultType(t).Etype] {
- yyerror("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(t))
- n.Type = nil
+ if !okfor[n.Op()][defaultType(t).Etype] {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(t))
+ n.SetType(nil)
return n
}
- n.Type = t
+ n.SetType(t)
// exprs
- case OADDR:
+ case ir.OADDR:
ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- if n.Left.Type == nil {
- n.Type = nil
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ if n.Left().Type() == nil {
+ n.SetType(nil)
return n
}
- switch n.Left.Op {
- case OARRAYLIT, OMAPLIT, OSLICELIT, OSTRUCTLIT:
- n.Op = OPTRLIT
+ switch n.Left().Op() {
+ case ir.OARRAYLIT, ir.OMAPLIT, ir.OSLICELIT, ir.OSTRUCTLIT:
+ n.SetOp(ir.OPTRLIT)
default:
- checklvalue(n.Left, "take the address of")
- r := outervalue(n.Left)
- if r.Op == ONAME {
- if r.Orig != r {
- Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean?
+ checklvalue(n.Left(), "take the address of")
+ r := outervalue(n.Left())
+ if r.Op() == ir.ONAME {
+ if r.Orig() != r {
+ base.Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean?
}
- r.Name.SetAddrtaken(true)
- if r.Name.IsClosureVar() && !capturevarscomplete {
+ r.Name().SetAddrtaken(true)
+ if r.Name().IsClosureVar() && !capturevarscomplete {
// Mark the original variable as Addrtaken so that capturevars
// knows not to pass it by value.
// But if the capturevars phase is complete, don't touch it,
// in case l.Name's containing function has not yet been compiled.
- r.Name.Defn.Name.SetAddrtaken(true)
+ r.Name().Defn.Name().SetAddrtaken(true)
}
}
- n.Left = defaultlit(n.Left, nil)
- if n.Left.Type == nil {
- n.Type = nil
+ n.SetLeft(defaultlit(n.Left(), nil))
+ if n.Left().Type() == nil {
+ n.SetType(nil)
return n
}
}
- n.Type = types.NewPtr(n.Left.Type)
+ n.SetType(types.NewPtr(n.Left().Type()))
- case OCOMPLIT:
+ case ir.OCOMPLIT:
ok |= ctxExpr
n = typecheckcomplit(n)
- if n.Type == nil {
+ if n.Type() == nil {
return n
}
- case OXDOT, ODOT:
- if n.Op == OXDOT {
+ case ir.OXDOT, ir.ODOT:
+ if n.Op() == ir.OXDOT {
n = adddot(n)
- n.Op = ODOT
- if n.Left == nil {
- n.Type = nil
+ n.SetOp(ir.ODOT)
+ if n.Left() == nil {
+ n.SetType(nil)
return n
}
}
- n.Left = typecheck(n.Left, ctxExpr|ctxType)
+ n.SetLeft(typecheck(n.Left(), ctxExpr|ctxType))
- n.Left = defaultlit(n.Left, nil)
+ n.SetLeft(defaultlit(n.Left(), nil))
- t := n.Left.Type
+ t := n.Left().Type()
if t == nil {
- adderrorname(n)
- n.Type = nil
+ base.UpdateErrorDot(ir.Line(n), n.Left().String(), n.String())
+ n.SetType(nil)
return n
}
- s := n.Sym
+ s := n.Sym()
- if n.Left.Op == OTYPE {
+ if n.Left().Op() == ir.OTYPE {
n = typecheckMethodExpr(n)
- if n.Type == nil {
+ if n.Type() == nil {
return n
}
ok = ctxExpr
@@ -910,16 +914,16 @@ func typecheck1(n *Node, top int) (res *Node) {
if t.IsPtr() && !t.Elem().IsInterface() {
t = t.Elem()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Op = ODOTPTR
+ n.SetOp(ir.ODOTPTR)
checkwidth(t)
}
- if n.Sym.IsBlank() {
- yyerror("cannot refer to blank field or method")
- n.Type = nil
+ if n.Sym().IsBlank() {
+ base.Errorf("cannot refer to blank field or method")
+ n.SetType(nil)
return n
}
@@ -927,29 +931,29 @@ func typecheck1(n *Node, top int) (res *Node) {
// Legitimate field or method lookup failed, try to explain the error
switch {
case t.IsEmptyInterface():
- yyerror("%v undefined (type %v is interface with no methods)", n, n.Left.Type)
+ base.Errorf("%v undefined (type %v is interface with no methods)", n, n.Left().Type())
case t.IsPtr() && t.Elem().IsInterface():
// Pointer to interface is almost always a mistake.
- yyerror("%v undefined (type %v is pointer to interface, not interface)", n, n.Left.Type)
+ base.Errorf("%v undefined (type %v is pointer to interface, not interface)", n, n.Left().Type())
case lookdot(n, t, 1) != nil:
// Field or method matches by name, but it is not exported.
- yyerror("%v undefined (cannot refer to unexported field or method %v)", n, n.Sym)
+ base.Errorf("%v undefined (cannot refer to unexported field or method %v)", n, n.Sym())
default:
if mt := lookdot(n, t, 2); mt != nil && visible(mt.Sym) { // Case-insensitive lookup.
- yyerror("%v undefined (type %v has no field or method %v, but does have %v)", n, n.Left.Type, n.Sym, mt.Sym)
+ base.Errorf("%v undefined (type %v has no field or method %v, but does have %v)", n, n.Left().Type(), n.Sym(), mt.Sym)
} else {
- yyerror("%v undefined (type %v has no field or method %v)", n, n.Left.Type, n.Sym)
+ base.Errorf("%v undefined (type %v has no field or method %v)", n, n.Left().Type(), n.Sym())
}
}
- n.Type = nil
+ n.SetType(nil)
return n
}
- switch n.Op {
- case ODOTINTER, ODOTMETH:
+ switch n.Op() {
+ case ir.ODOTINTER, ir.ODOTMETH:
if top&ctxCallee != 0 {
ok |= ctxCallee
} else {
@@ -961,76 +965,76 @@ func typecheck1(n *Node, top int) (res *Node) {
ok |= ctxExpr
}
- case ODOTTYPE:
+ case ir.ODOTTYPE:
ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- l := n.Left
- t := l.Type
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ l := n.Left()
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
if !t.IsInterface() {
- yyerror("invalid type assertion: %v (non-interface type %v on left)", n, t)
- n.Type = nil
+ base.Errorf("invalid type assertion: %v (non-interface type %v on left)", n, t)
+ n.SetType(nil)
return n
}
- if n.Right != nil {
- n.Right = typecheck(n.Right, ctxType)
- n.Type = n.Right.Type
- n.Right = nil
- if n.Type == nil {
+ if n.Right() != nil {
+ n.SetRight(typecheck(n.Right(), ctxType))
+ n.SetType(n.Right().Type())
+ n.SetRight(nil)
+ if n.Type() == nil {
return n
}
}
- if n.Type != nil && !n.Type.IsInterface() {
+ if n.Type() != nil && !n.Type().IsInterface() {
var missing, have *types.Field
var ptr int
- if !implements(n.Type, t, &missing, &have, &ptr) {
+ if !implements(n.Type(), t, &missing, &have, &ptr) {
if have != nil && have.Sym == missing.Sym {
- yyerror("impossible type assertion:\n\t%v does not implement %v (wrong type for %v method)\n"+
- "\t\thave %v%0S\n\t\twant %v%0S", n.Type, t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ base.Errorf("impossible type assertion:\n\t%v does not implement %v (wrong type for %v method)\n"+
+ "\t\thave %v%0S\n\t\twant %v%0S", n.Type(), t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
} else if ptr != 0 {
- yyerror("impossible type assertion:\n\t%v does not implement %v (%v method has pointer receiver)", n.Type, t, missing.Sym)
+ base.Errorf("impossible type assertion:\n\t%v does not implement %v (%v method has pointer receiver)", n.Type(), t, missing.Sym)
} else if have != nil {
- yyerror("impossible type assertion:\n\t%v does not implement %v (missing %v method)\n"+
- "\t\thave %v%0S\n\t\twant %v%0S", n.Type, t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ base.Errorf("impossible type assertion:\n\t%v does not implement %v (missing %v method)\n"+
+ "\t\thave %v%0S\n\t\twant %v%0S", n.Type(), t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
} else {
- yyerror("impossible type assertion:\n\t%v does not implement %v (missing %v method)", n.Type, t, missing.Sym)
+ base.Errorf("impossible type assertion:\n\t%v does not implement %v (missing %v method)", n.Type(), t, missing.Sym)
}
- n.Type = nil
+ n.SetType(nil)
return n
}
}
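The implements check above is what rejects provably impossible assertions at compile time. For example, asserting an io.Reader to a concrete type that has no Read method cannot succeed dynamically, so it fails to compile (standard Go semantics):

    package main

    import "io"

    type T struct{}

    func main() {
        var r io.Reader
        _ = r.(*T) // error: impossible type assertion:
                   // *T does not implement io.Reader (missing Read method)
    }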
- case OINDEX:
+ case ir.OINDEX:
ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- n.Left = implicitstar(n.Left)
- l := n.Left
- n.Right = typecheck(n.Right, ctxExpr)
- r := n.Right
- t := l.Type
- if t == nil || r.Type == nil {
- n.Type = nil
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ n.SetLeft(implicitstar(n.Left()))
+ l := n.Left()
+ n.SetRight(typecheck(n.Right(), ctxExpr))
+ r := n.Right()
+ t := l.Type()
+ if t == nil || r.Type() == nil {
+ n.SetType(nil)
return n
}
switch t.Etype {
default:
- yyerror("invalid operation: %v (type %v does not support indexing)", n, t)
- n.Type = nil
+ base.Errorf("invalid operation: %v (type %v does not support indexing)", n, t)
+ n.SetType(nil)
return n
- case TSTRING, TARRAY, TSLICE:
- n.Right = indexlit(n.Right)
+ case types.TSTRING, types.TARRAY, types.TSLICE:
+ n.SetRight(indexlit(n.Right()))
if t.IsString() {
- n.Type = types.Bytetype
+ n.SetType(types.Bytetype)
} else {
- n.Type = t.Elem()
+ n.SetType(t.Elem())
}
why := "string"
if t.IsArray() {
@@ -1039,275 +1043,275 @@ func typecheck1(n *Node, top int) (res *Node) {
why = "slice"
}
- if n.Right.Type != nil && !n.Right.Type.IsInteger() {
- yyerror("non-integer %s index %v", why, n.Right)
+ if n.Right().Type() != nil && !n.Right().Type().IsInteger() {
+ base.Errorf("non-integer %s index %v", why, n.Right())
break
}
- if !n.Bounded() && Isconst(n.Right, CTINT) {
- x := n.Right.Int64Val()
- if x < 0 {
- yyerror("invalid %s index %v (index must be non-negative)", why, n.Right)
- } else if t.IsArray() && x >= t.NumElem() {
- yyerror("invalid array index %v (out of bounds for %d-element array)", n.Right, t.NumElem())
- } else if Isconst(n.Left, CTSTR) && x >= int64(len(n.Left.StringVal())) {
- yyerror("invalid string index %v (out of bounds for %d-byte string)", n.Right, len(n.Left.StringVal()))
- } else if n.Right.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
- yyerror("invalid %s index %v (index too large)", why, n.Right)
+ if !n.Bounded() && ir.IsConst(n.Right(), constant.Int) {
+ x := n.Right().Val()
+ if constant.Sign(x) < 0 {
+ base.Errorf("invalid %s index %v (index must be non-negative)", why, n.Right())
+ } else if t.IsArray() && constant.Compare(x, token.GEQ, constant.MakeInt64(t.NumElem())) {
+ base.Errorf("invalid array index %v (out of bounds for %d-element array)", n.Right(), t.NumElem())
+ } else if ir.IsConst(n.Left(), constant.String) && constant.Compare(x, token.GEQ, constant.MakeInt64(int64(len(n.Left().StringVal())))) {
+ base.Errorf("invalid string index %v (out of bounds for %d-byte string)", n.Right(), len(n.Left().StringVal()))
+ } else if doesoverflow(x, types.Types[types.TINT]) {
+ base.Errorf("invalid %s index %v (index too large)", why, n.Right())
}
}
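With the switch to go/constant values and doesoverflow, these bounds checks still reject out-of-range constant indexes at compile time, for example (plain Go):

    package main

    func main() {
        var a [3]int
        const i = 5
        _ = a[i]    // error: out of bounds for 3-element array
        _ = "go"[9] // error: out of bounds for 2-byte string
    }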
- case TMAP:
- n.Right = assignconv(n.Right, t.Key(), "map index")
- n.Type = t.Elem()
- n.Op = OINDEXMAP
+ case types.TMAP:
+ n.SetRight(assignconv(n.Right(), t.Key(), "map index"))
+ n.SetType(t.Elem())
+ n.SetOp(ir.OINDEXMAP)
n.ResetAux()
}
- case ORECV:
+ case ir.ORECV:
ok |= ctxStmt | ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- l := n.Left
- t := l.Type
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ l := n.Left()
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
if !t.IsChan() {
- yyerror("invalid operation: %v (receive from non-chan type %v)", n, t)
- n.Type = nil
+ base.Errorf("invalid operation: %v (receive from non-chan type %v)", n, t)
+ n.SetType(nil)
return n
}
if !t.ChanDir().CanRecv() {
- yyerror("invalid operation: %v (receive from send-only type %v)", n, t)
- n.Type = nil
+ base.Errorf("invalid operation: %v (receive from send-only type %v)", n, t)
+ n.SetType(nil)
return n
}
- n.Type = t.Elem()
+ n.SetType(t.Elem())
- case OSEND:
+ case ir.OSEND:
ok |= ctxStmt
- n.Left = typecheck(n.Left, ctxExpr)
- n.Right = typecheck(n.Right, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- t := n.Left.Type
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetRight(typecheck(n.Right(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ t := n.Left().Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
if !t.IsChan() {
- yyerror("invalid operation: %v (send to non-chan type %v)", n, t)
- n.Type = nil
+ base.Errorf("invalid operation: %v (send to non-chan type %v)", n, t)
+ n.SetType(nil)
return n
}
if !t.ChanDir().CanSend() {
- yyerror("invalid operation: %v (send to receive-only type %v)", n, t)
- n.Type = nil
+ base.Errorf("invalid operation: %v (send to receive-only type %v)", n, t)
+ n.SetType(nil)
return n
}
- n.Right = assignconv(n.Right, t.Elem(), "send")
- if n.Right.Type == nil {
- n.Type = nil
+ n.SetRight(assignconv(n.Right(), t.Elem(), "send"))
+ if n.Right().Type() == nil {
+ n.SetType(nil)
return n
}
- n.Type = nil
+ n.SetType(nil)
- case OSLICEHEADER:
- // Errors here are Fatalf instead of yyerror because only the compiler
+ case ir.OSLICEHEADER:
+ // Errors here are Fatalf instead of Errorf because only the compiler
// can construct an OSLICEHEADER node.
// Components used in OSLICEHEADER that are supplied by parsed source code
// have already been typechecked in e.g. OMAKESLICE earlier.
ok |= ctxExpr
- t := n.Type
+ t := n.Type()
if t == nil {
- Fatalf("no type specified for OSLICEHEADER")
+ base.Fatalf("no type specified for OSLICEHEADER")
}
if !t.IsSlice() {
- Fatalf("invalid type %v for OSLICEHEADER", n.Type)
+ base.Fatalf("invalid type %v for OSLICEHEADER", n.Type())
}
- if n.Left == nil || n.Left.Type == nil || !n.Left.Type.IsUnsafePtr() {
- Fatalf("need unsafe.Pointer for OSLICEHEADER")
+ if n.Left() == nil || n.Left().Type() == nil || !n.Left().Type().IsUnsafePtr() {
+ base.Fatalf("need unsafe.Pointer for OSLICEHEADER")
}
- if x := n.List.Len(); x != 2 {
- Fatalf("expected 2 params (len, cap) for OSLICEHEADER, got %d", x)
+ if x := n.List().Len(); x != 2 {
+ base.Fatalf("expected 2 params (len, cap) for OSLICEHEADER, got %d", x)
}
- n.Left = typecheck(n.Left, ctxExpr)
- l := typecheck(n.List.First(), ctxExpr)
- c := typecheck(n.List.Second(), ctxExpr)
- l = defaultlit(l, types.Types[TINT])
- c = defaultlit(c, types.Types[TINT])
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ l := typecheck(n.List().First(), ctxExpr)
+ c := typecheck(n.List().Second(), ctxExpr)
+ l = defaultlit(l, types.Types[types.TINT])
+ c = defaultlit(c, types.Types[types.TINT])
- if Isconst(l, CTINT) && l.Int64Val() < 0 {
- Fatalf("len for OSLICEHEADER must be non-negative")
+ if ir.IsConst(l, constant.Int) && l.Int64Val() < 0 {
+ base.Fatalf("len for OSLICEHEADER must be non-negative")
}
- if Isconst(c, CTINT) && c.Int64Val() < 0 {
- Fatalf("cap for OSLICEHEADER must be non-negative")
+ if ir.IsConst(c, constant.Int) && c.Int64Val() < 0 {
+ base.Fatalf("cap for OSLICEHEADER must be non-negative")
}
- if Isconst(l, CTINT) && Isconst(c, CTINT) && l.Val().U.(*Mpint).Cmp(c.Val().U.(*Mpint)) > 0 {
- Fatalf("len larger than cap for OSLICEHEADER")
+ if ir.IsConst(l, constant.Int) && ir.IsConst(c, constant.Int) && constant.Compare(l.Val(), token.GTR, c.Val()) {
+ base.Fatalf("len larger than cap for OSLICEHEADER")
}
- n.List.SetFirst(l)
- n.List.SetSecond(c)
+ n.List().SetFirst(l)
+ n.List().SetSecond(c)
- case OMAKESLICECOPY:
- // Errors here are Fatalf instead of yyerror because only the compiler
+ case ir.OMAKESLICECOPY:
+ // Errors here are Fatalf instead of Errorf because only the compiler
// can construct an OMAKESLICECOPY node.
// Components used in OMAKESLICECOPY that are supplied by parsed source code
// have already been typechecked in OMAKE and OCOPY earlier.
ok |= ctxExpr
- t := n.Type
+ t := n.Type()
if t == nil {
- Fatalf("no type specified for OMAKESLICECOPY")
+ base.Fatalf("no type specified for OMAKESLICECOPY")
}
if !t.IsSlice() {
- Fatalf("invalid type %v for OMAKESLICECOPY", n.Type)
+ base.Fatalf("invalid type %v for OMAKESLICECOPY", n.Type())
}
- if n.Left == nil {
- Fatalf("missing len argument for OMAKESLICECOPY")
+ if n.Left() == nil {
+ base.Fatalf("missing len argument for OMAKESLICECOPY")
}
- if n.Right == nil {
- Fatalf("missing slice argument to copy for OMAKESLICECOPY")
+ if n.Right() == nil {
+ base.Fatalf("missing slice argument to copy for OMAKESLICECOPY")
}
- n.Left = typecheck(n.Left, ctxExpr)
- n.Right = typecheck(n.Right, ctxExpr)
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetRight(typecheck(n.Right(), ctxExpr))
- n.Left = defaultlit(n.Left, types.Types[TINT])
+ n.SetLeft(defaultlit(n.Left(), types.Types[types.TINT]))
- if !n.Left.Type.IsInteger() && n.Type.Etype != TIDEAL {
- yyerror("non-integer len argument in OMAKESLICECOPY")
+ if !n.Left().Type().IsInteger() && n.Type().Etype != types.TIDEAL {
+ base.Errorf("non-integer len argument in OMAKESLICECOPY")
}
- if Isconst(n.Left, CTINT) {
- if n.Left.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
- Fatalf("len for OMAKESLICECOPY too large")
+ if ir.IsConst(n.Left(), constant.Int) {
+ if doesoverflow(n.Left().Val(), types.Types[types.TINT]) {
+ base.Fatalf("len for OMAKESLICECOPY too large")
}
- if n.Left.Int64Val() < 0 {
- Fatalf("len for OMAKESLICECOPY must be non-negative")
+ if constant.Sign(n.Left().Val()) < 0 {
+ base.Fatalf("len for OMAKESLICECOPY must be non-negative")
}
}
- case OSLICE, OSLICE3:
+ case ir.OSLICE, ir.OSLICE3:
ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
low, high, max := n.SliceBounds()
- hasmax := n.Op.IsSlice3()
+ hasmax := n.Op().IsSlice3()
low = typecheck(low, ctxExpr)
high = typecheck(high, ctxExpr)
max = typecheck(max, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
+ n.SetLeft(defaultlit(n.Left(), nil))
low = indexlit(low)
high = indexlit(high)
max = indexlit(max)
n.SetSliceBounds(low, high, max)
- l := n.Left
- if l.Type == nil {
- n.Type = nil
+ l := n.Left()
+ if l.Type() == nil {
+ n.SetType(nil)
return n
}
- if l.Type.IsArray() {
- if !islvalue(n.Left) {
- yyerror("invalid operation %v (slice of unaddressable value)", n)
- n.Type = nil
+ if l.Type().IsArray() {
+ if !islvalue(n.Left()) {
+ base.Errorf("invalid operation %v (slice of unaddressable value)", n)
+ n.SetType(nil)
return n
}
- n.Left = nod(OADDR, n.Left, nil)
- n.Left.SetImplicit(true)
- n.Left = typecheck(n.Left, ctxExpr)
- l = n.Left
+ n.SetLeft(ir.Nod(ir.OADDR, n.Left(), nil))
+ n.Left().SetImplicit(true)
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ l = n.Left()
}
- t := l.Type
+ t := l.Type()
var tp *types.Type
if t.IsString() {
if hasmax {
- yyerror("invalid operation %v (3-index slice of string)", n)
- n.Type = nil
+ base.Errorf("invalid operation %v (3-index slice of string)", n)
+ n.SetType(nil)
return n
}
- n.Type = t
- n.Op = OSLICESTR
+ n.SetType(t)
+ n.SetOp(ir.OSLICESTR)
} else if t.IsPtr() && t.Elem().IsArray() {
tp = t.Elem()
- n.Type = types.NewSlice(tp.Elem())
- dowidth(n.Type)
+ n.SetType(types.NewSlice(tp.Elem()))
+ dowidth(n.Type())
if hasmax {
- n.Op = OSLICE3ARR
+ n.SetOp(ir.OSLICE3ARR)
} else {
- n.Op = OSLICEARR
+ n.SetOp(ir.OSLICEARR)
}
} else if t.IsSlice() {
- n.Type = t
+ n.SetType(t)
} else {
- yyerror("cannot slice %v (type %v)", l, t)
- n.Type = nil
+ base.Errorf("cannot slice %v (type %v)", l, t)
+ n.SetType(nil)
return n
}
if low != nil && !checksliceindex(l, low, tp) {
- n.Type = nil
+ n.SetType(nil)
return n
}
if high != nil && !checksliceindex(l, high, tp) {
- n.Type = nil
+ n.SetType(nil)
return n
}
if max != nil && !checksliceindex(l, max, tp) {
- n.Type = nil
+ n.SetType(nil)
return n
}
if !checksliceconst(low, high) || !checksliceconst(low, max) || !checksliceconst(high, max) {
- n.Type = nil
+ n.SetType(nil)
return n
}
// call and call like
- case OCALL:
- typecheckslice(n.Ninit.Slice(), ctxStmt) // imported rewritten f(g()) calls (#30907)
- n.Left = typecheck(n.Left, ctxExpr|ctxType|ctxCallee)
- if n.Left.Diag() {
+ case ir.OCALL:
+ typecheckslice(n.Init().Slice(), ctxStmt) // imported rewritten f(g()) calls (#30907)
+ n.SetLeft(typecheck(n.Left(), ctxExpr|ctxType|ctxCallee))
+ if n.Left().Diag() {
n.SetDiag(true)
}
- l := n.Left
+ l := n.Left()
- if l.Op == ONAME && l.SubOp() != 0 {
- if n.IsDDD() && l.SubOp() != OAPPEND {
- yyerror("invalid use of ... with builtin %v", l)
+ if l.Op() == ir.ONAME && l.SubOp() != 0 {
+ if n.IsDDD() && l.SubOp() != ir.OAPPEND {
+ base.Errorf("invalid use of ... with builtin %v", l)
}
// builtin: OLEN, OCAP, etc.
- n.Op = l.SubOp()
- n.Left = n.Right
- n.Right = nil
+ n.SetOp(l.SubOp())
+ n.SetLeft(n.Right())
+ n.SetRight(nil)
n = typecheck1(n, top)
return n
}
- n.Left = defaultlit(n.Left, nil)
- l = n.Left
- if l.Op == OTYPE {
+ n.SetLeft(defaultlit(n.Left(), nil))
+ l = n.Left()
+ if l.Op() == ir.OTYPE {
if n.IsDDD() {
- if !l.Type.Broke() {
- yyerror("invalid use of ... in type conversion to %v", l.Type)
+ if !l.Type().Broke() {
+ base.Errorf("invalid use of ... in type conversion to %v", l.Type())
}
n.SetDiag(true)
}
@@ -1316,12 +1320,12 @@ func typecheck1(n *Node, top int) (res *Node) {
ok |= ctxExpr
// turn CALL(type, arg) into CONV(arg) w/ type
- n.Left = nil
+ n.SetLeft(nil)
- n.Op = OCONV
- n.Type = l.Type
- if !onearg(n, "conversion to %v", l.Type) {
- n.Type = nil
+ n.SetOp(ir.OCONV)
+ n.SetType(l.Type())
+ if !onearg(n, "conversion to %v", l.Type()) {
+ n.SetType(nil)
return n
}
n = typecheck1(n, top)
@@ -1329,19 +1333,19 @@ func typecheck1(n *Node, top int) (res *Node) {
}
typecheckargs(n)
- t := l.Type
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
checkwidth(t)
- switch l.Op {
- case ODOTINTER:
- n.Op = OCALLINTER
+ switch l.Op() {
+ case ir.ODOTINTER:
+ n.SetOp(ir.OCALLINTER)
- case ODOTMETH:
- n.Op = OCALLMETH
+ case ir.ODOTMETH:
+ n.SetOp(ir.OCALLMETH)
// typecheckaste was used here but there wasn't enough
// information further down the call chain to know if we
@@ -1349,44 +1353,44 @@ func typecheck1(n *Node, top int) (res *Node) {
// It isn't necessary, so just do a sanity check.
tp := t.Recv().Type
- if l.Left == nil || !types.Identical(l.Left.Type, tp) {
- Fatalf("method receiver")
+ if l.Left() == nil || !types.Identical(l.Left().Type(), tp) {
+ base.Fatalf("method receiver")
}
default:
- n.Op = OCALLFUNC
- if t.Etype != TFUNC {
+ n.SetOp(ir.OCALLFUNC)
+ if t.Etype != types.TFUNC {
name := l.String()
- if isBuiltinFuncName(name) && l.Name.Defn != nil {
+ if isBuiltinFuncName(name) && l.Name().Defn != nil {
// be more specific when the function
// name matches a predeclared function
- yyerror("cannot call non-function %s (type %v), declared at %s",
- name, t, linestr(l.Name.Defn.Pos))
+ base.Errorf("cannot call non-function %s (type %v), declared at %s",
+ name, t, base.FmtPos(l.Name().Defn.Pos()))
} else {
- yyerror("cannot call non-function %s (type %v)", name, t)
+ base.Errorf("cannot call non-function %s (type %v)", name, t)
}
- n.Type = nil
+ n.SetType(nil)
return n
}
}
- typecheckaste(OCALL, n.Left, n.IsDDD(), t.Params(), n.List, func() string { return fmt.Sprintf("argument to %v", n.Left) })
+ typecheckaste(ir.OCALL, n.Left(), n.IsDDD(), t.Params(), n.List(), func() string { return fmt.Sprintf("argument to %v", n.Left()) })
ok |= ctxStmt
if t.NumResults() == 0 {
break
}
ok |= ctxExpr
if t.NumResults() == 1 {
- n.Type = l.Type.Results().Field(0).Type
+ n.SetType(l.Type().Results().Field(0).Type)
- if n.Op == OCALLFUNC && n.Left.Op == ONAME && isRuntimePkg(n.Left.Sym.Pkg) && n.Left.Sym.Name == "getg" {
+ if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.ONAME && isRuntimePkg(n.Left().Sym().Pkg) && n.Left().Sym().Name == "getg" {
// Emit code for runtime.getg() directly instead of calling function.
// Most such rewrites (for example the similar one for math.Sqrt) should be done in walk,
// so that the ordering pass can make sure to preserve the semantics of the original code
// (in particular, the exact time of the function call) by introducing temporaries.
// In this case, we know getg() always returns the same result within a given function
// and we want to avoid the temporaries, so we do the rewrite earlier than is typical.
- n.Op = OGETG
+ n.SetOp(ir.OGETG)
}
break
@@ -1394,228 +1398,228 @@ func typecheck1(n *Node, top int) (res *Node) {
// multiple return
if top&(ctxMultiOK|ctxStmt) == 0 {
- yyerror("multiple-value %v() in single-value context", l)
+ base.Errorf("multiple-value %v() in single-value context", l)
break
}
- n.Type = l.Type.Results()
+ n.SetType(l.Type().Results())
- case OALIGNOF, OOFFSETOF, OSIZEOF:
+ case ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
ok |= ctxExpr
- if !onearg(n, "%v", n.Op) {
- n.Type = nil
+ if !onearg(n, "%v", n.Op()) {
+ n.SetType(nil)
return n
}
- n.Type = types.Types[TUINTPTR]
+ n.SetType(types.Types[types.TUINTPTR])
- case OCAP, OLEN:
+ case ir.OCAP, ir.OLEN:
ok |= ctxExpr
- if !onearg(n, "%v", n.Op) {
- n.Type = nil
+ if !onearg(n, "%v", n.Op()) {
+ n.SetType(nil)
return n
}
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- n.Left = implicitstar(n.Left)
- l := n.Left
- t := l.Type
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ n.SetLeft(implicitstar(n.Left()))
+ l := n.Left()
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
var ok bool
- if n.Op == OLEN {
+ if n.Op() == ir.OLEN {
ok = okforlen[t.Etype]
} else {
ok = okforcap[t.Etype]
}
if !ok {
- yyerror("invalid argument %L for %v", l, n.Op)
- n.Type = nil
+ base.Errorf("invalid argument %L for %v", l, n.Op())
+ n.SetType(nil)
return n
}
- n.Type = types.Types[TINT]
+ n.SetType(types.Types[types.TINT])
- case OREAL, OIMAG:
+ case ir.OREAL, ir.OIMAG:
ok |= ctxExpr
- if !onearg(n, "%v", n.Op) {
- n.Type = nil
+ if !onearg(n, "%v", n.Op()) {
+ n.SetType(nil)
return n
}
- n.Left = typecheck(n.Left, ctxExpr)
- l := n.Left
- t := l.Type
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ l := n.Left()
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
// Determine result type.
switch t.Etype {
- case TIDEAL:
- n.Type = types.UntypedFloat
- case TCOMPLEX64:
- n.Type = types.Types[TFLOAT32]
- case TCOMPLEX128:
- n.Type = types.Types[TFLOAT64]
+ case types.TIDEAL:
+ n.SetType(types.UntypedFloat)
+ case types.TCOMPLEX64:
+ n.SetType(types.Types[types.TFLOAT32])
+ case types.TCOMPLEX128:
+ n.SetType(types.Types[types.TFLOAT64])
default:
- yyerror("invalid argument %L for %v", l, n.Op)
- n.Type = nil
+ base.Errorf("invalid argument %L for %v", l, n.Op())
+ n.SetType(nil)
return n
}
- case OCOMPLEX:
+ case ir.OCOMPLEX:
ok |= ctxExpr
typecheckargs(n)
if !twoarg(n) {
- n.Type = nil
+ n.SetType(nil)
return n
}
- l := n.Left
- r := n.Right
- if l.Type == nil || r.Type == nil {
- n.Type = nil
+ l := n.Left()
+ r := n.Right()
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
return n
}
l, r = defaultlit2(l, r, false)
- if l.Type == nil || r.Type == nil {
- n.Type = nil
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
return n
}
- n.Left = l
- n.Right = r
+ n.SetLeft(l)
+ n.SetRight(r)
- if !types.Identical(l.Type, r.Type) {
- yyerror("invalid operation: %v (mismatched types %v and %v)", n, l.Type, r.Type)
- n.Type = nil
+ if !types.Identical(l.Type(), r.Type()) {
+ base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type())
+ n.SetType(nil)
return n
}
var t *types.Type
- switch l.Type.Etype {
+ switch l.Type().Etype {
default:
- yyerror("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type)
- n.Type = nil
+ base.Errorf("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type())
+ n.SetType(nil)
return n
- case TIDEAL:
+ case types.TIDEAL:
t = types.UntypedComplex
- case TFLOAT32:
- t = types.Types[TCOMPLEX64]
+ case types.TFLOAT32:
+ t = types.Types[types.TCOMPLEX64]
- case TFLOAT64:
- t = types.Types[TCOMPLEX128]
+ case types.TFLOAT64:
+ t = types.Types[types.TCOMPLEX128]
}
- n.Type = t
+ n.SetType(t)
- case OCLOSE:
- if !onearg(n, "%v", n.Op) {
- n.Type = nil
+ case ir.OCLOSE:
+ if !onearg(n, "%v", n.Op()) {
+ n.SetType(nil)
return n
}
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- l := n.Left
- t := l.Type
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ l := n.Left()
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
if !t.IsChan() {
- yyerror("invalid operation: %v (non-chan type %v)", n, t)
- n.Type = nil
+ base.Errorf("invalid operation: %v (non-chan type %v)", n, t)
+ n.SetType(nil)
return n
}
if !t.ChanDir().CanSend() {
- yyerror("invalid operation: %v (cannot close receive-only channel)", n)
- n.Type = nil
+ base.Errorf("invalid operation: %v (cannot close receive-only channel)", n)
+ n.SetType(nil)
return n
}
ok |= ctxStmt
- case ODELETE:
+ case ir.ODELETE:
ok |= ctxStmt
typecheckargs(n)
- args := n.List
+ args := n.List()
if args.Len() == 0 {
- yyerror("missing arguments to delete")
- n.Type = nil
+ base.Errorf("missing arguments to delete")
+ n.SetType(nil)
return n
}
if args.Len() == 1 {
- yyerror("missing second (key) argument to delete")
- n.Type = nil
+ base.Errorf("missing second (key) argument to delete")
+ n.SetType(nil)
return n
}
if args.Len() != 2 {
- yyerror("too many arguments to delete")
- n.Type = nil
+ base.Errorf("too many arguments to delete")
+ n.SetType(nil)
return n
}
l := args.First()
r := args.Second()
- if l.Type != nil && !l.Type.IsMap() {
- yyerror("first argument to delete must be map; have %L", l.Type)
- n.Type = nil
+ if l.Type() != nil && !l.Type().IsMap() {
+ base.Errorf("first argument to delete must be map; have %L", l.Type())
+ n.SetType(nil)
return n
}
- args.SetSecond(assignconv(r, l.Type.Key(), "delete"))
+ args.SetSecond(assignconv(r, l.Type().Key(), "delete"))
- case OAPPEND:
+ case ir.OAPPEND:
ok |= ctxExpr
typecheckargs(n)
- args := n.List
+ args := n.List()
if args.Len() == 0 {
- yyerror("missing arguments to append")
- n.Type = nil
+ base.Errorf("missing arguments to append")
+ n.SetType(nil)
return n
}
- t := args.First().Type
+ t := args.First().Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Type = t
+ n.SetType(t)
if !t.IsSlice() {
- if Isconst(args.First(), CTNIL) {
- yyerror("first argument to append must be typed slice; have untyped nil")
- n.Type = nil
+ if ir.IsNil(args.First()) {
+ base.Errorf("first argument to append must be typed slice; have untyped nil")
+ n.SetType(nil)
return n
}
- yyerror("first argument to append must be slice; have %L", t)
- n.Type = nil
+ base.Errorf("first argument to append must be slice; have %L", t)
+ n.SetType(nil)
return n
}
if n.IsDDD() {
if args.Len() == 1 {
- yyerror("cannot use ... on first argument to append")
- n.Type = nil
+ base.Errorf("cannot use ... on first argument to append")
+ n.SetType(nil)
return n
}
if args.Len() != 2 {
- yyerror("too many arguments to append")
- n.Type = nil
+ base.Errorf("too many arguments to append")
+ n.SetType(nil)
return n
}
- if t.Elem().IsKind(TUINT8) && args.Second().Type.IsString() {
- args.SetSecond(defaultlit(args.Second(), types.Types[TSTRING]))
+ if t.Elem().IsKind(types.TUINT8) && args.Second().Type().IsString() {
+ args.SetSecond(defaultlit(args.Second(), types.Types[types.TSTRING]))
break
}
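The TUINT8/string special case above is what permits the familiar form of appending a string's bytes to a byte slice (standard Go semantics):

    package main

    func main() {
        b := []byte("foo")
        b = append(b, "bar"...) // string source allowed when the element type is byte
        _ = b
    }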
@@ -1625,464 +1629,464 @@ func typecheck1(n *Node, top int) (res *Node) {
as := args.Slice()[1:]
for i, n := range as {
- if n.Type == nil {
+ if n.Type() == nil {
continue
}
as[i] = assignconv(n, t.Elem(), "append")
- checkwidth(as[i].Type) // ensure width is calculated for backend
+ checkwidth(as[i].Type()) // ensure width is calculated for backend
}
- case OCOPY:
+ case ir.OCOPY:
ok |= ctxStmt | ctxExpr
typecheckargs(n)
if !twoarg(n) {
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Type = types.Types[TINT]
- if n.Left.Type == nil || n.Right.Type == nil {
- n.Type = nil
+ n.SetType(types.Types[types.TINT])
+ if n.Left().Type() == nil || n.Right().Type() == nil {
+ n.SetType(nil)
return n
}
- n.Left = defaultlit(n.Left, nil)
- n.Right = defaultlit(n.Right, nil)
- if n.Left.Type == nil || n.Right.Type == nil {
- n.Type = nil
+ n.SetLeft(defaultlit(n.Left(), nil))
+ n.SetRight(defaultlit(n.Right(), nil))
+ if n.Left().Type() == nil || n.Right().Type() == nil {
+ n.SetType(nil)
return n
}
// copy([]byte, string)
- if n.Left.Type.IsSlice() && n.Right.Type.IsString() {
- if types.Identical(n.Left.Type.Elem(), types.Bytetype) {
+ if n.Left().Type().IsSlice() && n.Right().Type().IsString() {
+ if types.Identical(n.Left().Type().Elem(), types.Bytetype) {
break
}
- yyerror("arguments to copy have different element types: %L and string", n.Left.Type)
- n.Type = nil
+ base.Errorf("arguments to copy have different element types: %L and string", n.Left().Type())
+ n.SetType(nil)
return n
}
- if !n.Left.Type.IsSlice() || !n.Right.Type.IsSlice() {
- if !n.Left.Type.IsSlice() && !n.Right.Type.IsSlice() {
- yyerror("arguments to copy must be slices; have %L, %L", n.Left.Type, n.Right.Type)
- } else if !n.Left.Type.IsSlice() {
- yyerror("first argument to copy should be slice; have %L", n.Left.Type)
+ if !n.Left().Type().IsSlice() || !n.Right().Type().IsSlice() {
+ if !n.Left().Type().IsSlice() && !n.Right().Type().IsSlice() {
+ base.Errorf("arguments to copy must be slices; have %L, %L", n.Left().Type(), n.Right().Type())
+ } else if !n.Left().Type().IsSlice() {
+ base.Errorf("first argument to copy should be slice; have %L", n.Left().Type())
} else {
- yyerror("second argument to copy should be slice or string; have %L", n.Right.Type)
+ base.Errorf("second argument to copy should be slice or string; have %L", n.Right().Type())
}
- n.Type = nil
+ n.SetType(nil)
return n
}
- if !types.Identical(n.Left.Type.Elem(), n.Right.Type.Elem()) {
- yyerror("arguments to copy have different element types: %L and %L", n.Left.Type, n.Right.Type)
- n.Type = nil
+ if !types.Identical(n.Left().Type().Elem(), n.Right().Type().Elem()) {
+ base.Errorf("arguments to copy have different element types: %L and %L", n.Left().Type(), n.Right().Type())
+ n.SetType(nil)
return n
}
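The copy([]byte, string) carve-out handled first in this case corresponds to ordinary Go like the following (illustrative, standard semantics):

    package main

    func main() {
        dst := make([]byte, 5)
        n := copy(dst, "hello") // string source into a byte slice
        _ = n
    }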
- case OCONV:
+ case ir.OCONV:
ok |= ctxExpr
- checkwidth(n.Type) // ensure width is calculated for backend
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = convlit1(n.Left, n.Type, true, nil)
- t := n.Left.Type
- if t == nil || n.Type == nil {
- n.Type = nil
- return n
- }
- var why string
- n.Op, why = convertop(n.Left.Op == OLITERAL, t, n.Type)
- if n.Op == OXXX {
- if !n.Diag() && !n.Type.Broke() && !n.Left.Diag() {
- yyerror("cannot convert %L to type %v%s", n.Left, n.Type, why)
+ checkwidth(n.Type()) // ensure width is calculated for backend
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(convlit1(n.Left(), n.Type(), true, nil))
+ t := n.Left().Type()
+ if t == nil || n.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ op, why := convertop(n.Left().Op() == ir.OLITERAL, t, n.Type())
+ n.SetOp(op)
+ if n.Op() == ir.OXXX {
+ if !n.Diag() && !n.Type().Broke() && !n.Left().Diag() {
+ base.Errorf("cannot convert %L to type %v%s", n.Left(), n.Type(), why)
n.SetDiag(true)
}
- n.Op = OCONV
- n.Type = nil
+ n.SetOp(ir.OCONV)
+ n.SetType(nil)
return n
}
- switch n.Op {
- case OCONVNOP:
- if t.Etype == n.Type.Etype {
+ switch n.Op() {
+ case ir.OCONVNOP:
+ if t.Etype == n.Type().Etype {
switch t.Etype {
- case TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128:
+ case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128:
// Floating point casts imply rounding and
// so the conversion must be kept.
- n.Op = OCONV
+ n.SetOp(ir.OCONV)
}
}
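The rounding comment reflects the spec rule that a conversion to a floating-point type rounds its operand; keeping the OCONV matters on targets whose registers carry extra precision (historically the 387 floating-point mode), where even a same-width conversion must truncate to 64 bits. A fragment under that assumption:

    var x, y float64 = 1e16, 3
    z := float64(x / y) // explicit conversion forces rounding to float64 precision
    _ = z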
// do not convert to []byte literal. See CL 125796.
// generated code and compiler memory footprint are better without it.
- case OSTR2BYTES:
+ case ir.OSTR2BYTES:
break
- case OSTR2RUNES:
- if n.Left.Op == OLITERAL {
+ case ir.OSTR2RUNES:
+ if n.Left().Op() == ir.OLITERAL {
n = stringtoruneslit(n)
}
}
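// One non-mechanical detail in the OCONV case above: a tuple assignment
// into node fields no longer works through the interface, so the result
// of convertop now goes through a temporary:
//
//	n.Op, why = convertop(n.Left.Op == OLITERAL, t, n.Type)   // old
//
//	op, why := convertop(n.Left().Op() == ir.OLITERAL, t, n.Type())   // new
//	n.SetOp(op)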
- case OMAKE:
+ case ir.OMAKE:
ok |= ctxExpr
- args := n.List.Slice()
+ args := n.List().Slice()
if len(args) == 0 {
- yyerror("missing argument to make")
- n.Type = nil
+ base.Errorf("missing argument to make")
+ n.SetType(nil)
return n
}
- n.List.Set(nil)
+ n.PtrList().Set(nil)
l := args[0]
l = typecheck(l, ctxType)
- t := l.Type
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
i := 1
switch t.Etype {
default:
- yyerror("cannot make type %v", t)
- n.Type = nil
+ base.Errorf("cannot make type %v", t)
+ n.SetType(nil)
return n
- case TSLICE:
+ case types.TSLICE:
if i >= len(args) {
- yyerror("missing len argument to make(%v)", t)
- n.Type = nil
+ base.Errorf("missing len argument to make(%v)", t)
+ n.SetType(nil)
return n
}
l = args[i]
i++
l = typecheck(l, ctxExpr)
- var r *Node
+ var r ir.Node
if i < len(args) {
r = args[i]
i++
r = typecheck(r, ctxExpr)
}
- if l.Type == nil || (r != nil && r.Type == nil) {
- n.Type = nil
+ if l.Type() == nil || (r != nil && r.Type() == nil) {
+ n.SetType(nil)
return n
}
if !checkmake(t, "len", &l) || r != nil && !checkmake(t, "cap", &r) {
- n.Type = nil
+ n.SetType(nil)
return n
}
- if Isconst(l, CTINT) && r != nil && Isconst(r, CTINT) && l.Val().U.(*Mpint).Cmp(r.Val().U.(*Mpint)) > 0 {
- yyerror("len larger than cap in make(%v)", t)
- n.Type = nil
+ if ir.IsConst(l, constant.Int) && r != nil && ir.IsConst(r, constant.Int) && constant.Compare(l.Val(), token.GTR, r.Val()) {
+ base.Errorf("len larger than cap in make(%v)", t)
+ n.SetType(nil)
return n
}
- n.Left = l
- n.Right = r
- n.Op = OMAKESLICE
+ n.SetLeft(l)
+ n.SetRight(r)
+ n.SetOp(ir.OMAKESLICE)
- case TMAP:
+ case types.TMAP:
if i < len(args) {
l = args[i]
i++
l = typecheck(l, ctxExpr)
- l = defaultlit(l, types.Types[TINT])
- if l.Type == nil {
- n.Type = nil
+ l = defaultlit(l, types.Types[types.TINT])
+ if l.Type() == nil {
+ n.SetType(nil)
return n
}
if !checkmake(t, "size", &l) {
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Left = l
+ n.SetLeft(l)
} else {
- n.Left = nodintconst(0)
+ n.SetLeft(nodintconst(0))
}
- n.Op = OMAKEMAP
+ n.SetOp(ir.OMAKEMAP)
- case TCHAN:
+ case types.TCHAN:
l = nil
if i < len(args) {
l = args[i]
i++
l = typecheck(l, ctxExpr)
- l = defaultlit(l, types.Types[TINT])
- if l.Type == nil {
- n.Type = nil
+ l = defaultlit(l, types.Types[types.TINT])
+ if l.Type() == nil {
+ n.SetType(nil)
return n
}
if !checkmake(t, "buffer", &l) {
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Left = l
+ n.SetLeft(l)
} else {
- n.Left = nodintconst(0)
+ n.SetLeft(nodintconst(0))
}
- n.Op = OMAKECHAN
+ n.SetOp(ir.OMAKECHAN)
}
if i < len(args) {
- yyerror("too many arguments to make(%v)", t)
- n.Op = OMAKE
- n.Type = nil
+ base.Errorf("too many arguments to make(%v)", t)
+ n.SetOp(ir.OMAKE)
+ n.SetType(nil)
return n
}
- n.Type = t
+ n.SetType(t)
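// Naming sketch: op and type-kind constants now live behind their
// packages, so the unqualified forms in the old OMAKE code are qualified
// throughout:
//
//	n.Op = OMAKESLICE                      // old
//	l = defaultlit(l, types.Types[TINT])
//
//	n.SetOp(ir.OMAKESLICE)                 // new
//	l = defaultlit(l, types.Types[types.TINT])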
- case ONEW:
+ case ir.ONEW:
ok |= ctxExpr
- args := n.List
+ args := n.List()
if args.Len() == 0 {
- yyerror("missing argument to new")
- n.Type = nil
+ base.Errorf("missing argument to new")
+ n.SetType(nil)
return n
}
l := args.First()
l = typecheck(l, ctxType)
- t := l.Type
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
if args.Len() > 1 {
- yyerror("too many arguments to new(%v)", t)
- n.Type = nil
+ base.Errorf("too many arguments to new(%v)", t)
+ n.SetType(nil)
return n
}
- n.Left = l
- n.Type = types.NewPtr(t)
+ n.SetLeft(l)
+ n.SetType(types.NewPtr(t))
- case OPRINT, OPRINTN:
+ case ir.OPRINT, ir.OPRINTN:
ok |= ctxStmt
typecheckargs(n)
- ls := n.List.Slice()
+ ls := n.List().Slice()
for i1, n1 := range ls {
// Special case for print: int constant is int64, not int.
- if Isconst(n1, CTINT) {
- ls[i1] = defaultlit(ls[i1], types.Types[TINT64])
+ if ir.IsConst(n1, constant.Int) {
+ ls[i1] = defaultlit(ls[i1], types.Types[types.TINT64])
} else {
ls[i1] = defaultlit(ls[i1], nil)
}
}
- case OPANIC:
+ case ir.OPANIC:
ok |= ctxStmt
if !onearg(n, "panic") {
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, types.Types[TINTER])
- if n.Left.Type == nil {
- n.Type = nil
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), types.Types[types.TINTER]))
+ if n.Left().Type() == nil {
+ n.SetType(nil)
return n
}
- case ORECOVER:
+ case ir.ORECOVER:
ok |= ctxExpr | ctxStmt
- if n.List.Len() != 0 {
- yyerror("too many arguments to recover")
- n.Type = nil
+ if n.List().Len() != 0 {
+ base.Errorf("too many arguments to recover")
+ n.SetType(nil)
return n
}
- n.Type = types.Types[TINTER]
+ n.SetType(types.Types[types.TINTER])
- case OCLOSURE:
+ case ir.OCLOSURE:
ok |= ctxExpr
typecheckclosure(n, top)
- if n.Type == nil {
+ if n.Type() == nil {
return n
}
- case OITAB:
+ case ir.OITAB:
ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- t := n.Left.Type
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ t := n.Left().Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
if !t.IsInterface() {
- Fatalf("OITAB of %v", t)
+ base.Fatalf("OITAB of %v", t)
}
- n.Type = types.NewPtr(types.Types[TUINTPTR])
+ n.SetType(types.NewPtr(types.Types[types.TUINTPTR]))
- case OIDATA:
+ case ir.OIDATA:
// Whoever creates the OIDATA node must know a priori the concrete type at that moment,
// usually by just having checked the OITAB.
- Fatalf("cannot typecheck interface data %v", n)
+ base.Fatalf("cannot typecheck interface data %v", n)
- case OSPTR:
+ case ir.OSPTR:
ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- t := n.Left.Type
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ t := n.Left().Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
if !t.IsSlice() && !t.IsString() {
- Fatalf("OSPTR of %v", t)
+ base.Fatalf("OSPTR of %v", t)
}
if t.IsString() {
- n.Type = types.NewPtr(types.Types[TUINT8])
+ n.SetType(types.NewPtr(types.Types[types.TUINT8]))
} else {
- n.Type = types.NewPtr(t.Elem())
+ n.SetType(types.NewPtr(t.Elem()))
}
- case OCLOSUREVAR:
+ case ir.OCLOSUREVAR:
ok |= ctxExpr
- case OCFUNC:
+ case ir.OCFUNC:
ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- n.Type = types.Types[TUINTPTR]
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetType(types.Types[types.TUINTPTR])
- case OCONVNOP:
+ case ir.OCONVNOP:
ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
// statements
- case OAS:
+ case ir.OAS:
ok |= ctxStmt
typecheckas(n)
// Code that creates temps does not bother to set defn, so do it here.
- if n.Left.Op == ONAME && n.Left.IsAutoTmp() {
- n.Left.Name.Defn = n
+ if n.Left().Op() == ir.ONAME && ir.IsAutoTmp(n.Left()) {
+ n.Left().Name().Defn = n
}
- case OAS2:
+ case ir.OAS2:
ok |= ctxStmt
typecheckas2(n)
- case OBREAK,
- OCONTINUE,
- ODCL,
- OEMPTY,
- OGOTO,
- OFALL,
- OVARKILL,
- OVARLIVE:
+ case ir.OBREAK,
+ ir.OCONTINUE,
+ ir.ODCL,
+ ir.OEMPTY,
+ ir.OGOTO,
+ ir.OFALL,
+ ir.OVARKILL,
+ ir.OVARLIVE:
ok |= ctxStmt
- case OLABEL:
+ case ir.OLABEL:
ok |= ctxStmt
decldepth++
- if n.Sym.IsBlank() {
+ if n.Sym().IsBlank() {
// Empty identifier is valid but useless.
// Eliminate now to simplify life later.
// See issues 7538, 11589, 11593.
- n.Op = OEMPTY
- n.Left = nil
+ n.SetOp(ir.OEMPTY)
+ n.SetLeft(nil)
}
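// Helper sketch: predicates and copies that were methods on *gc.Node
// become free functions in package ir, as the OAS and OLABEL cases show:
//
//	n.Left.IsAutoTmp()   ->  ir.IsAutoTmp(n.Left())
//	n.Left.isBlank()     ->  ir.IsBlank(n.Left())
//	Isconst(n1, CTINT)   ->  ir.IsConst(n1, constant.Int)
//	n.copy()             ->  ir.Copy(n)
//	n.sepcopy()          ->  ir.SepCopy(n)
//	asNode(x)            ->  ir.AsNode(x)
//
// localpkg likewise moves to ir.LocalPkg.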
- case ODEFER:
+ case ir.ODEFER:
ok |= ctxStmt
- n.Left = typecheck(n.Left, ctxStmt|ctxExpr)
- if !n.Left.Diag() {
+ n.SetLeft(typecheck(n.Left(), ctxStmt|ctxExpr))
+ if !n.Left().Diag() {
checkdefergo(n)
}
- case OGO:
+ case ir.OGO:
ok |= ctxStmt
- n.Left = typecheck(n.Left, ctxStmt|ctxExpr)
+ n.SetLeft(typecheck(n.Left(), ctxStmt|ctxExpr))
checkdefergo(n)
- case OFOR, OFORUNTIL:
+ case ir.OFOR, ir.OFORUNTIL:
ok |= ctxStmt
- typecheckslice(n.Ninit.Slice(), ctxStmt)
+ typecheckslice(n.Init().Slice(), ctxStmt)
decldepth++
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- if n.Left != nil {
- t := n.Left.Type
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ if n.Left() != nil {
+ t := n.Left().Type()
if t != nil && !t.IsBoolean() {
- yyerror("non-bool %L used as for condition", n.Left)
+ base.Errorf("non-bool %L used as for condition", n.Left())
}
}
- n.Right = typecheck(n.Right, ctxStmt)
- if n.Op == OFORUNTIL {
- typecheckslice(n.List.Slice(), ctxStmt)
+ n.SetRight(typecheck(n.Right(), ctxStmt))
+ if n.Op() == ir.OFORUNTIL {
+ typecheckslice(n.List().Slice(), ctxStmt)
}
- typecheckslice(n.Nbody.Slice(), ctxStmt)
+ typecheckslice(n.Body().Slice(), ctxStmt)
decldepth--
- case OIF:
+ case ir.OIF:
ok |= ctxStmt
- typecheckslice(n.Ninit.Slice(), ctxStmt)
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- if n.Left != nil {
- t := n.Left.Type
+ typecheckslice(n.Init().Slice(), ctxStmt)
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ if n.Left() != nil {
+ t := n.Left().Type()
if t != nil && !t.IsBoolean() {
- yyerror("non-bool %L used as if condition", n.Left)
+ base.Errorf("non-bool %L used as if condition", n.Left())
}
}
- typecheckslice(n.Nbody.Slice(), ctxStmt)
- typecheckslice(n.Rlist.Slice(), ctxStmt)
+ typecheckslice(n.Body().Slice(), ctxStmt)
+ typecheckslice(n.Rlist().Slice(), ctxStmt)
- case ORETURN:
+ case ir.ORETURN:
ok |= ctxStmt
typecheckargs(n)
if Curfn == nil {
- yyerror("return outside function")
- n.Type = nil
+ base.Errorf("return outside function")
+ n.SetType(nil)
return n
}
- if Curfn.Type.FuncType().Outnamed && n.List.Len() == 0 {
+ if Curfn.Type().FuncType().Outnamed && n.List().Len() == 0 {
break
}
- typecheckaste(ORETURN, nil, false, Curfn.Type.Results(), n.List, func() string { return "return argument" })
+ typecheckaste(ir.ORETURN, nil, false, Curfn.Type().Results(), n.List(), func() string { return "return argument" })
- case ORETJMP:
+ case ir.ORETJMP:
ok |= ctxStmt
- case OSELECT:
+ case ir.OSELECT:
ok |= ctxStmt
typecheckselect(n)
- case OSWITCH:
+ case ir.OSWITCH:
ok |= ctxStmt
typecheckswitch(n)
- case ORANGE:
+ case ir.ORANGE:
ok |= ctxStmt
typecheckrange(n)
- case OTYPESW:
- yyerror("use of .(type) outside type switch")
- n.Type = nil
+ case ir.OTYPESW:
+ base.Errorf("use of .(type) outside type switch")
+ n.SetType(nil)
return n
- case ODCLFUNC:
+ case ir.ODCLFUNC:
ok |= ctxStmt
typecheckfunc(n)
- case ODCLCONST:
+ case ir.ODCLCONST:
ok |= ctxStmt
- n.Left = typecheck(n.Left, ctxExpr)
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
- case ODCLTYPE:
+ case ir.ODCLTYPE:
ok |= ctxStmt
- n.Left = typecheck(n.Left, ctxType)
- checkwidth(n.Left.Type)
+ n.SetLeft(typecheck(n.Left(), ctxType))
+ checkwidth(n.Left().Type())
}
- t := n.Type
- if t != nil && !t.IsFuncArgStruct() && n.Op != OTYPE {
+ t := n.Type()
+ if t != nil && !t.IsFuncArgStruct() && n.Op() != ir.OTYPE {
switch t.Etype {
- case TFUNC, // might have TANY; wait until it's called
- TANY, TFORW, TIDEAL, TNIL, TBLANK:
+ case types.TFUNC, // might have TANY; wait until it's called
+ types.TANY, types.TFORW, types.TIDEAL, types.TNIL, types.TBLANK:
break
default:
@@ -2090,49 +2094,49 @@ func typecheck1(n *Node, top int) (res *Node) {
}
}
- evconst(n)
- if n.Op == OTYPE && top&ctxType == 0 {
- if !n.Type.Broke() {
- yyerror("type %v is not an expression", n.Type)
+ n = evalConst(n)
+ if n.Op() == ir.OTYPE && top&ctxType == 0 {
+ if !n.Type().Broke() {
+ base.Errorf("type %v is not an expression", n.Type())
}
- n.Type = nil
+ n.SetType(nil)
return n
}
- if top&(ctxExpr|ctxType) == ctxType && n.Op != OTYPE {
- yyerror("%v is not a type", n)
- n.Type = nil
+ if top&(ctxExpr|ctxType) == ctxType && n.Op() != ir.OTYPE {
+ base.Errorf("%v is not a type", n)
+ n.SetType(nil)
return n
}
// TODO(rsc): simplify
if (top&(ctxCallee|ctxExpr|ctxType) != 0) && top&ctxStmt == 0 && ok&(ctxExpr|ctxType|ctxCallee) == 0 {
- yyerror("%v used as value", n)
- n.Type = nil
+ base.Errorf("%v used as value", n)
+ n.SetType(nil)
return n
}
if (top&ctxStmt != 0) && top&(ctxCallee|ctxExpr|ctxType) == 0 && ok&ctxStmt == 0 {
if !n.Diag() {
- yyerror("%v evaluated but not used", n)
+ base.Errorf("%v evaluated but not used", n)
n.SetDiag(true)
}
- n.Type = nil
+ n.SetType(nil)
return n
}
return n
}
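// Diagnostics sketch: error reporting and position state move into the
// new cmd/compile/internal/base package:
//
//	yyerror(...)          ->  base.Errorf(...)
//	yyerrorl(n.Pos, ...)  ->  base.ErrorfAt(n.Pos(), ...)
//	Fatalf(...)           ->  base.Fatalf(...)
//	lineno                ->  base.Pos
//	Ctxt                  ->  base.Ctxt
//
// The trace debug variable is likewise read as base.Flag.LowerT
// (presumably the -t flag under base.Flag's naming scheme).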
-func typecheckargs(n *Node) {
- if n.List.Len() != 1 || n.IsDDD() {
- typecheckslice(n.List.Slice(), ctxExpr)
+func typecheckargs(n ir.Node) {
+ if n.List().Len() != 1 || n.IsDDD() {
+ typecheckslice(n.List().Slice(), ctxExpr)
return
}
- typecheckslice(n.List.Slice(), ctxExpr|ctxMultiOK)
- t := n.List.First().Type
+ typecheckslice(n.List().Slice(), ctxExpr|ctxMultiOK)
+ t := n.List().First().Type()
if t == nil || !t.IsFuncArgStruct() {
return
}
@@ -2140,58 +2144,59 @@ func typecheckargs(n *Node) {
// Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...).
// Save n as n.Orig for fmt.go.
- if n.Orig == n {
- n.Orig = n.sepcopy()
+ if n.Orig() == n {
+ n.SetOrig(ir.SepCopy(n))
}
- as := nod(OAS2, nil, nil)
- as.Rlist.AppendNodes(&n.List)
+ as := ir.Nod(ir.OAS2, nil, nil)
+ as.PtrRlist().AppendNodes(n.PtrList())
// If we're outside of function context, then this call will
// be executed during the generated init function. However,
// init.go hasn't yet created it. Instead, associate the
- // temporary variables with dummyInitFn for now, and init.go
+ // temporary variables with initTodo for now, and init.go
// will reassociate them later when it's appropriate.
static := Curfn == nil
if static {
- Curfn = dummyInitFn
+ Curfn = initTodo
}
for _, f := range t.FieldSlice() {
t := temp(f.Type)
- as.Ninit.Append(nod(ODCL, t, nil))
- as.List.Append(t)
- n.List.Append(t)
+ as.PtrInit().Append(ir.Nod(ir.ODCL, t, nil))
+ as.PtrList().Append(t)
+ n.PtrList().Append(t)
}
if static {
Curfn = nil
}
as = typecheck(as, ctxStmt)
- n.Ninit.Append(as)
+ n.PtrInit().Append(as)
}
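// List sketch: ir.Nodes fields are read through List(), Rlist(), Init(),
// and Body(), and mutated through the pointer-returning PtrList(),
// PtrRlist(), and PtrInit(), as typecheckargs above shows:
//
//	args := n.List().Slice()
//	as.PtrRlist().AppendNodes(n.PtrList())
//	as.PtrInit().Append(ir.Nod(ir.ODCL, t, nil))
//	n.PtrInit().Append(as)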
-func checksliceindex(l *Node, r *Node, tp *types.Type) bool {
- t := r.Type
+func checksliceindex(l ir.Node, r ir.Node, tp *types.Type) bool {
+ t := r.Type()
if t == nil {
return false
}
if !t.IsInteger() {
- yyerror("invalid slice index %v (type %v)", r, t)
+ base.Errorf("invalid slice index %v (type %v)", r, t)
return false
}
- if r.Op == OLITERAL {
- if r.Int64Val() < 0 {
- yyerror("invalid slice index %v (index must be non-negative)", r)
+ if r.Op() == ir.OLITERAL {
+ x := r.Val()
+ if constant.Sign(x) < 0 {
+ base.Errorf("invalid slice index %v (index must be non-negative)", r)
return false
- } else if tp != nil && tp.NumElem() >= 0 && r.Int64Val() > tp.NumElem() {
- yyerror("invalid slice index %v (out of bounds for %d-element array)", r, tp.NumElem())
+ } else if tp != nil && tp.NumElem() >= 0 && constant.Compare(x, token.GTR, constant.MakeInt64(tp.NumElem())) {
+ base.Errorf("invalid slice index %v (out of bounds for %d-element array)", r, tp.NumElem())
return false
- } else if Isconst(l, CTSTR) && r.Int64Val() > int64(len(l.StringVal())) {
- yyerror("invalid slice index %v (out of bounds for %d-byte string)", r, len(l.StringVal()))
+ } else if ir.IsConst(l, constant.String) && constant.Compare(x, token.GTR, constant.MakeInt64(int64(len(l.StringVal())))) {
+ base.Errorf("invalid slice index %v (out of bounds for %d-byte string)", r, len(l.StringVal()))
return false
- } else if r.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
- yyerror("invalid slice index %v (index too large)", r)
+ } else if doesoverflow(x, types.Types[types.TINT]) {
+ base.Errorf("invalid slice index %v (index too large)", r)
return false
}
}
@@ -2199,57 +2204,57 @@ func checksliceindex(l *Node, r *Node, tp *types.Type) bool {
return true
}
-func checksliceconst(lo *Node, hi *Node) bool {
- if lo != nil && hi != nil && lo.Op == OLITERAL && hi.Op == OLITERAL && lo.Val().U.(*Mpint).Cmp(hi.Val().U.(*Mpint)) > 0 {
- yyerror("invalid slice index: %v > %v", lo, hi)
+func checksliceconst(lo ir.Node, hi ir.Node) bool {
+ if lo != nil && hi != nil && lo.Op() == ir.OLITERAL && hi.Op() == ir.OLITERAL && constant.Compare(lo.Val(), token.GTR, hi.Val()) {
+ base.Errorf("invalid slice index: %v > %v", lo, hi)
return false
}
return true
}
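// Constant sketch: the compiler's own Mpint plumbing gives way to the
// standard go/constant and go/token packages, as in checksliceconst:
//
//	lo.Val().U.(*Mpint).Cmp(hi.Val().U.(*Mpint)) > 0   // old
//
//	constant.Compare(lo.Val(), token.GTR, hi.Val())    // new
//
// Sign tests use constant.Sign, and the old maxintval[TINT] bound check
// becomes doesoverflow(x, types.Types[types.TINT]).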
-func checkdefergo(n *Node) {
+func checkdefergo(n ir.Node) {
what := "defer"
- if n.Op == OGO {
+ if n.Op() == ir.OGO {
what = "go"
}
- switch n.Left.Op {
+ switch n.Left().Op() {
// ok
- case OCALLINTER,
- OCALLMETH,
- OCALLFUNC,
- OCLOSE,
- OCOPY,
- ODELETE,
- OPANIC,
- OPRINT,
- OPRINTN,
- ORECOVER:
+ case ir.OCALLINTER,
+ ir.OCALLMETH,
+ ir.OCALLFUNC,
+ ir.OCLOSE,
+ ir.OCOPY,
+ ir.ODELETE,
+ ir.OPANIC,
+ ir.OPRINT,
+ ir.OPRINTN,
+ ir.ORECOVER:
return
- case OAPPEND,
- OCAP,
- OCOMPLEX,
- OIMAG,
- OLEN,
- OMAKE,
- OMAKESLICE,
- OMAKECHAN,
- OMAKEMAP,
- ONEW,
- OREAL,
- OLITERAL: // conversion or unsafe.Alignof, Offsetof, Sizeof
- if n.Left.Orig != nil && n.Left.Orig.Op == OCONV {
+ case ir.OAPPEND,
+ ir.OCAP,
+ ir.OCOMPLEX,
+ ir.OIMAG,
+ ir.OLEN,
+ ir.OMAKE,
+ ir.OMAKESLICE,
+ ir.OMAKECHAN,
+ ir.OMAKEMAP,
+ ir.ONEW,
+ ir.OREAL,
+ ir.OLITERAL: // conversion or unsafe.Alignof, Offsetof, Sizeof
+ if n.Left().Orig() != nil && n.Left().Orig().Op() == ir.OCONV {
break
}
- yyerrorl(n.Pos, "%s discards result of %v", what, n.Left)
+ base.ErrorfAt(n.Pos(), "%s discards result of %v", what, n.Left())
return
}
// type is broken or missing, most likely a method call on a broken type
// we will warn about the broken type elsewhere. no need to emit a potentially confusing error
- if n.Left.Type == nil || n.Left.Type.Broke() {
+ if n.Left().Type() == nil || n.Left().Type().Broke() {
return
}
@@ -2257,15 +2262,15 @@ func checkdefergo(n *Node) {
// The syntax made sure it was a call, so this must be
// a conversion.
n.SetDiag(true)
- yyerrorl(n.Pos, "%s requires function call, not conversion", what)
+ base.ErrorfAt(n.Pos(), "%s requires function call, not conversion", what)
}
}
// The result of implicitstar MUST be assigned back to n, e.g.
// n.Left = implicitstar(n.Left)
-func implicitstar(n *Node) *Node {
+func implicitstar(n ir.Node) ir.Node {
// insert implicit * if needed for fixed array
- t := n.Type
+ t := n.Type()
if t == nil || !t.IsPtr() {
return n
}
@@ -2276,54 +2281,54 @@ func implicitstar(n *Node) *Node {
if !t.IsArray() {
return n
}
- n = nod(ODEREF, n, nil)
+ n = ir.Nod(ir.ODEREF, n, nil)
n.SetImplicit(true)
n = typecheck(n, ctxExpr)
return n
}
-func onearg(n *Node, f string, args ...interface{}) bool {
- if n.Left != nil {
+func onearg(n ir.Node, f string, args ...interface{}) bool {
+ if n.Left() != nil {
return true
}
- if n.List.Len() == 0 {
+ if n.List().Len() == 0 {
p := fmt.Sprintf(f, args...)
- yyerror("missing argument to %s: %v", p, n)
+ base.Errorf("missing argument to %s: %v", p, n)
return false
}
- if n.List.Len() > 1 {
+ if n.List().Len() > 1 {
p := fmt.Sprintf(f, args...)
- yyerror("too many arguments to %s: %v", p, n)
- n.Left = n.List.First()
- n.List.Set(nil)
+ base.Errorf("too many arguments to %s: %v", p, n)
+ n.SetLeft(n.List().First())
+ n.PtrList().Set(nil)
return false
}
- n.Left = n.List.First()
- n.List.Set(nil)
+ n.SetLeft(n.List().First())
+ n.PtrList().Set(nil)
return true
}
-func twoarg(n *Node) bool {
- if n.Left != nil {
+func twoarg(n ir.Node) bool {
+ if n.Left() != nil {
return true
}
- if n.List.Len() != 2 {
- if n.List.Len() < 2 {
- yyerror("not enough arguments in call to %v", n)
+ if n.List().Len() != 2 {
+ if n.List().Len() < 2 {
+ base.Errorf("not enough arguments in call to %v", n)
} else {
- yyerror("too many arguments in call to %v", n)
+ base.Errorf("too many arguments in call to %v", n)
}
return false
}
- n.Left = n.List.First()
- n.Right = n.List.Second()
- n.List.Set(nil)
+ n.SetLeft(n.List().First())
+ n.SetRight(n.List().Second())
+ n.PtrList().Set(nil)
return true
}
-func lookdot1(errnode *Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field {
+func lookdot1(errnode ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field {
var r *types.Field
for _, f := range fs.Slice() {
if dostrcmp != 0 && f.Sym.Name == s.Name {
@@ -2337,11 +2342,11 @@ func lookdot1(errnode *Node, s *types.Sym, t *types.Type, fs *types.Fields, dost
}
if r != nil {
if errnode != nil {
- yyerror("ambiguous selector %v", errnode)
+ base.Errorf("ambiguous selector %v", errnode)
} else if t.IsPtr() {
- yyerror("ambiguous selector (%v).%v", t, s)
+ base.Errorf("ambiguous selector (%v).%v", t, s)
} else {
- yyerror("ambiguous selector %v.%v", t, s)
+ base.Errorf("ambiguous selector %v.%v", t, s)
}
break
}
@@ -2354,12 +2359,12 @@ func lookdot1(errnode *Node, s *types.Sym, t *types.Type, fs *types.Fields, dost
// typecheckMethodExpr checks selector expressions (ODOT) where the
// base expression is a type expression (OTYPE).
-func typecheckMethodExpr(n *Node) (res *Node) {
- if enableTrace && trace {
+func typecheckMethodExpr(n ir.Node) (res ir.Node) {
+ if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckMethodExpr", n)(&res)
}
- t := n.Left.Type
+ t := n.Left().Type()
// Compute the method set for t.
var ms *types.Fields
@@ -2368,8 +2373,8 @@ func typecheckMethodExpr(n *Node) (res *Node) {
} else {
mt := methtype(t)
if mt == nil {
- yyerror("%v undefined (type %v has no method %v)", n, t, n.Sym)
- n.Type = nil
+ base.Errorf("%v undefined (type %v has no method %v)", n, t, n.Sym())
+ n.SetType(nil)
return n
}
expandmeth(mt)
@@ -2387,40 +2392,41 @@ func typecheckMethodExpr(n *Node) (res *Node) {
}
}
- s := n.Sym
+ s := n.Sym()
m := lookdot1(n, s, t, ms, 0)
if m == nil {
if lookdot1(n, s, t, ms, 1) != nil {
- yyerror("%v undefined (cannot refer to unexported method %v)", n, s)
+ base.Errorf("%v undefined (cannot refer to unexported method %v)", n, s)
} else if _, ambig := dotpath(s, t, nil, false); ambig {
- yyerror("%v undefined (ambiguous selector)", n) // method or field
+ base.Errorf("%v undefined (ambiguous selector)", n) // method or field
} else {
- yyerror("%v undefined (type %v has no method %v)", n, t, s)
+ base.Errorf("%v undefined (type %v has no method %v)", n, t, s)
}
- n.Type = nil
+ n.SetType(nil)
return n
}
if !isMethodApplicable(t, m) {
- yyerror("invalid method expression %v (needs pointer receiver: (*%v).%S)", n, t, s)
- n.Type = nil
+ base.Errorf("invalid method expression %v (needs pointer receiver: (*%v).%S)", n, t, s)
+ n.SetType(nil)
return n
}
- n.Op = ONAME
- if n.Name == nil {
- n.Name = new(Name)
+ n.SetOp(ir.OMETHEXPR)
+ if n.Name() == nil {
+ n.SetName(new(ir.Name))
}
- n.Right = newname(n.Sym)
- n.Sym = methodSym(t, n.Sym)
- n.Type = methodfunc(m.Type, n.Left.Type)
- n.Xoffset = 0
- n.SetClass(PFUNC)
+ n.SetRight(NewName(n.Sym()))
+ n.SetSym(methodSym(t, n.Sym()))
+ n.SetType(methodfunc(m.Type, n.Left().Type()))
+ n.SetOffset(0)
+ n.SetClass(ir.PFUNC)
+ n.SetOpt(m)
// methodSym already marked n.Sym as a function.
// Issue 25065. Make sure that we emit the symbol for a local method.
- if Ctxt.Flag_dynlink && !inimport && (t.Sym == nil || t.Sym.Pkg == localpkg) {
- makefuncsym(n.Sym)
+ if base.Ctxt.Flag_dynlink && !inimport && (t.Sym == nil || t.Sym.Pkg == ir.LocalPkg) {
+ makefuncsym(n.Sym())
}
return n
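// Method-expression sketch: instead of rewriting T.M to a synthetic
// ONAME, typecheckMethodExpr now gives it a dedicated op and records the
// method in Opt:
//
//	n.SetOp(ir.OMETHEXPR)
//	n.SetRight(NewName(n.Sym()))   // newname is renamed NewName
//	n.SetOpt(m)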
@@ -2441,8 +2447,8 @@ func derefall(t *types.Type) *types.Type {
return t
}
-func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field {
- s := n.Sym
+func lookdot(n ir.Node, t *types.Type, dostrcmp int) *types.Field {
+ s := n.Sym()
dowidth(t)
var f1 *types.Field
@@ -2451,7 +2457,7 @@ func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field {
}
var f2 *types.Field
- if n.Left.Type == t || n.Left.Type.Sym == nil {
+ if n.Left().Type() == t || n.Left().Type().Sym == nil {
mt := methtype(t)
if mt != nil {
f2 = lookdot1(n, s, mt, mt.Methods(), dostrcmp)
@@ -2464,21 +2470,21 @@ func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field {
return f1
}
if f2 != nil {
- yyerror("%v is both field and method", n.Sym)
+ base.Errorf("%v is both field and method", n.Sym())
}
- if f1.Offset == BADWIDTH {
- Fatalf("lookdot badwidth %v %p", f1, f1)
+ if f1.Offset == types.BADWIDTH {
+ base.Fatalf("lookdot badwidth %v %p", f1, f1)
}
- n.Xoffset = f1.Offset
- n.Type = f1.Type
+ n.SetOffset(f1.Offset)
+ n.SetType(f1.Type)
if t.IsInterface() {
- if n.Left.Type.IsPtr() {
- n.Left = nod(ODEREF, n.Left, nil) // implicitstar
- n.Left.SetImplicit(true)
- n.Left = typecheck(n.Left, ctxExpr)
+ if n.Left().Type().IsPtr() {
+ n.SetLeft(ir.Nod(ir.ODEREF, n.Left(), nil)) // implicitstar
+ n.Left().SetImplicit(true)
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
}
- n.Op = ODOTINTER
+ n.SetOp(ir.ODOTINTER)
} else {
n.SetOpt(f1)
}
@@ -2491,53 +2497,54 @@ func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field {
// Already in the process of diagnosing an error.
return f2
}
- tt := n.Left.Type
+ tt := n.Left().Type()
dowidth(tt)
rcvr := f2.Type.Recv().Type
if !types.Identical(rcvr, tt) {
if rcvr.IsPtr() && types.Identical(rcvr.Elem(), tt) {
- checklvalue(n.Left, "call pointer method on")
- n.Left = nod(OADDR, n.Left, nil)
- n.Left.SetImplicit(true)
- n.Left = typecheck(n.Left, ctxType|ctxExpr)
+ checklvalue(n.Left(), "call pointer method on")
+ n.SetLeft(ir.Nod(ir.OADDR, n.Left(), nil))
+ n.Left().SetImplicit(true)
+ n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr))
} else if tt.IsPtr() && (!rcvr.IsPtr() || rcvr.IsPtr() && rcvr.Elem().NotInHeap()) && types.Identical(tt.Elem(), rcvr) {
- n.Left = nod(ODEREF, n.Left, nil)
- n.Left.SetImplicit(true)
- n.Left = typecheck(n.Left, ctxType|ctxExpr)
+ n.SetLeft(ir.Nod(ir.ODEREF, n.Left(), nil))
+ n.Left().SetImplicit(true)
+ n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr))
} else if tt.IsPtr() && tt.Elem().IsPtr() && types.Identical(derefall(tt), derefall(rcvr)) {
- yyerror("calling method %v with receiver %L requires explicit dereference", n.Sym, n.Left)
+ base.Errorf("calling method %v with receiver %L requires explicit dereference", n.Sym(), n.Left())
for tt.IsPtr() {
// Stop one level early for method with pointer receiver.
if rcvr.IsPtr() && !tt.Elem().IsPtr() {
break
}
- n.Left = nod(ODEREF, n.Left, nil)
- n.Left.SetImplicit(true)
- n.Left = typecheck(n.Left, ctxType|ctxExpr)
+ n.SetLeft(ir.Nod(ir.ODEREF, n.Left(), nil))
+ n.Left().SetImplicit(true)
+ n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr))
tt = tt.Elem()
}
} else {
- Fatalf("method mismatch: %v for %v", rcvr, tt)
+ base.Fatalf("method mismatch: %v for %v", rcvr, tt)
}
}
pll := n
- ll := n.Left
- for ll.Left != nil && (ll.Op == ODOT || ll.Op == ODOTPTR || ll.Op == ODEREF) {
+ ll := n.Left()
+ for ll.Left() != nil && (ll.Op() == ir.ODOT || ll.Op() == ir.ODOTPTR || ll.Op() == ir.ODEREF) {
pll = ll
- ll = ll.Left
+ ll = ll.Left()
}
- if pll.Implicit() && ll.Type.IsPtr() && ll.Type.Sym != nil && asNode(ll.Type.Sym.Def) != nil && asNode(ll.Type.Sym.Def).Op == OTYPE {
+ if pll.Implicit() && ll.Type().IsPtr() && ll.Type().Sym != nil && ir.AsNode(ll.Type().Sym.Def) != nil && ir.AsNode(ll.Type().Sym.Def).Op() == ir.OTYPE {
// It is invalid to automatically dereference a named pointer type when selecting a method.
// Make n.Left == ll to clarify error message.
- n.Left = ll
+ n.SetLeft(ll)
return nil
}
- n.Sym = methodSym(n.Left.Type, f2.Sym)
- n.Xoffset = f2.Offset
- n.Type = f2.Type
- n.Op = ODOTMETH
+ n.SetSym(methodSym(n.Left().Type(), f2.Sym))
+ n.SetOffset(f2.Offset)
+ n.SetType(f2.Type)
+ n.SetOp(ir.ODOTMETH)
+ n.SetOpt(f2)
return f2
}
@@ -2545,9 +2552,9 @@ func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field {
return nil
}
-func nokeys(l Nodes) bool {
+func nokeys(l ir.Nodes) bool {
for _, n := range l.Slice() {
- if n.Op == OKEY || n.Op == OSTRUCTKEY {
+ if n.Op() == ir.OKEY || n.Op() == ir.OSTRUCTKEY {
return false
}
}
@@ -2565,18 +2572,18 @@ func hasddd(t *types.Type) bool {
}
// typecheck assignment: type list = expression list
-func typecheckaste(op Op, call *Node, isddd bool, tstruct *types.Type, nl Nodes, desc func() string) {
+func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl ir.Nodes, desc func() string) {
var t *types.Type
var i int
- lno := lineno
- defer func() { lineno = lno }()
+ lno := base.Pos
+ defer func() { base.Pos = lno }()
if tstruct.Broke() {
return
}
- var n *Node
+ var n ir.Node
if nl.Len() == 1 {
n = nl.First()
}
@@ -2618,7 +2625,7 @@ func typecheckaste(op Op, call *Node, isddd bool, tstruct *types.Type, nl Nodes,
}
n = nl.Index(i)
setlineno(n)
- if n.Type != nil {
+ if n.Type() != nil {
nl.SetIndex(i, assignconvfn(n, t, desc))
}
return
@@ -2628,7 +2635,7 @@ func typecheckaste(op Op, call *Node, isddd bool, tstruct *types.Type, nl Nodes,
for ; i < nl.Len(); i++ {
n = nl.Index(i)
setlineno(n)
- if n.Type != nil {
+ if n.Type() != nil {
nl.SetIndex(i, assignconvfn(n, t.Elem(), desc))
}
}
@@ -2640,7 +2647,7 @@ func typecheckaste(op Op, call *Node, isddd bool, tstruct *types.Type, nl Nodes,
}
n = nl.Index(i)
setlineno(n)
- if n.Type != nil {
+ if n.Type() != nil {
nl.SetIndex(i, assignconvfn(n, t, desc))
}
i++
@@ -2651,27 +2658,27 @@ func typecheckaste(op Op, call *Node, isddd bool, tstruct *types.Type, nl Nodes,
}
if isddd {
if call != nil {
- yyerror("invalid use of ... in call to %v", call)
+ base.Errorf("invalid use of ... in call to %v", call)
} else {
- yyerror("invalid use of ... in %v", op)
+ base.Errorf("invalid use of ... in %v", op)
}
}
return
notenough:
- if n == nil || (!n.Diag() && n.Type != nil) {
+ if n == nil || (!n.Diag() && n.Type() != nil) {
details := errorDetails(nl, tstruct, isddd)
if call != nil {
// call is the expression being called, not the overall call.
// Method expressions have the form T.M, and the compiler has
// rewritten those to OMETHEXPR nodes but left T in Left.
- if call.isMethodExpression() {
- yyerror("not enough arguments in call to method expression %v%s", call, details)
+ if call.Op() == ir.OMETHEXPR {
+ base.Errorf("not enough arguments in call to method expression %v%s", call, details)
} else {
- yyerror("not enough arguments in call to %v%s", call, details)
+ base.Errorf("not enough arguments in call to %v%s", call, details)
}
} else {
- yyerror("not enough arguments to %v%s", op, details)
+ base.Errorf("not enough arguments to %v%s", op, details)
}
if n != nil {
n.SetDiag(true)
@@ -2682,13 +2689,13 @@ notenough:
toomany:
details := errorDetails(nl, tstruct, isddd)
if call != nil {
- yyerror("too many arguments in call to %v%s", call, details)
+ base.Errorf("too many arguments in call to %v%s", call, details)
} else {
- yyerror("too many arguments to %v%s", op, details)
+ base.Errorf("too many arguments to %v%s", op, details)
}
}
-func errorDetails(nl Nodes, tstruct *types.Type, isddd bool) string {
+func errorDetails(nl ir.Nodes, tstruct *types.Type, isddd bool) string {
// If we don't know any type at a call site, let's suppress any return
// message signatures. See Issue https://golang.org/issues/19012.
if tstruct == nil {
@@ -2696,11 +2703,11 @@ func errorDetails(nl Nodes, tstruct *types.Type, isddd bool) string {
}
// If any node has an unknown type, suppress it as well
for _, n := range nl.Slice() {
- if n.Type == nil {
+ if n.Type() == nil {
return ""
}
}
- return fmt.Sprintf("\n\thave %s\n\twant %v", nl.sigerr(isddd), tstruct)
+ return fmt.Sprintf("\n\thave %s\n\twant %v", fmtSignature(nl, isddd), tstruct)
}
// sigrepr is a type's representation to the outside world,
@@ -2714,7 +2721,7 @@ func sigrepr(t *types.Type, isddd bool) string {
return "bool"
}
- if t.Etype == TIDEAL {
+ if t.Etype == types.TIDEAL {
// "untyped number" is not commonly used
// outside of the compiler, so let's use "number".
// TODO(mdempsky): Revisit this.
@@ -2724,7 +2731,7 @@ func sigrepr(t *types.Type, isddd bool) string {
// Turn []T... argument to ...T for clearer error message.
if isddd {
if !t.IsSlice() {
- Fatalf("bad type for ... argument: %v", t)
+ base.Fatalf("bad type for ... argument: %v", t)
}
return "..." + t.Elem().String()
}
@@ -2732,7 +2739,7 @@ func sigrepr(t *types.Type, isddd bool) string {
}
// sigerr returns the signature of the types at the call or return.
-func (nl Nodes) sigerr(isddd bool) string {
+func fmtSignature(nl ir.Nodes, isddd bool) string {
if nl.Len() < 1 {
return "()"
}
@@ -2740,7 +2747,7 @@ func (nl Nodes) sigerr(isddd bool) string {
var typeStrings []string
for i, n := range nl.Slice() {
isdddArg := isddd && i == nl.Len()-1
- typeStrings = append(typeStrings, sigrepr(n.Type, isdddArg))
+ typeStrings = append(typeStrings, sigrepr(n.Type(), isdddArg))
}
return fmt.Sprintf("(%s)", strings.Join(typeStrings, ", "))
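// With Nodes moved into package ir, gc can no longer define methods on
// it, so the sigerr method becomes the plain function fmtSignature:
//
//	fmt.Sprintf("\n\thave %s\n\twant %v", fmtSignature(nl, isddd), tstruct)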
@@ -2749,7 +2756,7 @@ func (nl Nodes) sigerr(isddd bool) string {
// type check composite
func fielddup(name string, hash map[string]bool) {
if hash[name] {
- yyerror("duplicate field name in struct literal: %s", name)
+ base.Errorf("duplicate field name in struct literal: %s", name)
return
}
hash[name] = true
@@ -2758,7 +2765,7 @@ func fielddup(name string, hash map[string]bool) {
// iscomptype reports whether type t is a composite literal type.
func iscomptype(t *types.Type) bool {
switch t.Etype {
- case TARRAY, TSLICE, TSTRUCT, TMAP:
+ case types.TARRAY, types.TSLICE, types.TSTRUCT, types.TMAP:
return true
default:
return false
@@ -2767,21 +2774,21 @@ func iscomptype(t *types.Type) bool {
// pushtype adds elided type information for composite literals if
// appropriate, and returns the resulting expression.
-func pushtype(n *Node, t *types.Type) *Node {
- if n == nil || n.Op != OCOMPLIT || n.Right != nil {
+func pushtype(n ir.Node, t *types.Type) ir.Node {
+ if n == nil || n.Op() != ir.OCOMPLIT || n.Right() != nil {
return n
}
switch {
case iscomptype(t):
// For T, return T{...}.
- n.Right = typenod(t)
+ n.SetRight(typenod(t))
case t.IsPtr() && iscomptype(t.Elem()):
// For *T, return &T{...}.
- n.Right = typenod(t.Elem())
+ n.SetRight(typenod(t.Elem()))
- n = nodl(n.Pos, OADDR, n, nil)
+ n = ir.NodAt(n.Pos(), ir.OADDR, n, nil)
n.SetImplicit(true)
}
@@ -2790,107 +2797,107 @@ func pushtype(n *Node, t *types.Type) *Node {
// The result of typecheckcomplit MUST be assigned back to n, e.g.
// n.Left = typecheckcomplit(n.Left)
-func typecheckcomplit(n *Node) (res *Node) {
- if enableTrace && trace {
+func typecheckcomplit(n ir.Node) (res ir.Node) {
+ if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckcomplit", n)(&res)
}
- lno := lineno
+ lno := base.Pos
defer func() {
- lineno = lno
+ base.Pos = lno
}()
- if n.Right == nil {
- yyerrorl(n.Pos, "missing type in composite literal")
- n.Type = nil
+ if n.Right() == nil {
+ base.ErrorfAt(n.Pos(), "missing type in composite literal")
+ n.SetType(nil)
return n
}
// Save original node (including n.Right)
- n.Orig = n.copy()
+ n.SetOrig(ir.Copy(n))
- setlineno(n.Right)
+ setlineno(n.Right())
// Need to handle [...]T arrays specially.
- if n.Right.Op == OTARRAY && n.Right.Left != nil && n.Right.Left.Op == ODDD {
- n.Right.Right = typecheck(n.Right.Right, ctxType)
- if n.Right.Right.Type == nil {
- n.Type = nil
+ if n.Right().Op() == ir.OTARRAY && n.Right().Left() != nil && n.Right().Left().Op() == ir.ODDD {
+ n.Right().SetRight(typecheck(n.Right().Right(), ctxType))
+ if n.Right().Right().Type() == nil {
+ n.SetType(nil)
return n
}
- elemType := n.Right.Right.Type
+ elemType := n.Right().Right().Type()
- length := typecheckarraylit(elemType, -1, n.List.Slice(), "array literal")
+ length := typecheckarraylit(elemType, -1, n.List().Slice(), "array literal")
- n.Op = OARRAYLIT
- n.Type = types.NewArray(elemType, length)
- n.Right = nil
+ n.SetOp(ir.OARRAYLIT)
+ n.SetType(types.NewArray(elemType, length))
+ n.SetRight(nil)
return n
}
- n.Right = typecheck(n.Right, ctxType)
- t := n.Right.Type
+ n.SetRight(typecheck(n.Right(), ctxType))
+ t := n.Right().Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Type = t
+ n.SetType(t)
switch t.Etype {
default:
- yyerror("invalid composite literal type %v", t)
- n.Type = nil
+ base.Errorf("invalid composite literal type %v", t)
+ n.SetType(nil)
- case TARRAY:
- typecheckarraylit(t.Elem(), t.NumElem(), n.List.Slice(), "array literal")
- n.Op = OARRAYLIT
- n.Right = nil
+ case types.TARRAY:
+ typecheckarraylit(t.Elem(), t.NumElem(), n.List().Slice(), "array literal")
+ n.SetOp(ir.OARRAYLIT)
+ n.SetRight(nil)
- case TSLICE:
- length := typecheckarraylit(t.Elem(), -1, n.List.Slice(), "slice literal")
- n.Op = OSLICELIT
- n.Right = nodintconst(length)
+ case types.TSLICE:
+ length := typecheckarraylit(t.Elem(), -1, n.List().Slice(), "slice literal")
+ n.SetOp(ir.OSLICELIT)
+ n.SetRight(nodintconst(length))
- case TMAP:
+ case types.TMAP:
var cs constSet
- for i3, l := range n.List.Slice() {
+ for i3, l := range n.List().Slice() {
setlineno(l)
- if l.Op != OKEY {
- n.List.SetIndex(i3, typecheck(l, ctxExpr))
- yyerror("missing key in map literal")
+ if l.Op() != ir.OKEY {
+ n.List().SetIndex(i3, typecheck(l, ctxExpr))
+ base.Errorf("missing key in map literal")
continue
}
- r := l.Left
+ r := l.Left()
r = pushtype(r, t.Key())
r = typecheck(r, ctxExpr)
- l.Left = assignconv(r, t.Key(), "map key")
- cs.add(lineno, l.Left, "key", "map literal")
+ l.SetLeft(assignconv(r, t.Key(), "map key"))
+ cs.add(base.Pos, l.Left(), "key", "map literal")
- r = l.Right
+ r = l.Right()
r = pushtype(r, t.Elem())
r = typecheck(r, ctxExpr)
- l.Right = assignconv(r, t.Elem(), "map value")
+ l.SetRight(assignconv(r, t.Elem(), "map value"))
}
- n.Op = OMAPLIT
- n.Right = nil
+ n.SetOp(ir.OMAPLIT)
+ n.SetRight(nil)
- case TSTRUCT:
+ case types.TSTRUCT:
// Need valid field offsets for Xoffset below.
dowidth(t)
errored := false
- if n.List.Len() != 0 && nokeys(n.List) {
+ if n.List().Len() != 0 && nokeys(n.List()) {
// simple list of variables
- ls := n.List.Slice()
+ ls := n.List().Slice()
for i, n1 := range ls {
setlineno(n1)
n1 = typecheck(n1, ctxExpr)
ls[i] = n1
if i >= t.NumFields() {
if !errored {
- yyerror("too many values in %v", n)
+ base.Errorf("too many values in %v", n)
errored = true
}
continue
@@ -2898,81 +2905,81 @@ func typecheckcomplit(n *Node) (res *Node) {
f := t.Field(i)
s := f.Sym
- if s != nil && !types.IsExported(s.Name) && s.Pkg != localpkg {
- yyerror("implicit assignment of unexported field '%s' in %v literal", s.Name, t)
+ if s != nil && !types.IsExported(s.Name) && s.Pkg != ir.LocalPkg {
+ base.Errorf("implicit assignment of unexported field '%s' in %v literal", s.Name, t)
}
// No pushtype allowed here. Must name fields for that.
n1 = assignconv(n1, f.Type, "field value")
- n1 = nodSym(OSTRUCTKEY, n1, f.Sym)
- n1.Xoffset = f.Offset
+ n1 = nodSym(ir.OSTRUCTKEY, n1, f.Sym)
+ n1.SetOffset(f.Offset)
ls[i] = n1
}
if len(ls) < t.NumFields() {
- yyerror("too few values in %v", n)
+ base.Errorf("too few values in %v", n)
}
} else {
hash := make(map[string]bool)
// keyed list
- ls := n.List.Slice()
+ ls := n.List().Slice()
for i, l := range ls {
setlineno(l)
- if l.Op == OKEY {
- key := l.Left
+ if l.Op() == ir.OKEY {
+ key := l.Left()
- l.Op = OSTRUCTKEY
- l.Left = l.Right
- l.Right = nil
+ l.SetOp(ir.OSTRUCTKEY)
+ l.SetLeft(l.Right())
+ l.SetRight(nil)
// An OXDOT uses the Sym field to hold
// the field to the right of the dot,
// so s will be non-nil, but an OXDOT
// is never a valid struct literal key.
- if key.Sym == nil || key.Op == OXDOT || key.Sym.IsBlank() {
- yyerror("invalid field name %v in struct initializer", key)
- l.Left = typecheck(l.Left, ctxExpr)
+ if key.Sym() == nil || key.Op() == ir.OXDOT || key.Sym().IsBlank() {
+ base.Errorf("invalid field name %v in struct initializer", key)
+ l.SetLeft(typecheck(l.Left(), ctxExpr))
continue
}
// Sym might have resolved to name in other top-level
// package, because of import dot. Redirect to correct sym
// before we do the lookup.
- s := key.Sym
- if s.Pkg != localpkg && types.IsExported(s.Name) {
+ s := key.Sym()
+ if s.Pkg != ir.LocalPkg && types.IsExported(s.Name) {
s1 := lookup(s.Name)
if s1.Origpkg == s.Pkg {
s = s1
}
}
- l.Sym = s
+ l.SetSym(s)
}
- if l.Op != OSTRUCTKEY {
+ if l.Op() != ir.OSTRUCTKEY {
if !errored {
- yyerror("mixture of field:value and value initializers")
+ base.Errorf("mixture of field:value and value initializers")
errored = true
}
ls[i] = typecheck(ls[i], ctxExpr)
continue
}
- f := lookdot1(nil, l.Sym, t, t.Fields(), 0)
+ f := lookdot1(nil, l.Sym(), t, t.Fields(), 0)
if f == nil {
- if ci := lookdot1(nil, l.Sym, t, t.Fields(), 2); ci != nil { // Case-insensitive lookup.
+ if ci := lookdot1(nil, l.Sym(), t, t.Fields(), 2); ci != nil { // Case-insensitive lookup.
if visible(ci.Sym) {
- yyerror("unknown field '%v' in struct literal of type %v (but does have %v)", l.Sym, t, ci.Sym)
- } else if nonexported(l.Sym) && l.Sym.Name == ci.Sym.Name { // Ensure exactness before the suggestion.
- yyerror("cannot refer to unexported field '%v' in struct literal of type %v", l.Sym, t)
+ base.Errorf("unknown field '%v' in struct literal of type %v (but does have %v)", l.Sym(), t, ci.Sym)
+ } else if nonexported(l.Sym()) && l.Sym().Name == ci.Sym.Name { // Ensure exactness before the suggestion.
+ base.Errorf("cannot refer to unexported field '%v' in struct literal of type %v", l.Sym(), t)
} else {
- yyerror("unknown field '%v' in struct literal of type %v", l.Sym, t)
+ base.Errorf("unknown field '%v' in struct literal of type %v", l.Sym(), t)
}
continue
}
var f *types.Field
- p, _ := dotpath(l.Sym, t, &f, true)
+ p, _ := dotpath(l.Sym(), t, &f, true)
if p == nil || f.IsMethod() {
- yyerror("unknown field '%v' in struct literal of type %v", l.Sym, t)
+ base.Errorf("unknown field '%v' in struct literal of type %v", l.Sym(), t)
continue
}
// dotpath returns the parent embedded types in reverse order.
@@ -2980,33 +2987,33 @@ func typecheckcomplit(n *Node) (res *Node) {
for ei := len(p) - 1; ei >= 0; ei-- {
ep = append(ep, p[ei].field.Sym.Name)
}
- ep = append(ep, l.Sym.Name)
- yyerror("cannot use promoted field %v in struct literal of type %v", strings.Join(ep, "."), t)
+ ep = append(ep, l.Sym().Name)
+ base.Errorf("cannot use promoted field %v in struct literal of type %v", strings.Join(ep, "."), t)
continue
}
fielddup(f.Sym.Name, hash)
- l.Xoffset = f.Offset
+ l.SetOffset(f.Offset)
// No pushtype allowed here. Tried and rejected.
- l.Left = typecheck(l.Left, ctxExpr)
- l.Left = assignconv(l.Left, f.Type, "field value")
+ l.SetLeft(typecheck(l.Left(), ctxExpr))
+ l.SetLeft(assignconv(l.Left(), f.Type, "field value"))
}
}
- n.Op = OSTRUCTLIT
- n.Right = nil
+ n.SetOp(ir.OSTRUCTLIT)
+ n.SetRight(nil)
}
return n
}
// typecheckarraylit type-checks a sequence of slice/array literal elements.
-func typecheckarraylit(elemType *types.Type, bound int64, elts []*Node, ctx string) int64 {
+func typecheckarraylit(elemType *types.Type, bound int64, elts []ir.Node, ctx string) int64 {
// If there are key/value pairs, create a map to keep seen
// keys so we can check for duplicate indices.
var indices map[int64]bool
for _, elt := range elts {
- if elt.Op == OKEY {
+ if elt.Op() == ir.OKEY {
indices = make(map[int64]bool)
break
}
@@ -3015,40 +3022,46 @@ func typecheckarraylit(elemType *types.Type, bound int64, elts []*Node, ctx stri
var key, length int64
for i, elt := range elts {
setlineno(elt)
- vp := &elts[i]
- if elt.Op == OKEY {
- elt.Left = typecheck(elt.Left, ctxExpr)
- key = indexconst(elt.Left)
+ r := elts[i]
+ var kv ir.Node
+ if elt.Op() == ir.OKEY {
+ elt.SetLeft(typecheck(elt.Left(), ctxExpr))
+ key = indexconst(elt.Left())
if key < 0 {
- if !elt.Left.Diag() {
+ if !elt.Left().Diag() {
if key == -2 {
- yyerror("index too large")
+ base.Errorf("index too large")
} else {
- yyerror("index must be non-negative integer constant")
+ base.Errorf("index must be non-negative integer constant")
}
- elt.Left.SetDiag(true)
+ elt.Left().SetDiag(true)
}
key = -(1 << 30) // stay negative for a while
}
- vp = &elt.Right
+ kv = elt
+ r = elt.Right()
}
- r := *vp
r = pushtype(r, elemType)
r = typecheck(r, ctxExpr)
- *vp = assignconv(r, elemType, ctx)
+ r = assignconv(r, elemType, ctx)
+ if kv != nil {
+ kv.SetRight(r)
+ } else {
+ elts[i] = r
+ }
if key >= 0 {
if indices != nil {
if indices[key] {
- yyerror("duplicate index in %s: %d", ctx, key)
+ base.Errorf("duplicate index in %s: %d", ctx, key)
} else {
indices[key] = true
}
}
if bound >= 0 && key >= bound {
- yyerror("array index %d out of bounds [0:%d]", key, bound)
+ base.Errorf("array index %d out of bounds [0:%d]", key, bound)
bound = -1
}
}
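// Slot sketch: the old typecheckarraylit loop wrote elements back through
// a field pointer (vp = &elt.Right; *vp = assignconv(r, ...)), which is
// unavailable once elt is an ir.Node interface value. The new loop tracks
// the OKEY node separately and stores through its setter:
//
//	if kv != nil {
//		kv.SetRight(r)
//	} else {
//		elts[i] = r
//	}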
@@ -3064,7 +3077,7 @@ func typecheckarraylit(elemType *types.Type, bound int64, elts []*Node, ctx stri
// visible reports whether sym is exported or locally defined.
func visible(sym *types.Sym) bool {
- return sym != nil && (types.IsExported(sym.Name) || sym.Pkg == localpkg)
+ return sym != nil && (types.IsExported(sym.Name) || sym.Pkg == ir.LocalPkg)
}
// nonexported reports whether sym is an unexported field.
@@ -3073,24 +3086,24 @@ func nonexported(sym *types.Sym) bool {
}
// lvalue etc
-func islvalue(n *Node) bool {
- switch n.Op {
- case OINDEX:
- if n.Left.Type != nil && n.Left.Type.IsArray() {
- return islvalue(n.Left)
+func islvalue(n ir.Node) bool {
+ switch n.Op() {
+ case ir.OINDEX:
+ if n.Left().Type() != nil && n.Left().Type().IsArray() {
+ return islvalue(n.Left())
}
- if n.Left.Type != nil && n.Left.Type.IsString() {
+ if n.Left().Type() != nil && n.Left().Type().IsString() {
return false
}
fallthrough
- case ODEREF, ODOTPTR, OCLOSUREVAR:
+ case ir.ODEREF, ir.ODOTPTR, ir.OCLOSUREVAR:
return true
- case ODOT:
- return islvalue(n.Left)
+ case ir.ODOT:
+ return islvalue(n.Left())
- case ONAME:
- if n.Class() == PFUNC {
+ case ir.ONAME:
+ if n.Class() == ir.PFUNC {
return false
}
return true
@@ -3099,20 +3112,20 @@ func islvalue(n *Node) bool {
return false
}
-func checklvalue(n *Node, verb string) {
+func checklvalue(n ir.Node, verb string) {
if !islvalue(n) {
- yyerror("cannot %s %v", verb, n)
+ base.Errorf("cannot %s %v", verb, n)
}
}
-func checkassign(stmt *Node, n *Node) {
+func checkassign(stmt ir.Node, n ir.Node) {
// Variables declared in ORANGE are assigned on every iteration.
- if n.Name == nil || n.Name.Defn != stmt || stmt.Op == ORANGE {
+ if n.Name() == nil || n.Name().Defn != stmt || stmt.Op() == ir.ORANGE {
r := outervalue(n)
- if r.Op == ONAME {
- r.Name.SetAssigned(true)
- if r.Name.IsClosureVar() {
- r.Name.Defn.Name.SetAssigned(true)
+ if r.Op() == ir.ONAME {
+ r.Name().SetAssigned(true)
+ if r.Name().IsClosureVar() {
+ r.Name().Defn.Name().SetAssigned(true)
}
}
}
@@ -3120,30 +3133,30 @@ func checkassign(stmt *Node, n *Node) {
if islvalue(n) {
return
}
- if n.Op == OINDEXMAP {
+ if n.Op() == ir.OINDEXMAP {
n.SetIndexMapLValue(true)
return
}
// have already complained about n being invalid
- if n.Type == nil {
+ if n.Type() == nil {
return
}
switch {
- case n.Op == ODOT && n.Left.Op == OINDEXMAP:
- yyerror("cannot assign to struct field %v in map", n)
- case (n.Op == OINDEX && n.Left.Type.IsString()) || n.Op == OSLICESTR:
- yyerror("cannot assign to %v (strings are immutable)", n)
- case n.Op == OLITERAL && n.Sym != nil && n.isGoConst():
- yyerror("cannot assign to %v (declared const)", n)
+ case n.Op() == ir.ODOT && n.Left().Op() == ir.OINDEXMAP:
+ base.Errorf("cannot assign to struct field %v in map", n)
+ case (n.Op() == ir.OINDEX && n.Left().Type().IsString()) || n.Op() == ir.OSLICESTR:
+ base.Errorf("cannot assign to %v (strings are immutable)", n)
+ case n.Op() == ir.OLITERAL && n.Sym() != nil && isGoConst(n):
+ base.Errorf("cannot assign to %v (declared const)", n)
default:
- yyerror("cannot assign to %v", n)
+ base.Errorf("cannot assign to %v", n)
}
- n.Type = nil
+ n.SetType(nil)
}
-func checkassignlist(stmt *Node, l Nodes) {
+func checkassignlist(stmt ir.Node, l ir.Nodes) {
for _, n := range l.Slice() {
checkassign(stmt, n)
}
@@ -3164,33 +3177,36 @@ func checkassignlist(stmt *Node, l Nodes) {
// currently OK, since the only place samesafeexpr gets used on an
// lvalue expression is for OSLICE and OAPPEND optimizations, and it
// is correct in those settings.
-func samesafeexpr(l *Node, r *Node) bool {
- if l.Op != r.Op || !types.Identical(l.Type, r.Type) {
+func samesafeexpr(l ir.Node, r ir.Node) bool {
+ if l.Op() != r.Op() || !types.Identical(l.Type(), r.Type()) {
return false
}
- switch l.Op {
- case ONAME, OCLOSUREVAR:
+ switch l.Op() {
+ case ir.ONAME, ir.OCLOSUREVAR:
return l == r
- case ODOT, ODOTPTR:
- return l.Sym != nil && r.Sym != nil && l.Sym == r.Sym && samesafeexpr(l.Left, r.Left)
+ case ir.ODOT, ir.ODOTPTR:
+ return l.Sym() != nil && r.Sym() != nil && l.Sym() == r.Sym() && samesafeexpr(l.Left(), r.Left())
- case ODEREF, OCONVNOP,
- ONOT, OBITNOT, OPLUS, ONEG:
- return samesafeexpr(l.Left, r.Left)
+ case ir.ODEREF, ir.OCONVNOP,
+ ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG:
+ return samesafeexpr(l.Left(), r.Left())
- case OCONV:
+ case ir.OCONV:
// Some conversions can't be reused, such as []byte(str).
// Allow only numeric-ish types. This is a bit conservative.
- return issimple[l.Type.Etype] && samesafeexpr(l.Left, r.Left)
+ return issimple[l.Type().Etype] && samesafeexpr(l.Left(), r.Left())
+
+ case ir.OINDEX, ir.OINDEXMAP,
+ ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
+ return samesafeexpr(l.Left(), r.Left()) && samesafeexpr(l.Right(), r.Right())
- case OINDEX, OINDEXMAP,
- OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
- return samesafeexpr(l.Left, r.Left) && samesafeexpr(l.Right, r.Right)
+ case ir.OLITERAL:
+ return constant.Compare(l.Val(), token.EQL, r.Val())
- case OLITERAL:
- return eqval(l.Val(), r.Val())
+ case ir.ONIL:
+ return true
}
return false
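// Literal sketch: constant equality in samesafeexpr now goes through
// go/constant, and nil gets its own case, apparently because OLITERAL no
// longer covers nil values:
//
//	case ir.OLITERAL:
//		return constant.Compare(l.Val(), token.EQL, r.Val())
//	case ir.ONIL:
//		return true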
@@ -3199,8 +3215,8 @@ func samesafeexpr(l *Node, r *Node) bool {
// type check assignment.
// if this assignment is the definition of a var on the left side,
// fill in the var's type.
-func typecheckas(n *Node) {
- if enableTrace && trace {
+func typecheckas(n ir.Node) {
+ if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckas", n)(nil)
}
@@ -3211,30 +3227,30 @@ func typecheckas(n *Node) {
// if the variable has a type (ntype) then typechecking
// will not look at defn, so it is okay (and desirable,
// so that the conversion below happens).
- n.Left = resolve(n.Left)
+ n.SetLeft(resolve(n.Left()))
- if n.Left.Name == nil || n.Left.Name.Defn != n || n.Left.Name.Param.Ntype != nil {
- n.Left = typecheck(n.Left, ctxExpr|ctxAssign)
+ if n.Left().Name() == nil || n.Left().Name().Defn != n || n.Left().Name().Param.Ntype != nil {
+ n.SetLeft(typecheck(n.Left(), ctxExpr|ctxAssign))
}
// Use ctxMultiOK so we can emit an "N variables but M values" error
// to be consistent with typecheckas2 (#26616).
- n.Right = typecheck(n.Right, ctxExpr|ctxMultiOK)
- checkassign(n, n.Left)
- if n.Right != nil && n.Right.Type != nil {
- if n.Right.Type.IsFuncArgStruct() {
- yyerror("assignment mismatch: 1 variable but %v returns %d values", n.Right.Left, n.Right.Type.NumFields())
+ n.SetRight(typecheck(n.Right(), ctxExpr|ctxMultiOK))
+ checkassign(n, n.Left())
+ if n.Right() != nil && n.Right().Type() != nil {
+ if n.Right().Type().IsFuncArgStruct() {
+ base.Errorf("assignment mismatch: 1 variable but %v returns %d values", n.Right().Left(), n.Right().Type().NumFields())
// Multi-value RHS isn't actually valid for OAS; nil out
// to indicate failed typechecking.
- n.Right.Type = nil
- } else if n.Left.Type != nil {
- n.Right = assignconv(n.Right, n.Left.Type, "assignment")
+ n.Right().SetType(nil)
+ } else if n.Left().Type() != nil {
+ n.SetRight(assignconv(n.Right(), n.Left().Type(), "assignment"))
}
}
- if n.Left.Name != nil && n.Left.Name.Defn == n && n.Left.Name.Param.Ntype == nil {
- n.Right = defaultlit(n.Right, nil)
- n.Left.Type = n.Right.Type
+ if n.Left().Name() != nil && n.Left().Name().Defn == n && n.Left().Name().Param.Ntype == nil {
+ n.SetRight(defaultlit(n.Right(), nil))
+ n.Left().SetType(n.Right().Type())
}
// second half of dance.
@@ -3242,93 +3258,93 @@ func typecheckas(n *Node) {
// just to get it over with. see dance above.
n.SetTypecheck(1)
- if n.Left.Typecheck() == 0 {
- n.Left = typecheck(n.Left, ctxExpr|ctxAssign)
+ if n.Left().Typecheck() == 0 {
+ n.SetLeft(typecheck(n.Left(), ctxExpr|ctxAssign))
}
- if !n.Left.isBlank() {
- checkwidth(n.Left.Type) // ensure width is calculated for backend
+ if !ir.IsBlank(n.Left()) {
+ checkwidth(n.Left().Type()) // ensure width is calculated for backend
}
}
-func checkassignto(src *types.Type, dst *Node) {
- if op, why := assignop(src, dst.Type); op == OXXX {
- yyerror("cannot assign %v to %L in multiple assignment%s", src, dst, why)
+func checkassignto(src *types.Type, dst ir.Node) {
+ if op, why := assignop(src, dst.Type()); op == ir.OXXX {
+ base.Errorf("cannot assign %v to %L in multiple assignment%s", src, dst, why)
return
}
}
-func typecheckas2(n *Node) {
- if enableTrace && trace {
+func typecheckas2(n ir.Node) {
+ if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckas2", n)(nil)
}
- ls := n.List.Slice()
+ ls := n.List().Slice()
for i1, n1 := range ls {
// delicate little dance.
n1 = resolve(n1)
ls[i1] = n1
- if n1.Name == nil || n1.Name.Defn != n || n1.Name.Param.Ntype != nil {
+ if n1.Name() == nil || n1.Name().Defn != n || n1.Name().Param.Ntype != nil {
ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
}
}
- cl := n.List.Len()
- cr := n.Rlist.Len()
+ cl := n.List().Len()
+ cr := n.Rlist().Len()
if cl > 1 && cr == 1 {
- n.Rlist.SetFirst(typecheck(n.Rlist.First(), ctxExpr|ctxMultiOK))
+ n.Rlist().SetFirst(typecheck(n.Rlist().First(), ctxExpr|ctxMultiOK))
} else {
- typecheckslice(n.Rlist.Slice(), ctxExpr)
+ typecheckslice(n.Rlist().Slice(), ctxExpr)
}
- checkassignlist(n, n.List)
+ checkassignlist(n, n.List())
- var l *Node
- var r *Node
+ var l ir.Node
+ var r ir.Node
if cl == cr {
// easy
- ls := n.List.Slice()
- rs := n.Rlist.Slice()
+ ls := n.List().Slice()
+ rs := n.Rlist().Slice()
for il, nl := range ls {
nr := rs[il]
- if nl.Type != nil && nr.Type != nil {
- rs[il] = assignconv(nr, nl.Type, "assignment")
+ if nl.Type() != nil && nr.Type() != nil {
+ rs[il] = assignconv(nr, nl.Type(), "assignment")
}
- if nl.Name != nil && nl.Name.Defn == n && nl.Name.Param.Ntype == nil {
+ if nl.Name() != nil && nl.Name().Defn == n && nl.Name().Param.Ntype == nil {
rs[il] = defaultlit(rs[il], nil)
- nl.Type = rs[il].Type
+ nl.SetType(rs[il].Type())
}
}
goto out
}
- l = n.List.First()
- r = n.Rlist.First()
+ l = n.List().First()
+ r = n.Rlist().First()
// x,y,z = f()
if cr == 1 {
- if r.Type == nil {
+ if r.Type() == nil {
goto out
}
- switch r.Op {
- case OCALLMETH, OCALLINTER, OCALLFUNC:
- if !r.Type.IsFuncArgStruct() {
+ switch r.Op() {
+ case ir.OCALLMETH, ir.OCALLINTER, ir.OCALLFUNC:
+ if !r.Type().IsFuncArgStruct() {
break
}
- cr = r.Type.NumFields()
+ cr = r.Type().NumFields()
if cr != cl {
goto mismatch
}
- n.Op = OAS2FUNC
- n.Right = r
- n.Rlist.Set(nil)
- for i, l := range n.List.Slice() {
- f := r.Type.Field(i)
- if f.Type != nil && l.Type != nil {
+ n.SetOp(ir.OAS2FUNC)
+ n.SetRight(r)
+ n.PtrRlist().Set(nil)
+ for i, l := range n.List().Slice() {
+ f := r.Type().Field(i)
+ if f.Type != nil && l.Type() != nil {
checkassignto(f.Type, l)
}
- if l.Name != nil && l.Name.Defn == n && l.Name.Param.Ntype == nil {
- l.Type = f.Type
+ if l.Name() != nil && l.Name().Defn == n && l.Name().Param.Ntype == nil {
+ l.SetType(f.Type)
}
}
goto out
@@ -3337,51 +3353,51 @@ func typecheckas2(n *Node) {
// x, ok = y
if cl == 2 && cr == 1 {
- if r.Type == nil {
+ if r.Type() == nil {
goto out
}
- switch r.Op {
- case OINDEXMAP, ORECV, ODOTTYPE:
- switch r.Op {
- case OINDEXMAP:
- n.Op = OAS2MAPR
- case ORECV:
- n.Op = OAS2RECV
- case ODOTTYPE:
- n.Op = OAS2DOTTYPE
- r.Op = ODOTTYPE2
+ switch r.Op() {
+ case ir.OINDEXMAP, ir.ORECV, ir.ODOTTYPE:
+ switch r.Op() {
+ case ir.OINDEXMAP:
+ n.SetOp(ir.OAS2MAPR)
+ case ir.ORECV:
+ n.SetOp(ir.OAS2RECV)
+ case ir.ODOTTYPE:
+ n.SetOp(ir.OAS2DOTTYPE)
+ r.SetOp(ir.ODOTTYPE2)
}
- n.Right = r
- n.Rlist.Set(nil)
- if l.Type != nil {
- checkassignto(r.Type, l)
+ n.SetRight(r)
+ n.PtrRlist().Set(nil)
+ if l.Type() != nil {
+ checkassignto(r.Type(), l)
}
- if l.Name != nil && l.Name.Defn == n {
- l.Type = r.Type
+ if l.Name() != nil && l.Name().Defn == n {
+ l.SetType(r.Type())
}
- l := n.List.Second()
- if l.Type != nil && !l.Type.IsBoolean() {
- checkassignto(types.Types[TBOOL], l)
+ l := n.List().Second()
+ if l.Type() != nil && !l.Type().IsBoolean() {
+ checkassignto(types.Types[types.TBOOL], l)
}
- if l.Name != nil && l.Name.Defn == n && l.Name.Param.Ntype == nil {
- l.Type = types.Types[TBOOL]
+ if l.Name() != nil && l.Name().Defn == n && l.Name().Param.Ntype == nil {
+ l.SetType(types.Types[types.TBOOL])
}
goto out
}
}
mismatch:
- switch r.Op {
+ switch r.Op() {
default:
- yyerror("assignment mismatch: %d variables but %d values", cl, cr)
- case OCALLFUNC, OCALLMETH, OCALLINTER:
- yyerror("assignment mismatch: %d variables but %v returns %d values", cl, r.Left, cr)
+ base.Errorf("assignment mismatch: %d variables but %d values", cl, cr)
+ case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
+ base.Errorf("assignment mismatch: %d variables but %v returns %d values", cl, r.Left(), cr)
}
// second half of dance
out:
n.SetTypecheck(1)
- ls = n.List.Slice()
+ ls = n.List().Slice()
for i1, n1 := range ls {
if n1.Typecheck() == 0 {
ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
@@ -3390,80 +3406,79 @@ out:
}
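
The two hunks above complete typecheckas2's move to the ir package. Its special cases map one-to-one onto Go's multi-assignment forms: OAS2FUNC for multi-value calls, and OAS2MAPR, OAS2RECV, and OAS2DOTTYPE for the three comma-ok expressions. A standalone reminder of the source forms being classified (plain Go, no compiler internals):

    package main

    import "fmt"

    func divmod(a, b int) (int, int) { return a / b, a % b }

    func main() {
        m := map[string]int{"a": 1}
        ch := make(chan int, 1)
        ch <- 2
        var i interface{} = 3

        v, ok := m["a"]        // OAS2MAPR: map index, comma-ok
        r, ok2 := <-ch         // OAS2RECV: channel receive, comma-ok
        n, ok3 := i.(int)      // OAS2DOTTYPE: type assertion, comma-ok
        q, rem := divmod(7, 2) // OAS2FUNC: multi-value function call
        fmt.Println(v, ok, r, ok2, n, ok3, q, rem)
    }
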
// type check function definition
-func typecheckfunc(n *Node) {
- if enableTrace && trace {
+func typecheckfunc(n ir.Node) {
+ if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckfunc", n)(nil)
}
- for _, ln := range n.Func.Dcl {
- if ln.Op == ONAME && (ln.Class() == PPARAM || ln.Class() == PPARAMOUT) {
- ln.Name.Decldepth = 1
+ for _, ln := range n.Func().Dcl {
+ if ln.Op() == ir.ONAME && (ln.Class() == ir.PPARAM || ln.Class() == ir.PPARAMOUT) {
+ ln.Name().Decldepth = 1
}
}
- n.Func.Nname = typecheck(n.Func.Nname, ctxExpr|ctxAssign)
- t := n.Func.Nname.Type
+ n.Func().Nname = typecheck(n.Func().Nname, ctxExpr|ctxAssign)
+ t := n.Func().Nname.Type()
if t == nil {
return
}
- n.Type = t
- t.FuncType().Nname = asTypesNode(n.Func.Nname)
+ n.SetType(t)
rcvr := t.Recv()
- if rcvr != nil && n.Func.Shortname != nil {
- m := addmethod(n.Func.Shortname, t, true, n.Func.Pragma&Nointerface != 0)
+ if rcvr != nil && n.Func().Shortname != nil {
+ m := addmethod(n, n.Func().Shortname, t, true, n.Func().Pragma&ir.Nointerface != 0)
if m == nil {
return
}
- n.Func.Nname.Sym = methodSym(rcvr.Type, n.Func.Shortname)
- declare(n.Func.Nname, PFUNC)
+ n.Func().Nname.SetSym(methodSym(rcvr.Type, n.Func().Shortname))
+ declare(n.Func().Nname, ir.PFUNC)
}
- if Ctxt.Flag_dynlink && !inimport && n.Func.Nname != nil {
- makefuncsym(n.Func.Nname.Sym)
+ if base.Ctxt.Flag_dynlink && !inimport && n.Func().Nname != nil {
+ makefuncsym(n.Func().Nname.Sym())
}
}
// The result of stringtoruneslit MUST be assigned back to n, e.g.
// n.Left = stringtoruneslit(n.Left)
-func stringtoruneslit(n *Node) *Node {
- if n.Left.Op != OLITERAL || n.Left.Val().Ctype() != CTSTR {
- Fatalf("stringtoarraylit %v", n)
+func stringtoruneslit(n ir.Node) ir.Node {
+ if n.Left().Op() != ir.OLITERAL || n.Left().Val().Kind() != constant.String {
+ base.Fatalf("stringtoarraylit %v", n)
}
- var l []*Node
+ var l []ir.Node
i := 0
- for _, r := range n.Left.StringVal() {
- l = append(l, nod(OKEY, nodintconst(int64(i)), nodintconst(int64(r))))
+ for _, r := range n.Left().StringVal() {
+ l = append(l, ir.Nod(ir.OKEY, nodintconst(int64(i)), nodintconst(int64(r))))
i++
}
- nn := nod(OCOMPLIT, nil, typenod(n.Type))
- nn.List.Set(l)
+ nn := ir.Nod(ir.OCOMPLIT, nil, typenod(n.Type()))
+ nn.PtrList().Set(l)
nn = typecheck(nn, ctxExpr)
return nn
}
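
stringtoruneslit turns a []rune conversion of a constant string into an OCOMPLIT whose elements are the OKEY index/value pairs built above. In source terms the rewrite amounts to the following equivalence (ordinary Go, shown only to make the shape of the generated literal concrete):

    package main

    import "fmt"

    func main() {
        a := []rune("go")           // what the program says
        b := []rune{0: 'g', 1: 'o'} // the keyed composite literal the compiler builds
        fmt.Println(string(a) == string(b)) // true
    }
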
-var mapqueue []*Node
+var mapqueue []ir.Node
func checkMapKeys() {
for _, n := range mapqueue {
- k := n.Type.MapType().Key
+ k := n.Type().MapType().Key
if !k.Broke() && !IsComparable(k) {
- yyerrorl(n.Pos, "invalid map key type %v", k)
+ base.ErrorfAt(n.Pos(), "invalid map key type %v", k)
}
}
mapqueue = nil
}
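
checkMapKeys runs after all types are computed, rejecting map declarations whose key type is not comparable. For example (the slice-keyed map below is the kind of declaration that now reports through base.ErrorfAt):

    package main

    func main() {
        ok := map[string]int{} // string is comparable: accepted
        _ = ok
        // bad := map[[]byte]int{} // compile error: invalid map key type []byte
    }
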
func setUnderlying(t, underlying *types.Type) {
- if underlying.Etype == TFORW {
+ if underlying.Etype == types.TFORW {
// This type isn't computed yet; when it is, update n.
underlying.ForwardType().Copyto = append(underlying.ForwardType().Copyto, t)
return
}
- n := asNode(t.Nod)
+ n := ir.AsNode(t.Nod)
ft := t.ForwardType()
cache := t.Cache
@@ -3471,10 +3486,10 @@ func setUnderlying(t, underlying *types.Type) {
*t = *underlying
// Restore unnecessarily clobbered attributes.
- t.Nod = asTypesNode(n)
- t.Sym = n.Sym
- if n.Name != nil {
- t.Vargen = n.Name.Vargen
+ t.Nod = n
+ t.Sym = n.Sym()
+ if n.Name() != nil {
+ t.Vargen = n.Name().Vargen
}
t.Cache = cache
t.SetDeferwidth(false)
@@ -3488,7 +3503,7 @@ func setUnderlying(t, underlying *types.Type) {
}
// Propagate go:notinheap pragma from the Name to the Type.
- if n.Name != nil && n.Name.Param != nil && n.Name.Param.Pragma()&NotInHeap != 0 {
+ if n.Name() != nil && n.Name().Param != nil && n.Name().Param.Pragma()&ir.NotInHeap != 0 {
t.SetNotInHeap(true)
}
@@ -3500,147 +3515,149 @@ func setUnderlying(t, underlying *types.Type) {
// Double-check use of type as embedded type.
if ft.Embedlineno.IsKnown() {
if t.IsPtr() || t.IsUnsafePtr() {
- yyerrorl(ft.Embedlineno, "embedded type cannot be a pointer")
+ base.ErrorfAt(ft.Embedlineno, "embedded type cannot be a pointer")
}
}
}
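
setUnderlying fills in TFORW placeholders created for types whose underlying type is not yet known, which is what makes forward references and mutual recursion between named types work. A small source-level example of the situation it resolves:

    package main

    // Next mentions List before List's underlying struct is complete,
    // so List starts life as a TFORW and is patched by setUnderlying.
    type List struct {
        Val  int
        Next *List
    }

    type A struct{ B *B } // mutually recursive pair: same mechanism
    type B struct{ A *A }

    func main() {}
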
-func typecheckdeftype(n *Node) {
- if enableTrace && trace {
+func typecheckdeftype(n ir.Node) {
+ if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckdeftype", n)(nil)
}
n.SetTypecheck(1)
- n.Name.Param.Ntype = typecheck(n.Name.Param.Ntype, ctxType)
- t := n.Name.Param.Ntype.Type
+ n.Name().Param.Ntype = typecheck(n.Name().Param.Ntype, ctxType)
+ t := n.Name().Param.Ntype.Type()
if t == nil {
n.SetDiag(true)
- n.Type = nil
- } else if n.Type == nil {
+ n.SetType(nil)
+ } else if n.Type() == nil {
n.SetDiag(true)
} else {
// copy new type and clear fields
// that don't come along.
- setUnderlying(n.Type, t)
+ setUnderlying(n.Type(), t)
}
}
-func typecheckdef(n *Node) {
- if enableTrace && trace {
+func typecheckdef(n ir.Node) {
+ if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckdef", n)(nil)
}
lno := setlineno(n)
- if n.Op == ONONAME {
+ if n.Op() == ir.ONONAME {
if !n.Diag() {
n.SetDiag(true)
// Note: adderrorname looks for this string and
// adds context about the outer expression
- yyerrorl(lineno, "undefined: %v", n.Sym)
+ base.ErrorfAt(base.Pos, "undefined: %v", n.Sym())
}
- lineno = lno
+ base.Pos = lno
return
}
if n.Walkdef() == 1 {
- lineno = lno
+ base.Pos = lno
return
}
typecheckdefstack = append(typecheckdefstack, n)
if n.Walkdef() == 2 {
- flusherrors()
+ base.FlushErrors()
fmt.Printf("typecheckdef loop:")
for i := len(typecheckdefstack) - 1; i >= 0; i-- {
n := typecheckdefstack[i]
- fmt.Printf(" %v", n.Sym)
+ fmt.Printf(" %v", n.Sym())
}
fmt.Printf("\n")
- Fatalf("typecheckdef loop")
+ base.Fatalf("typecheckdef loop")
}
n.SetWalkdef(2)
- if n.Type != nil || n.Sym == nil { // builtin or no name
+ if n.Type() != nil || n.Sym() == nil { // builtin or no name
goto ret
}
- switch n.Op {
+ switch n.Op() {
default:
- Fatalf("typecheckdef %v", n.Op)
-
- case OLITERAL:
- if n.Name.Param.Ntype != nil {
- n.Name.Param.Ntype = typecheck(n.Name.Param.Ntype, ctxType)
- n.Type = n.Name.Param.Ntype.Type
- n.Name.Param.Ntype = nil
- if n.Type == nil {
+ base.Fatalf("typecheckdef %v", n.Op())
+
+ case ir.OLITERAL:
+ if n.Name().Param.Ntype != nil {
+ n.Name().Param.Ntype = typecheck(n.Name().Param.Ntype, ctxType)
+ n.SetType(n.Name().Param.Ntype.Type())
+ n.Name().Param.Ntype = nil
+ if n.Type() == nil {
n.SetDiag(true)
goto ret
}
}
- e := n.Name.Defn
- n.Name.Defn = nil
+ e := n.Name().Defn
+ n.Name().Defn = nil
if e == nil {
- Dump("typecheckdef nil defn", n)
- yyerrorl(n.Pos, "xxx")
+ ir.Dump("typecheckdef nil defn", n)
+ base.ErrorfAt(n.Pos(), "xxx")
}
e = typecheck(e, ctxExpr)
- if e.Type == nil {
+ if e.Type() == nil {
goto ret
}
- if !e.isGoConst() {
+ if !isGoConst(e) {
if !e.Diag() {
- if Isconst(e, CTNIL) {
- yyerrorl(n.Pos, "const initializer cannot be nil")
+ if e.Op() == ir.ONIL {
+ base.ErrorfAt(n.Pos(), "const initializer cannot be nil")
} else {
- yyerrorl(n.Pos, "const initializer %v is not a constant", e)
+ base.ErrorfAt(n.Pos(), "const initializer %v is not a constant", e)
}
e.SetDiag(true)
}
goto ret
}
- t := n.Type
+ t := n.Type()
if t != nil {
- if !okforconst[t.Etype] {
- yyerrorl(n.Pos, "invalid constant type %v", t)
+ if !ir.OKForConst[t.Etype] {
+ base.ErrorfAt(n.Pos(), "invalid constant type %v", t)
goto ret
}
- if !e.Type.IsUntyped() && !types.Identical(t, e.Type) {
- yyerrorl(n.Pos, "cannot use %L as type %v in const initializer", e, t)
+ if !e.Type().IsUntyped() && !types.Identical(t, e.Type()) {
+ base.ErrorfAt(n.Pos(), "cannot use %L as type %v in const initializer", e, t)
goto ret
}
e = convlit(e, t)
}
- n.SetVal(e.Val())
- n.Type = e.Type
+ n.SetType(e.Type())
+ if n.Type() != nil {
+ n.SetVal(e.Val())
+ }
- case ONAME:
- if n.Name.Param.Ntype != nil {
- n.Name.Param.Ntype = typecheck(n.Name.Param.Ntype, ctxType)
- n.Type = n.Name.Param.Ntype.Type
- if n.Type == nil {
+ case ir.ONAME:
+ if n.Name().Param.Ntype != nil {
+ n.Name().Param.Ntype = typecheck(n.Name().Param.Ntype, ctxType)
+ n.SetType(n.Name().Param.Ntype.Type())
+ if n.Type() == nil {
n.SetDiag(true)
goto ret
}
}
- if n.Type != nil {
+ if n.Type() != nil {
break
}
- if n.Name.Defn == nil {
+ if n.Name().Defn == nil {
if n.SubOp() != 0 { // like OPRINTN
break
}
- if nsavederrors+nerrors > 0 {
+ if base.Errors() > 0 {
// Can have undefined variables in x := foo
// that make x have an n.name.Defn == nil.
// If there are other errors anyway, don't
@@ -3648,33 +3665,33 @@ func typecheckdef(n *Node) {
break
}
- Fatalf("var without type, init: %v", n.Sym)
+ base.Fatalf("var without type, init: %v", n.Sym())
}
- if n.Name.Defn.Op == ONAME {
- n.Name.Defn = typecheck(n.Name.Defn, ctxExpr)
- n.Type = n.Name.Defn.Type
+ if n.Name().Defn.Op() == ir.ONAME {
+ n.Name().Defn = typecheck(n.Name().Defn, ctxExpr)
+ n.SetType(n.Name().Defn.Type())
break
}
- n.Name.Defn = typecheck(n.Name.Defn, ctxStmt) // fills in n.Type
+ n.Name().Defn = typecheck(n.Name().Defn, ctxStmt) // fills in n.Type
- case OTYPE:
- if p := n.Name.Param; p.Alias() {
+ case ir.OTYPE:
+ if p := n.Name().Param; p.Alias() {
// Type alias declaration: Simply use the rhs type - no need
// to create a new type.
// If we have a syntax error, p.Ntype may be nil.
if p.Ntype != nil {
p.Ntype = typecheck(p.Ntype, ctxType)
- n.Type = p.Ntype.Type
- if n.Type == nil {
+ n.SetType(p.Ntype.Type())
+ if n.Type() == nil {
n.SetDiag(true)
goto ret
}
// For package-level type aliases, set n.Sym.Def so we can identify
// it as a type alias during export. See also #31959.
- if n.Name.Curfn == nil {
- n.Sym.Def = asTypesNode(p.Ntype)
+ if n.Name().Curfn == nil {
+ n.Sym().Def = p.Ntype
}
}
break
@@ -3683,51 +3700,50 @@ func typecheckdef(n *Node) {
// regular type declaration
defercheckwidth()
n.SetWalkdef(1)
- setTypeNode(n, types.New(TFORW))
- n.Type.Sym = n.Sym
- nerrors0 := nerrors
+ setTypeNode(n, types.New(types.TFORW))
+ n.Type().Sym = n.Sym()
+ errorsBefore := base.Errors()
typecheckdeftype(n)
- if n.Type.Etype == TFORW && nerrors > nerrors0 {
+ if n.Type().Etype == types.TFORW && base.Errors() > errorsBefore {
// Something went wrong during type-checking,
// but it was reported. Silence future errors.
- n.Type.SetBroke(true)
+ n.Type().SetBroke(true)
}
resumecheckwidth()
}
ret:
- if n.Op != OLITERAL && n.Type != nil && n.Type.IsUntyped() {
- Fatalf("got %v for %v", n.Type, n)
+ if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().IsUntyped() {
+ base.Fatalf("got %v for %v", n.Type(), n)
}
last := len(typecheckdefstack) - 1
if typecheckdefstack[last] != n {
- Fatalf("typecheckdefstack mismatch")
+ base.Fatalf("typecheckdefstack mismatch")
}
typecheckdefstack[last] = nil
typecheckdefstack = typecheckdefstack[:last]
- lineno = lno
+ base.Pos = lno
n.SetWalkdef(1)
}
-func checkmake(t *types.Type, arg string, np **Node) bool {
+func checkmake(t *types.Type, arg string, np *ir.Node) bool {
n := *np
- if !n.Type.IsInteger() && n.Type.Etype != TIDEAL {
- yyerror("non-integer %s argument in make(%v) - %v", arg, t, n.Type)
+ if !n.Type().IsInteger() && n.Type().Etype != types.TIDEAL {
+ base.Errorf("non-integer %s argument in make(%v) - %v", arg, t, n.Type())
return false
}
// Do range checks for constants before defaultlit
// to avoid redundant "constant NNN overflows int" errors.
- switch consttype(n) {
- case CTINT, CTRUNE, CTFLT, CTCPLX:
- v := toint(n.Val()).U.(*Mpint)
- if v.CmpInt64(0) < 0 {
- yyerror("negative %s argument in make(%v)", arg, t)
+ if n.Op() == ir.OLITERAL {
+ v := toint(n.Val())
+ if constant.Sign(v) < 0 {
+ base.Errorf("negative %s argument in make(%v)", arg, t)
return false
}
- if v.Cmp(maxintval[TINT]) > 0 {
- yyerror("%s argument too large in make(%v)", arg, t)
+ if doesoverflow(v, types.Types[types.TINT]) {
+ base.Errorf("%s argument too large in make(%v)", arg, t)
return false
}
}
@@ -3737,55 +3753,55 @@ func checkmake(t *types.Type, arg string, np **Node) bool {
// are the same as for index expressions. Factor the code better;
// for instance, indexlit might be called here and incorporate some
// of the bounds checks done for make.
- n = defaultlit(n, types.Types[TINT])
+ n = defaultlit(n, types.Types[types.TINT])
*np = n
return true
}
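
The checkmake hunk above swaps the old Mpint machinery (consttype, maxintval) for the standard go/constant package. A self-contained sketch of the same two range checks, using a hypothetical maxInt bound where the compiler calls its doesoverflow helper:

    package main

    import (
        "fmt"
        "go/constant"
        "go/token"
    )

    func checkMakeArg(v, maxInt constant.Value) error {
        if constant.Sign(v) < 0 {
            return fmt.Errorf("negative argument in make")
        }
        if constant.Compare(v, token.GTR, maxInt) {
            return fmt.Errorf("argument too large in make")
        }
        return nil
    }

    func main() {
        maxInt := constant.MakeInt64(1<<31 - 1) // assumes a 32-bit int, for the sketch
        fmt.Println(checkMakeArg(constant.MakeInt64(-1), maxInt))
        fmt.Println(checkMakeArg(constant.MakeInt64(10), maxInt))
    }
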
-func markbreak(n *Node, implicit *Node) {
+func markbreak(n ir.Node, implicit ir.Node) {
if n == nil {
return
}
- switch n.Op {
- case OBREAK:
- if n.Sym == nil {
+ switch n.Op() {
+ case ir.OBREAK:
+ if n.Sym() == nil {
if implicit != nil {
implicit.SetHasBreak(true)
}
} else {
- lab := asNode(n.Sym.Label)
+ lab := ir.AsNode(n.Sym().Label)
if lab != nil {
lab.SetHasBreak(true)
}
}
- case OFOR, OFORUNTIL, OSWITCH, OTYPESW, OSELECT, ORANGE:
+ case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OTYPESW, ir.OSELECT, ir.ORANGE:
implicit = n
fallthrough
default:
- markbreak(n.Left, implicit)
- markbreak(n.Right, implicit)
- markbreaklist(n.Ninit, implicit)
- markbreaklist(n.Nbody, implicit)
- markbreaklist(n.List, implicit)
- markbreaklist(n.Rlist, implicit)
+ markbreak(n.Left(), implicit)
+ markbreak(n.Right(), implicit)
+ markbreaklist(n.Init(), implicit)
+ markbreaklist(n.Body(), implicit)
+ markbreaklist(n.List(), implicit)
+ markbreaklist(n.Rlist(), implicit)
}
}
-func markbreaklist(l Nodes, implicit *Node) {
+func markbreaklist(l ir.Nodes, implicit ir.Node) {
s := l.Slice()
for i := 0; i < len(s); i++ {
n := s[i]
if n == nil {
continue
}
- if n.Op == OLABEL && i+1 < len(s) && n.Name.Defn == s[i+1] {
- switch n.Name.Defn.Op {
- case OFOR, OFORUNTIL, OSWITCH, OTYPESW, OSELECT, ORANGE:
- n.Sym.Label = asTypesNode(n.Name.Defn)
- markbreak(n.Name.Defn, n.Name.Defn)
- n.Sym.Label = nil
+ if n.Op() == ir.OLABEL && i+1 < len(s) && n.Name().Defn == s[i+1] {
+ switch n.Name().Defn.Op() {
+ case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OTYPESW, ir.OSELECT, ir.ORANGE:
+ n.Sym().Label = n.Name().Defn
+ markbreak(n.Name().Defn, n.Name().Defn)
+ n.Sym().Label = nil
i++
continue
}
@@ -3796,32 +3812,32 @@ func markbreaklist(l Nodes, implicit *Node) {
}
// isTermNodes reports whether the Nodes list ends with a terminating statement.
-func (l Nodes) isterminating() bool {
+func isTermNodes(l ir.Nodes) bool {
s := l.Slice()
c := len(s)
if c == 0 {
return false
}
- return s[c-1].isterminating()
+ return isTermNode(s[c-1])
}
// isTermNode reports whether the node n, the last one in a
// statement list, is a terminating statement.
-func (n *Node) isterminating() bool {
- switch n.Op {
+func isTermNode(n ir.Node) bool {
+ switch n.Op() {
// NOTE: OLABEL is treated as a separate statement,
// not a separate prefix, so skipping to the last statement
// in the block handles the labeled statement case by
// skipping over the label. No case OLABEL here.
- case OBLOCK:
- return n.List.isterminating()
+ case ir.OBLOCK:
+ return isTermNodes(n.List())
- case OGOTO, ORETURN, ORETJMP, OPANIC, OFALL:
+ case ir.OGOTO, ir.ORETURN, ir.ORETJMP, ir.OPANIC, ir.OFALL:
return true
- case OFOR, OFORUNTIL:
- if n.Left != nil {
+ case ir.OFOR, ir.OFORUNTIL:
+ if n.Left() != nil {
return false
}
if n.HasBreak() {
@@ -3829,24 +3845,24 @@ func (n *Node) isterminating() bool {
}
return true
- case OIF:
- return n.Nbody.isterminating() && n.Rlist.isterminating()
+ case ir.OIF:
+ return isTermNodes(n.Body()) && isTermNodes(n.Rlist())
- case OSWITCH, OTYPESW, OSELECT:
+ case ir.OSWITCH, ir.OTYPESW, ir.OSELECT:
if n.HasBreak() {
return false
}
def := false
- for _, n1 := range n.List.Slice() {
- if !n1.Nbody.isterminating() {
+ for _, n1 := range n.List().Slice() {
+ if !isTermNodes(n1.Body()) {
return false
}
- if n1.List.Len() == 0 { // default
+ if n1.List().Len() == 0 { // default
def = true
}
}
- if n.Op != OSELECT && !def {
+ if n.Op() != ir.OSELECT && !def {
return false
}
return true
@@ -3856,36 +3872,36 @@ func (n *Node) isterminating() bool {
}
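
isTermNode and isTermNodes implement the spec's "terminating statement" rules that checkreturn (below) applies to function bodies. Two source-level illustrations; the commented-out function is the kind that draws "missing return at end of function":

    package main

    func spins() int {
        for { // condition-free for with no break: terminating, so no return needed
        }
    }

    // func oops(b bool) int {
    //     if b {
    //         return 1
    //     } // if without an else is not terminating
    // } // compile error: missing return at end of function

    func main() { _ = spins }
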
// checkreturn makes sure that fn terminates appropriately.
-func checkreturn(fn *Node) {
- if fn.Type.NumResults() != 0 && fn.Nbody.Len() != 0 {
- markbreaklist(fn.Nbody, nil)
- if !fn.Nbody.isterminating() {
- yyerrorl(fn.Func.Endlineno, "missing return at end of function")
+func checkreturn(fn ir.Node) {
+ if fn.Type().NumResults() != 0 && fn.Body().Len() != 0 {
+ markbreaklist(fn.Body(), nil)
+ if !isTermNodes(fn.Body()) {
+ base.ErrorfAt(fn.Func().Endlineno, "missing return at end of function")
}
}
}
-func deadcode(fn *Node) {
- deadcodeslice(fn.Nbody)
+func deadcode(fn ir.Node) {
+ deadcodeslice(fn.PtrBody())
deadcodefn(fn)
}
-func deadcodefn(fn *Node) {
- if fn.Nbody.Len() == 0 {
+func deadcodefn(fn ir.Node) {
+ if fn.Body().Len() == 0 {
return
}
- for _, n := range fn.Nbody.Slice() {
- if n.Ninit.Len() > 0 {
+ for _, n := range fn.Body().Slice() {
+ if n.Init().Len() > 0 {
return
}
- switch n.Op {
- case OIF:
- if !Isconst(n.Left, CTBOOL) || n.Nbody.Len() > 0 || n.Rlist.Len() > 0 {
+ switch n.Op() {
+ case ir.OIF:
+ if !ir.IsConst(n.Left(), constant.Bool) || n.Body().Len() > 0 || n.Rlist().Len() > 0 {
return
}
- case OFOR:
- if !Isconst(n.Left, CTBOOL) || n.Left.BoolVal() {
+ case ir.OFOR:
+ if !ir.IsConst(n.Left(), constant.Bool) || n.Left().BoolVal() {
return
}
default:
@@ -3893,13 +3909,13 @@ func deadcodefn(fn *Node) {
}
}
- fn.Nbody.Set([]*Node{nod(OEMPTY, nil, nil)})
+ fn.PtrBody().Set([]ir.Node{ir.Nod(ir.OEMPTY, nil, nil)})
}
-func deadcodeslice(nn Nodes) {
+func deadcodeslice(nn *ir.Nodes) {
var lastLabel = -1
for i, n := range nn.Slice() {
- if n != nil && n.Op == OLABEL {
+ if n != nil && n.Op() == ir.OLABEL {
lastLabel = i
}
}
@@ -3911,16 +3927,16 @@ func deadcodeslice(nn Nodes) {
if n == nil {
continue
}
- if n.Op == OIF {
- n.Left = deadcodeexpr(n.Left)
- if Isconst(n.Left, CTBOOL) {
- var body Nodes
- if n.Left.BoolVal() {
- n.Rlist = Nodes{}
- body = n.Nbody
+ if n.Op() == ir.OIF {
+ n.SetLeft(deadcodeexpr(n.Left()))
+ if ir.IsConst(n.Left(), constant.Bool) {
+ var body ir.Nodes
+ if n.Left().BoolVal() {
+ n.SetRlist(ir.Nodes{})
+ body = n.Body()
} else {
- n.Nbody = Nodes{}
- body = n.Rlist
+ n.SetBody(ir.Nodes{})
+ body = n.Rlist()
}
// If "then" or "else" branch ends with panic or return statement,
// it is safe to remove all statements after this node.
@@ -3928,8 +3944,8 @@ func deadcodeslice(nn Nodes) {
// We must be careful not to deadcode-remove labels, as they
// might be the target of a goto. See issue 28616.
if body := body.Slice(); len(body) != 0 {
- switch body[(len(body) - 1)].Op {
- case ORETURN, ORETJMP, OPANIC:
+ switch body[(len(body) - 1)].Op() {
+ case ir.ORETURN, ir.ORETJMP, ir.OPANIC:
if i > lastLabel {
cut = true
}
@@ -3938,40 +3954,40 @@ func deadcodeslice(nn Nodes) {
}
}
- deadcodeslice(n.Ninit)
- deadcodeslice(n.Nbody)
- deadcodeslice(n.List)
- deadcodeslice(n.Rlist)
+ deadcodeslice(n.PtrInit())
+ deadcodeslice(n.PtrBody())
+ deadcodeslice(n.PtrList())
+ deadcodeslice(n.PtrRlist())
if cut {
- *nn.slice = nn.Slice()[:i+1]
+ nn.Set(nn.Slice()[:i+1])
break
}
}
}
-func deadcodeexpr(n *Node) *Node {
+func deadcodeexpr(n ir.Node) ir.Node {
// Perform dead-code elimination on short-circuited boolean
// expressions involving constants with the intent of
// producing a constant 'if' condition.
- switch n.Op {
- case OANDAND:
- n.Left = deadcodeexpr(n.Left)
- n.Right = deadcodeexpr(n.Right)
- if Isconst(n.Left, CTBOOL) {
- if n.Left.BoolVal() {
- return n.Right // true && x => x
+ switch n.Op() {
+ case ir.OANDAND:
+ n.SetLeft(deadcodeexpr(n.Left()))
+ n.SetRight(deadcodeexpr(n.Right()))
+ if ir.IsConst(n.Left(), constant.Bool) {
+ if n.Left().BoolVal() {
+ return n.Right() // true && x => x
} else {
- return n.Left // false && x => false
+ return n.Left() // false && x => false
}
}
- case OOROR:
- n.Left = deadcodeexpr(n.Left)
- n.Right = deadcodeexpr(n.Right)
- if Isconst(n.Left, CTBOOL) {
- if n.Left.BoolVal() {
- return n.Left // true || x => true
+ case ir.OOROR:
+ n.SetLeft(deadcodeexpr(n.Left()))
+ n.SetRight(deadcodeexpr(n.Right()))
+ if ir.IsConst(n.Left(), constant.Bool) {
+ if n.Left().BoolVal() {
+ return n.Left() // true || x => true
} else {
- return n.Right // false || x => x
+ return n.Right() // false || x => x
}
}
}
@@ -3979,17 +3995,17 @@ func deadcodeexpr(n *Node) *Node {
}
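
deadcodeexpr applies the four rewrites called out in its comments (true && x => x, false && x => false, true || x => true, false || x => x), recursing into operands first. A toy version over a minimal expression type, just to make the recursion concrete; this is not the compiler's representation:

    package main

    import "fmt"

    type expr struct {
        op   string // "lit", "&&", or "||"
        val  bool   // meaningful only when op == "lit"
        l, r *expr
    }

    func fold(e *expr) *expr {
        if e.op == "lit" {
            return e
        }
        e.l, e.r = fold(e.l), fold(e.r)
        if e.l.op != "lit" {
            return e // left side not constant: nothing to do
        }
        switch {
        case e.op == "&&" && e.l.val:
            return e.r // true && x => x
        case e.op == "&&":
            return e.l // false && x => false
        case e.op == "||" && e.l.val:
            return e.l // true || x => true
        default:
            return e.r // false || x => x
        }
    }

    func main() {
        x := &expr{op: "lit", val: false}
        e := &expr{op: "&&", l: &expr{op: "lit", val: true}, r: x}
        fmt.Println(fold(e) == x) // true: folded down to the right operand
    }
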
// setTypeNode sets n to an OTYPE node representing t.
-func setTypeNode(n *Node, t *types.Type) {
- n.Op = OTYPE
- n.Type = t
- n.Type.Nod = asTypesNode(n)
+func setTypeNode(n ir.Node, t *types.Type) {
+ n.SetOp(ir.OTYPE)
+ n.SetType(t)
+ n.Type().Nod = n
}
// getIotaValue returns the current value for "iota",
// or -1 if not within a ConstSpec.
func getIotaValue() int64 {
if i := len(typecheckdefstack); i > 0 {
- if x := typecheckdefstack[i-1]; x.Op == OLITERAL {
+ if x := typecheckdefstack[i-1]; x.Op() == ir.OLITERAL {
return x.Iota()
}
}
@@ -4006,14 +4022,33 @@ func curpkg() *types.Pkg {
fn := Curfn
if fn == nil {
// Initialization expressions for package-scope variables.
- return localpkg
+ return ir.LocalPkg
}
// TODO(mdempsky): Standardize on either ODCLFUNC or ONAME for
// Curfn, rather than mixing them.
- if fn.Op == ODCLFUNC {
- fn = fn.Func.Nname
+ if fn.Op() == ir.ODCLFUNC {
+ fn = fn.Func().Nname
}
return fnpkg(fn)
}
+
+// methodExprName returns the ONAME representing the method
+// referenced by expression n, which must be a method selector,
+// method expression, or method value.
+func methodExprName(n ir.Node) ir.Node {
+ return ir.AsNode(methodExprFunc(n).Nname)
+}
+
+// methodExprFunc is like methodExprName, but returns the types.Field instead.
+func methodExprFunc(n ir.Node) *types.Field {
+ switch n.Op() {
+ case ir.ODOTMETH, ir.OMETHEXPR:
+ return n.Opt().(*types.Field)
+ case ir.OCALLPART:
+ return callpartMethod(n)
+ }
+ base.Fatalf("unexpected node: %v (%v)", n, n.Op())
+ panic("unreachable")
+}
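
The dominant pattern in this file's diff is mechanical: every direct field access on the old *Node struct becomes a method call on the new ir.Node interface (n.Left -> n.Left(), n.Type = t -> n.SetType(t), n.Ninit -> n.Init()/PtrInit()). A toy illustration of the shape of that interface, not the actual cmd/compile/internal/ir definitions:

    package main

    import "fmt"

    // node mimics the new style: each old struct field becomes a
    // getter/setter pair, letting concrete node types vary per Op.
    type node interface {
        Left() node
        SetLeft(node)
    }

    type binary struct{ left node }

    func (b *binary) Left() node     { return b.left }
    func (b *binary) SetLeft(n node) { b.left = n }

    func main() {
        var n node = &binary{}
        n.SetLeft(&binary{}) // was: n.Left = ...
        fmt.Println(n.Left() != nil)
    }
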
diff --git a/src/cmd/compile/internal/gc/types.go b/src/cmd/compile/internal/gc/types.go
index 748f8458bd..e46735df28 100644
--- a/src/cmd/compile/internal/gc/types.go
+++ b/src/cmd/compile/internal/gc/types.go
@@ -3,56 +3,3 @@
// license that can be found in the LICENSE file.
package gc
-
-import (
- "cmd/compile/internal/types"
-)
-
-// convenience constants
-const (
- Txxx = types.Txxx
-
- TINT8 = types.TINT8
- TUINT8 = types.TUINT8
- TINT16 = types.TINT16
- TUINT16 = types.TUINT16
- TINT32 = types.TINT32
- TUINT32 = types.TUINT32
- TINT64 = types.TINT64
- TUINT64 = types.TUINT64
- TINT = types.TINT
- TUINT = types.TUINT
- TUINTPTR = types.TUINTPTR
-
- TCOMPLEX64 = types.TCOMPLEX64
- TCOMPLEX128 = types.TCOMPLEX128
-
- TFLOAT32 = types.TFLOAT32
- TFLOAT64 = types.TFLOAT64
-
- TBOOL = types.TBOOL
-
- TPTR = types.TPTR
- TFUNC = types.TFUNC
- TSLICE = types.TSLICE
- TARRAY = types.TARRAY
- TSTRUCT = types.TSTRUCT
- TCHAN = types.TCHAN
- TMAP = types.TMAP
- TINTER = types.TINTER
- TFORW = types.TFORW
- TANY = types.TANY
- TSTRING = types.TSTRING
- TUNSAFEPTR = types.TUNSAFEPTR
-
- // pseudo-types for literals
- TIDEAL = types.TIDEAL
- TNIL = types.TNIL
- TBLANK = types.TBLANK
-
- // pseudo-types for frame layout
- TFUNCARGS = types.TFUNCARGS
- TCHANARGS = types.TCHANARGS
-
- NTYPE = types.NTYPE
-)
diff --git a/src/cmd/compile/internal/gc/types_acc.go b/src/cmd/compile/internal/gc/types_acc.go
index 7240f726f6..d6d53f05cc 100644
--- a/src/cmd/compile/internal/gc/types_acc.go
+++ b/src/cmd/compile/internal/gc/types_acc.go
@@ -6,11 +6,3 @@
// TODO(gri) try to eliminate these soon
package gc
-
-import (
- "cmd/compile/internal/types"
- "unsafe"
-)
-
-func asNode(n *types.Node) *Node { return (*Node)(unsafe.Pointer(n)) }
-func asTypesNode(n *Node) *types.Node { return (*types.Node)(unsafe.Pointer(n)) }
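
types_acc.go is deleted outright: asNode and asTypesNode were unchecked unsafe.Pointer reinterpretations between *gc.Node and the opaque *types.Node, made unnecessary by the checked ir.AsNode conversion. A standalone sketch of the trick being removed, with toy types standing in for the real ones:

    package main

    import (
        "fmt"
        "unsafe"
    )

    type Node struct{ name string }
    type opaqueNode struct{} // stand-in for the old *types.Node

    // The deleted helpers worked only because both pointers always
    // referred to the same underlying Node; nothing verified that.
    func asOpaque(n *Node) *opaqueNode { return (*opaqueNode)(unsafe.Pointer(n)) }
    func asNode(o *opaqueNode) *Node   { return (*Node)(unsafe.Pointer(o)) }

    func main() {
        n := &Node{name: "x"}
        fmt.Println(asNode(asOpaque(n)).name) // "x": a blind round trip
    }
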
diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go
index ff8cabd8e3..978e53ac15 100644
--- a/src/cmd/compile/internal/gc/universe.go
+++ b/src/cmd/compile/internal/gc/universe.go
@@ -6,29 +6,31 @@
package gc
-import "cmd/compile/internal/types"
-
-// builtinpkg is a fake package that declares the universe block.
-var builtinpkg *types.Pkg
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
var basicTypes = [...]struct {
name string
etype types.EType
}{
- {"int8", TINT8},
- {"int16", TINT16},
- {"int32", TINT32},
- {"int64", TINT64},
- {"uint8", TUINT8},
- {"uint16", TUINT16},
- {"uint32", TUINT32},
- {"uint64", TUINT64},
- {"float32", TFLOAT32},
- {"float64", TFLOAT64},
- {"complex64", TCOMPLEX64},
- {"complex128", TCOMPLEX128},
- {"bool", TBOOL},
- {"string", TSTRING},
+ {"int8", types.TINT8},
+ {"int16", types.TINT16},
+ {"int32", types.TINT32},
+ {"int64", types.TINT64},
+ {"uint8", types.TUINT8},
+ {"uint16", types.TUINT16},
+ {"uint32", types.TUINT32},
+ {"uint64", types.TUINT64},
+ {"float32", types.TFLOAT32},
+ {"float64", types.TFLOAT64},
+ {"complex64", types.TCOMPLEX64},
+ {"complex128", types.TCOMPLEX128},
+ {"bool", types.TBOOL},
+ {"string", types.TSTRING},
}
var typedefs = [...]struct {
@@ -37,30 +39,30 @@ var typedefs = [...]struct {
sameas32 types.EType
sameas64 types.EType
}{
- {"int", TINT, TINT32, TINT64},
- {"uint", TUINT, TUINT32, TUINT64},
- {"uintptr", TUINTPTR, TUINT32, TUINT64},
+ {"int", types.TINT, types.TINT32, types.TINT64},
+ {"uint", types.TUINT, types.TUINT32, types.TUINT64},
+ {"uintptr", types.TUINTPTR, types.TUINT32, types.TUINT64},
}
var builtinFuncs = [...]struct {
name string
- op Op
+ op ir.Op
}{
- {"append", OAPPEND},
- {"cap", OCAP},
- {"close", OCLOSE},
- {"complex", OCOMPLEX},
- {"copy", OCOPY},
- {"delete", ODELETE},
- {"imag", OIMAG},
- {"len", OLEN},
- {"make", OMAKE},
- {"new", ONEW},
- {"panic", OPANIC},
- {"print", OPRINT},
- {"println", OPRINTN},
- {"real", OREAL},
- {"recover", ORECOVER},
+ {"append", ir.OAPPEND},
+ {"cap", ir.OCAP},
+ {"close", ir.OCLOSE},
+ {"complex", ir.OCOMPLEX},
+ {"copy", ir.OCOPY},
+ {"delete", ir.ODELETE},
+ {"imag", ir.OIMAG},
+ {"len", ir.OLEN},
+ {"make", ir.OMAKE},
+ {"new", ir.ONEW},
+ {"panic", ir.OPANIC},
+ {"print", ir.OPRINT},
+ {"println", ir.OPRINTN},
+ {"real", ir.OREAL},
+ {"recover", ir.ORECOVER},
}
// isBuiltinFuncName reports whether name matches a builtin function
@@ -76,11 +78,11 @@ func isBuiltinFuncName(name string) bool {
var unsafeFuncs = [...]struct {
name string
- op Op
+ op ir.Op
}{
- {"Alignof", OALIGNOF},
- {"Offsetof", OOFFSETOF},
- {"Sizeof", OSIZEOF},
+ {"Alignof", ir.OALIGNOF},
+ {"Offsetof", ir.OOFFSETOF},
+ {"Sizeof", ir.OSIZEOF},
}
// initUniverse initializes the universe block.
@@ -95,121 +97,117 @@ func lexinit() {
for _, s := range &basicTypes {
etype := s.etype
if int(etype) >= len(types.Types) {
- Fatalf("lexinit: %s bad etype", s.name)
+ base.Fatalf("lexinit: %s bad etype", s.name)
}
- s2 := builtinpkg.Lookup(s.name)
+ s2 := ir.BuiltinPkg.Lookup(s.name)
t := types.Types[etype]
if t == nil {
t = types.New(etype)
t.Sym = s2
- if etype != TANY && etype != TSTRING {
+ if etype != types.TANY && etype != types.TSTRING {
dowidth(t)
}
types.Types[etype] = t
}
- s2.Def = asTypesNode(typenod(t))
- asNode(s2.Def).Name = new(Name)
+ s2.Def = typenod(t)
+ ir.AsNode(s2.Def).SetName(new(ir.Name))
}
for _, s := range &builtinFuncs {
- s2 := builtinpkg.Lookup(s.name)
- s2.Def = asTypesNode(newname(s2))
- asNode(s2.Def).SetSubOp(s.op)
+ s2 := ir.BuiltinPkg.Lookup(s.name)
+ s2.Def = NewName(s2)
+ ir.AsNode(s2.Def).SetSubOp(s.op)
}
for _, s := range &unsafeFuncs {
s2 := unsafepkg.Lookup(s.name)
- s2.Def = asTypesNode(newname(s2))
- asNode(s2.Def).SetSubOp(s.op)
+ s2.Def = NewName(s2)
+ ir.AsNode(s2.Def).SetSubOp(s.op)
}
- types.UntypedString = types.New(TSTRING)
- types.UntypedBool = types.New(TBOOL)
- types.Types[TANY] = types.New(TANY)
+ types.UntypedString = types.New(types.TSTRING)
+ types.UntypedBool = types.New(types.TBOOL)
+ types.Types[types.TANY] = types.New(types.TANY)
- s := builtinpkg.Lookup("true")
- s.Def = asTypesNode(nodbool(true))
- asNode(s.Def).Sym = lookup("true")
- asNode(s.Def).Name = new(Name)
- asNode(s.Def).Type = types.UntypedBool
+ s := ir.BuiltinPkg.Lookup("true")
+ s.Def = nodbool(true)
+ ir.AsNode(s.Def).SetSym(lookup("true"))
+ ir.AsNode(s.Def).SetName(new(ir.Name))
+ ir.AsNode(s.Def).SetType(types.UntypedBool)
- s = builtinpkg.Lookup("false")
- s.Def = asTypesNode(nodbool(false))
- asNode(s.Def).Sym = lookup("false")
- asNode(s.Def).Name = new(Name)
- asNode(s.Def).Type = types.UntypedBool
+ s = ir.BuiltinPkg.Lookup("false")
+ s.Def = nodbool(false)
+ ir.AsNode(s.Def).SetSym(lookup("false"))
+ ir.AsNode(s.Def).SetName(new(ir.Name))
+ ir.AsNode(s.Def).SetType(types.UntypedBool)
s = lookup("_")
s.Block = -100
- s.Def = asTypesNode(newname(s))
- types.Types[TBLANK] = types.New(TBLANK)
- asNode(s.Def).Type = types.Types[TBLANK]
- nblank = asNode(s.Def)
+ s.Def = NewName(s)
+ types.Types[types.TBLANK] = types.New(types.TBLANK)
+ ir.AsNode(s.Def).SetType(types.Types[types.TBLANK])
+ ir.BlankNode = ir.AsNode(s.Def)
- s = builtinpkg.Lookup("_")
+ s = ir.BuiltinPkg.Lookup("_")
s.Block = -100
- s.Def = asTypesNode(newname(s))
- types.Types[TBLANK] = types.New(TBLANK)
- asNode(s.Def).Type = types.Types[TBLANK]
-
- types.Types[TNIL] = types.New(TNIL)
- s = builtinpkg.Lookup("nil")
- var v Val
- v.U = new(NilVal)
- s.Def = asTypesNode(nodlit(v))
- asNode(s.Def).Sym = s
- asNode(s.Def).Name = new(Name)
-
- s = builtinpkg.Lookup("iota")
- s.Def = asTypesNode(nod(OIOTA, nil, nil))
- asNode(s.Def).Sym = s
- asNode(s.Def).Name = new(Name)
+ s.Def = NewName(s)
+ types.Types[types.TBLANK] = types.New(types.TBLANK)
+ ir.AsNode(s.Def).SetType(types.Types[types.TBLANK])
+
+ types.Types[types.TNIL] = types.New(types.TNIL)
+ s = ir.BuiltinPkg.Lookup("nil")
+ s.Def = nodnil()
+ ir.AsNode(s.Def).SetSym(s)
+ ir.AsNode(s.Def).SetName(new(ir.Name))
+
+ s = ir.BuiltinPkg.Lookup("iota")
+ s.Def = ir.Nod(ir.OIOTA, nil, nil)
+ ir.AsNode(s.Def).SetSym(s)
+ ir.AsNode(s.Def).SetName(new(ir.Name))
}
func typeinit() {
if Widthptr == 0 {
- Fatalf("typeinit before betypeinit")
+ base.Fatalf("typeinit before betypeinit")
}
- for et := types.EType(0); et < NTYPE; et++ {
+ for et := types.EType(0); et < types.NTYPE; et++ {
simtype[et] = et
}
- types.Types[TPTR] = types.New(TPTR)
- dowidth(types.Types[TPTR])
+ types.Types[types.TPTR] = types.New(types.TPTR)
+ dowidth(types.Types[types.TPTR])
- t := types.New(TUNSAFEPTR)
- types.Types[TUNSAFEPTR] = t
+ t := types.New(types.TUNSAFEPTR)
+ types.Types[types.TUNSAFEPTR] = t
t.Sym = unsafepkg.Lookup("Pointer")
- t.Sym.Def = asTypesNode(typenod(t))
- asNode(t.Sym.Def).Name = new(Name)
- dowidth(types.Types[TUNSAFEPTR])
+ t.Sym.Def = typenod(t)
+ ir.AsNode(t.Sym.Def).SetName(new(ir.Name))
+ dowidth(types.Types[types.TUNSAFEPTR])
- for et := TINT8; et <= TUINT64; et++ {
+ for et := types.TINT8; et <= types.TUINT64; et++ {
isInt[et] = true
}
- isInt[TINT] = true
- isInt[TUINT] = true
- isInt[TUINTPTR] = true
+ isInt[types.TINT] = true
+ isInt[types.TUINT] = true
+ isInt[types.TUINTPTR] = true
- isFloat[TFLOAT32] = true
- isFloat[TFLOAT64] = true
+ isFloat[types.TFLOAT32] = true
+ isFloat[types.TFLOAT64] = true
- isComplex[TCOMPLEX64] = true
- isComplex[TCOMPLEX128] = true
+ isComplex[types.TCOMPLEX64] = true
+ isComplex[types.TCOMPLEX128] = true
// initialize okfor
- for et := types.EType(0); et < NTYPE; et++ {
- if isInt[et] || et == TIDEAL {
+ for et := types.EType(0); et < types.NTYPE; et++ {
+ if isInt[et] || et == types.TIDEAL {
okforeq[et] = true
okforcmp[et] = true
okforarith[et] = true
okforadd[et] = true
okforand[et] = true
- okforconst[et] = true
+ ir.OKForConst[et] = true
issimple[et] = true
- minintval[et] = new(Mpint)
- maxintval[et] = new(Mpint)
}
if isFloat[et] {
@@ -217,53 +215,51 @@ func typeinit() {
okforcmp[et] = true
okforadd[et] = true
okforarith[et] = true
- okforconst[et] = true
+ ir.OKForConst[et] = true
issimple[et] = true
- minfltval[et] = newMpflt()
- maxfltval[et] = newMpflt()
}
if isComplex[et] {
okforeq[et] = true
okforadd[et] = true
okforarith[et] = true
- okforconst[et] = true
+ ir.OKForConst[et] = true
issimple[et] = true
}
}
- issimple[TBOOL] = true
+ issimple[types.TBOOL] = true
- okforadd[TSTRING] = true
+ okforadd[types.TSTRING] = true
- okforbool[TBOOL] = true
+ okforbool[types.TBOOL] = true
- okforcap[TARRAY] = true
- okforcap[TCHAN] = true
- okforcap[TSLICE] = true
+ okforcap[types.TARRAY] = true
+ okforcap[types.TCHAN] = true
+ okforcap[types.TSLICE] = true
- okforconst[TBOOL] = true
- okforconst[TSTRING] = true
+ ir.OKForConst[types.TBOOL] = true
+ ir.OKForConst[types.TSTRING] = true
- okforlen[TARRAY] = true
- okforlen[TCHAN] = true
- okforlen[TMAP] = true
- okforlen[TSLICE] = true
- okforlen[TSTRING] = true
+ okforlen[types.TARRAY] = true
+ okforlen[types.TCHAN] = true
+ okforlen[types.TMAP] = true
+ okforlen[types.TSLICE] = true
+ okforlen[types.TSTRING] = true
- okforeq[TPTR] = true
- okforeq[TUNSAFEPTR] = true
- okforeq[TINTER] = true
- okforeq[TCHAN] = true
- okforeq[TSTRING] = true
- okforeq[TBOOL] = true
- okforeq[TMAP] = true // nil only; refined in typecheck
- okforeq[TFUNC] = true // nil only; refined in typecheck
- okforeq[TSLICE] = true // nil only; refined in typecheck
- okforeq[TARRAY] = true // only if element type is comparable; refined in typecheck
- okforeq[TSTRUCT] = true // only if all struct fields are comparable; refined in typecheck
+ okforeq[types.TPTR] = true
+ okforeq[types.TUNSAFEPTR] = true
+ okforeq[types.TINTER] = true
+ okforeq[types.TCHAN] = true
+ okforeq[types.TSTRING] = true
+ okforeq[types.TBOOL] = true
+ okforeq[types.TMAP] = true // nil only; refined in typecheck
+ okforeq[types.TFUNC] = true // nil only; refined in typecheck
+ okforeq[types.TSLICE] = true // nil only; refined in typecheck
+ okforeq[types.TARRAY] = true // only if element type is comparable; refined in typecheck
+ okforeq[types.TSTRUCT] = true // only if all struct fields are comparable; refined in typecheck
- okforcmp[TSTRING] = true
+ okforcmp[types.TSTRING] = true
var i int
for i = 0; i < len(okfor); i++ {
@@ -271,76 +267,51 @@ func typeinit() {
}
// binary
- okfor[OADD] = okforadd[:]
- okfor[OAND] = okforand[:]
- okfor[OANDAND] = okforbool[:]
- okfor[OANDNOT] = okforand[:]
- okfor[ODIV] = okforarith[:]
- okfor[OEQ] = okforeq[:]
- okfor[OGE] = okforcmp[:]
- okfor[OGT] = okforcmp[:]
- okfor[OLE] = okforcmp[:]
- okfor[OLT] = okforcmp[:]
- okfor[OMOD] = okforand[:]
- okfor[OMUL] = okforarith[:]
- okfor[ONE] = okforeq[:]
- okfor[OOR] = okforand[:]
- okfor[OOROR] = okforbool[:]
- okfor[OSUB] = okforarith[:]
- okfor[OXOR] = okforand[:]
- okfor[OLSH] = okforand[:]
- okfor[ORSH] = okforand[:]
+ okfor[ir.OADD] = okforadd[:]
+ okfor[ir.OAND] = okforand[:]
+ okfor[ir.OANDAND] = okforbool[:]
+ okfor[ir.OANDNOT] = okforand[:]
+ okfor[ir.ODIV] = okforarith[:]
+ okfor[ir.OEQ] = okforeq[:]
+ okfor[ir.OGE] = okforcmp[:]
+ okfor[ir.OGT] = okforcmp[:]
+ okfor[ir.OLE] = okforcmp[:]
+ okfor[ir.OLT] = okforcmp[:]
+ okfor[ir.OMOD] = okforand[:]
+ okfor[ir.OMUL] = okforarith[:]
+ okfor[ir.ONE] = okforeq[:]
+ okfor[ir.OOR] = okforand[:]
+ okfor[ir.OOROR] = okforbool[:]
+ okfor[ir.OSUB] = okforarith[:]
+ okfor[ir.OXOR] = okforand[:]
+ okfor[ir.OLSH] = okforand[:]
+ okfor[ir.ORSH] = okforand[:]
// unary
- okfor[OBITNOT] = okforand[:]
- okfor[ONEG] = okforarith[:]
- okfor[ONOT] = okforbool[:]
- okfor[OPLUS] = okforarith[:]
+ okfor[ir.OBITNOT] = okforand[:]
+ okfor[ir.ONEG] = okforarith[:]
+ okfor[ir.ONOT] = okforbool[:]
+ okfor[ir.OPLUS] = okforarith[:]
// special
- okfor[OCAP] = okforcap[:]
- okfor[OLEN] = okforlen[:]
+ okfor[ir.OCAP] = okforcap[:]
+ okfor[ir.OLEN] = okforlen[:]
// comparison
- iscmp[OLT] = true
- iscmp[OGT] = true
- iscmp[OGE] = true
- iscmp[OLE] = true
- iscmp[OEQ] = true
- iscmp[ONE] = true
-
- maxintval[TINT8].SetString("0x7f")
- minintval[TINT8].SetString("-0x80")
- maxintval[TINT16].SetString("0x7fff")
- minintval[TINT16].SetString("-0x8000")
- maxintval[TINT32].SetString("0x7fffffff")
- minintval[TINT32].SetString("-0x80000000")
- maxintval[TINT64].SetString("0x7fffffffffffffff")
- minintval[TINT64].SetString("-0x8000000000000000")
-
- maxintval[TUINT8].SetString("0xff")
- maxintval[TUINT16].SetString("0xffff")
- maxintval[TUINT32].SetString("0xffffffff")
- maxintval[TUINT64].SetString("0xffffffffffffffff")
-
- // f is valid float if min < f < max. (min and max are not themselves valid.)
- maxfltval[TFLOAT32].SetString("33554431p103") // 2^24-1 p (127-23) + 1/2 ulp
- minfltval[TFLOAT32].SetString("-33554431p103")
- maxfltval[TFLOAT64].SetString("18014398509481983p970") // 2^53-1 p (1023-52) + 1/2 ulp
- minfltval[TFLOAT64].SetString("-18014398509481983p970")
-
- maxfltval[TCOMPLEX64] = maxfltval[TFLOAT32]
- minfltval[TCOMPLEX64] = minfltval[TFLOAT32]
- maxfltval[TCOMPLEX128] = maxfltval[TFLOAT64]
- minfltval[TCOMPLEX128] = minfltval[TFLOAT64]
-
- types.Types[TINTER] = types.New(TINTER) // empty interface
+ iscmp[ir.OLT] = true
+ iscmp[ir.OGT] = true
+ iscmp[ir.OGE] = true
+ iscmp[ir.OLE] = true
+ iscmp[ir.OEQ] = true
+ iscmp[ir.ONE] = true
+
+ types.Types[types.TINTER] = types.New(types.TINTER) // empty interface
// simple aliases
- simtype[TMAP] = TPTR
- simtype[TCHAN] = TPTR
- simtype[TFUNC] = TPTR
- simtype[TUNSAFEPTR] = TPTR
+ simtype[types.TMAP] = types.TPTR
+ simtype[types.TCHAN] = types.TPTR
+ simtype[types.TFUNC] = types.TPTR
+ simtype[types.TUNSAFEPTR] = types.TPTR
slicePtrOffset = 0
sliceLenOffset = Rnd(slicePtrOffset+int64(Widthptr), int64(Widthptr))
@@ -350,31 +321,29 @@ func typeinit() {
// string is same as slice without the cap
sizeofString = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr))
- dowidth(types.Types[TSTRING])
+ dowidth(types.Types[types.TSTRING])
dowidth(types.UntypedString)
}
func makeErrorInterface() *types.Type {
- field := types.NewField()
- field.Type = types.Types[TSTRING]
- f := functypefield(fakeRecvField(), nil, []*types.Field{field})
+ sig := functypefield(fakeRecvField(), nil, []*types.Field{
+ types.NewField(src.NoXPos, nil, types.Types[types.TSTRING]),
+ })
- field = types.NewField()
- field.Sym = lookup("Error")
- field.Type = f
+ method := types.NewField(src.NoXPos, lookup("Error"), sig)
- t := types.New(TINTER)
- t.SetInterface([]*types.Field{field})
+ t := types.New(types.TINTER)
+ t.SetInterface([]*types.Field{method})
return t
}
func lexinit1() {
// error type
- s := builtinpkg.Lookup("error")
+ s := ir.BuiltinPkg.Lookup("error")
types.Errortype = makeErrorInterface()
types.Errortype.Sym = s
types.Errortype.Orig = makeErrorInterface()
- s.Def = asTypesNode(typenod(types.Errortype))
+ s.Def = typenod(types.Errortype)
dowidth(types.Errortype)
// We create separate byte and rune types for better error messages
@@ -386,24 +355,24 @@ func lexinit1() {
// type aliases, albeit at the cost of having to deal with it everywhere).
// byte alias
- s = builtinpkg.Lookup("byte")
- types.Bytetype = types.New(TUINT8)
+ s = ir.BuiltinPkg.Lookup("byte")
+ types.Bytetype = types.New(types.TUINT8)
types.Bytetype.Sym = s
- s.Def = asTypesNode(typenod(types.Bytetype))
- asNode(s.Def).Name = new(Name)
+ s.Def = typenod(types.Bytetype)
+ ir.AsNode(s.Def).SetName(new(ir.Name))
dowidth(types.Bytetype)
// rune alias
- s = builtinpkg.Lookup("rune")
- types.Runetype = types.New(TINT32)
+ s = ir.BuiltinPkg.Lookup("rune")
+ types.Runetype = types.New(types.TINT32)
types.Runetype.Sym = s
- s.Def = asTypesNode(typenod(types.Runetype))
- asNode(s.Def).Name = new(Name)
+ s.Def = typenod(types.Runetype)
+ ir.AsNode(s.Def).SetName(new(ir.Name))
dowidth(types.Runetype)
// backend-dependent builtin types (e.g. int).
for _, s := range &typedefs {
- s1 := builtinpkg.Lookup(s.name)
+ s1 := ir.BuiltinPkg.Lookup(s.name)
sameas := s.sameas32
if Widthptr == 8 {
@@ -411,17 +380,13 @@ func lexinit1() {
}
simtype[s.etype] = sameas
- minfltval[s.etype] = minfltval[sameas]
- maxfltval[s.etype] = maxfltval[sameas]
- minintval[s.etype] = minintval[sameas]
- maxintval[s.etype] = maxintval[sameas]
t := types.New(s.etype)
t.Sym = s1
types.Types[s.etype] = t
- s1.Def = asTypesNode(typenod(t))
- asNode(s1.Def).Name = new(Name)
- s1.Origpkg = builtinpkg
+ s1.Def = typenod(t)
+ ir.AsNode(s1.Def).SetName(new(ir.Name))
+ s1.Origpkg = ir.BuiltinPkg
dowidth(t)
}
@@ -433,7 +398,7 @@ func finishUniverse() {
// that we silently skip symbols that are already declared in the
// package block rather than emitting a redeclared symbol error.
- for _, s := range builtinpkg.Syms {
+ for _, s := range ir.BuiltinPkg.Syms {
if s.Def == nil {
continue
}
@@ -446,8 +411,8 @@ func finishUniverse() {
s1.Block = s.Block
}
- nodfp = newname(lookup(".fp"))
- nodfp.Type = types.Types[TINT32]
- nodfp.SetClass(PPARAM)
- nodfp.Name.SetUsed(true)
+ nodfp = NewName(lookup(".fp"))
+ nodfp.SetType(types.Types[types.TINT32])
+ nodfp.SetClass(ir.PPARAM)
+ nodfp.Name().SetUsed(true)
}
diff --git a/src/cmd/compile/internal/gc/unsafe.go b/src/cmd/compile/internal/gc/unsafe.go
index 2233961561..678924b229 100644
--- a/src/cmd/compile/internal/gc/unsafe.go
+++ b/src/cmd/compile/internal/gc/unsafe.go
@@ -4,73 +4,78 @@
package gc
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+)
+
// evalunsafe evaluates a package unsafe operation and returns the result.
-func evalunsafe(n *Node) int64 {
- switch n.Op {
- case OALIGNOF, OSIZEOF:
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- tr := n.Left.Type
+func evalunsafe(n ir.Node) int64 {
+ switch n.Op() {
+ case ir.OALIGNOF, ir.OSIZEOF:
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ tr := n.Left().Type()
if tr == nil {
return 0
}
dowidth(tr)
- if n.Op == OALIGNOF {
+ if n.Op() == ir.OALIGNOF {
return int64(tr.Align)
}
return tr.Width
- case OOFFSETOF:
+ case ir.OOFFSETOF:
// must be a selector.
- if n.Left.Op != OXDOT {
- yyerror("invalid expression %v", n)
+ if n.Left().Op() != ir.OXDOT {
+ base.Errorf("invalid expression %v", n)
return 0
}
// Remember the base of the selector so we can find it again after dot insertion.
// Since the selector's operand may be mutated by typechecking, check it
// explicitly first so we can track it correctly.
- n.Left.Left = typecheck(n.Left.Left, ctxExpr)
- base := n.Left.Left
+ n.Left().SetLeft(typecheck(n.Left().Left(), ctxExpr))
+ sbase := n.Left().Left()
- n.Left = typecheck(n.Left, ctxExpr)
- if n.Left.Type == nil {
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ if n.Left().Type() == nil {
return 0
}
- switch n.Left.Op {
- case ODOT, ODOTPTR:
+ switch n.Left().Op() {
+ case ir.ODOT, ir.ODOTPTR:
break
- case OCALLPART:
- yyerror("invalid expression %v: argument is a method value", n)
+ case ir.OCALLPART:
+ base.Errorf("invalid expression %v: argument is a method value", n)
return 0
default:
- yyerror("invalid expression %v", n)
+ base.Errorf("invalid expression %v", n)
return 0
}
- // Sum offsets for dots until we reach base.
+ // Sum offsets for dots until we reach sbase.
var v int64
- for r := n.Left; r != base; r = r.Left {
- switch r.Op {
- case ODOTPTR:
+ for r := n.Left(); r != sbase; r = r.Left() {
+ switch r.Op() {
+ case ir.ODOTPTR:
// For Offsetof(s.f), s may itself be a pointer,
// but accessing f must not otherwise involve
// indirection via embedded pointer types.
- if r.Left != base {
- yyerror("invalid expression %v: selector implies indirection of embedded %v", n, r.Left)
+ if r.Left() != sbase {
+ base.Errorf("invalid expression %v: selector implies indirection of embedded %v", n, r.Left())
return 0
}
fallthrough
- case ODOT:
- v += r.Xoffset
+ case ir.ODOT:
+ v += r.Offset()
default:
- Dump("unsafenmagic", n.Left)
- Fatalf("impossible %#v node after dot insertion", r.Op)
+ ir.Dump("unsafenmagic", n.Left())
+ base.Fatalf("impossible %#v node after dot insertion", r.Op())
}
}
return v
}
- Fatalf("unexpected op %v", n.Op)
+ base.Fatalf("unexpected op %v", n.Op())
return 0
}
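
The OOFFSETOF case walks the selector chain that dot insertion produced, summing field offsets, and rejects any ODOTPTR other than the base itself: unsafe.Offsetof(s.f) may dereference s, but never an embedded pointer along the path. In source terms:

    package main

    import (
        "fmt"
        "unsafe"
    )

    type Inner struct{ F int64 }

    type Outer struct {
        A     int32
        Inner // embedded by value: offsets simply accumulate
    }

    type Bad struct{ *Inner } // embedded by pointer

    func main() {
        var o Outer
        fmt.Println(unsafe.Offsetof(o.F)) // fine: offset of Inner plus offset of F
        // var b Bad
        // _ = unsafe.Offsetof(b.F) // compile error: selector implies
        // indirection of embedded *Inner
    }
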
diff --git a/src/cmd/compile/internal/gc/util.go b/src/cmd/compile/internal/gc/util.go
index 58be2f8253..4baddbc029 100644
--- a/src/cmd/compile/internal/gc/util.go
+++ b/src/cmd/compile/internal/gc/util.go
@@ -8,59 +8,35 @@ import (
"os"
"runtime"
"runtime/pprof"
-)
-
-// Line returns n's position as a string. If n has been inlined,
-// it uses the outermost position where n has been inlined.
-func (n *Node) Line() string {
- return linestr(n.Pos)
-}
-var atExitFuncs []func()
-
-func atExit(f func()) {
- atExitFuncs = append(atExitFuncs, f)
-}
-
-func Exit(code int) {
- for i := len(atExitFuncs) - 1; i >= 0; i-- {
- f := atExitFuncs[i]
- atExitFuncs = atExitFuncs[:i]
- f()
- }
- os.Exit(code)
-}
+ "cmd/compile/internal/base"
+)
var (
- blockprofile string
- cpuprofile string
- memprofile string
memprofilerate int64
- traceprofile string
traceHandler func(string)
- mutexprofile string
)
func startProfile() {
- if cpuprofile != "" {
- f, err := os.Create(cpuprofile)
+ if base.Flag.CPUProfile != "" {
+ f, err := os.Create(base.Flag.CPUProfile)
if err != nil {
- Fatalf("%v", err)
+ base.Fatalf("%v", err)
}
if err := pprof.StartCPUProfile(f); err != nil {
- Fatalf("%v", err)
+ base.Fatalf("%v", err)
}
- atExit(pprof.StopCPUProfile)
+ base.AtExit(pprof.StopCPUProfile)
}
- if memprofile != "" {
+ if base.Flag.MemProfile != "" {
if memprofilerate != 0 {
runtime.MemProfileRate = int(memprofilerate)
}
- f, err := os.Create(memprofile)
+ f, err := os.Create(base.Flag.MemProfile)
if err != nil {
- Fatalf("%v", err)
+ base.Fatalf("%v", err)
}
- atExit(func() {
+ base.AtExit(func() {
// Profile all outstanding allocations.
runtime.GC()
// compilebench parses the memory profile to extract memstats,
@@ -68,36 +44,36 @@ func startProfile() {
// See golang.org/issue/18641 and runtime/pprof/pprof.go:writeHeap.
const writeLegacyFormat = 1
if err := pprof.Lookup("heap").WriteTo(f, writeLegacyFormat); err != nil {
- Fatalf("%v", err)
+ base.Fatalf("%v", err)
}
})
} else {
// Not doing memory profiling; disable it entirely.
runtime.MemProfileRate = 0
}
- if blockprofile != "" {
- f, err := os.Create(blockprofile)
+ if base.Flag.BlockProfile != "" {
+ f, err := os.Create(base.Flag.BlockProfile)
if err != nil {
- Fatalf("%v", err)
+ base.Fatalf("%v", err)
}
runtime.SetBlockProfileRate(1)
- atExit(func() {
+ base.AtExit(func() {
pprof.Lookup("block").WriteTo(f, 0)
f.Close()
})
}
- if mutexprofile != "" {
- f, err := os.Create(mutexprofile)
+ if base.Flag.MutexProfile != "" {
+ f, err := os.Create(base.Flag.MutexProfile)
if err != nil {
- Fatalf("%v", err)
+ base.Fatalf("%v", err)
}
startMutexProfiling()
- atExit(func() {
+ base.AtExit(func() {
pprof.Lookup("mutex").WriteTo(f, 0)
f.Close()
})
}
- if traceprofile != "" && traceHandler != nil {
- traceHandler(traceprofile)
+ if base.Flag.TraceProfile != "" && traceHandler != nil {
+ traceHandler(base.Flag.TraceProfile)
}
}
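
startProfile now reads its destinations from base.Flag instead of package-level flag strings, but the runtime/pprof calls themselves are unchanged. A minimal standalone equivalent of the CPU-profile branch (the file name is hypothetical):

    package main

    import (
        "log"
        "os"
        "runtime/pprof"
    )

    func main() {
        f, err := os.Create("cpu.prof") // stands in for base.Flag.CPUProfile
        if err != nil {
            log.Fatal(err)
        }
        if err := pprof.StartCPUProfile(f); err != nil {
            log.Fatal(err)
        }
        defer pprof.StopCPUProfile() // the compiler defers this via base.AtExit instead
        // ... work to be profiled ...
    }
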
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
index a7b6e7fcb3..db8791ee05 100644
--- a/src/cmd/compile/internal/gc/walk.go
+++ b/src/cmd/compile/internal/gc/walk.go
@@ -5,12 +5,16 @@
package gc
import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/sys"
"encoding/binary"
"fmt"
+ "go/constant"
+ "go/token"
"strings"
)
@@ -18,79 +22,80 @@ import (
const tmpstringbufsize = 32
const zeroValSize = 1024 // must match value of runtime/map.go:maxZero
-func walk(fn *Node) {
+func walk(fn ir.Node) {
Curfn = fn
+ errorsBefore := base.Errors()
- if Debug.W != 0 {
- s := fmt.Sprintf("\nbefore walk %v", Curfn.Func.Nname.Sym)
- dumplist(s, Curfn.Nbody)
+ if base.Flag.W != 0 {
+ s := fmt.Sprintf("\nbefore walk %v", Curfn.Func().Nname.Sym())
+ ir.DumpList(s, Curfn.Body())
}
- lno := lineno
+ lno := base.Pos
// Final typecheck for any unused variables.
- for i, ln := range fn.Func.Dcl {
- if ln.Op == ONAME && (ln.Class() == PAUTO || ln.Class() == PAUTOHEAP) {
+ for i, ln := range fn.Func().Dcl {
+ if ln.Op() == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) {
ln = typecheck(ln, ctxExpr|ctxAssign)
- fn.Func.Dcl[i] = ln
+ fn.Func().Dcl[i] = ln
}
}
// Propagate the used flag for typeswitch variables up to the NONAME in its definition.
- for _, ln := range fn.Func.Dcl {
- if ln.Op == ONAME && (ln.Class() == PAUTO || ln.Class() == PAUTOHEAP) && ln.Name.Defn != nil && ln.Name.Defn.Op == OTYPESW && ln.Name.Used() {
- ln.Name.Defn.Left.Name.SetUsed(true)
+ for _, ln := range fn.Func().Dcl {
+ if ln.Op() == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) && ln.Name().Defn != nil && ln.Name().Defn.Op() == ir.OTYPESW && ln.Name().Used() {
+ ln.Name().Defn.Left().Name().SetUsed(true)
}
}
- for _, ln := range fn.Func.Dcl {
- if ln.Op != ONAME || (ln.Class() != PAUTO && ln.Class() != PAUTOHEAP) || ln.Sym.Name[0] == '&' || ln.Name.Used() {
+ for _, ln := range fn.Func().Dcl {
+ if ln.Op() != ir.ONAME || (ln.Class() != ir.PAUTO && ln.Class() != ir.PAUTOHEAP) || ln.Sym().Name[0] == '&' || ln.Name().Used() {
continue
}
- if defn := ln.Name.Defn; defn != nil && defn.Op == OTYPESW {
- if defn.Left.Name.Used() {
+ if defn := ln.Name().Defn; defn != nil && defn.Op() == ir.OTYPESW {
+ if defn.Left().Name().Used() {
continue
}
- yyerrorl(defn.Left.Pos, "%v declared but not used", ln.Sym)
- defn.Left.Name.SetUsed(true) // suppress repeats
+ base.ErrorfAt(defn.Left().Pos(), "%v declared but not used", ln.Sym())
+ defn.Left().Name().SetUsed(true) // suppress repeats
} else {
- yyerrorl(ln.Pos, "%v declared but not used", ln.Sym)
+ base.ErrorfAt(ln.Pos(), "%v declared but not used", ln.Sym())
}
}
- lineno = lno
- if nerrors != 0 {
+ base.Pos = lno
+ if base.Errors() > errorsBefore {
return
}
- walkstmtlist(Curfn.Nbody.Slice())
- if Debug.W != 0 {
- s := fmt.Sprintf("after walk %v", Curfn.Func.Nname.Sym)
- dumplist(s, Curfn.Nbody)
+ walkstmtlist(Curfn.Body().Slice())
+ if base.Flag.W != 0 {
+ s := fmt.Sprintf("after walk %v", Curfn.Func().Nname.Sym())
+ ir.DumpList(s, Curfn.Body())
}
zeroResults()
heapmoves()
- if Debug.W != 0 && Curfn.Func.Enter.Len() > 0 {
- s := fmt.Sprintf("enter %v", Curfn.Func.Nname.Sym)
- dumplist(s, Curfn.Func.Enter)
+ if base.Flag.W != 0 && Curfn.Func().Enter.Len() > 0 {
+ s := fmt.Sprintf("enter %v", Curfn.Func().Nname.Sym())
+ ir.DumpList(s, Curfn.Func().Enter)
}
}
-func walkstmtlist(s []*Node) {
+func walkstmtlist(s []ir.Node) {
for i := range s {
s[i] = walkstmt(s[i])
}
}
-func paramoutheap(fn *Node) bool {
- for _, ln := range fn.Func.Dcl {
+func paramoutheap(fn ir.Node) bool {
+ for _, ln := range fn.Func().Dcl {
switch ln.Class() {
- case PPARAMOUT:
- if ln.isParamStackCopy() || ln.Name.Addrtaken() {
+ case ir.PPARAMOUT:
+ if isParamStackCopy(ln) || ln.Name().Addrtaken() {
return true
}
- case PAUTO:
+ case ir.PAUTO:
// stop early - parameters are over
return false
}
@@ -101,237 +106,237 @@ func paramoutheap(fn *Node) bool {
// The result of walkstmt MUST be assigned back to n, e.g.
// n.Left = walkstmt(n.Left)
-func walkstmt(n *Node) *Node {
+func walkstmt(n ir.Node) ir.Node {
if n == nil {
return n
}
setlineno(n)
- walkstmtlist(n.Ninit.Slice())
+ walkstmtlist(n.Init().Slice())
- switch n.Op {
+ switch n.Op() {
default:
- if n.Op == ONAME {
- yyerror("%v is not a top level statement", n.Sym)
+ if n.Op() == ir.ONAME {
+ base.Errorf("%v is not a top level statement", n.Sym())
} else {
- yyerror("%v is not a top level statement", n.Op)
- }
- Dump("nottop", n)
-
- case OAS,
- OASOP,
- OAS2,
- OAS2DOTTYPE,
- OAS2RECV,
- OAS2FUNC,
- OAS2MAPR,
- OCLOSE,
- OCOPY,
- OCALLMETH,
- OCALLINTER,
- OCALL,
- OCALLFUNC,
- ODELETE,
- OSEND,
- OPRINT,
- OPRINTN,
- OPANIC,
- OEMPTY,
- ORECOVER,
- OGETG:
+ base.Errorf("%v is not a top level statement", n.Op())
+ }
+ ir.Dump("nottop", n)
+
+ case ir.OAS,
+ ir.OASOP,
+ ir.OAS2,
+ ir.OAS2DOTTYPE,
+ ir.OAS2RECV,
+ ir.OAS2FUNC,
+ ir.OAS2MAPR,
+ ir.OCLOSE,
+ ir.OCOPY,
+ ir.OCALLMETH,
+ ir.OCALLINTER,
+ ir.OCALL,
+ ir.OCALLFUNC,
+ ir.ODELETE,
+ ir.OSEND,
+ ir.OPRINT,
+ ir.OPRINTN,
+ ir.OPANIC,
+ ir.OEMPTY,
+ ir.ORECOVER,
+ ir.OGETG:
if n.Typecheck() == 0 {
- Fatalf("missing typecheck: %+v", n)
+ base.Fatalf("missing typecheck: %+v", n)
}
- wascopy := n.Op == OCOPY
- init := n.Ninit
- n.Ninit.Set(nil)
+ wascopy := n.Op() == ir.OCOPY
+ init := n.Init()
+ n.PtrInit().Set(nil)
n = walkexpr(n, &init)
n = addinit(n, init.Slice())
- if wascopy && n.Op == OCONVNOP {
- n.Op = OEMPTY // don't leave plain values as statements.
+ if wascopy && n.Op() == ir.OCONVNOP {
+ n.SetOp(ir.OEMPTY) // don't leave plain values as statements.
}
// special case for a receive where we throw away
// the value received.
- case ORECV:
+ case ir.ORECV:
if n.Typecheck() == 0 {
- Fatalf("missing typecheck: %+v", n)
+ base.Fatalf("missing typecheck: %+v", n)
}
- init := n.Ninit
- n.Ninit.Set(nil)
+ init := n.Init()
+ n.PtrInit().Set(nil)
- n.Left = walkexpr(n.Left, &init)
- n = mkcall1(chanfn("chanrecv1", 2, n.Left.Type), nil, &init, n.Left, nodnil())
+ n.SetLeft(walkexpr(n.Left(), &init))
+ n = mkcall1(chanfn("chanrecv1", 2, n.Left().Type()), nil, &init, n.Left(), nodnil())
n = walkexpr(n, &init)
n = addinit(n, init.Slice())
- case OBREAK,
- OCONTINUE,
- OFALL,
- OGOTO,
- OLABEL,
- ODCLCONST,
- ODCLTYPE,
- OCHECKNIL,
- OVARDEF,
- OVARKILL,
- OVARLIVE:
+ case ir.OBREAK,
+ ir.OCONTINUE,
+ ir.OFALL,
+ ir.OGOTO,
+ ir.OLABEL,
+ ir.ODCLCONST,
+ ir.ODCLTYPE,
+ ir.OCHECKNIL,
+ ir.OVARDEF,
+ ir.OVARKILL,
+ ir.OVARLIVE:
break
- case ODCL:
- v := n.Left
- if v.Class() == PAUTOHEAP {
- if compiling_runtime {
- yyerror("%v escapes to heap, not allowed in runtime", v)
+ case ir.ODCL:
+ v := n.Left()
+ if v.Class() == ir.PAUTOHEAP {
+ if base.Flag.CompilingRuntime {
+ base.Errorf("%v escapes to heap, not allowed in runtime", v)
}
if prealloc[v] == nil {
- prealloc[v] = callnew(v.Type)
+ prealloc[v] = callnew(v.Type())
}
- nn := nod(OAS, v.Name.Param.Heapaddr, prealloc[v])
+ nn := ir.Nod(ir.OAS, v.Name().Param.Heapaddr, prealloc[v])
nn.SetColas(true)
nn = typecheck(nn, ctxStmt)
return walkstmt(nn)
}
- case OBLOCK:
- walkstmtlist(n.List.Slice())
+ case ir.OBLOCK:
+ walkstmtlist(n.List().Slice())
- case OCASE:
- yyerror("case statement out of place")
+ case ir.OCASE:
+ base.Errorf("case statement out of place")
- case ODEFER:
- Curfn.Func.SetHasDefer(true)
- Curfn.Func.numDefers++
- if Curfn.Func.numDefers > maxOpenDefers {
+ case ir.ODEFER:
+ Curfn.Func().SetHasDefer(true)
+ Curfn.Func().NumDefers++
+ if Curfn.Func().NumDefers > maxOpenDefers {
// Don't allow open-coded defers if there are more than
// 8 defers in the function, since we use a single
// byte to record active defers.
- Curfn.Func.SetOpenCodedDeferDisallowed(true)
+ Curfn.Func().SetOpenCodedDeferDisallowed(true)
}
- if n.Esc != EscNever {
+ if n.Esc() != EscNever {
// If n.Esc is not EscNever, then this defer occurs in a loop,
// so open-coded defers cannot be used in this function.
- Curfn.Func.SetOpenCodedDeferDisallowed(true)
+ Curfn.Func().SetOpenCodedDeferDisallowed(true)
}
fallthrough
- case OGO:
- switch n.Left.Op {
- case OPRINT, OPRINTN:
- n.Left = wrapCall(n.Left, &n.Ninit)
-
- case ODELETE:
- if mapfast(n.Left.List.First().Type) == mapslow {
- n.Left = wrapCall(n.Left, &n.Ninit)
+ case ir.OGO:
+ switch n.Left().Op() {
+ case ir.OPRINT, ir.OPRINTN:
+ n.SetLeft(wrapCall(n.Left(), n.PtrInit()))
+
+ case ir.ODELETE:
+ if mapfast(n.Left().List().First().Type()) == mapslow {
+ n.SetLeft(wrapCall(n.Left(), n.PtrInit()))
} else {
- n.Left = walkexpr(n.Left, &n.Ninit)
+ n.SetLeft(walkexpr(n.Left(), n.PtrInit()))
}
- case OCOPY:
- n.Left = copyany(n.Left, &n.Ninit, true)
+ case ir.OCOPY:
+ n.SetLeft(copyany(n.Left(), n.PtrInit(), true))
- case OCALLFUNC, OCALLMETH, OCALLINTER:
- if n.Left.Nbody.Len() > 0 {
- n.Left = wrapCall(n.Left, &n.Ninit)
+ case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
+ if n.Left().Body().Len() > 0 {
+ n.SetLeft(wrapCall(n.Left(), n.PtrInit()))
} else {
- n.Left = walkexpr(n.Left, &n.Ninit)
+ n.SetLeft(walkexpr(n.Left(), n.PtrInit()))
}
default:
- n.Left = walkexpr(n.Left, &n.Ninit)
+ n.SetLeft(walkexpr(n.Left(), n.PtrInit()))
}
- case OFOR, OFORUNTIL:
- if n.Left != nil {
- walkstmtlist(n.Left.Ninit.Slice())
- init := n.Left.Ninit
- n.Left.Ninit.Set(nil)
- n.Left = walkexpr(n.Left, &init)
- n.Left = addinit(n.Left, init.Slice())
+ case ir.OFOR, ir.OFORUNTIL:
+ if n.Left() != nil {
+ walkstmtlist(n.Left().Init().Slice())
+ init := n.Left().Init()
+ n.Left().PtrInit().Set(nil)
+ n.SetLeft(walkexpr(n.Left(), &init))
+ n.SetLeft(addinit(n.Left(), init.Slice()))
}
- n.Right = walkstmt(n.Right)
- if n.Op == OFORUNTIL {
- walkstmtlist(n.List.Slice())
+ n.SetRight(walkstmt(n.Right()))
+ if n.Op() == ir.OFORUNTIL {
+ walkstmtlist(n.List().Slice())
}
- walkstmtlist(n.Nbody.Slice())
+ walkstmtlist(n.Body().Slice())
- case OIF:
- n.Left = walkexpr(n.Left, &n.Ninit)
- walkstmtlist(n.Nbody.Slice())
- walkstmtlist(n.Rlist.Slice())
+ case ir.OIF:
+ n.SetLeft(walkexpr(n.Left(), n.PtrInit()))
+ walkstmtlist(n.Body().Slice())
+ walkstmtlist(n.Rlist().Slice())
- case ORETURN:
- Curfn.Func.numReturns++
- if n.List.Len() == 0 {
+ case ir.ORETURN:
+ Curfn.Func().NumReturns++
+ if n.List().Len() == 0 {
break
}
- if (Curfn.Type.FuncType().Outnamed && n.List.Len() > 1) || paramoutheap(Curfn) {
+ if (Curfn.Type().FuncType().Outnamed && n.List().Len() > 1) || paramoutheap(Curfn) {
// assign to the function out parameters,
// so that reorder3 can fix up conflicts
- var rl []*Node
+ var rl []ir.Node
- for _, ln := range Curfn.Func.Dcl {
+ for _, ln := range Curfn.Func().Dcl {
cl := ln.Class()
- if cl == PAUTO || cl == PAUTOHEAP {
+ if cl == ir.PAUTO || cl == ir.PAUTOHEAP {
break
}
- if cl == PPARAMOUT {
- if ln.isParamStackCopy() {
- ln = walkexpr(typecheck(nod(ODEREF, ln.Name.Param.Heapaddr, nil), ctxExpr), nil)
+ if cl == ir.PPARAMOUT {
+ if isParamStackCopy(ln) {
+ ln = walkexpr(typecheck(ir.Nod(ir.ODEREF, ln.Name().Param.Heapaddr, nil), ctxExpr), nil)
}
rl = append(rl, ln)
}
}
- if got, want := n.List.Len(), len(rl); got != want {
+ if got, want := n.List().Len(), len(rl); got != want {
// order should have rewritten multi-value function calls
// with explicit OAS2FUNC nodes.
- Fatalf("expected %v return arguments, have %v", want, got)
+ base.Fatalf("expected %v return arguments, have %v", want, got)
}
// move function calls out, to make reorder3's job easier.
- walkexprlistsafe(n.List.Slice(), &n.Ninit)
+ walkexprlistsafe(n.List().Slice(), n.PtrInit())
- ll := ascompatee(n.Op, rl, n.List.Slice(), &n.Ninit)
- n.List.Set(reorder3(ll))
+ ll := ascompatee(n.Op(), rl, n.List().Slice(), n.PtrInit())
+ n.PtrList().Set(reorder3(ll))
break
}
- walkexprlist(n.List.Slice(), &n.Ninit)
+ walkexprlist(n.List().Slice(), n.PtrInit())
// For each return parameter (lhs), assign the corresponding result (rhs).
- lhs := Curfn.Type.Results()
- rhs := n.List.Slice()
- res := make([]*Node, lhs.NumFields())
+ lhs := Curfn.Type().Results()
+ rhs := n.List().Slice()
+ res := make([]ir.Node, lhs.NumFields())
for i, nl := range lhs.FieldSlice() {
- nname := asNode(nl.Nname)
- if nname.isParamHeapCopy() {
- nname = nname.Name.Param.Stackcopy
+ nname := ir.AsNode(nl.Nname)
+ if isParamHeapCopy(nname) {
+ nname = nname.Name().Param.Stackcopy
}
- a := nod(OAS, nname, rhs[i])
- res[i] = convas(a, &n.Ninit)
+ a := ir.Nod(ir.OAS, nname, rhs[i])
+ res[i] = convas(a, n.PtrInit())
}
- n.List.Set(res)
+ n.PtrList().Set(res)
- case ORETJMP:
+ case ir.ORETJMP:
break
- case OINLMARK:
+ case ir.OINLMARK:
break
- case OSELECT:
+ case ir.OSELECT:
walkselect(n)
- case OSWITCH:
+ case ir.OSWITCH:
walkswitch(n)
- case ORANGE:
+ case ir.ORANGE:
n = walkrange(n)
}
- if n.Op == ONAME {
- Fatalf("walkstmt ended up with name: %+v", n)
+ if n.Op() == ir.ONAME {
+ base.Fatalf("walkstmt ended up with name: %+v", n)
}
return n
}
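
The change that dominates every hunk in this file is mechanical: direct field access on the old *gc.Node struct (n.Left, n.Op, n.Left = x) becomes a method call on the new ir.Node interface (n.Left(), n.Op(), n.SetLeft(x)). A minimal sketch of that pattern, with hypothetical names standing in for the real ir package:

    package main

    import "fmt"

    // Node stands in for the ir.Node interface: fields that used to be
    // read and written directly are now reached through getter/setter
    // pairs, which is what lets *gc.Node be split into per-op structs.
    type Node interface {
        Op() string
        Left() Node
        SetLeft(Node)
    }

    // binary is one hypothetical concrete node type.
    type binary struct {
        op   string
        left Node
    }

    func (n *binary) Op() string     { return n.op }
    func (n *binary) Left() Node     { return n.left }
    func (n *binary) SetLeft(l Node) { n.left = l }

    func main() {
        n := &binary{op: "OADD"}
        n.SetLeft(&binary{op: "OLITERAL"}) // was: n.Left = ...
        fmt.Println(n.Left().Op())         // was: n.Left.Op
    }
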
@@ -341,20 +346,20 @@ func walkstmt(n *Node) *Node {
// the types of expressions are calculated.
// compile-time constants are evaluated.
// complex side effects like statements are appended to init.
-func walkexprlist(s []*Node, init *Nodes) {
+func walkexprlist(s []ir.Node, init *ir.Nodes) {
for i := range s {
s[i] = walkexpr(s[i], init)
}
}
-func walkexprlistsafe(s []*Node, init *Nodes) {
+func walkexprlistsafe(s []ir.Node, init *ir.Nodes) {
for i, n := range s {
s[i] = safeexpr(n, init)
s[i] = walkexpr(s[i], init)
}
}
-func walkexprlistcheap(s []*Node, init *Nodes) {
+func walkexprlistcheap(s []ir.Node, init *ir.Nodes) {
for i, n := range s {
s[i] = cheapexpr(n, init)
s[i] = walkexpr(s[i], init)
@@ -377,7 +382,7 @@ func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) {
return "convT16", false
case from.Size() == 4 && from.Align == 4 && !from.HasPointers():
return "convT32", false
- case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !from.HasPointers():
+ case from.Size() == 8 && from.Align == types.Types[types.TUINT64].Align && !from.HasPointers():
return "convT64", false
}
if sc := from.SoleComponent(); sc != nil {
@@ -402,205 +407,204 @@ func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) {
return "convT2I", true
}
}
- Fatalf("unknown conv func %c2%c", from.Tie(), to.Tie())
+ base.Fatalf("unknown conv func %c2%c", from.Tie(), to.Tie())
panic("unreachable")
}
// The result of walkexpr MUST be assigned back to n, e.g.
// n.Left = walkexpr(n.Left, init)
-func walkexpr(n *Node, init *Nodes) *Node {
+func walkexpr(n ir.Node, init *ir.Nodes) ir.Node {
if n == nil {
return n
}
// Eagerly checkwidth all expressions for the back end.
- if n.Type != nil && !n.Type.WidthCalculated() {
- switch n.Type.Etype {
- case TBLANK, TNIL, TIDEAL:
+ if n.Type() != nil && !n.Type().WidthCalculated() {
+ switch n.Type().Etype {
+ case types.TBLANK, types.TNIL, types.TIDEAL:
default:
- checkwidth(n.Type)
+ checkwidth(n.Type())
}
}
- if init == &n.Ninit {
+ if init == n.PtrInit() {
// not okay to use n->ninit when walking n,
// because we might replace n with some other node
// and would lose the init list.
- Fatalf("walkexpr init == &n->ninit")
+ base.Fatalf("walkexpr init == &n->ninit")
}
- if n.Ninit.Len() != 0 {
- walkstmtlist(n.Ninit.Slice())
- init.AppendNodes(&n.Ninit)
+ if n.Init().Len() != 0 {
+ walkstmtlist(n.Init().Slice())
+ init.AppendNodes(n.PtrInit())
}
lno := setlineno(n)
- if Debug.w > 1 {
- Dump("before walk expr", n)
+ if base.Flag.LowerW > 1 {
+ ir.Dump("before walk expr", n)
}
if n.Typecheck() != 1 {
- Fatalf("missed typecheck: %+v", n)
+ base.Fatalf("missed typecheck: %+v", n)
}
- if n.Type.IsUntyped() {
- Fatalf("expression has untyped type: %+v", n)
+ if n.Type().IsUntyped() {
+ base.Fatalf("expression has untyped type: %+v", n)
}
- if n.Op == ONAME && n.Class() == PAUTOHEAP {
- nn := nod(ODEREF, n.Name.Param.Heapaddr, nil)
+ if n.Op() == ir.ONAME && n.Class() == ir.PAUTOHEAP {
+ nn := ir.Nod(ir.ODEREF, n.Name().Param.Heapaddr, nil)
nn = typecheck(nn, ctxExpr)
nn = walkexpr(nn, init)
- nn.Left.MarkNonNil()
+ nn.Left().MarkNonNil()
return nn
}
opswitch:
- switch n.Op {
+ switch n.Op() {
default:
- Dump("walk", n)
- Fatalf("walkexpr: switch 1 unknown op %+S", n)
+ ir.Dump("walk", n)
+ base.Fatalf("walkexpr: switch 1 unknown op %+S", n)
- case ONONAME, OEMPTY, OGETG, ONEWOBJ:
+ case ir.ONONAME, ir.OEMPTY, ir.OGETG, ir.ONEWOBJ, ir.OMETHEXPR:
- case OTYPE, ONAME, OLITERAL:
+ case ir.OTYPE, ir.ONAME, ir.OLITERAL, ir.ONIL:
// TODO(mdempsky): Just return n; see discussion on CL 38655.
// Perhaps refactor to use Node.mayBeShared for these instead.
// If these return early, make sure to still call
// stringsym for constant strings.
- case ONOT, ONEG, OPLUS, OBITNOT, OREAL, OIMAG, ODOTMETH, ODOTINTER,
- ODEREF, OSPTR, OITAB, OIDATA, OADDR:
- n.Left = walkexpr(n.Left, init)
+ case ir.ONOT, ir.ONEG, ir.OPLUS, ir.OBITNOT, ir.OREAL, ir.OIMAG, ir.ODOTMETH, ir.ODOTINTER,
+ ir.ODEREF, ir.OSPTR, ir.OITAB, ir.OIDATA, ir.OADDR:
+ n.SetLeft(walkexpr(n.Left(), init))
- case OEFACE, OAND, OANDNOT, OSUB, OMUL, OADD, OOR, OXOR, OLSH, ORSH:
- n.Left = walkexpr(n.Left, init)
- n.Right = walkexpr(n.Right, init)
+ case ir.OEFACE, ir.OAND, ir.OANDNOT, ir.OSUB, ir.OMUL, ir.OADD, ir.OOR, ir.OXOR, ir.OLSH, ir.ORSH:
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetRight(walkexpr(n.Right(), init))
- case ODOT, ODOTPTR:
+ case ir.ODOT, ir.ODOTPTR:
usefield(n)
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
- case ODOTTYPE, ODOTTYPE2:
- n.Left = walkexpr(n.Left, init)
+ case ir.ODOTTYPE, ir.ODOTTYPE2:
+ n.SetLeft(walkexpr(n.Left(), init))
// Set up interface type addresses for back end.
- n.Right = typename(n.Type)
- if n.Op == ODOTTYPE {
- n.Right.Right = typename(n.Left.Type)
+ n.SetRight(typename(n.Type()))
+ if n.Op() == ir.ODOTTYPE {
+ n.Right().SetRight(typename(n.Left().Type()))
}
- if !n.Type.IsInterface() && !n.Left.Type.IsEmptyInterface() {
- n.List.Set1(itabname(n.Type, n.Left.Type))
+ if !n.Type().IsInterface() && !n.Left().Type().IsEmptyInterface() {
+ n.PtrList().Set1(itabname(n.Type(), n.Left().Type()))
}
- case OLEN, OCAP:
+ case ir.OLEN, ir.OCAP:
if isRuneCount(n) {
// Replace len([]rune(string)) with runtime.countrunes(string).
- n = mkcall("countrunes", n.Type, init, conv(n.Left.Left, types.Types[TSTRING]))
+ n = mkcall("countrunes", n.Type(), init, conv(n.Left().Left(), types.Types[types.TSTRING]))
break
}
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
// replace len(*[10]int) with 10.
// delayed until now to preserve side effects.
- t := n.Left.Type
+ t := n.Left().Type()
if t.IsPtr() {
t = t.Elem()
}
if t.IsArray() {
- safeexpr(n.Left, init)
- setintconst(n, t.NumElem())
+ safeexpr(n.Left(), init)
+ n = origIntConst(n, t.NumElem())
n.SetTypecheck(1)
}
- case OCOMPLEX:
+ case ir.OCOMPLEX:
// Use results from call expression as arguments for complex.
- if n.Left == nil && n.Right == nil {
- n.Left = n.List.First()
- n.Right = n.List.Second()
+ if n.Left() == nil && n.Right() == nil {
+ n.SetLeft(n.List().First())
+ n.SetRight(n.List().Second())
}
- n.Left = walkexpr(n.Left, init)
- n.Right = walkexpr(n.Right, init)
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetRight(walkexpr(n.Right(), init))
- case OEQ, ONE, OLT, OLE, OGT, OGE:
+ case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
n = walkcompare(n, init)
- case OANDAND, OOROR:
- n.Left = walkexpr(n.Left, init)
+ case ir.OANDAND, ir.OOROR:
+ n.SetLeft(walkexpr(n.Left(), init))
// cannot put side effects from n.Right on init,
// because they cannot run before n.Left is checked.
// save elsewhere and store on the eventual n.Right.
- var ll Nodes
+ var ll ir.Nodes
- n.Right = walkexpr(n.Right, &ll)
- n.Right = addinit(n.Right, ll.Slice())
+ n.SetRight(walkexpr(n.Right(), &ll))
+ n.SetRight(addinit(n.Right(), ll.Slice()))
- case OPRINT, OPRINTN:
+ case ir.OPRINT, ir.OPRINTN:
n = walkprint(n, init)
- case OPANIC:
- n = mkcall("gopanic", nil, init, n.Left)
+ case ir.OPANIC:
+ n = mkcall("gopanic", nil, init, n.Left())
- case ORECOVER:
- n = mkcall("gorecover", n.Type, init, nod(OADDR, nodfp, nil))
+ case ir.ORECOVER:
+ n = mkcall("gorecover", n.Type(), init, ir.Nod(ir.OADDR, nodfp, nil))
- case OCLOSUREVAR, OCFUNC:
+ case ir.OCLOSUREVAR, ir.OCFUNC:
- case OCALLINTER, OCALLFUNC, OCALLMETH:
- if n.Op == OCALLINTER {
+ case ir.OCALLINTER, ir.OCALLFUNC, ir.OCALLMETH:
+ if n.Op() == ir.OCALLINTER {
usemethod(n)
markUsedIfaceMethod(n)
}
- if n.Op == OCALLFUNC && n.Left.Op == OCLOSURE {
+ if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.OCLOSURE {
// Transform direct call of a closure to call of a normal function.
// transformclosure already did all preparation work.
// Prepend captured variables to argument list.
- n.List.Prepend(n.Left.Func.Enter.Slice()...)
-
- n.Left.Func.Enter.Set(nil)
+ n.PtrList().Prepend(n.Left().Func().ClosureEnter.Slice()...)
+ n.Left().Func().ClosureEnter.Set(nil)
// Replace OCLOSURE with ONAME/PFUNC.
- n.Left = n.Left.Func.Closure.Func.Nname
+ n.SetLeft(n.Left().Func().Nname)
// Update type of OCALLFUNC node.
// Output arguments had not changed, but their offsets could.
- if n.Left.Type.NumResults() == 1 {
- n.Type = n.Left.Type.Results().Field(0).Type
+ if n.Left().Type().NumResults() == 1 {
+ n.SetType(n.Left().Type().Results().Field(0).Type)
} else {
- n.Type = n.Left.Type.Results()
+ n.SetType(n.Left().Type().Results())
}
}
walkCall(n, init)
- case OAS, OASOP:
- init.AppendNodes(&n.Ninit)
+ case ir.OAS, ir.OASOP:
+ init.AppendNodes(n.PtrInit())
// Recognize m[k] = append(m[k], ...) so we can reuse
// the mapassign call.
- mapAppend := n.Left.Op == OINDEXMAP && n.Right.Op == OAPPEND
- if mapAppend && !samesafeexpr(n.Left, n.Right.List.First()) {
- Fatalf("not same expressions: %v != %v", n.Left, n.Right.List.First())
+ mapAppend := n.Left().Op() == ir.OINDEXMAP && n.Right().Op() == ir.OAPPEND
+ if mapAppend && !samesafeexpr(n.Left(), n.Right().List().First()) {
+ base.Fatalf("not same expressions: %v != %v", n.Left(), n.Right().List().First())
}
- n.Left = walkexpr(n.Left, init)
- n.Left = safeexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetLeft(safeexpr(n.Left(), init))
if mapAppend {
- n.Right.List.SetFirst(n.Left)
+ n.Right().List().SetFirst(n.Left())
}
- if n.Op == OASOP {
+ if n.Op() == ir.OASOP {
// Rewrite x op= y into x = x op y.
- n.Right = nod(n.SubOp(), n.Left, n.Right)
- n.Right = typecheck(n.Right, ctxExpr)
+ n.SetRight(ir.Nod(n.SubOp(), n.Left(), n.Right()))
+ n.SetRight(typecheck(n.Right(), ctxExpr))
- n.Op = OAS
+ n.SetOp(ir.OAS)
n.ResetAux()
}
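
The OASOP branch above rewrites `x op= y` into `x = x op y` before walking continues, so the rest of walk only ever sees plain assignments. A toy version of the rewrite; expr and assign are hypothetical stand-ins for the real IR node types:

    package sketch

    type expr struct {
        op          string
        left, right *expr
    }

    type assign struct {
        subOp string // "+" for `+=`, "" once rewritten to plain `=`
        lhs   *expr
        rhs   *expr
    }

    // desugar mirrors n.SetRight(ir.Nod(n.SubOp(), n.Left(), n.Right()))
    // followed by n.SetOp(ir.OAS).
    func desugar(a *assign) {
        if a.subOp == "" {
            return // already a plain assignment
        }
        a.rhs = &expr{op: a.subOp, left: a.lhs, right: a.rhs}
        a.subOp = ""
    }
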
@@ -608,35 +612,35 @@ opswitch:
break
}
- if n.Right == nil {
+ if n.Right() == nil {
// TODO(austin): Check all "implicit zeroing"
break
}
- if !instrumenting && isZero(n.Right) {
+ if !instrumenting && isZero(n.Right()) {
break
}
- switch n.Right.Op {
+ switch n.Right().Op() {
default:
- n.Right = walkexpr(n.Right, init)
+ n.SetRight(walkexpr(n.Right(), init))
- case ORECV:
+ case ir.ORECV:
// x = <-c; n.Left is x, n.Right.Left is c.
// order.stmt made sure x is addressable.
- n.Right.Left = walkexpr(n.Right.Left, init)
+ n.Right().SetLeft(walkexpr(n.Right().Left(), init))
- n1 := nod(OADDR, n.Left, nil)
- r := n.Right.Left // the channel
- n = mkcall1(chanfn("chanrecv1", 2, r.Type), nil, init, r, n1)
+ n1 := ir.Nod(ir.OADDR, n.Left(), nil)
+ r := n.Right().Left() // the channel
+ n = mkcall1(chanfn("chanrecv1", 2, r.Type()), nil, init, r, n1)
n = walkexpr(n, init)
break opswitch
- case OAPPEND:
+ case ir.OAPPEND:
// x = append(...)
- r := n.Right
- if r.Type.Elem().NotInHeap() {
- yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", r.Type.Elem())
+ r := n.Right()
+ if r.Type().Elem().NotInHeap() {
+ base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", r.Type().Elem())
}
switch {
case isAppendOfMake(r):
@@ -647,86 +651,86 @@ opswitch:
default:
r = walkappend(r, init, n)
}
- n.Right = r
- if r.Op == OAPPEND {
+ n.SetRight(r)
+ if r.Op() == ir.OAPPEND {
// Left in place for back end.
// Do not add a new write barrier.
// Set up address of type for back end.
- r.Left = typename(r.Type.Elem())
+ r.SetLeft(typename(r.Type().Elem()))
break opswitch
}
// Otherwise, lowered for race detector.
// Treat as ordinary assignment.
}
- if n.Left != nil && n.Right != nil {
+ if n.Left() != nil && n.Right() != nil {
n = convas(n, init)
}
- case OAS2:
- init.AppendNodes(&n.Ninit)
- walkexprlistsafe(n.List.Slice(), init)
- walkexprlistsafe(n.Rlist.Slice(), init)
- ll := ascompatee(OAS, n.List.Slice(), n.Rlist.Slice(), init)
+ case ir.OAS2:
+ init.AppendNodes(n.PtrInit())
+ walkexprlistsafe(n.List().Slice(), init)
+ walkexprlistsafe(n.Rlist().Slice(), init)
+ ll := ascompatee(ir.OAS, n.List().Slice(), n.Rlist().Slice(), init)
ll = reorder3(ll)
n = liststmt(ll)
// a,b,... = fn()
- case OAS2FUNC:
- init.AppendNodes(&n.Ninit)
+ case ir.OAS2FUNC:
+ init.AppendNodes(n.PtrInit())
- r := n.Right
- walkexprlistsafe(n.List.Slice(), init)
+ r := n.Right()
+ walkexprlistsafe(n.List().Slice(), init)
r = walkexpr(r, init)
if isIntrinsicCall(r) {
- n.Right = r
+ n.SetRight(r)
break
}
init.Append(r)
- ll := ascompatet(n.List, r.Type)
+ ll := ascompatet(n.List(), r.Type())
n = liststmt(ll)
// x, y = <-c
// order.stmt made sure x is addressable or blank.
- case OAS2RECV:
- init.AppendNodes(&n.Ninit)
-
- r := n.Right
- walkexprlistsafe(n.List.Slice(), init)
- r.Left = walkexpr(r.Left, init)
- var n1 *Node
- if n.List.First().isBlank() {
+ case ir.OAS2RECV:
+ init.AppendNodes(n.PtrInit())
+
+ r := n.Right()
+ walkexprlistsafe(n.List().Slice(), init)
+ r.SetLeft(walkexpr(r.Left(), init))
+ var n1 ir.Node
+ if ir.IsBlank(n.List().First()) {
n1 = nodnil()
} else {
- n1 = nod(OADDR, n.List.First(), nil)
+ n1 = ir.Nod(ir.OADDR, n.List().First(), nil)
}
- fn := chanfn("chanrecv2", 2, r.Left.Type)
- ok := n.List.Second()
- call := mkcall1(fn, types.Types[TBOOL], init, r.Left, n1)
- n = nod(OAS, ok, call)
+ fn := chanfn("chanrecv2", 2, r.Left().Type())
+ ok := n.List().Second()
+ call := mkcall1(fn, types.Types[types.TBOOL], init, r.Left(), n1)
+ n = ir.Nod(ir.OAS, ok, call)
n = typecheck(n, ctxStmt)
// a,b = m[i]
- case OAS2MAPR:
- init.AppendNodes(&n.Ninit)
+ case ir.OAS2MAPR:
+ init.AppendNodes(n.PtrInit())
- r := n.Right
- walkexprlistsafe(n.List.Slice(), init)
- r.Left = walkexpr(r.Left, init)
- r.Right = walkexpr(r.Right, init)
- t := r.Left.Type
+ r := n.Right()
+ walkexprlistsafe(n.List().Slice(), init)
+ r.SetLeft(walkexpr(r.Left(), init))
+ r.SetRight(walkexpr(r.Right(), init))
+ t := r.Left().Type()
fast := mapfast(t)
- var key *Node
+ var key ir.Node
if fast != mapslow {
// fast versions take key by value
- key = r.Right
+ key = r.Right()
} else {
// standard version takes key by reference
// order.expr made sure key is addressable.
- key = nod(OADDR, r.Right, nil)
+ key = ir.Nod(ir.OADDR, r.Right(), nil)
}
// from:
@@ -734,71 +738,71 @@ opswitch:
// to:
// var,b = mapaccess2*(t, m, i)
// a = *var
- a := n.List.First()
+ a := n.List().First()
if w := t.Elem().Width; w <= zeroValSize {
fn := mapfn(mapaccess2[fast], t)
- r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key)
+ r = mkcall1(fn, fn.Type().Results(), init, typename(t), r.Left(), key)
} else {
fn := mapfn("mapaccess2_fat", t)
z := zeroaddr(w)
- r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key, z)
+ r = mkcall1(fn, fn.Type().Results(), init, typename(t), r.Left(), key, z)
}
// mapaccess2* returns a typed bool, but due to spec changes,
// the boolean result of i.(T) is now untyped so we make it the
// same type as the variable on the lhs.
- if ok := n.List.Second(); !ok.isBlank() && ok.Type.IsBoolean() {
- r.Type.Field(1).Type = ok.Type
+ if ok := n.List().Second(); !ir.IsBlank(ok) && ok.Type().IsBoolean() {
+ r.Type().Field(1).Type = ok.Type()
}
- n.Right = r
- n.Op = OAS2FUNC
+ n.SetRight(r)
+ n.SetOp(ir.OAS2FUNC)
// don't generate a = *var if a is _
- if !a.isBlank() {
+ if !ir.IsBlank(a) {
var_ := temp(types.NewPtr(t.Elem()))
var_.SetTypecheck(1)
var_.MarkNonNil() // mapaccess always returns a non-nil pointer
- n.List.SetFirst(var_)
+ n.List().SetFirst(var_)
n = walkexpr(n, init)
init.Append(n)
- n = nod(OAS, a, nod(ODEREF, var_, nil))
+ n = ir.Nod(ir.OAS, a, ir.Nod(ir.ODEREF, var_, nil))
}
n = typecheck(n, ctxStmt)
n = walkexpr(n, init)
- case ODELETE:
- init.AppendNodes(&n.Ninit)
- map_ := n.List.First()
- key := n.List.Second()
+ case ir.ODELETE:
+ init.AppendNodes(n.PtrInit())
+ map_ := n.List().First()
+ key := n.List().Second()
map_ = walkexpr(map_, init)
key = walkexpr(key, init)
- t := map_.Type
+ t := map_.Type()
fast := mapfast(t)
if fast == mapslow {
// order.stmt made sure key is addressable.
- key = nod(OADDR, key, nil)
+ key = ir.Nod(ir.OADDR, key, nil)
}
n = mkcall1(mapfndel(mapdelete[fast], t), nil, init, typename(t), map_, key)
- case OAS2DOTTYPE:
- walkexprlistsafe(n.List.Slice(), init)
- n.Right = walkexpr(n.Right, init)
+ case ir.OAS2DOTTYPE:
+ walkexprlistsafe(n.List().Slice(), init)
+ n.SetRight(walkexpr(n.Right(), init))
- case OCONVIFACE:
- n.Left = walkexpr(n.Left, init)
+ case ir.OCONVIFACE:
+ n.SetLeft(walkexpr(n.Left(), init))
- fromType := n.Left.Type
- toType := n.Type
+ fromType := n.Left().Type()
+ toType := n.Type()
- if !fromType.IsInterface() && !Curfn.Func.Nname.isBlank() { // skip unnamed functions (func _())
- markTypeUsedInInterface(fromType, Curfn.Func.lsym)
+ if !fromType.IsInterface() && !ir.IsBlank(Curfn.Func().Nname) { // skip unnamed functions (func _())
+ markTypeUsedInInterface(fromType, Curfn.Func().LSym)
}
// typeword generates the type word of the interface value.
- typeword := func() *Node {
+ typeword := func() ir.Node {
if toType.IsEmptyInterface() {
return typename(fromType)
}
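
The OAS2MAPR portion of the hunk above lowers the comma-ok map read `a, b = m[i]` into a runtime call returning a pointer plus a bool, with the dereference emitted as a separate assignment (and skipped when `a` is blank). A rough functional analogue, assuming nothing about the real runtime signatures:

    package sketch

    var zero int // shared zero value, like the runtime's zero block

    // access2 plays the role of runtime.mapaccess2: it returns a pointer
    // to the element (here a copy, for illustration) or to a shared zero,
    // plus the comma-ok bool.
    func access2(m map[string]int, k string) (*int, bool) {
        if v, ok := m[k]; ok {
            p := new(int)
            *p = v
            return p, true
        }
        return &zero, false
    }

    // `a, ok = m[k]` is walked into roughly this shape:
    func lowered(m map[string]int, k string) (a int, ok bool) {
        ptr, ok := access2(m, k) // var_, ok = mapaccess2*(t, m, &k)
        a = *ptr                 // a = *var_ (omitted when a is blank)
        return a, ok
    }
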
@@ -807,58 +811,58 @@ opswitch:
// Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped.
if isdirectiface(fromType) {
- l := nod(OEFACE, typeword(), n.Left)
- l.Type = toType
+ l := ir.Nod(ir.OEFACE, typeword(), n.Left())
+ l.SetType(toType)
l.SetTypecheck(n.Typecheck())
n = l
break
}
if staticuint64s == nil {
- staticuint64s = newname(Runtimepkg.Lookup("staticuint64s"))
- staticuint64s.SetClass(PEXTERN)
+ staticuint64s = NewName(Runtimepkg.Lookup("staticuint64s"))
+ staticuint64s.SetClass(ir.PEXTERN)
// The actual type is [256]uint64, but we use [256*8]uint8 so we can address
// individual bytes.
- staticuint64s.Type = types.NewArray(types.Types[TUINT8], 256*8)
- zerobase = newname(Runtimepkg.Lookup("zerobase"))
- zerobase.SetClass(PEXTERN)
- zerobase.Type = types.Types[TUINTPTR]
+ staticuint64s.SetType(types.NewArray(types.Types[types.TUINT8], 256*8))
+ zerobase = NewName(Runtimepkg.Lookup("zerobase"))
+ zerobase.SetClass(ir.PEXTERN)
+ zerobase.SetType(types.Types[types.TUINTPTR])
}
// Optimize convT2{E,I} for many cases in which T is not pointer-shaped,
// by using an existing addressable value identical to n.Left
// or creating one on the stack.
- var value *Node
+ var value ir.Node
switch {
case fromType.Size() == 0:
// n.Left is zero-sized. Use zerobase.
- cheapexpr(n.Left, init) // Evaluate n.Left for side-effects. See issue 19246.
+ cheapexpr(n.Left(), init) // Evaluate n.Left for side-effects. See issue 19246.
value = zerobase
case fromType.IsBoolean() || (fromType.Size() == 1 && fromType.IsInteger()):
// n.Left is a bool/byte. Use staticuint64s[n.Left * 8] on little-endian
// and staticuint64s[n.Left * 8 + 7] on big-endian.
- n.Left = cheapexpr(n.Left, init)
+ n.SetLeft(cheapexpr(n.Left(), init))
// byteindex widens n.Left so that the multiplication doesn't overflow.
- index := nod(OLSH, byteindex(n.Left), nodintconst(3))
+ index := ir.Nod(ir.OLSH, byteindex(n.Left()), nodintconst(3))
if thearch.LinkArch.ByteOrder == binary.BigEndian {
- index = nod(OADD, index, nodintconst(7))
+ index = ir.Nod(ir.OADD, index, nodintconst(7))
}
- value = nod(OINDEX, staticuint64s, index)
+ value = ir.Nod(ir.OINDEX, staticuint64s, index)
value.SetBounded(true)
- case n.Left.Class() == PEXTERN && n.Left.Name != nil && n.Left.Name.Readonly():
+ case n.Left().Class() == ir.PEXTERN && n.Left().Name() != nil && n.Left().Name().Readonly():
// n.Left is a readonly global; use it directly.
- value = n.Left
- case !fromType.IsInterface() && n.Esc == EscNone && fromType.Width <= 1024:
+ value = n.Left()
+ case !fromType.IsInterface() && n.Esc() == EscNone && fromType.Width <= 1024:
// n.Left does not escape. Use a stack temporary initialized to n.Left.
value = temp(fromType)
- init.Append(typecheck(nod(OAS, value, n.Left), ctxStmt))
+ init.Append(typecheck(ir.Nod(ir.OAS, value, n.Left()), ctxStmt))
}
if value != nil {
// Value is identical to n.Left.
// Construct the interface directly: {type/itab, &value}.
- l := nod(OEFACE, typeword(), typecheck(nod(OADDR, value, nil), ctxExpr))
- l.Type = toType
+ l := ir.Nod(ir.OEFACE, typeword(), typecheck(ir.Nod(ir.OADDR, value, nil), ctxExpr))
+ l.SetType(toType)
l.SetTypecheck(n.Typecheck())
n = l
break
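
In the OCONVIFACE hunk above, boxing a bool or single byte avoids allocation by pointing the interface's data word into the runtime's staticuint64s table, a [256]uint64 holding the values 0..255 but addressed as bytes. A sketch of the index computation (the function name is illustrative; the byte-order test mirrors the real thearch.LinkArch.ByteOrder check):

    package sketch

    import "encoding/binary"

    // staticByteOffset returns the byte offset into staticuint64s (viewed
    // as [256*8]uint8) where the value b lives: b*8 on little-endian
    // machines, b*8+7 on big-endian ones.
    func staticByteOffset(b byte, order binary.ByteOrder) int {
        idx := int(b) << 3 // ir.OLSH of byteindex(n.Left) by 3
        if order == binary.BigEndian {
            idx += 7 // ir.OADD of nodintconst(7)
        }
        return idx
    }
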
@@ -873,20 +877,20 @@ opswitch:
if toType.IsEmptyInterface() && fromType.IsInterface() && !fromType.IsEmptyInterface() {
// Evaluate the input interface.
c := temp(fromType)
- init.Append(nod(OAS, c, n.Left))
+ init.Append(ir.Nod(ir.OAS, c, n.Left()))
// Get the itab out of the interface.
- tmp := temp(types.NewPtr(types.Types[TUINT8]))
- init.Append(nod(OAS, tmp, typecheck(nod(OITAB, c, nil), ctxExpr)))
+ tmp := temp(types.NewPtr(types.Types[types.TUINT8]))
+ init.Append(ir.Nod(ir.OAS, tmp, typecheck(ir.Nod(ir.OITAB, c, nil), ctxExpr)))
// Get the type out of the itab.
- nif := nod(OIF, typecheck(nod(ONE, tmp, nodnil()), ctxExpr), nil)
- nif.Nbody.Set1(nod(OAS, tmp, itabType(tmp)))
+ nif := ir.Nod(ir.OIF, typecheck(ir.Nod(ir.ONE, tmp, nodnil()), ctxExpr), nil)
+ nif.PtrBody().Set1(ir.Nod(ir.OAS, tmp, itabType(tmp)))
init.Append(nif)
// Build the result.
- e := nod(OEFACE, tmp, ifaceData(n.Pos, c, types.NewPtr(types.Types[TUINT8])))
- e.Type = toType // assign type manually, typecheck doesn't understand OEFACE.
+ e := ir.Nod(ir.OEFACE, tmp, ifaceData(n.Pos(), c, types.NewPtr(types.Types[types.TUINT8])))
+ e.SetType(toType) // assign type manually, typecheck doesn't understand OEFACE.
e.SetTypecheck(1)
n = e
break
@@ -901,20 +905,20 @@ opswitch:
fn := syslook(fnname)
dowidth(fromType)
fn = substArgTypes(fn, fromType)
- dowidth(fn.Type)
- call := nod(OCALL, fn, nil)
- call.List.Set1(n.Left)
+ dowidth(fn.Type())
+ call := ir.Nod(ir.OCALL, fn, nil)
+ call.PtrList().Set1(n.Left())
call = typecheck(call, ctxExpr)
call = walkexpr(call, init)
call = safeexpr(call, init)
- e := nod(OEFACE, typeword(), call)
- e.Type = toType
+ e := ir.Nod(ir.OEFACE, typeword(), call)
+ e.SetType(toType)
e.SetTypecheck(1)
n = e
break
}
- var tab *Node
+ var tab ir.Node
if fromType.IsInterface() {
// convI2I
tab = typename(toType)
@@ -923,7 +927,7 @@ opswitch:
tab = typeword()
}
- v := n.Left
+ v := n.Left()
if needsaddr {
// Types of large or unknown size are passed by reference.
// Orderexpr arranged for n.Left to be a temporary for all
@@ -932,49 +936,49 @@ opswitch:
// with non-interface cases, is not visible to order.stmt, so we
// have to fall back on allocating a temp here.
if !islvalue(v) {
- v = copyexpr(v, v.Type, init)
+ v = copyexpr(v, v.Type(), init)
}
- v = nod(OADDR, v, nil)
+ v = ir.Nod(ir.OADDR, v, nil)
}
dowidth(fromType)
fn := syslook(fnname)
fn = substArgTypes(fn, fromType, toType)
- dowidth(fn.Type)
- n = nod(OCALL, fn, nil)
- n.List.Set2(tab, v)
+ dowidth(fn.Type())
+ n = ir.Nod(ir.OCALL, fn, nil)
+ n.PtrList().Set2(tab, v)
n = typecheck(n, ctxExpr)
n = walkexpr(n, init)
- case OCONV, OCONVNOP:
- n.Left = walkexpr(n.Left, init)
- if n.Op == OCONVNOP && checkPtr(Curfn, 1) {
- if n.Type.IsPtr() && n.Left.Type.IsUnsafePtr() { // unsafe.Pointer to *T
+ case ir.OCONV, ir.OCONVNOP:
+ n.SetLeft(walkexpr(n.Left(), init))
+ if n.Op() == ir.OCONVNOP && checkPtr(Curfn, 1) {
+ if n.Type().IsPtr() && n.Left().Type().IsUnsafePtr() { // unsafe.Pointer to *T
n = walkCheckPtrAlignment(n, init, nil)
break
}
- if n.Type.IsUnsafePtr() && n.Left.Type.IsUintptr() { // uintptr to unsafe.Pointer
+ if n.Type().IsUnsafePtr() && n.Left().Type().IsUintptr() { // uintptr to unsafe.Pointer
n = walkCheckPtrArithmetic(n, init)
break
}
}
- param, result := rtconvfn(n.Left.Type, n.Type)
- if param == Txxx {
+ param, result := rtconvfn(n.Left().Type(), n.Type())
+ if param == types.Txxx {
break
}
- fn := basicnames[param] + "to" + basicnames[result]
- n = conv(mkcall(fn, types.Types[result], init, conv(n.Left, types.Types[param])), n.Type)
+ fn := ir.BasicTypeNames[param] + "to" + ir.BasicTypeNames[result]
+ n = conv(mkcall(fn, types.Types[result], init, conv(n.Left(), types.Types[param])), n.Type())
- case ODIV, OMOD:
- n.Left = walkexpr(n.Left, init)
- n.Right = walkexpr(n.Right, init)
+ case ir.ODIV, ir.OMOD:
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetRight(walkexpr(n.Right(), init))
// rewrite complex div into function call.
- et := n.Left.Type.Etype
+ et := n.Left().Type().Etype
- if isComplex[et] && n.Op == ODIV {
- t := n.Type
- n = mkcall("complex128div", types.Types[TCOMPLEX128], init, conv(n.Left, types.Types[TCOMPLEX128]), conv(n.Right, types.Types[TCOMPLEX128]))
+ if isComplex[et] && n.Op() == ir.ODIV {
+ t := n.Type()
+ n = mkcall("complex128div", types.Types[types.TCOMPLEX128], init, conv(n.Left(), types.Types[types.TCOMPLEX128]), conv(n.Right(), types.Types[types.TCOMPLEX128]))
n = conv(n, t)
break
}
@@ -987,21 +991,21 @@ opswitch:
// rewrite 64-bit div and mod on 32-bit architectures.
// TODO: Remove this code once we can introduce
// runtime calls late in SSA processing.
- if Widthreg < 8 && (et == TINT64 || et == TUINT64) {
- if n.Right.Op == OLITERAL {
+ if Widthreg < 8 && (et == types.TINT64 || et == types.TUINT64) {
+ if n.Right().Op() == ir.OLITERAL {
// Leave div/mod by constant powers of 2 or small 16-bit constants.
// The SSA backend will handle those.
switch et {
- case TINT64:
- c := n.Right.Int64Val()
+ case types.TINT64:
+ c := n.Right().Int64Val()
if c < 0 {
c = -c
}
if c != 0 && c&(c-1) == 0 {
break opswitch
}
- case TUINT64:
- c := uint64(n.Right.Int64Val())
+ case types.TUINT64:
+ c := n.Right().Uint64Val()
if c < 1<<16 {
break opswitch
}
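
The hunk above keeps 64-bit division on 32-bit targets (Widthreg < 8) in the IR only when the SSA backend can strength-reduce it: signed divisors that are powers of two, unsigned divisors below 1<<16. Everything else becomes a runtime call such as int64div or uint64mod. The signed-divisor predicate, extracted as a sketch:

    package sketch

    // keepForSSA reports whether a signed 64-bit division by constant c
    // can stay in the IR for the SSA backend instead of becoming a
    // runtime call, mirroring the OLITERAL checks in walkexpr.
    func keepForSSA(c int64) bool {
        if c < 0 {
            c = -c
        }
        return c != 0 && c&(c-1) == 0 // nonzero power of two
    }
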
@@ -1011,75 +1015,75 @@ opswitch:
}
}
var fn string
- if et == TINT64 {
+ if et == types.TINT64 {
fn = "int64"
} else {
fn = "uint64"
}
- if n.Op == ODIV {
+ if n.Op() == ir.ODIV {
fn += "div"
} else {
fn += "mod"
}
- n = mkcall(fn, n.Type, init, conv(n.Left, types.Types[et]), conv(n.Right, types.Types[et]))
+ n = mkcall(fn, n.Type(), init, conv(n.Left(), types.Types[et]), conv(n.Right(), types.Types[et]))
}
- case OINDEX:
- n.Left = walkexpr(n.Left, init)
+ case ir.OINDEX:
+ n.SetLeft(walkexpr(n.Left(), init))
// save the original node for bounds checking elision.
// If it was a ODIV/OMOD walk might rewrite it.
- r := n.Right
+ r := n.Right()
- n.Right = walkexpr(n.Right, init)
+ n.SetRight(walkexpr(n.Right(), init))
// if range of type cannot exceed static array bound,
// disable bounds check.
if n.Bounded() {
break
}
- t := n.Left.Type
+ t := n.Left().Type()
if t != nil && t.IsPtr() {
t = t.Elem()
}
if t.IsArray() {
n.SetBounded(bounded(r, t.NumElem()))
- if Debug.m != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
- Warn("index bounds check elided")
+ if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Right(), constant.Int) {
+ base.Warn("index bounds check elided")
}
- if smallintconst(n.Right) && !n.Bounded() {
- yyerror("index out of bounds")
+ if smallintconst(n.Right()) && !n.Bounded() {
+ base.Errorf("index out of bounds")
}
- } else if Isconst(n.Left, CTSTR) {
- n.SetBounded(bounded(r, int64(len(n.Left.StringVal()))))
- if Debug.m != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
- Warn("index bounds check elided")
+ } else if ir.IsConst(n.Left(), constant.String) {
+ n.SetBounded(bounded(r, int64(len(n.Left().StringVal()))))
+ if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Right(), constant.Int) {
+ base.Warn("index bounds check elided")
}
- if smallintconst(n.Right) && !n.Bounded() {
- yyerror("index out of bounds")
+ if smallintconst(n.Right()) && !n.Bounded() {
+ base.Errorf("index out of bounds")
}
}
- if Isconst(n.Right, CTINT) {
- if n.Right.Val().U.(*Mpint).CmpInt64(0) < 0 || n.Right.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
- yyerror("index out of bounds")
+ if ir.IsConst(n.Right(), constant.Int) {
+ if v := n.Right().Val(); constant.Sign(v) < 0 || doesoverflow(v, types.Types[types.TINT]) {
+ base.Errorf("index out of bounds")
}
}
- case OINDEXMAP:
+ case ir.OINDEXMAP:
// Replace m[k] with *map{access1,assign}(maptype, m, &k)
- n.Left = walkexpr(n.Left, init)
- n.Right = walkexpr(n.Right, init)
- map_ := n.Left
- key := n.Right
- t := map_.Type
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetRight(walkexpr(n.Right(), init))
+ map_ := n.Left()
+ key := n.Right()
+ t := map_.Type()
if n.IndexMapLValue() {
// This m[k] expression is on the left-hand side of an assignment.
fast := mapfast(t)
if fast == mapslow {
// standard version takes key by reference.
// order.expr made sure key is addressable.
- key = nod(OADDR, key, nil)
+ key = ir.Nod(ir.OADDR, key, nil)
}
n = mkcall1(mapfn(mapassign[fast], t), nil, init, typename(t), map_, key)
} else {
@@ -1088,7 +1092,7 @@ opswitch:
if fast == mapslow {
// standard version takes key by reference.
// order.expr made sure key is addressable.
- key = nod(OADDR, key, nil)
+ key = ir.Nod(ir.OADDR, key, nil)
}
if w := t.Elem().Width; w <= zeroValSize {
@@ -1098,26 +1102,26 @@ opswitch:
n = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Elem()), init, typename(t), map_, key, z)
}
}
- n.Type = types.NewPtr(t.Elem())
+ n.SetType(types.NewPtr(t.Elem()))
n.MarkNonNil() // mapaccess1* and mapassign always return non-nil pointers.
- n = nod(ODEREF, n, nil)
- n.Type = t.Elem()
+ n = ir.Nod(ir.ODEREF, n, nil)
+ n.SetType(t.Elem())
n.SetTypecheck(1)
- case ORECV:
- Fatalf("walkexpr ORECV") // should see inside OAS only
+ case ir.ORECV:
+ base.Fatalf("walkexpr ORECV") // should see inside OAS only
- case OSLICEHEADER:
- n.Left = walkexpr(n.Left, init)
- n.List.SetFirst(walkexpr(n.List.First(), init))
- n.List.SetSecond(walkexpr(n.List.Second(), init))
+ case ir.OSLICEHEADER:
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.List().SetFirst(walkexpr(n.List().First(), init))
+ n.List().SetSecond(walkexpr(n.List().Second(), init))
- case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
- checkSlice := checkPtr(Curfn, 1) && n.Op == OSLICE3ARR && n.Left.Op == OCONVNOP && n.Left.Left.Type.IsUnsafePtr()
+ case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
+ checkSlice := checkPtr(Curfn, 1) && n.Op() == ir.OSLICE3ARR && n.Left().Op() == ir.OCONVNOP && n.Left().Left().Type().IsUnsafePtr()
if checkSlice {
- n.Left.Left = walkexpr(n.Left.Left, init)
+ n.Left().SetLeft(walkexpr(n.Left().Left(), init))
} else {
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
}
low, high, max := n.SliceBounds()
low = walkexpr(low, init)
@@ -1129,15 +1133,15 @@ opswitch:
max = walkexpr(max, init)
n.SetSliceBounds(low, high, max)
if checkSlice {
- n.Left = walkCheckPtrAlignment(n.Left, init, max)
+ n.SetLeft(walkCheckPtrAlignment(n.Left(), init, max))
}
- if n.Op.IsSlice3() {
- if max != nil && max.Op == OCAP && samesafeexpr(n.Left, max.Left) {
+ if n.Op().IsSlice3() {
+ if max != nil && max.Op() == ir.OCAP && samesafeexpr(n.Left(), max.Left()) {
// Reduce x[i:j:cap(x)] to x[i:j].
- if n.Op == OSLICE3 {
- n.Op = OSLICE
+ if n.Op() == ir.OSLICE3 {
+ n.SetOp(ir.OSLICE)
} else {
- n.Op = OSLICEARR
+ n.SetOp(ir.OSLICEARR)
}
n = reduceSlice(n)
}
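
The slice hunk above, together with reduceSlice further down in this diff, removes slice bounds that are provably redundant. samesafeexpr guards that the sliced expression on both sides is the same side-effect-free expression. At source level:

    package sketch

    // The reductions walk performs on slice expressions:
    func reductions(s []int, i, j int) {
        _ = s[i:j:cap(s)] // becomes s[i:j]  (OSLICE3 -> OSLICE)
        _ = s[i:len(s)]   // becomes s[i:]   (reduceSlice)
        _ = s[:]          // becomes s       (reduceSlice)
    }
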
@@ -1145,84 +1149,84 @@ opswitch:
n = reduceSlice(n)
}
- case ONEW:
- if n.Type.Elem().NotInHeap() {
- yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type.Elem())
+ case ir.ONEW:
+ if n.Type().Elem().NotInHeap() {
+ base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type().Elem())
}
- if n.Esc == EscNone {
- if n.Type.Elem().Width >= maxImplicitStackVarSize {
- Fatalf("large ONEW with EscNone: %v", n)
+ if n.Esc() == EscNone {
+ if n.Type().Elem().Width >= maxImplicitStackVarSize {
+ base.Fatalf("large ONEW with EscNone: %v", n)
}
- r := temp(n.Type.Elem())
- r = nod(OAS, r, nil) // zero temp
+ r := temp(n.Type().Elem())
+ r = ir.Nod(ir.OAS, r, nil) // zero temp
r = typecheck(r, ctxStmt)
init.Append(r)
- r = nod(OADDR, r.Left, nil)
+ r = ir.Nod(ir.OADDR, r.Left(), nil)
r = typecheck(r, ctxExpr)
n = r
} else {
- n = callnew(n.Type.Elem())
+ n = callnew(n.Type().Elem())
}
- case OADDSTR:
+ case ir.OADDSTR:
n = addstr(n, init)
- case OAPPEND:
+ case ir.OAPPEND:
// order should make sure we only see OAS(node, OAPPEND), which we handle above.
- Fatalf("append outside assignment")
+ base.Fatalf("append outside assignment")
- case OCOPY:
- n = copyany(n, init, instrumenting && !compiling_runtime)
+ case ir.OCOPY:
+ n = copyany(n, init, instrumenting && !base.Flag.CompilingRuntime)
// cannot use chanfn - closechan takes any, not chan any
- case OCLOSE:
+ case ir.OCLOSE:
fn := syslook("closechan")
- fn = substArgTypes(fn, n.Left.Type)
- n = mkcall1(fn, nil, init, n.Left)
+ fn = substArgTypes(fn, n.Left().Type())
+ n = mkcall1(fn, nil, init, n.Left())
- case OMAKECHAN:
+ case ir.OMAKECHAN:
// When size fits into int, use makechan instead of
// makechan64, which is faster and shorter on 32 bit platforms.
- size := n.Left
+ size := n.Left()
fnname := "makechan64"
- argtype := types.Types[TINT64]
+ argtype := types.Types[types.TINT64]
// Type checking guarantees that TIDEAL size is positive and fits in an int.
// The case of size overflow when converting TUINT or TUINTPTR to TINT
// will be handled by the negative range checks in makechan during runtime.
- if size.Type.IsKind(TIDEAL) || maxintval[size.Type.Etype].Cmp(maxintval[TUINT]) <= 0 {
+ if size.Type().IsKind(types.TIDEAL) || size.Type().Size() <= types.Types[types.TUINT].Size() {
fnname = "makechan"
- argtype = types.Types[TINT]
+ argtype = types.Types[types.TINT]
}
- n = mkcall1(chanfn(fnname, 1, n.Type), n.Type, init, typename(n.Type), conv(size, argtype))
+ n = mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, typename(n.Type()), conv(size, argtype))
- case OMAKEMAP:
- t := n.Type
+ case ir.OMAKEMAP:
+ t := n.Type()
hmapType := hmap(t)
- hint := n.Left
+ hint := n.Left()
// var h *hmap
- var h *Node
- if n.Esc == EscNone {
+ var h ir.Node
+ if n.Esc() == EscNone {
// Allocate hmap on stack.
// var hv hmap
hv := temp(hmapType)
- zero := nod(OAS, hv, nil)
+ zero := ir.Nod(ir.OAS, hv, nil)
zero = typecheck(zero, ctxStmt)
init.Append(zero)
// h = &hv
- h = nod(OADDR, hv, nil)
+ h = ir.Nod(ir.OADDR, hv, nil)
// Allocate one bucket pointed to by hmap.buckets on stack if hint
// is not larger than BUCKETSIZE. In case hint is larger than
// BUCKETSIZE runtime.makemap will allocate the buckets on the heap.
// Maximum key and elem size is 128 bytes, larger objects
// are stored with an indirection. So max bucket size is 2048+eps.
- if !Isconst(hint, CTINT) ||
- hint.Val().U.(*Mpint).CmpInt64(BUCKETSIZE) <= 0 {
+ if !ir.IsConst(hint, constant.Int) ||
+ constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(BUCKETSIZE)) {
// In case hint is larger than BUCKETSIZE runtime.makemap
// will allocate the buckets on the heap, see #20184
@@ -1233,21 +1237,21 @@ opswitch:
// h.buckets = b
// }
- nif := nod(OIF, nod(OLE, hint, nodintconst(BUCKETSIZE)), nil)
+ nif := ir.Nod(ir.OIF, ir.Nod(ir.OLE, hint, nodintconst(BUCKETSIZE)), nil)
nif.SetLikely(true)
// var bv bmap
bv := temp(bmap(t))
- zero = nod(OAS, bv, nil)
- nif.Nbody.Append(zero)
+ zero = ir.Nod(ir.OAS, bv, nil)
+ nif.PtrBody().Append(zero)
// b = &bv
- b := nod(OADDR, bv, nil)
+ b := ir.Nod(ir.OADDR, bv, nil)
// h.buckets = b
bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
- na := nod(OAS, nodSym(ODOT, h, bsym), b)
- nif.Nbody.Append(na)
+ na := ir.Nod(ir.OAS, nodSym(ir.ODOT, h, bsym), b)
+ nif.PtrBody().Append(na)
nif = typecheck(nif, ctxStmt)
nif = walkstmt(nif)
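
The OMAKEMAP hunks above stack-allocate both the hmap header and, when the hint may fit, one bmap bucket, wiring them together behind a branch marked likely. The generated shape, with hmap and bmap as schematic stand-ins for the runtime's real header and bucket types:

    package sketch

    const bucketSize = 8 // runtime BUCKETSIZE

    type hmap struct{ buckets *bmap }
    type bmap struct{ /* 8 key/elem pairs */ }

    // makemapOnStack shows the code walk emits for a non-escaping
    // make(map[K]V, hint) with a non-constant hint. (Returning the
    // value here is only so the sketch compiles; in the generated
    // code nothing escapes.)
    func makemapOnStack(hint int) hmap {
        var hv hmap // hmap allocated on the stack (EscNone)
        h := &hv
        if hint <= bucketSize { // nif, SetLikely(true)
            var bv bmap // one bucket, also on the stack
            h.buckets = &bv
        }
        return hv
    }
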
@@ -1255,7 +1259,7 @@ opswitch:
}
}
- if Isconst(hint, CTINT) && hint.Val().U.(*Mpint).CmpInt64(BUCKETSIZE) <= 0 {
+ if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(BUCKETSIZE)) {
// Handling make(map[any]any) and
// make(map[any]any, hint) where hint <= BUCKETSIZE
// special allows for faster map initialization and
@@ -1263,13 +1267,13 @@ opswitch:
// For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false
// and no buckets will be allocated by makemap. Therefore,
// no buckets need to be allocated in this code path.
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
// Only need to initialize h.hash0 since
// hmap h has been allocated on the stack already.
// h.hash0 = fastrand()
- rand := mkcall("fastrand", types.Types[TUINT32], init)
+ rand := mkcall("fastrand", types.Types[types.TUINT32], init)
hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap
- a := nod(OAS, nodSym(ODOT, h, hashsym), rand)
+ a := ir.Nod(ir.OAS, nodSym(ir.ODOT, h, hashsym), rand)
a = typecheck(a, ctxStmt)
a = walkexpr(a, init)
init.Append(a)
@@ -1279,10 +1283,10 @@ opswitch:
// hmap on the heap and initialize hmap's hash0 field.
fn := syslook("makemap_small")
fn = substArgTypes(fn, t.Key(), t.Elem())
- n = mkcall1(fn, n.Type, init)
+ n = mkcall1(fn, n.Type(), init)
}
} else {
- if n.Esc != EscNone {
+ if n.Esc() != EscNone {
h = nodnil()
}
// Map initialization with a variable or large hint is
@@ -1293,42 +1297,42 @@ opswitch:
// When hint fits into int, use makemap instead of
// makemap64, which is faster and shorter on 32 bit platforms.
fnname := "makemap64"
- argtype := types.Types[TINT64]
+ argtype := types.Types[types.TINT64]
// Type checking guarantees that TIDEAL hint is positive and fits in an int.
// See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function.
// The case of hint overflow when converting TUINT or TUINTPTR to TINT
// will be handled by the negative range checks in makemap during runtime.
- if hint.Type.IsKind(TIDEAL) || maxintval[hint.Type.Etype].Cmp(maxintval[TUINT]) <= 0 {
+ if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() {
fnname = "makemap"
- argtype = types.Types[TINT]
+ argtype = types.Types[types.TINT]
}
fn := syslook(fnname)
fn = substArgTypes(fn, hmapType, t.Key(), t.Elem())
- n = mkcall1(fn, n.Type, init, typename(n.Type), conv(hint, argtype), h)
+ n = mkcall1(fn, n.Type(), init, typename(n.Type()), conv(hint, argtype), h)
}
- case OMAKESLICE:
- l := n.Left
- r := n.Right
+ case ir.OMAKESLICE:
+ l := n.Left()
+ r := n.Right()
if r == nil {
r = safeexpr(l, init)
l = r
}
- t := n.Type
+ t := n.Type()
if t.Elem().NotInHeap() {
- yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
+ base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
}
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
if why := heapAllocReason(n); why != "" {
- Fatalf("%v has EscNone, but %v", n, why)
+ base.Fatalf("%v has EscNone, but %v", n, why)
}
// var arr [r]T
// n = arr[:l]
i := indexconst(r)
if i < 0 {
- Fatalf("walkexpr: invalid index %v", r)
+ base.Fatalf("walkexpr: invalid index %v", r)
}
// cap is constrained to [0,2^31) or [0,2^63) depending on whether
@@ -1338,21 +1342,21 @@ opswitch:
// if len < 0 { panicmakeslicelen() }
// panicmakeslicecap()
// }
- nif := nod(OIF, nod(OGT, conv(l, types.Types[TUINT64]), nodintconst(i)), nil)
- niflen := nod(OIF, nod(OLT, l, nodintconst(0)), nil)
- niflen.Nbody.Set1(mkcall("panicmakeslicelen", nil, init))
- nif.Nbody.Append(niflen, mkcall("panicmakeslicecap", nil, init))
+ nif := ir.Nod(ir.OIF, ir.Nod(ir.OGT, conv(l, types.Types[types.TUINT64]), nodintconst(i)), nil)
+ niflen := ir.Nod(ir.OIF, ir.Nod(ir.OLT, l, nodintconst(0)), nil)
+ niflen.PtrBody().Set1(mkcall("panicmakeslicelen", nil, init))
+ nif.PtrBody().Append(niflen, mkcall("panicmakeslicecap", nil, init))
nif = typecheck(nif, ctxStmt)
init.Append(nif)
t = types.NewArray(t.Elem(), i) // [r]T
var_ := temp(t)
- a := nod(OAS, var_, nil) // zero temp
+ a := ir.Nod(ir.OAS, var_, nil) // zero temp
a = typecheck(a, ctxStmt)
init.Append(a)
- r := nod(OSLICE, var_, nil) // arr[:l]
+ r := ir.Nod(ir.OSLICE, var_, nil) // arr[:l]
r.SetSliceBounds(nil, l, nil)
- r = conv(r, n.Type) // in case n.Type is named.
+ r = conv(r, n.Type()) // in case n.Type is named.
r = typecheck(r, ctxExpr)
r = walkexpr(r, init)
n = r
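
For a non-escaping make([]T, l) whose capacity is the constant i, the hunk above emits a guarded stack array and slices it. At source level the result behaves roughly like this, with panicmakeslicelen and panicmakeslicecap shown as plain panics:

    package sketch

    // makesliceOnStack is the source-level shape of the lowering:
    // a zeroed stack array sliced to the requested length, behind
    // the len/cap guards from the hunk above.
    func makesliceOnStack(l int) []byte {
        const i = 64 // the constant cap, indexconst(r)
        if uint64(l) > i {
            if l < 0 {
                panic("makeslice: len out of range") // panicmakeslicelen
            }
            panic("makeslice: cap out of range") // panicmakeslicecap
        }
        var arr [i]byte // var arr [r]T, zeroed
        return arr[:l]  // n = arr[:l] (escapes here only because we return it)
    }
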
@@ -1364,43 +1368,43 @@ opswitch:
len, cap := l, r
fnname := "makeslice64"
- argtype := types.Types[TINT64]
+ argtype := types.Types[types.TINT64]
// Type checking guarantees that TIDEAL len/cap are positive and fit in an int.
// The case of len or cap overflow when converting TUINT or TUINTPTR to TINT
// will be handled by the negative range checks in makeslice during runtime.
- if (len.Type.IsKind(TIDEAL) || maxintval[len.Type.Etype].Cmp(maxintval[TUINT]) <= 0) &&
- (cap.Type.IsKind(TIDEAL) || maxintval[cap.Type.Etype].Cmp(maxintval[TUINT]) <= 0) {
+ if (len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size()) &&
+ (cap.Type().IsKind(types.TIDEAL) || cap.Type().Size() <= types.Types[types.TUINT].Size()) {
fnname = "makeslice"
- argtype = types.Types[TINT]
+ argtype = types.Types[types.TINT]
}
- m := nod(OSLICEHEADER, nil, nil)
- m.Type = t
+ m := ir.Nod(ir.OSLICEHEADER, nil, nil)
+ m.SetType(t)
fn := syslook(fnname)
- m.Left = mkcall1(fn, types.Types[TUNSAFEPTR], init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype))
- m.Left.MarkNonNil()
- m.List.Set2(conv(len, types.Types[TINT]), conv(cap, types.Types[TINT]))
+ m.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype)))
+ m.Left().MarkNonNil()
+ m.PtrList().Set2(conv(len, types.Types[types.TINT]), conv(cap, types.Types[types.TINT]))
m = typecheck(m, ctxExpr)
m = walkexpr(m, init)
n = m
}
- case OMAKESLICECOPY:
- if n.Esc == EscNone {
- Fatalf("OMAKESLICECOPY with EscNone: %v", n)
+ case ir.OMAKESLICECOPY:
+ if n.Esc() == EscNone {
+ base.Fatalf("OMAKESLICECOPY with EscNone: %v", n)
}
- t := n.Type
+ t := n.Type()
if t.Elem().NotInHeap() {
- yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
+ base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
}
- length := conv(n.Left, types.Types[TINT])
- copylen := nod(OLEN, n.Right, nil)
- copyptr := nod(OSPTR, n.Right, nil)
+ length := conv(n.Left(), types.Types[types.TINT])
+ copylen := ir.Nod(ir.OLEN, n.Right(), nil)
+ copyptr := ir.Nod(ir.OSPTR, n.Right(), nil)
if !t.Elem().HasPointers() && n.Bounded() {
// When len(to)==len(from) and elements have no pointers:
@@ -1409,25 +1413,25 @@ opswitch:
// We do not check for overflow of len(to)*elem.Width here
// since len(from) is an existing checked slice capacity
// with same elem.Width for the from slice.
- size := nod(OMUL, conv(length, types.Types[TUINTPTR]), conv(nodintconst(t.Elem().Width), types.Types[TUINTPTR]))
+ size := ir.Nod(ir.OMUL, conv(length, types.Types[types.TUINTPTR]), conv(nodintconst(t.Elem().Width), types.Types[types.TUINTPTR]))
// instantiate mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer
fn := syslook("mallocgc")
- sh := nod(OSLICEHEADER, nil, nil)
- sh.Left = mkcall1(fn, types.Types[TUNSAFEPTR], init, size, nodnil(), nodbool(false))
- sh.Left.MarkNonNil()
- sh.List.Set2(length, length)
- sh.Type = t
+ sh := ir.Nod(ir.OSLICEHEADER, nil, nil)
+ sh.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, nodnil(), nodbool(false)))
+ sh.Left().MarkNonNil()
+ sh.PtrList().Set2(length, length)
+ sh.SetType(t)
s := temp(t)
- r := typecheck(nod(OAS, s, sh), ctxStmt)
+ r := typecheck(ir.Nod(ir.OAS, s, sh), ctxStmt)
r = walkexpr(r, init)
init.Append(r)
// instantiate memmove(to *any, frm *any, size uintptr)
fn = syslook("memmove")
fn = substArgTypes(fn, t.Elem(), t.Elem())
- ncopy := mkcall1(fn, nil, init, nod(OSPTR, s, nil), copyptr, size)
+ ncopy := mkcall1(fn, nil, init, ir.Nod(ir.OSPTR, s, nil), copyptr, size)
ncopy = typecheck(ncopy, ctxStmt)
ncopy = walkexpr(ncopy, init)
init.Append(ncopy)
@@ -1436,96 +1440,96 @@ opswitch:
} else { // Replace make+copy with runtime.makeslicecopy.
// instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
fn := syslook("makeslicecopy")
- s := nod(OSLICEHEADER, nil, nil)
- s.Left = mkcall1(fn, types.Types[TUNSAFEPTR], init, typename(t.Elem()), length, copylen, conv(copyptr, types.Types[TUNSAFEPTR]))
- s.Left.MarkNonNil()
- s.List.Set2(length, length)
- s.Type = t
+ s := ir.Nod(ir.OSLICEHEADER, nil, nil)
+ s.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), length, copylen, conv(copyptr, types.Types[types.TUNSAFEPTR])))
+ s.Left().MarkNonNil()
+ s.PtrList().Set2(length, length)
+ s.SetType(t)
n = typecheck(s, ctxExpr)
n = walkexpr(n, init)
}
- case ORUNESTR:
+ case ir.ORUNESTR:
a := nodnil()
- if n.Esc == EscNone {
- t := types.NewArray(types.Types[TUINT8], 4)
- a = nod(OADDR, temp(t), nil)
+ if n.Esc() == EscNone {
+ t := types.NewArray(types.Types[types.TUINT8], 4)
+ a = ir.Nod(ir.OADDR, temp(t), nil)
}
// intstring(*[4]byte, rune)
- n = mkcall("intstring", n.Type, init, a, conv(n.Left, types.Types[TINT64]))
+ n = mkcall("intstring", n.Type(), init, a, conv(n.Left(), types.Types[types.TINT64]))
- case OBYTES2STR, ORUNES2STR:
+ case ir.OBYTES2STR, ir.ORUNES2STR:
a := nodnil()
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
// Create temporary buffer for string on stack.
- t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
- a = nod(OADDR, temp(t), nil)
+ t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
+ a = ir.Nod(ir.OADDR, temp(t), nil)
}
- if n.Op == ORUNES2STR {
+ if n.Op() == ir.ORUNES2STR {
// slicerunetostring(*[32]byte, []rune) string
- n = mkcall("slicerunetostring", n.Type, init, a, n.Left)
+ n = mkcall("slicerunetostring", n.Type(), init, a, n.Left())
} else {
// slicebytetostring(*[32]byte, ptr *byte, n int) string
- n.Left = cheapexpr(n.Left, init)
- ptr, len := n.Left.backingArrayPtrLen()
- n = mkcall("slicebytetostring", n.Type, init, a, ptr, len)
+ n.SetLeft(cheapexpr(n.Left(), init))
+ ptr, len := backingArrayPtrLen(n.Left())
+ n = mkcall("slicebytetostring", n.Type(), init, a, ptr, len)
}
- case OBYTES2STRTMP:
- n.Left = walkexpr(n.Left, init)
+ case ir.OBYTES2STRTMP:
+ n.SetLeft(walkexpr(n.Left(), init))
if !instrumenting {
// Let the backend handle OBYTES2STRTMP directly
// to avoid a function call to slicebytetostringtmp.
break
}
// slicebytetostringtmp(ptr *byte, n int) string
- n.Left = cheapexpr(n.Left, init)
- ptr, len := n.Left.backingArrayPtrLen()
- n = mkcall("slicebytetostringtmp", n.Type, init, ptr, len)
+ n.SetLeft(cheapexpr(n.Left(), init))
+ ptr, len := backingArrayPtrLen(n.Left())
+ n = mkcall("slicebytetostringtmp", n.Type(), init, ptr, len)
- case OSTR2BYTES:
- s := n.Left
- if Isconst(s, CTSTR) {
+ case ir.OSTR2BYTES:
+ s := n.Left()
+ if ir.IsConst(s, constant.String) {
sc := s.StringVal()
// Allocate a [n]byte of the right size.
- t := types.NewArray(types.Types[TUINT8], int64(len(sc)))
- var a *Node
- if n.Esc == EscNone && len(sc) <= int(maxImplicitStackVarSize) {
- a = nod(OADDR, temp(t), nil)
+ t := types.NewArray(types.Types[types.TUINT8], int64(len(sc)))
+ var a ir.Node
+ if n.Esc() == EscNone && len(sc) <= int(maxImplicitStackVarSize) {
+ a = ir.Nod(ir.OADDR, temp(t), nil)
} else {
a = callnew(t)
}
p := temp(t.PtrTo()) // *[n]byte
- init.Append(typecheck(nod(OAS, p, a), ctxStmt))
+ init.Append(typecheck(ir.Nod(ir.OAS, p, a), ctxStmt))
// Copy from the static string data to the [n]byte.
if len(sc) > 0 {
- as := nod(OAS,
- nod(ODEREF, p, nil),
- nod(ODEREF, convnop(nod(OSPTR, s, nil), t.PtrTo()), nil))
+ as := ir.Nod(ir.OAS,
+ ir.Nod(ir.ODEREF, p, nil),
+ ir.Nod(ir.ODEREF, convnop(ir.Nod(ir.OSPTR, s, nil), t.PtrTo()), nil))
as = typecheck(as, ctxStmt)
as = walkstmt(as)
init.Append(as)
}
// Slice the [n]byte to a []byte.
- n.Op = OSLICEARR
- n.Left = p
+ n.SetOp(ir.OSLICEARR)
+ n.SetLeft(p)
n = walkexpr(n, init)
break
}
a := nodnil()
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
// Create temporary buffer for slice on stack.
- t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
- a = nod(OADDR, temp(t), nil)
+ t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
+ a = ir.Nod(ir.OADDR, temp(t), nil)
}
// stringtoslicebyte(*[32]byte, string) []byte
- n = mkcall("stringtoslicebyte", n.Type, init, a, conv(s, types.Types[TSTRING]))
+ n = mkcall("stringtoslicebyte", n.Type(), init, a, conv(s, types.Types[types.TSTRING]))
- case OSTR2BYTESTMP:
+ case ir.OSTR2BYTESTMP:
// []byte(string) conversion that creates a slice
// referring to the actual string bytes.
// This conversion is handled later by the backend and
@@ -1533,43 +1537,43 @@ opswitch:
// that know that the slice won't be mutated.
// The only such case today is:
// for i, c := range []byte(string)
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
- case OSTR2RUNES:
+ case ir.OSTR2RUNES:
a := nodnil()
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
// Create temporary buffer for slice on stack.
- t := types.NewArray(types.Types[TINT32], tmpstringbufsize)
- a = nod(OADDR, temp(t), nil)
+ t := types.NewArray(types.Types[types.TINT32], tmpstringbufsize)
+ a = ir.Nod(ir.OADDR, temp(t), nil)
}
// stringtoslicerune(*[32]rune, string) []rune
- n = mkcall("stringtoslicerune", n.Type, init, a, conv(n.Left, types.Types[TSTRING]))
+ n = mkcall("stringtoslicerune", n.Type(), init, a, conv(n.Left(), types.Types[types.TSTRING]))
- case OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT, OPTRLIT:
- if isStaticCompositeLiteral(n) && !canSSAType(n.Type) {
+ case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT, ir.OPTRLIT:
+ if isStaticCompositeLiteral(n) && !canSSAType(n.Type()) {
// n can be directly represented in the read-only data section.
// Make direct reference to the static data. See issue 12841.
- vstat := readonlystaticname(n.Type)
+ vstat := readonlystaticname(n.Type())
fixedlit(inInitFunction, initKindStatic, n, vstat, init)
n = vstat
n = typecheck(n, ctxExpr)
break
}
- var_ := temp(n.Type)
+ var_ := temp(n.Type())
anylit(n, var_, init)
n = var_
- case OSEND:
- n1 := n.Right
- n1 = assignconv(n1, n.Left.Type.Elem(), "chan send")
+ case ir.OSEND:
+ n1 := n.Right()
+ n1 = assignconv(n1, n.Left().Type().Elem(), "chan send")
n1 = walkexpr(n1, init)
- n1 = nod(OADDR, n1, nil)
- n = mkcall1(chanfn("chansend1", 2, n.Left.Type), nil, init, n.Left, n1)
+ n1 = ir.Nod(ir.OADDR, n1, nil)
+ n = mkcall1(chanfn("chansend1", 2, n.Left().Type()), nil, init, n.Left(), n1)
- case OCLOSURE:
+ case ir.OCLOSURE:
n = walkclosure(n, init)
- case OCALLPART:
+ case ir.OCALLPART:
n = walkpartialcall(n, init)
}
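
The OSTR2BYTES hunk above specializes []byte(s) for a constant s: allocate (or stack-place, when non-escaping and small) a [n]byte, bulk-copy the static string data once, then reuse the node as a slice of that array. The source-level shape, as a sketch:

    package sketch

    func bytesOfConst() []byte {
        const sc = "hello"
        p := new([len(sc)]byte) // callnew(t), or a stack temp when EscNone
        copy(p[:], sc)          // the ODEREF(OSPTR) copy of the static data
        return p[:]             // node reused as OSLICEARR over p
    }
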
@@ -1578,27 +1582,27 @@ opswitch:
// constants until walk. For example, if n is y%1 == 0, the
// walk of y%1 may have replaced it by 0.
// Check whether n with its updated args is itself now a constant.
- t := n.Type
- evconst(n)
- if n.Type != t {
- Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type)
+ t := n.Type()
+ n = evalConst(n)
+ if n.Type() != t {
+ base.Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type())
}
- if n.Op == OLITERAL {
+ if n.Op() == ir.OLITERAL {
n = typecheck(n, ctxExpr)
// Emit string symbol now to avoid emitting
// any concurrently during the backend.
- if s, ok := n.Val().U.(string); ok {
- _ = stringsym(n.Pos, s)
+ if v := n.Val(); v.Kind() == constant.String {
+ _ = stringsym(n.Pos(), constant.StringVal(v))
}
}
updateHasCall(n)
- if Debug.w != 0 && n != nil {
- Dump("after walk expr", n)
+ if base.Flag.LowerW != 0 && n != nil {
+ ir.Dump("after walk expr", n)
}
- lineno = lno
+ base.Pos = lno
return n
}
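
Besides the accessor renames, the tail of walkexpr shows the other migration running through this file: constant values move from the old Mpint/Val().U representation onto the standard go/constant package, so Isconst(n, CTSTR) becomes ir.IsConst(n, constant.String) and raw *Mpint comparisons become constant.Compare. The new idioms, side by side with the old ones:

    package sketch

    import (
        "go/constant"
        "go/token"
    )

    // Old: hint.Val().U.(*Mpint).CmpInt64(BUCKETSIZE) <= 0
    // New: constant.Compare with a token relation.
    func fitsInBucket(v constant.Value, bucketSize int64) bool {
        return constant.Compare(v, token.LEQ, constant.MakeInt64(bucketSize))
    }

    // Old: s, ok := n.Val().U.(string)
    // New: check the Kind, then extract with constant.StringVal.
    func stringOf(v constant.Value) (string, bool) {
        if v.Kind() == constant.String {
            return constant.StringVal(v), true
        }
        return "", false
    }
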
@@ -1615,14 +1619,14 @@ func markTypeUsedInInterface(t *types.Type, from *obj.LSym) {
// markUsedIfaceMethod marks that an interface method is used in the current
// function. n is OCALLINTER node.
-func markUsedIfaceMethod(n *Node) {
- ityp := n.Left.Left.Type
+func markUsedIfaceMethod(n ir.Node) {
+ ityp := n.Left().Left().Type()
tsym := typenamesym(ityp).Linksym()
- r := obj.Addrel(Curfn.Func.lsym)
+ r := obj.Addrel(Curfn.Func().LSym)
r.Sym = tsym
// n.Left.Xoffset is the method index * Widthptr (the offset of code pointer
// in itab).
- midx := n.Left.Xoffset / int64(Widthptr)
+ midx := n.Left().Offset() / int64(Widthptr)
r.Add = ifaceMethodOffset(ityp, midx)
r.Type = objabi.R_USEIFACEMETHOD
}
@@ -1634,76 +1638,76 @@ func markUsedIfaceMethod(n *Node) {
// If no such function is necessary, it returns (Txxx, Txxx).
func rtconvfn(src, dst *types.Type) (param, result types.EType) {
if thearch.SoftFloat {
- return Txxx, Txxx
+ return types.Txxx, types.Txxx
}
switch thearch.LinkArch.Family {
case sys.ARM, sys.MIPS:
if src.IsFloat() {
switch dst.Etype {
- case TINT64, TUINT64:
- return TFLOAT64, dst.Etype
+ case types.TINT64, types.TUINT64:
+ return types.TFLOAT64, dst.Etype
}
}
if dst.IsFloat() {
switch src.Etype {
- case TINT64, TUINT64:
- return src.Etype, TFLOAT64
+ case types.TINT64, types.TUINT64:
+ return src.Etype, types.TFLOAT64
}
}
case sys.I386:
if src.IsFloat() {
switch dst.Etype {
- case TINT64, TUINT64:
- return TFLOAT64, dst.Etype
- case TUINT32, TUINT, TUINTPTR:
- return TFLOAT64, TUINT32
+ case types.TINT64, types.TUINT64:
+ return types.TFLOAT64, dst.Etype
+ case types.TUINT32, types.TUINT, types.TUINTPTR:
+ return types.TFLOAT64, types.TUINT32
}
}
if dst.IsFloat() {
switch src.Etype {
- case TINT64, TUINT64:
- return src.Etype, TFLOAT64
- case TUINT32, TUINT, TUINTPTR:
- return TUINT32, TFLOAT64
+ case types.TINT64, types.TUINT64:
+ return src.Etype, types.TFLOAT64
+ case types.TUINT32, types.TUINT, types.TUINTPTR:
+ return types.TUINT32, types.TFLOAT64
}
}
}
- return Txxx, Txxx
+ return types.Txxx, types.Txxx
}
// TODO(josharian): combine this with its caller and simplify
-func reduceSlice(n *Node) *Node {
+func reduceSlice(n ir.Node) ir.Node {
low, high, max := n.SliceBounds()
- if high != nil && high.Op == OLEN && samesafeexpr(n.Left, high.Left) {
+ if high != nil && high.Op() == ir.OLEN && samesafeexpr(n.Left(), high.Left()) {
// Reduce x[i:len(x)] to x[i:].
high = nil
}
n.SetSliceBounds(low, high, max)
- if (n.Op == OSLICE || n.Op == OSLICESTR) && low == nil && high == nil {
+ if (n.Op() == ir.OSLICE || n.Op() == ir.OSLICESTR) && low == nil && high == nil {
// Reduce x[:] to x.
- if Debug_slice > 0 {
- Warn("slice: omit slice operation")
+ if base.Debug.Slice > 0 {
+ base.Warn("slice: omit slice operation")
}
- return n.Left
+ return n.Left()
}
return n
}
-func ascompatee1(l *Node, r *Node, init *Nodes) *Node {
+func ascompatee1(l ir.Node, r ir.Node, init *ir.Nodes) ir.Node {
// convas will turn map assigns into function calls,
// making it impossible for reorder3 to work.
- n := nod(OAS, l, r)
+ n := ir.Nod(ir.OAS, l, r)
- if l.Op == OINDEXMAP {
+ if l.Op() == ir.OINDEXMAP {
return n
}
return convas(n, init)
}
-func ascompatee(op Op, nl, nr []*Node, init *Nodes) []*Node {
+func ascompatee(op ir.Op, nl, nr []ir.Node, init *ir.Nodes) []ir.Node {
// check assign expression list to
// an expression list. called in
// expr-list = expr-list
@@ -1716,14 +1720,14 @@ func ascompatee(op Op, nl, nr []*Node, init *Nodes) []*Node {
nr[i1] = safeexpr(nr[i1], init)
}
- var nn []*Node
+ var nn []ir.Node
i := 0
for ; i < len(nl); i++ {
if i >= len(nr) {
break
}
// Do not generate 'x = x' during return. See issue 4014.
- if op == ORETURN && samesafeexpr(nl[i], nr[i]) {
+ if op == ir.ORETURN && samesafeexpr(nl[i], nr[i]) {
continue
}
nn = append(nn, ascompatee1(nl[i], nr[i], init))
@@ -1731,20 +1735,20 @@ func ascompatee(op Op, nl, nr []*Node, init *Nodes) []*Node {
// cannot happen: caller checked that the lists have the same length
if i < len(nl) || i < len(nr) {
- var nln, nrn Nodes
+ var nln, nrn ir.Nodes
nln.Set(nl)
nrn.Set(nr)
- Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), Curfn.funcname())
+ base.Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), ir.FuncName(Curfn))
}
return nn
}
// fncall reports whether assigning an rvalue of type rt to an lvalue l might involve a function call.
-func fncall(l *Node, rt *types.Type) bool {
- if l.HasCall() || l.Op == OINDEXMAP {
+func fncall(l ir.Node, rt *types.Type) bool {
+ if l.HasCall() || l.Op() == ir.OINDEXMAP {
return true
}
- if types.Identical(l.Type, rt) {
+ if types.Identical(l.Type(), rt) {
return false
}
// There might be a conversion required, which might involve a runtime call.
@@ -1754,14 +1758,14 @@ func fncall(l *Node, rt *types.Type) bool {
// ascompatet checks assignment of a type list to
// an expression list. It is called for
//	expr-list = func()
-func ascompatet(nl Nodes, nr *types.Type) []*Node {
+func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node {
if nl.Len() != nr.NumFields() {
- Fatalf("ascompatet: assignment count mismatch: %d = %d", nl.Len(), nr.NumFields())
+ base.Fatalf("ascompatet: assignment count mismatch: %d = %d", nl.Len(), nr.NumFields())
}
- var nn, mm Nodes
+ var nn, mm ir.Nodes
for i, l := range nl.Slice() {
- if l.isBlank() {
+ if ir.IsBlank(l) {
continue
}
r := nr.Field(i)
@@ -1771,23 +1775,23 @@ func ascompatet(nl Nodes, nr *types.Type) []*Node {
if fncall(l, r.Type) {
tmp := temp(r.Type)
tmp = typecheck(tmp, ctxExpr)
- a := nod(OAS, l, tmp)
+ a := ir.Nod(ir.OAS, l, tmp)
a = convas(a, &mm)
mm.Append(a)
l = tmp
}
- res := nod(ORESULT, nil, nil)
- res.Xoffset = Ctxt.FixedFrameSize() + r.Offset
- res.Type = r.Type
+ res := ir.Nod(ir.ORESULT, nil, nil)
+ res.SetOffset(base.Ctxt.FixedFrameSize() + r.Offset)
+ res.SetType(r.Type)
res.SetTypecheck(1)
- a := nod(OAS, l, res)
+ a := ir.Nod(ir.OAS, l, res)
a = convas(a, &nn)
updateHasCall(a)
if a.HasCall() {
- Dump("ascompatet ucount", a)
- Fatalf("ascompatet: too many function calls evaluating parameters")
+ ir.Dump("ascompatet ucount", a)
+ base.Fatalf("ascompatet: too many function calls evaluating parameters")
}
nn.Append(a)
@@ -1796,28 +1800,28 @@ func ascompatet(nl Nodes, nr *types.Type) []*Node {
}
// package all the arguments that match a ... T parameter into a []T.
-func mkdotargslice(typ *types.Type, args []*Node) *Node {
- var n *Node
+func mkdotargslice(typ *types.Type, args []ir.Node) ir.Node {
+ var n ir.Node
if len(args) == 0 {
n = nodnil()
- n.Type = typ
+ n.SetType(typ)
} else {
- n = nod(OCOMPLIT, nil, typenod(typ))
- n.List.Append(args...)
+ n = ir.Nod(ir.OCOMPLIT, nil, typenod(typ))
+ n.PtrList().Append(args...)
n.SetImplicit(true)
}
n = typecheck(n, ctxExpr)
- if n.Type == nil {
- Fatalf("mkdotargslice: typecheck failed")
+ if n.Type() == nil {
+ base.Fatalf("mkdotargslice: typecheck failed")
}
return n
}
// fixVariadicCall rewrites calls to variadic functions to use an
// explicit ... argument if one is not already present.
-func fixVariadicCall(call *Node) {
- fntype := call.Left.Type
+func fixVariadicCall(call ir.Node) {
+ fntype := call.Left().Type()
if !fntype.IsVariadic() || call.IsDDD() {
return
}
@@ -1825,33 +1829,33 @@ func fixVariadicCall(call *Node) {
vi := fntype.NumParams() - 1
vt := fntype.Params().Field(vi).Type
- args := call.List.Slice()
+ args := call.List().Slice()
extra := args[vi:]
slice := mkdotargslice(vt, extra)
for i := range extra {
extra[i] = nil // allow GC
}
- call.List.Set(append(args[:vi], slice))
+ call.PtrList().Set(append(args[:vi], slice))
call.SetIsDDD(true)
}
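// Illustrative sketch (not part of this change): given
//
//	func f(x int, ys ...string)
//	f(1, "a", "b")
//
// fixVariadicCall uses mkdotargslice to rewrite the call into
//
//	f(1, []string{"a", "b"}...)
//
// so that later phases only ever see the explicit ... form.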
-func walkCall(n *Node, init *Nodes) {
- if n.Rlist.Len() != 0 {
+func walkCall(n ir.Node, init *ir.Nodes) {
+ if n.Rlist().Len() != 0 {
return // already walked
}
- params := n.Left.Type.Params()
- args := n.List.Slice()
+ params := n.Left().Type().Params()
+ args := n.List().Slice()
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
walkexprlist(args, init)
// If this is a method call, add the receiver at the beginning of the args.
- if n.Op == OCALLMETH {
- withRecv := make([]*Node, len(args)+1)
- withRecv[0] = n.Left.Left
- n.Left.Left = nil
+ if n.Op() == ir.OCALLMETH {
+ withRecv := make([]ir.Node, len(args)+1)
+ withRecv[0] = n.Left().Left()
+ n.Left().SetLeft(nil)
copy(withRecv[1:], args)
args = withRecv
}
@@ -1860,14 +1864,14 @@ func walkCall(n *Node, init *Nodes) {
// store that argument into a temporary variable,
// to prevent such calls from clobbering arguments already on the stack.
// When instrumenting, all arguments might require function calls.
- var tempAssigns []*Node
+ var tempAssigns []ir.Node
for i, arg := range args {
updateHasCall(arg)
// Determine param type.
var t *types.Type
- if n.Op == OCALLMETH {
+ if n.Op() == ir.OCALLMETH {
if i == 0 {
- t = n.Left.Type.Recv().Type
+ t = n.Left().Type().Recv().Type
} else {
t = params.Field(i - 1).Type
}
@@ -1877,7 +1881,7 @@ func walkCall(n *Node, init *Nodes) {
if instrumenting || fncall(arg, t) {
// make assignment of fncall to tempAt
tmp := temp(t)
- a := nod(OAS, tmp, arg)
+ a := ir.Nod(ir.OAS, tmp, arg)
a = convas(a, init)
tempAssigns = append(tempAssigns, a)
// replace arg with temp
@@ -1885,19 +1889,19 @@ func walkCall(n *Node, init *Nodes) {
}
}
- n.List.Set(tempAssigns)
- n.Rlist.Set(args)
+ n.PtrList().Set(tempAssigns)
+ n.PtrRlist().Set(args)
}
// walkprint generates code for print and println calls.
-func walkprint(nn *Node, init *Nodes) *Node {
+func walkprint(nn ir.Node, init *ir.Nodes) ir.Node {
// Hoist all the argument evaluation up before the lock.
- walkexprlistcheap(nn.List.Slice(), init)
+ walkexprlistcheap(nn.List().Slice(), init)
// For println, add " " between elements and "\n" at the end.
- if nn.Op == OPRINTN {
- s := nn.List.Slice()
- t := make([]*Node, 0, len(s)*2)
+ if nn.Op() == ir.OPRINTN {
+ s := nn.List().Slice()
+ t := make([]ir.Node, 0, len(s)*2)
for i, n := range s {
if i != 0 {
t = append(t, nodstr(" "))
@@ -1905,15 +1909,15 @@ func walkprint(nn *Node, init *Nodes) *Node {
t = append(t, n)
}
t = append(t, nodstr("\n"))
- nn.List.Set(t)
+ nn.PtrList().Set(t)
}
// Collapse runs of constant strings.
- s := nn.List.Slice()
- t := make([]*Node, 0, len(s))
+ s := nn.List().Slice()
+ t := make([]ir.Node, 0, len(s))
for i := 0; i < len(s); {
var strs []string
- for i < len(s) && Isconst(s[i], CTSTR) {
+ for i < len(s) && ir.IsConst(s[i], constant.String) {
strs = append(strs, s[i].StringVal())
i++
}
@@ -1925,74 +1929,75 @@ func walkprint(nn *Node, init *Nodes) *Node {
i++
}
}
- nn.List.Set(t)
+ nn.PtrList().Set(t)
- calls := []*Node{mkcall("printlock", nil, init)}
- for i, n := range nn.List.Slice() {
- if n.Op == OLITERAL {
- switch n.Val().Ctype() {
- case CTRUNE:
+ calls := []ir.Node{mkcall("printlock", nil, init)}
+ for i, n := range nn.List().Slice() {
+ if n.Op() == ir.OLITERAL {
+ if n.Type() == types.UntypedRune {
n = defaultlit(n, types.Runetype)
+ }
- case CTINT:
- n = defaultlit(n, types.Types[TINT64])
+ switch n.Val().Kind() {
+ case constant.Int:
+ n = defaultlit(n, types.Types[types.TINT64])
- case CTFLT:
- n = defaultlit(n, types.Types[TFLOAT64])
+ case constant.Float:
+ n = defaultlit(n, types.Types[types.TFLOAT64])
}
}
- if n.Op != OLITERAL && n.Type != nil && n.Type.Etype == TIDEAL {
- n = defaultlit(n, types.Types[TINT64])
+ if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().Etype == types.TIDEAL {
+ n = defaultlit(n, types.Types[types.TINT64])
}
n = defaultlit(n, nil)
- nn.List.SetIndex(i, n)
- if n.Type == nil || n.Type.Etype == TFORW {
+ nn.List().SetIndex(i, n)
+ if n.Type() == nil || n.Type().Etype == types.TFORW {
continue
}
- var on *Node
- switch n.Type.Etype {
- case TINTER:
- if n.Type.IsEmptyInterface() {
+ var on ir.Node
+ switch n.Type().Etype {
+ case types.TINTER:
+ if n.Type().IsEmptyInterface() {
on = syslook("printeface")
} else {
on = syslook("printiface")
}
- on = substArgTypes(on, n.Type) // any-1
- case TPTR:
- if n.Type.Elem().NotInHeap() {
+ on = substArgTypes(on, n.Type()) // any-1
+ case types.TPTR:
+ if n.Type().Elem().NotInHeap() {
on = syslook("printuintptr")
- n = nod(OCONV, n, nil)
- n.Type = types.Types[TUNSAFEPTR]
- n = nod(OCONV, n, nil)
- n.Type = types.Types[TUINTPTR]
+ n = ir.Nod(ir.OCONV, n, nil)
+ n.SetType(types.Types[types.TUNSAFEPTR])
+ n = ir.Nod(ir.OCONV, n, nil)
+ n.SetType(types.Types[types.TUINTPTR])
break
}
fallthrough
- case TCHAN, TMAP, TFUNC, TUNSAFEPTR:
+ case types.TCHAN, types.TMAP, types.TFUNC, types.TUNSAFEPTR:
on = syslook("printpointer")
- on = substArgTypes(on, n.Type) // any-1
- case TSLICE:
+ on = substArgTypes(on, n.Type()) // any-1
+ case types.TSLICE:
on = syslook("printslice")
- on = substArgTypes(on, n.Type) // any-1
- case TUINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINTPTR:
- if isRuntimePkg(n.Type.Sym.Pkg) && n.Type.Sym.Name == "hex" {
+ on = substArgTypes(on, n.Type()) // any-1
+ case types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR:
+ if isRuntimePkg(n.Type().Sym.Pkg) && n.Type().Sym.Name == "hex" {
on = syslook("printhex")
} else {
on = syslook("printuint")
}
- case TINT, TINT8, TINT16, TINT32, TINT64:
+ case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64:
on = syslook("printint")
- case TFLOAT32, TFLOAT64:
+ case types.TFLOAT32, types.TFLOAT64:
on = syslook("printfloat")
- case TCOMPLEX64, TCOMPLEX128:
+ case types.TCOMPLEX64, types.TCOMPLEX128:
on = syslook("printcomplex")
- case TBOOL:
+ case types.TBOOL:
on = syslook("printbool")
- case TSTRING:
+ case types.TSTRING:
cs := ""
- if Isconst(n, CTSTR) {
+ if ir.IsConst(n, constant.String) {
cs = n.StringVal()
}
switch cs {
@@ -2004,18 +2009,18 @@ func walkprint(nn *Node, init *Nodes) *Node {
on = syslook("printstring")
}
default:
- badtype(OPRINT, n.Type, nil)
+ badtype(ir.OPRINT, n.Type(), nil)
continue
}
- r := nod(OCALL, on, nil)
- if params := on.Type.Params().FieldSlice(); len(params) > 0 {
+ r := ir.Nod(ir.OCALL, on, nil)
+ if params := on.Type().Params().FieldSlice(); len(params) > 0 {
t := params[0].Type
- if !types.Identical(t, n.Type) {
- n = nod(OCONV, n, nil)
- n.Type = t
+ if !types.Identical(t, n.Type()) {
+ n = ir.Nod(ir.OCONV, n, nil)
+ n.SetType(t)
}
- r.List.Append(n)
+ r.PtrList().Append(n)
}
calls = append(calls, r)
}
@@ -2025,17 +2030,17 @@ func walkprint(nn *Node, init *Nodes) *Node {
typecheckslice(calls, ctxStmt)
walkexprlist(calls, init)
- r := nod(OEMPTY, nil, nil)
+ r := ir.Nod(ir.OEMPTY, nil, nil)
r = typecheck(r, ctxStmt)
r = walkexpr(r, init)
- r.Ninit.Set(calls)
+ r.PtrInit().Set(calls)
return r
}
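// Lowering sketch (illustrative, not part of this change): for x of type
// int,
//
//	println("x =", x)
//
// becomes, after separator insertion and constant-string collapsing,
// roughly
//
//	printlock()
//	printstring("x = ")
//	printint(int64(x))
//	printnl()
//	printunlock()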
-func callnew(t *types.Type) *Node {
+func callnew(t *types.Type) ir.Node {
dowidth(t)
- n := nod(ONEWOBJ, typename(t), nil)
- n.Type = types.NewPtr(t)
+ n := ir.Nod(ir.ONEWOBJ, typename(t), nil)
+ n.SetType(types.NewPtr(t))
n.SetTypecheck(1)
n.MarkNonNil()
return n
@@ -2043,55 +2048,55 @@ func callnew(t *types.Type) *Node {
// isReflectHeaderDataField reports whether l is an expression p.Data
// where p has type reflect.SliceHeader or reflect.StringHeader.
-func isReflectHeaderDataField(l *Node) bool {
- if l.Type != types.Types[TUINTPTR] {
+func isReflectHeaderDataField(l ir.Node) bool {
+ if l.Type() != types.Types[types.TUINTPTR] {
return false
}
var tsym *types.Sym
- switch l.Op {
- case ODOT:
- tsym = l.Left.Type.Sym
- case ODOTPTR:
- tsym = l.Left.Type.Elem().Sym
+ switch l.Op() {
+ case ir.ODOT:
+ tsym = l.Left().Type().Sym
+ case ir.ODOTPTR:
+ tsym = l.Left().Type().Elem().Sym
default:
return false
}
- if tsym == nil || l.Sym.Name != "Data" || tsym.Pkg.Path != "reflect" {
+ if tsym == nil || l.Sym().Name != "Data" || tsym.Pkg.Path != "reflect" {
return false
}
return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader"
}
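// Illustrative example (not part of this change): for
//
//	var h *reflect.SliceHeader
//	h.Data = uintptr(p)
//
// the left-hand side is an ODOTPTR whose tsym is SliceHeader in package
// reflect, so the function reports true; callers can then treat the store
// as pointer-like even though the field's declared type is uintptr.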
-func convas(n *Node, init *Nodes) *Node {
- if n.Op != OAS {
- Fatalf("convas: not OAS %v", n.Op)
+func convas(n ir.Node, init *ir.Nodes) ir.Node {
+ if n.Op() != ir.OAS {
+ base.Fatalf("convas: not OAS %v", n.Op())
}
defer updateHasCall(n)
n.SetTypecheck(1)
- if n.Left == nil || n.Right == nil {
+ if n.Left() == nil || n.Right() == nil {
return n
}
- lt := n.Left.Type
- rt := n.Right.Type
+ lt := n.Left().Type()
+ rt := n.Right().Type()
if lt == nil || rt == nil {
return n
}
- if n.Left.isBlank() {
- n.Right = defaultlit(n.Right, nil)
+ if ir.IsBlank(n.Left()) {
+ n.SetRight(defaultlit(n.Right(), nil))
return n
}
if !types.Identical(lt, rt) {
- n.Right = assignconv(n.Right, lt, "assignment")
- n.Right = walkexpr(n.Right, init)
+ n.SetRight(assignconv(n.Right(), lt, "assignment"))
+ n.SetRight(walkexpr(n.Right(), init))
}
- dowidth(n.Right.Type)
+ dowidth(n.Right().Type())
return n
}
@@ -2102,53 +2107,53 @@ func convas(n *Node, init *Nodes) *Node {
// be later use of an earlier lvalue.
//
// function calls have been removed.
-func reorder3(all []*Node) []*Node {
+func reorder3(all []ir.Node) []ir.Node {
// If a needed expression may be affected by an
// earlier assignment, make an early copy of that
// expression and use the copy instead.
- var early []*Node
+ var early []ir.Node
- var mapinit Nodes
+ var mapinit ir.Nodes
for i, n := range all {
- l := n.Left
+ l := n.Left()
// Save subexpressions needed on left side.
// Drill through non-dereferences.
for {
- if l.Op == ODOT || l.Op == OPAREN {
- l = l.Left
+ if l.Op() == ir.ODOT || l.Op() == ir.OPAREN {
+ l = l.Left()
continue
}
- if l.Op == OINDEX && l.Left.Type.IsArray() {
- l.Right = reorder3save(l.Right, all, i, &early)
- l = l.Left
+ if l.Op() == ir.OINDEX && l.Left().Type().IsArray() {
+ l.SetRight(reorder3save(l.Right(), all, i, &early))
+ l = l.Left()
continue
}
break
}
- switch l.Op {
+ switch l.Op() {
default:
- Fatalf("reorder3 unexpected lvalue %#v", l.Op)
+ base.Fatalf("reorder3 unexpected lvalue %#v", l.Op())
- case ONAME:
+ case ir.ONAME:
break
- case OINDEX, OINDEXMAP:
- l.Left = reorder3save(l.Left, all, i, &early)
- l.Right = reorder3save(l.Right, all, i, &early)
- if l.Op == OINDEXMAP {
+ case ir.OINDEX, ir.OINDEXMAP:
+ l.SetLeft(reorder3save(l.Left(), all, i, &early))
+ l.SetRight(reorder3save(l.Right(), all, i, &early))
+ if l.Op() == ir.OINDEXMAP {
all[i] = convas(all[i], &mapinit)
}
- case ODEREF, ODOTPTR:
- l.Left = reorder3save(l.Left, all, i, &early)
+ case ir.ODEREF, ir.ODOTPTR:
+ l.SetLeft(reorder3save(l.Left(), all, i, &early))
}
// Save expression on right side.
- all[i].Right = reorder3save(all[i].Right, all, i, &early)
+ all[i].SetRight(reorder3save(all[i].Right(), all, i, &early))
}
early = append(mapinit.Slice(), early...)
@@ -2161,31 +2166,31 @@ func reorder3(all []*Node) []*Node {
// replace *np with that temp.
// The result of reorder3save MUST be assigned back to n, e.g.
//	n.SetLeft(reorder3save(n.Left(), all, i, early))
-func reorder3save(n *Node, all []*Node, i int, early *[]*Node) *Node {
+func reorder3save(n ir.Node, all []ir.Node, i int, early *[]ir.Node) ir.Node {
if !aliased(n, all[:i]) {
return n
}
- q := temp(n.Type)
- q = nod(OAS, q, n)
+ q := temp(n.Type())
+ q = ir.Nod(ir.OAS, q, n)
q = typecheck(q, ctxStmt)
*early = append(*early, q)
- return q.Left
+ return q.Left()
}
// outervalue returns the outer value that a write to n affects;
// the outer value means the containing struct or array.
-func outervalue(n *Node) *Node {
+func outervalue(n ir.Node) ir.Node {
for {
- switch n.Op {
- case OXDOT:
- Fatalf("OXDOT in walk")
- case ODOT, OPAREN, OCONVNOP:
- n = n.Left
+ switch n.Op() {
+ case ir.OXDOT:
+ base.Fatalf("OXDOT in walk")
+ case ir.ODOT, ir.OPAREN, ir.OCONVNOP:
+ n = n.Left()
continue
- case OINDEX:
- if n.Left.Type != nil && n.Left.Type.IsArray() {
- n = n.Left
+ case ir.OINDEX:
+ if n.Left().Type() != nil && n.Left().Type().IsArray() {
+ n = n.Left()
continue
}
}
@@ -2196,15 +2201,15 @@ func outervalue(n *Node) *Node {
// aliased reports whether the computation of r might be
// affected by assignments in all.
-func aliased(r *Node, all []*Node) bool {
+func aliased(r ir.Node, all []ir.Node) bool {
if r == nil {
return false
}
// Treat all fields of a struct as referring to the whole struct.
// We could do better but we would have to keep track of the fields.
- for r.Op == ODOT {
- r = r.Left
+ for r.Op() == ir.ODOT {
+ r = r.Left()
}
// Look for obvious aliasing: a variable being assigned
@@ -2215,26 +2220,26 @@ func aliased(r *Node, all []*Node) bool {
memwrite := false
for _, as := range all {
// We can ignore assignments to blank.
- if as.Left.isBlank() {
+ if ir.IsBlank(as.Left()) {
continue
}
- l := outervalue(as.Left)
- if l.Op != ONAME {
+ l := outervalue(as.Left())
+ if l.Op() != ir.ONAME {
memwrite = true
continue
}
switch l.Class() {
default:
- Fatalf("unexpected class: %v, %v", l, l.Class())
+ base.Fatalf("unexpected class: %v, %v", l, l.Class())
- case PAUTOHEAP, PEXTERN:
+ case ir.PAUTOHEAP, ir.PEXTERN:
memwrite = true
continue
- case PAUTO, PPARAM, PPARAMOUT:
- if l.Name.Addrtaken() {
+ case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
+ if l.Name().Addrtaken() {
memwrite = true
continue
}
@@ -2270,51 +2275,51 @@ func aliased(r *Node, all []*Node) bool {
// varexpr reports whether the evaluation of n refers only to variables
// whose addresses have not been taken
// (and to no other memory).
-func varexpr(n *Node) bool {
+func varexpr(n ir.Node) bool {
if n == nil {
return true
}
- switch n.Op {
- case OLITERAL:
+ switch n.Op() {
+ case ir.OLITERAL, ir.ONIL:
return true
- case ONAME:
+ case ir.ONAME:
switch n.Class() {
- case PAUTO, PPARAM, PPARAMOUT:
- if !n.Name.Addrtaken() {
+ case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
+ if !n.Name().Addrtaken() {
return true
}
}
return false
- case OADD,
- OSUB,
- OOR,
- OXOR,
- OMUL,
- ODIV,
- OMOD,
- OLSH,
- ORSH,
- OAND,
- OANDNOT,
- OPLUS,
- ONEG,
- OBITNOT,
- OPAREN,
- OANDAND,
- OOROR,
- OCONV,
- OCONVNOP,
- OCONVIFACE,
- ODOTTYPE:
- return varexpr(n.Left) && varexpr(n.Right)
-
- case ODOT: // but not ODOTPTR
+ case ir.OADD,
+ ir.OSUB,
+ ir.OOR,
+ ir.OXOR,
+ ir.OMUL,
+ ir.ODIV,
+ ir.OMOD,
+ ir.OLSH,
+ ir.ORSH,
+ ir.OAND,
+ ir.OANDNOT,
+ ir.OPLUS,
+ ir.ONEG,
+ ir.OBITNOT,
+ ir.OPAREN,
+ ir.OANDAND,
+ ir.OOROR,
+ ir.OCONV,
+ ir.OCONVNOP,
+ ir.OCONVIFACE,
+ ir.ODOTTYPE:
+ return varexpr(n.Left()) && varexpr(n.Right())
+
+ case ir.ODOT: // but not ODOTPTR
// Should have been handled in aliased.
- Fatalf("varexpr unexpected ODOT")
+ base.Fatalf("varexpr unexpected ODOT")
}
// Be conservative.
@@ -2322,26 +2327,26 @@ func varexpr(n *Node) bool {
}
// is the name l mentioned in r?
-func vmatch2(l *Node, r *Node) bool {
+func vmatch2(l ir.Node, r ir.Node) bool {
if r == nil {
return false
}
- switch r.Op {
+ switch r.Op() {
// match each right given left
- case ONAME:
+ case ir.ONAME:
return l == r
- case OLITERAL:
+ case ir.OLITERAL, ir.ONIL:
return false
}
- if vmatch2(l, r.Left) {
+ if vmatch2(l, r.Left()) {
return true
}
- if vmatch2(l, r.Right) {
+ if vmatch2(l, r.Right()) {
return true
}
- for _, n := range r.List.Slice() {
+ for _, n := range r.List().Slice() {
if vmatch2(l, n) {
return true
}
@@ -2351,15 +2356,15 @@ func vmatch2(l *Node, r *Node) bool {
// is any name mentioned in l also mentioned in r?
// called by sinit.go
-func vmatch1(l *Node, r *Node) bool {
+func vmatch1(l ir.Node, r ir.Node) bool {
// isolate all left sides
if l == nil || r == nil {
return false
}
- switch l.Op {
- case ONAME:
+ switch l.Op() {
+ case ir.ONAME:
switch l.Class() {
- case PPARAM, PAUTO:
+ case ir.PPARAM, ir.PAUTO:
break
default:
@@ -2372,17 +2377,17 @@ func vmatch1(l *Node, r *Node) bool {
return vmatch2(l, r)
- case OLITERAL:
+ case ir.OLITERAL, ir.ONIL:
return false
}
- if vmatch1(l.Left, r) {
+ if vmatch1(l.Left(), r) {
return true
}
- if vmatch1(l.Right, r) {
+ if vmatch1(l.Right(), r) {
return true
}
- for _, n := range l.List.Slice() {
+ for _, n := range l.List().Slice() {
if vmatch1(n, r) {
return true
}
@@ -2392,21 +2397,21 @@ func vmatch1(l *Node, r *Node) bool {
// paramstoheap returns code to allocate memory for heap-escaped parameters
// and to copy non-result parameters' values from the stack.
-func paramstoheap(params *types.Type) []*Node {
- var nn []*Node
+func paramstoheap(params *types.Type) []ir.Node {
+ var nn []ir.Node
for _, t := range params.Fields().Slice() {
- v := asNode(t.Nname)
- if v != nil && v.Sym != nil && strings.HasPrefix(v.Sym.Name, "~r") { // unnamed result
+ v := ir.AsNode(t.Nname)
+ if v != nil && v.Sym() != nil && strings.HasPrefix(v.Sym().Name, "~r") { // unnamed result
v = nil
}
if v == nil {
continue
}
- if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil {
- nn = append(nn, walkstmt(nod(ODCL, v, nil)))
- if stackcopy.Class() == PPARAM {
- nn = append(nn, walkstmt(typecheck(nod(OAS, v, stackcopy), ctxStmt)))
+ if stackcopy := v.Name().Param.Stackcopy; stackcopy != nil {
+ nn = append(nn, walkstmt(ir.Nod(ir.ODCL, v, nil)))
+ if stackcopy.Class() == ir.PPARAM {
+ nn = append(nn, walkstmt(typecheck(ir.Nod(ir.OAS, v, stackcopy), ctxStmt)))
}
}
}
@@ -2422,39 +2427,39 @@ func paramstoheap(params *types.Type) []*Node {
// even allocations to move params/results to the heap.
// The generated code is added to Curfn's Enter list.
func zeroResults() {
- for _, f := range Curfn.Type.Results().Fields().Slice() {
- v := asNode(f.Nname)
- if v != nil && v.Name.Param.Heapaddr != nil {
+ for _, f := range Curfn.Type().Results().Fields().Slice() {
+ v := ir.AsNode(f.Nname)
+ if v != nil && v.Name().Param.Heapaddr != nil {
// The local which points to the return value is the
// thing that needs zeroing. This is already handled
// by a Needzero annotation in plive.go:livenessepilogue.
continue
}
- if v.isParamHeapCopy() {
+ if isParamHeapCopy(v) {
// TODO(josharian/khr): Investigate whether we can switch to "continue" here,
// and document more in either case.
// In the review of CL 114797, Keith wrote (roughly):
// I don't think the zeroing below matters.
// The stack return value will never be marked as live anywhere in the function.
// It is not written to until deferreturn returns.
- v = v.Name.Param.Stackcopy
+ v = v.Name().Param.Stackcopy
}
// Zero the stack location containing f.
- Curfn.Func.Enter.Append(nodl(Curfn.Pos, OAS, v, nil))
+ Curfn.Func().Enter.Append(ir.NodAt(Curfn.Pos(), ir.OAS, v, nil))
}
}
// returnsfromheap returns code to copy values for heap-escaped parameters
// back to the stack.
-func returnsfromheap(params *types.Type) []*Node {
- var nn []*Node
+func returnsfromheap(params *types.Type) []ir.Node {
+ var nn []ir.Node
for _, t := range params.Fields().Slice() {
- v := asNode(t.Nname)
+ v := ir.AsNode(t.Nname)
if v == nil {
continue
}
- if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil && stackcopy.Class() == PPARAMOUT {
- nn = append(nn, walkstmt(typecheck(nod(OAS, stackcopy, v), ctxStmt)))
+ if stackcopy := v.Name().Param.Stackcopy; stackcopy != nil && stackcopy.Class() == ir.PPARAMOUT {
+ nn = append(nn, walkstmt(typecheck(ir.Nod(ir.OAS, stackcopy, v), ctxStmt)))
}
}
@@ -2465,65 +2470,65 @@ func returnsfromheap(params *types.Type) []*Node {
// between the stack and the heap. The generated code is added to Curfn's
// Enter and Exit lists.
func heapmoves() {
- lno := lineno
- lineno = Curfn.Pos
- nn := paramstoheap(Curfn.Type.Recvs())
- nn = append(nn, paramstoheap(Curfn.Type.Params())...)
- nn = append(nn, paramstoheap(Curfn.Type.Results())...)
- Curfn.Func.Enter.Append(nn...)
- lineno = Curfn.Func.Endlineno
- Curfn.Func.Exit.Append(returnsfromheap(Curfn.Type.Results())...)
- lineno = lno
+ lno := base.Pos
+ base.Pos = Curfn.Pos()
+ nn := paramstoheap(Curfn.Type().Recvs())
+ nn = append(nn, paramstoheap(Curfn.Type().Params())...)
+ nn = append(nn, paramstoheap(Curfn.Type().Results())...)
+ Curfn.Func().Enter.Append(nn...)
+ base.Pos = Curfn.Func().Endlineno
+ Curfn.Func().Exit.Append(returnsfromheap(Curfn.Type().Results())...)
+ base.Pos = lno
}
-func vmkcall(fn *Node, t *types.Type, init *Nodes, va []*Node) *Node {
- if fn.Type == nil || fn.Type.Etype != TFUNC {
- Fatalf("mkcall %v %v", fn, fn.Type)
+func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) ir.Node {
+ if fn.Type() == nil || fn.Type().Etype != types.TFUNC {
+ base.Fatalf("mkcall %v %v", fn, fn.Type())
}
- n := fn.Type.NumParams()
+ n := fn.Type().NumParams()
if n != len(va) {
- Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va))
+ base.Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va))
}
- r := nod(OCALL, fn, nil)
- r.List.Set(va)
- if fn.Type.NumResults() > 0 {
+ r := ir.Nod(ir.OCALL, fn, nil)
+ r.PtrList().Set(va)
+ if fn.Type().NumResults() > 0 {
r = typecheck(r, ctxExpr|ctxMultiOK)
} else {
r = typecheck(r, ctxStmt)
}
r = walkexpr(r, init)
- r.Type = t
+ r.SetType(t)
return r
}
-func mkcall(name string, t *types.Type, init *Nodes, args ...*Node) *Node {
+func mkcall(name string, t *types.Type, init *ir.Nodes, args ...ir.Node) ir.Node {
return vmkcall(syslook(name), t, init, args)
}
-func mkcall1(fn *Node, t *types.Type, init *Nodes, args ...*Node) *Node {
+func mkcall1(fn ir.Node, t *types.Type, init *ir.Nodes, args ...ir.Node) ir.Node {
return vmkcall(fn, t, init, args)
}
-func conv(n *Node, t *types.Type) *Node {
- if types.Identical(n.Type, t) {
+func conv(n ir.Node, t *types.Type) ir.Node {
+ if types.Identical(n.Type(), t) {
return n
}
- n = nod(OCONV, n, nil)
- n.Type = t
+ n = ir.Nod(ir.OCONV, n, nil)
+ n.SetType(t)
n = typecheck(n, ctxExpr)
return n
}
// convnop converts node n to type t using the OCONVNOP op
// and typechecks the result with ctxExpr.
-func convnop(n *Node, t *types.Type) *Node {
- if types.Identical(n.Type, t) {
+func convnop(n ir.Node, t *types.Type) ir.Node {
+ if types.Identical(n.Type(), t) {
return n
}
- n = nod(OCONVNOP, n, nil)
- n.Type = t
+ n = ir.Nod(ir.OCONVNOP, n, nil)
+ n.SetType(t)
n = typecheck(n, ctxExpr)
return n
}
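// Usage sketch (illustrative, not part of this change): conv inserts a
// real conversion, which may later lower to a runtime call, while convnop
// only reinterprets the same bits at a new type, e.g.
//
//	n = conv(n, types.Types[types.TINT64])          // value conversion
//	hp = convnop(hp, types.Types[types.TUNSAFEPTR]) // bit-for-bit retype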
@@ -2531,30 +2536,30 @@ func convnop(n *Node, t *types.Type) *Node {
// byteindex converts n, which is byte-sized, to an int used to index into an array.
// We cannot use conv, because we allow converting bool to int here,
// which is forbidden in user code.
-func byteindex(n *Node) *Node {
+func byteindex(n ir.Node) ir.Node {
// We cannot convert from bool to int directly.
// While converting from int8 to int is possible, it would yield
// the wrong result for negative values.
// Reinterpreting the value as an unsigned byte solves both cases.
- if !types.Identical(n.Type, types.Types[TUINT8]) {
- n = nod(OCONV, n, nil)
- n.Type = types.Types[TUINT8]
+ if !types.Identical(n.Type(), types.Types[types.TUINT8]) {
+ n = ir.Nod(ir.OCONV, n, nil)
+ n.SetType(types.Types[types.TUINT8])
n.SetTypecheck(1)
}
- n = nod(OCONV, n, nil)
- n.Type = types.Types[TINT]
+ n = ir.Nod(ir.OCONV, n, nil)
+ n.SetType(types.Types[types.TINT])
n.SetTypecheck(1)
return n
}
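// Illustrative example (not part of this change): for b of type bool,
// byteindex builds IR equivalent to
//
//	int(uint8(b))
//
// a conversion chain user code could not write directly; the unsigned
// intermediate avoids sign extension for int8 inputs.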
-func chanfn(name string, n int, t *types.Type) *Node {
+func chanfn(name string, n int, t *types.Type) ir.Node {
if !t.IsChan() {
- Fatalf("chanfn %v", t)
+ base.Fatalf("chanfn %v", t)
}
fn := syslook(name)
switch n {
default:
- Fatalf("chanfn %d", n)
+ base.Fatalf("chanfn %d", n)
case 1:
fn = substArgTypes(fn, t.Elem())
case 2:
@@ -2563,18 +2568,18 @@ func chanfn(name string, n int, t *types.Type) *Node {
return fn
}
-func mapfn(name string, t *types.Type) *Node {
+func mapfn(name string, t *types.Type) ir.Node {
if !t.IsMap() {
- Fatalf("mapfn %v", t)
+ base.Fatalf("mapfn %v", t)
}
fn := syslook(name)
fn = substArgTypes(fn, t.Key(), t.Elem(), t.Key(), t.Elem())
return fn
}
-func mapfndel(name string, t *types.Type) *Node {
+func mapfndel(name string, t *types.Type) ir.Node {
if !t.IsMap() {
- Fatalf("mapfn %v", t)
+ base.Fatalf("mapfn %v", t)
}
fn := syslook(name)
fn = substArgTypes(fn, t.Key(), t.Elem(), t.Key())
@@ -2615,7 +2620,7 @@ func mapfast(t *types.Type) int {
if Widthptr == 4 {
return mapfast32ptr
}
- Fatalf("small pointer %v", t.Key())
+ base.Fatalf("small pointer %v", t.Key())
case AMEM64:
if !t.Key().HasPointers() {
return mapfast64
@@ -2631,25 +2636,25 @@ func mapfast(t *types.Type) int {
return mapslow
}
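// Selection sketch (illustrative, not part of this change):
//
//	map[int32]T  // AMEM32, non-pointer key -> mapaccess*_fast32 variants
//	map[int64]T  // AMEM64                  -> mapaccess*_fast64 variants
//	map[string]T // ASTRING                 -> mapaccess*_faststr variants
//
// Everything else falls back to the generic runtime map functions (mapslow).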
-func writebarrierfn(name string, l *types.Type, r *types.Type) *Node {
+func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node {
fn := syslook(name)
fn = substArgTypes(fn, l, r)
return fn
}
-func addstr(n *Node, init *Nodes) *Node {
+func addstr(n ir.Node, init *ir.Nodes) ir.Node {
// order.expr rewrote OADDSTR to have a list of strings.
- c := n.List.Len()
+ c := n.List().Len()
if c < 2 {
- Fatalf("addstr count %d too small", c)
+ base.Fatalf("addstr count %d too small", c)
}
buf := nodnil()
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
sz := int64(0)
- for _, n1 := range n.List.Slice() {
- if n1.Op == OLITERAL {
+ for _, n1 := range n.List().Slice() {
+ if n1.Op() == ir.OLITERAL {
sz += int64(len(n1.StringVal()))
}
}
@@ -2657,15 +2662,15 @@ func addstr(n *Node, init *Nodes) *Node {
// Don't allocate the buffer if the result won't fit.
if sz < tmpstringbufsize {
// Create temporary buffer for result string on stack.
- t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
- buf = nod(OADDR, temp(t), nil)
+ t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
+ buf = ir.Nod(ir.OADDR, temp(t), nil)
}
}
// build list of string arguments
- args := []*Node{buf}
- for _, n2 := range n.List.Slice() {
- args = append(args, conv(n2, types.Types[TSTRING]))
+ args := []ir.Node{buf}
+ for _, n2 := range n.List().Slice() {
+ args = append(args, conv(n2, types.Types[types.TSTRING]))
}
var fn string
@@ -2677,33 +2682,33 @@ func addstr(n *Node, init *Nodes) *Node {
// large numbers of strings are passed to the runtime as a slice.
fn = "concatstrings"
- t := types.NewSlice(types.Types[TSTRING])
- slice := nod(OCOMPLIT, nil, typenod(t))
+ t := types.NewSlice(types.Types[types.TSTRING])
+ slice := ir.Nod(ir.OCOMPLIT, nil, typenod(t))
if prealloc[n] != nil {
prealloc[slice] = prealloc[n]
}
- slice.List.Set(args[1:]) // skip buf arg
- args = []*Node{buf, slice}
- slice.Esc = EscNone
+ slice.PtrList().Set(args[1:]) // skip buf arg
+ args = []ir.Node{buf, slice}
+ slice.SetEsc(EscNone)
}
cat := syslook(fn)
- r := nod(OCALL, cat, nil)
- r.List.Set(args)
+ r := ir.Nod(ir.OCALL, cat, nil)
+ r.PtrList().Set(args)
r = typecheck(r, ctxExpr)
r = walkexpr(r, init)
- r.Type = n.Type
+ r.SetType(n.Type())
return r
}
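// Lowering sketch (illustrative, not part of this change): for
//
//	s := a + b + c
//
// order has rewritten the OADDSTR into the list [a b c], so addstr emits
//
//	s = concatstring3(buf, a, b, c)
//
// where buf points at a stack buffer when the result does not escape and
// is nil otherwise; five or more operands go through concatstrings with a
// slice argument.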
-func walkAppendArgs(n *Node, init *Nodes) {
- walkexprlistsafe(n.List.Slice(), init)
+func walkAppendArgs(n ir.Node, init *ir.Nodes) {
+ walkexprlistsafe(n.List().Slice(), init)
// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
// and n are names or literals, but those may index the slice we're
// modifying here. Fix explicitly.
- ls := n.List.Slice()
+ ls := n.List().Slice()
for i1, n1 := range ls {
ls[i1] = cheapexpr(n1, init)
}
@@ -2723,90 +2728,90 @@ func walkAppendArgs(n *Node, init *Nodes) {
// s
//
// l2 is allowed to be a string.
-func appendslice(n *Node, init *Nodes) *Node {
+func appendslice(n ir.Node, init *ir.Nodes) ir.Node {
walkAppendArgs(n, init)
- l1 := n.List.First()
- l2 := n.List.Second()
+ l1 := n.List().First()
+ l2 := n.List().Second()
l2 = cheapexpr(l2, init)
- n.List.SetSecond(l2)
+ n.List().SetSecond(l2)
- var nodes Nodes
+ var nodes ir.Nodes
// var s []T
- s := temp(l1.Type)
- nodes.Append(nod(OAS, s, l1)) // s = l1
+ s := temp(l1.Type())
+ nodes.Append(ir.Nod(ir.OAS, s, l1)) // s = l1
- elemtype := s.Type.Elem()
+ elemtype := s.Type().Elem()
// n := len(s) + len(l2)
- nn := temp(types.Types[TINT])
- nodes.Append(nod(OAS, nn, nod(OADD, nod(OLEN, s, nil), nod(OLEN, l2, nil))))
+ nn := temp(types.Types[types.TINT])
+ nodes.Append(ir.Nod(ir.OAS, nn, ir.Nod(ir.OADD, ir.Nod(ir.OLEN, s, nil), ir.Nod(ir.OLEN, l2, nil))))
// if uint(n) > uint(cap(s))
- nif := nod(OIF, nil, nil)
- nuint := conv(nn, types.Types[TUINT])
- scapuint := conv(nod(OCAP, s, nil), types.Types[TUINT])
- nif.Left = nod(OGT, nuint, scapuint)
+ nif := ir.Nod(ir.OIF, nil, nil)
+ nuint := conv(nn, types.Types[types.TUINT])
+ scapuint := conv(ir.Nod(ir.OCAP, s, nil), types.Types[types.TUINT])
+ nif.SetLeft(ir.Nod(ir.OGT, nuint, scapuint))
// instantiate growslice(typ *type, []any, int) []any
fn := syslook("growslice")
fn = substArgTypes(fn, elemtype, elemtype)
// s = growslice(T, s, n)
- nif.Nbody.Set1(nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(elemtype), s, nn)))
+ nif.PtrBody().Set1(ir.Nod(ir.OAS, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn)))
nodes.Append(nif)
// s = s[:n]
- nt := nod(OSLICE, s, nil)
+ nt := ir.Nod(ir.OSLICE, s, nil)
nt.SetSliceBounds(nil, nn, nil)
nt.SetBounded(true)
- nodes.Append(nod(OAS, s, nt))
+ nodes.Append(ir.Nod(ir.OAS, s, nt))
- var ncopy *Node
+ var ncopy ir.Node
if elemtype.HasPointers() {
// copy(s[len(l1):], l2)
- nptr1 := nod(OSLICE, s, nil)
- nptr1.Type = s.Type
- nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil)
+ nptr1 := ir.Nod(ir.OSLICE, s, nil)
+ nptr1.SetType(s.Type())
+ nptr1.SetSliceBounds(ir.Nod(ir.OLEN, l1, nil), nil, nil)
nptr1 = cheapexpr(nptr1, &nodes)
nptr2 := l2
- Curfn.Func.setWBPos(n.Pos)
+ Curfn.Func().SetWBPos(n.Pos())
// instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int
fn := syslook("typedslicecopy")
- fn = substArgTypes(fn, l1.Type.Elem(), l2.Type.Elem())
- ptr1, len1 := nptr1.backingArrayPtrLen()
- ptr2, len2 := nptr2.backingArrayPtrLen()
- ncopy = mkcall1(fn, types.Types[TINT], &nodes, typename(elemtype), ptr1, len1, ptr2, len2)
- } else if instrumenting && !compiling_runtime {
+ fn = substArgTypes(fn, l1.Type().Elem(), l2.Type().Elem())
+ ptr1, len1 := backingArrayPtrLen(nptr1)
+ ptr2, len2 := backingArrayPtrLen(nptr2)
+ ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, typename(elemtype), ptr1, len1, ptr2, len2)
+ } else if instrumenting && !base.Flag.CompilingRuntime {
// rely on runtime to instrument:
// copy(s[len(l1):], l2)
// l2 can be a slice or string.
- nptr1 := nod(OSLICE, s, nil)
- nptr1.Type = s.Type
- nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil)
+ nptr1 := ir.Nod(ir.OSLICE, s, nil)
+ nptr1.SetType(s.Type())
+ nptr1.SetSliceBounds(ir.Nod(ir.OLEN, l1, nil), nil, nil)
nptr1 = cheapexpr(nptr1, &nodes)
nptr2 := l2
- ptr1, len1 := nptr1.backingArrayPtrLen()
- ptr2, len2 := nptr2.backingArrayPtrLen()
+ ptr1, len1 := backingArrayPtrLen(nptr1)
+ ptr2, len2 := backingArrayPtrLen(nptr2)
fn := syslook("slicecopy")
- fn = substArgTypes(fn, ptr1.Type.Elem(), ptr2.Type.Elem())
- ncopy = mkcall1(fn, types.Types[TINT], &nodes, ptr1, len1, ptr2, len2, nodintconst(elemtype.Width))
+ fn = substArgTypes(fn, ptr1.Type().Elem(), ptr2.Type().Elem())
+ ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, ptr1, len1, ptr2, len2, nodintconst(elemtype.Width))
} else {
// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
- nptr1 := nod(OINDEX, s, nod(OLEN, l1, nil))
+ nptr1 := ir.Nod(ir.OINDEX, s, ir.Nod(ir.OLEN, l1, nil))
nptr1.SetBounded(true)
- nptr1 = nod(OADDR, nptr1, nil)
+ nptr1 = ir.Nod(ir.OADDR, nptr1, nil)
- nptr2 := nod(OSPTR, l2, nil)
+ nptr2 := ir.Nod(ir.OSPTR, l2, nil)
- nwid := cheapexpr(conv(nod(OLEN, l2, nil), types.Types[TUINTPTR]), &nodes)
- nwid = nod(OMUL, nwid, nodintconst(elemtype.Width))
+ nwid := cheapexpr(conv(ir.Nod(ir.OLEN, l2, nil), types.Types[types.TUINTPTR]), &nodes)
+ nwid = ir.Nod(ir.OMUL, nwid, nodintconst(elemtype.Width))
// instantiate func memmove(to *any, frm *any, length uintptr)
fn := syslook("memmove")
@@ -2823,21 +2828,21 @@ func appendslice(n *Node, init *Nodes) *Node {
// isAppendOfMake reports whether n is of the form append(x, make([]T, y)...).
// isAppendOfMake assumes n has already been typechecked.
-func isAppendOfMake(n *Node) bool {
- if Debug.N != 0 || instrumenting {
+func isAppendOfMake(n ir.Node) bool {
+ if base.Flag.N != 0 || instrumenting {
return false
}
if n.Typecheck() == 0 {
- Fatalf("missing typecheck: %+v", n)
+ base.Fatalf("missing typecheck: %+v", n)
}
- if n.Op != OAPPEND || !n.IsDDD() || n.List.Len() != 2 {
+ if n.Op() != ir.OAPPEND || !n.IsDDD() || n.List().Len() != 2 {
return false
}
- second := n.List.Second()
- if second.Op != OMAKESLICE || second.Right != nil {
+ second := n.List().Second()
+ if second.Op() != ir.OMAKESLICE || second.Right() != nil {
return false
}
@@ -2847,8 +2852,8 @@ func isAppendOfMake(n *Node) bool {
// typecheck made sure that constant arguments to make are not negative and fit into an int.
// Overflow of the len argument to make is handled by an explicit check of int(len) < 0 at runtime.
- y := second.Left
- if !Isconst(y, CTINT) && maxintval[y.Type.Etype].Cmp(maxintval[TUINT]) > 0 {
+ y := second.Left()
+ if !ir.IsConst(y, constant.Int) && y.Type().Size() > types.Types[types.TUINT].Size() {
return false
}
@@ -2882,93 +2887,93 @@ func isAppendOfMake(n *Node) bool {
// }
// }
// s
-func extendslice(n *Node, init *Nodes) *Node {
+func extendslice(n ir.Node, init *ir.Nodes) ir.Node {
// isAppendOfMake made sure all possible positive values of l2 fit into a uint.
// The case of l2 overflow when converting from e.g. uint to int is handled by an explicit
// check of l2 < 0 at runtime which is generated below.
- l2 := conv(n.List.Second().Left, types.Types[TINT])
+ l2 := conv(n.List().Second().Left(), types.Types[types.TINT])
l2 = typecheck(l2, ctxExpr)
- n.List.SetSecond(l2) // walkAppendArgs expects l2 in n.List.Second().
+ n.List().SetSecond(l2) // walkAppendArgs expects l2 in n.List.Second().
walkAppendArgs(n, init)
- l1 := n.List.First()
- l2 = n.List.Second() // re-read l2, as it may have been updated by walkAppendArgs
+ l1 := n.List().First()
+ l2 = n.List().Second() // re-read l2, as it may have been updated by walkAppendArgs
- var nodes []*Node
+ var nodes []ir.Node
// if l2 >= 0 (the likely case), do nothing
- nifneg := nod(OIF, nod(OGE, l2, nodintconst(0)), nil)
+ nifneg := ir.Nod(ir.OIF, ir.Nod(ir.OGE, l2, nodintconst(0)), nil)
nifneg.SetLikely(true)
// else panicmakeslicelen()
- nifneg.Rlist.Set1(mkcall("panicmakeslicelen", nil, init))
+ nifneg.PtrRlist().Set1(mkcall("panicmakeslicelen", nil, init))
nodes = append(nodes, nifneg)
// s := l1
- s := temp(l1.Type)
- nodes = append(nodes, nod(OAS, s, l1))
+ s := temp(l1.Type())
+ nodes = append(nodes, ir.Nod(ir.OAS, s, l1))
- elemtype := s.Type.Elem()
+ elemtype := s.Type().Elem()
// n := len(s) + l2
- nn := temp(types.Types[TINT])
- nodes = append(nodes, nod(OAS, nn, nod(OADD, nod(OLEN, s, nil), l2)))
+ nn := temp(types.Types[types.TINT])
+ nodes = append(nodes, ir.Nod(ir.OAS, nn, ir.Nod(ir.OADD, ir.Nod(ir.OLEN, s, nil), l2)))
// if uint(n) > uint(cap(s))
- nuint := conv(nn, types.Types[TUINT])
- capuint := conv(nod(OCAP, s, nil), types.Types[TUINT])
- nif := nod(OIF, nod(OGT, nuint, capuint), nil)
+ nuint := conv(nn, types.Types[types.TUINT])
+ capuint := conv(ir.Nod(ir.OCAP, s, nil), types.Types[types.TUINT])
+ nif := ir.Nod(ir.OIF, ir.Nod(ir.OGT, nuint, capuint), nil)
// instantiate growslice(typ *type, old []any, newcap int) []any
fn := syslook("growslice")
fn = substArgTypes(fn, elemtype, elemtype)
// s = growslice(T, s, n)
- nif.Nbody.Set1(nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(elemtype), s, nn)))
+ nif.PtrBody().Set1(ir.Nod(ir.OAS, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn)))
nodes = append(nodes, nif)
// s = s[:n]
- nt := nod(OSLICE, s, nil)
+ nt := ir.Nod(ir.OSLICE, s, nil)
nt.SetSliceBounds(nil, nn, nil)
nt.SetBounded(true)
- nodes = append(nodes, nod(OAS, s, nt))
+ nodes = append(nodes, ir.Nod(ir.OAS, s, nt))
// lptr := &l1[0]
- l1ptr := temp(l1.Type.Elem().PtrTo())
- tmp := nod(OSPTR, l1, nil)
- nodes = append(nodes, nod(OAS, l1ptr, tmp))
+ l1ptr := temp(l1.Type().Elem().PtrTo())
+ tmp := ir.Nod(ir.OSPTR, l1, nil)
+ nodes = append(nodes, ir.Nod(ir.OAS, l1ptr, tmp))
// sptr := &s[0]
sptr := temp(elemtype.PtrTo())
- tmp = nod(OSPTR, s, nil)
- nodes = append(nodes, nod(OAS, sptr, tmp))
+ tmp = ir.Nod(ir.OSPTR, s, nil)
+ nodes = append(nodes, ir.Nod(ir.OAS, sptr, tmp))
// hp := &s[len(l1)]
- hp := nod(OINDEX, s, nod(OLEN, l1, nil))
+ hp := ir.Nod(ir.OINDEX, s, ir.Nod(ir.OLEN, l1, nil))
hp.SetBounded(true)
- hp = nod(OADDR, hp, nil)
- hp = convnop(hp, types.Types[TUNSAFEPTR])
+ hp = ir.Nod(ir.OADDR, hp, nil)
+ hp = convnop(hp, types.Types[types.TUNSAFEPTR])
// hn := l2 * sizeof(elem(s))
- hn := nod(OMUL, l2, nodintconst(elemtype.Width))
- hn = conv(hn, types.Types[TUINTPTR])
+ hn := ir.Nod(ir.OMUL, l2, nodintconst(elemtype.Width))
+ hn = conv(hn, types.Types[types.TUINTPTR])
clrname := "memclrNoHeapPointers"
hasPointers := elemtype.HasPointers()
if hasPointers {
clrname = "memclrHasPointers"
- Curfn.Func.setWBPos(n.Pos)
+ Curfn.Func().SetWBPos(n.Pos())
}
- var clr Nodes
+ var clr ir.Nodes
clrfn := mkcall(clrname, nil, &clr, hp, hn)
clr.Append(clrfn)
if hasPointers {
// if l1ptr == sptr
- nifclr := nod(OIF, nod(OEQ, l1ptr, sptr), nil)
- nifclr.Nbody = clr
+ nifclr := ir.Nod(ir.OIF, ir.Nod(ir.OEQ, l1ptr, sptr), nil)
+ nifclr.SetBody(clr)
nodes = append(nodes, nifclr)
} else {
nodes = append(nodes, clr.Slice()...)
@@ -3001,14 +3006,14 @@ func extendslice(n *Node, init *Nodes) *Node {
// ...
// }
// s
-func walkappend(n *Node, init *Nodes, dst *Node) *Node {
- if !samesafeexpr(dst, n.List.First()) {
- n.List.SetFirst(safeexpr(n.List.First(), init))
- n.List.SetFirst(walkexpr(n.List.First(), init))
+func walkappend(n ir.Node, init *ir.Nodes, dst ir.Node) ir.Node {
+ if !samesafeexpr(dst, n.List().First()) {
+ n.List().SetFirst(safeexpr(n.List().First(), init))
+ n.List().SetFirst(walkexpr(n.List().First(), init))
}
- walkexprlistsafe(n.List.Slice()[1:], init)
+ walkexprlistsafe(n.List().Slice()[1:], init)
- nsrc := n.List.First()
+ nsrc := n.List().First()
// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
// and n are names or literals, but those may index the slice we're
@@ -3016,60 +3021,60 @@ func walkappend(n *Node, init *Nodes, dst *Node) *Node {
// Using cheapexpr also makes sure that the evaluation
// of all arguments (and especially any panics) happens
// before we begin to modify the slice in a visible way.
- ls := n.List.Slice()[1:]
+ ls := n.List().Slice()[1:]
for i, n := range ls {
n = cheapexpr(n, init)
- if !types.Identical(n.Type, nsrc.Type.Elem()) {
- n = assignconv(n, nsrc.Type.Elem(), "append")
+ if !types.Identical(n.Type(), nsrc.Type().Elem()) {
+ n = assignconv(n, nsrc.Type().Elem(), "append")
n = walkexpr(n, init)
}
ls[i] = n
}
- argc := n.List.Len() - 1
+ argc := n.List().Len() - 1
if argc < 1 {
return nsrc
}
// General case, with no function calls left as arguments.
// Leave for gen, except that instrumentation requires old form.
- if !instrumenting || compiling_runtime {
+ if !instrumenting || base.Flag.CompilingRuntime {
return n
}
- var l []*Node
+ var l []ir.Node
- ns := temp(nsrc.Type)
- l = append(l, nod(OAS, ns, nsrc)) // s = src
+ ns := temp(nsrc.Type())
+ l = append(l, ir.Nod(ir.OAS, ns, nsrc)) // s = src
na := nodintconst(int64(argc)) // const argc
- nx := nod(OIF, nil, nil) // if cap(s) - len(s) < argc
- nx.Left = nod(OLT, nod(OSUB, nod(OCAP, ns, nil), nod(OLEN, ns, nil)), na)
+ nx := ir.Nod(ir.OIF, nil, nil) // if cap(s) - len(s) < argc
+ nx.SetLeft(ir.Nod(ir.OLT, ir.Nod(ir.OSUB, ir.Nod(ir.OCAP, ns, nil), ir.Nod(ir.OLEN, ns, nil)), na))
fn := syslook("growslice") // growslice(<type>, old []T, mincap int) (ret []T)
- fn = substArgTypes(fn, ns.Type.Elem(), ns.Type.Elem())
+ fn = substArgTypes(fn, ns.Type().Elem(), ns.Type().Elem())
- nx.Nbody.Set1(nod(OAS, ns,
- mkcall1(fn, ns.Type, &nx.Ninit, typename(ns.Type.Elem()), ns,
- nod(OADD, nod(OLEN, ns, nil), na))))
+ nx.PtrBody().Set1(ir.Nod(ir.OAS, ns,
+ mkcall1(fn, ns.Type(), nx.PtrInit(), typename(ns.Type().Elem()), ns,
+ ir.Nod(ir.OADD, ir.Nod(ir.OLEN, ns, nil), na))))
l = append(l, nx)
- nn := temp(types.Types[TINT])
- l = append(l, nod(OAS, nn, nod(OLEN, ns, nil))) // n = len(s)
+ nn := temp(types.Types[types.TINT])
+ l = append(l, ir.Nod(ir.OAS, nn, ir.Nod(ir.OLEN, ns, nil))) // n = len(s)
- nx = nod(OSLICE, ns, nil) // ...s[:n+argc]
- nx.SetSliceBounds(nil, nod(OADD, nn, na), nil)
+ nx = ir.Nod(ir.OSLICE, ns, nil) // ...s[:n+argc]
+ nx.SetSliceBounds(nil, ir.Nod(ir.OADD, nn, na), nil)
nx.SetBounded(true)
- l = append(l, nod(OAS, ns, nx)) // s = s[:n+argc]
+ l = append(l, ir.Nod(ir.OAS, ns, nx)) // s = s[:n+argc]
- ls = n.List.Slice()[1:]
+ ls = n.List().Slice()[1:]
for i, n := range ls {
- nx = nod(OINDEX, ns, nn) // s[n] ...
+ nx = ir.Nod(ir.OINDEX, ns, nn) // s[n] ...
nx.SetBounded(true)
- l = append(l, nod(OAS, nx, n)) // s[n] = arg
+ l = append(l, ir.Nod(ir.OAS, nx, n)) // s[n] = arg
if i+1 < len(ls) {
- l = append(l, nod(OAS, nn, nod(OADD, nn, nodintconst(1)))) // n = n + 1
+ l = append(l, ir.Nod(ir.OAS, nn, ir.Nod(ir.OADD, nn, nodintconst(1)))) // n = n + 1
}
}
@@ -3090,15 +3095,15 @@ func walkappend(n *Node, init *Nodes, dst *Node) *Node {
//
// Also works if b is a string.
//
-func copyany(n *Node, init *Nodes, runtimecall bool) *Node {
- if n.Left.Type.Elem().HasPointers() {
- Curfn.Func.setWBPos(n.Pos)
- fn := writebarrierfn("typedslicecopy", n.Left.Type.Elem(), n.Right.Type.Elem())
- n.Left = cheapexpr(n.Left, init)
- ptrL, lenL := n.Left.backingArrayPtrLen()
- n.Right = cheapexpr(n.Right, init)
- ptrR, lenR := n.Right.backingArrayPtrLen()
- return mkcall1(fn, n.Type, init, typename(n.Left.Type.Elem()), ptrL, lenL, ptrR, lenR)
+func copyany(n ir.Node, init *ir.Nodes, runtimecall bool) ir.Node {
+ if n.Left().Type().Elem().HasPointers() {
+ Curfn.Func().SetWBPos(n.Pos())
+ fn := writebarrierfn("typedslicecopy", n.Left().Type().Elem(), n.Right().Type().Elem())
+ n.SetLeft(cheapexpr(n.Left(), init))
+ ptrL, lenL := backingArrayPtrLen(n.Left())
+ n.SetRight(cheapexpr(n.Right(), init))
+ ptrR, lenR := backingArrayPtrLen(n.Right())
+ return mkcall1(fn, n.Type(), init, typename(n.Left().Type().Elem()), ptrL, lenL, ptrR, lenR)
}
if runtimecall {
@@ -3106,53 +3111,53 @@ func copyany(n *Node, init *Nodes, runtimecall bool) *Node {
// copy(n.Left(), n.Right())
// n.Right() can be a slice or string.
- n.Left = cheapexpr(n.Left, init)
- ptrL, lenL := n.Left.backingArrayPtrLen()
- n.Right = cheapexpr(n.Right, init)
- ptrR, lenR := n.Right.backingArrayPtrLen()
+ n.SetLeft(cheapexpr(n.Left(), init))
+ ptrL, lenL := backingArrayPtrLen(n.Left())
+ n.SetRight(cheapexpr(n.Right(), init))
+ ptrR, lenR := backingArrayPtrLen(n.Right())
fn := syslook("slicecopy")
- fn = substArgTypes(fn, ptrL.Type.Elem(), ptrR.Type.Elem())
+ fn = substArgTypes(fn, ptrL.Type().Elem(), ptrR.Type().Elem())
- return mkcall1(fn, n.Type, init, ptrL, lenL, ptrR, lenR, nodintconst(n.Left.Type.Elem().Width))
+ return mkcall1(fn, n.Type(), init, ptrL, lenL, ptrR, lenR, nodintconst(n.Left().Type().Elem().Width))
}
- n.Left = walkexpr(n.Left, init)
- n.Right = walkexpr(n.Right, init)
- nl := temp(n.Left.Type)
- nr := temp(n.Right.Type)
- var l []*Node
- l = append(l, nod(OAS, nl, n.Left))
- l = append(l, nod(OAS, nr, n.Right))
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetRight(walkexpr(n.Right(), init))
+ nl := temp(n.Left().Type())
+ nr := temp(n.Right().Type())
+ var l []ir.Node
+ l = append(l, ir.Nod(ir.OAS, nl, n.Left()))
+ l = append(l, ir.Nod(ir.OAS, nr, n.Right()))
- nfrm := nod(OSPTR, nr, nil)
- nto := nod(OSPTR, nl, nil)
+ nfrm := ir.Nod(ir.OSPTR, nr, nil)
+ nto := ir.Nod(ir.OSPTR, nl, nil)
- nlen := temp(types.Types[TINT])
+ nlen := temp(types.Types[types.TINT])
// n = len(to)
- l = append(l, nod(OAS, nlen, nod(OLEN, nl, nil)))
+ l = append(l, ir.Nod(ir.OAS, nlen, ir.Nod(ir.OLEN, nl, nil)))
// if n > len(frm) { n = len(frm) }
- nif := nod(OIF, nil, nil)
+ nif := ir.Nod(ir.OIF, nil, nil)
- nif.Left = nod(OGT, nlen, nod(OLEN, nr, nil))
- nif.Nbody.Append(nod(OAS, nlen, nod(OLEN, nr, nil)))
+ nif.SetLeft(ir.Nod(ir.OGT, nlen, ir.Nod(ir.OLEN, nr, nil)))
+ nif.PtrBody().Append(ir.Nod(ir.OAS, nlen, ir.Nod(ir.OLEN, nr, nil)))
l = append(l, nif)
// if to.ptr != frm.ptr { memmove( ... ) }
- ne := nod(OIF, nod(ONE, nto, nfrm), nil)
+ ne := ir.Nod(ir.OIF, ir.Nod(ir.ONE, nto, nfrm), nil)
ne.SetLikely(true)
l = append(l, ne)
fn := syslook("memmove")
- fn = substArgTypes(fn, nl.Type.Elem(), nl.Type.Elem())
- nwid := temp(types.Types[TUINTPTR])
- setwid := nod(OAS, nwid, conv(nlen, types.Types[TUINTPTR]))
- ne.Nbody.Append(setwid)
- nwid = nod(OMUL, nwid, nodintconst(nl.Type.Elem().Width))
+ fn = substArgTypes(fn, nl.Type().Elem(), nl.Type().Elem())
+ nwid := temp(types.Types[types.TUINTPTR])
+ setwid := ir.Nod(ir.OAS, nwid, conv(nlen, types.Types[types.TUINTPTR]))
+ ne.PtrBody().Append(setwid)
+ nwid = ir.Nod(ir.OMUL, nwid, nodintconst(nl.Type().Elem().Width))
call := mkcall1(fn, nil, init, nto, nfrm, nwid)
- ne.Nbody.Append(call)
+ ne.PtrBody().Append(call)
typecheckslice(l, ctxStmt)
walkstmtlist(l)
@@ -3160,7 +3165,7 @@ func copyany(n *Node, init *Nodes, runtimecall bool) *Node {
return nlen
}
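// Lowering sketch (illustrative, not part of this change): in the
// open-coded case above, copy(dst, src) for pointer-free elements becomes
// roughly
//
//	n := len(dst)
//	if n > len(src) { n = len(src) }
//	if dst.ptr != src.ptr {
//		memmove(dst.ptr, src.ptr, uintptr(n)*elemsize)
//	}
//
// while the pointer-carrying and instrumented cases call typedslicecopy
// and slicecopy instead.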
-func eqfor(t *types.Type) (n *Node, needsize bool) {
+func eqfor(t *types.Type) (n ir.Node, needsize bool) {
// Should only arrive here with large memory or
// a struct/array containing a non-memory field/element.
// Small memory is handled inline, and single non-memory
@@ -3172,73 +3177,73 @@ func eqfor(t *types.Type) (n *Node, needsize bool) {
return n, true
case ASPECIAL:
sym := typesymprefix(".eq", t)
- n := newname(sym)
+ n := NewName(sym)
setNodeNameFunc(n)
- n.Type = functype(nil, []*Node{
+ n.SetType(functype(nil, []ir.Node{
anonfield(types.NewPtr(t)),
anonfield(types.NewPtr(t)),
- }, []*Node{
- anonfield(types.Types[TBOOL]),
- })
+ }, []ir.Node{
+ anonfield(types.Types[types.TBOOL]),
+ }))
return n, false
}
- Fatalf("eqfor %v", t)
+ base.Fatalf("eqfor %v", t)
return nil, false
}
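// Sketch (illustrative, not part of this change): for a struct or array
// type T that cannot be compared as raw memory, eqfor returns a reference
// to the generated function
//
//	func .eq.T(p, q *T) bool
//
// with needsize == false; AMEM types instead use
// memequal(x, y unsafe.Pointer, size uintptr) bool with needsize == true.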
// The result of walkcompare MUST be assigned back to n, e.g.
//	n.SetLeft(walkcompare(n.Left(), init))
-func walkcompare(n *Node, init *Nodes) *Node {
- if n.Left.Type.IsInterface() && n.Right.Type.IsInterface() && n.Left.Op != OLITERAL && n.Right.Op != OLITERAL {
+func walkcompare(n ir.Node, init *ir.Nodes) ir.Node {
+ if n.Left().Type().IsInterface() && n.Right().Type().IsInterface() && n.Left().Op() != ir.ONIL && n.Right().Op() != ir.ONIL {
return walkcompareInterface(n, init)
}
- if n.Left.Type.IsString() && n.Right.Type.IsString() {
+ if n.Left().Type().IsString() && n.Right().Type().IsString() {
return walkcompareString(n, init)
}
- n.Left = walkexpr(n.Left, init)
- n.Right = walkexpr(n.Right, init)
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetRight(walkexpr(n.Right(), init))
// Given mixed interface/concrete comparison,
// rewrite into types-equal && data-equal.
// This is efficient, avoids allocations, and avoids runtime calls.
- if n.Left.Type.IsInterface() != n.Right.Type.IsInterface() {
+ if n.Left().Type().IsInterface() != n.Right().Type().IsInterface() {
// Preserve side-effects in case of short-circuiting; see #32187.
- l := cheapexpr(n.Left, init)
- r := cheapexpr(n.Right, init)
+ l := cheapexpr(n.Left(), init)
+ r := cheapexpr(n.Right(), init)
// Swap so that l is the interface value and r is the concrete value.
- if n.Right.Type.IsInterface() {
+ if n.Right().Type().IsInterface() {
l, r = r, l
}
// Handle both == and !=.
- eq := n.Op
- andor := OOROR
- if eq == OEQ {
- andor = OANDAND
+ eq := n.Op()
+ andor := ir.OOROR
+ if eq == ir.OEQ {
+ andor = ir.OANDAND
}
// Check for types equal.
// For empty interface, this is:
// l.tab == type(r)
// For non-empty interface, this is:
// l.tab != nil && l.tab._type == type(r)
- var eqtype *Node
- tab := nod(OITAB, l, nil)
- rtyp := typename(r.Type)
- if l.Type.IsEmptyInterface() {
- tab.Type = types.NewPtr(types.Types[TUINT8])
+ var eqtype ir.Node
+ tab := ir.Nod(ir.OITAB, l, nil)
+ rtyp := typename(r.Type())
+ if l.Type().IsEmptyInterface() {
+ tab.SetType(types.NewPtr(types.Types[types.TUINT8]))
tab.SetTypecheck(1)
- eqtype = nod(eq, tab, rtyp)
+ eqtype = ir.Nod(eq, tab, rtyp)
} else {
- nonnil := nod(brcom(eq), nodnil(), tab)
- match := nod(eq, itabType(tab), rtyp)
- eqtype = nod(andor, nonnil, match)
+ nonnil := ir.Nod(brcom(eq), nodnil(), tab)
+ match := ir.Nod(eq, itabType(tab), rtyp)
+ eqtype = ir.Nod(andor, nonnil, match)
}
// Check for data equal.
- eqdata := nod(eq, ifaceData(n.Pos, l, r.Type), r)
+ eqdata := ir.Nod(eq, ifaceData(n.Pos(), l, r.Type()), r)
// Put it all together.
- expr := nod(andor, eqtype, eqdata)
+ expr := ir.Nod(andor, eqtype, eqdata)
n = finishcompare(n, expr, init)
return n
}
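// Lowering sketch for the mixed case above (illustrative, not part of this
// change): for a non-empty interface l and concrete r of type T, writing
// "l.data == r" for the unpacked-data comparison built by ifaceData,
//
//	l == r  =>  l.tab != nil && l.tab._type == type(T) && l.data == r
//	l != r  =>  l.tab == nil || l.tab._type != type(T) || l.data != r
//
// which is exactly the eqtype/eqdata pair combined with andor above.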
@@ -3247,7 +3252,7 @@ func walkcompare(n *Node, init *Nodes) *Node {
// Otherwise back end handles it.
// While we're here, decide whether to
// inline or call an eq alg.
- t := n.Left.Type
+ t := n.Left().Type()
var inline bool
maxcmpsize := int64(4)
@@ -3259,19 +3264,19 @@ func walkcompare(n *Node, init *Nodes) *Node {
switch t.Etype {
default:
- if Debug_libfuzzer != 0 && t.IsInteger() {
- n.Left = cheapexpr(n.Left, init)
- n.Right = cheapexpr(n.Right, init)
+ if base.Debug.Libfuzzer != 0 && t.IsInteger() {
+ n.SetLeft(cheapexpr(n.Left(), init))
+ n.SetRight(cheapexpr(n.Right(), init))
// If exactly one comparison operand is
// constant, invoke the constcmp functions
// instead, and arrange for the constant
// operand to be the first argument.
- l, r := n.Left, n.Right
- if r.Op == OLITERAL {
+ l, r := n.Left(), n.Right()
+ if r.Op() == ir.OLITERAL {
l, r = r, l
}
- constcmp := l.Op == OLITERAL && r.Op != OLITERAL
+ constcmp := l.Op() == ir.OLITERAL && r.Op() != ir.OLITERAL
var fn string
var paramType *types.Type
@@ -3281,81 +3286,81 @@ func walkcompare(n *Node, init *Nodes) *Node {
if constcmp {
fn = "libfuzzerTraceConstCmp1"
}
- paramType = types.Types[TUINT8]
+ paramType = types.Types[types.TUINT8]
case 2:
fn = "libfuzzerTraceCmp2"
if constcmp {
fn = "libfuzzerTraceConstCmp2"
}
- paramType = types.Types[TUINT16]
+ paramType = types.Types[types.TUINT16]
case 4:
fn = "libfuzzerTraceCmp4"
if constcmp {
fn = "libfuzzerTraceConstCmp4"
}
- paramType = types.Types[TUINT32]
+ paramType = types.Types[types.TUINT32]
case 8:
fn = "libfuzzerTraceCmp8"
if constcmp {
fn = "libfuzzerTraceConstCmp8"
}
- paramType = types.Types[TUINT64]
+ paramType = types.Types[types.TUINT64]
default:
- Fatalf("unexpected integer size %d for %v", t.Size(), t)
+ base.Fatalf("unexpected integer size %d for %v", t.Size(), t)
}
init.Append(mkcall(fn, nil, init, tracecmpArg(l, paramType, init), tracecmpArg(r, paramType, init)))
}
return n
- case TARRAY:
+ case types.TARRAY:
// We can compare several elements at once with 2/4/8 byte integer compares
inline = t.NumElem() <= 1 || (issimple[t.Elem().Etype] && (t.NumElem() <= 4 || t.Elem().Width*t.NumElem() <= maxcmpsize))
- case TSTRUCT:
+ case types.TSTRUCT:
inline = t.NumComponents(types.IgnoreBlankFields) <= 4
}
- cmpl := n.Left
- for cmpl != nil && cmpl.Op == OCONVNOP {
- cmpl = cmpl.Left
+ cmpl := n.Left()
+ for cmpl != nil && cmpl.Op() == ir.OCONVNOP {
+ cmpl = cmpl.Left()
}
- cmpr := n.Right
- for cmpr != nil && cmpr.Op == OCONVNOP {
- cmpr = cmpr.Left
+ cmpr := n.Right()
+ for cmpr != nil && cmpr.Op() == ir.OCONVNOP {
+ cmpr = cmpr.Left()
}
// We chose not to inline. Call the equality function directly.
if !inline {
// eq algs take pointers; cmpl and cmpr must be addressable
if !islvalue(cmpl) || !islvalue(cmpr) {
- Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr)
+ base.Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr)
}
fn, needsize := eqfor(t)
- call := nod(OCALL, fn, nil)
- call.List.Append(nod(OADDR, cmpl, nil))
- call.List.Append(nod(OADDR, cmpr, nil))
+ call := ir.Nod(ir.OCALL, fn, nil)
+ call.PtrList().Append(ir.Nod(ir.OADDR, cmpl, nil))
+ call.PtrList().Append(ir.Nod(ir.OADDR, cmpr, nil))
if needsize {
- call.List.Append(nodintconst(t.Width))
+ call.PtrList().Append(nodintconst(t.Width))
}
res := call
- if n.Op != OEQ {
- res = nod(ONOT, res, nil)
+ if n.Op() != ir.OEQ {
+ res = ir.Nod(ir.ONOT, res, nil)
}
n = finishcompare(n, res, init)
return n
}
// inline: build boolean expression comparing element by element
- andor := OANDAND
- if n.Op == ONE {
- andor = OOROR
+ andor := ir.OANDAND
+ if n.Op() == ir.ONE {
+ andor = ir.OOROR
}
- var expr *Node
- compare := func(el, er *Node) {
- a := nod(n.Op, el, er)
+ var expr ir.Node
+ compare := func(el, er ir.Node) {
+ a := ir.Nod(n.Op(), el, er)
if expr == nil {
expr = a
} else {
- expr = nod(andor, expr, a)
+ expr = ir.Nod(andor, expr, a)
}
}
cmpl = safeexpr(cmpl, init)
@@ -3367,8 +3372,8 @@ func walkcompare(n *Node, init *Nodes) *Node {
continue
}
compare(
- nodSym(OXDOT, cmpl, sym),
- nodSym(OXDOT, cmpr, sym),
+ nodSym(ir.OXDOT, cmpl, sym),
+ nodSym(ir.OXDOT, cmpr, sym),
)
}
} else {
@@ -3381,45 +3386,45 @@ func walkcompare(n *Node, init *Nodes) *Node {
var convType *types.Type
switch {
case remains >= 8 && combine64bit:
- convType = types.Types[TINT64]
+ convType = types.Types[types.TINT64]
step = 8 / t.Elem().Width
case remains >= 4 && combine32bit:
- convType = types.Types[TUINT32]
+ convType = types.Types[types.TUINT32]
step = 4 / t.Elem().Width
case remains >= 2 && combine16bit:
- convType = types.Types[TUINT16]
+ convType = types.Types[types.TUINT16]
step = 2 / t.Elem().Width
default:
step = 1
}
if step == 1 {
compare(
- nod(OINDEX, cmpl, nodintconst(i)),
- nod(OINDEX, cmpr, nodintconst(i)),
+ ir.Nod(ir.OINDEX, cmpl, nodintconst(i)),
+ ir.Nod(ir.OINDEX, cmpr, nodintconst(i)),
)
i++
remains -= t.Elem().Width
} else {
elemType := t.Elem().ToUnsigned()
- cmplw := nod(OINDEX, cmpl, nodintconst(i))
+ cmplw := ir.Nod(ir.OINDEX, cmpl, nodintconst(i))
cmplw = conv(cmplw, elemType) // convert to unsigned
cmplw = conv(cmplw, convType) // widen
- cmprw := nod(OINDEX, cmpr, nodintconst(i))
+ cmprw := ir.Nod(ir.OINDEX, cmpr, nodintconst(i))
cmprw = conv(cmprw, elemType)
cmprw = conv(cmprw, convType)
// For code like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
// ssa will generate a single large load.
for offset := int64(1); offset < step; offset++ {
- lb := nod(OINDEX, cmpl, nodintconst(i+offset))
+ lb := ir.Nod(ir.OINDEX, cmpl, nodintconst(i+offset))
lb = conv(lb, elemType)
lb = conv(lb, convType)
- lb = nod(OLSH, lb, nodintconst(8*t.Elem().Width*offset))
- cmplw = nod(OOR, cmplw, lb)
- rb := nod(OINDEX, cmpr, nodintconst(i+offset))
+ lb = ir.Nod(ir.OLSH, lb, nodintconst(8*t.Elem().Width*offset))
+ cmplw = ir.Nod(ir.OOR, cmplw, lb)
+ rb := ir.Nod(ir.OINDEX, cmpr, nodintconst(i+offset))
rb = conv(rb, elemType)
rb = conv(rb, convType)
- rb = nod(OLSH, rb, nodintconst(8*t.Elem().Width*offset))
- cmprw = nod(OOR, cmprw, rb)
+ rb = ir.Nod(ir.OLSH, rb, nodintconst(8*t.Elem().Width*offset))
+ cmprw = ir.Nod(ir.OOR, cmprw, rb)
}
compare(cmplw, cmprw)
i += step
@@ -3428,13 +3433,13 @@ func walkcompare(n *Node, init *Nodes) *Node {
}
}
if expr == nil {
- expr = nodbool(n.Op == OEQ)
+ expr = nodbool(n.Op() == ir.OEQ)
// We still need to use cmpl and cmpr, in case they contain
// an expression which might panic. See issue 23837.
- t := temp(cmpl.Type)
- a1 := nod(OAS, t, cmpl)
+ t := temp(cmpl.Type())
+ a1 := ir.Nod(ir.OAS, t, cmpl)
a1 = typecheck(a1, ctxStmt)
- a2 := nod(OAS, t, cmpr)
+ a2 := ir.Nod(ir.OAS, t, cmpr)
a2 = typecheck(a2, ctxStmt)
init.Append(a1, a2)
}
@@ -3442,48 +3447,48 @@ func walkcompare(n *Node, init *Nodes) *Node {
return n
}
-func tracecmpArg(n *Node, t *types.Type, init *Nodes) *Node {
+func tracecmpArg(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
// Ugly hack to avoid "constant -1 overflows uintptr" errors, etc.
- if n.Op == OLITERAL && n.Type.IsSigned() && n.Int64Val() < 0 {
- n = copyexpr(n, n.Type, init)
+ if n.Op() == ir.OLITERAL && n.Type().IsSigned() && n.Int64Val() < 0 {
+ n = copyexpr(n, n.Type(), init)
}
return conv(n, t)
}
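
Throughout these hunks the mechanical change is the move from direct field access on the old *Node struct (n.Op, n.Left, n.Type) to methods on the new ir.Node interface (n.Op(), n.Left(), n.SetLeft(...)). A minimal standalone sketch of that accessor pattern, with hypothetical names rather than the real ir package:

    package main

    import "fmt"

    // Node mirrors the style of the new ir.Node interface: what used to be
    // struct fields become getter/setter pairs, so several concrete node
    // types can sit behind one interface.
    type Node interface {
        Op() string
        Left() Node
        SetLeft(Node)
    }

    // binary is one hypothetical concrete node.
    type binary struct {
        op   string
        left Node
    }

    func (n *binary) Op() string     { return n.op }
    func (n *binary) Left() Node     { return n.left }
    func (n *binary) SetLeft(l Node) { n.left = l }

    func main() {
        n := &binary{op: "OEQ"}
        n.SetLeft(&binary{op: "ONAME"})
        fmt.Println(n.Op(), n.Left().Op()) // OEQ ONAME
    }
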
-func walkcompareInterface(n *Node, init *Nodes) *Node {
- n.Right = cheapexpr(n.Right, init)
- n.Left = cheapexpr(n.Left, init)
- eqtab, eqdata := eqinterface(n.Left, n.Right)
- var cmp *Node
- if n.Op == OEQ {
- cmp = nod(OANDAND, eqtab, eqdata)
+func walkcompareInterface(n ir.Node, init *ir.Nodes) ir.Node {
+ n.SetRight(cheapexpr(n.Right(), init))
+ n.SetLeft(cheapexpr(n.Left(), init))
+ eqtab, eqdata := eqinterface(n.Left(), n.Right())
+ var cmp ir.Node
+ if n.Op() == ir.OEQ {
+ cmp = ir.Nod(ir.OANDAND, eqtab, eqdata)
} else {
- eqtab.Op = ONE
- cmp = nod(OOROR, eqtab, nod(ONOT, eqdata, nil))
+ eqtab.SetOp(ir.ONE)
+ cmp = ir.Nod(ir.OOROR, eqtab, ir.Nod(ir.ONOT, eqdata, nil))
}
return finishcompare(n, cmp, init)
}
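
walkcompareInterface lowers i == j on interfaces to a type-word comparison guarding a data comparison: eqinterface returns the two halves (eqtab, eqdata), which == joins with && and which != turns into a negated pair joined by ||. A standalone sketch of the shape, using an illustrative two-word layout rather than the runtime's real types:

    package main

    import "fmt"

    // iface mimics the two-word interface representation: a type (itab)
    // word and a data word. Names are illustrative only.
    type iface struct {
        tab  uintptr
        data uintptr
    }

    // ifaceEq is the shape the emitted code takes for i == j:
    // equal type words first, then compare the data.
    func ifaceEq(a, b iface, memequal func(x, y uintptr) bool) bool {
        return a.tab == b.tab && memequal(a.data, b.data)
    }

    func main() {
        eq := func(x, y uintptr) bool { return x == y }
        fmt.Println(ifaceEq(iface{1, 42}, iface{1, 42}, eq)) // true
        fmt.Println(ifaceEq(iface{1, 42}, iface{2, 42}, eq)) // false: different types
    }
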
-func walkcompareString(n *Node, init *Nodes) *Node {
+func walkcompareString(n ir.Node, init *ir.Nodes) ir.Node {
// Rewrite comparisons to short constant strings as length+byte-wise comparisons.
- var cs, ncs *Node // const string, non-const string
+ var cs, ncs ir.Node // const string, non-const string
switch {
- case Isconst(n.Left, CTSTR) && Isconst(n.Right, CTSTR):
+ case ir.IsConst(n.Left(), constant.String) && ir.IsConst(n.Right(), constant.String):
// ignore; will be constant evaluated
- case Isconst(n.Left, CTSTR):
- cs = n.Left
- ncs = n.Right
- case Isconst(n.Right, CTSTR):
- cs = n.Right
- ncs = n.Left
+ case ir.IsConst(n.Left(), constant.String):
+ cs = n.Left()
+ ncs = n.Right()
+ case ir.IsConst(n.Right(), constant.String):
+ cs = n.Right()
+ ncs = n.Left()
}
if cs != nil {
- cmp := n.Op
+ cmp := n.Op()
// Our comparison below assumes that the non-constant string
// is on the left hand side, so rewrite "" cmp x to x cmp "".
// See issue 24817.
- if Isconst(n.Left, CTSTR) {
+ if ir.IsConst(n.Left(), constant.String) {
cmp = brrev(cmp)
}
@@ -3502,12 +3507,12 @@ func walkcompareString(n *Node, init *Nodes) *Node {
combine64bit = thearch.LinkArch.RegSize >= 8
}
- var and Op
+ var and ir.Op
switch cmp {
- case OEQ:
- and = OANDAND
- case ONE:
- and = OOROR
+ case ir.OEQ:
+ and = ir.OANDAND
+ case ir.ONE:
+ and = ir.OOROR
default:
// Don't do byte-wise comparisons for <, <=, etc.
// They're fairly complicated.
@@ -3518,13 +3523,13 @@ func walkcompareString(n *Node, init *Nodes) *Node {
if len(s) > 0 {
ncs = safeexpr(ncs, init)
}
- r := nod(cmp, nod(OLEN, ncs, nil), nodintconst(int64(len(s))))
+ r := ir.Nod(cmp, ir.Nod(ir.OLEN, ncs, nil), nodintconst(int64(len(s))))
remains := len(s)
for i := 0; remains > 0; {
if remains == 1 || !canCombineLoads {
cb := nodintconst(int64(s[i]))
- ncb := nod(OINDEX, ncs, nodintconst(int64(i)))
- r = nod(and, r, nod(cmp, ncb, cb))
+ ncb := ir.Nod(ir.OINDEX, ncs, nodintconst(int64(i)))
+ r = ir.Nod(and, r, ir.Nod(cmp, ncb, cb))
remains--
i++
continue
@@ -3533,31 +3538,31 @@ func walkcompareString(n *Node, init *Nodes) *Node {
var convType *types.Type
switch {
case remains >= 8 && combine64bit:
- convType = types.Types[TINT64]
+ convType = types.Types[types.TINT64]
step = 8
case remains >= 4:
- convType = types.Types[TUINT32]
+ convType = types.Types[types.TUINT32]
step = 4
case remains >= 2:
- convType = types.Types[TUINT16]
+ convType = types.Types[types.TUINT16]
step = 2
}
- ncsubstr := nod(OINDEX, ncs, nodintconst(int64(i)))
+ ncsubstr := ir.Nod(ir.OINDEX, ncs, nodintconst(int64(i)))
ncsubstr = conv(ncsubstr, convType)
csubstr := int64(s[i])
// Calculate a large constant from bytes as a sequence of shifts and ors.
// Like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
// ssa will combine this into a single large load.
for offset := 1; offset < step; offset++ {
- b := nod(OINDEX, ncs, nodintconst(int64(i+offset)))
+ b := ir.Nod(ir.OINDEX, ncs, nodintconst(int64(i+offset)))
b = conv(b, convType)
- b = nod(OLSH, b, nodintconst(int64(8*offset)))
- ncsubstr = nod(OOR, ncsubstr, b)
+ b = ir.Nod(ir.OLSH, b, nodintconst(int64(8*offset)))
+ ncsubstr = ir.Nod(ir.OOR, ncsubstr, b)
csubstr |= int64(s[i+offset]) << uint8(8*offset)
}
csubstrPart := nodintconst(csubstr)
// Compare "step" bytes as once
- r = nod(and, r, nod(cmp, csubstrPart, ncsubstr))
+ r = ir.Nod(and, r, ir.Nod(cmp, csubstrPart, ncsubstr))
remains -= step
i += step
}
@@ -3565,26 +3570,26 @@ func walkcompareString(n *Node, init *Nodes) *Node {
}
}
- var r *Node
- if n.Op == OEQ || n.Op == ONE {
+ var r ir.Node
+ if n.Op() == ir.OEQ || n.Op() == ir.ONE {
// prepare for rewrite below
- n.Left = cheapexpr(n.Left, init)
- n.Right = cheapexpr(n.Right, init)
- eqlen, eqmem := eqstring(n.Left, n.Right)
+ n.SetLeft(cheapexpr(n.Left(), init))
+ n.SetRight(cheapexpr(n.Right(), init))
+ eqlen, eqmem := eqstring(n.Left(), n.Right())
// quick check of len before full compare for == or !=.
// memequal then tests equality up to length len.
- if n.Op == OEQ {
+ if n.Op() == ir.OEQ {
// len(left) == len(right) && memequal(left, right, len)
- r = nod(OANDAND, eqlen, eqmem)
+ r = ir.Nod(ir.OANDAND, eqlen, eqmem)
} else {
// len(left) != len(right) || !memequal(left, right, len)
- eqlen.Op = ONE
- r = nod(OOROR, eqlen, nod(ONOT, eqmem, nil))
+ eqlen.SetOp(ir.ONE)
+ r = ir.Nod(ir.OOROR, eqlen, ir.Nod(ir.ONOT, eqmem, nil))
}
} else {
// sys_cmpstring(s1, s2) :: 0
- r = mkcall("cmpstring", types.Types[TINT], init, conv(n.Left, types.Types[TSTRING]), conv(n.Right, types.Types[TSTRING]))
- r = nod(n.Op, r, nodintconst(0))
+ r = mkcall("cmpstring", types.Types[types.TINT], init, conv(n.Left(), types.Types[types.TSTRING]), conv(n.Right(), types.Types[types.TSTRING]))
+ r = ir.Nod(n.Op(), r, nodintconst(0))
}
return finishcompare(n, r, init)
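
For == and != against a short constant string, the rewrite above replaces the runtime call with a length test plus byte-wise tests (and, where canCombineLoads holds, wider combined loads). Written out by hand, the expansion for s == "abc" looks like:

    package main

    import "fmt"

    // cmpShort is the source-level shape of what walkcompareString emits
    // for s == "abc": check the length, then the bytes. With combined
    // loads, the three byte tests collapse into one wider comparison.
    func cmpShort(s string) bool {
        return len(s) == 3 && s[0] == 'a' && s[1] == 'b' && s[2] == 'c'
    }

    func main() {
        fmt.Println(cmpShort("abc"), cmpShort("abx")) // true false
    }
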
@@ -3592,36 +3597,36 @@ func walkcompareString(n *Node, init *Nodes) *Node {
// The result of finishcompare MUST be assigned back to n, e.g.
// n.Left = finishcompare(n.Left, r, init)
-func finishcompare(n, r *Node, init *Nodes) *Node {
+func finishcompare(n, r ir.Node, init *ir.Nodes) ir.Node {
r = typecheck(r, ctxExpr)
- r = conv(r, n.Type)
+ r = conv(r, n.Type())
r = walkexpr(r, init)
return r
}
// bounded reports whether the integer n must be in range [0, max).
-func bounded(n *Node, max int64) bool {
- if n.Type == nil || !n.Type.IsInteger() {
+func bounded(n ir.Node, max int64) bool {
+ if n.Type() == nil || !n.Type().IsInteger() {
return false
}
- sign := n.Type.IsSigned()
- bits := int32(8 * n.Type.Width)
+ sign := n.Type().IsSigned()
+ bits := int32(8 * n.Type().Width)
if smallintconst(n) {
v := n.Int64Val()
return 0 <= v && v < max
}
- switch n.Op {
- case OAND, OANDNOT:
+ switch n.Op() {
+ case ir.OAND, ir.OANDNOT:
v := int64(-1)
switch {
- case smallintconst(n.Left):
- v = n.Left.Int64Val()
- case smallintconst(n.Right):
- v = n.Right.Int64Val()
- if n.Op == OANDNOT {
+ case smallintconst(n.Left()):
+ v = n.Left().Int64Val()
+ case smallintconst(n.Right()):
+ v = n.Right().Int64Val()
+ if n.Op() == ir.OANDNOT {
v = ^v
if !sign {
v &= 1<<uint(bits) - 1
@@ -3632,26 +3637,26 @@ func bounded(n *Node, max int64) bool {
return true
}
- case OMOD:
- if !sign && smallintconst(n.Right) {
- v := n.Right.Int64Val()
+ case ir.OMOD:
+ if !sign && smallintconst(n.Right()) {
+ v := n.Right().Int64Val()
if 0 <= v && v <= max {
return true
}
}
- case ODIV:
- if !sign && smallintconst(n.Right) {
- v := n.Right.Int64Val()
+ case ir.ODIV:
+ if !sign && smallintconst(n.Right()) {
+ v := n.Right().Int64Val()
for bits > 0 && v >= 2 {
bits--
v >>= 1
}
}
- case ORSH:
- if !sign && smallintconst(n.Right) {
- v := n.Right.Int64Val()
+ case ir.ORSH:
+ if !sign && smallintconst(n.Right()) {
+ v := n.Right().Int64Val()
if v > int64(bits) {
return true
}
@@ -3667,8 +3672,8 @@ func bounded(n *Node, max int64) bool {
}
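
bounded feeds bounds-check elimination: masking, unsigned mod, unsigned div, and right shifts all pin an index into [0, max). The canonical beneficiary, as a runnable example:

    package main

    import "fmt"

    // maskedIndex is the kind of access bounded() proves safe: i&7 is
    // always in [0, 8), so no bounds check is needed on a fixed-size
    // 8-element array.
    func maskedIndex(a *[8]int, i int) int {
        return a[i&7]
    }

    func main() {
        var a [8]int
        a[5] = 42
        fmt.Println(maskedIndex(&a, 13)) // 13&7 == 5, prints 42
    }
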
// usemethod checks interface method calls for uses of reflect.Type.Method.
-func usemethod(n *Node) {
- t := n.Left.Type
+func usemethod(n ir.Node) {
+ t := n.Left().Type()
// Looking for either of:
// Method(int) reflect.Method
@@ -3690,7 +3695,7 @@ func usemethod(n *Node) {
}
if res1 == nil {
- if p0.Type.Etype != TINT {
+ if p0.Type.Etype != types.TINT {
return
}
} else {
@@ -3706,64 +3711,64 @@ func usemethod(n *Node) {
// (including global variables such as numImports - was issue #19028).
// Also need to check for reflect package itself (see Issue #38515).
if s := res0.Type.Sym; s != nil && s.Name == "Method" && isReflectPkg(s.Pkg) {
- Curfn.Func.SetReflectMethod(true)
+ Curfn.Func().SetReflectMethod(true)
// The LSym is initialized at this point. We need to set the attribute on the LSym.
- Curfn.Func.lsym.Set(obj.AttrReflectMethod, true)
+ Curfn.Func().LSym.Set(obj.AttrReflectMethod, true)
}
}
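
usemethod marks any function that makes calls like reflect.Type.Method, so the linker keeps method metadata that reflection might reach. The kind of call it detects, as a runnable example:

    package main

    import (
        "fmt"
        "reflect"
    )

    type T struct{}

    func (T) Hello() {}

    func main() {
        // This call is what usemethod looks for; the enclosing function is
        // flagged ReflectMethod so methods cannot be dead-code-eliminated.
        m := reflect.TypeOf(T{}).Method(0)
        fmt.Println(m.Name) // Hello
    }
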
-func usefield(n *Node) {
+func usefield(n ir.Node) {
if objabi.Fieldtrack_enabled == 0 {
return
}
- switch n.Op {
+ switch n.Op() {
default:
- Fatalf("usefield %v", n.Op)
+ base.Fatalf("usefield %v", n.Op())
- case ODOT, ODOTPTR:
+ case ir.ODOT, ir.ODOTPTR:
break
}
- if n.Sym == nil {
+ if n.Sym() == nil {
// No field name. This DOTPTR was built by the compiler for access
// to runtime data structures. Ignore.
return
}
- t := n.Left.Type
+ t := n.Left().Type()
if t.IsPtr() {
t = t.Elem()
}
field := n.Opt().(*types.Field)
if field == nil {
- Fatalf("usefield %v %v without paramfld", n.Left.Type, n.Sym)
+ base.Fatalf("usefield %v %v without paramfld", n.Left().Type(), n.Sym())
}
- if field.Sym != n.Sym || field.Offset != n.Xoffset {
- Fatalf("field inconsistency: %v,%v != %v,%v", field.Sym, field.Offset, n.Sym, n.Xoffset)
+ if field.Sym != n.Sym() || field.Offset != n.Offset() {
+ base.Fatalf("field inconsistency: %v,%v != %v,%v", field.Sym, field.Offset, n.Sym(), n.Offset())
}
if !strings.Contains(field.Note, "go:\"track\"") {
return
}
- outer := n.Left.Type
+ outer := n.Left().Type()
if outer.IsPtr() {
outer = outer.Elem()
}
if outer.Sym == nil {
- yyerror("tracked field must be in named struct type")
+ base.Errorf("tracked field must be in named struct type")
}
if !types.IsExported(field.Sym.Name) {
- yyerror("tracked field must be exported (upper case)")
+ base.Errorf("tracked field must be exported (upper case)")
}
sym := tracksym(outer, field)
- if Curfn.Func.FieldTrack == nil {
- Curfn.Func.FieldTrack = make(map[*types.Sym]struct{})
+ if Curfn.Func().FieldTrack == nil {
+ Curfn.Func().FieldTrack = make(map[*types.Sym]struct{})
}
- Curfn.Func.FieldTrack[sym] = struct{}{}
+ Curfn.Func().FieldTrack[sym] = struct{}{}
}
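
usefield implements field tracking, an opt-in toolchain feature: reads of an exported field of a named struct whose tag carries a go:"track" note are recorded per function. A sketch of what a tracked field looks like on the user side, assuming a fieldtrack-enabled toolchain:

    package main

    import "fmt"

    // Config declares one tracked field. The go:"track" note in the tag is
    // what usefield looks for; per the checks above, the field must be
    // exported and live in a named struct type.
    type Config struct {
        Debug bool `go:"track"`
        Name  string
    }

    func main() {
        c := Config{Debug: true, Name: "demo"}
        fmt.Println(c.Debug, c.Name) // the c.Debug read is what gets recorded
    }
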
-func candiscardlist(l Nodes) bool {
+func candiscardlist(l ir.Nodes) bool {
for _, n := range l.Slice() {
if !candiscard(n) {
return false
@@ -3772,96 +3777,94 @@ func candiscardlist(l Nodes) bool {
return true
}
-func candiscard(n *Node) bool {
+func candiscard(n ir.Node) bool {
if n == nil {
return true
}
- switch n.Op {
+ switch n.Op() {
default:
return false
// Discardable as long as the subpieces are.
- case ONAME,
- ONONAME,
- OTYPE,
- OPACK,
- OLITERAL,
- OADD,
- OSUB,
- OOR,
- OXOR,
- OADDSTR,
- OADDR,
- OANDAND,
- OBYTES2STR,
- ORUNES2STR,
- OSTR2BYTES,
- OSTR2RUNES,
- OCAP,
- OCOMPLIT,
- OMAPLIT,
- OSTRUCTLIT,
- OARRAYLIT,
- OSLICELIT,
- OPTRLIT,
- OCONV,
- OCONVIFACE,
- OCONVNOP,
- ODOT,
- OEQ,
- ONE,
- OLT,
- OLE,
- OGT,
- OGE,
- OKEY,
- OSTRUCTKEY,
- OLEN,
- OMUL,
- OLSH,
- ORSH,
- OAND,
- OANDNOT,
- ONEW,
- ONOT,
- OBITNOT,
- OPLUS,
- ONEG,
- OOROR,
- OPAREN,
- ORUNESTR,
- OREAL,
- OIMAG,
- OCOMPLEX:
+ case ir.ONAME,
+ ir.ONONAME,
+ ir.OTYPE,
+ ir.OPACK,
+ ir.OLITERAL,
+ ir.ONIL,
+ ir.OADD,
+ ir.OSUB,
+ ir.OOR,
+ ir.OXOR,
+ ir.OADDSTR,
+ ir.OADDR,
+ ir.OANDAND,
+ ir.OBYTES2STR,
+ ir.ORUNES2STR,
+ ir.OSTR2BYTES,
+ ir.OSTR2RUNES,
+ ir.OCAP,
+ ir.OCOMPLIT,
+ ir.OMAPLIT,
+ ir.OSTRUCTLIT,
+ ir.OARRAYLIT,
+ ir.OSLICELIT,
+ ir.OPTRLIT,
+ ir.OCONV,
+ ir.OCONVIFACE,
+ ir.OCONVNOP,
+ ir.ODOT,
+ ir.OEQ,
+ ir.ONE,
+ ir.OLT,
+ ir.OLE,
+ ir.OGT,
+ ir.OGE,
+ ir.OKEY,
+ ir.OSTRUCTKEY,
+ ir.OLEN,
+ ir.OMUL,
+ ir.OLSH,
+ ir.ORSH,
+ ir.OAND,
+ ir.OANDNOT,
+ ir.ONEW,
+ ir.ONOT,
+ ir.OBITNOT,
+ ir.OPLUS,
+ ir.ONEG,
+ ir.OOROR,
+ ir.OPAREN,
+ ir.ORUNESTR,
+ ir.OREAL,
+ ir.OIMAG,
+ ir.OCOMPLEX:
break
// Discardable as long as we know it's not division by zero.
- case ODIV, OMOD:
- if Isconst(n.Right, CTINT) && n.Right.Val().U.(*Mpint).CmpInt64(0) != 0 {
- break
- }
- if Isconst(n.Right, CTFLT) && n.Right.Val().U.(*Mpflt).CmpFloat64(0) != 0 {
+ case ir.ODIV, ir.OMOD:
+ if n.Right().Op() == ir.OLITERAL && constant.Sign(n.Right().Val()) != 0 {
break
}
return false
// Discardable as long as we know it won't fail because of a bad size.
- case OMAKECHAN, OMAKEMAP:
- if Isconst(n.Left, CTINT) && n.Left.Val().U.(*Mpint).CmpInt64(0) == 0 {
+ case ir.OMAKECHAN, ir.OMAKEMAP:
+ if ir.IsConst(n.Left(), constant.Int) && constant.Sign(n.Left().Val()) == 0 {
break
}
return false
// Difficult to tell what sizes are okay.
- case OMAKESLICE:
+ case ir.OMAKESLICE:
return false
- case OMAKESLICECOPY:
+ case ir.OMAKESLICECOPY:
return false
}
- if !candiscard(n.Left) || !candiscard(n.Right) || !candiscardlist(n.Ninit) || !candiscardlist(n.Nbody) || !candiscardlist(n.List) || !candiscardlist(n.Rlist) {
+ if !candiscard(n.Left()) || !candiscard(n.Right()) || !candiscardlist(n.Init()) || !candiscardlist(n.Body()) || !candiscardlist(n.List()) || !candiscardlist(n.Rlist()) {
return false
}
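
candiscard decides whether an unused expression can simply be dropped: pure operators and literals qualify; division and mod only with a provably nonzero constant divisor; make(chan)/make(map) only with a constant-zero size; anything that might panic or have effects does not. Roughly, the distinctions it draws:

    package main

    // Illustrations of the distinctions candiscard draws for an unused value.
    func examples(x, y int, s []int) {
        _ = x + y        // pure arithmetic: discardable
        _ = x / 2        // nonzero constant divisor: discardable
        _ = x / y        // y might be zero, may panic: not discardable
        _ = append(s, x) // has effects: not discardable
    }

    func main() { examples(1, 2, nil) }
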
@@ -3888,67 +3891,67 @@ var wrapCall_prgen int
// The result of wrapCall MUST be assigned back to n, e.g.
// n.Left = wrapCall(n.Left, init)
-func wrapCall(n *Node, init *Nodes) *Node {
- if n.Ninit.Len() != 0 {
- walkstmtlist(n.Ninit.Slice())
- init.AppendNodes(&n.Ninit)
+func wrapCall(n ir.Node, init *ir.Nodes) ir.Node {
+ if n.Init().Len() != 0 {
+ walkstmtlist(n.Init().Slice())
+ init.AppendNodes(n.PtrInit())
}
- isBuiltinCall := n.Op != OCALLFUNC && n.Op != OCALLMETH && n.Op != OCALLINTER
+ isBuiltinCall := n.Op() != ir.OCALLFUNC && n.Op() != ir.OCALLMETH && n.Op() != ir.OCALLINTER
// Turn f(a, b, []T{c, d, e}...) back into f(a, b, c, d, e).
if !isBuiltinCall && n.IsDDD() {
- last := n.List.Len() - 1
- if va := n.List.Index(last); va.Op == OSLICELIT {
- n.List.Set(append(n.List.Slice()[:last], va.List.Slice()...))
+ last := n.List().Len() - 1
+ if va := n.List().Index(last); va.Op() == ir.OSLICELIT {
+ n.PtrList().Set(append(n.List().Slice()[:last], va.List().Slice()...))
n.SetIsDDD(false)
}
}
// origArgs keeps track of what argument is uintptr-unsafe/unsafe-uintptr conversion.
- origArgs := make([]*Node, n.List.Len())
- t := nod(OTFUNC, nil, nil)
- for i, arg := range n.List.Slice() {
+ origArgs := make([]ir.Node, n.List().Len())
+ t := ir.Nod(ir.OTFUNC, nil, nil)
+ for i, arg := range n.List().Slice() {
s := lookupN("a", i)
- if !isBuiltinCall && arg.Op == OCONVNOP && arg.Type.IsUintptr() && arg.Left.Type.IsUnsafePtr() {
+ if !isBuiltinCall && arg.Op() == ir.OCONVNOP && arg.Type().IsUintptr() && arg.Left().Type().IsUnsafePtr() {
origArgs[i] = arg
- arg = arg.Left
- n.List.SetIndex(i, arg)
+ arg = arg.Left()
+ n.List().SetIndex(i, arg)
}
- t.List.Append(symfield(s, arg.Type))
+ t.PtrList().Append(symfield(s, arg.Type()))
}
wrapCall_prgen++
sym := lookupN("wrap·", wrapCall_prgen)
fn := dclfunc(sym, t)
- args := paramNnames(t.Type)
+ args := paramNnames(t.Type())
for i, origArg := range origArgs {
if origArg == nil {
continue
}
- arg := nod(origArg.Op, args[i], nil)
- arg.Type = origArg.Type
+ arg := ir.Nod(origArg.Op(), args[i], nil)
+ arg.SetType(origArg.Type())
args[i] = arg
}
- call := nod(n.Op, nil, nil)
+ call := ir.Nod(n.Op(), nil, nil)
if !isBuiltinCall {
- call.Op = OCALL
- call.Left = n.Left
+ call.SetOp(ir.OCALL)
+ call.SetLeft(n.Left())
call.SetIsDDD(n.IsDDD())
}
- call.List.Set(args)
- fn.Nbody.Set1(call)
+ call.PtrList().Set(args)
+ fn.PtrBody().Set1(call)
funcbody()
fn = typecheck(fn, ctxStmt)
- typecheckslice(fn.Nbody.Slice(), ctxStmt)
+ typecheckslice(fn.Body().Slice(), ctxStmt)
xtop = append(xtop, fn)
- call = nod(OCALL, nil, nil)
- call.Left = fn.Func.Nname
- call.List.Set(n.List.Slice())
+ call = ir.Nod(ir.OCALL, nil, nil)
+ call.SetLeft(fn.Func().Nname)
+ call.PtrList().Set(n.List().Slice())
call = typecheck(call, ctxStmt)
call = walkexpr(call, init)
return call
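
wrapCall handles go/defer of calls that cannot be issued directly (builtins, and calls carrying unsafe-uintptr conversions): it moves the call into a generated function wrap·N and calls that instead, preserving the rule that arguments are evaluated at the go/defer statement. Written out by hand (wg is only scaffolding for a deterministic exit):

    package main

    import "sync"

    func main() {
        var wg sync.WaitGroup
        x := 1
        wg.Add(1)
        // go println(x) on a builtin is compiled roughly as go wrap·1(x),
        // where func wrap·1(a0 int) { println(a0) }. By hand:
        go func(a0 int) {
            defer wg.Done()
            println(a0) // prints 1: a0 was evaluated at the go statement
        }(x)
        x = 2
        wg.Wait()
    }
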
@@ -3959,15 +3962,15 @@ func wrapCall(n *Node, init *Nodes) *Node {
// type syntax expression n.Type.
// The result of substArgTypes MUST be assigned back to old, e.g.
// n.Left = substArgTypes(n.Left, t1, t2)
-func substArgTypes(old *Node, types_ ...*types.Type) *Node {
- n := old.copy()
+func substArgTypes(old ir.Node, types_ ...*types.Type) ir.Node {
+ n := ir.Copy(old)
for _, t := range types_ {
dowidth(t)
}
- n.Type = types.SubstAny(n.Type, &types_)
+ n.SetType(types.SubstAny(n.Type(), &types_))
if len(types_) > 0 {
- Fatalf("substArgTypes: too many argument types")
+ base.Fatalf("substArgTypes: too many argument types")
}
return n
}
@@ -3989,18 +3992,18 @@ func canMergeLoads() bool {
// isRuneCount reports whether n is of the form len([]rune(string)).
// These are optimized into a call to runtime.countrunes.
-func isRuneCount(n *Node) bool {
- return Debug.N == 0 && !instrumenting && n.Op == OLEN && n.Left.Op == OSTR2RUNES
+func isRuneCount(n ir.Node) bool {
+ return base.Flag.N == 0 && !instrumenting && n.Op() == ir.OLEN && n.Left().Op() == ir.OSTR2RUNES
}
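
isRuneCount recognizes len([]rune(s)) so it can be lowered to runtime.countrunes, counting runes without materializing the slice. Its observable behavior matches the library routine:

    package main

    import (
        "fmt"
        "unicode/utf8"
    )

    func main() {
        s := "héllo, 世界"
        // With optimizations on (and no instrumentation), the next line is
        // compiled as a call to runtime.countrunes; no []rune is allocated.
        fmt.Println(len([]rune(s)))            // 9
        fmt.Println(utf8.RuneCountInString(s)) // 9, the library equivalent
    }
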
-func walkCheckPtrAlignment(n *Node, init *Nodes, count *Node) *Node {
- if !n.Type.IsPtr() {
- Fatalf("expected pointer type: %v", n.Type)
+func walkCheckPtrAlignment(n ir.Node, init *ir.Nodes, count ir.Node) ir.Node {
+ if !n.Type().IsPtr() {
+ base.Fatalf("expected pointer type: %v", n.Type())
}
- elem := n.Type.Elem()
+ elem := n.Type().Elem()
if count != nil {
if !elem.IsArray() {
- Fatalf("expected array type: %v", elem)
+ base.Fatalf("expected array type: %v", elem)
}
elem = elem.Elem()
}
@@ -4014,14 +4017,14 @@ func walkCheckPtrAlignment(n *Node, init *Nodes, count *Node) *Node {
count = nodintconst(1)
}
- n.Left = cheapexpr(n.Left, init)
- init.Append(mkcall("checkptrAlignment", nil, init, convnop(n.Left, types.Types[TUNSAFEPTR]), typename(elem), conv(count, types.Types[TUINTPTR])))
+ n.SetLeft(cheapexpr(n.Left(), init))
+ init.Append(mkcall("checkptrAlignment", nil, init, convnop(n.Left(), types.Types[types.TUNSAFEPTR]), typename(elem), conv(count, types.Types[types.TUINTPTR])))
return n
}
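
walkCheckPtrAlignment guards unsafe.Pointer-to-*T conversions when checkptr instrumentation is on (-d=checkptr, enabled by default under -race on most platforms): the inserted checkptrAlignment call panics at run time if the pointer is misaligned for T. The pattern it instruments:

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        buf := make([]byte, 16)
        p := unsafe.Pointer(&buf[1]) // odd offset: misaligned for int64
        // This conversion is the pattern walkCheckPtrAlignment instruments.
        // Built normally it "works"; built with -gcflags=all=-d=checkptr
        // it panics with a misaligned-pointer-conversion error.
        q := (*int64)(p)
        fmt.Println(q != nil)
    }
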
var walkCheckPtrArithmeticMarker byte
-func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node {
+func walkCheckPtrArithmetic(n ir.Node, init *ir.Nodes) ir.Node {
// Calling cheapexpr(n, init) below leads to a recursive call
// to walkexpr, which leads us back here again. Use n.Opt to
// prevent infinite loops.
@@ -4030,19 +4033,19 @@ func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node {
} else if opt != nil {
// We use n.Opt() here because today it's not used for OCONVNOP. If that changes,
// there's no guarantee that temporarily replacing it is safe, so just hard fail here.
- Fatalf("unexpected Opt: %v", opt)
+ base.Fatalf("unexpected Opt: %v", opt)
}
n.SetOpt(&walkCheckPtrArithmeticMarker)
defer n.SetOpt(nil)
// TODO(mdempsky): Make stricter. We only need to exempt
// reflect.Value.Pointer and reflect.Value.UnsafeAddr.
- switch n.Left.Op {
- case OCALLFUNC, OCALLMETH, OCALLINTER:
+ switch n.Left().Op() {
+ case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
return n
}
- if n.Left.Op == ODOTPTR && isReflectHeaderDataField(n.Left) {
+ if n.Left().Op() == ir.ODOTPTR && isReflectHeaderDataField(n.Left()) {
return n
}
@@ -4052,30 +4055,30 @@ func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node {
// "It is valid both to add and to subtract offsets from a
// pointer in this way. It is also valid to use &^ to round
// pointers, usually for alignment."
- var originals []*Node
- var walk func(n *Node)
- walk = func(n *Node) {
- switch n.Op {
- case OADD:
- walk(n.Left)
- walk(n.Right)
- case OSUB, OANDNOT:
- walk(n.Left)
- case OCONVNOP:
- if n.Left.Type.IsUnsafePtr() {
- n.Left = cheapexpr(n.Left, init)
- originals = append(originals, convnop(n.Left, types.Types[TUNSAFEPTR]))
+ var originals []ir.Node
+ var walk func(n ir.Node)
+ walk = func(n ir.Node) {
+ switch n.Op() {
+ case ir.OADD:
+ walk(n.Left())
+ walk(n.Right())
+ case ir.OSUB, ir.OANDNOT:
+ walk(n.Left())
+ case ir.OCONVNOP:
+ if n.Left().Type().IsUnsafePtr() {
+ n.SetLeft(cheapexpr(n.Left(), init))
+ originals = append(originals, convnop(n.Left(), types.Types[types.TUNSAFEPTR]))
}
}
}
- walk(n.Left)
+ walk(n.Left())
n = cheapexpr(n, init)
- slice := mkdotargslice(types.NewSlice(types.Types[TUNSAFEPTR]), originals)
- slice.Esc = EscNone
+ slice := mkdotargslice(types.NewSlice(types.Types[types.TUNSAFEPTR]), originals)
+ slice.SetEsc(EscNone)
- init.Append(mkcall("checkptrArithmetic", nil, init, convnop(n, types.Types[TUNSAFEPTR]), slice))
+ init.Append(mkcall("checkptrArithmetic", nil, init, convnop(n, types.Types[types.TUNSAFEPTR]), slice))
// TODO(khr): Mark backing store of slice as dead. This will allow us to reuse
// the backing store for multiple calls to checkptrArithmetic.
@@ -4085,6 +4088,6 @@ func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node {
// checkPtr reports whether pointer checking should be enabled for
// function fn at a given level. See debugHelpFooter for defined
// levels.
-func checkPtr(fn *Node, level int) bool {
- return Debug_checkptr >= level && fn.Func.Pragma&NoCheckPtr == 0
+func checkPtr(fn ir.Node, level int) bool {
+ return base.Debug.Checkptr >= level && fn.Func().Pragma&ir.NoCheckPtr == 0
}
diff --git a/src/cmd/compile/internal/gc/bitset.go b/src/cmd/compile/internal/ir/bitset.go
index ed5eea0a11..29f136296f 100644
--- a/src/cmd/compile/internal/gc/bitset.go
+++ b/src/cmd/compile/internal/ir/bitset.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package ir
type bitset8 uint8
diff --git a/src/cmd/compile/internal/gc/class_string.go b/src/cmd/compile/internal/ir/class_string.go
index a4084a7535..866bf1a6b5 100644
--- a/src/cmd/compile/internal/gc/class_string.go
+++ b/src/cmd/compile/internal/ir/class_string.go
@@ -1,6 +1,6 @@
// Code generated by "stringer -type=Class"; DO NOT EDIT.
-package gc
+package ir
import "strconv"
diff --git a/src/cmd/compile/internal/gc/dump.go b/src/cmd/compile/internal/ir/dump.go
index 29eb1c1e48..fe1410969f 100644
--- a/src/cmd/compile/internal/gc/dump.go
+++ b/src/cmd/compile/internal/ir/dump.go
@@ -6,21 +6,23 @@
// for debugging purposes. The code is customized for Node graphs
// and may be used for an alternative view of the node structure.
-package gc
+package ir
import (
- "cmd/compile/internal/types"
- "cmd/internal/src"
"fmt"
"io"
"os"
"reflect"
"regexp"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
)
// dump is like fdump but prints to stderr.
-func dump(root interface{}, filter string, depth int) {
- fdump(os.Stderr, root, filter, depth)
+func DumpAny(root interface{}, filter string, depth int) {
+ FDumpAny(os.Stderr, root, filter, depth)
}
// fdump prints the structure of a rooted data structure
@@ -40,7 +42,7 @@ func dump(root interface{}, filter string, depth int) {
// rather than their type; struct fields with zero values or
// non-matching field names are omitted, and "…" means recursion
// depth has been reached or struct fields have been omitted.
-func fdump(w io.Writer, root interface{}, filter string, depth int) {
+func FDumpAny(w io.Writer, root interface{}, filter string, depth int) {
if root == nil {
fmt.Fprintln(w, "nil")
return
@@ -146,11 +148,8 @@ func (p *dumper) dump(x reflect.Value, depth int) {
x = reflect.ValueOf(v.Slice())
case src.XPos:
- p.printf("%s", linestr(v))
+ p.printf("%s", base.FmtPos(v))
return
-
- case *types.Node:
- x = reflect.ValueOf(asNode(v))
}
switch x.Kind() {
@@ -201,9 +200,9 @@ func (p *dumper) dump(x reflect.Value, depth int) {
typ := x.Type()
isNode := false
- if n, ok := x.Interface().(Node); ok {
+ if n, ok := x.Interface().(node); ok {
isNode = true
- p.printf("%s %s {", n.Op.String(), p.addr(x))
+ p.printf("%s %s {", n.op.String(), p.addr(x))
} else {
p.printf("%s {", typ)
}
diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/ir/fmt.go
index f92f5d0e88..f394219c05 100644
--- a/src/cmd/compile/internal/gc/fmt.go
+++ b/src/cmd/compile/internal/ir/fmt.go
@@ -2,18 +2,21 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package ir
import (
"bytes"
- "cmd/compile/internal/types"
- "cmd/internal/src"
"fmt"
+ "go/constant"
"io"
"strconv"
"strings"
"sync"
"unicode/utf8"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
)
// A FmtFlag value is a set of flags (or 0).
@@ -46,7 +49,7 @@ func fmtFlag(s fmt.State, verb rune) FmtFlag {
flag |= FmtSign
}
if s.Flag(' ') {
- Fatalf("FmtUnsigned in format string")
+ base.Fatalf("FmtUnsigned in format string")
}
if _, ok := s.Precision(); ok {
flag |= FmtComma
@@ -96,7 +99,7 @@ func fmtFlag(s fmt.State, verb rune) FmtFlag {
// *types.Sym, *types.Type, and *Node types use the flags below to set the format mode
const (
- FErr fmtMode = iota
+ FErr FmtMode = iota
FDbg
FTypeId
FTypeIdName // same as FTypeId, but use package name instead of prefix
@@ -129,7 +132,7 @@ const (
// %- v type identifiers with package name instead of prefix (typesym, dcommontype, typehash)
// update returns the results of applying f to mode.
-func (f FmtFlag) update(mode fmtMode) (FmtFlag, fmtMode) {
+func (f FmtFlag) update(mode FmtMode) (FmtFlag, FmtMode) {
switch {
case f&FmtSign != 0:
mode = FDbg
@@ -145,7 +148,7 @@ func (f FmtFlag) update(mode fmtMode) (FmtFlag, fmtMode) {
return f, mode
}
-var goopnames = []string{
+var OpNames = []string{
OADDR: "&",
OADD: "+",
OADDSTR: "+",
@@ -215,7 +218,7 @@ func (o Op) GoString() string {
return fmt.Sprintf("%#v", o)
}
-func (o Op) format(s fmt.State, verb rune, mode fmtMode) {
+func (o Op) format(s fmt.State, verb rune, mode FmtMode) {
switch verb {
case 'v':
o.oconv(s, fmtFlag(s, verb), mode)
@@ -225,10 +228,10 @@ func (o Op) format(s fmt.State, verb rune, mode fmtMode) {
}
}
-func (o Op) oconv(s fmt.State, flag FmtFlag, mode fmtMode) {
+func (o Op) oconv(s fmt.State, flag FmtFlag, mode FmtMode) {
if flag&FmtSharp != 0 || mode != FDbg {
- if int(o) < len(goopnames) && goopnames[o] != "" {
- fmt.Fprint(s, goopnames[o])
+ if int(o) < len(OpNames) && OpNames[o] != "" {
+ fmt.Fprint(s, OpNames[o])
return
}
}
@@ -237,214 +240,141 @@ func (o Op) oconv(s fmt.State, flag FmtFlag, mode fmtMode) {
fmt.Fprint(s, o.String())
}
-type (
- fmtMode int
-
- fmtNodeErr Node
- fmtNodeDbg Node
- fmtNodeTypeId Node
- fmtNodeTypeIdName Node
-
- fmtOpErr Op
- fmtOpDbg Op
- fmtOpTypeId Op
- fmtOpTypeIdName Op
-
- fmtTypeErr types.Type
- fmtTypeDbg types.Type
- fmtTypeTypeId types.Type
- fmtTypeTypeIdName types.Type
-
- fmtSymErr types.Sym
- fmtSymDbg types.Sym
- fmtSymTypeId types.Sym
- fmtSymTypeIdName types.Sym
-
- fmtNodesErr Nodes
- fmtNodesDbg Nodes
- fmtNodesTypeId Nodes
- fmtNodesTypeIdName Nodes
-)
+type FmtMode int
-func (n *fmtNodeErr) Format(s fmt.State, verb rune) { (*Node)(n).format(s, verb, FErr) }
-func (n *fmtNodeDbg) Format(s fmt.State, verb rune) { (*Node)(n).format(s, verb, FDbg) }
-func (n *fmtNodeTypeId) Format(s fmt.State, verb rune) { (*Node)(n).format(s, verb, FTypeId) }
-func (n *fmtNodeTypeIdName) Format(s fmt.State, verb rune) { (*Node)(n).format(s, verb, FTypeIdName) }
-func (n *Node) Format(s fmt.State, verb rune) { n.format(s, verb, FErr) }
-
-func (o fmtOpErr) Format(s fmt.State, verb rune) { Op(o).format(s, verb, FErr) }
-func (o fmtOpDbg) Format(s fmt.State, verb rune) { Op(o).format(s, verb, FDbg) }
-func (o fmtOpTypeId) Format(s fmt.State, verb rune) { Op(o).format(s, verb, FTypeId) }
-func (o fmtOpTypeIdName) Format(s fmt.State, verb rune) { Op(o).format(s, verb, FTypeIdName) }
-func (o Op) Format(s fmt.State, verb rune) { o.format(s, verb, FErr) }
-
-func (t *fmtTypeErr) Format(s fmt.State, verb rune) { typeFormat((*types.Type)(t), s, verb, FErr) }
-func (t *fmtTypeDbg) Format(s fmt.State, verb rune) { typeFormat((*types.Type)(t), s, verb, FDbg) }
-func (t *fmtTypeTypeId) Format(s fmt.State, verb rune) {
- typeFormat((*types.Type)(t), s, verb, FTypeId)
+type fmtNode struct {
+ x Node
+ m FmtMode
}
-func (t *fmtTypeTypeIdName) Format(s fmt.State, verb rune) {
- typeFormat((*types.Type)(t), s, verb, FTypeIdName)
+
+func (f *fmtNode) Format(s fmt.State, verb rune) { nodeFormat(f.x, s, verb, f.m) }
+
+type fmtOp struct {
+ x Op
+ m FmtMode
}
-// func (t *types.Type) Format(s fmt.State, verb rune) // in package types
+func (f *fmtOp) Format(s fmt.State, verb rune) { f.x.format(s, verb, f.m) }
-func (y *fmtSymErr) Format(s fmt.State, verb rune) { symFormat((*types.Sym)(y), s, verb, FErr) }
-func (y *fmtSymDbg) Format(s fmt.State, verb rune) { symFormat((*types.Sym)(y), s, verb, FDbg) }
-func (y *fmtSymTypeId) Format(s fmt.State, verb rune) { symFormat((*types.Sym)(y), s, verb, FTypeId) }
-func (y *fmtSymTypeIdName) Format(s fmt.State, verb rune) {
- symFormat((*types.Sym)(y), s, verb, FTypeIdName)
+type fmtType struct {
+ x *types.Type
+ m FmtMode
}
-// func (y *types.Sym) Format(s fmt.State, verb rune) // in package types { y.format(s, verb, FErr) }
+func (f *fmtType) Format(s fmt.State, verb rune) { typeFormat(f.x, s, verb, f.m) }
-func (n fmtNodesErr) Format(s fmt.State, verb rune) { (Nodes)(n).format(s, verb, FErr) }
-func (n fmtNodesDbg) Format(s fmt.State, verb rune) { (Nodes)(n).format(s, verb, FDbg) }
-func (n fmtNodesTypeId) Format(s fmt.State, verb rune) { (Nodes)(n).format(s, verb, FTypeId) }
-func (n fmtNodesTypeIdName) Format(s fmt.State, verb rune) { (Nodes)(n).format(s, verb, FTypeIdName) }
-func (n Nodes) Format(s fmt.State, verb rune) { n.format(s, verb, FErr) }
+type fmtSym struct {
+ x *types.Sym
+ m FmtMode
+}
+
+func (f *fmtSym) Format(s fmt.State, verb rune) { symFormat(f.x, s, verb, f.m) }
-func (m fmtMode) Fprintf(s fmt.State, format string, args ...interface{}) {
+type fmtNodes struct {
+ x Nodes
+ m FmtMode
+}
+
+func (f *fmtNodes) Format(s fmt.State, verb rune) { f.x.format(s, verb, f.m) }
+
+func (n *node) Format(s fmt.State, verb rune) {
+ FmtNode(n, s, verb)
+}
+
+func FmtNode(n Node, s fmt.State, verb rune) {
+ nodeFormat(n, s, verb, FErr)
+}
+
+func (o Op) Format(s fmt.State, verb rune) { o.format(s, verb, FErr) }
+
+// func (t *types.Type) Format(s fmt.State, verb rune) // in package types
+// func (y *types.Sym) Format(s fmt.State, verb rune) // in package types { y.format(s, verb, FErr) }
+func (n Nodes) Format(s fmt.State, verb rune) { n.format(s, verb, FErr) }
+
+func (m FmtMode) Fprintf(s fmt.State, format string, args ...interface{}) {
m.prepareArgs(args)
fmt.Fprintf(s, format, args...)
}
-func (m fmtMode) Sprintf(format string, args ...interface{}) string {
+func (m FmtMode) Sprintf(format string, args ...interface{}) string {
m.prepareArgs(args)
return fmt.Sprintf(format, args...)
}
-func (m fmtMode) Sprint(args ...interface{}) string {
+func (m FmtMode) Sprint(args ...interface{}) string {
m.prepareArgs(args)
return fmt.Sprint(args...)
}
-func (m fmtMode) prepareArgs(args []interface{}) {
- switch m {
- case FErr:
- for i, arg := range args {
- switch arg := arg.(type) {
- case Op:
- args[i] = fmtOpErr(arg)
- case *Node:
- args[i] = (*fmtNodeErr)(arg)
- case *types.Type:
- args[i] = (*fmtTypeErr)(arg)
- case *types.Sym:
- args[i] = (*fmtSymErr)(arg)
- case Nodes:
- args[i] = fmtNodesErr(arg)
- case Val, int32, int64, string, types.EType:
- // OK: printing these types doesn't depend on mode
- default:
- Fatalf("mode.prepareArgs type %T", arg)
- }
- }
- case FDbg:
- for i, arg := range args {
- switch arg := arg.(type) {
- case Op:
- args[i] = fmtOpDbg(arg)
- case *Node:
- args[i] = (*fmtNodeDbg)(arg)
- case *types.Type:
- args[i] = (*fmtTypeDbg)(arg)
- case *types.Sym:
- args[i] = (*fmtSymDbg)(arg)
- case Nodes:
- args[i] = fmtNodesDbg(arg)
- case Val, int32, int64, string, types.EType:
- // OK: printing these types doesn't depend on mode
- default:
- Fatalf("mode.prepareArgs type %T", arg)
- }
- }
- case FTypeId:
- for i, arg := range args {
- switch arg := arg.(type) {
- case Op:
- args[i] = fmtOpTypeId(arg)
- case *Node:
- args[i] = (*fmtNodeTypeId)(arg)
- case *types.Type:
- args[i] = (*fmtTypeTypeId)(arg)
- case *types.Sym:
- args[i] = (*fmtSymTypeId)(arg)
- case Nodes:
- args[i] = fmtNodesTypeId(arg)
- case Val, int32, int64, string, types.EType:
- // OK: printing these types doesn't depend on mode
- default:
- Fatalf("mode.prepareArgs type %T", arg)
- }
- }
- case FTypeIdName:
- for i, arg := range args {
- switch arg := arg.(type) {
- case Op:
- args[i] = fmtOpTypeIdName(arg)
- case *Node:
- args[i] = (*fmtNodeTypeIdName)(arg)
- case *types.Type:
- args[i] = (*fmtTypeTypeIdName)(arg)
- case *types.Sym:
- args[i] = (*fmtSymTypeIdName)(arg)
- case Nodes:
- args[i] = fmtNodesTypeIdName(arg)
- case Val, int32, int64, string, types.EType:
- // OK: printing these types doesn't depend on mode
- default:
- Fatalf("mode.prepareArgs type %T", arg)
- }
+func (m FmtMode) prepareArgs(args []interface{}) {
+ for i, arg := range args {
+ switch arg := arg.(type) {
+ case Op:
+ args[i] = &fmtOp{arg, m}
+ case Node:
+ args[i] = &fmtNode{arg, m}
+ case nil:
+ args[i] = &fmtNode{nil, m} // assume this was a node interface
+ case *types.Type:
+ args[i] = &fmtType{arg, m}
+ case *types.Sym:
+ args[i] = &fmtSym{arg, m}
+ case Nodes:
+ args[i] = &fmtNodes{arg, m}
+ case int32, int64, string, types.EType, constant.Value:
+ // OK: printing these types doesn't depend on mode
+ default:
+ base.Fatalf("mode.prepareArgs type %T", arg)
}
- default:
- Fatalf("mode.prepareArgs mode %d", m)
}
}
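
The rewrite above collapses the old per-(kind, mode) wrapper types into one small struct per kind that carries its FmtMode, with prepareArgs boxing each argument once. The pattern, standalone with a toy value kind:

    package main

    import "fmt"

    type mode int

    const (
        modeErr mode = iota
        modeDbg
    )

    // fmtInt pairs a value with a print mode, in the style of the new
    // fmtNode/fmtOp/fmtType/fmtSym wrappers.
    type fmtInt struct {
        x int
        m mode
    }

    func (f fmtInt) Format(s fmt.State, verb rune) {
        if f.m == modeDbg {
            fmt.Fprintf(s, "int(%d)", f.x)
            return
        }
        fmt.Fprintf(s, "%d", f.x)
    }

    func main() {
        fmt.Printf("%v %v\n", fmtInt{7, modeErr}, fmtInt{7, modeDbg}) // 7 int(7)
    }
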
-func (n *Node) format(s fmt.State, verb rune, mode fmtMode) {
+func nodeFormat(n Node, s fmt.State, verb rune, mode FmtMode) {
switch verb {
case 'v', 'S', 'L':
- n.nconv(s, fmtFlag(s, verb), mode)
+ nconvFmt(n, s, fmtFlag(s, verb), mode)
case 'j':
- n.jconv(s, fmtFlag(s, verb))
+ jconvFmt(n, s, fmtFlag(s, verb))
default:
fmt.Fprintf(s, "%%!%c(*Node=%p)", verb, n)
}
}
+// EscFmt is set by the escape analysis code to add escape analysis details to the node print.
+var EscFmt func(n Node, short bool) string
+
// *Node details
-func (n *Node) jconv(s fmt.State, flag FmtFlag) {
- c := flag & FmtShort
+func jconvFmt(n Node, s fmt.State, flag FmtFlag) {
+ short := flag&FmtShort != 0
- // Useful to see which nodes in a Node Dump/dumplist are actually identical
- if Debug_dumpptrs != 0 {
+ // Useful to see which nodes in an AST printout are actually identical
+ if base.Debug.DumpPtrs != 0 {
fmt.Fprintf(s, " p(%p)", n)
}
- if c == 0 && n.Name != nil && n.Name.Vargen != 0 {
- fmt.Fprintf(s, " g(%d)", n.Name.Vargen)
+ if !short && n.Name() != nil && n.Name().Vargen != 0 {
+ fmt.Fprintf(s, " g(%d)", n.Name().Vargen)
}
- if Debug_dumpptrs != 0 && c == 0 && n.Name != nil && n.Name.Defn != nil {
+ if base.Debug.DumpPtrs != 0 && !short && n.Name() != nil && n.Name().Defn != nil {
// Useful to see where Defn is set and what node it points to
- fmt.Fprintf(s, " defn(%p)", n.Name.Defn)
+ fmt.Fprintf(s, " defn(%p)", n.Name().Defn)
}
- if n.Pos.IsKnown() {
+ if n.Pos().IsKnown() {
pfx := ""
- switch n.Pos.IsStmt() {
+ switch n.Pos().IsStmt() {
case src.PosNotStmt:
pfx = "_" // "-" would be confusing
case src.PosIsStmt:
pfx = "+"
}
- fmt.Fprintf(s, " l(%s%d)", pfx, n.Pos.Line())
+ fmt.Fprintf(s, " l(%s%d)", pfx, n.Pos().Line())
}
- if c == 0 && n.Xoffset != BADWIDTH {
- fmt.Fprintf(s, " x(%d)", n.Xoffset)
+ if !short && n.Offset() != types.BADWIDTH {
+ fmt.Fprintf(s, " x(%d)", n.Offset())
}
if n.Class() != 0 {
@@ -455,30 +385,13 @@ func (n *Node) jconv(s fmt.State, flag FmtFlag) {
fmt.Fprintf(s, " colas(%v)", n.Colas())
}
- switch n.Esc {
- case EscUnknown:
- break
-
- case EscHeap:
- fmt.Fprint(s, " esc(h)")
-
- case EscNone:
- fmt.Fprint(s, " esc(no)")
-
- case EscNever:
- if c == 0 {
- fmt.Fprint(s, " esc(N)")
+ if EscFmt != nil {
+ if esc := EscFmt(n, short); esc != "" {
+ fmt.Fprintf(s, " %s", esc)
}
-
- default:
- fmt.Fprintf(s, " esc(%d)", n.Esc)
- }
-
- if e, ok := n.Opt().(*EscLocation); ok && e.loopDepth != 0 {
- fmt.Fprintf(s, " ld(%d)", e.loopDepth)
}
- if c == 0 && n.Typecheck() != 0 {
+ if !short && n.Typecheck() != 0 {
fmt.Fprintf(s, " tc(%d)", n.Typecheck())
}
@@ -494,20 +407,20 @@ func (n *Node) jconv(s fmt.State, flag FmtFlag) {
fmt.Fprintf(s, " embedded")
}
- if n.Op == ONAME {
- if n.Name.Addrtaken() {
+ if n.Op() == ONAME {
+ if n.Name().Addrtaken() {
fmt.Fprint(s, " addrtaken")
}
- if n.Name.Assigned() {
+ if n.Name().Assigned() {
fmt.Fprint(s, " assigned")
}
- if n.Name.IsClosureVar() {
+ if n.Name().IsClosureVar() {
fmt.Fprint(s, " closurevar")
}
- if n.Name.Captured() {
+ if n.Name().Captured() {
fmt.Fprint(s, " captured")
}
- if n.Name.IsOutputParamHeapAddr() {
+ if n.Name().IsOutputParamHeapAddr() {
fmt.Fprint(s, " outputparamheapaddr")
}
}
@@ -518,79 +431,46 @@ func (n *Node) jconv(s fmt.State, flag FmtFlag) {
fmt.Fprint(s, " nonnil")
}
- if c == 0 && n.HasCall() {
+ if !short && n.HasCall() {
fmt.Fprint(s, " hascall")
}
- if c == 0 && n.Name != nil && n.Name.Used() {
+ if !short && n.Name() != nil && n.Name().Used() {
fmt.Fprint(s, " used")
}
}
-func (v Val) Format(s fmt.State, verb rune) {
- switch verb {
- case 'v':
- v.vconv(s, fmtFlag(s, verb))
+func FmtConst(v constant.Value, flag FmtFlag) string {
+ if flag&FmtSharp == 0 && v.Kind() == constant.Complex {
+ real, imag := constant.Real(v), constant.Imag(v)
- default:
- fmt.Fprintf(s, "%%!%c(Val=%T)", verb, v)
- }
-}
-
-func (v Val) vconv(s fmt.State, flag FmtFlag) {
- switch u := v.U.(type) {
- case *Mpint:
- if !u.Rune {
- if flag&FmtSharp != 0 {
- fmt.Fprint(s, u.String())
- return
- }
- fmt.Fprint(s, u.GoString())
- return
+ var re string
+ sre := constant.Sign(real)
+ if sre != 0 {
+ re = real.String()
}
- switch x := u.Int64(); {
- case ' ' <= x && x < utf8.RuneSelf && x != '\\' && x != '\'':
- fmt.Fprintf(s, "'%c'", int(x))
-
- case 0 <= x && x < 1<<16:
- fmt.Fprintf(s, "'\\u%04x'", uint(int(x)))
-
- case 0 <= x && x <= utf8.MaxRune:
- fmt.Fprintf(s, "'\\U%08x'", uint64(x))
-
- default:
- fmt.Fprintf(s, "('\\x00' + %v)", u)
+ var im string
+ sim := constant.Sign(imag)
+ if sim != 0 {
+ im = imag.String()
}
- case *Mpflt:
- if flag&FmtSharp != 0 {
- fmt.Fprint(s, u.String())
- return
- }
- fmt.Fprint(s, u.GoString())
- return
-
- case *Mpcplx:
- if flag&FmtSharp != 0 {
- fmt.Fprint(s, u.String())
- return
+ switch {
+ case sre == 0 && sim == 0:
+ return "0"
+ case sre == 0:
+ return im + "i"
+ case sim == 0:
+ return re
+ case sim < 0:
+ return fmt.Sprintf("(%s%si)", re, im)
+ default:
+ return fmt.Sprintf("(%s+%si)", re, im)
}
- fmt.Fprint(s, u.GoString())
- return
-
- case string:
- fmt.Fprint(s, strconv.Quote(u))
-
- case bool:
- fmt.Fprint(s, u)
-
- case *NilVal:
- fmt.Fprint(s, "nil")
-
- default:
- fmt.Fprintf(s, "<ctype=%d>", v.Ctype())
}
+
+ return v.String()
}
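
FmtConst prints go/constant values (which replace the old Mpint/Mpflt/Mpcplx wrappers) in Go syntax, special-casing complex constants as (re+imi). A standalone look at the go/constant API it builds on:

    package main

    import (
        "fmt"
        "go/constant"
        "go/token"
    )

    func main() {
        // Build 1 + 2i as a go/constant value.
        re := constant.MakeInt64(1)
        im := constant.MakeImag(constant.MakeInt64(2))
        v := constant.BinaryOp(re, token.ADD, im)

        fmt.Println(v.Kind() == constant.Complex)       // true
        fmt.Println(constant.Real(v), constant.Imag(v)) // 1 2
        fmt.Println(constant.Sign(constant.Imag(v)))    // 1
    }
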
/*
@@ -603,17 +483,17 @@ s%^ ........*\]%&~%g
s%~ %%g
*/
-func symfmt(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode fmtMode) {
+func symfmt(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode FmtMode) {
if flag&FmtShort == 0 {
switch mode {
case FErr: // This is for the user
- if s.Pkg == builtinpkg || s.Pkg == localpkg {
+ if s.Pkg == BuiltinPkg || s.Pkg == LocalPkg {
b.WriteString(s.Name)
return
}
// If the name was used by multiple packages, display the full path,
- if s.Pkg.Name != "" && numImport[s.Pkg.Name] > 1 {
+ if s.Pkg.Name != "" && NumImport[s.Pkg.Name] > 1 {
fmt.Fprintf(b, "%q.%s", s.Pkg.Path, s.Name)
return
}
@@ -664,28 +544,28 @@ func symfmt(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode fmtMode) {
b.WriteString(s.Name)
}
-var basicnames = []string{
- TINT: "int",
- TUINT: "uint",
- TINT8: "int8",
- TUINT8: "uint8",
- TINT16: "int16",
- TUINT16: "uint16",
- TINT32: "int32",
- TUINT32: "uint32",
- TINT64: "int64",
- TUINT64: "uint64",
- TUINTPTR: "uintptr",
- TFLOAT32: "float32",
- TFLOAT64: "float64",
- TCOMPLEX64: "complex64",
- TCOMPLEX128: "complex128",
- TBOOL: "bool",
- TANY: "any",
- TSTRING: "string",
- TNIL: "nil",
- TIDEAL: "untyped number",
- TBLANK: "blank",
+var BasicTypeNames = []string{
+ types.TINT: "int",
+ types.TUINT: "uint",
+ types.TINT8: "int8",
+ types.TUINT8: "uint8",
+ types.TINT16: "int16",
+ types.TUINT16: "uint16",
+ types.TINT32: "int32",
+ types.TUINT32: "uint32",
+ types.TINT64: "int64",
+ types.TUINT64: "uint64",
+ types.TUINTPTR: "uintptr",
+ types.TFLOAT32: "float32",
+ types.TFLOAT64: "float64",
+ types.TCOMPLEX64: "complex64",
+ types.TCOMPLEX128: "complex128",
+ types.TBOOL: "bool",
+ types.TANY: "any",
+ types.TSTRING: "string",
+ types.TNIL: "nil",
+ types.TIDEAL: "untyped number",
+ types.TBLANK: "blank",
}
var fmtBufferPool = sync.Pool{
@@ -694,7 +574,7 @@ var fmtBufferPool = sync.Pool{
},
}
-func tconv(t *types.Type, flag FmtFlag, mode fmtMode) string {
+func tconv(t *types.Type, flag FmtFlag, mode FmtMode) string {
buf := fmtBufferPool.Get().(*bytes.Buffer)
buf.Reset()
defer fmtBufferPool.Put(buf)
@@ -707,7 +587,7 @@ func tconv(t *types.Type, flag FmtFlag, mode fmtMode) string {
// flag and mode control exactly what is printed.
// Any types x that are already in the visited map get printed as @%d where %d=visited[x].
// See #16897 before changing the implementation of tconv.
-func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited map[*types.Type]int) {
+func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode FmtMode, visited map[*types.Type]int) {
if off, ok := visited[t]; ok {
// We've seen this type before, so we're trying to print it recursively.
// Print a reference to it instead.
@@ -778,7 +658,7 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited
return
}
- if t.Sym.Pkg == localpkg && t.Vargen != 0 {
+ if t.Sym.Pkg == LocalPkg && t.Vargen != 0 {
b.WriteString(mode.Sprintf("%v·%d", t.Sym, t.Vargen))
return
}
@@ -788,7 +668,7 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited
return
}
- if int(t.Etype) < len(basicnames) && basicnames[t.Etype] != "" {
+ if int(t.Etype) < len(BasicTypeNames) && BasicTypeNames[t.Etype] != "" {
var name string
switch t {
case types.UntypedBool:
@@ -804,7 +684,7 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited
case types.UntypedComplex:
name = "untyped complex"
default:
- name = basicnames[t.Etype]
+ name = BasicTypeNames[t.Etype]
}
b.WriteString(name)
return
@@ -831,7 +711,7 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited
defer delete(visited, t)
switch t.Etype {
- case TPTR:
+ case types.TPTR:
b.WriteByte('*')
switch mode {
case FTypeId, FTypeIdName:
@@ -842,17 +722,17 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited
}
tconv2(b, t.Elem(), 0, mode, visited)
- case TARRAY:
+ case types.TARRAY:
b.WriteByte('[')
b.WriteString(strconv.FormatInt(t.NumElem(), 10))
b.WriteByte(']')
tconv2(b, t.Elem(), 0, mode, visited)
- case TSLICE:
+ case types.TSLICE:
b.WriteString("[]")
tconv2(b, t.Elem(), 0, mode, visited)
- case TCHAN:
+ case types.TCHAN:
switch t.ChanDir() {
case types.Crecv:
b.WriteString("<-chan ")
@@ -871,13 +751,13 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited
}
}
- case TMAP:
+ case types.TMAP:
b.WriteString("map[")
tconv2(b, t.Key(), 0, mode, visited)
b.WriteByte(']')
tconv2(b, t.Elem(), 0, mode, visited)
- case TINTER:
+ case types.TINTER:
if t.IsEmptyInterface() {
b.WriteString("interface {}")
break
@@ -909,7 +789,7 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited
}
b.WriteByte('}')
- case TFUNC:
+ case types.TFUNC:
if flag&FmtShort != 0 {
// no leading func
} else {
@@ -935,7 +815,7 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited
tconv2(b, t.Results(), 0, mode, visited)
}
- case TSTRUCT:
+ case types.TSTRUCT:
if m := t.StructType().Map; m != nil {
mt := m.MapType()
// Format the bucket struct for map[x]y as map.bucket[x]y.
@@ -948,7 +828,7 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited
case mt.Hiter:
b.WriteString("map.iter[")
default:
- Fatalf("unknown internal map type")
+ base.Fatalf("unknown internal map type")
}
tconv2(b, m.Key(), 0, mode, visited)
b.WriteByte(']')
@@ -986,17 +866,17 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited
b.WriteByte('}')
}
- case TFORW:
+ case types.TFORW:
b.WriteString("undefined")
if t.Sym != nil {
b.WriteByte(' ')
sconv2(b, t.Sym, 0, mode)
}
- case TUNSAFEPTR:
+ case types.TUNSAFEPTR:
b.WriteString("unsafe.Pointer")
- case Txxx:
+ case types.Txxx:
b.WriteString("Txxx")
default:
// Don't know how to handle - fall back to detailed prints.
@@ -1005,7 +885,7 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited
}
// Statements which may be rendered with a simplestmt as init.
-func stmtwithinit(op Op) bool {
+func StmtWithInit(op Op) bool {
switch op {
case OIF, OFOR, OFORUNTIL, OSWITCH:
return true
@@ -1014,38 +894,38 @@ func stmtwithinit(op Op) bool {
return false
}
-func (n *Node) stmtfmt(s fmt.State, mode fmtMode) {
+func stmtFmt(n Node, s fmt.State, mode FmtMode) {
// some statements allow for an init, but at most one,
// but we may have an arbitrary number added, eg by typecheck
// and inlining. If it doesn't fit the syntax, emit an enclosing
// block starting with the init statements.
// if we can just say "for" n->ninit; ... then do so
- simpleinit := n.Ninit.Len() == 1 && n.Ninit.First().Ninit.Len() == 0 && stmtwithinit(n.Op)
+ simpleinit := n.Init().Len() == 1 && n.Init().First().Init().Len() == 0 && StmtWithInit(n.Op())
// otherwise, print the inits as separate statements
- complexinit := n.Ninit.Len() != 0 && !simpleinit && (mode != FErr)
+ complexinit := n.Init().Len() != 0 && !simpleinit && (mode != FErr)
// but if it was for if/for/switch, put in an extra surrounding block to limit the scope
- extrablock := complexinit && stmtwithinit(n.Op)
+ extrablock := complexinit && StmtWithInit(n.Op())
if extrablock {
fmt.Fprint(s, "{")
}
if complexinit {
- mode.Fprintf(s, " %v; ", n.Ninit)
+ mode.Fprintf(s, " %v; ", n.Init())
}
- switch n.Op {
+ switch n.Op() {
case ODCL:
- mode.Fprintf(s, "var %v %v", n.Left.Sym, n.Left.Type)
+ mode.Fprintf(s, "var %v %v", n.Left().Sym(), n.Left().Type())
case ODCLFIELD:
- if n.Sym != nil {
- mode.Fprintf(s, "%v %v", n.Sym, n.Left)
+ if n.Sym() != nil {
+ mode.Fprintf(s, "%v %v", n.Sym(), n.Left())
} else {
- mode.Fprintf(s, "%v", n.Left)
+ mode.Fprintf(s, "%v", n.Left())
}
// Don't export "v = <N>" initializing statements, hope they're always
@@ -1053,61 +933,61 @@ func (n *Node) stmtfmt(s fmt.State, mode fmtMode) {
// the "v = <N>" again.
case OAS:
if n.Colas() && !complexinit {
- mode.Fprintf(s, "%v := %v", n.Left, n.Right)
+ mode.Fprintf(s, "%v := %v", n.Left(), n.Right())
} else {
- mode.Fprintf(s, "%v = %v", n.Left, n.Right)
+ mode.Fprintf(s, "%v = %v", n.Left(), n.Right())
}
case OASOP:
if n.Implicit() {
if n.SubOp() == OADD {
- mode.Fprintf(s, "%v++", n.Left)
+ mode.Fprintf(s, "%v++", n.Left())
} else {
- mode.Fprintf(s, "%v--", n.Left)
+ mode.Fprintf(s, "%v--", n.Left())
}
break
}
- mode.Fprintf(s, "%v %#v= %v", n.Left, n.SubOp(), n.Right)
+ mode.Fprintf(s, "%v %#v= %v", n.Left(), n.SubOp(), n.Right())
case OAS2:
if n.Colas() && !complexinit {
- mode.Fprintf(s, "%.v := %.v", n.List, n.Rlist)
+ mode.Fprintf(s, "%.v := %.v", n.List(), n.Rlist())
break
}
fallthrough
case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
- mode.Fprintf(s, "%.v = %v", n.List, n.Right)
+ mode.Fprintf(s, "%.v = %v", n.List(), n.Right())
case ORETURN:
- mode.Fprintf(s, "return %.v", n.List)
+ mode.Fprintf(s, "return %.v", n.List())
case ORETJMP:
- mode.Fprintf(s, "retjmp %v", n.Sym)
+ mode.Fprintf(s, "retjmp %v", n.Sym())
case OINLMARK:
- mode.Fprintf(s, "inlmark %d", n.Xoffset)
+ mode.Fprintf(s, "inlmark %d", n.Offset())
case OGO:
- mode.Fprintf(s, "go %v", n.Left)
+ mode.Fprintf(s, "go %v", n.Left())
case ODEFER:
- mode.Fprintf(s, "defer %v", n.Left)
+ mode.Fprintf(s, "defer %v", n.Left())
case OIF:
if simpleinit {
- mode.Fprintf(s, "if %v; %v { %v }", n.Ninit.First(), n.Left, n.Nbody)
+ mode.Fprintf(s, "if %v; %v { %v }", n.Init().First(), n.Left(), n.Body())
} else {
- mode.Fprintf(s, "if %v { %v }", n.Left, n.Nbody)
+ mode.Fprintf(s, "if %v { %v }", n.Left(), n.Body())
}
- if n.Rlist.Len() != 0 {
- mode.Fprintf(s, " else { %v }", n.Rlist)
+ if n.Rlist().Len() != 0 {
+ mode.Fprintf(s, " else { %v }", n.Rlist())
}
case OFOR, OFORUNTIL:
opname := "for"
- if n.Op == OFORUNTIL {
+ if n.Op() == OFORUNTIL {
opname = "foruntil"
}
if mode == FErr { // TODO maybe only if FmtShort, same below
@@ -1117,26 +997,26 @@ func (n *Node) stmtfmt(s fmt.State, mode fmtMode) {
fmt.Fprint(s, opname)
if simpleinit {
- mode.Fprintf(s, " %v;", n.Ninit.First())
- } else if n.Right != nil {
+ mode.Fprintf(s, " %v;", n.Init().First())
+ } else if n.Right() != nil {
fmt.Fprint(s, " ;")
}
- if n.Left != nil {
- mode.Fprintf(s, " %v", n.Left)
+ if n.Left() != nil {
+ mode.Fprintf(s, " %v", n.Left())
}
- if n.Right != nil {
- mode.Fprintf(s, "; %v", n.Right)
+ if n.Right() != nil {
+ mode.Fprintf(s, "; %v", n.Right())
} else if simpleinit {
fmt.Fprint(s, ";")
}
- if n.Op == OFORUNTIL && n.List.Len() != 0 {
- mode.Fprintf(s, "; %v", n.List)
+ if n.Op() == OFORUNTIL && n.List().Len() != 0 {
+ mode.Fprintf(s, "; %v", n.List())
}
- mode.Fprintf(s, " { %v }", n.Nbody)
+ mode.Fprintf(s, " { %v }", n.Body())
case ORANGE:
if mode == FErr {
@@ -1144,49 +1024,49 @@ func (n *Node) stmtfmt(s fmt.State, mode fmtMode) {
break
}
- if n.List.Len() == 0 {
- mode.Fprintf(s, "for range %v { %v }", n.Right, n.Nbody)
+ if n.List().Len() == 0 {
+ mode.Fprintf(s, "for range %v { %v }", n.Right(), n.Body())
break
}
- mode.Fprintf(s, "for %.v = range %v { %v }", n.List, n.Right, n.Nbody)
+ mode.Fprintf(s, "for %.v = range %v { %v }", n.List(), n.Right(), n.Body())
case OSELECT, OSWITCH:
if mode == FErr {
- mode.Fprintf(s, "%v statement", n.Op)
+ mode.Fprintf(s, "%v statement", n.Op())
break
}
- mode.Fprintf(s, "%#v", n.Op)
+ mode.Fprintf(s, "%#v", n.Op())
if simpleinit {
- mode.Fprintf(s, " %v;", n.Ninit.First())
+ mode.Fprintf(s, " %v;", n.Init().First())
}
- if n.Left != nil {
- mode.Fprintf(s, " %v ", n.Left)
+ if n.Left() != nil {
+ mode.Fprintf(s, " %v ", n.Left())
}
- mode.Fprintf(s, " { %v }", n.List)
+ mode.Fprintf(s, " { %v }", n.List())
case OCASE:
- if n.List.Len() != 0 {
- mode.Fprintf(s, "case %.v", n.List)
+ if n.List().Len() != 0 {
+ mode.Fprintf(s, "case %.v", n.List())
} else {
fmt.Fprint(s, "default")
}
- mode.Fprintf(s, ": %v", n.Nbody)
+ mode.Fprintf(s, ": %v", n.Body())
case OBREAK, OCONTINUE, OGOTO, OFALL:
- if n.Sym != nil {
- mode.Fprintf(s, "%#v %v", n.Op, n.Sym)
+ if n.Sym() != nil {
+ mode.Fprintf(s, "%#v %v", n.Op(), n.Sym())
} else {
- mode.Fprintf(s, "%#v", n.Op)
+ mode.Fprintf(s, "%#v", n.Op())
}
case OEMPTY:
break
case OLABEL:
- mode.Fprintf(s, "%v: ", n.Sym)
+ mode.Fprintf(s, "%v: ", n.Sym())
}
if extrablock {
@@ -1194,7 +1074,7 @@ func (n *Node) stmtfmt(s fmt.State, mode fmtMode) {
}
}
-var opprec = []int{
+var OpPrec = []int{
OALIGNOF: 8,
OAPPEND: 8,
OBYTES2STR: 8,
@@ -1221,6 +1101,7 @@ var opprec = []int{
OMAPLIT: 8,
ONAME: 8,
ONEW: 8,
+ ONIL: 8,
ONONAME: 8,
OOFFSETOF: 8,
OPACK: 8,
@@ -1313,9 +1194,9 @@ var opprec = []int{
OEND: 0,
}
-func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) {
- for n != nil && n.Implicit() && (n.Op == ODEREF || n.Op == OADDR) {
- n = n.Left
+func exprFmt(n Node, s fmt.State, prec int, mode FmtMode) {
+ for n != nil && n.Implicit() && (n.Op() == ODEREF || n.Op() == OADDR) {
+ n = n.Left()
}
if n == nil {
@@ -1323,8 +1204,8 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) {
return
}
- nprec := opprec[n.Op]
- if n.Op == OTYPE && n.Sym != nil {
+ nprec := OpPrec[n.Op()]
+ if n.Op() == OTYPE && n.Sym() != nil {
nprec = 8
}
@@ -1333,80 +1214,102 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) {
return
}
- switch n.Op {
+ switch n.Op() {
case OPAREN:
- mode.Fprintf(s, "(%v)", n.Left)
+ mode.Fprintf(s, "(%v)", n.Left())
+
+ case ONIL:
+ fmt.Fprint(s, "nil")
case OLITERAL: // this is a bit of a mess
if mode == FErr {
- if n.Orig != nil && n.Orig != n {
- n.Orig.exprfmt(s, prec, mode)
+ if n.Orig() != nil && n.Orig() != n {
+ exprFmt(n.Orig(), s, prec, mode)
return
}
- if n.Sym != nil {
- fmt.Fprint(s, smodeString(n.Sym, mode))
+ if n.Sym() != nil {
+ fmt.Fprint(s, smodeString(n.Sym(), mode))
return
}
}
- if n.Val().Ctype() == CTNIL && n.Orig != nil && n.Orig != n {
- n.Orig.exprfmt(s, prec, mode)
- return
- }
- if n.Type != nil && !n.Type.IsUntyped() {
+
+ needUnparen := false
+ if n.Type() != nil && !n.Type().IsUntyped() {
// Need parens when type begins with what might
// be misinterpreted as a unary operator: * or <-.
- if n.Type.IsPtr() || (n.Type.IsChan() && n.Type.ChanDir() == types.Crecv) {
- mode.Fprintf(s, "(%v)(%v)", n.Type, n.Val())
- return
+ if n.Type().IsPtr() || (n.Type().IsChan() && n.Type().ChanDir() == types.Crecv) {
+ mode.Fprintf(s, "(%v)(", n.Type())
} else {
- mode.Fprintf(s, "%v(%v)", n.Type, n.Val())
- return
+ mode.Fprintf(s, "%v(", n.Type())
+ }
+ needUnparen = true
+ }
+
+ if n.Type() == types.UntypedRune {
+ switch x, ok := constant.Int64Val(n.Val()); {
+ case !ok:
+ fallthrough
+ default:
+ fmt.Fprintf(s, "('\\x00' + %v)", n.Val())
+
+ case ' ' <= x && x < utf8.RuneSelf && x != '\\' && x != '\'':
+ fmt.Fprintf(s, "'%c'", int(x))
+
+ case 0 <= x && x < 1<<16:
+ fmt.Fprintf(s, "'\\u%04x'", uint(int(x)))
+
+ case 0 <= x && x <= utf8.MaxRune:
+ fmt.Fprintf(s, "'\\U%08x'", uint64(x))
}
+ } else {
+ fmt.Fprint(s, FmtConst(n.Val(), fmtFlag(s, 'v')))
}
- mode.Fprintf(s, "%v", n.Val())
+ if needUnparen {
+ mode.Fprintf(s, ")")
+ }
- // Special case: name used as local variable in export.
- // _ becomes ~b%d internally; print as _ for export
case ONAME:
- if mode == FErr && n.Sym != nil && n.Sym.Name[0] == '~' && n.Sym.Name[1] == 'b' {
+ // Special case: name used as local variable in export.
+ // _ becomes ~b%d internally; print as _ for export
+ if mode == FErr && n.Sym() != nil && n.Sym().Name[0] == '~' && n.Sym().Name[1] == 'b' {
fmt.Fprint(s, "_")
return
}
fallthrough
- case OPACK, ONONAME:
- fmt.Fprint(s, smodeString(n.Sym, mode))
+ case OPACK, ONONAME, OMETHEXPR:
+ fmt.Fprint(s, smodeString(n.Sym(), mode))
case OTYPE:
- if n.Type == nil && n.Sym != nil {
- fmt.Fprint(s, smodeString(n.Sym, mode))
+ if n.Type() == nil && n.Sym() != nil {
+ fmt.Fprint(s, smodeString(n.Sym(), mode))
return
}
- mode.Fprintf(s, "%v", n.Type)
+ mode.Fprintf(s, "%v", n.Type())
case OTARRAY:
- if n.Left != nil {
- mode.Fprintf(s, "[%v]%v", n.Left, n.Right)
+ if n.Left() != nil {
+ mode.Fprintf(s, "[%v]%v", n.Left(), n.Right())
return
}
- mode.Fprintf(s, "[]%v", n.Right) // happens before typecheck
+ mode.Fprintf(s, "[]%v", n.Right()) // happens before typecheck
case OTMAP:
- mode.Fprintf(s, "map[%v]%v", n.Left, n.Right)
+ mode.Fprintf(s, "map[%v]%v", n.Left(), n.Right())
case OTCHAN:
switch n.TChanDir() {
case types.Crecv:
- mode.Fprintf(s, "<-chan %v", n.Left)
+ mode.Fprintf(s, "<-chan %v", n.Left())
case types.Csend:
- mode.Fprintf(s, "chan<- %v", n.Left)
+ mode.Fprintf(s, "chan<- %v", n.Left())
default:
- if n.Left != nil && n.Left.Op == OTCHAN && n.Left.Sym == nil && n.Left.TChanDir() == types.Crecv {
- mode.Fprintf(s, "chan (%v)", n.Left)
+ if n.Left() != nil && n.Left().Op() == OTCHAN && n.Left().Sym() == nil && n.Left().TChanDir() == types.Crecv {
+ mode.Fprintf(s, "chan (%v)", n.Left())
} else {
- mode.Fprintf(s, "chan %v", n.Left)
+ mode.Fprintf(s, "chan %v", n.Left())
}
}
@@ -1424,11 +1327,11 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) {
fmt.Fprint(s, "func literal")
return
}
- if n.Nbody.Len() != 0 {
- mode.Fprintf(s, "%v { %v }", n.Type, n.Nbody)
+ if n.Body().Len() != 0 {
+ mode.Fprintf(s, "%v { %v }", n.Type(), n.Body())
return
}
- mode.Fprintf(s, "%v { %v }", n.Type, n.Func.Closure.Nbody)
+ mode.Fprintf(s, "%v { %v }", n.Type(), n.Func().Decl.Body())
case OCOMPLIT:
if mode == FErr {
@@ -1436,103 +1339,103 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) {
mode.Fprintf(s, "... argument")
return
}
- if n.Right != nil {
- mode.Fprintf(s, "%v{%s}", n.Right, ellipsisIf(n.List.Len() != 0))
+ if n.Right() != nil {
+ mode.Fprintf(s, "%v{%s}", n.Right(), ellipsisIf(n.List().Len() != 0))
return
}
fmt.Fprint(s, "composite literal")
return
}
- mode.Fprintf(s, "(%v{ %.v })", n.Right, n.List)
+ mode.Fprintf(s, "(%v{ %.v })", n.Right(), n.List())
case OPTRLIT:
- mode.Fprintf(s, "&%v", n.Left)
+ mode.Fprintf(s, "&%v", n.Left())
case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
if mode == FErr {
- mode.Fprintf(s, "%v{%s}", n.Type, ellipsisIf(n.List.Len() != 0))
+ mode.Fprintf(s, "%v{%s}", n.Type(), ellipsisIf(n.List().Len() != 0))
return
}
- mode.Fprintf(s, "(%v{ %.v })", n.Type, n.List)
+ mode.Fprintf(s, "(%v{ %.v })", n.Type(), n.List())
case OKEY:
- if n.Left != nil && n.Right != nil {
- mode.Fprintf(s, "%v:%v", n.Left, n.Right)
+ if n.Left() != nil && n.Right() != nil {
+ mode.Fprintf(s, "%v:%v", n.Left(), n.Right())
return
}
- if n.Left == nil && n.Right != nil {
- mode.Fprintf(s, ":%v", n.Right)
+ if n.Left() == nil && n.Right() != nil {
+ mode.Fprintf(s, ":%v", n.Right())
return
}
- if n.Left != nil && n.Right == nil {
- mode.Fprintf(s, "%v:", n.Left)
+ if n.Left() != nil && n.Right() == nil {
+ mode.Fprintf(s, "%v:", n.Left())
return
}
fmt.Fprint(s, ":")
case OSTRUCTKEY:
- mode.Fprintf(s, "%v:%v", n.Sym, n.Left)
+ mode.Fprintf(s, "%v:%v", n.Sym(), n.Left())
case OCALLPART:
- n.Left.exprfmt(s, nprec, mode)
- if n.Right == nil || n.Right.Sym == nil {
+ exprFmt(n.Left(), s, nprec, mode)
+ if n.Right() == nil || n.Right().Sym() == nil {
fmt.Fprint(s, ".<nil>")
return
}
- mode.Fprintf(s, ".%0S", n.Right.Sym)
+ mode.Fprintf(s, ".%0S", n.Right().Sym())
case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
- n.Left.exprfmt(s, nprec, mode)
- if n.Sym == nil {
+ exprFmt(n.Left(), s, nprec, mode)
+ if n.Sym() == nil {
fmt.Fprint(s, ".<nil>")
return
}
- mode.Fprintf(s, ".%0S", n.Sym)
+ mode.Fprintf(s, ".%0S", n.Sym())
case ODOTTYPE, ODOTTYPE2:
- n.Left.exprfmt(s, nprec, mode)
- if n.Right != nil {
- mode.Fprintf(s, ".(%v)", n.Right)
+ exprFmt(n.Left(), s, nprec, mode)
+ if n.Right() != nil {
+ mode.Fprintf(s, ".(%v)", n.Right())
return
}
- mode.Fprintf(s, ".(%v)", n.Type)
+ mode.Fprintf(s, ".(%v)", n.Type())
case OINDEX, OINDEXMAP:
- n.Left.exprfmt(s, nprec, mode)
- mode.Fprintf(s, "[%v]", n.Right)
+ exprFmt(n.Left(), s, nprec, mode)
+ mode.Fprintf(s, "[%v]", n.Right())
case OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR:
- n.Left.exprfmt(s, nprec, mode)
+ exprFmt(n.Left(), s, nprec, mode)
fmt.Fprint(s, "[")
low, high, max := n.SliceBounds()
if low != nil {
- fmt.Fprint(s, low.modeString(mode))
+ fmt.Fprint(s, modeString(low, mode))
}
fmt.Fprint(s, ":")
if high != nil {
- fmt.Fprint(s, high.modeString(mode))
+ fmt.Fprint(s, modeString(high, mode))
}
- if n.Op.IsSlice3() {
+ if n.Op().IsSlice3() {
fmt.Fprint(s, ":")
if max != nil {
- fmt.Fprint(s, max.modeString(mode))
+ fmt.Fprint(s, modeString(max, mode))
}
}
fmt.Fprint(s, "]")
case OSLICEHEADER:
- if n.List.Len() != 2 {
- Fatalf("bad OSLICEHEADER list length %d", n.List.Len())
+ if n.List().Len() != 2 {
+ base.Fatalf("bad OSLICEHEADER list length %d", n.List().Len())
}
- mode.Fprintf(s, "sliceheader{%v,%v,%v}", n.Left, n.List.First(), n.List.Second())
+ mode.Fprintf(s, "sliceheader{%v,%v,%v}", n.Left(), n.List().First(), n.List().Second())
case OCOMPLEX, OCOPY:
- if n.Left != nil {
- mode.Fprintf(s, "%#v(%v, %v)", n.Op, n.Left, n.Right)
+ if n.Left() != nil {
+ mode.Fprintf(s, "%#v(%v, %v)", n.Op(), n.Left(), n.Right())
} else {
- mode.Fprintf(s, "%#v(%.v)", n.Op, n.List)
+ mode.Fprintf(s, "%#v(%.v)", n.Op(), n.List())
}
case OCONV,
@@ -1543,15 +1446,15 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) {
OSTR2BYTES,
OSTR2RUNES,
ORUNESTR:
- if n.Type == nil || n.Type.Sym == nil {
- mode.Fprintf(s, "(%v)", n.Type)
+ if n.Type() == nil || n.Type().Sym == nil {
+ mode.Fprintf(s, "(%v)", n.Type())
} else {
- mode.Fprintf(s, "%v", n.Type)
+ mode.Fprintf(s, "%v", n.Type())
}
- if n.Left != nil {
- mode.Fprintf(s, "(%v)", n.Left)
+ if n.Left() != nil {
+ mode.Fprintf(s, "(%v)", n.Left())
} else {
- mode.Fprintf(s, "(%.v)", n.List)
+ mode.Fprintf(s, "(%.v)", n.List())
}
case OREAL,
@@ -1570,49 +1473,49 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) {
OSIZEOF,
OPRINT,
OPRINTN:
- if n.Left != nil {
- mode.Fprintf(s, "%#v(%v)", n.Op, n.Left)
+ if n.Left() != nil {
+ mode.Fprintf(s, "%#v(%v)", n.Op(), n.Left())
return
}
if n.IsDDD() {
- mode.Fprintf(s, "%#v(%.v...)", n.Op, n.List)
+ mode.Fprintf(s, "%#v(%.v...)", n.Op(), n.List())
return
}
- mode.Fprintf(s, "%#v(%.v)", n.Op, n.List)
+ mode.Fprintf(s, "%#v(%.v)", n.Op(), n.List())
case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG:
- n.Left.exprfmt(s, nprec, mode)
+ exprFmt(n.Left(), s, nprec, mode)
if n.IsDDD() {
- mode.Fprintf(s, "(%.v...)", n.List)
+ mode.Fprintf(s, "(%.v...)", n.List())
return
}
- mode.Fprintf(s, "(%.v)", n.List)
+ mode.Fprintf(s, "(%.v)", n.List())
case OMAKEMAP, OMAKECHAN, OMAKESLICE:
- if n.List.Len() != 0 { // pre-typecheck
- mode.Fprintf(s, "make(%v, %.v)", n.Type, n.List)
+ if n.List().Len() != 0 { // pre-typecheck
+ mode.Fprintf(s, "make(%v, %.v)", n.Type(), n.List())
return
}
- if n.Right != nil {
- mode.Fprintf(s, "make(%v, %v, %v)", n.Type, n.Left, n.Right)
+ if n.Right() != nil {
+ mode.Fprintf(s, "make(%v, %v, %v)", n.Type(), n.Left(), n.Right())
return
}
- if n.Left != nil && (n.Op == OMAKESLICE || !n.Left.Type.IsUntyped()) {
- mode.Fprintf(s, "make(%v, %v)", n.Type, n.Left)
+ if n.Left() != nil && (n.Op() == OMAKESLICE || !n.Left().Type().IsUntyped()) {
+ mode.Fprintf(s, "make(%v, %v)", n.Type(), n.Left())
return
}
- mode.Fprintf(s, "make(%v)", n.Type)
+ mode.Fprintf(s, "make(%v)", n.Type())
case OMAKESLICECOPY:
- mode.Fprintf(s, "makeslicecopy(%v, %v, %v)", n.Type, n.Left, n.Right)
+ mode.Fprintf(s, "makeslicecopy(%v, %v, %v)", n.Type(), n.Left(), n.Right())
case OPLUS, ONEG, OADDR, OBITNOT, ODEREF, ONOT, ORECV:
// Unary
- mode.Fprintf(s, "%#v", n.Op)
- if n.Left != nil && n.Left.Op == n.Op {
+ mode.Fprintf(s, "%#v", n.Op())
+ if n.Left() != nil && n.Left().Op() == n.Op() {
fmt.Fprint(s, " ")
}
- n.Left.exprfmt(s, nprec+1, mode)
+ exprFmt(n.Left(), s, nprec+1, mode)
// Binary
case OADD,
@@ -1635,37 +1538,37 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) {
OSEND,
OSUB,
OXOR:
- n.Left.exprfmt(s, nprec, mode)
- mode.Fprintf(s, " %#v ", n.Op)
- n.Right.exprfmt(s, nprec+1, mode)
+ exprFmt(n.Left(), s, nprec, mode)
+ mode.Fprintf(s, " %#v ", n.Op())
+ exprFmt(n.Right(), s, nprec+1, mode)
case OADDSTR:
- for i, n1 := range n.List.Slice() {
+ for i, n1 := range n.List().Slice() {
if i != 0 {
fmt.Fprint(s, " + ")
}
- n1.exprfmt(s, nprec, mode)
+ exprFmt(n1, s, nprec, mode)
}
case ODDD:
mode.Fprintf(s, "...")
default:
- mode.Fprintf(s, "<node %v>", n.Op)
+ mode.Fprintf(s, "<node %v>", n.Op())
}
}
-func (n *Node) nodefmt(s fmt.State, flag FmtFlag, mode fmtMode) {
- t := n.Type
+func nodeFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) {
+ t := n.Type()
// We almost always want the original.
// TODO(gri) Why the special case for OLITERAL?
- if n.Op != OLITERAL && n.Orig != nil {
- n = n.Orig
+ if n.Op() != OLITERAL && n.Orig() != nil {
+ n = n.Orig()
}
if flag&FmtLong != 0 && t != nil {
- if t.Etype == TNIL {
+ if t.Etype == types.TNIL {
fmt.Fprint(s, "nil")
- } else if n.Op == ONAME && n.Name.AutoTemp() {
+ } else if n.Op() == ONAME && n.Name().AutoTemp() {
mode.Fprintf(s, "%v value", t)
} else {
mode.Fprintf(s, "%v (type %v)", n, t)
@@ -1675,15 +1578,15 @@ func (n *Node) nodefmt(s fmt.State, flag FmtFlag, mode fmtMode) {
	// TODO inlining produces expressions with ninits. We can't print these yet.
- if opprec[n.Op] < 0 {
- n.stmtfmt(s, mode)
+ if OpPrec[n.Op()] < 0 {
+ stmtFmt(n, s, mode)
return
}
- n.exprfmt(s, 0, mode)
+ exprFmt(n, s, 0, mode)
}
-func (n *Node) nodedump(s fmt.State, flag FmtFlag, mode fmtMode) {
+func nodeDumpFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) {
recur := flag&FmtShort == 0
if recur {
@@ -1693,88 +1596,88 @@ func (n *Node) nodedump(s fmt.State, flag FmtFlag, mode fmtMode) {
return
}
- if n.Ninit.Len() != 0 {
- mode.Fprintf(s, "%v-init%v", n.Op, n.Ninit)
+ if n.Init().Len() != 0 {
+ mode.Fprintf(s, "%v-init%v", n.Op(), n.Init())
indent(s)
}
}
- switch n.Op {
+ switch n.Op() {
default:
- mode.Fprintf(s, "%v%j", n.Op, n)
+ mode.Fprintf(s, "%v%j", n.Op(), n)
case OLITERAL:
- mode.Fprintf(s, "%v-%v%j", n.Op, n.Val(), n)
+ mode.Fprintf(s, "%v-%v%j", n.Op(), n.Val(), n)
- case ONAME, ONONAME:
- if n.Sym != nil {
- mode.Fprintf(s, "%v-%v%j", n.Op, n.Sym, n)
+ case ONAME, ONONAME, OMETHEXPR:
+ if n.Sym() != nil {
+ mode.Fprintf(s, "%v-%v%j", n.Op(), n.Sym(), n)
} else {
- mode.Fprintf(s, "%v%j", n.Op, n)
+ mode.Fprintf(s, "%v%j", n.Op(), n)
}
- if recur && n.Type == nil && n.Name != nil && n.Name.Param != nil && n.Name.Param.Ntype != nil {
+ if recur && n.Type() == nil && n.Name() != nil && n.Name().Param != nil && n.Name().Param.Ntype != nil {
indent(s)
- mode.Fprintf(s, "%v-ntype%v", n.Op, n.Name.Param.Ntype)
+ mode.Fprintf(s, "%v-ntype%v", n.Op(), n.Name().Param.Ntype)
}
case OASOP:
- mode.Fprintf(s, "%v-%v%j", n.Op, n.SubOp(), n)
+ mode.Fprintf(s, "%v-%v%j", n.Op(), n.SubOp(), n)
case OTYPE:
- mode.Fprintf(s, "%v %v%j type=%v", n.Op, n.Sym, n, n.Type)
- if recur && n.Type == nil && n.Name != nil && n.Name.Param != nil && n.Name.Param.Ntype != nil {
+ mode.Fprintf(s, "%v %v%j type=%v", n.Op(), n.Sym(), n, n.Type())
+ if recur && n.Type() == nil && n.Name() != nil && n.Name().Param != nil && n.Name().Param.Ntype != nil {
indent(s)
- mode.Fprintf(s, "%v-ntype%v", n.Op, n.Name.Param.Ntype)
+ mode.Fprintf(s, "%v-ntype%v", n.Op(), n.Name().Param.Ntype)
}
}
- if n.Op == OCLOSURE && n.Func.Closure != nil && n.Func.Closure.Func.Nname.Sym != nil {
- mode.Fprintf(s, " fnName %v", n.Func.Closure.Func.Nname.Sym)
+ if n.Op() == OCLOSURE && n.Func().Decl != nil && n.Func().Nname.Sym() != nil {
+ mode.Fprintf(s, " fnName %v", n.Func().Nname.Sym())
}
- if n.Sym != nil && n.Op != ONAME {
- mode.Fprintf(s, " %v", n.Sym)
+ if n.Sym() != nil && n.Op() != ONAME {
+ mode.Fprintf(s, " %v", n.Sym())
}
- if n.Type != nil {
- mode.Fprintf(s, " %v", n.Type)
+ if n.Type() != nil {
+ mode.Fprintf(s, " %v", n.Type())
}
if recur {
- if n.Left != nil {
- mode.Fprintf(s, "%v", n.Left)
+ if n.Left() != nil {
+ mode.Fprintf(s, "%v", n.Left())
}
- if n.Right != nil {
- mode.Fprintf(s, "%v", n.Right)
+ if n.Right() != nil {
+ mode.Fprintf(s, "%v", n.Right())
}
- if n.Func != nil && n.Func.Closure != nil && n.Func.Closure.Nbody.Len() != 0 {
+ if n.Op() == OCLOSURE && n.Func() != nil && n.Func().Decl != nil && n.Func().Decl.Body().Len() != 0 {
indent(s)
// The function associated with a closure
- mode.Fprintf(s, "%v-clofunc%v", n.Op, n.Func.Closure)
+ mode.Fprintf(s, "%v-clofunc%v", n.Op(), n.Func().Decl)
}
- if n.Func != nil && n.Func.Dcl != nil && len(n.Func.Dcl) != 0 {
+ if n.Op() == ODCLFUNC && n.Func() != nil && n.Func().Dcl != nil && len(n.Func().Dcl) != 0 {
indent(s)
// The dcls for a func or closure
- mode.Fprintf(s, "%v-dcl%v", n.Op, asNodes(n.Func.Dcl))
+ mode.Fprintf(s, "%v-dcl%v", n.Op(), AsNodes(n.Func().Dcl))
}
- if n.List.Len() != 0 {
+ if n.List().Len() != 0 {
indent(s)
- mode.Fprintf(s, "%v-list%v", n.Op, n.List)
+ mode.Fprintf(s, "%v-list%v", n.Op(), n.List())
}
- if n.Rlist.Len() != 0 {
+ if n.Rlist().Len() != 0 {
indent(s)
- mode.Fprintf(s, "%v-rlist%v", n.Op, n.Rlist)
+ mode.Fprintf(s, "%v-rlist%v", n.Op(), n.Rlist())
}
- if n.Nbody.Len() != 0 {
+ if n.Body().Len() != 0 {
indent(s)
- mode.Fprintf(s, "%v-body%v", n.Op, n.Nbody)
+ mode.Fprintf(s, "%v-body%v", n.Op(), n.Body())
}
}
}
// "%S" suppresses qualifying with package
-func symFormat(s *types.Sym, f fmt.State, verb rune, mode fmtMode) {
+func symFormat(s *types.Sym, f fmt.State, verb rune, mode FmtMode) {
switch verb {
case 'v', 'S':
fmt.Fprint(f, sconv(s, fmtFlag(f, verb), mode))
@@ -1784,10 +1687,10 @@ func symFormat(s *types.Sym, f fmt.State, verb rune, mode fmtMode) {
}
}
-func smodeString(s *types.Sym, mode fmtMode) string { return sconv(s, 0, mode) }
+func smodeString(s *types.Sym, mode FmtMode) string { return sconv(s, 0, mode) }
// See #16897 before changing the implementation of sconv.
-func sconv(s *types.Sym, flag FmtFlag, mode fmtMode) string {
+func sconv(s *types.Sym, flag FmtFlag, mode FmtMode) string {
if flag&FmtLong != 0 {
panic("linksymfmt")
}
@@ -1808,7 +1711,7 @@ func sconv(s *types.Sym, flag FmtFlag, mode fmtMode) string {
return types.InternString(buf.Bytes())
}
-func sconv2(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode fmtMode) {
+func sconv2(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode FmtMode) {
if flag&FmtLong != 0 {
panic("linksymfmt")
}
@@ -1825,7 +1728,7 @@ func sconv2(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode fmtMode) {
symfmt(b, s, flag, mode)
}
-func fldconv(b *bytes.Buffer, f *types.Field, flag FmtFlag, mode fmtMode, visited map[*types.Type]int, funarg types.Funarg) {
+func fldconv(b *bytes.Buffer, f *types.Field, flag FmtFlag, mode FmtMode, visited map[*types.Type]int, funarg types.Funarg) {
if f == nil {
b.WriteString("<T>")
return
@@ -1841,12 +1744,12 @@ func fldconv(b *bytes.Buffer, f *types.Field, flag FmtFlag, mode fmtMode, visite
// Take the name from the original.
if mode == FErr {
- s = origSym(s)
+ s = OrigSym(s)
}
if s != nil && f.Embedded == 0 {
if funarg != types.FunargNone {
- name = asNode(f.Nname).modeString(mode)
+ name = modeString(AsNode(f.Nname), mode)
} else if flag&FmtLong != 0 {
name = mode.Sprintf("%0S", s)
if !types.IsExported(name) && flag&FmtUnsigned == 0 {
@@ -1882,7 +1785,7 @@ func fldconv(b *bytes.Buffer, f *types.Field, flag FmtFlag, mode fmtMode, visite
// "%L" print definition, not name
// "%S" omit 'func' and receiver from function types, short type names
-func typeFormat(t *types.Type, s fmt.State, verb rune, mode fmtMode) {
+func typeFormat(t *types.Type, s fmt.State, verb rune, mode FmtMode) {
switch verb {
case 'v', 'S', 'L':
fmt.Fprint(s, tconv(t, fmtFlag(s, verb), mode))
@@ -1891,12 +1794,12 @@ func typeFormat(t *types.Type, s fmt.State, verb rune, mode fmtMode) {
}
}
-func (n *Node) String() string { return fmt.Sprint(n) }
-func (n *Node) modeString(mode fmtMode) string { return mode.Sprint(n) }
+func (n *node) String() string { return fmt.Sprint(n) }
+func modeString(n Node, mode FmtMode) string { return mode.Sprint(n) }
// "%L" suffix with "(type %T)" where possible
// "%+S" in debug mode, don't recurse, no multiline output
-func (n *Node) nconv(s fmt.State, flag FmtFlag, mode fmtMode) {
+func nconvFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) {
if n == nil {
fmt.Fprint(s, "<N>")
return
@@ -1906,19 +1809,19 @@ func (n *Node) nconv(s fmt.State, flag FmtFlag, mode fmtMode) {
switch mode {
case FErr:
- n.nodefmt(s, flag, mode)
+ nodeFmt(n, s, flag, mode)
case FDbg:
dumpdepth++
- n.nodedump(s, flag, mode)
+ nodeDumpFmt(n, s, flag, mode)
dumpdepth--
default:
- Fatalf("unhandled %%N mode: %d", mode)
+ base.Fatalf("unhandled %%N mode: %d", mode)
}
}
-func (l Nodes) format(s fmt.State, verb rune, mode fmtMode) {
+func (l Nodes) format(s fmt.State, verb rune, mode FmtMode) {
switch verb {
case 'v':
l.hconv(s, fmtFlag(s, verb), mode)
@@ -1933,7 +1836,7 @@ func (n Nodes) String() string {
}
// Flags: all those of %N plus '.': separate with commas instead of semicolons.
-func (l Nodes) hconv(s fmt.State, flag FmtFlag, mode fmtMode) {
+func (l Nodes) hconv(s fmt.State, flag FmtFlag, mode FmtMode) {
if l.Len() == 0 && mode == FDbg {
fmt.Fprint(s, "<nil>")
return
@@ -1948,22 +1851,22 @@ func (l Nodes) hconv(s fmt.State, flag FmtFlag, mode fmtMode) {
}
for i, n := range l.Slice() {
- fmt.Fprint(s, n.modeString(mode))
+ fmt.Fprint(s, modeString(n, mode))
if i+1 < l.Len() {
fmt.Fprint(s, sep)
}
}
}
-func dumplist(s string, l Nodes) {
+func DumpList(s string, l Nodes) {
fmt.Printf("%s%+v\n", s, l)
}
-func fdumplist(w io.Writer, s string, l Nodes) {
+func FDumpList(w io.Writer, s string, l Nodes) {
fmt.Fprintf(w, "%s%+v\n", s, l)
}
-func Dump(s string, n *Node) {
+func Dump(s string, n Node) {
fmt.Printf("%s [%p]%+v\n", s, n, n)
}
@@ -1984,3 +1887,30 @@ func ellipsisIf(b bool) string {
}
return ""
}
+
+// NumImport tracks how often a package with a given name is imported.
+// It is used to provide a better error message (by using the package
+// path to disambiguate) when the same package name appears more than
+// once in an error message.
+var NumImport = make(map[string]int)
+
+func InstallTypeFormats() {
+ types.Sconv = func(s *types.Sym, flag, mode int) string {
+ return sconv(s, FmtFlag(flag), FmtMode(mode))
+ }
+ types.Tconv = func(t *types.Type, flag, mode int) string {
+ return tconv(t, FmtFlag(flag), FmtMode(mode))
+ }
+ types.FormatSym = func(sym *types.Sym, s fmt.State, verb rune, mode int) {
+ symFormat(sym, s, verb, FmtMode(mode))
+ }
+ types.FormatType = func(t *types.Type, s fmt.State, verb rune, mode int) {
+ typeFormat(t, s, verb, FmtMode(mode))
+ }
+}
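
InstallTypeFormats assigns ir's formatting routines to function-valued hooks declared in the types package; this indirection presumably exists so that types can format symbols and types without importing ir (ir already imports types, so a direct call would create an import cycle). A minimal single-file sketch of the same hook pattern, with hypothetical names (symConv, install):

    package main

    import "fmt"

    // symConv stands in for a hook like types.Sconv: the low-level
    // package declares the variable but never implements it.
    var symConv func(name string) string

    // install stands in for ir.InstallTypeFormats: the high-level
    // package supplies the implementation at startup.
    func install() {
        symConv = func(name string) string { return "pkg." + name }
    }

    func main() {
        install()
        fmt.Println(symConv("Sym")) // prints: pkg.Sym
    }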
+
+// Line returns n's position as a string. If n has been inlined,
+// it uses the outermost position where n has been inlined.
+func Line(n Node) string {
+ return base.FmtPos(n.Pos())
+}
diff --git a/src/cmd/compile/internal/ir/ir.go b/src/cmd/compile/internal/ir/ir.go
new file mode 100644
index 0000000000..ad7f692b07
--- /dev/null
+++ b/src/cmd/compile/internal/ir/ir.go
@@ -0,0 +1,12 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import "cmd/compile/internal/types"
+
+var LocalPkg *types.Pkg // package being compiled
+
+// BuiltinPkg is a fake package that declares the universe block.
+var BuiltinPkg *types.Pkg
diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/ir/node.go
index 43358333b8..477d07f502 100644
--- a/src/cmd/compile/internal/gc/syntax.go
+++ b/src/cmd/compile/internal/ir/node.go
@@ -4,43 +4,160 @@
// “Abstract” syntax representation.
-package gc
+package ir
import (
- "cmd/compile/internal/ssa"
+ "fmt"
+ "go/constant"
+ "sort"
+ "strings"
+
+ "cmd/compile/internal/base"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
- "sort"
)
+// A Node is the abstract interface to an IR node.
+type Node interface {
+ // Formatting
+ Format(s fmt.State, verb rune)
+ String() string
+
+ // Source position.
+ Pos() src.XPos
+ SetPos(x src.XPos)
+
+ // For making copies. Mainly used by Copy and SepCopy.
+ RawCopy() Node
+
+ // Abstract graph structure, for generic traversals.
+ Op() Op
+ SetOp(x Op)
+ Orig() Node
+ SetOrig(x Node)
+ SubOp() Op
+ SetSubOp(x Op)
+ Left() Node
+ SetLeft(x Node)
+ Right() Node
+ SetRight(x Node)
+ Init() Nodes
+ PtrInit() *Nodes
+ SetInit(x Nodes)
+ Body() Nodes
+ PtrBody() *Nodes
+ SetBody(x Nodes)
+ List() Nodes
+ SetList(x Nodes)
+ PtrList() *Nodes
+ Rlist() Nodes
+ SetRlist(x Nodes)
+ PtrRlist() *Nodes
+
+ // Fields specific to certain Ops only.
+ Type() *types.Type
+ SetType(t *types.Type)
+ Func() *Func
+ SetFunc(x *Func)
+ Name() *Name
+ SetName(x *Name)
+ Sym() *types.Sym
+ SetSym(x *types.Sym)
+ Offset() int64
+ SetOffset(x int64)
+ Class() Class
+ SetClass(x Class)
+ Likely() bool
+ SetLikely(x bool)
+ SliceBounds() (low, high, max Node)
+ SetSliceBounds(low, high, max Node)
+ Iota() int64
+ SetIota(x int64)
+ Colas() bool
+ SetColas(x bool)
+ NoInline() bool
+ SetNoInline(x bool)
+ Transient() bool
+ SetTransient(x bool)
+ Implicit() bool
+ SetImplicit(x bool)
+ IsDDD() bool
+ SetIsDDD(x bool)
+ Embedded() bool
+ SetEmbedded(x bool)
+ IndexMapLValue() bool
+ SetIndexMapLValue(x bool)
+ TChanDir() types.ChanDir
+ SetTChanDir(x types.ChanDir)
+ ResetAux()
+ HasBreak() bool
+ SetHasBreak(x bool)
+ MarkReadonly()
+ Val() constant.Value
+ HasVal() bool
+ SetVal(v constant.Value)
+ Int64Val() int64
+ Uint64Val() uint64
+ CanInt64() bool
+ BoolVal() bool
+ StringVal() string
+
+ // Storage for analysis passes.
+ Esc() uint16
+ SetEsc(x uint16)
+ Walkdef() uint8
+ SetWalkdef(x uint8)
+ Opt() interface{}
+ SetOpt(x interface{})
+ HasOpt() bool
+ Diag() bool
+ SetDiag(x bool)
+ Bounded() bool
+ SetBounded(x bool)
+ Typecheck() uint8
+ SetTypecheck(x uint8)
+ Initorder() uint8
+ SetInitorder(x uint8)
+ NonNil() bool
+ MarkNonNil()
+ HasCall() bool
+ SetHasCall(x bool)
+
+ // Only for SSA and should be removed when SSA starts
+ // using a more specific type than Node.
+ CanBeAnSSASym()
+}
+
+var _ Node = (*node)(nil)
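
The blank assignment above is a compile-time assertion that *node satisfies the Node interface. With the interface in place, passes can traverse trees using only the accessor methods; a minimal sketch (not part of the change) built on Inspect, which is defined later in this file:

    // hasCall reports whether the tree rooted at n contains a call
    // expression. Illustrative only.
    func hasCall(n Node) bool {
        found := false
        Inspect(n, func(n Node) bool {
            switch n.Op() {
            case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH:
                found = true
            }
            return !found // stop descending once a call is found
        })
        return found
    }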
+
// A Node is a single node in the syntax tree.
// Actually the syntax tree is a syntax DAG, because there is only one
// node with Op=ONAME for a given instance of a variable x.
// The same is true for Op=OTYPE and Op=OLITERAL. See MayBeShared.
-type Node struct {
+type node struct {
// Tree structure.
// Generic recursive walks should follow these fields.
- Left *Node
- Right *Node
- Ninit Nodes
- Nbody Nodes
- List Nodes
- Rlist Nodes
+ left Node
+ right Node
+ init Nodes
+ body Nodes
+ list Nodes
+ rlist Nodes
// most nodes
- Type *types.Type
- Orig *Node // original form, for printing, and tracking copies of ONAMEs
+ typ *types.Type
+ orig Node // original form, for printing, and tracking copies of ONAMEs
// func
- Func *Func
+ fn *Func
// ONAME, OTYPE, OPACK, OLABEL, some OLITERAL
- Name *Name
+ name *Name
- Sym *types.Sym // various
- E interface{} // Opt or Val, see methods below
+ sym *types.Sym // various
+ e interface{} // Opt or Val, see methods below
// Various. Usually an offset into a struct. For example:
// - ONAME nodes that refer to local variables use it to identify their stack frame position.
@@ -50,50 +167,85 @@ type Node struct {
// - OINLMARK stores an index into the inlTree data structure.
// - OCLOSURE uses it to store ambient iota value, if any.
// Possibly still more uses. If you find any, document them.
- Xoffset int64
+ offset int64
- Pos src.XPos
+ pos src.XPos
flags bitset32
- Esc uint16 // EscXXX
+ esc uint16 // EscXXX
- Op Op
+ op Op
aux uint8
}
-func (n *Node) ResetAux() {
+func (n *node) Left() Node { return n.left }
+func (n *node) SetLeft(x Node) { n.left = x }
+func (n *node) Right() Node { return n.right }
+func (n *node) SetRight(x Node) { n.right = x }
+func (n *node) Orig() Node { return n.orig }
+func (n *node) SetOrig(x Node) { n.orig = x }
+func (n *node) Type() *types.Type { return n.typ }
+func (n *node) SetType(x *types.Type) { n.typ = x }
+func (n *node) Func() *Func { return n.fn }
+func (n *node) SetFunc(x *Func) { n.fn = x }
+func (n *node) Name() *Name { return n.name }
+func (n *node) SetName(x *Name) { n.name = x }
+func (n *node) Sym() *types.Sym { return n.sym }
+func (n *node) SetSym(x *types.Sym) { n.sym = x }
+func (n *node) Pos() src.XPos { return n.pos }
+func (n *node) SetPos(x src.XPos) { n.pos = x }
+func (n *node) Offset() int64 { return n.offset }
+func (n *node) SetOffset(x int64) { n.offset = x }
+func (n *node) Esc() uint16 { return n.esc }
+func (n *node) SetEsc(x uint16) { n.esc = x }
+func (n *node) Op() Op { return n.op }
+func (n *node) SetOp(x Op) { n.op = x }
+func (n *node) Init() Nodes { return n.init }
+func (n *node) SetInit(x Nodes) { n.init = x }
+func (n *node) PtrInit() *Nodes { return &n.init }
+func (n *node) Body() Nodes { return n.body }
+func (n *node) SetBody(x Nodes) { n.body = x }
+func (n *node) PtrBody() *Nodes { return &n.body }
+func (n *node) List() Nodes { return n.list }
+func (n *node) SetList(x Nodes) { n.list = x }
+func (n *node) PtrList() *Nodes { return &n.list }
+func (n *node) Rlist() Nodes { return n.rlist }
+func (n *node) SetRlist(x Nodes) { n.rlist = x }
+func (n *node) PtrRlist() *Nodes { return &n.rlist }
+
+func (n *node) ResetAux() {
n.aux = 0
}
-func (n *Node) SubOp() Op {
- switch n.Op {
+func (n *node) SubOp() Op {
+ switch n.Op() {
case OASOP, ONAME:
default:
- Fatalf("unexpected op: %v", n.Op)
+ base.Fatalf("unexpected op: %v", n.Op())
}
return Op(n.aux)
}
-func (n *Node) SetSubOp(op Op) {
- switch n.Op {
+func (n *node) SetSubOp(op Op) {
+ switch n.Op() {
case OASOP, ONAME:
default:
- Fatalf("unexpected op: %v", n.Op)
+ base.Fatalf("unexpected op: %v", n.Op())
}
n.aux = uint8(op)
}
-func (n *Node) IndexMapLValue() bool {
- if n.Op != OINDEXMAP {
- Fatalf("unexpected op: %v", n.Op)
+func (n *node) IndexMapLValue() bool {
+ if n.Op() != OINDEXMAP {
+ base.Fatalf("unexpected op: %v", n.Op())
}
return n.aux != 0
}
-func (n *Node) SetIndexMapLValue(b bool) {
- if n.Op != OINDEXMAP {
- Fatalf("unexpected op: %v", n.Op)
+func (n *node) SetIndexMapLValue(b bool) {
+ if n.Op() != OINDEXMAP {
+ base.Fatalf("unexpected op: %v", n.Op())
}
if b {
n.aux = 1
@@ -102,32 +254,32 @@ func (n *Node) SetIndexMapLValue(b bool) {
}
}
-func (n *Node) TChanDir() types.ChanDir {
- if n.Op != OTCHAN {
- Fatalf("unexpected op: %v", n.Op)
+func (n *node) TChanDir() types.ChanDir {
+ if n.Op() != OTCHAN {
+ base.Fatalf("unexpected op: %v", n.Op())
}
return types.ChanDir(n.aux)
}
-func (n *Node) SetTChanDir(dir types.ChanDir) {
- if n.Op != OTCHAN {
- Fatalf("unexpected op: %v", n.Op)
+func (n *node) SetTChanDir(dir types.ChanDir) {
+ if n.Op() != OTCHAN {
+ base.Fatalf("unexpected op: %v", n.Op())
}
n.aux = uint8(dir)
}
-func (n *Node) IsSynthetic() bool {
- name := n.Sym.Name
+func IsSynthetic(n Node) bool {
+ name := n.Sym().Name
return name[0] == '.' || name[0] == '~'
}
// IsAutoTmp indicates if n was created by the compiler as a temporary,
// based on the setting of the .AutoTemp flag in n's Name.
-func (n *Node) IsAutoTmp() bool {
- if n == nil || n.Op != ONAME {
+func IsAutoTmp(n Node) bool {
+ if n == nil || n.Op() != ONAME {
return false
}
- return n.Name.AutoTemp()
+ return n.Name().AutoTemp()
}
const (
@@ -156,51 +308,51 @@ const (
_, nodeEmbedded // ODCLFIELD embedded type
)
-func (n *Node) Class() Class { return Class(n.flags.get3(nodeClass)) }
-func (n *Node) Walkdef() uint8 { return n.flags.get2(nodeWalkdef) }
-func (n *Node) Typecheck() uint8 { return n.flags.get2(nodeTypecheck) }
-func (n *Node) Initorder() uint8 { return n.flags.get2(nodeInitorder) }
-
-func (n *Node) HasBreak() bool { return n.flags&nodeHasBreak != 0 }
-func (n *Node) NoInline() bool { return n.flags&nodeNoInline != 0 }
-func (n *Node) Implicit() bool { return n.flags&nodeImplicit != 0 }
-func (n *Node) IsDDD() bool { return n.flags&nodeIsDDD != 0 }
-func (n *Node) Diag() bool { return n.flags&nodeDiag != 0 }
-func (n *Node) Colas() bool { return n.flags&nodeColas != 0 }
-func (n *Node) NonNil() bool { return n.flags&nodeNonNil != 0 }
-func (n *Node) Transient() bool { return n.flags&nodeTransient != 0 }
-func (n *Node) Bounded() bool { return n.flags&nodeBounded != 0 }
-func (n *Node) HasCall() bool { return n.flags&nodeHasCall != 0 }
-func (n *Node) Likely() bool { return n.flags&nodeLikely != 0 }
-func (n *Node) HasVal() bool { return n.flags&nodeHasVal != 0 }
-func (n *Node) HasOpt() bool { return n.flags&nodeHasOpt != 0 }
-func (n *Node) Embedded() bool { return n.flags&nodeEmbedded != 0 }
-
-func (n *Node) SetClass(b Class) { n.flags.set3(nodeClass, uint8(b)) }
-func (n *Node) SetWalkdef(b uint8) { n.flags.set2(nodeWalkdef, b) }
-func (n *Node) SetTypecheck(b uint8) { n.flags.set2(nodeTypecheck, b) }
-func (n *Node) SetInitorder(b uint8) { n.flags.set2(nodeInitorder, b) }
-
-func (n *Node) SetHasBreak(b bool) { n.flags.set(nodeHasBreak, b) }
-func (n *Node) SetNoInline(b bool) { n.flags.set(nodeNoInline, b) }
-func (n *Node) SetImplicit(b bool) { n.flags.set(nodeImplicit, b) }
-func (n *Node) SetIsDDD(b bool) { n.flags.set(nodeIsDDD, b) }
-func (n *Node) SetDiag(b bool) { n.flags.set(nodeDiag, b) }
-func (n *Node) SetColas(b bool) { n.flags.set(nodeColas, b) }
-func (n *Node) SetTransient(b bool) { n.flags.set(nodeTransient, b) }
-func (n *Node) SetHasCall(b bool) { n.flags.set(nodeHasCall, b) }
-func (n *Node) SetLikely(b bool) { n.flags.set(nodeLikely, b) }
-func (n *Node) SetHasVal(b bool) { n.flags.set(nodeHasVal, b) }
-func (n *Node) SetHasOpt(b bool) { n.flags.set(nodeHasOpt, b) }
-func (n *Node) SetEmbedded(b bool) { n.flags.set(nodeEmbedded, b) }
+func (n *node) Class() Class { return Class(n.flags.get3(nodeClass)) }
+func (n *node) Walkdef() uint8 { return n.flags.get2(nodeWalkdef) }
+func (n *node) Typecheck() uint8 { return n.flags.get2(nodeTypecheck) }
+func (n *node) Initorder() uint8 { return n.flags.get2(nodeInitorder) }
+
+func (n *node) HasBreak() bool { return n.flags&nodeHasBreak != 0 }
+func (n *node) NoInline() bool { return n.flags&nodeNoInline != 0 }
+func (n *node) Implicit() bool { return n.flags&nodeImplicit != 0 }
+func (n *node) IsDDD() bool { return n.flags&nodeIsDDD != 0 }
+func (n *node) Diag() bool { return n.flags&nodeDiag != 0 }
+func (n *node) Colas() bool { return n.flags&nodeColas != 0 }
+func (n *node) NonNil() bool { return n.flags&nodeNonNil != 0 }
+func (n *node) Transient() bool { return n.flags&nodeTransient != 0 }
+func (n *node) Bounded() bool { return n.flags&nodeBounded != 0 }
+func (n *node) HasCall() bool { return n.flags&nodeHasCall != 0 }
+func (n *node) Likely() bool { return n.flags&nodeLikely != 0 }
+func (n *node) HasVal() bool { return n.flags&nodeHasVal != 0 }
+func (n *node) HasOpt() bool { return n.flags&nodeHasOpt != 0 }
+func (n *node) Embedded() bool { return n.flags&nodeEmbedded != 0 }
+
+func (n *node) SetClass(b Class) { n.flags.set3(nodeClass, uint8(b)) }
+func (n *node) SetWalkdef(b uint8) { n.flags.set2(nodeWalkdef, b) }
+func (n *node) SetTypecheck(b uint8) { n.flags.set2(nodeTypecheck, b) }
+func (n *node) SetInitorder(b uint8) { n.flags.set2(nodeInitorder, b) }
+
+func (n *node) SetHasBreak(b bool) { n.flags.set(nodeHasBreak, b) }
+func (n *node) SetNoInline(b bool) { n.flags.set(nodeNoInline, b) }
+func (n *node) SetImplicit(b bool) { n.flags.set(nodeImplicit, b) }
+func (n *node) SetIsDDD(b bool) { n.flags.set(nodeIsDDD, b) }
+func (n *node) SetDiag(b bool) { n.flags.set(nodeDiag, b) }
+func (n *node) SetColas(b bool) { n.flags.set(nodeColas, b) }
+func (n *node) SetTransient(b bool) { n.flags.set(nodeTransient, b) }
+func (n *node) SetHasCall(b bool) { n.flags.set(nodeHasCall, b) }
+func (n *node) SetLikely(b bool) { n.flags.set(nodeLikely, b) }
+func (n *node) setHasVal(b bool) { n.flags.set(nodeHasVal, b) }
+func (n *node) setHasOpt(b bool) { n.flags.set(nodeHasOpt, b) }
+func (n *node) SetEmbedded(b bool) { n.flags.set(nodeEmbedded, b) }
// MarkNonNil marks a pointer n as being guaranteed non-nil,
// on all code paths, at all times.
// During conversion to SSA, non-nil pointers won't have nil checks
// inserted before dereferencing. See state.exprPtr.
-func (n *Node) MarkNonNil() {
- if !n.Type.IsPtr() && !n.Type.IsUnsafePtr() {
- Fatalf("MarkNonNil(%v), type %v", n, n.Type)
+func (n *node) MarkNonNil() {
+ if !n.Type().IsPtr() && !n.Type().IsUnsafePtr() {
+ base.Fatalf("MarkNonNil(%v), type %v", n, n.Type())
}
n.flags.set(nodeNonNil, true)
}
@@ -209,8 +361,8 @@ func (n *Node) MarkNonNil() {
// When n is an index or slice operation, n does not need bounds checks.
// When n is a dereferencing operation, n does not need nil checks.
// When n is a makeslice+copy operation, n does not need length and cap checks.
-func (n *Node) SetBounded(b bool) {
- switch n.Op {
+func (n *node) SetBounded(b bool) {
+ switch n.Op() {
case OINDEX, OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR:
// No bounds checks needed.
case ODOTPTR, ODEREF:
@@ -219,116 +371,119 @@ func (n *Node) SetBounded(b bool) {
// No length and cap checks needed
// since new slice and copied over slice data have same length.
default:
- Fatalf("SetBounded(%v)", n)
+ base.Fatalf("SetBounded(%v)", n)
}
n.flags.set(nodeBounded, b)
}
// MarkReadonly indicates that n is an ONAME with readonly contents.
-func (n *Node) MarkReadonly() {
- if n.Op != ONAME {
- Fatalf("Node.MarkReadonly %v", n.Op)
+func (n *node) MarkReadonly() {
+ if n.Op() != ONAME {
+ base.Fatalf("Node.MarkReadonly %v", n.Op())
}
- n.Name.SetReadonly(true)
+ n.Name().SetReadonly(true)
// Mark the linksym as readonly immediately
// so that the SSA backend can use this information.
// It will be overridden later during dumpglobls.
- n.Sym.Linksym().Type = objabi.SRODATA
+ n.Sym().Linksym().Type = objabi.SRODATA
}
-// Val returns the Val for the node.
-func (n *Node) Val() Val {
+// Val returns the constant.Value for the node.
+func (n *node) Val() constant.Value {
if !n.HasVal() {
- return Val{}
+ return constant.MakeUnknown()
}
- return Val{n.E}
+ return *n.e.(*constant.Value)
}
-// SetVal sets the Val for the node, which must not have been used with SetOpt.
-func (n *Node) SetVal(v Val) {
+// SetVal sets the constant.Value for the node,
+// which must not have been used with SetOpt.
+func (n *node) SetVal(v constant.Value) {
if n.HasOpt() {
- Debug.h = 1
+ base.Flag.LowerH = 1
Dump("have Opt", n)
- Fatalf("have Opt")
+ base.Fatalf("have Opt")
+ }
+ if n.Op() == OLITERAL {
+ AssertValidTypeForConst(n.Type(), v)
}
- n.SetHasVal(true)
- n.E = v.U
+ n.setHasVal(true)
+ n.e = &v
}
// Opt returns the optimizer data for the node.
-func (n *Node) Opt() interface{} {
+func (n *node) Opt() interface{} {
if !n.HasOpt() {
return nil
}
- return n.E
+ return n.e
}
// SetOpt sets the optimizer data for the node, which must not have been used with SetVal.
// SetOpt(nil) clears any existing Opt and is otherwise a no-op (in
// particular for Vals), to simplify call sites that are clearing Opts.
-func (n *Node) SetOpt(x interface{}) {
- if x == nil && n.HasVal() {
+func (n *node) SetOpt(x interface{}) {
+ if x == nil {
+ if n.HasOpt() {
+ n.setHasOpt(false)
+ n.e = nil
+ }
return
}
if n.HasVal() {
- Debug.h = 1
+ base.Flag.LowerH = 1
Dump("have Val", n)
- Fatalf("have Val")
+ base.Fatalf("have Val")
}
- n.SetHasOpt(true)
- n.E = x
+ n.setHasOpt(true)
+ n.e = x
}
-func (n *Node) Iota() int64 {
- return n.Xoffset
+func (n *node) Iota() int64 {
+ return n.Offset()
}
-func (n *Node) SetIota(x int64) {
- n.Xoffset = x
+func (n *node) SetIota(x int64) {
+ n.SetOffset(x)
}
// MayBeShared reports whether n may occur in multiple places in the AST.
// Extra care must be taken when mutating such a node.
-func (n *Node) mayBeShared() bool {
- switch n.Op {
- case ONAME, OLITERAL, OTYPE:
+func MayBeShared(n Node) bool {
+ switch n.Op() {
+ case ONAME, OLITERAL, ONIL, OTYPE:
return true
}
return false
}
-// isMethodExpression reports whether n represents a method expression T.M.
-func (n *Node) isMethodExpression() bool {
- return n.Op == ONAME && n.Left != nil && n.Left.Op == OTYPE && n.Right != nil && n.Right.Op == ONAME
-}
-
// FuncName returns the name (without the package) of the function n.
-func (n *Node) funcname() string {
- if n == nil || n.Func == nil || n.Func.Nname == nil {
+func FuncName(n Node) string {
+ if n == nil || n.Func() == nil || n.Func().Nname == nil {
return "<nil>"
}
- return n.Func.Nname.Sym.Name
+ return n.Func().Nname.Sym().Name
}
// PkgFuncName returns the name of the function referenced by n, with package prepended.
// This differs from the compiler's internal convention where local functions lack a package
// because the ultimate consumer of this is a human looking at an IDE; package is only empty
// if the compilation package is actually the empty string.
-func (n *Node) pkgFuncName() string {
+func PkgFuncName(n Node) string {
var s *types.Sym
if n == nil {
return "<nil>"
}
- if n.Op == ONAME {
- s = n.Sym
+ if n.Op() == ONAME {
+ s = n.Sym()
} else {
- if n.Func == nil || n.Func.Nname == nil {
+ if n.Func() == nil || n.Func().Nname == nil {
return "<nil>"
}
- s = n.Func.Nname.Sym
+ s = n.Func().Nname.Sym()
}
pkg := s.Pkg
- p := myimportpath
+ p := base.Ctxt.Pkgpath
if pkg != nil && pkg.Path != "" {
p = pkg.Path
}
@@ -339,19 +494,19 @@ func (n *Node) pkgFuncName() string {
}
// The compiler needs Node to be assignable to cmd/compile/internal/ssa.Sym.
-func (n *Node) CanBeAnSSASym() {
+func (n *node) CanBeAnSSASym() {
}
// Name holds Node fields used only by named nodes (ONAME, OTYPE, OPACK, OLABEL, some OLITERAL).
type Name struct {
- Pack *Node // real package for import . names
+ Pack Node // real package for import . names
Pkg *types.Pkg // pkg for OPACK nodes
// For a local variable (not param) or extern, the initializing assignment (OAS or OAS2).
// For a closure var, the ONAME node of the outer captured variable
- Defn *Node
+ Defn Node
// The ODCLFUNC node (for a static function/method or a closure) in which
// local variable or param is declared.
- Curfn *Node
+ Curfn Node
Param *Param // additional fields for ONAME, OTYPE
Decldepth int32 // declaration loop depth, increased for every loop or label
// Unique number for ONAME nodes within a function. Function outputs
@@ -410,11 +565,11 @@ func (n *Name) SetOpenDeferSlot(b bool) { n.flags.set(nameOpenDeferSlot,
func (n *Name) SetLibfuzzerExtraCounter(b bool) { n.flags.set(nameLibfuzzerExtraCounter, b) }
type Param struct {
- Ntype *Node
- Heapaddr *Node // temp holding heap address of param
+ Ntype Node
+ Heapaddr Node // temp holding heap address of param
// ONAME PAUTOHEAP
- Stackcopy *Node // the PPARAM/PPARAMOUT on-stack slot (moved func params only)
+ Stackcopy Node // the PPARAM/PPARAMOUT on-stack slot (moved func params only)
// ONAME closure linkage
// Consider:
@@ -485,8 +640,8 @@ type Param struct {
//
// Because of the sharding of pieces of the node, x.Defn means x.Name.Defn
// and x.Innermost/Outer means x.Name.Param.Innermost/Outer.
- Innermost *Node
- Outer *Node
+ Innermost Node
+ Outer Node
// OTYPE & ONAME //go:embed info,
// sharing storage to reduce gc.Param size.
@@ -571,61 +726,60 @@ func (p *Param) SetEmbedFiles(list []string) {
*(*p.Extra).(*embedFileList) = list
}
-// Functions
+// A Func corresponds to a single function in a Go program
+// (and vice versa: each function is denoted by exactly one *Func).
//
-// A simple function declaration is represented as an ODCLFUNC node f
-// and an ONAME node n. They're linked to one another through
-// f.Func.Nname == n and n.Name.Defn == f. When functions are
-// referenced by name in an expression, the function's ONAME node is
-// used directly.
+// There are multiple nodes that represent a Func in the IR.
//
-// Function names have n.Class() == PFUNC. This distinguishes them
-// from variables of function type.
+// The ONAME node (Func.Nname) is used for plain references to it.
+// The ODCLFUNC node (Func.Decl) is used for its declaration code.
+// The OCLOSURE node (Func.OClosure) is used for a reference to a
+// function literal.
//
-// Confusingly, n.Func and f.Func both exist, but commonly point to
-// different Funcs. (Exception: an OCALLPART's Func does point to its
-// ODCLFUNC's Func.)
+// A Func for an imported function will have only an ONAME node.
+// A declared function or method has an ONAME and an ODCLFUNC.
+// A function literal is represented directly by an OCLOSURE, but it also
+// has an ODCLFUNC (and a matching ONAME) representing the compiled
+// underlying form of the closure, which accesses the captured variables
+// using a special data structure passed in a register.
//
-// A method declaration is represented like functions, except n.Sym
+// A method declaration is represented like a function, except f.Sym
// will be the qualified method name (e.g., "T.m") and
// f.Func.Shortname is the bare method name (e.g., "m").
//
-// Method expressions are represented as ONAME/PFUNC nodes like
-// function names, but their Left and Right fields still point to the
-// type and method, respectively. They can be distinguished from
-// normal functions with isMethodExpression. Also, unlike function
-// name nodes, method expression nodes exist for each method
-// expression. The declaration ONAME can be accessed with
-// x.Type.Nname(), where x is the method expression ONAME node.
-//
-// Method values are represented by ODOTMETH/ODOTINTER when called
-// immediately, and OCALLPART otherwise. They are like method
-// expressions, except that for ODOTMETH/ODOTINTER the method name is
-// stored in Sym instead of Right.
-//
-// Closures are represented by OCLOSURE node c. They link back and
-// forth with the ODCLFUNC via Func.Closure; that is, c.Func.Closure
-// == f and f.Func.Closure == c.
-//
-// Function bodies are stored in f.Nbody, and inline function bodies
-// are stored in n.Func.Inl. Pragmas are stored in f.Func.Pragma.
+// A method expression (T.M) is represented as an OMETHEXPR node,
+// in which n.Left and n.Right point to the type and method, respectively.
+// Each distinct mention of a method expression in the source code
+// constructs a fresh node.
//
-// Imported functions skip the ODCLFUNC, so n.Name.Defn is nil. They
-// also use Dcl instead of Inldcl.
-
-// Func holds Node fields used only with function-like nodes.
+// A method value (t.M) is represented by ODOTMETH/ODOTINTER
+// when it is called directly and by OCALLPART otherwise.
+// These are like method expressions, except that for ODOTMETH/ODOTINTER,
+// the method name is stored in Sym instead of Right.
+// Each OCALLPART ends up being implemented as a new
+// function, a bit like a closure, with its own ODCLFUNC.
+// The OCALLPART uses n.Func to record the linkage to
+// the generated ODCLFUNC (as n.Func.Decl), but there is no
+// pointer from the Func back to the OCALLPART.
type Func struct {
+ Nname Node // ONAME node
+ Decl Node // ODCLFUNC node
+ OClosure Node // OCLOSURE node
+
Shortname *types.Sym
+
// Extra entry code for the function. For example, allocate and initialize
- // memory for escaping parameters. However, just for OCLOSURE, Enter is a
- // list of ONAME nodes of captured variables
+ // memory for escaping parameters.
Enter Nodes
Exit Nodes
- // ONAME nodes for closure params, each should have closurevar set
- Cvars Nodes
// ONAME nodes for all params/locals for this func/closure, does NOT
// include closurevars until transformclosure runs.
- Dcl []*Node
+ Dcl []Node
+
+ ClosureEnter Nodes // list of ONAME nodes of captured variables
+ ClosureType Node // closure representation type
+ ClosureCalled bool // closure is only immediately called
+ ClosureVars Nodes // closure params; each has closurevar set
// Parents records the parent scope of each scope within a
// function. The root scope (0) has no parent, so the i'th
@@ -641,12 +795,8 @@ type Func struct {
Closgen int
FieldTrack map[*types.Sym]struct{}
- DebugInfo *ssa.FuncDebug
- Ntype *Node // signature
- Top int // top context (ctxCallee, etc)
- Closure *Node // OCLOSURE <-> ODCLFUNC (see header comment above)
- Nname *Node // The ONAME node associated with an ODCLFUNC (both have same Type)
- lsym *obj.LSym
+ DebugInfo interface{}
+ LSym *obj.LSym
Inl *Inline
@@ -658,13 +808,13 @@ type Func struct {
Pragma PragmaFlag // go:xxx function annotations
flags bitset16
- numDefers int // number of defer calls in the function
- numReturns int // number of explicit returns in the function
+ NumDefers int // number of defer calls in the function
+ NumReturns int // number of explicit returns in the function
	// NWBRCalls records the LSyms of functions called by this
// function for go:nowritebarrierrec analysis. Only filled in
// if nowritebarrierrecCheck != nil.
- nwbrCalls *[]nowritebarrierrecCallSym
+ NWBRCalls *[]SymAndPos
}
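
Concretely, the three Node fields at the head of Func tie the representations together. A hedged sketch of the invariants implied by the comment above, assuming a well-formed declared function:

    // fn is the ODCLFUNC node of a declared function.
    //   f := fn.Func()   // the unique *Func for this function
    //   f.Decl == fn     // ODCLFUNC: the declaration code (set in NodAt)
    //   f.Nname          // ONAME: used for plain references
    //   f.OClosure       // OCLOSURE: non-nil only for function literals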
// An Inline holds fields used for function bodies that can be inlined.
@@ -672,8 +822,8 @@ type Inline struct {
Cost int32 // heuristic cost of inlining this function
	// Copies of Func.Dcl and Body for use during inlining.
- Dcl []*Node
- Body []*Node
+ Dcl []Node
+ Body []Node
}
// A Mark represents a scope boundary.
@@ -729,9 +879,9 @@ func (f *Func) SetExportInline(b bool) { f.flags.set(funcExportInlin
func (f *Func) SetInstrumentBody(b bool) { f.flags.set(funcInstrumentBody, b) }
func (f *Func) SetOpenCodedDeferDisallowed(b bool) { f.flags.set(funcOpenCodedDeferDisallowed, b) }
-func (f *Func) setWBPos(pos src.XPos) {
- if Debug_wb != 0 {
- Warnl(pos, "write barrier")
+func (f *Func) SetWBPos(pos src.XPos) {
+ if base.Debug.WB != 0 {
+ base.WarnfAt(pos, "write barrier")
}
if !f.WBPos.IsKnown() {
f.WBPos = pos
@@ -754,6 +904,7 @@ const (
OTYPE // type name
OPACK // import
OLITERAL // literal
+ ONIL // nil
// expressions
OADD // Left + Right
@@ -883,6 +1034,7 @@ const (
OALIGNOF // unsafe.Alignof(Left)
OOFFSETOF // unsafe.Offsetof(Left)
OSIZEOF // unsafe.Sizeof(Left)
+ OMETHEXPR // method expression
// statements
OBLOCK // { List } (block of code)
@@ -956,17 +1108,17 @@ const (
// Nodes is a pointer to a slice of Node.
// For fields that are not used in most nodes, this is used instead of
// a slice to save space.
-type Nodes struct{ slice *[]*Node }
+type Nodes struct{ slice *[]Node }
// AsNodes returns a slice of Node as a Nodes value.
-func asNodes(s []*Node) Nodes {
+func AsNodes(s []Node) Nodes {
return Nodes{&s}
}
// Slice returns the entries in Nodes as a slice.
// Changes to the slice entries (as in s[i] = n) will be reflected in
// the Nodes.
-func (n Nodes) Slice() []*Node {
+func (n Nodes) Slice() []Node {
if n.slice == nil {
return nil
}
@@ -983,25 +1135,25 @@ func (n Nodes) Len() int {
// Index returns the i'th element of Nodes.
// It panics if n does not have at least i+1 elements.
-func (n Nodes) Index(i int) *Node {
+func (n Nodes) Index(i int) Node {
return (*n.slice)[i]
}
// First returns the first element of Nodes (same as n.Index(0)).
// It panics if n has no elements.
-func (n Nodes) First() *Node {
+func (n Nodes) First() Node {
return (*n.slice)[0]
}
// Second returns the second element of Nodes (same as n.Index(1)).
// It panics if n has fewer than two elements.
-func (n Nodes) Second() *Node {
+func (n Nodes) Second() Node {
return (*n.slice)[1]
}
// Set sets n to a slice.
// This takes ownership of the slice.
-func (n *Nodes) Set(s []*Node) {
+func (n *Nodes) Set(s []Node) {
if len(s) == 0 {
n.slice = nil
} else {
@@ -1014,18 +1166,18 @@ func (n *Nodes) Set(s []*Node) {
}
// Set1 sets n to a slice containing a single node.
-func (n *Nodes) Set1(n1 *Node) {
- n.slice = &[]*Node{n1}
+func (n *Nodes) Set1(n1 Node) {
+ n.slice = &[]Node{n1}
}
// Set2 sets n to a slice containing two nodes.
-func (n *Nodes) Set2(n1, n2 *Node) {
- n.slice = &[]*Node{n1, n2}
+func (n *Nodes) Set2(n1, n2 Node) {
+ n.slice = &[]Node{n1, n2}
}
// Set3 sets n to a slice containing three nodes.
-func (n *Nodes) Set3(n1, n2, n3 *Node) {
- n.slice = &[]*Node{n1, n2, n3}
+func (n *Nodes) Set3(n1, n2, n3 Node) {
+ n.slice = &[]Node{n1, n2, n3}
}
// MoveNodes sets n to the contents of n2, then clears n2.
@@ -1036,35 +1188,35 @@ func (n *Nodes) MoveNodes(n2 *Nodes) {
// SetIndex sets the i'th element of Nodes to node.
// It panics if n does not have at least i+1 elements.
-func (n Nodes) SetIndex(i int, node *Node) {
+func (n Nodes) SetIndex(i int, node Node) {
(*n.slice)[i] = node
}
// SetFirst sets the first element of Nodes to node.
// It panics if n does not have at least one element.
-func (n Nodes) SetFirst(node *Node) {
+func (n Nodes) SetFirst(node Node) {
(*n.slice)[0] = node
}
// SetSecond sets the second element of Nodes to node.
// It panics if n does not have at least two elements.
-func (n Nodes) SetSecond(node *Node) {
+func (n Nodes) SetSecond(node Node) {
(*n.slice)[1] = node
}
// Addr returns the address of the i'th element of Nodes.
// It panics if n does not have at least i+1 elements.
-func (n Nodes) Addr(i int) **Node {
+func (n Nodes) Addr(i int) *Node {
return &(*n.slice)[i]
}
// Append appends entries to Nodes.
-func (n *Nodes) Append(a ...*Node) {
+func (n *Nodes) Append(a ...Node) {
if len(a) == 0 {
return
}
if n.slice == nil {
- s := make([]*Node, len(a))
+ s := make([]Node, len(a))
copy(s, a)
n.slice = &s
return
@@ -1074,7 +1226,7 @@ func (n *Nodes) Append(a ...*Node) {
// Prepend prepends entries to Nodes.
// If a slice is passed in, this will take ownership of it.
-func (n *Nodes) Prepend(a ...*Node) {
+func (n *Nodes) Prepend(a ...Node) {
if len(a) == 0 {
return
}
@@ -1099,43 +1251,43 @@ func (n *Nodes) AppendNodes(n2 *Nodes) {
// Inspect invokes f on each node in an AST in depth-first order.
// If f(n) returns false, Inspect skips visiting n's children.
-func inspect(n *Node, f func(*Node) bool) {
+func Inspect(n Node, f func(Node) bool) {
if n == nil || !f(n) {
return
}
- inspectList(n.Ninit, f)
- inspect(n.Left, f)
- inspect(n.Right, f)
- inspectList(n.List, f)
- inspectList(n.Nbody, f)
- inspectList(n.Rlist, f)
+ InspectList(n.Init(), f)
+ Inspect(n.Left(), f)
+ Inspect(n.Right(), f)
+ InspectList(n.List(), f)
+ InspectList(n.Body(), f)
+ InspectList(n.Rlist(), f)
}
-func inspectList(l Nodes, f func(*Node) bool) {
+func InspectList(l Nodes, f func(Node) bool) {
for _, n := range l.Slice() {
- inspect(n, f)
+ Inspect(n, f)
}
}
// NodeQueue is a FIFO queue of Node. The zero value of NodeQueue is
// a ready-to-use empty queue.
-type nodeQueue struct {
- ring []*Node
+type NodeQueue struct {
+ ring []Node
head, tail int
}
// Empty reports whether q contains no Nodes.
-func (q *nodeQueue) empty() bool {
+func (q *NodeQueue) Empty() bool {
return q.head == q.tail
}
// PushRight appends n to the right of the queue.
-func (q *nodeQueue) pushRight(n *Node) {
+func (q *NodeQueue) PushRight(n Node) {
if len(q.ring) == 0 {
- q.ring = make([]*Node, 16)
+ q.ring = make([]Node, 16)
} else if q.head+len(q.ring) == q.tail {
// Grow the ring.
- nring := make([]*Node, len(q.ring)*2)
+ nring := make([]Node, len(q.ring)*2)
// Copy the old elements.
part := q.ring[q.head%len(q.ring):]
if q.tail-q.head <= len(part) {
@@ -1154,8 +1306,8 @@ func (q *nodeQueue) pushRight(n *Node) {
// PopLeft pops a node from the left of the queue. It panics if q is
// empty.
-func (q *nodeQueue) popLeft() *Node {
- if q.empty() {
+func (q *NodeQueue) PopLeft() Node {
+ if q.Empty() {
panic("dequeue empty")
}
n := q.ring[q.head%len(q.ring)]
@@ -1164,28 +1316,352 @@ func (q *nodeQueue) popLeft() *Node {
}
// NodeSet is a set of Nodes.
-type NodeSet map[*Node]struct{}
+type NodeSet map[Node]struct{}
// Has reports whether s contains n.
-func (s NodeSet) Has(n *Node) bool {
+func (s NodeSet) Has(n Node) bool {
_, isPresent := s[n]
return isPresent
}
// Add adds n to s.
-func (s *NodeSet) Add(n *Node) {
+func (s *NodeSet) Add(n Node) {
if *s == nil {
- *s = make(map[*Node]struct{})
+ *s = make(map[Node]struct{})
}
(*s)[n] = struct{}{}
}
// Sorted returns s sorted according to less.
-func (s NodeSet) Sorted(less func(*Node, *Node) bool) []*Node {
- var res []*Node
+func (s NodeSet) Sorted(less func(Node, Node) bool) []Node {
+ var res []Node
for n := range s {
res = append(res, n)
}
sort.Slice(res, func(i, j int) bool { return less(res[i], res[j]) })
return res
}
+
+func Nod(op Op, nleft, nright Node) Node {
+ return NodAt(base.Pos, op, nleft, nright)
+}
+
+func NodAt(pos src.XPos, op Op, nleft, nright Node) Node {
+ var n Node
+ switch op {
+ case ODCLFUNC:
+ var x struct {
+ n node
+ f Func
+ }
+ n = &x.n
+ n.SetFunc(&x.f)
+ n.Func().Decl = n
+ case ONAME:
+ base.Fatalf("use newname instead")
+ case OLABEL, OPACK:
+ var x struct {
+ n node
+ m Name
+ }
+ n = &x.n
+ n.SetName(&x.m)
+ default:
+ n = new(node)
+ }
+ n.SetOp(op)
+ n.SetLeft(nleft)
+ n.SetRight(nright)
+ n.SetPos(pos)
+ n.SetOffset(types.BADWIDTH)
+ n.SetOrig(n)
+ return n
+}
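
The combined-struct locals in NodAt (and in NewNameAt below) appear to be an allocation optimization: declaring the node and its side storage as fields of one anonymous struct means a single heap allocation serves both once &x.n escapes. The idiom in isolation, illustrative only:

    var x struct {
        n node
        f Func
    }
    n := &x.n       // x escapes to the heap as one object
    n.SetFunc(&x.f) // &x.f points into that same allocation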
+
+// NewNameAt returns a new ONAME Node associated with symbol s at position pos.
+// The caller is responsible for setting n.Name.Curfn.
+func NewNameAt(pos src.XPos, s *types.Sym) Node {
+ if s == nil {
+ base.Fatalf("newnamel nil")
+ }
+
+ var x struct {
+ n node
+ m Name
+ p Param
+ }
+ n := &x.n
+ n.SetName(&x.m)
+ n.Name().Param = &x.p
+
+ n.SetOp(ONAME)
+ n.SetPos(pos)
+ n.SetOrig(n)
+
+ n.SetSym(s)
+ return n
+}
+
+// A Class describes the "storage class" of a variable or function.
+// During parsing, storage classes are called declaration contexts.
+type Class uint8
+
+//go:generate stringer -type=Class
+const (
+ Pxxx Class = iota // no class; used during ssa conversion to indicate pseudo-variables
+ PEXTERN // global variables
+ PAUTO // local variables
+ PAUTOHEAP // local variables or parameters moved to heap
+ PPARAM // input arguments
+ PPARAMOUT // output results
+ PFUNC // global functions
+
+ // Careful: Class is stored in three bits in Node.flags.
+ _ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3)
+)
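
The final blank assignment is a compile-time bound check: should a new constant push iota past 8, (1 << 3) - iota becomes a negative untyped constant, and converting a negative constant to uint does not compile, so Class cannot silently outgrow its three bits in Node.flags. For example:

    const tooMany = uint((1 << 3) - 9) // error: constant -1 overflows uint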
+
+type PragmaFlag int16
+
+const (
+ // Func pragmas.
+ Nointerface PragmaFlag = 1 << iota
+ Noescape // func parameters don't escape
+ Norace // func must not have race detector annotations
+ Nosplit // func should not execute on separate stack
+ Noinline // func should not be inlined
+ NoCheckPtr // func should not be instrumented by checkptr
+ CgoUnsafeArgs // treat a pointer to one arg as a pointer to them all
+ UintptrEscapes // pointers converted to uintptr escape
+
+ // Runtime-only func pragmas.
+ // See ../../../../runtime/README.md for detailed descriptions.
+ Systemstack // func must run on system stack
+ Nowritebarrier // emit compiler error instead of write barrier
+ Nowritebarrierrec // error on write barrier in this or recursive callees
+ Yeswritebarrierrec // cancels Nowritebarrierrec in this function and callees
+
+ // Runtime and cgo type pragmas
+ NotInHeap // values of this type must not be heap allocated
+
+ // Go command pragmas
+ GoBuildPragma
+)
+
+type SymAndPos struct {
+ Sym *obj.LSym // LSym of callee
+ Pos src.XPos // line of call
+}
+
+func AsNode(n types.IRNode) Node {
+ if n == nil {
+ return nil
+ }
+ return n.(Node)
+}
+
+var BlankNode Node
+
+// OrigSym returns the original symbol written by the user.
+func OrigSym(s *types.Sym) *types.Sym {
+ if s == nil {
+ return nil
+ }
+
+ if len(s.Name) > 1 && s.Name[0] == '~' {
+ switch s.Name[1] {
+ case 'r': // originally an unnamed result
+ return nil
+ case 'b': // originally the blank identifier _
+ // TODO(mdempsky): Does s.Pkg matter here?
+ return BlankNode.Sym()
+ }
+ return s
+ }
+
+ if strings.HasPrefix(s.Name, ".anon") {
+ // originally an unnamed or _ name (see subr.go: structargs)
+ return nil
+ }
+
+ return s
+}
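
Worked examples, following the cases in the function body (the mangled names are hypothetical):

    // OrigSym(~r1)    == nil             (originally an unnamed result)
    // OrigSym(~b2)    == BlankNode.Sym() (originally the blank identifier _)
    // OrigSym(.anon0) == nil             (originally an unnamed or _ name)
    // OrigSym(x)      == x               (ordinary names pass through)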
+
+// SliceBounds returns n's slice bounds: low, high, and max in expr[low:high:max].
+// n must be a slice expression. max is nil if n is a simple slice expression.
+func (n *node) SliceBounds() (low, high, max Node) {
+ if n.List().Len() == 0 {
+ return nil, nil, nil
+ }
+
+ switch n.Op() {
+ case OSLICE, OSLICEARR, OSLICESTR:
+ s := n.List().Slice()
+ return s[0], s[1], nil
+ case OSLICE3, OSLICE3ARR:
+ s := n.List().Slice()
+ return s[0], s[1], s[2]
+ }
+ base.Fatalf("SliceBounds op %v: %v", n.Op(), n)
+ return nil, nil, nil
+}
+
+// SetSliceBounds sets n's slice bounds.
+// n must be a slice expression. If max is non-nil, n must be a full slice expression.
+func (n *node) SetSliceBounds(low, high, max Node) {
+ switch n.Op() {
+ case OSLICE, OSLICEARR, OSLICESTR:
+ if max != nil {
+ base.Fatalf("SetSliceBounds %v given three bounds", n.Op())
+ }
+ s := n.List().Slice()
+ if s == nil {
+ if low == nil && high == nil {
+ return
+ }
+ n.PtrList().Set2(low, high)
+ return
+ }
+ s[0] = low
+ s[1] = high
+ return
+ case OSLICE3, OSLICE3ARR:
+ s := n.List().Slice()
+ if s == nil {
+ if low == nil && high == nil && max == nil {
+ return
+ }
+ n.PtrList().Set3(low, high, max)
+ return
+ }
+ s[0] = low
+ s[1] = high
+ s[2] = max
+ return
+ }
+ base.Fatalf("SetSliceBounds op %v: %v", n.Op(), n)
+}
+
+// IsSlice3 reports whether o is a slice3 op (OSLICE3, OSLICE3ARR).
+// o must be a slicing op.
+func (o Op) IsSlice3() bool {
+ switch o {
+ case OSLICE, OSLICEARR, OSLICESTR:
+ return false
+ case OSLICE3, OSLICE3ARR:
+ return true
+ }
+ base.Fatalf("IsSlice3 op %v", o)
+ return false
+}
+
+func IsConst(n Node, ct constant.Kind) bool {
+ return ConstType(n) == ct
+}
+
+// Int64Val returns n as an int64.
+// n must be an integer or rune constant.
+func (n *node) Int64Val() int64 {
+ if !IsConst(n, constant.Int) {
+ base.Fatalf("Int64Val(%v)", n)
+ }
+ x, ok := constant.Int64Val(n.Val())
+ if !ok {
+ base.Fatalf("Int64Val(%v)", n)
+ }
+ return x
+}
+
+// CanInt64 reports whether it is safe to call Int64Val() on n.
+func (n *node) CanInt64() bool {
+ if !IsConst(n, constant.Int) {
+ return false
+ }
+
+ // If the value inside n cannot be represented as an int64, the
+ // return value of Int64Val is undefined.
+ _, ok := constant.Int64Val(n.Val())
+ return ok
+}
+
+// Uint64Val returns n as a uint64.
+// n must be an integer or rune constant.
+func (n *node) Uint64Val() uint64 {
+ if !IsConst(n, constant.Int) {
+ base.Fatalf("Uint64Val(%v)", n)
+ }
+ x, ok := constant.Uint64Val(n.Val())
+ if !ok {
+ base.Fatalf("Uint64Val(%v)", n)
+ }
+ return x
+}
+
+// BoolVal returns n as a bool.
+// n must be a boolean constant.
+func (n *node) BoolVal() bool {
+ if !IsConst(n, constant.Bool) {
+ base.Fatalf("BoolVal(%v)", n)
+ }
+ return constant.BoolVal(n.Val())
+}
+
+// StringVal returns the value of a literal string Node as a string.
+// n must be a string constant.
+func (n *node) StringVal() string {
+ if !IsConst(n, constant.String) {
+ base.Fatalf("StringVal(%v)", n)
+ }
+ return constant.StringVal(n.Val())
+}
+
+// RawCopy returns a shallow copy of n.
+// Note: Copy or SepCopy (rather than RawCopy) is usually the
+// correct choice (see comment with Copy, below).
+func (n *node) RawCopy() Node {
+ copy := *n
+ return &copy
+}
+
+// SepCopy returns a separate shallow copy of n, with the copy's
+// Orig pointing to itself.
+func SepCopy(n Node) Node {
+ n = n.RawCopy()
+ n.SetOrig(n)
+ return n
+}
+
+// Copy returns a shallow copy of n and adjusts the copy's Orig if
+// necessary: In general, if n.Orig points to itself, the copy's
+// Orig should point to itself as well. Otherwise, if n is modified,
+// the copy's Orig node appears modified, too, and then doesn't
+// represent the original node anymore.
+// (This caused the wrong complit Op to be used when printing error
+// messages; see issues #26855, #27765).
+func Copy(n Node) Node {
+ copy := n.RawCopy()
+ if n.Orig() == n {
+ copy.SetOrig(copy)
+ }
+ return copy
+}
+
+// IsNil reports whether n represents the universal untyped zero value "nil".
+func IsNil(n Node) bool {
+ // Check n.Orig because constant propagation may produce typed nil constants,
+ // which don't exist in the Go spec.
+ return n.Orig().Op() == ONIL
+}
+
+func IsBlank(n Node) bool {
+ if n == nil {
+ return false
+ }
+ return n.Sym().IsBlank()
+}
+
+// IsMethod reports whether n is a method.
+// n must be a function or a method.
+func IsMethod(n Node) bool {
+ return n.Type().Recv() != nil
+}
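
A short usage sketch for the constant helpers above. The describeConst
function and its example package are illustrative, not part of this CL;
it assumes the Node interface exposes CanInt64 and Int64Val, as the
*node methods above suggest:

	package example

	import (
		"fmt"
		"go/constant"

		"cmd/compile/internal/ir"
	)

	// describeConst formats an integer constant defensively:
	// CanInt64 guards Int64Val, which would otherwise Fatalf
	// on values outside the int64 range.
	func describeConst(n ir.Node) string {
		if !ir.IsConst(n, constant.Int) {
			return "not an integer constant"
		}
		if !n.CanInt64() {
			return "integer constant out of int64 range"
		}
		return fmt.Sprintf("constant %d", n.Int64Val())
	}
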
diff --git a/src/cmd/compile/internal/ir/op_string.go b/src/cmd/compile/internal/ir/op_string.go
new file mode 100644
index 0000000000..d0d3778357
--- /dev/null
+++ b/src/cmd/compile/internal/ir/op_string.go
@@ -0,0 +1,177 @@
+// Code generated by "stringer -type=Op -trimprefix=O"; DO NOT EDIT.
+
+package ir
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[OXXX-0]
+ _ = x[ONAME-1]
+ _ = x[ONONAME-2]
+ _ = x[OTYPE-3]
+ _ = x[OPACK-4]
+ _ = x[OLITERAL-5]
+ _ = x[ONIL-6]
+ _ = x[OADD-7]
+ _ = x[OSUB-8]
+ _ = x[OOR-9]
+ _ = x[OXOR-10]
+ _ = x[OADDSTR-11]
+ _ = x[OADDR-12]
+ _ = x[OANDAND-13]
+ _ = x[OAPPEND-14]
+ _ = x[OBYTES2STR-15]
+ _ = x[OBYTES2STRTMP-16]
+ _ = x[ORUNES2STR-17]
+ _ = x[OSTR2BYTES-18]
+ _ = x[OSTR2BYTESTMP-19]
+ _ = x[OSTR2RUNES-20]
+ _ = x[OAS-21]
+ _ = x[OAS2-22]
+ _ = x[OAS2DOTTYPE-23]
+ _ = x[OAS2FUNC-24]
+ _ = x[OAS2MAPR-25]
+ _ = x[OAS2RECV-26]
+ _ = x[OASOP-27]
+ _ = x[OCALL-28]
+ _ = x[OCALLFUNC-29]
+ _ = x[OCALLMETH-30]
+ _ = x[OCALLINTER-31]
+ _ = x[OCALLPART-32]
+ _ = x[OCAP-33]
+ _ = x[OCLOSE-34]
+ _ = x[OCLOSURE-35]
+ _ = x[OCOMPLIT-36]
+ _ = x[OMAPLIT-37]
+ _ = x[OSTRUCTLIT-38]
+ _ = x[OARRAYLIT-39]
+ _ = x[OSLICELIT-40]
+ _ = x[OPTRLIT-41]
+ _ = x[OCONV-42]
+ _ = x[OCONVIFACE-43]
+ _ = x[OCONVNOP-44]
+ _ = x[OCOPY-45]
+ _ = x[ODCL-46]
+ _ = x[ODCLFUNC-47]
+ _ = x[ODCLFIELD-48]
+ _ = x[ODCLCONST-49]
+ _ = x[ODCLTYPE-50]
+ _ = x[ODELETE-51]
+ _ = x[ODOT-52]
+ _ = x[ODOTPTR-53]
+ _ = x[ODOTMETH-54]
+ _ = x[ODOTINTER-55]
+ _ = x[OXDOT-56]
+ _ = x[ODOTTYPE-57]
+ _ = x[ODOTTYPE2-58]
+ _ = x[OEQ-59]
+ _ = x[ONE-60]
+ _ = x[OLT-61]
+ _ = x[OLE-62]
+ _ = x[OGE-63]
+ _ = x[OGT-64]
+ _ = x[ODEREF-65]
+ _ = x[OINDEX-66]
+ _ = x[OINDEXMAP-67]
+ _ = x[OKEY-68]
+ _ = x[OSTRUCTKEY-69]
+ _ = x[OLEN-70]
+ _ = x[OMAKE-71]
+ _ = x[OMAKECHAN-72]
+ _ = x[OMAKEMAP-73]
+ _ = x[OMAKESLICE-74]
+ _ = x[OMAKESLICECOPY-75]
+ _ = x[OMUL-76]
+ _ = x[ODIV-77]
+ _ = x[OMOD-78]
+ _ = x[OLSH-79]
+ _ = x[ORSH-80]
+ _ = x[OAND-81]
+ _ = x[OANDNOT-82]
+ _ = x[ONEW-83]
+ _ = x[ONEWOBJ-84]
+ _ = x[ONOT-85]
+ _ = x[OBITNOT-86]
+ _ = x[OPLUS-87]
+ _ = x[ONEG-88]
+ _ = x[OOROR-89]
+ _ = x[OPANIC-90]
+ _ = x[OPRINT-91]
+ _ = x[OPRINTN-92]
+ _ = x[OPAREN-93]
+ _ = x[OSEND-94]
+ _ = x[OSLICE-95]
+ _ = x[OSLICEARR-96]
+ _ = x[OSLICESTR-97]
+ _ = x[OSLICE3-98]
+ _ = x[OSLICE3ARR-99]
+ _ = x[OSLICEHEADER-100]
+ _ = x[ORECOVER-101]
+ _ = x[ORECV-102]
+ _ = x[ORUNESTR-103]
+ _ = x[OSELRECV-104]
+ _ = x[OSELRECV2-105]
+ _ = x[OIOTA-106]
+ _ = x[OREAL-107]
+ _ = x[OIMAG-108]
+ _ = x[OCOMPLEX-109]
+ _ = x[OALIGNOF-110]
+ _ = x[OOFFSETOF-111]
+ _ = x[OSIZEOF-112]
+ _ = x[OMETHEXPR-113]
+ _ = x[OBLOCK-114]
+ _ = x[OBREAK-115]
+ _ = x[OCASE-116]
+ _ = x[OCONTINUE-117]
+ _ = x[ODEFER-118]
+ _ = x[OEMPTY-119]
+ _ = x[OFALL-120]
+ _ = x[OFOR-121]
+ _ = x[OFORUNTIL-122]
+ _ = x[OGOTO-123]
+ _ = x[OIF-124]
+ _ = x[OLABEL-125]
+ _ = x[OGO-126]
+ _ = x[ORANGE-127]
+ _ = x[ORETURN-128]
+ _ = x[OSELECT-129]
+ _ = x[OSWITCH-130]
+ _ = x[OTYPESW-131]
+ _ = x[OTCHAN-132]
+ _ = x[OTMAP-133]
+ _ = x[OTSTRUCT-134]
+ _ = x[OTINTER-135]
+ _ = x[OTFUNC-136]
+ _ = x[OTARRAY-137]
+ _ = x[ODDD-138]
+ _ = x[OINLCALL-139]
+ _ = x[OEFACE-140]
+ _ = x[OITAB-141]
+ _ = x[OIDATA-142]
+ _ = x[OSPTR-143]
+ _ = x[OCLOSUREVAR-144]
+ _ = x[OCFUNC-145]
+ _ = x[OCHECKNIL-146]
+ _ = x[OVARDEF-147]
+ _ = x[OVARKILL-148]
+ _ = x[OVARLIVE-149]
+ _ = x[ORESULT-150]
+ _ = x[OINLMARK-151]
+ _ = x[ORETJMP-152]
+ _ = x[OGETG-153]
+ _ = x[OEND-154]
+}
+
+const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRBLOCKBREAKCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYDDDINLCALLEFACEITABIDATASPTRCLOSUREVARCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND"
+
+var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 310, 317, 323, 326, 332, 339, 347, 351, 358, 366, 368, 370, 372, 374, 376, 378, 383, 388, 396, 399, 408, 411, 415, 423, 430, 439, 452, 455, 458, 461, 464, 467, 470, 476, 479, 485, 488, 494, 498, 501, 505, 510, 515, 521, 526, 530, 535, 543, 551, 557, 566, 577, 584, 588, 595, 602, 610, 614, 618, 622, 629, 636, 644, 650, 658, 663, 668, 672, 680, 685, 690, 694, 697, 705, 709, 711, 716, 718, 723, 729, 735, 741, 747, 752, 756, 763, 769, 774, 780, 783, 790, 795, 799, 804, 808, 818, 823, 831, 837, 844, 851, 857, 864, 870, 874, 877}
+
+func (i Op) String() string {
+ if i >= Op(len(_Op_index)-1) {
+ return "Op(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Op_name[_Op_index[i]:_Op_index[i+1]]
+}
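
The generated table packs every op name into a single string plus an
index array of cumulative offsets; String slices between neighboring
offsets. The same pattern on a toy enum, purely for illustration:

	package example

	import "strconv"

	type Color uint8

	const (
		Red Color = iota
		Green
		Blue
	)

	// One backing string plus cumulative offsets, exactly the
	// stringer layout used for Op above.
	const _Color_name = "RedGreenBlue"

	var _Color_index = [...]uint8{0, 3, 8, 12}

	func (c Color) String() string {
		if c >= Color(len(_Color_index)-1) {
			return "Color(" + strconv.FormatInt(int64(c), 10) + ")"
		}
		return _Color_name[_Color_index[c]:_Color_index[c+1]]
	}
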
diff --git a/src/cmd/compile/internal/gc/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go
index ce4a216c2e..0a9542fa44 100644
--- a/src/cmd/compile/internal/gc/sizeof_test.go
+++ b/src/cmd/compile/internal/ir/sizeof_test.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package ir
import (
"reflect"
@@ -20,10 +20,10 @@ func TestSizeof(t *testing.T) {
_32bit uintptr // size on 32bit platforms
_64bit uintptr // size on 64bit platforms
}{
- {Func{}, 124, 224},
- {Name{}, 32, 56},
- {Param{}, 24, 48},
- {Node{}, 76, 128},
+ {Func{}, 152, 280},
+ {Name{}, 44, 80},
+ {Param{}, 44, 88},
+ {node{}, 88, 152},
}
for _, tt := range tests {
diff --git a/src/cmd/compile/internal/ir/val.go b/src/cmd/compile/internal/ir/val.go
new file mode 100644
index 0000000000..9035e90084
--- /dev/null
+++ b/src/cmd/compile/internal/ir/val.go
@@ -0,0 +1,120 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "go/constant"
+ "math"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+)
+
+func ConstType(n Node) constant.Kind {
+ if n == nil || n.Op() != OLITERAL {
+ return constant.Unknown
+ }
+ return n.Val().Kind()
+}
+
+// ConstValue returns the constant value stored in n as an interface{}.
+// It returns int64s for ints and runes, float64s for floats,
+// and complex128s for complex values.
+func ConstValue(n Node) interface{} {
+ switch v := n.Val(); v.Kind() {
+ default:
+ base.Fatalf("unexpected constant: %v", v)
+ panic("unreachable")
+ case constant.Bool:
+ return constant.BoolVal(v)
+ case constant.String:
+ return constant.StringVal(v)
+ case constant.Int:
+ return Int64Val(n.Type(), v)
+ case constant.Float:
+ return Float64Val(v)
+ case constant.Complex:
+ return complex(Float64Val(constant.Real(v)), Float64Val(constant.Imag(v)))
+ }
+}
+
+// Int64Val returns v converted to int64.
+// Note: if t is uint64, very large values will be converted to negative int64.
+func Int64Val(t *types.Type, v constant.Value) int64 {
+ if t.IsUnsigned() {
+ if x, ok := constant.Uint64Val(v); ok {
+ return int64(x)
+ }
+ } else {
+ if x, ok := constant.Int64Val(v); ok {
+ return x
+ }
+ }
+ base.Fatalf("%v out of range for %v", v, t)
+ panic("unreachable")
+}
+
+func Float64Val(v constant.Value) float64 {
+ if x, _ := constant.Float64Val(v); !math.IsInf(x, 0) {
+ return x + 0 // avoid -0 (should not be needed, but be conservative)
+ }
+ base.Fatalf("bad float64 value: %v", v)
+ panic("unreachable")
+}
+
+func AssertValidTypeForConst(t *types.Type, v constant.Value) {
+ if !ValidTypeForConst(t, v) {
+ base.Fatalf("%v does not represent %v", t, v)
+ }
+}
+
+func ValidTypeForConst(t *types.Type, v constant.Value) bool {
+ switch v.Kind() {
+ case constant.Unknown:
+ return OKForConst[t.Etype]
+ case constant.Bool:
+ return t.IsBoolean()
+ case constant.String:
+ return t.IsString()
+ case constant.Int:
+ return t.IsInteger()
+ case constant.Float:
+ return t.IsFloat()
+ case constant.Complex:
+ return t.IsComplex()
+ }
+
+ base.Fatalf("unexpected constant kind: %v", v)
+ panic("unreachable")
+}
+
+// NewLiteral returns a new untyped constant with value v.
+func NewLiteral(v constant.Value) Node {
+ n := Nod(OLITERAL, nil, nil)
+ if k := v.Kind(); k != constant.Unknown {
+ n.SetType(idealType(k))
+ n.SetVal(v)
+ }
+ return n
+}
+
+func idealType(ct constant.Kind) *types.Type {
+ switch ct {
+ case constant.String:
+ return types.UntypedString
+ case constant.Bool:
+ return types.UntypedBool
+ case constant.Int:
+ return types.UntypedInt
+ case constant.Float:
+ return types.UntypedFloat
+ case constant.Complex:
+ return types.UntypedComplex
+ }
+ base.Fatalf("unexpected Ctype: %v", ct)
+ return nil
+}
+
+var OKForConst [types.NTYPE]bool
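
A sketch of how NewLiteral and ConstValue round-trip an untyped
constant. It assumes the compiler's universe (types.UntypedInt and
friends) is already initialized and that "fmt" and "go/constant" are
imported, so it illustrates the flow rather than standing alone:

	// demo builds an untyped integer literal and reads it back.
	func demo() {
		// NewLiteral picks the untyped type from the constant kind
		// via idealType: constant.Int maps to types.UntypedInt.
		n := NewLiteral(constant.MakeInt64(42))
		// ConstValue unwraps the literal to a Go value, int64(42).
		fmt.Println(ConstValue(n))
	}
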
diff --git a/src/cmd/compile/internal/mips/ggen.go b/src/cmd/compile/internal/mips/ggen.go
index 5e867721c3..2356267df7 100644
--- a/src/cmd/compile/internal/mips/ggen.go
+++ b/src/cmd/compile/internal/mips/ggen.go
@@ -5,6 +5,7 @@
package mips
import (
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/internal/obj"
"cmd/internal/obj/mips"
@@ -18,7 +19,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
}
if cnt < int64(4*gc.Widthptr) {
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
- p = pp.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, gc.Ctxt.FixedFrameSize()+off+i)
+ p = pp.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, base.Ctxt.FixedFrameSize()+off+i)
}
} else {
//fmt.Printf("zerorange frame:%v, lo: %v, hi:%v \n", frame ,lo, hi)
@@ -28,7 +29,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
// MOVW R0, (Widthptr)r1
// ADD $Widthptr, r1
// BNE r1, r2, loop
- p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+off-4, obj.TYPE_REG, mips.REGRT1, 0)
+ p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-4, obj.TYPE_REG, mips.REGRT1, 0)
p.Reg = mips.REGSP
p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
p.Reg = mips.REGRT1
diff --git a/src/cmd/compile/internal/mips/ssa.go b/src/cmd/compile/internal/mips/ssa.go
index 9d11c6bf53..bd71b2fcd8 100644
--- a/src/cmd/compile/internal/mips/ssa.go
+++ b/src/cmd/compile/internal/mips/ssa.go
@@ -7,7 +7,9 @@ package mips
import (
"math"
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
@@ -287,7 +289,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
- case *gc.Node:
+ case ir.Node:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
@@ -766,8 +768,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
- if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
- gc.Warnl(v.Pos, "generated nil check")
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpMIPSFPFlagTrue,
ssa.OpMIPSFPFlagFalse:
@@ -796,7 +798,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
diff --git a/src/cmd/compile/internal/mips64/ssa.go b/src/cmd/compile/internal/mips64/ssa.go
index 2727c4d8a8..bcadebde4e 100644
--- a/src/cmd/compile/internal/mips64/ssa.go
+++ b/src/cmd/compile/internal/mips64/ssa.go
@@ -7,7 +7,9 @@ package mips64
import (
"math"
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
@@ -261,7 +263,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
- case *gc.Node:
+ case ir.Node:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
@@ -724,8 +726,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
- if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
- gc.Warnl(v.Pos, "generated nil check")
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpMIPS64FPFlagTrue,
ssa.OpMIPS64FPFlagFalse:
@@ -757,7 +759,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
diff --git a/src/cmd/compile/internal/ppc64/ggen.go b/src/cmd/compile/internal/ppc64/ggen.go
index a5a772b491..8f5caf5f99 100644
--- a/src/cmd/compile/internal/ppc64/ggen.go
+++ b/src/cmd/compile/internal/ppc64/ggen.go
@@ -5,6 +5,7 @@
package ppc64
import (
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/internal/obj"
"cmd/internal/obj/ppc64"
@@ -16,17 +17,17 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
}
if cnt < int64(4*gc.Widthptr) {
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
- p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, gc.Ctxt.FixedFrameSize()+off+i)
+ p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, base.Ctxt.FixedFrameSize()+off+i)
}
} else if cnt <= int64(128*gc.Widthptr) {
- p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGRT1, 0)
+ p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGRT1, 0)
p.Reg = ppc64.REGSP
p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
} else {
- p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGTMP, 0)
+ p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGTMP, 0)
p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
p.Reg = ppc64.REGSP
p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
@@ -66,7 +67,7 @@ func ginsnopdefer(pp *gc.Progs) *obj.Prog {
// on ppc64 in both shared and non-shared modes.
ginsnop(pp)
- if gc.Ctxt.Flag_shared {
+ if base.Ctxt.Flag_shared {
p := pp.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_MEM
p.From.Offset = 24
diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go
index 3e20c44a4c..32e9be8417 100644
--- a/src/cmd/compile/internal/ppc64/ssa.go
+++ b/src/cmd/compile/internal/ppc64/ssa.go
@@ -5,7 +5,9 @@
package ppc64
import (
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
@@ -473,7 +475,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -750,7 +752,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg()
}
- case *obj.LSym, *gc.Node:
+ case *obj.LSym, ir.Node:
p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_ADDR
p.From.Reg = v.Args[0].Reg()
@@ -1784,7 +1786,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// Insert a hint this is not a subroutine return.
pp.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: 1})
- if gc.Ctxt.Flag_shared {
+ if base.Ctxt.Flag_shared {
// When compiling Go into PIC, the function we just
// called via pointer might have been implemented in
// a separate module and so overwritten the TOC
@@ -1852,8 +1854,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
- if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
- gc.Warnl(v.Pos, "generated nil check")
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
}
// These should be resolved by rules and not make it here.
diff --git a/src/cmd/compile/internal/riscv64/ggen.go b/src/cmd/compile/internal/riscv64/ggen.go
index f7c03fe7c2..18905a4aea 100644
--- a/src/cmd/compile/internal/riscv64/ggen.go
+++ b/src/cmd/compile/internal/riscv64/ggen.go
@@ -5,6 +5,7 @@
package riscv64
import (
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/internal/obj"
"cmd/internal/obj/riscv"
@@ -16,7 +17,7 @@ func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
}
// Adjust the frame to account for LR.
- off += gc.Ctxt.FixedFrameSize()
+ off += base.Ctxt.FixedFrameSize()
if cnt < int64(4*gc.Widthptr) {
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go
index 0beb5b4bd1..c81b6897a6 100644
--- a/src/cmd/compile/internal/riscv64/ssa.go
+++ b/src/cmd/compile/internal/riscv64/ssa.go
@@ -5,7 +5,9 @@
package riscv64
import (
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
@@ -91,7 +93,7 @@ func loadByType(t *types.Type) obj.As {
case 8:
return riscv.AMOVD
default:
- gc.Fatalf("unknown float width for load %d in type %v", width, t)
+ base.Fatalf("unknown float width for load %d in type %v", width, t)
return 0
}
}
@@ -118,7 +120,7 @@ func loadByType(t *types.Type) obj.As {
case 8:
return riscv.AMOV
default:
- gc.Fatalf("unknown width for load %d in type %v", width, t)
+ base.Fatalf("unknown width for load %d in type %v", width, t)
return 0
}
}
@@ -134,7 +136,7 @@ func storeByType(t *types.Type) obj.As {
case 8:
return riscv.AMOVD
default:
- gc.Fatalf("unknown float width for store %d in type %v", width, t)
+ base.Fatalf("unknown float width for store %d in type %v", width, t)
return 0
}
}
@@ -149,7 +151,7 @@ func storeByType(t *types.Type) obj.As {
case 8:
return riscv.AMOV
default:
- gc.Fatalf("unknown width for store %d in type %v", width, t)
+ base.Fatalf("unknown width for store %d in type %v", width, t)
return 0
}
}
@@ -322,7 +324,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
- case *gc.Node:
+ case ir.Node:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
@@ -586,8 +588,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = riscv.REG_ZERO
- if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos == 1 in generated wrappers
- gc.Warnl(v.Pos, "generated nil check")
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos == 1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpRISCV64LoweredGetClosurePtr:
@@ -598,7 +600,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(riscv.AMOV)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
diff --git a/src/cmd/compile/internal/s390x/ggen.go b/src/cmd/compile/internal/s390x/ggen.go
index 5a837d8574..0e2f48bf4c 100644
--- a/src/cmd/compile/internal/s390x/ggen.go
+++ b/src/cmd/compile/internal/s390x/ggen.go
@@ -5,6 +5,7 @@
package s390x
import (
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/internal/obj"
"cmd/internal/obj/s390x"
@@ -23,7 +24,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
}
// Adjust the frame to account for LR.
- off += gc.Ctxt.FixedFrameSize()
+ off += base.Ctxt.FixedFrameSize()
reg := int16(s390x.REGSP)
// If the off cannot fit in a 12-bit unsigned displacement then we
diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go
index 8037357131..366adffd98 100644
--- a/src/cmd/compile/internal/s390x/ssa.go
+++ b/src/cmd/compile/internal/s390x/ssa.go
@@ -7,6 +7,7 @@ package s390x
import (
"math"
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
@@ -573,7 +574,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(s390x.AMOVD)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Offset = -base.Ctxt.FixedFrameSize()
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -642,8 +643,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
- if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
- gc.Warnl(v.Pos, "generated nil check")
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpS390XMVC:
vo := v.AuxValAndOff()
diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go
index 0fe0337ddf..eeabd81d03 100644
--- a/src/cmd/compile/internal/ssa/config.go
+++ b/src/cmd/compile/internal/ssa/config.go
@@ -5,6 +5,7 @@
package ssa
import (
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
@@ -138,7 +139,7 @@ type Frontend interface {
// Auto returns a Node for an auto variable of the given type.
// The SSA compiler uses this function to allocate space for spills.
- Auto(src.XPos, *types.Type) GCNode
+ Auto(src.XPos, *types.Type) ir.Node
// Given the name for a compound type, returns the name we should use
// for the parts of that compound type.
@@ -178,24 +179,6 @@ type Frontend interface {
MyImportPath() string
}
-// interface used to hold a *gc.Node (a stack variable).
-// We'd use *gc.Node directly but that would lead to an import cycle.
-type GCNode interface {
- Typ() *types.Type
- String() string
- IsSynthetic() bool
- IsAutoTmp() bool
- StorageClass() StorageClass
-}
-
-type StorageClass uint8
-
-const (
- ClassAuto StorageClass = iota // local stack variable
- ClassParam // argument
- ClassParamOut // return value
-)
-
const go116lateCallExpansion = true
// LateCallExpansionEnabledWithin returns true if late call expansion should be tested
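
With the GCNode shim removed, SSA passes assert v.Aux directly to
ir.Node and check the storage class via Class, as the deadstore and
stackalloc changes below show. A minimal sketch of the new idiom
(autoFor is a hypothetical helper, not part of this CL):

	// autoFor reports the PAUTO variable backing v's Aux, if any.
	func autoFor(v *Value) (ir.Node, bool) {
		n, ok := v.Aux.(ir.Node)
		if !ok || n.Class() != ir.PAUTO {
			return nil, false
		}
		return n, true
	}
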
diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go
index 0664013b39..f3ef33d670 100644
--- a/src/cmd/compile/internal/ssa/deadstore.go
+++ b/src/cmd/compile/internal/ssa/deadstore.go
@@ -5,6 +5,7 @@
package ssa
import (
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
)
@@ -136,9 +137,9 @@ func dse(f *Func) {
// reaches stores then we delete all the stores. The other operations will then
// be eliminated by the dead code elimination pass.
func elimDeadAutosGeneric(f *Func) {
- addr := make(map[*Value]GCNode) // values that the address of the auto reaches
- elim := make(map[*Value]GCNode) // values that could be eliminated if the auto is
- used := make(map[GCNode]bool) // used autos that must be kept
+ addr := make(map[*Value]ir.Node) // values that the address of the auto reaches
+ elim := make(map[*Value]ir.Node) // values that could be eliminated if the auto is
+ used := make(map[ir.Node]bool) // used autos that must be kept
// visit the value and report whether any of the maps are updated
visit := func(v *Value) (changed bool) {
@@ -146,8 +147,8 @@ func elimDeadAutosGeneric(f *Func) {
switch v.Op {
case OpAddr, OpLocalAddr:
// Propagate the address if it points to an auto.
- n, ok := v.Aux.(GCNode)
- if !ok || n.StorageClass() != ClassAuto {
+ n, ok := v.Aux.(ir.Node)
+ if !ok || n.Class() != ir.PAUTO {
return
}
if addr[v] == nil {
@@ -157,8 +158,8 @@ func elimDeadAutosGeneric(f *Func) {
return
case OpVarDef, OpVarKill:
// v should be eliminated if we eliminate the auto.
- n, ok := v.Aux.(GCNode)
- if !ok || n.StorageClass() != ClassAuto {
+ n, ok := v.Aux.(ir.Node)
+ if !ok || n.Class() != ir.PAUTO {
return
}
if elim[v] == nil {
@@ -173,8 +174,8 @@ func elimDeadAutosGeneric(f *Func) {
// for open-coded defers from being removed (since they
// may not be used by the inline code, but will be used by
// panic processing).
- n, ok := v.Aux.(GCNode)
- if !ok || n.StorageClass() != ClassAuto {
+ n, ok := v.Aux.(ir.Node)
+ if !ok || n.Class() != ir.PAUTO {
return
}
if !used[n] {
@@ -221,7 +222,7 @@ func elimDeadAutosGeneric(f *Func) {
}
// Propagate any auto addresses through v.
- node := GCNode(nil)
+ var node ir.Node
for _, a := range args {
if n, ok := addr[a]; ok && !used[n] {
if node == nil {
@@ -298,15 +299,15 @@ func elimUnreadAutos(f *Func) {
// Loop over all ops that affect autos taking note of which
// autos we need and also stores that we might be able to
// eliminate.
- seen := make(map[GCNode]bool)
+ seen := make(map[ir.Node]bool)
var stores []*Value
for _, b := range f.Blocks {
for _, v := range b.Values {
- n, ok := v.Aux.(GCNode)
+ n, ok := v.Aux.(ir.Node)
if !ok {
continue
}
- if n.StorageClass() != ClassAuto {
+ if n.Class() != ir.PAUTO {
continue
}
@@ -334,7 +335,7 @@ func elimUnreadAutos(f *Func) {
// Eliminate stores to unread autos.
for _, store := range stores {
- n, _ := store.Aux.(GCNode)
+ n, _ := store.Aux.(ir.Node)
if seen[n] {
continue
}
diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go
index 6353f72897..0d660361b1 100644
--- a/src/cmd/compile/internal/ssa/debug.go
+++ b/src/cmd/compile/internal/ssa/debug.go
@@ -5,6 +5,7 @@
package ssa
import (
+ "cmd/compile/internal/ir"
"cmd/internal/dwarf"
"cmd/internal/obj"
"encoding/hex"
@@ -24,7 +25,7 @@ type FuncDebug struct {
// Slots is all the slots used in the debug info, indexed by their SlotID.
Slots []LocalSlot
// The user variables, indexed by VarID.
- Vars []GCNode
+ Vars []ir.Node
// The slots that make up each variable, indexed by VarID.
VarSlots [][]SlotID
// The location list data, indexed by VarID. Must be processed by PutLocationList.
@@ -165,7 +166,7 @@ func (s *debugState) logf(msg string, args ...interface{}) {
type debugState struct {
// See FuncDebug.
slots []LocalSlot
- vars []GCNode
+ vars []ir.Node
varSlots [][]SlotID
lists [][]byte
@@ -189,7 +190,7 @@ type debugState struct {
// The pending location list entry for each user variable, indexed by VarID.
pendingEntries []pendingEntry
- varParts map[GCNode][]SlotID
+ varParts map[ir.Node][]SlotID
blockDebug []BlockDebug
pendingSlotLocs []VarLoc
liveSlots []liveSlot
@@ -346,7 +347,7 @@ func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset fu
}
if state.varParts == nil {
- state.varParts = make(map[GCNode][]SlotID)
+ state.varParts = make(map[ir.Node][]SlotID)
} else {
for n := range state.varParts {
delete(state.varParts, n)
@@ -360,7 +361,7 @@ func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset fu
state.vars = state.vars[:0]
for i, slot := range f.Names {
state.slots = append(state.slots, slot)
- if slot.N.IsSynthetic() {
+ if ir.IsSynthetic(slot.N) {
continue
}
@@ -379,8 +380,8 @@ func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset fu
for _, b := range f.Blocks {
for _, v := range b.Values {
if v.Op == OpVarDef || v.Op == OpVarKill {
- n := v.Aux.(GCNode)
- if n.IsSynthetic() {
+ n := v.Aux.(ir.Node)
+ if ir.IsSynthetic(n) {
continue
}
@@ -425,7 +426,7 @@ func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset fu
state.initializeCache(f, len(state.varParts), len(state.slots))
for i, slot := range f.Names {
- if slot.N.IsSynthetic() {
+ if ir.IsSynthetic(slot.N) {
continue
}
for _, value := range f.NamedValues[slot] {
@@ -717,8 +718,8 @@ func (state *debugState) processValue(v *Value, vSlots []SlotID, vReg *Register)
switch {
case v.Op == OpVarDef, v.Op == OpVarKill:
- n := v.Aux.(GCNode)
- if n.IsSynthetic() {
+ n := v.Aux.(ir.Node)
+ if ir.IsSynthetic(n) {
break
}
diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go
index 3681af6599..180afab33b 100644
--- a/src/cmd/compile/internal/ssa/expand_calls.go
+++ b/src/cmd/compile/internal/ssa/expand_calls.go
@@ -247,7 +247,7 @@ func expandCalls(f *Func) {
// i.e., the struct select is generated and remains in because it is not applied to an actual structure.
// The OpLoad was created to load the single field of the IData
// This case removes that StructSelect.
- if leafType != selector.Type {
+ if leafType != selector.Type && !selector.Type.IsEmptyInterface() { // empty interface for #42727
f.Fatalf("Unexpected Load as selector, leaf=%s, selector=%s\n", leaf.LongString(), selector.LongString())
}
leaf.copyOf(selector)
diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go
index b4c3e5cfdf..df83383308 100644
--- a/src/cmd/compile/internal/ssa/export_test.go
+++ b/src/cmd/compile/internal/ssa/export_test.go
@@ -5,6 +5,7 @@
package ssa
import (
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm64"
@@ -36,10 +37,10 @@ func testConfigArch(tb testing.TB, arch string) *Conf {
tb.Fatalf("unknown arch %s", arch)
}
if ctxt.Arch.PtrSize != 8 {
- tb.Fatal("dummyTypes is 64-bit only")
+ tb.Fatal("testTypes is 64-bit only")
}
c := &Conf{
- config: NewConfig(arch, dummyTypes, ctxt, true),
+ config: NewConfig(arch, testTypes, ctxt, true),
tb: tb,
}
return c
@@ -53,108 +54,85 @@ type Conf struct {
func (c *Conf) Frontend() Frontend {
if c.fe == nil {
- c.fe = DummyFrontend{t: c.tb, ctxt: c.config.ctxt}
+ c.fe = TestFrontend{t: c.tb, ctxt: c.config.ctxt}
}
return c.fe
}
-// DummyFrontend is a test-only frontend.
+// TestFrontend is a test-only frontend.
// It assumes 64 bit integers and pointers.
-type DummyFrontend struct {
+type TestFrontend struct {
t testing.TB
ctxt *obj.Link
}
-type DummyAuto struct {
- t *types.Type
- s string
-}
-
-func (d *DummyAuto) Typ() *types.Type {
- return d.t
-}
-
-func (d *DummyAuto) String() string {
- return d.s
-}
-
-func (d *DummyAuto) StorageClass() StorageClass {
- return ClassAuto
-}
-
-func (d *DummyAuto) IsSynthetic() bool {
- return false
-}
-
-func (d *DummyAuto) IsAutoTmp() bool {
- return true
-}
-
-func (DummyFrontend) StringData(s string) *obj.LSym {
+func (TestFrontend) StringData(s string) *obj.LSym {
return nil
}
-func (DummyFrontend) Auto(pos src.XPos, t *types.Type) GCNode {
- return &DummyAuto{t: t, s: "aDummyAuto"}
+func (TestFrontend) Auto(pos src.XPos, t *types.Type) ir.Node {
+ n := ir.NewNameAt(pos, &types.Sym{Name: "aFakeAuto"})
+ n.SetClass(ir.PAUTO)
+ return n
}
-func (d DummyFrontend) SplitString(s LocalSlot) (LocalSlot, LocalSlot) {
- return LocalSlot{N: s.N, Type: dummyTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.Int, Off: s.Off + 8}
+func (d TestFrontend) SplitString(s LocalSlot) (LocalSlot, LocalSlot) {
+ return LocalSlot{N: s.N, Type: testTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: testTypes.Int, Off: s.Off + 8}
}
-func (d DummyFrontend) SplitInterface(s LocalSlot) (LocalSlot, LocalSlot) {
- return LocalSlot{N: s.N, Type: dummyTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.BytePtr, Off: s.Off + 8}
+func (d TestFrontend) SplitInterface(s LocalSlot) (LocalSlot, LocalSlot) {
+ return LocalSlot{N: s.N, Type: testTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: testTypes.BytePtr, Off: s.Off + 8}
}
-func (d DummyFrontend) SplitSlice(s LocalSlot) (LocalSlot, LocalSlot, LocalSlot) {
+func (d TestFrontend) SplitSlice(s LocalSlot) (LocalSlot, LocalSlot, LocalSlot) {
return LocalSlot{N: s.N, Type: s.Type.Elem().PtrTo(), Off: s.Off},
- LocalSlot{N: s.N, Type: dummyTypes.Int, Off: s.Off + 8},
- LocalSlot{N: s.N, Type: dummyTypes.Int, Off: s.Off + 16}
+ LocalSlot{N: s.N, Type: testTypes.Int, Off: s.Off + 8},
+ LocalSlot{N: s.N, Type: testTypes.Int, Off: s.Off + 16}
}
-func (d DummyFrontend) SplitComplex(s LocalSlot) (LocalSlot, LocalSlot) {
+func (d TestFrontend) SplitComplex(s LocalSlot) (LocalSlot, LocalSlot) {
if s.Type.Size() == 16 {
- return LocalSlot{N: s.N, Type: dummyTypes.Float64, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.Float64, Off: s.Off + 8}
+ return LocalSlot{N: s.N, Type: testTypes.Float64, Off: s.Off}, LocalSlot{N: s.N, Type: testTypes.Float64, Off: s.Off + 8}
}
- return LocalSlot{N: s.N, Type: dummyTypes.Float32, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.Float32, Off: s.Off + 4}
+ return LocalSlot{N: s.N, Type: testTypes.Float32, Off: s.Off}, LocalSlot{N: s.N, Type: testTypes.Float32, Off: s.Off + 4}
}
-func (d DummyFrontend) SplitInt64(s LocalSlot) (LocalSlot, LocalSlot) {
+func (d TestFrontend) SplitInt64(s LocalSlot) (LocalSlot, LocalSlot) {
if s.Type.IsSigned() {
- return LocalSlot{N: s.N, Type: dummyTypes.Int32, Off: s.Off + 4}, LocalSlot{N: s.N, Type: dummyTypes.UInt32, Off: s.Off}
+ return LocalSlot{N: s.N, Type: testTypes.Int32, Off: s.Off + 4}, LocalSlot{N: s.N, Type: testTypes.UInt32, Off: s.Off}
}
- return LocalSlot{N: s.N, Type: dummyTypes.UInt32, Off: s.Off + 4}, LocalSlot{N: s.N, Type: dummyTypes.UInt32, Off: s.Off}
+ return LocalSlot{N: s.N, Type: testTypes.UInt32, Off: s.Off + 4}, LocalSlot{N: s.N, Type: testTypes.UInt32, Off: s.Off}
}
-func (d DummyFrontend) SplitStruct(s LocalSlot, i int) LocalSlot {
+func (d TestFrontend) SplitStruct(s LocalSlot, i int) LocalSlot {
return LocalSlot{N: s.N, Type: s.Type.FieldType(i), Off: s.Off + s.Type.FieldOff(i)}
}
-func (d DummyFrontend) SplitArray(s LocalSlot) LocalSlot {
+func (d TestFrontend) SplitArray(s LocalSlot) LocalSlot {
return LocalSlot{N: s.N, Type: s.Type.Elem(), Off: s.Off}
}
-func (d DummyFrontend) SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot {
+func (d TestFrontend) SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot {
return LocalSlot{N: parent.N, Type: t, Off: offset}
}
-func (DummyFrontend) Line(_ src.XPos) string {
+func (TestFrontend) Line(_ src.XPos) string {
return "unknown.go:0"
}
-func (DummyFrontend) AllocFrame(f *Func) {
+func (TestFrontend) AllocFrame(f *Func) {
}
-func (d DummyFrontend) Syslook(s string) *obj.LSym {
+func (d TestFrontend) Syslook(s string) *obj.LSym {
return d.ctxt.Lookup(s)
}
-func (DummyFrontend) UseWriteBarrier() bool {
+func (TestFrontend) UseWriteBarrier() bool {
return true // only writebarrier_test cares
}
-func (DummyFrontend) SetWBPos(pos src.XPos) {
+func (TestFrontend) SetWBPos(pos src.XPos) {
}
-func (d DummyFrontend) Logf(msg string, args ...interface{}) { d.t.Logf(msg, args...) }
-func (d DummyFrontend) Log() bool { return true }
+func (d TestFrontend) Logf(msg string, args ...interface{}) { d.t.Logf(msg, args...) }
+func (d TestFrontend) Log() bool { return true }
-func (d DummyFrontend) Fatalf(_ src.XPos, msg string, args ...interface{}) { d.t.Fatalf(msg, args...) }
-func (d DummyFrontend) Warnl(_ src.XPos, msg string, args ...interface{}) { d.t.Logf(msg, args...) }
-func (d DummyFrontend) Debug_checknil() bool { return false }
+func (d TestFrontend) Fatalf(_ src.XPos, msg string, args ...interface{}) { d.t.Fatalf(msg, args...) }
+func (d TestFrontend) Warnl(_ src.XPos, msg string, args ...interface{}) { d.t.Logf(msg, args...) }
+func (d TestFrontend) Debug_checknil() bool { return false }
-func (d DummyFrontend) MyImportPath() string {
+func (d TestFrontend) MyImportPath() string {
return "my/import/path"
}
-var dummyTypes Types
+var testTypes Types
func init() {
// Initialize just enough of the universe and the types package to make our tests function.
@@ -198,12 +176,12 @@ func init() {
t.Align = uint8(typ.width)
types.Types[typ.et] = t
}
- dummyTypes.SetTypPtrs()
+ testTypes.SetTypPtrs()
}
-func (d DummyFrontend) DerefItab(sym *obj.LSym, off int64) *obj.LSym { return nil }
+func (d TestFrontend) DerefItab(sym *obj.LSym, off int64) *obj.LSym { return nil }
-func (d DummyFrontend) CanSSA(t *types.Type) bool {
- // There are no un-SSAable types in dummy land.
+func (d TestFrontend) CanSSA(t *types.Type) bool {
+ // There are no un-SSAable types in test land.
return true
}
diff --git a/src/cmd/compile/internal/ssa/location.go b/src/cmd/compile/internal/ssa/location.go
index a333982389..3dc3a81703 100644
--- a/src/cmd/compile/internal/ssa/location.go
+++ b/src/cmd/compile/internal/ssa/location.go
@@ -5,6 +5,7 @@
package ssa
import (
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"fmt"
)
@@ -59,7 +60,7 @@ func (r *Register) GCNum() int16 {
// { N: len, Type: int, Off: 0, SplitOf: parent, SplitOffset: 8}
// parent = &{N: s, Type: string}
type LocalSlot struct {
- N GCNode // an ONAME *gc.Node representing a stack location.
+ N ir.Node // an ONAME ir.Node representing a stack location.
Type *types.Type // type of slot
Off int64 // offset of slot in N
diff --git a/src/cmd/compile/internal/ssa/nilcheck.go b/src/cmd/compile/internal/ssa/nilcheck.go
index d1bad529e7..b36f6b97e1 100644
--- a/src/cmd/compile/internal/ssa/nilcheck.go
+++ b/src/cmd/compile/internal/ssa/nilcheck.go
@@ -5,6 +5,7 @@
package ssa
import (
+ "cmd/compile/internal/ir"
"cmd/internal/objabi"
"cmd/internal/src"
)
@@ -235,7 +236,7 @@ func nilcheckelim2(f *Func) {
continue
}
if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
- if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(GCNode).Typ().HasPointers()) {
+ if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(ir.Node).Type().HasPointers()) {
// These ops don't really change memory.
continue
// Note: OpVarDef requires that the defined variable not have pointers.
diff --git a/src/cmd/compile/internal/ssa/poset.go b/src/cmd/compile/internal/ssa/poset.go
index f5a2b3a8c2..1e04b48ba4 100644
--- a/src/cmd/compile/internal/ssa/poset.go
+++ b/src/cmd/compile/internal/ssa/poset.go
@@ -136,13 +136,13 @@ type posetNode struct {
// Most internal data structures are pre-allocated and flat, so for instance adding a
// new relation does not cause any allocation. For performance reasons,
// each node has only up to two outgoing edges (like a binary tree), so intermediate
-// "dummy" nodes are required to represent more than two relations. For instance,
+// "extra" nodes are required to represent more than two relations. For instance,
// to record that A<I, A<J, A<K (with no known relation between I,J,K), we create the
// following DAG:
//
// A
// / \
-// I dummy
+// I extra
// / \
// J K
//
@@ -223,7 +223,7 @@ func (po *poset) addchild(i1, i2 uint32, strict bool) {
po.setchr(i1, e2)
po.upush(undoSetChr, i1, 0)
} else {
- // If n1 already has two children, add an intermediate dummy
+ // If n1 already has two children, add an intermediate extra
// node to record the relation correctly (without relating
// n2 to other existing nodes). Use a non-deterministic value
// to decide whether to append on the left or the right, to avoid
@@ -231,27 +231,27 @@ func (po *poset) addchild(i1, i2 uint32, strict bool) {
//
// n1
// / \
- // i1l dummy
+ // i1l extra
// / \
// i1r n2
//
- dummy := po.newnode(nil)
+ extra := po.newnode(nil)
if (i1^i2)&1 != 0 { // non-deterministic
- po.setchl(dummy, i1r)
- po.setchr(dummy, e2)
- po.setchr(i1, newedge(dummy, false))
+ po.setchl(extra, i1r)
+ po.setchr(extra, e2)
+ po.setchr(i1, newedge(extra, false))
po.upush(undoSetChr, i1, i1r)
} else {
- po.setchl(dummy, i1l)
- po.setchr(dummy, e2)
- po.setchl(i1, newedge(dummy, false))
+ po.setchl(extra, i1l)
+ po.setchr(extra, e2)
+ po.setchl(i1, newedge(extra, false))
po.upush(undoSetChl, i1, i1l)
}
}
}
// newnode allocates a new node bound to SSA value n.
-// If n is nil, this is a dummy node (= only used internally).
+// If n is nil, this is an extra node (= only used internally).
func (po *poset) newnode(n *Value) uint32 {
i := po.lastidx + 1
po.lastidx++
@@ -380,9 +380,9 @@ func (po *poset) newconst(n *Value) {
case higherptr != 0:
// Higher bound only. To record n < higher, we need
- // a dummy root:
+ // an extra root:
//
- // dummy
+ // extra
// / \
// root \
// / n
@@ -395,11 +395,11 @@ func (po *poset) newconst(n *Value) {
if r2 != po.roots[0] { // all constants should be in root #0
panic("constant not in root #0")
}
- dummy := po.newnode(nil)
- po.changeroot(r2, dummy)
- po.upush(undoChangeRoot, dummy, newedge(r2, false))
- po.addchild(dummy, r2, false)
- po.addchild(dummy, i, false)
+ extra := po.newnode(nil)
+ po.changeroot(r2, extra)
+ po.upush(undoChangeRoot, extra, newedge(r2, false))
+ po.addchild(extra, r2, false)
+ po.addchild(extra, i, false)
po.addchild(i, i2, true)
}
@@ -612,7 +612,7 @@ func (po *poset) findroot(i uint32) uint32 {
panic("findroot didn't find any root")
}
-// mergeroot merges two DAGs into one DAG by creating a new dummy root
+// mergeroot merges two DAGs into one DAG by creating a new extra root
func (po *poset) mergeroot(r1, r2 uint32) uint32 {
// Root #0 is special as it contains all constants. Since mergeroot
// discards r2 as root and keeps r1, make sure that r2 is not root #0,
@@ -1004,7 +1004,7 @@ func (po *poset) setOrder(n1, n2 *Value, strict bool) bool {
case !f1 && f2:
// n1 is not in any DAG but n2 is. If n2 is a root, we can put
// n1 in its place as a root; otherwise, we need to create a new
- // dummy root to record the relation.
+ // extra root to record the relation.
i1 = po.newnode(n1)
if po.isroot(i2) {
@@ -1020,17 +1020,17 @@ func (po *poset) setOrder(n1, n2 *Value, strict bool) bool {
// Re-parent as follows:
//
- // dummy
+ // extra
// r / \
// \ ===> r i1
// i2 \ /
// i2
//
- dummy := po.newnode(nil)
- po.changeroot(r, dummy)
- po.upush(undoChangeRoot, dummy, newedge(r, false))
- po.addchild(dummy, r, false)
- po.addchild(dummy, i1, false)
+ extra := po.newnode(nil)
+ po.changeroot(r, extra)
+ po.upush(undoChangeRoot, extra, newedge(r, false))
+ po.addchild(extra, r, false)
+ po.addchild(extra, i1, false)
po.addchild(i1, i2, strict)
case f1 && f2:
diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go
index 0339b073ae..459a9923f7 100644
--- a/src/cmd/compile/internal/ssa/regalloc.go
+++ b/src/cmd/compile/internal/ssa/regalloc.go
@@ -104,7 +104,7 @@
// If b3 is the primary predecessor of b2, then we use x3 in b2 and
// add a x4:CX->BX copy at the end of b4.
// But the definition of x3 doesn't dominate b2. We should really
-// insert a dummy phi at the start of b2 (x5=phi(x3,x4):BX) to keep
+// insert an extra phi at the start of b2 (x5=phi(x3,x4):BX) to keep
// SSA form. For now, we ignore this problem as remaining in strict
// SSA form isn't needed after regalloc. We'll just leave the use
// of x3 not dominated by the definition of x3, and the CX->BX copy
@@ -114,6 +114,7 @@
package ssa
import (
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/objabi"
"cmd/internal/src"
@@ -1248,7 +1249,7 @@ func (s *regAllocState) regalloc(f *Func) {
// This forces later liveness analysis to make the
// value live at this point.
v.SetArg(0, s.makeSpill(a, b))
- } else if _, ok := a.Aux.(GCNode); ok && vi.rematerializeable {
+ } else if _, ok := a.Aux.(ir.Node); ok && vi.rematerializeable {
// Rematerializeable value with a gc.Node. This is the address of
// a stack object (e.g. an LEAQ). Keep the object live.
// Change it to VarLive, which is what plive expects for locals.
diff --git a/src/cmd/compile/internal/ssa/stackalloc.go b/src/cmd/compile/internal/ssa/stackalloc.go
index 406a3c3ea5..5257d44cfe 100644
--- a/src/cmd/compile/internal/ssa/stackalloc.go
+++ b/src/cmd/compile/internal/ssa/stackalloc.go
@@ -7,6 +7,7 @@
package ssa
import (
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
@@ -156,7 +157,7 @@ func (s *stackAllocState) stackalloc() {
if v.Aux == nil {
f.Fatalf("%s has nil Aux\n", v.LongString())
}
- loc := LocalSlot{N: v.Aux.(GCNode), Type: v.Type, Off: v.AuxInt}
+ loc := LocalSlot{N: v.Aux.(ir.Node), Type: v.Type, Off: v.AuxInt}
if f.pass.debug > stackDebug {
fmt.Printf("stackalloc %s to %s\n", v, loc)
}
diff --git a/src/cmd/compile/internal/syntax/dumper_test.go b/src/cmd/compile/internal/syntax/dumper_test.go
index f84bd2d705..22680dce78 100644
--- a/src/cmd/compile/internal/syntax/dumper_test.go
+++ b/src/cmd/compile/internal/syntax/dumper_test.go
@@ -13,7 +13,7 @@ func TestDump(t *testing.T) {
t.Skip("skipping test in short mode")
}
- // provide a dummy error handler so parsing doesn't stop after first error
+ // provide a no-op error handler so parsing doesn't stop after first error
ast, err := ParseFile(*src_, func(error) {}, nil, CheckBranches)
if err != nil {
t.Error(err)
diff --git a/src/cmd/compile/internal/syntax/nodes.go b/src/cmd/compile/internal/syntax/nodes.go
index e5b69628ec..306e695a33 100644
--- a/src/cmd/compile/internal/syntax/nodes.go
+++ b/src/cmd/compile/internal/syntax/nodes.go
@@ -116,7 +116,7 @@ func (*decl) aDecl() {}
// All declarations belonging to the same group point to the same Group node.
type Group struct {
- dummy int // not empty so we are guaranteed different Group instances
+ _ int // not empty so we are guaranteed different Group instances
}
// ----------------------------------------------------------------------------
diff --git a/src/cmd/compile/internal/syntax/printer_test.go b/src/cmd/compile/internal/syntax/printer_test.go
index cae2c40f6c..9f1f7e18cb 100644
--- a/src/cmd/compile/internal/syntax/printer_test.go
+++ b/src/cmd/compile/internal/syntax/printer_test.go
@@ -18,7 +18,7 @@ func TestPrint(t *testing.T) {
t.Skip("skipping test in short mode")
}
- // provide a dummy error handler so parsing doesn't stop after first error
+ // provide a no-op error handler so parsing doesn't stop after first error
ast, err := ParseFile(*src_, func(error) {}, nil, 0)
if err != nil {
t.Error(err)
diff --git a/src/cmd/compile/internal/types/scope.go b/src/cmd/compile/internal/types/scope.go
index 40d3d86ef1..33a02c543d 100644
--- a/src/cmd/compile/internal/types/scope.go
+++ b/src/cmd/compile/internal/types/scope.go
@@ -15,7 +15,7 @@ var Block int32 // current block number
// restored once the block scope ends.
type dsym struct {
sym *Sym // sym == nil indicates stack mark
- def *Node
+ def IRNode
block int32
lastlineno src.XPos // last declaration for diagnostic
}
@@ -79,16 +79,16 @@ func IsDclstackValid() bool {
}
// PkgDef returns the definition associated with s at package scope.
-func (s *Sym) PkgDef() *Node {
+func (s *Sym) PkgDef() IRNode {
return *s.pkgDefPtr()
}
// SetPkgDef sets the definition associated with s at package scope.
-func (s *Sym) SetPkgDef(n *Node) {
+func (s *Sym) SetPkgDef(n IRNode) {
*s.pkgDefPtr() = n
}
-func (s *Sym) pkgDefPtr() **Node {
+func (s *Sym) pkgDefPtr() *IRNode {
// Look for outermost saved declaration, which must be the
// package scope definition, if present.
for _, d := range dclstack {
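
PkgDef and SetPkgDef now traffic in the opaque IRNode; callers that
need a concrete node convert back with ir.AsNode, added earlier in
this CL. A sketch (lookupPkgDef is illustrative only):

	// lookupPkgDef resolves s's package-scope definition, if any,
	// to an ir.Node via the AsNode conversion helper.
	func lookupPkgDef(s *types.Sym) ir.Node {
		return ir.AsNode(s.PkgDef())
	}
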
diff --git a/src/cmd/compile/internal/types/sizeof_test.go b/src/cmd/compile/internal/types/sizeof_test.go
index ea947d8f41..2821d9a3c7 100644
--- a/src/cmd/compile/internal/types/sizeof_test.go
+++ b/src/cmd/compile/internal/types/sizeof_test.go
@@ -20,11 +20,11 @@ func TestSizeof(t *testing.T) {
_32bit uintptr // size on 32bit platforms
_64bit uintptr // size on 64bit platforms
}{
- {Sym{}, 52, 88},
- {Type{}, 52, 88},
+ {Sym{}, 60, 104},
+ {Type{}, 56, 96},
{Map{}, 20, 40},
{Forward{}, 20, 32},
- {Func{}, 32, 56},
+ {Func{}, 28, 48},
{Struct{}, 16, 32},
{Interface{}, 8, 16},
{Chan{}, 8, 16},
diff --git a/src/cmd/compile/internal/types/sym.go b/src/cmd/compile/internal/types/sym.go
index 07bce4d5cd..046104d0dc 100644
--- a/src/cmd/compile/internal/types/sym.go
+++ b/src/cmd/compile/internal/types/sym.go
@@ -33,12 +33,12 @@ type Sym struct {
Name string // object name
// saved and restored by dcopy
- Def *Node // definition: ONAME OTYPE OPACK or OLITERAL
+ Def IRNode // definition: ONAME OTYPE OPACK or OLITERAL
Block int32 // blocknumber to catch redeclaration
Lastlineno src.XPos // last declaration for diagnostic
flags bitset8
- Label *Node // corresponding label (ephemeral)
+ Label IRNode // corresponding label (ephemeral)
Origpkg *Pkg // original package for . import
}
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
index 023ab9af88..8499a36edc 100644
--- a/src/cmd/compile/internal/types/type.go
+++ b/src/cmd/compile/internal/types/type.go
@@ -10,10 +10,10 @@ import (
"fmt"
)
-// Dummy Node so we can refer to *Node without actually
-// having a gc.Node. Necessary to break import cycles.
-// TODO(gri) try to eliminate soon
-type Node struct{ _ int }
+// IRNode represents an ir.Node, but without needing to import cmd/compile/internal/ir,
+// which would cause an import cycle. The uses in other packages must type assert
+// values of type IRNode to ir.Node or a more specific type.
+type IRNode interface{ Type() *Type }
//go:generate stringer -type EType -trimprefix T
@@ -141,8 +141,8 @@ type Type struct {
methods Fields
allMethods Fields
- Nod *Node // canonical OTYPE node
- Orig *Type // original type (type literal or predefined type)
+ Nod IRNode // canonical OTYPE node
+ Orig *Type // original type (type literal or predefined type)
// Cache of composite types, with this type being the element type.
Cache struct {
@@ -247,8 +247,7 @@ type Func struct {
Results *Type // function results
Params *Type // function params
- Nname *Node
- pkg *Pkg
+ pkg *Pkg
// Argwid is the total width of the function receiver, params, and results.
// It gets calculated via a temporary TFUNCARGS type.
@@ -361,7 +360,7 @@ type Field struct {
// For fields that represent function parameters, Nname points
// to the associated ONAME Node.
- Nname *Node
+ Nname IRNode
// Offset in bytes of this field or method within its enclosing struct
// or interface Type.
@@ -583,10 +582,17 @@ func NewFuncArgs(f *Type) *Type {
return t
}
-func NewField() *Field {
- return &Field{
+func NewField(pos src.XPos, sym *Sym, typ *Type) *Field {
+ f := &Field{
+ Pos: pos,
+ Sym: sym,
+ Type: typ,
Offset: BADWIDTH,
}
+ if typ == nil {
+ f.SetBroke(true)
+ }
+ return f
}
// SubstAny walks t, replacing instances of "any" with successive
@@ -800,26 +806,6 @@ func (t *Type) FuncArgs() *Type {
return t.Extra.(FuncArgs).T
}
-// Nname returns the associated function's nname.
-func (t *Type) Nname() *Node {
- switch t.Etype {
- case TFUNC:
- return t.Extra.(*Func).Nname
- }
- Fatalf("Type.Nname %v %v", t.Etype, t)
- return nil
-}
-
-// Nname sets the associated function's nname.
-func (t *Type) SetNname(n *Node) {
- switch t.Etype {
- case TFUNC:
- t.Extra.(*Func).Nname = n
- default:
- Fatalf("Type.SetNname %v %v", t.Etype, t)
- }
-}
-
// IsFuncArgStruct reports whether t is a struct representing function parameters.
func (t *Type) IsFuncArgStruct() bool {
return t.Etype == TSTRUCT && t.Extra.(*Struct).Funarg != FunargNone
@@ -1226,7 +1212,7 @@ func (t *Type) IsInteger() bool {
case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TINT, TUINT, TUINTPTR:
return true
}
- return false
+ return t == UntypedInt || t == UntypedRune
}
func (t *Type) IsSigned() bool {
@@ -1237,12 +1223,20 @@ func (t *Type) IsSigned() bool {
return false
}
+func (t *Type) IsUnsigned() bool {
+ switch t.Etype {
+ case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR:
+ return true
+ }
+ return false
+}
+
func (t *Type) IsFloat() bool {
- return t.Etype == TFLOAT32 || t.Etype == TFLOAT64
+ return t.Etype == TFLOAT32 || t.Etype == TFLOAT64 || t == UntypedFloat
}
func (t *Type) IsComplex() bool {
- return t.Etype == TCOMPLEX64 || t.Etype == TCOMPLEX128
+ return t.Etype == TCOMPLEX64 || t.Etype == TCOMPLEX128 || t == UntypedComplex
}
// IsPtr reports whether t is a regular Go pointer type.
@@ -1279,7 +1273,7 @@ func (t *Type) IsPtrShaped() bool {
// HasNil reports whether the set of values determined by t includes nil.
func (t *Type) HasNil() bool {
switch t.Etype {
- case TCHAN, TFUNC, TINTER, TMAP, TPTR, TSLICE, TUNSAFEPTR:
+ case TCHAN, TFUNC, TINTER, TMAP, TNIL, TPTR, TSLICE, TUNSAFEPTR:
return true
}
return false
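
Folding the untyped kinds into these predicates lets constant-handling
code test literals and typed values uniformly. A hedged sketch
(numericKind is illustrative, not part of this CL):

	// numericKind classifies t; untyped constants now satisfy
	// the same predicates as their typed counterparts.
	func numericKind(t *types.Type) string {
		switch {
		case t.IsInteger(): // includes UntypedInt and UntypedRune
			return "integer"
		case t.IsFloat(): // includes UntypedFloat
			return "float"
		case t.IsComplex(): // includes UntypedComplex
			return "complex"
		}
		return "other"
	}
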
diff --git a/src/cmd/compile/internal/wasm/ssa.go b/src/cmd/compile/internal/wasm/ssa.go
index a36fbca4e0..e7451381b4 100644
--- a/src/cmd/compile/internal/wasm/ssa.go
+++ b/src/cmd/compile/internal/wasm/ssa.go
@@ -5,7 +5,9 @@
package wasm
import (
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
@@ -33,7 +35,7 @@ func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Pr
return p
}
if cnt%8 != 0 {
- gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
+ base.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
}
for i := int64(0); i < cnt; i += 8 {
@@ -165,8 +167,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
- if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
- gc.Warnl(v.Pos, "generated nil check")
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpWasmLoweredWB:
@@ -235,7 +237,7 @@ func ssaGenValueOnStack(s *gc.SSAGenState, v *ssa.Value, extend bool) {
switch v.Aux.(type) {
case *obj.LSym:
gc.AddAux(&p.From, v)
- case *gc.Node:
+ case ir.Node:
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
default:
diff --git a/src/cmd/compile/internal/x86/galign.go b/src/cmd/compile/internal/x86/galign.go
index e137daa3fc..7d628f9b7c 100644
--- a/src/cmd/compile/internal/x86/galign.go
+++ b/src/cmd/compile/internal/x86/galign.go
@@ -5,6 +5,7 @@
package x86
import (
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/internal/obj/x86"
"cmd/internal/objabi"
@@ -24,10 +25,10 @@ func Init(arch *gc.Arch) {
arch.SoftFloat = true
case "387":
fmt.Fprintf(os.Stderr, "unsupported setting GO386=387. Consider using GO386=softfloat instead.\n")
- gc.Exit(1)
+ base.Exit(1)
default:
fmt.Fprintf(os.Stderr, "unsupported setting GO386=%s\n", v)
- gc.Exit(1)
+ base.Exit(1)
}
diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go
index fbf76d0c5e..a3aaf03c95 100644
--- a/src/cmd/compile/internal/x86/ssa.go
+++ b/src/cmd/compile/internal/x86/ssa.go
@@ -8,6 +8,7 @@ import (
"fmt"
"math"
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
@@ -480,9 +481,9 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Name = obj.NAME_EXTERN
f := math.Float64frombits(uint64(v.AuxInt))
if v.Op == ssa.Op386MOVSDconst1 {
- p.From.Sym = gc.Ctxt.Float64Sym(f)
+ p.From.Sym = base.Ctxt.Float64Sym(f)
} else {
- p.From.Sym = gc.Ctxt.Float32Sym(float32(f))
+ p.From.Sym = base.Ctxt.Float32Sym(float32(f))
}
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -713,7 +714,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r := v.Reg()
// See the comments in cmd/internal/obj/x86/obj6.go
// near CanUse1InsnTLS for a detailed explanation of these instructions.
- if x86.CanUse1InsnTLS(gc.Ctxt) {
+ if x86.CanUse1InsnTLS(base.Ctxt) {
// MOVL (TLS), r
p := s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_MEM
@@ -749,7 +750,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// caller's SP is the address of the first arg
p := s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_ADDR
- p.From.Offset = -gc.Ctxt.FixedFrameSize() // 0 on 386, just to be consistent with other architectures
+ p.From.Offset = -base.Ctxt.FixedFrameSize() // 0 on 386, just to be consistent with other architectures
p.From.Name = obj.NAME_PARAM
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -850,8 +851,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
- if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
- gc.Warnl(v.Pos, "generated nil check")
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpClobber:
p := s.Prog(x86.AMOVL)
diff --git a/src/cmd/compile/main.go b/src/cmd/compile/main.go
index 3aa64a5ce2..5a33719d87 100644
--- a/src/cmd/compile/main.go
+++ b/src/cmd/compile/main.go
@@ -8,6 +8,7 @@ import (
"cmd/compile/internal/amd64"
"cmd/compile/internal/arm"
"cmd/compile/internal/arm64"
+ "cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/mips"
"cmd/compile/internal/mips64"
@@ -50,5 +51,5 @@ func main() {
}
gc.Main(archInit)
- gc.Exit(0)
+ base.Exit(0)
}
diff --git a/src/cmd/dist/buildtool.go b/src/cmd/dist/buildtool.go
index f5dcd34cc1..5dfd5ee16e 100644
--- a/src/cmd/dist/buildtool.go
+++ b/src/cmd/dist/buildtool.go
@@ -38,10 +38,12 @@ var bootstrapDirs = []string{
"cmd/cgo",
"cmd/compile",
"cmd/compile/internal/amd64",
+ "cmd/compile/internal/base",
"cmd/compile/internal/arm",
"cmd/compile/internal/arm64",
"cmd/compile/internal/gc",
"cmd/compile/internal/importer",
+ "cmd/compile/internal/ir",
"cmd/compile/internal/logopt",
"cmd/compile/internal/mips",
"cmd/compile/internal/mips64",
@@ -106,6 +108,7 @@ var bootstrapDirs = []string{
"math/big",
"math/bits",
"sort",
+ "strconv",
}
// File prefixes that are ignored by go/build anyway, and cause
diff --git a/src/cmd/go/internal/base/flag.go b/src/cmd/go/internal/base/flag.go
index c97c744520..677f819682 100644
--- a/src/cmd/go/internal/base/flag.go
+++ b/src/cmd/go/internal/base/flag.go
@@ -8,6 +8,7 @@ import (
"flag"
"cmd/go/internal/cfg"
+ "cmd/go/internal/fsys"
"cmd/go/internal/str"
)
@@ -66,4 +67,5 @@ func AddModFlag(flags *flag.FlagSet) {
func AddModCommonFlags(flags *flag.FlagSet) {
flags.BoolVar(&cfg.ModCacheRW, "modcacherw", false, "")
flags.StringVar(&cfg.ModFile, "modfile", "", "")
+ flags.StringVar(&fsys.OverlayFile, "overlay", "", "")
}
diff --git a/src/cmd/go/internal/fsys/fsys.go b/src/cmd/go/internal/fsys/fsys.go
index e3a0e44f82..0264786e5b 100644
--- a/src/cmd/go/internal/fsys/fsys.go
+++ b/src/cmd/go/internal/fsys/fsys.go
@@ -327,12 +327,22 @@ func OverlayPath(path string) (string, bool) {
// Open opens the file at or overlaid on the given path.
func Open(path string) (*os.File, error) {
+ return OpenFile(path, os.O_RDONLY, 0)
+}
+
+// OpenFile opens the file at or overlaid on the given path with the flag and perm.
+func OpenFile(path string, flag int, perm os.FileMode) (*os.File, error) {
cpath := canonicalize(path)
if node, ok := overlay[cpath]; ok {
+ // Opening a file in the overlay.
if node.isDir() {
- return nil, &fs.PathError{Op: "Open", Path: path, Err: errors.New("fsys.Open doesn't support opening directories yet")}
+ return nil, &fs.PathError{Op: "OpenFile", Path: path, Err: errors.New("fsys.OpenFile doesn't support opening directories yet")}
+ }
+ // We can't open overlaid paths for write.
+ if perm := flag & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR); perm != os.O_RDONLY {
+ return nil, &fs.PathError{Op: "OpenFile", Path: path, Err: errors.New("overlaid files can't be opened for write")}
}
- return os.Open(node.actualFilePath)
+ return os.OpenFile(node.actualFilePath, flag, perm)
}
if parent, ok := parentIsOverlayFile(filepath.Dir(cpath)); ok {
// The file is deleted explicitly in the Replace map,
@@ -344,7 +354,7 @@ func Open(path string) (*os.File, error) {
Err: fmt.Errorf("file %s does not exist: parent directory %s is replaced by a file in overlay", path, parent),
}
}
- return os.Open(cpath)
+ return os.OpenFile(cpath, flag, perm)
}
// IsDirWithGoFiles reports whether dir is a directory containing Go files
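
A simplified, self-contained sketch of the overlay semantics implemented above (names here are assumptions; the real code also canonicalizes paths and handles parents deleted by the overlay). Reads of an overlaid path are redirected to the backing file, and any write access mode is rejected:

package main

import (
	"errors"
	"fmt"
	"os"
)

// overlay maps a requested path to its replacement on disk.
var overlay = map[string]string{"go.mod": "/tmp/overlay_go_mod"}

func openFile(path string, flag int, perm os.FileMode) (*os.File, error) {
	if actual, ok := overlay[path]; ok {
		// Overlaid files are read-only: reject any write access mode.
		if flag&(os.O_WRONLY|os.O_RDWR) != 0 {
			return nil, errors.New("overlaid files can't be opened for write")
		}
		return os.OpenFile(actual, flag, perm)
	}
	return os.OpenFile(path, flag, perm)
}

func main() {
	_, err := openFile("go.mod", os.O_RDWR, 0666)
	fmt.Println(err) // overlaid files can't be opened for write
}
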
diff --git a/src/cmd/go/internal/lockedfile/lockedfile_filelock.go b/src/cmd/go/internal/lockedfile/lockedfile_filelock.go
index 10e1240efd..efc66461ed 100644
--- a/src/cmd/go/internal/lockedfile/lockedfile_filelock.go
+++ b/src/cmd/go/internal/lockedfile/lockedfile_filelock.go
@@ -10,6 +10,7 @@ import (
"io/fs"
"os"
+ "cmd/go/internal/fsys"
"cmd/go/internal/lockedfile/internal/filelock"
)
@@ -19,7 +20,7 @@ func openFile(name string, flag int, perm fs.FileMode) (*os.File, error) {
// calls for Linux and Windows anyway, so it's simpler to use that approach
// consistently.
- f, err := os.OpenFile(name, flag&^os.O_TRUNC, perm)
+ f, err := fsys.OpenFile(name, flag&^os.O_TRUNC, perm)
if err != nil {
return nil, err
}
diff --git a/src/cmd/go/internal/lockedfile/lockedfile_plan9.go b/src/cmd/go/internal/lockedfile/lockedfile_plan9.go
index 51681381d7..70d6eddf2d 100644
--- a/src/cmd/go/internal/lockedfile/lockedfile_plan9.go
+++ b/src/cmd/go/internal/lockedfile/lockedfile_plan9.go
@@ -12,6 +12,8 @@ import (
"os"
"strings"
"time"
+
+ "cmd/go/internal/fsys"
)
// Opening an exclusive-use file returns an error.
@@ -56,7 +58,7 @@ func openFile(name string, flag int, perm fs.FileMode) (*os.File, error) {
// If the file was unpacked or created by some other program, it might not
// have the ModeExclusive bit set. Set it before we call OpenFile, so that we
// can be confident that a successful OpenFile implies exclusive use.
- if fi, err := os.Stat(name); err == nil {
+ if fi, err := fsys.Stat(name); err == nil {
if fi.Mode()&fs.ModeExclusive == 0 {
if err := os.Chmod(name, fi.Mode()|fs.ModeExclusive); err != nil {
return nil, err
@@ -69,7 +71,7 @@ func openFile(name string, flag int, perm fs.FileMode) (*os.File, error) {
nextSleep := 1 * time.Millisecond
const maxSleep = 500 * time.Millisecond
for {
- f, err := os.OpenFile(name, flag, perm|fs.ModeExclusive)
+ f, err := fsys.OpenFile(name, flag, perm|fs.ModeExclusive)
if err == nil {
return f, nil
}
diff --git a/src/cmd/go/internal/modcmd/vendor.go b/src/cmd/go/internal/modcmd/vendor.go
index 4e73960e80..38c473d36b 100644
--- a/src/cmd/go/internal/modcmd/vendor.go
+++ b/src/cmd/go/internal/modcmd/vendor.go
@@ -18,6 +18,7 @@ import (
"cmd/go/internal/base"
"cmd/go/internal/cfg"
+ "cmd/go/internal/fsys"
"cmd/go/internal/imports"
"cmd/go/internal/modload"
@@ -259,7 +260,7 @@ func matchPotentialSourceFile(dir string, info fs.FileInfo) bool {
return false
}
if strings.HasSuffix(info.Name(), ".go") {
- f, err := os.Open(filepath.Join(dir, info.Name()))
+ f, err := fsys.Open(filepath.Join(dir, info.Name()))
if err != nil {
base.Fatalf("go mod vendor: %v", err)
}
diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go
index 13106de2f2..ecb0142524 100644
--- a/src/cmd/go/internal/modget/get.go
+++ b/src/cmd/go/internal/modget/get.go
@@ -45,7 +45,9 @@ import (
"cmd/go/internal/search"
"cmd/go/internal/work"
+ "golang.org/x/mod/modfile"
"golang.org/x/mod/module"
+ "golang.org/x/mod/semver"
)
var CmdGet = &base.Command{
@@ -462,25 +464,19 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) {
// what's changing and gives more examples.
}
+ if !modload.HasModRoot() {
+ return
+ }
+
// Everything succeeded. Update go.mod.
+ oldReqs := reqsFromGoMod(modload.ModFile())
+
modload.AllowWriteGoMod()
modload.WriteGoMod()
modload.DisallowWriteGoMod()
- // Ensure .info files are cached for each module in the build list.
- // This ensures 'go list -m all' can succeed later if offline.
- // 'go get' only loads .info files for queried versions. 'go list -m' needs
- // them to add timestamps to the output.
- //
- // This is best effort since build commands don't need .info files to load
- // the build list.
- //
- // TODO(golang.org/issue/40775): ListModules resets modload.loader, which
- // contains information about direct dependencies that WriteGoMod uses.
- // Refactor to avoid these kinds of global side effects.
- if modload.HasModRoot() {
- modload.ListModules(ctx, []string{"all"}, false, false, false)
- }
+ newReqs := reqsFromGoMod(modload.ModFile())
+ r.reportChanges(oldReqs, newReqs)
}
// parseArgs parses command-line arguments and reports errors.
@@ -1571,70 +1567,85 @@ func (r *resolver) checkPackagesAndRetractions(ctx context.Context, pkgPatterns
})
}
<-r.work.Idle()
+ var retractPath string
for _, r := range retractions {
if r.err != nil {
fmt.Fprintf(os.Stderr, "go: warning: %v\n", r.err)
+ if retractPath == "" {
+ retractPath = r.m.Path
+ } else {
+ retractPath = "<module>"
+ }
}
}
+ if retractPath != "" {
+ fmt.Fprintf(os.Stderr, "go: run 'go get %s@latest' to switch to the latest unretracted version\n", retractPath)
+ }
}
-// reportChanges logs resolved version changes to os.Stderr.
-func (r *resolver) reportChanges(queries []*query) {
- for _, q := range queries {
- if q.version == "none" {
- continue
- }
-
- if q.pattern == "all" {
- // To reduce noise for "all", describe module version changes rather than
- // package versions.
- seen := make(map[module.Version]bool)
- for _, m := range q.resolved {
- if seen[m] {
- continue
- }
- seen[m] = true
-
- before := r.initialSelected(m.Path)
- if before == m.Version {
- continue // m was resolved, but not changed
- }
+// reportChanges logs version changes to os.Stderr.
+//
+// reportChanges only logs changes to modules named on the command line and to
+// explicitly required modules in go.mod. Most changes to indirect requirements
+// are not relevant to the user and are not logged.
+//
+// reportChanges should be called after WriteGoMod.
+func (r *resolver) reportChanges(oldReqs, newReqs []module.Version) {
+ type change struct {
+ path, old, new string
+ }
+ changes := make(map[string]change)
- was := ""
- if before != "" {
- was = fmt.Sprintf(" (was %s)", before)
- }
- fmt.Fprintf(os.Stderr, "go: %v added %s %s%s\n", q, m.Path, m.Version, was)
- }
- continue
+ // Collect changes in modules matched by command line arguments.
+ for path, reason := range r.resolvedVersion {
+ old := r.initialVersion[path]
+ new := reason.version
+ if old != new && (old != "" || new != "none") {
+ changes[path] = change{path, old, new}
}
+ }
- for _, m := range q.resolved {
- before := r.initialSelected(m.Path)
- if before == m.Version {
- continue // m was resolved, but not changed
- }
+ // Collect changes to explicit requirements in go.mod.
+ for _, req := range oldReqs {
+ path := req.Path
+ old := req.Version
+ new := r.buildListVersion[path]
+ if old != new {
+ changes[path] = change{path, old, new}
+ }
+ }
+ for _, req := range newReqs {
+ path := req.Path
+ old := r.initialVersion[path]
+ new := req.Version
+ if old != new {
+ changes[path] = change{path, old, new}
+ }
+ }
- was := ""
- if before != "" {
- was = fmt.Sprintf(" (was %s)", before)
- }
- switch {
- case q.isWildcard():
- if q.matchesPath(m.Path) {
- fmt.Fprintf(os.Stderr, "go: matched %v as %s %s%s\n", q, m.Path, m.Version, was)
- } else {
- fmt.Fprintf(os.Stderr, "go: matched %v in %s %s%s\n", q, m.Path, m.Version, was)
- }
- case q.matchesPackages:
- fmt.Fprintf(os.Stderr, "go: found %v in %s %s%s\n", q, m.Path, m.Version, was)
- default:
- fmt.Fprintf(os.Stderr, "go: found %v in %s %s%s\n", q, m.Path, m.Version, was)
- }
+ sortedChanges := make([]change, 0, len(changes))
+ for _, c := range changes {
+ sortedChanges = append(sortedChanges, c)
+ }
+ sort.Slice(sortedChanges, func(i, j int) bool {
+ return sortedChanges[i].path < sortedChanges[j].path
+ })
+ for _, c := range sortedChanges {
+ if c.old == "" {
+ fmt.Fprintf(os.Stderr, "go get: added %s %s\n", c.path, c.new)
+ } else if c.new == "none" || c.new == "" {
+ fmt.Fprintf(os.Stderr, "go get: removed %s %s\n", c.path, c.old)
+ } else if semver.Compare(c.new, c.old) > 0 {
+ fmt.Fprintf(os.Stderr, "go get: upgraded %s %s => %s\n", c.path, c.old, c.new)
+ } else {
+ fmt.Fprintf(os.Stderr, "go get: downgraded %s %s => %s\n", c.path, c.old, c.new)
}
}
- // TODO(#33284): Also print relevant upgrades.
+ // TODO(golang.org/issue/33284): attribute changes to command line arguments.
+ // For modules matched by command line arguments, this probably isn't
+ // necessary, but it would be useful for unmatched direct dependencies of
+ // the main module.
}
// resolve records that module m must be at its indicated version (which may be
@@ -1715,6 +1726,14 @@ func (r *resolver) updateBuildList(ctx context.Context, additions []module.Versi
return true
}
+func reqsFromGoMod(f *modfile.File) []module.Version {
+ reqs := make([]module.Version, len(f.Require))
+ for i, r := range f.Require {
+ reqs[i] = r.Mod
+ }
+ return reqs
+}
+
// isNoSuchModuleVersion reports whether err indicates that the requested module
// does not exist at the requested version, either because the module does not
// exist at all or because it does not include that specific version.
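
A runnable sketch of the change-classification rule used by reportChanges above (the struct and message formats mirror the patch; the version data is made up): an empty old version means added, "none" or empty new means removed, and semver.Compare decides upgrade versus downgrade.

package main

import (
	"fmt"
	"sort"

	"golang.org/x/mod/semver"
)

type change struct{ path, old, new string }

func describe(c change) string {
	switch {
	case c.old == "":
		return fmt.Sprintf("added %s %s", c.path, c.new)
	case c.new == "none" || c.new == "":
		return fmt.Sprintf("removed %s %s", c.path, c.old)
	case semver.Compare(c.new, c.old) > 0:
		return fmt.Sprintf("upgraded %s %s => %s", c.path, c.old, c.new)
	default:
		return fmt.Sprintf("downgraded %s %s => %s", c.path, c.old, c.new)
	}
}

func main() {
	changes := []change{
		{"rsc.io/sampler", "v1.0.0", "v1.3.0"},
		{"rsc.io/quote", "", "v1.5.2"},
		{"golang.org/x/text", "v0.3.0", "none"},
	}
	// Like the patch, report in deterministic path order.
	sort.Slice(changes, func(i, j int) bool { return changes[i].path < changes[j].path })
	for _, c := range changes {
		fmt.Println("go get:", describe(c))
	}
}
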
diff --git a/src/cmd/go/internal/modload/build.go b/src/cmd/go/internal/modload/build.go
index b9e344045d..8ad5f834de 100644
--- a/src/cmd/go/internal/modload/build.go
+++ b/src/cmd/go/internal/modload/build.go
@@ -75,7 +75,7 @@ func ModuleInfo(ctx context.Context, path string) *modinfo.ModulePublic {
return moduleInfo(ctx, m, fromBuildList, listRetracted)
}
- for _, m := range LoadedModules() {
+ for _, m := range buildList {
if m.Path == path {
fromBuildList := true
return moduleInfo(ctx, m, fromBuildList, listRetracted)
diff --git a/src/cmd/go/internal/modload/buildlist.go b/src/cmd/go/internal/modload/buildlist.go
index 5b9984a492..896adebbb1 100644
--- a/src/cmd/go/internal/modload/buildlist.go
+++ b/src/cmd/go/internal/modload/buildlist.go
@@ -49,15 +49,6 @@ func LoadAllModules(ctx context.Context) []module.Version {
return capVersionSlice(buildList)
}
-// LoadedModules returns the list of module requirements loaded or set by a
-// previous call (typically LoadAllModules or LoadPackages), starting with the
-// Target module and in a deterministic (stable) order.
-//
-// The caller must not modify the returned list, but may append to it.
-func LoadedModules() []module.Version {
- return capVersionSlice(buildList)
-}
-
// Selected returns the selected version of the module with the given path, or
// the empty string if the given module has no selected version
// (either because it is not required or because it is the Target module).
@@ -82,9 +73,6 @@ func Selected(path string) (version string) {
// build list: they could be lower than existing requirements or conflict with
// versions in mustSelect.)
//
-// After performing the requested edits, EditBuildList returns the updated build
-// list.
-//
// If the versions listed in mustSelect are mutually incompatible (due to one of
// the listed modules requiring a higher version of another), EditBuildList
// returns a *ConstraintError and leaves the build list in its previous state.
diff --git a/src/cmd/go/internal/modload/import.go b/src/cmd/go/internal/modload/import.go
index eb0a366f92..ce5671728e 100644
--- a/src/cmd/go/internal/modload/import.go
+++ b/src/cmd/go/internal/modload/import.go
@@ -477,7 +477,7 @@ func dirInModule(path, mpath, mdir string, isLocal bool) (dir string, haveGoFile
if isLocal {
for d := dir; d != mdir && len(d) > len(mdir); {
haveGoMod := haveGoModCache.Do(d, func() interface{} {
- fi, err := os.Stat(filepath.Join(d, "go.mod"))
+ fi, err := fsys.Stat(filepath.Join(d, "go.mod"))
return err == nil && !fi.IsDir()
}).(bool)
@@ -531,7 +531,7 @@ func fetch(ctx context.Context, mod module.Version, needSum bool) (dir string, i
// dirInModule does not report errors for missing modules,
// so if we don't report the error now, later failures will be
// very mysterious.
- if _, err := os.Stat(dir); err != nil {
+ if _, err := fsys.Stat(dir); err != nil {
if os.IsNotExist(err) {
// Semantically the module version itself “exists” — we just don't
// have its source code. Remove the equivalence to os.ErrNotExist,
diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go
index 8fe71a2448..a9b77c82b3 100644
--- a/src/cmd/go/internal/modload/init.go
+++ b/src/cmd/go/internal/modload/init.go
@@ -206,7 +206,7 @@ func Init() {
base.Fatalf("missing $GOPATH")
}
gopath = list[0]
- if _, err := os.Stat(filepath.Join(gopath, "go.mod")); err == nil {
+ if _, err := fsys.Stat(filepath.Join(gopath, "go.mod")); err == nil {
base.Fatalf("$GOPATH/go.mod exists but should not")
}
@@ -407,7 +407,7 @@ func CreateModFile(ctx context.Context, modPath string) {
modRoot = base.Cwd
Init()
modFilePath := ModFilePath()
- if _, err := os.Stat(modFilePath); err == nil {
+ if _, err := fsys.Stat(modFilePath); err == nil {
base.Fatalf("go: %s already exists", modFilePath)
}
@@ -605,7 +605,7 @@ func setDefaultBuildMod() {
return
}
- if fi, err := os.Stat(filepath.Join(modRoot, "vendor")); err == nil && fi.IsDir() {
+ if fi, err := fsys.Stat(filepath.Join(modRoot, "vendor")); err == nil && fi.IsDir() {
modGo := "unspecified"
if index.goVersionV != "" {
if semver.Compare(index.goVersionV, "v1.14") >= 0 {
@@ -685,7 +685,7 @@ func findModuleRoot(dir string) (root string) {
// Look for enclosing go.mod.
for {
- if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() {
+ if fi, err := fsys.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() {
return dir
}
d := filepath.Dir(dir)
@@ -709,7 +709,7 @@ func findAltConfig(dir string) (root, name string) {
}
for {
for _, name := range altConfigs {
- if fi, err := os.Stat(filepath.Join(dir, name)); err == nil && !fi.IsDir() {
+ if fi, err := fsys.Stat(filepath.Join(dir, name)); err == nil && !fi.IsDir() {
return dir, name
}
}
@@ -853,7 +853,7 @@ func MinReqs() mvs.Reqs {
retain = append(retain, m.Path)
}
}
- min, err := mvs.Req(Target, retain, Reqs())
+ min, err := mvs.Req(Target, retain, &mvsReqs{buildList: buildList})
if err != nil {
base.Fatalf("go: %v", err)
}
@@ -985,7 +985,7 @@ func keepSums(addDirect bool) map[module.Version]bool {
keep := make(map[module.Version]bool)
var mu sync.Mutex
reqs := &keepSumReqs{
- Reqs: Reqs(),
+ Reqs: &mvsReqs{buildList: buildList},
visit: func(m module.Version) {
// If we build using a replacement module, keep the sum for the replacement,
// since that's the code we'll actually use during a build.
diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go
index 302330278e..732c4af92b 100644
--- a/src/cmd/go/internal/modload/load.go
+++ b/src/cmd/go/internal/modload/load.go
@@ -800,7 +800,7 @@ func loadFromRoots(params loaderParams) *loader {
}
var err error
- reqs := Reqs()
+ reqs := &mvsReqs{buildList: buildList}
buildList, err = mvs.BuildList(Target, reqs)
if err != nil {
base.Fatalf("go: %v", err)
@@ -842,7 +842,7 @@ func loadFromRoots(params loaderParams) *loader {
}
// Recompute buildList with all our additions.
- reqs = Reqs()
+ reqs = &mvsReqs{buildList: buildList}
buildList, err = mvs.BuildList(Target, reqs)
if err != nil {
// If an error was found in a newly added module, report the package
diff --git a/src/cmd/go/internal/modload/modfile.go b/src/cmd/go/internal/modload/modfile.go
index e9601c3e7c..ede07be4bf 100644
--- a/src/cmd/go/internal/modload/modfile.go
+++ b/src/cmd/go/internal/modload/modfile.go
@@ -114,9 +114,9 @@ func CheckRetractions(ctx context.Context, m module.Version) error {
// Find the latest version of the module.
// Ignore exclusions from the main module's go.mod.
- // We may need to account for the current version: for example,
- // v2.0.0+incompatible is not "latest" if v1.0.0 is current.
- rev, err := Query(ctx, path, "latest", Selected(path), nil)
+ const ignoreSelected = ""
+ var allowAll AllowedFunc
+ rev, err := Query(ctx, path, "latest", ignoreSelected, allowAll)
if err != nil {
return &entry{nil, err}
}
diff --git a/src/cmd/go/internal/modload/mvs.go b/src/cmd/go/internal/modload/mvs.go
index db57b3ec5f..167d6819b0 100644
--- a/src/cmd/go/internal/modload/mvs.go
+++ b/src/cmd/go/internal/modload/mvs.go
@@ -11,7 +11,6 @@ import (
"sort"
"cmd/go/internal/modfetch"
- "cmd/go/internal/mvs"
"golang.org/x/mod/module"
"golang.org/x/mod/semver"
@@ -23,16 +22,6 @@ type mvsReqs struct {
buildList []module.Version
}
-// Reqs returns the current module requirement graph.
-// Future calls to EditBuildList do not affect the operation
-// of the returned Reqs.
-func Reqs() mvs.Reqs {
- r := &mvsReqs{
- buildList: buildList,
- }
- return r
-}
-
func (r *mvsReqs) Required(mod module.Version) ([]module.Version, error) {
if mod == Target {
// Use the build list as it existed when r was constructed, not the current
diff --git a/src/cmd/go/internal/modload/mvs_test.go b/src/cmd/go/internal/modload/mvs_test.go
index 0cb376ec3c..50e93c381f 100644
--- a/src/cmd/go/internal/modload/mvs_test.go
+++ b/src/cmd/go/internal/modload/mvs_test.go
@@ -2,19 +2,17 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package modload_test
+package modload
import (
"testing"
-
- "cmd/go/internal/modload"
)
func TestReqsMax(t *testing.T) {
type testCase struct {
a, b, want string
}
- reqs := modload.Reqs()
+ reqs := new(mvsReqs)
for _, tc := range []testCase{
{a: "v0.1.0", b: "v0.2.0", want: "v0.2.0"},
{a: "v0.2.0", b: "v0.1.0", want: "v0.2.0"},
@@ -27,7 +25,7 @@ func TestReqsMax(t *testing.T) {
} {
max := reqs.Max(tc.a, tc.b)
if max != tc.want {
- t.Errorf("Reqs().Max(%q, %q) = %q; want %q", tc.a, tc.b, max, tc.want)
+ t.Errorf("(%T).Max(%q, %q) = %q; want %q", reqs, tc.a, tc.b, max, tc.want)
}
}
}
diff --git a/src/cmd/go/internal/search/search.go b/src/cmd/go/internal/search/search.go
index 57cbb282a8..18738cf59e 100644
--- a/src/cmd/go/internal/search/search.go
+++ b/src/cmd/go/internal/search/search.go
@@ -295,7 +295,7 @@ func (m *Match) MatchDirs() {
if !top && cfg.ModulesEnabled {
// Ignore other modules found in subdirectories.
- if fi, err := os.Stat(filepath.Join(path, "go.mod")); err == nil && !fi.IsDir() {
+ if fi, err := fsys.Stat(filepath.Join(path, "go.mod")); err == nil && !fi.IsDir() {
return filepath.SkipDir
}
}
diff --git a/src/cmd/go/internal/work/build.go b/src/cmd/go/internal/work/build.go
index e0aa691659..21b2289dff 100644
--- a/src/cmd/go/internal/work/build.go
+++ b/src/cmd/go/internal/work/build.go
@@ -267,6 +267,11 @@ func AddBuildFlags(cmd *base.Command, mask BuildFlagMask) {
}
if mask&OmitModCommonFlags == 0 {
base.AddModCommonFlags(&cmd.Flag)
+ } else {
+ // Add the overlay flag even when we don't add the rest of the mod common flags.
+ // This only affects 'go get' in GOPATH mode, but add the flag anyway for
+ // consistency.
+ cmd.Flag.StringVar(&fsys.OverlayFile, "overlay", "", "")
}
cmd.Flag.StringVar(&cfg.BuildContext.InstallSuffix, "installsuffix", "", "")
cmd.Flag.Var(&load.BuildLdflags, "ldflags", "")
@@ -279,8 +284,6 @@ func AddBuildFlags(cmd *base.Command, mask BuildFlagMask) {
cmd.Flag.BoolVar(&cfg.BuildTrimpath, "trimpath", false, "")
cmd.Flag.BoolVar(&cfg.BuildWork, "work", false, "")
- cmd.Flag.StringVar(&fsys.OverlayFile, "overlay", "", "")
-
// Undocumented, unstable debugging flags.
cmd.Flag.StringVar(&cfg.DebugActiongraph, "debug-actiongraph", "", "")
cmd.Flag.StringVar(&cfg.DebugTrace, "debug-trace", "", "")
@@ -840,11 +843,6 @@ func installOutsideModule(ctx context.Context, args []string) {
}
// Check that named packages are all provided by the same module.
- for _, mod := range modload.LoadedModules() {
- if mod.Path == installMod.Path && mod.Version != installMod.Version {
- base.Fatalf("go install: %s: module requires a higher version of itself (%s)", installMod, mod.Version)
- }
- }
for _, pkg := range mainPkgs {
if pkg.Module == nil {
// Packages in std, cmd, and their vendored dependencies
diff --git a/src/cmd/go/testdata/mod/example.com_retract_incompatible_v1.0.0.txt b/src/cmd/go/testdata/mod/example.com_retract_incompatible_v1.0.0.txt
new file mode 100644
index 0000000000..a987685e24
--- /dev/null
+++ b/src/cmd/go/testdata/mod/example.com_retract_incompatible_v1.0.0.txt
@@ -0,0 +1,19 @@
+The v1.0.0 release of example.com/retract/incompatible retracts
+v2.0.0+incompatible.
+
+-- .mod --
+module example.com/retract/incompatible
+
+go 1.16
+
+retract v2.0.0+incompatible
+-- .info --
+{"Version":"v1.0.0"}
+-- go.mod --
+module example.com/retract/incompatible
+
+go 1.16
+
+retract v2.0.0+incompatible
+-- incompatible.go --
+package incompatible
diff --git a/src/cmd/go/testdata/mod/example.com_retract_incompatible_v2.0.0+incompatible.txt b/src/cmd/go/testdata/mod/example.com_retract_incompatible_v2.0.0+incompatible.txt
new file mode 100644
index 0000000000..c668dbb7a9
--- /dev/null
+++ b/src/cmd/go/testdata/mod/example.com_retract_incompatible_v2.0.0+incompatible.txt
@@ -0,0 +1,9 @@
+The v1.0.0 release of example.com/retract/incompatible retracts
+v2.0.0+incompatible.
+
+-- .mod --
+module example.com/retract/incompatible
+-- .info --
+{"Version":"v2.0.0+incompatible"}
+-- incompatible.go --
+package incompatible
diff --git a/src/cmd/go/testdata/script/mod_get_changes.txt b/src/cmd/go/testdata/script/mod_get_changes.txt
new file mode 100644
index 0000000000..3287b2a609
--- /dev/null
+++ b/src/cmd/go/testdata/script/mod_get_changes.txt
@@ -0,0 +1,70 @@
+# When adding a requirement, 'go get' prints a message for the requirement
+# and for changed explicit dependencies. 'go get' does not print messages
+# for changed indirect dependencies.
+go list -m all
+! stdout golang.org/x/text
+go get -d rsc.io/quote@v1.5.2
+stderr '^go get: added rsc.io/quote v1.5.2$'
+stderr '^go get: upgraded rsc.io/sampler v1.0.0 => v1.3.0$'
+! stderr '^go get.*golang.org/x/text'
+go list -m all
+stdout golang.org/x/text
+cmp go.mod go.mod.upgrade
+
+# When removing a requirement, 'go get' prints a message for the requirement
+# and for changed explicit dependencies. 'go get' does not print messages
+# for changed indirect dependencies.
+go get -d rsc.io/sampler@none
+stderr '^go get: downgraded rsc.io/quote v1.5.2 => v1.3.0$'
+stderr '^go get: removed rsc.io/sampler v1.3.0$'
+! stderr '^go get.*golang.org/x/text'
+cmp go.mod go.mod.downgrade
+
+# When removing or downgrading a requirement, 'go get' also prints a message
+# for explicit dependencies removed as a consequence.
+cp go.mod.usequote go.mod
+go get -d rsc.io/quote@v1.5.1
+stderr '^go get: downgraded rsc.io/quote v1.5.2 => v1.5.1$'
+stderr '^go get: removed usequote v0.0.0$'
+
+-- go.mod --
+module m
+
+go 1.16
+
+require rsc.io/sampler v1.0.0
+-- go.sum --
+rsc.io/sampler v1.0.0 h1:SRJnjyQ07sAtq6G4RcfJEmz8JxqLyj3PoGXG2VhbDWo=
+rsc.io/sampler v1.0.0/go.mod h1:cqxpM3ZVz9VtirqxZPmrWzkQ+UkiNiGtkrN+B+i8kx8=
+-- go.mod.upgrade --
+module m
+
+go 1.16
+
+require (
+ rsc.io/quote v1.5.2 // indirect
+ rsc.io/sampler v1.3.0
+)
+-- go.mod.downgrade --
+module m
+
+go 1.16
+
+require (
+ golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c // indirect
+ rsc.io/quote v1.3.0 // indirect
+)
+-- go.mod.usequote --
+module m
+
+go 1.16
+
+require usequote v0.0.0
+
+replace usequote => ./usequote
+-- usequote/go.mod --
+module usequote
+
+go 1.16
+
+require rsc.io/quote v1.5.2
diff --git a/src/cmd/go/testdata/script/mod_get_retract.txt b/src/cmd/go/testdata/script/mod_get_retract.txt
index 13a47bc359..6e328eb592 100644
--- a/src/cmd/go/testdata/script/mod_get_retract.txt
+++ b/src/cmd/go/testdata/script/mod_get_retract.txt
@@ -11,6 +11,7 @@ cp go.mod.orig go.mod
go mod edit -require example.com/retract/self/prev@v1.9.0
go get -d example.com/retract/self/prev
stderr '^go: warning: example.com/retract/self/prev@v1.9.0: retracted by module author: self$'
+stderr '^go: run ''go get example.com/retract/self/prev@latest'' to switch to the latest unretracted version$'
go list -m example.com/retract/self/prev
stdout '^example.com/retract/self/prev v1.9.0$'
diff --git a/src/cmd/go/testdata/script/mod_gonoproxy.txt b/src/cmd/go/testdata/script/mod_gonoproxy.txt
index 7ead946c24..546605da21 100644
--- a/src/cmd/go/testdata/script/mod_gonoproxy.txt
+++ b/src/cmd/go/testdata/script/mod_gonoproxy.txt
@@ -18,6 +18,12 @@ env GOPRIVATE='*/quote,*/*mple*,golang.org/x'
env GONOPROXY=none # that is, proxy all despite GOPRIVATE
go get -d rsc.io/quote
+# Download .info files needed for 'go list -m all' later.
+# TODO(#42723): either 'go list -m' should not read these files,
+# or 'go get' and 'go mod tidy' should download them.
+go list -m all
+stdout '^golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c$'
+
# When GOPROXY is not empty but contains no entries, an error should be reported.
env GOPROXY=','
! go get -d golang.org/x/text
diff --git a/src/cmd/go/testdata/script/mod_overlay.txt b/src/cmd/go/testdata/script/mod_overlay.txt
new file mode 100644
index 0000000000..92e79c725a
--- /dev/null
+++ b/src/cmd/go/testdata/script/mod_overlay.txt
@@ -0,0 +1,254 @@
+# Test overlays that affect go.mod files
+
+# The go.mod file can exist only in the overlay.
+cd $WORK/gopath/src/no-go-mod
+go list -overlay overlay.json .
+stdout example.com/simple
+
+# Check content of overlaid go.mod is used.
+cd $WORK/gopath/src/overlay-go-mod
+go list -overlay overlay.json .
+stdout use.this/module/name
+
+# Check content of overlaid go.mod in a replacement module is used.
+# The go.mod in the replacement module is missing a requirement
+# that the overlay has, so it will fail to list without the overlay.
+cd $WORK/gopath/src/overlay-replaced-go-mod
+! go list -deps .
+go list -deps -overlay overlay.json .
+
+# Overlaid go.mod is not rewritten by 'go get'.
+cd $WORK/gopath/src/get-doesnt-add-dep
+cp $WORK/overlay/get_doesnt_add_dep_go_mod $WORK/want_go_mod
+! go get -d -overlay overlay.json .
+stderr 'overlaid files can''t be opened for write'
+cmp $WORK/overlay/get_doesnt_add_dep_go_mod $WORK/want_go_mod
+
+# Content of overlaid go.sum is used.
+# The go.sum in the module directory has garbage values for its
+# hashes, but the overlaid file has the correct values. If
+# the correct go.sum is used with the overlay, 'go get .' should
+# not report a security error.
+cd $WORK/gopath/src/overlay-sum-used
+! go get -d .
+stderr 'SECURITY ERROR'
+! go mod verify
+stderr 'SECURITY ERROR'
+go get -d -overlay overlay.json .
+go mod verify -overlay overlay.json
+# Overlaid go.sum is not rewritten.
+# Copy an incomplete file to the overlay file, and expect an error
+# attempting to update the file
+cp incomplete-sum-file $WORK/overlay/overlay-sum-used-correct-sums
+! go get -d -overlay overlay.json .
+stderr 'overlaid files can''t be opened for write'
+cmp incomplete-sum-file $WORK/overlay/overlay-sum-used-correct-sums
+! go mod tidy -overlay overlay.json
+stderr 'overlaid files can''t be opened for write'
+cmp incomplete-sum-file $WORK/overlay/overlay-sum-used-correct-sums
+
+# -overlay works with -modfile.
+# There's an empty go.mod file in the directory, and the file alternate.mod is
+# overlaid to the true go.mod file, so the -modfile flag and the overlay
+# mechanism need to work together to determine the name of the module.
+cd $WORK/gopath/src/overlay-and-dash-modfile
+go list -modfile=alternate.mod -overlay overlay.json .
+stdout 'found.the/module'
+# Even with -modfile, overlaid files can't be opened for write.
+! go get -modfile=alternate.mod -overlay overlay.json -d rsc.io/quote
+stderr 'overlaid files can''t be opened for write'
+
+# Carving out a module by adding an overlaid go.mod file
+cd $WORK/gopath/src/carve
+go list ./... # without an overlay, hasmod is carved out and nomod isn't
+stdout carve/nomod
+! stdout carve/hasmod
+go list -overlay overlay_carve_module.json ./... # The overlay carves out nomod, leaving nothing
+! stdout .
+stderr 'matched no packages'
+go list -overlay overlay_uncarve_module.json ./... # The overlay uncarves out hasmod
+stdout carve/nomod
+stdout carve/hasmod
+
+# Carving out a module by adding an overlaid go.mod file and using
+# -modfile to write to that file.
+cd $WORK/gopath/src/carve2/nomod
+go list -overlay overlay.json all
+! stdout ^carve2$
+stdout ^carve2/nomod$
+# Editing go.mod file fails because overlay is read only
+! go get -overlay overlay.json -d rsc.io/quote
+stderr 'overlaid files can''t be opened for write'
+! grep rsc.io/quote $WORK/overlay/carve2-nomod-go.mod
+# Editing go.mod file succeeds because we use -modfile to redirect to same file
+go get -overlay overlay.json -modfile $WORK/overlay/carve2-nomod-go.mod -d rsc.io/quote
+grep rsc.io/quote $WORK/overlay/carve2-nomod-go.mod
+
+-- no-go-mod/file.go --
+package simple
+-- no-go-mod/overlay.json --
+{
+ "Replace": {
+ "go.mod": "../../../overlay/simple_go_mod"
+ }
+}
+-- $WORK/overlay/simple_go_mod --
+module example.com/simple
+-- overlay-go-mod/file.go --
+package name
+-- overlay-go-mod/go.mod --
+module dont.use/this/module/name
+-- overlay-go-mod/overlay.json --
+{
+ "Replace": {
+ "go.mod": "../../../overlay/use_this_go_mod"
+ }
+}
+-- $WORK/overlay/use_this_go_mod --
+module use.this/module/name
+-- overlay-replaced-go-mod/go.mod --
+module m
+
+go 1.15
+
+require replaced/mod v1.0.0
+replace replaced/mod v1.0.0 => ../replaced-mod
+replace dep/mod v1.0.0 => ../dep-mod
+-- overlay-replaced-go-mod/source.go --
+package m
+
+import "replaced/mod/foo"
+
+func main() {
+ foo.f()
+}
+-- overlay-replaced-go-mod/overlay.json --
+{
+ "Replace": {
+ "../replaced-mod/go.mod": "../../../overlay/replacement_module_go_mod"
+ }
+}
+-- replaced-mod/go.mod --
+module replaced/mod
+-- replaced-mod/foo/foo.go --
+package foo
+
+import "dep/mod/foo"
+
+func f() { foo.g() }
+-- dep-mod/go.mod --
+invalid
+-- dep-mod/foo/foo.go --
+package foo
+
+func g() { fmt.Println("hello") }
+-- $WORK/overlay/replacement_module_go_mod --
+module replaced/mod
+
+require dep/mod v1.0.0
+
+-- get-doesnt-add-dep/overlay.json --
+{
+ "Replace": {
+ "go.mod": "../../../overlay/get_doesnt_add_dep_go_mod"
+ }
+}
+-- get-doesnt-add-dep/p.go --
+package p
+
+import "dependency/mod"
+
+func f() { mod.G() }
+-- get-doesnt-add-dep-dependency/go.mod --
+module dependency/mod
+-- get-doesnt-add-dep-dependency/mod.go --
+package mod
+
+func G() {}
+-- $WORK/overlay/get_doesnt_add_dep_go_mod --
+module get.doesnt/add/dep
+
+replace dependency/mod v1.0.0 => ../get-doesnt-add-dep-dependency
+-- overlay-sum-used/go.mod --
+module overlay.sum/used
+
+require rsc.io/quote v1.5.0
+-- overlay-sum-used/p.go --
+package p
+
+import "rsc.io/quote"
+
+func f() string {
+ return quote.Hello()
+}
+-- overlay-sum-used/incomplete-sum-file --
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c h1:pvCbr/wm8HzDD3fVywevekufpn6tCGPY3spdHeZJEsw=
+rsc.io/quote v1.5.0 h1:6fJa6E+wGadANKkUMlZ0DhXFpoKlslOQDCo259XtdIE=
+rsc.io/sampler v1.3.0 h1:HLGR/BgEtI3r0uymSP/nl2uPLsUnNJX8toRyhfpBTII=
+-- overlay-sum-used/overlay.json --
+{
+ "Replace": {
+ "go.sum": "../../../overlay/overlay-sum-used-correct-sums"
+ }
+}
+-- overlay-sum-used/go.sum --
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c h1:garbage+hash
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:garbage+hash
+rsc.io/quote v1.5.0 h1:garbage+hash
+rsc.io/quote v1.5.0/go.mod h1:garbage+hash
+rsc.io/sampler v1.3.0 h1:garbage+hash
+rsc.io/sampler v1.3.0/go.mod h1:garbage+hash
+-- $WORK/overlay/overlay-sum-used-correct-sums --
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c h1:pvCbr/wm8HzDD3fVywevekufpn6tCGPY3spdHeZJEsw=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+rsc.io/quote v1.5.0 h1:6fJa6E+wGadANKkUMlZ0DhXFpoKlslOQDCo259XtdIE=
+rsc.io/quote v1.5.0/go.mod h1:LzX7hefJvL54yjefDEDHNONDjII0t9xZLPXsUe+TKr0=
+rsc.io/sampler v1.3.0 h1:HLGR/BgEtI3r0uymSP/nl2uPLsUnNJX8toRyhfpBTII=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+-- overlay-and-dash-modfile/p.go --
+package module
+-- overlay-and-dash-modfile/go.mod --
+-- overlay-and-dash-modfile/overlay.json --
+{
+ "Replace": {
+ "alternate.mod": "../../../overlay/overlay-and-dash-modfile-alternate-mod"
+ }
+}
+-- $WORK/overlay/overlay-and-dash-modfile-alternate-mod --
+module found.the/module
+-- carve/go.mod --
+module carve
+-- carve/overlay_carve_module.json --
+{
+ "Replace": {
+ "nomod/go.mod": "../../../overlay/carve-nomod-go-mod"
+ }
+}
+-- carve/overlay_uncarve_module.json --
+{
+ "Replace": {
+ "hasmod/go.mod": ""
+ }
+}
+-- carve/hasmod/a.go --
+package hasmod
+-- carve/hasmod/go.mod --
+module carve/hasmod
+-- carve/nomod/b.go --
+package nomod
+-- $WORK/overlay/carve-nomod-go-mod --
+module carve/nomod
+-- carve2/go.mod --
+module carve2
+-- carve2/p.go --
+package p
+-- carve2/nomod/overlay.json --
+{
+ "Replace": {
+ "go.mod": "../../../../overlay/carve2-nomod-go.mod"
+ }
+}
+-- carve2/nomod/b.go --
+package nomod
+-- $WORK/overlay/carve2-nomod-go.mod --
+module carve2/nomod
diff --git a/src/cmd/go/testdata/script/mod_retract_incompatible.txt b/src/cmd/go/testdata/script/mod_retract_incompatible.txt
new file mode 100644
index 0000000000..61538e8024
--- /dev/null
+++ b/src/cmd/go/testdata/script/mod_retract_incompatible.txt
@@ -0,0 +1,15 @@
+# The current version of a module should not be considered when loading
+# retractions. If the current version is +incompatible, we should not prefer
+# +incompatible versions when looking for retractions.
+# Verifies #42601.
+
+go mod init m
+
+# Request a +incompatible version retracted in v1.0.0.
+go get -d example.com/retract/incompatible@v2.0.0+incompatible
+stderr '^go: warning: example.com/retract/incompatible@v2.0.0\+incompatible: retracted by module author$'
+
+# We should still see a warning if the +incompatible was previously in the
+# build list.
+go get -d example.com/retract/incompatible@v2.0.0+incompatible
+stderr '^go: warning: example.com/retract/incompatible@v2.0.0\+incompatible: retracted by module author$'
diff --git a/src/cmd/internal/pkgpath/pkgpath.go b/src/cmd/internal/pkgpath/pkgpath.go
index 0b24468be6..40a040a81a 100644
--- a/src/cmd/internal/pkgpath/pkgpath.go
+++ b/src/cmd/internal/pkgpath/pkgpath.go
@@ -50,9 +50,12 @@ func ToSymbolFunc(cmd, tmpdir string) (func(string) string, error) {
return nil, err
}
- // New mangling: expect go.l..u00e4ufer.Run
- // Old mangling: expect go.l__ufer.Run
- if bytes.Contains(buf, []byte("go.l..u00e4ufer.Run")) {
+ // Original mangling: go.l__ufer.Run
+ // Mangling v2: go.l..u00e4ufer.Run
+ // Mangling v3: go_0l_u00e4ufer.Run
+ if bytes.Contains(buf, []byte("go_0l_u00e4ufer.Run")) {
+ return toSymbolV3, nil
+ } else if bytes.Contains(buf, []byte("go.l..u00e4ufer.Run")) {
return toSymbolV2, nil
} else if bytes.Contains(buf, []byte("go.l__ufer.Run")) {
return toSymbolV1, nil
@@ -82,7 +85,7 @@ func toSymbolV1(ppath string) string {
return strings.Map(clean, ppath)
}
-// toSymbolV2 converts a package path using the newer mangling scheme.
+// toSymbolV2 converts a package path using the second mangling scheme.
func toSymbolV2(ppath string) string {
// This has to build at bootstrap time, so it has to build
// with Go 1.4, so we don't use strings.Builder.
@@ -112,3 +115,60 @@ func toSymbolV2(ppath string) string {
}
return string(bsl)
}
+
+// v3UnderscoreCodes maps from a character that supports an underscore
+// encoding to the underscore encoding character.
+var v3UnderscoreCodes = map[byte]byte{
+ '_': '_',
+ '.': '0',
+ '/': '1',
+ '*': '2',
+ ',': '3',
+ '{': '4',
+ '}': '5',
+ '[': '6',
+ ']': '7',
+ '(': '8',
+ ')': '9',
+ '"': 'a',
+ ' ': 'b',
+ ';': 'c',
+}
+
+// toSymbolV3 converts a package path using the third mangling scheme.
+func toSymbolV3(ppath string) string {
+ // This has to build at bootstrap time, so it has to build
+ // with Go 1.4, so we don't use strings.Builder.
+ bsl := make([]byte, 0, len(ppath))
+ changed := false
+ for _, c := range ppath {
+ if ('A' <= c && c <= 'Z') || ('a' <= c && c <= 'z') || ('0' <= c && c <= '9') {
+ bsl = append(bsl, byte(c))
+ continue
+ }
+
+ if c < 0x80 {
+ if u, ok := v3UnderscoreCodes[byte(c)]; ok {
+ bsl = append(bsl, '_', u)
+ changed = true
+ continue
+ }
+ }
+
+ var enc string
+ switch {
+ case c < 0x80:
+ enc = fmt.Sprintf("_x%02x", c)
+ case c < 0x10000:
+ enc = fmt.Sprintf("_u%04x", c)
+ default:
+ enc = fmt.Sprintf("_U%08x", c)
+ }
+ bsl = append(bsl, enc...)
+ changed = true
+ }
+ if !changed {
+ return ppath
+ }
+ return string(bsl)
+}
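
A trimmed-down sketch of the v3 scheme for a subset of inputs (it omits the _x escape for other ASCII punctuation and the unchanged-path fast return; only '.', '/', '_', and non-ASCII runes are handled here):

package main

import "fmt"

var codes = map[rune]byte{'_': '_', '.': '0', '/': '1'}

func mangleV3Subset(ppath string) string {
	out := make([]byte, 0, len(ppath))
	for _, c := range ppath {
		switch {
		case ('A' <= c && c <= 'Z') || ('a' <= c && c <= 'z') || ('0' <= c && c <= '9'):
			out = append(out, byte(c))
		default:
			if u, ok := codes[c]; ok {
				out = append(out, '_', u)
			} else if c < 0x10000 {
				out = append(out, fmt.Sprintf("_u%04x", c)...)
			} else {
				out = append(out, fmt.Sprintf("_U%08x", c)...)
			}
		}
	}
	return string(out)
}

func main() {
	fmt.Println(mangleV3Subset("golang.org/x/net/http")) // golang_0org_1x_1net_1http
	fmt.Println(mangleV3Subset("pä世.🜃"))                 // p_u00e4_u4e16_0_U0001f703
}
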
diff --git a/src/cmd/internal/pkgpath/pkgpath_test.go b/src/cmd/internal/pkgpath/pkgpath_test.go
index 7355f81bae..232e803a60 100644
--- a/src/cmd/internal/pkgpath/pkgpath_test.go
+++ b/src/cmd/internal/pkgpath/pkgpath_test.go
@@ -24,6 +24,9 @@ func init() {
case "v2":
os.Stdout.WriteString(`.string "go.l..u00e4ufer.Run"`)
os.Exit(0)
+ case "v3":
+ os.Stdout.WriteString(`.string "go_0l_u00e4ufer.Run"`)
+ os.Exit(0)
case "error":
os.Stdout.WriteString(`unknown string`)
os.Exit(0)
@@ -46,6 +49,10 @@ func TestToSymbolFunc(t *testing.T) {
mangled: "p..u00e4..u4e16..U0001f703",
},
{
+ env: "v3",
+ mangled: "p_u00e4_u4e16_U0001f703",
+ },
+ {
env: "error",
fail: true,
},
@@ -75,32 +82,37 @@ func TestToSymbolFunc(t *testing.T) {
}
var symbolTests = []struct {
- input, v1, v2 string
+ input, v1, v2, v3 string
}{
{
"",
"",
"",
+ "",
},
{
"bytes",
"bytes",
"bytes",
+ "bytes",
},
{
"net/http",
"net_http",
"net..z2fhttp",
+ "net_1http",
},
{
"golang.org/x/net/http",
"golang_org_x_net_http",
"golang.x2eorg..z2fx..z2fnet..z2fhttp",
+ "golang_0org_1x_1net_1http",
},
{
"pä世.🜃",
"p____",
"p..u00e4..u4e16.x2e..U0001f703",
+ "p_u00e4_u4e16_0_U0001f703",
},
}
@@ -119,3 +131,11 @@ func TestV2(t *testing.T) {
}
}
}
+
+func TestV3(t *testing.T) {
+ for _, test := range symbolTests {
+ if got, want := toSymbolV3(test.input), test.v3; got != want {
+ t.Errorf("toSymbolV3(%q) = %q, want %q", test.input, got, want)
+ }
+ }
+}
diff --git a/src/go/constant/value.go b/src/go/constant/value.go
index 116c7575d9..4a89ef3b94 100644
--- a/src/go/constant/value.go
+++ b/src/go/constant/value.go
@@ -17,6 +17,7 @@ import (
"go/token"
"math"
"math/big"
+ "math/bits"
"strconv"
"strings"
"sync"
@@ -610,7 +611,11 @@ func Make(x interface{}) Value {
func BitLen(x Value) int {
switch x := x.(type) {
case int64Val:
- return i64toi(x).val.BitLen()
+ u := uint64(x)
+ if x < 0 {
+ u = uint64(-x)
+ }
+ return 64 - bits.LeadingZeros64(u)
case intVal:
return x.val.BitLen()
case unknownVal:
@@ -1018,52 +1023,55 @@ func match(x, y Value) (_, _ Value) {
}
// ord(x) <= ord(y)
- switch x := x.(type) {
+ // Prefer to return the original x and y arguments when possible,
+ // to avoid unnecessary heap allocations.
+
+ switch x1 := x.(type) {
case boolVal, *stringVal, complexVal:
return x, y
case int64Val:
- switch y := y.(type) {
+ switch y.(type) {
case int64Val:
return x, y
case intVal:
- return i64toi(x), y
+ return i64toi(x1), y
case ratVal:
- return i64tor(x), y
+ return i64tor(x1), y
case floatVal:
- return i64tof(x), y
+ return i64tof(x1), y
case complexVal:
- return vtoc(x), y
+ return vtoc(x1), y
}
case intVal:
- switch y := y.(type) {
+ switch y.(type) {
case intVal:
return x, y
case ratVal:
- return itor(x), y
+ return itor(x1), y
case floatVal:
- return itof(x), y
+ return itof(x1), y
case complexVal:
- return vtoc(x), y
+ return vtoc(x1), y
}
case ratVal:
- switch y := y.(type) {
+ switch y.(type) {
case ratVal:
return x, y
case floatVal:
- return rtof(x), y
+ return rtof(x1), y
case complexVal:
- return vtoc(x), y
+ return vtoc(x1), y
}
case floatVal:
- switch y := y.(type) {
+ switch y.(type) {
case floatVal:
return x, y
case complexVal:
- return vtoc(x), y
+ return vtoc(x1), y
}
}
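
The BitLen change above swaps a big.Int conversion for pure integer math. A standalone sketch of the same computation, including the MinInt64 edge case (uint64(-x) wraps to the correct magnitude under two's complement):

package main

import (
	"fmt"
	"math"
	"math/bits"
)

// bitLen64 returns the number of bits in the absolute value of x; 0 for 0.
func bitLen64(x int64) int {
	u := uint64(x)
	if x < 0 {
		u = uint64(-x) // wraps correctly even for math.MinInt64
	}
	return 64 - bits.LeadingZeros64(u)
}

func main() {
	for _, x := range []int64{0, 1, -16, 1 << 62, math.MinInt64} {
		fmt.Println(x, bitLen64(x)) // 0, 1, 5, 63, 64
	}
}
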
diff --git a/src/go/constant/value_test.go b/src/go/constant/value_test.go
index 1a5025cbbd..1ad6784f9a 100644
--- a/src/go/constant/value_test.go
+++ b/src/go/constant/value_test.go
@@ -655,3 +655,24 @@ func BenchmarkStringAdd(b *testing.B) {
})
}
}
+
+var bitLenTests = []struct {
+ val int64
+ want int
+}{
+ {0, 0},
+ {1, 1},
+ {-16, 5},
+ {1 << 61, 62},
+ {1 << 62, 63},
+ {-1 << 62, 63},
+ {-1 << 63, 64},
+}
+
+func TestBitLen(t *testing.T) {
+ for _, test := range bitLenTests {
+ if got := BitLen(MakeInt64(test.val)); got != test.want {
+ t.Errorf("%v: got %v, want %v", test.val, got, test.want)
+ }
+ }
+}
diff --git a/src/internal/poll/copy_file_range_linux.go b/src/internal/poll/copy_file_range_linux.go
index 1635bb1bfc..fc34aef4cb 100644
--- a/src/internal/poll/copy_file_range_linux.go
+++ b/src/internal/poll/copy_file_range_linux.go
@@ -35,6 +35,7 @@ func kernelVersion() (major int, minor int) {
if vi >= len(values) {
break
}
+ value = 0
}
}
switch vi {
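
The one-line fix above resets the accumulator after each separator; without it, the major digits leak into the minor number. A self-contained version of the loop for illustration (simplified: the real function also switches on how many fields were seen):

package main

import "fmt"

func parseKernelVersion(release string) (major, minor int) {
	var values [2]int
	value, vi := 0, 0
	for _, c := range release {
		if '0' <= c && c <= '9' {
			value = value*10 + int(c-'0')
		} else {
			// Non-digit: store the field just read and move on.
			values[vi] = value
			vi++
			if vi >= len(values) {
				break
			}
			value = 0 // the fix: without this, "5.4.0" would parse as minor=54
		}
	}
	return values[0], values[1]
}

func main() {
	fmt.Println(parseKernelVersion("5.4.0-42-generic")) // 5 4
}
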
diff --git a/src/net/sock_linux.go b/src/net/sock_linux.go
index 4d91001937..9f62ed3dee 100644
--- a/src/net/sock_linux.go
+++ b/src/net/sock_linux.go
@@ -27,6 +27,7 @@ func kernelVersion() (major int, minor int) {
if vi >= len(values) {
break
}
+ value = 0
}
}
switch vi {
diff --git a/src/runtime/metrics/description.go b/src/runtime/metrics/description.go
index 9d3611b64c..32af5d1727 100644
--- a/src/runtime/metrics/description.go
+++ b/src/runtime/metrics/description.go
@@ -113,14 +113,13 @@ var allDesc = []Description{
Kind: KindUint64,
},
{
- Name: "/memory/classes/heap/stacks:bytes",
- Description: "Memory allocated from the heap that is reserved for stack space. Not all of it is necessarily " +
- "simultaneously in use, but it may not be used for any other purpose.",
- Kind: KindUint64,
+ Name: "/memory/classes/heap/stacks:bytes",
+ Description: "Memory allocated from the heap that is reserved for stack space, whether or not it is currently in-use.",
+ Kind: KindUint64,
},
{
Name: "/memory/classes/heap/unused:bytes",
- Description: "Memory that is reserved for heap objects but is otherwise not currently used to hold heap objects.",
+ Description: "Memory that is reserved for heap objects but is not currently used to hold heap objects.",
Kind: KindUint64,
},
{
diff --git a/src/runtime/metrics/doc.go b/src/runtime/metrics/doc.go
index f58cdcdd03..42b5bc3724 100644
--- a/src/runtime/metrics/doc.go
+++ b/src/runtime/metrics/doc.go
@@ -86,12 +86,11 @@ Supported metrics
/memory/classes/heap/stacks:bytes
Memory allocated from the heap that is reserved for stack
- space. Not all of it is necessarily simultaneously in use, but
- it may not be used for any other purpose.
+ space, whether or not it is currently in use.
/memory/classes/heap/unused:bytes
- Memory that is reserved for heap objects but is otherwise not
- currently used to hold heap objects.
+ Memory that is reserved for heap objects but is not currently
+ used to hold heap objects.
/memory/classes/metadata/mcache/free:bytes
Memory that is reserved for runtime mcache structures, but
diff --git a/src/strconv/bytealg.go b/src/strconv/bytealg.go
new file mode 100644
index 0000000000..7f66f2a8bb
--- /dev/null
+++ b/src/strconv/bytealg.go
@@ -0,0 +1,14 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !compiler_bootstrap
+
+package strconv
+
+import "internal/bytealg"
+
+// contains reports whether the string contains the byte c.
+func contains(s string, c byte) bool {
+ return bytealg.IndexByteString(s, c) != -1
+}
diff --git a/src/strconv/bytealg_bootstrap.go b/src/strconv/bytealg_bootstrap.go
new file mode 100644
index 0000000000..a3a547d1b6
--- /dev/null
+++ b/src/strconv/bytealg_bootstrap.go
@@ -0,0 +1,17 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build compiler_bootstrap
+
+package strconv
+
+// contains reports whether the string contains the byte c.
+func contains(s string, c byte) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] == c {
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/strconv/eisel_lemire.go b/src/strconv/eisel_lemire.go
index 6c7f852eba..fecd1b9345 100644
--- a/src/strconv/eisel_lemire.go
+++ b/src/strconv/eisel_lemire.go
@@ -29,7 +29,7 @@ func eiselLemire64(man uint64, exp10 int, neg bool) (f float64, ok bool) {
// Exp10 Range.
if man == 0 {
if neg {
- f = math.Float64frombits(0x80000000_00000000) // Negative zero.
+ f = math.Float64frombits(0x8000000000000000) // Negative zero.
}
return f, true
}
@@ -39,7 +39,7 @@ func eiselLemire64(man uint64, exp10 int, neg bool) (f float64, ok bool) {
// Normalization.
clz := bits.LeadingZeros64(man)
- man <<= clz
+ man <<= uint(clz)
const float64ExponentBias = 1023
retExp2 := uint64(217706*exp10>>16+64+float64ExponentBias) - uint64(clz)
@@ -84,9 +84,9 @@ func eiselLemire64(man uint64, exp10 int, neg bool) (f float64, ok bool) {
if retExp2-1 >= 0x7FF-1 {
return 0, false
}
- retBits := retExp2<<52 | retMantissa&0x000FFFFF_FFFFFFFF
+ retBits := retExp2<<52 | retMantissa&0x000FFFFFFFFFFFFF
if neg {
- retBits |= 0x80000000_00000000
+ retBits |= 0x8000000000000000
}
return math.Float64frombits(retBits), true
}
@@ -114,7 +114,7 @@ func eiselLemire32(man uint64, exp10 int, neg bool) (f float32, ok bool) {
// Normalization.
clz := bits.LeadingZeros64(man)
- man <<= clz
+ man <<= uint(clz)
const float32ExponentBias = 127
retExp2 := uint64(217706*exp10>>16+64+float32ExponentBias) - uint64(clz)
@@ -122,13 +122,13 @@ func eiselLemire32(man uint64, exp10 int, neg bool) (f float32, ok bool) {
xHi, xLo := bits.Mul64(man, detailedPowersOfTen[exp10-detailedPowersOfTenMinExp10][1])
// Wider Approximation.
- if xHi&0x3F_FFFFFFFF == 0x3F_FFFFFFFF && xLo+man < man {
+ if xHi&0x3FFFFFFFFF == 0x3FFFFFFFFF && xLo+man < man {
yHi, yLo := bits.Mul64(man, detailedPowersOfTen[exp10-detailedPowersOfTenMinExp10][0])
mergedHi, mergedLo := xHi, xLo+yHi
if mergedLo < xLo {
mergedHi++
}
- if mergedHi&0x3F_FFFFFFFF == 0x3F_FFFFFFFF && mergedLo+1 == 0 && yLo+man < man {
+ if mergedHi&0x3FFFFFFFFF == 0x3FFFFFFFFF && mergedLo+1 == 0 && yLo+man < man {
return 0, false
}
xHi, xLo = mergedHi, mergedLo
@@ -140,7 +140,7 @@ func eiselLemire32(man uint64, exp10 int, neg bool) (f float32, ok bool) {
retExp2 -= 1 ^ msb
// Half-way Ambiguity.
- if xLo == 0 && xHi&0x3F_FFFFFFFF == 0 && retMantissa&3 == 1 {
+ if xLo == 0 && xHi&0x3FFFFFFFFF == 0 && retMantissa&3 == 1 {
return 0, false
}
diff --git a/src/strconv/quote.go b/src/strconv/quote.go
index bcbdbc514d..4ffa10b72e 100644
--- a/src/strconv/quote.go
+++ b/src/strconv/quote.go
@@ -7,7 +7,6 @@
package strconv
import (
- "internal/bytealg"
"unicode/utf8"
)
@@ -436,11 +435,6 @@ func Unquote(s string) (string, error) {
return string(buf), nil
}
-// contains reports whether the string contains the byte c.
-func contains(s string, c byte) bool {
- return bytealg.IndexByteString(s, c) != -1
-}
-
// bsearch16 returns the smallest i such that a[i] >= x.
// If there is no such i, bsearch16 returns len(a).
func bsearch16(a []uint16, x uint16) int {
diff --git a/src/sync/atomic/doc.go b/src/sync/atomic/doc.go
index ff4ad80049..805ef956d5 100644
--- a/src/sync/atomic/doc.go
+++ b/src/sync/atomic/doc.go
@@ -43,15 +43,14 @@ import (
"unsafe"
)
-// BUG(rsc): On x86-32, the 64-bit functions use instructions unavailable before the Pentium MMX.
+// BUG(rsc): On 386, the 64-bit functions use instructions unavailable before the Pentium MMX.
//
// On non-Linux ARM, the 64-bit functions use instructions unavailable before the ARMv6k core.
//
-// On ARM, x86-32, and 32-bit MIPS,
-// it is the caller's responsibility to arrange for 64-bit
-// alignment of 64-bit words accessed atomically. The first word in a
-// variable or in an allocated struct, array, or slice can be relied upon to be
-// 64-bit aligned.
+// On ARM, 386, and 32-bit MIPS, it is the caller's responsibility
+// to arrange for 64-bit alignment of 64-bit words accessed atomically.
+// The first word in a variable or in an allocated struct, array, or slice can
+// be relied upon to be 64-bit aligned.
// SwapInt32 atomically stores new into *addr and returns the previous *addr value.
func SwapInt32(addr *int32, new int32) (old int32)
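
The rewrapped paragraph above states the alignment rule; a small example of the standard way to satisfy it, putting the 64-bit word first in the struct so 32-bit targets (ARM, 386, 32-bit MIPS) can use the 64-bit atomics safely:

package main

import (
	"fmt"
	"sync/atomic"
)

type counter struct {
	n     uint64 // first word: guaranteed 64-bit aligned per the rule above
	label string
}

func main() {
	c := counter{label: "requests"}
	atomic.AddUint64(&c.n, 1)
	fmt.Println(c.label, atomic.LoadUint64(&c.n)) // requests 1
}
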
diff --git a/src/sync/once.go b/src/sync/once.go
index bf4b80c867..8844314e7e 100644
--- a/src/sync/once.go
+++ b/src/sync/once.go
@@ -15,7 +15,7 @@ type Once struct {
// done indicates whether the action has been performed.
// It is first in the struct because it is used in the hot path.
// The hot path is inlined at every call site.
- // Placing done first allows more compact instructions on some architectures (amd64/x86),
+ // Placing done first allows more compact instructions on some architectures (amd64/386),
// and fewer instructions (to calculate offset) on other architectures.
done uint32
m Mutex
diff --git a/src/syscall/dll_windows.go b/src/syscall/dll_windows.go
index c54feec56a..d99da00089 100644
--- a/src/syscall/dll_windows.go
+++ b/src/syscall/dll_windows.go
@@ -20,6 +20,8 @@ type DLLError struct {
func (e *DLLError) Error() string { return e.Msg }
+func (e *DLLError) Unwrap() error { return e.Err }
+
// Implemented in ../runtime/syscall_windows.go.
func Syscall(trap, nargs, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno)
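
The added Unwrap method lets errors.Is and errors.As see through a DLLError to the underlying cause. A minimal standalone analogue (the type here is a stand-in, not the real syscall.DLLError):

package main

import (
	"errors"
	"fmt"
)

type dllError struct {
	Err error
	Msg string
}

func (e *dllError) Error() string { return e.Msg }
func (e *dllError) Unwrap() error { return e.Err }

func main() {
	notFound := errors.New("module not found")
	err := &dllError{Err: notFound, Msg: "could not load foo.dll"}
	fmt.Println(errors.Is(err, notFound)) // true, via Unwrap
}
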
diff --git a/test/const2.go b/test/const2.go
index 048d0cb9f3..d104a2fa71 100644
--- a/test/const2.go
+++ b/test/const2.go
@@ -19,3 +19,14 @@ const LargeB = LargeA * LargeA * LargeA
const LargeC = LargeB * LargeB * LargeB // GC_ERROR "constant multiplication overflow"
const AlsoLargeA = LargeA << 400 << 400 >> 400 >> 400 // GC_ERROR "constant shift overflow"
+
+// Issue #42732.
+
+const a = 1e+500000000
+const b = a * a // ERROR "constant multiplication overflow"
+const c = b * b
+
+const MaxInt512 = (1<<256 - 1) * (1<<256 + 1)
+const _ = MaxInt512 + 1 // ERROR "constant addition overflow"
+const _ = MaxInt512 ^ -1 // ERROR "constant bitwise XOR overflow"
+const _ = ^MaxInt512 // ERROR "constant bitwise complement overflow"
diff --git a/test/fixedbugs/bug340.go b/test/fixedbugs/bug340.go
index 118bbacc22..a067940408 100644
--- a/test/fixedbugs/bug340.go
+++ b/test/fixedbugs/bug340.go
@@ -12,6 +12,7 @@ func main() {
var x interface{}
switch t := x.(type) {
case 0: // ERROR "type"
- t.x = 1 // ERROR "type interface \{\}|reference to undefined field or method"
+ t.x = 1
+ x.x = 1 // ERROR "type interface \{\}|reference to undefined field or method"
}
}
diff --git a/test/fixedbugs/issue20232.go b/test/fixedbugs/issue20232.go
index f91c74936b..7a0300a4c4 100644
--- a/test/fixedbugs/issue20232.go
+++ b/test/fixedbugs/issue20232.go
@@ -6,6 +6,7 @@
package main
-const _ = 6e5518446744 // ERROR "malformed constant: 6e5518446744 \(exponent overflow\)"
+const x = 6e5518446744 // ERROR "malformed constant: 6e5518446744"
+const _ = x * x
const _ = 1e-1000000000
-const _ = 1e+1000000000 // ERROR "constant too large"
+const _ = 1e+1000000000 // ERROR "malformed constant: 1e\+1000000000"
diff --git a/test/fixedbugs/issue42727.go b/test/fixedbugs/issue42727.go
new file mode 100644
index 0000000000..40081708b1
--- /dev/null
+++ b/test/fixedbugs/issue42727.go
@@ -0,0 +1,23 @@
+// compile
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Ensure that late expansion correctly handles an OpLoad with type interface{}
+
+package p
+
+type iface interface {
+ m()
+}
+
+type it interface{}
+
+type makeIface func() iface
+
+func f() {
+ var im makeIface
+ e := im().(it)
+ _ = &e
+}
diff --git a/test/fixedbugs/issue42753.go b/test/fixedbugs/issue42753.go
new file mode 100644
index 0000000000..a998d1d3b3
--- /dev/null
+++ b/test/fixedbugs/issue42753.go
@@ -0,0 +1,13 @@
+// compile -d=ssa/check/on
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func f() uint32 {
+ s := "\x01"
+ x := -int32(s[0])
+ return uint32(x) & 0x7fffffff
+}
diff --git a/test/fixedbugs/issue42790.go b/test/fixedbugs/issue42790.go
new file mode 100644
index 0000000000..d83a02247a
--- /dev/null
+++ b/test/fixedbugs/issue42790.go
@@ -0,0 +1,9 @@
+// compile
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+const _ = -uint(len(string(1<<32)) - len("\uFFFD"))