path: root/src/cmd/compile/internal
author     Matthew Dempsky <mdempsky@google.com>   2021-01-22 15:35:11 -0800
committer  Matthew Dempsky <mdempsky@google.com>   2021-01-22 16:32:37 -0800
commit     6e46c8fbb5ba440cc503193220476c204100fb40 (patch)
tree       67472f55b9b1f5ab66cbcfd39cf154c1f8c2011c /src/cmd/compile/internal
parent     e4ef30a66751c39bdd24764763531f1a4c325845 (diff)
parent     7e0a81d2806b073c6455f73b10fbf2c811703f46 (diff)
download   go-6e46c8fbb5ba440cc503193220476c204100fb40.tar.gz
           go-6e46c8fbb5ba440cc503193220476c204100fb40.zip
[dev.typeparams] all: merge dev.regabi (7e0a81d) into dev.typeparams
As with CL 285875, this required resolving some conflicts around handling
of //go:embed directives. Still further work is needed to reject uses of
//go:embed in files that don't import "embed", so this is left as a TODO.
(When this code was written for dev.typeparams, we were still leaning
towards not requiring the "embed" import.)

Also, the recent support for inlining closures (CL 283112) interacts poorly
with -G=3 mode. There are some known issues with this code already (#43818),
so for now this CL disables inlining of closures when in -G=3 mode, with a
TODO to revisit this once closure inlining is working fully.

Conflicts:

- src/cmd/compile/internal/noder/noder.go
- src/cmd/compile/internal/typecheck/dcl.go
- src/cmd/compile/internal/typecheck/func.go
- test/run.go

Merge List:

+ 2021-01-22 7e0a81d280 [dev.regabi] all: merge master (dab3e5a) into dev.regabi
+ 2021-01-22 dab3e5affe runtime: switch runtime to libc for openbsd/amd64
+ 2021-01-22 a1b53d85da cmd/go: add documentation for test and xtest fields output by go list
+ 2021-01-22 b268b60774 runtime: remove pthread_kill/pthread_self for openbsd
+ 2021-01-22 ec4051763d runtime: fix typo in mgcscavenge.go
+ 2021-01-22 7ece3a7b17 net/http: fix flaky TestDisableKeepAliveUpgrade
+ 2021-01-22 50cba0506f time: clarify Timer.Reset behavior on AfterFunc Timers
+ 2021-01-22 cf10e69f17 doc/go1.16: mention net/http.Transport.GetProxyConnectHeader
+ 2021-01-22 ec1b945265 doc/go1.16: mention path/filepath.WalkDir
+ 2021-01-22 11def3d40b doc/go1.16: mention syscall.AllThreadsSyscall
+ 2021-01-21 07b0235609 doc/go1.16: add notes about package-specific fs.FS changes
+ 2021-01-21 e2b4f1fea5 doc/go1.16: minor formatting fix
+ 2021-01-21 9f43a9e07b doc/go1.16: mention new debug/elf constants
+ 2021-01-21 3c2f11ba5b cmd/go: overwrite program name with full path
+ 2021-01-21 953d1feca9 all: introduce and use internal/execabs
+ 2021-01-21 b186e4d70d cmd/go: add test case for cgo CC setting
+ 2021-01-21 5a8a2265fb cmd/cgo: report exec errors a bit more clearly
+ 2021-01-21 46e2e2e9d9 cmd/go: pass resolved CC, GCCGO to cgo
+ 2021-01-21 3d40895e36 runtime: switch openbsd/arm64 to pthreads
+ 2021-01-21 d95ca91380 crypto/elliptic: fix P-224 field reduction
+ 2021-01-21 d7e71c01ad [dev.regabi] cmd/compile: replace ir.Name map with ir.NameSet for dwarf
+ 2021-01-21 5248f59a22 [dev.regabi] cmd/compile: replace ir.Name map with ir.NameSet for SSA
+ 2021-01-21 970d8b6cb2 [dev.regabi] cmd/compile: replace ir.Name map with ir.NameSet in inlining
+ 2021-01-21 68a4664475 [dev.regabi] cmd/compile: remove tempAssigns in walkCall1
+ 2021-01-21 fd9a391cdd [dev.regabi] cmd/compile: remove CallExpr.Rargs
+ 2021-01-21 19a6db6b63 [dev.regabi] cmd/compile: make sure mkcall* passed non-nil init
+ 2021-01-21 9f036844db [dev.regabi] cmd/compile: use ir.DoChildren directly in inlining
+ 2021-01-21 213c3905e9 [dev.regabi] cmd/compile: use node walked flag to prevent double walk for walkSelect
+ 2021-01-20 1760d736f6 [dev.regabi] cmd/compile: exporting, importing, and inlining functions with OCLOSURE
+ 2021-01-20 ecf4ebf100 cmd/internal/moddeps: check content of all modules in GOROOT
+ 2021-01-20 92cb157cf3 [dev.regabi] cmd/compile: late expansion of return values
+ 2021-01-20 d2d155d1ae runtime: don't adjust timer pp field in timerWaiting status
+ 2021-01-20 803d18fc6c cmd/go: set Incomplete field on go list output if no files match embed
+ 2021-01-20 6e243ce71d cmd/go: have go mod vendor copy embedded files in subdirs
+ 2021-01-20 be28e5abc5 cmd/go: fix mod_get_fallback test
+ 2021-01-20 928bda4f4a runtime: convert openbsd/amd64 locking to libc
+ 2021-01-19 824f2d635c cmd/go: allow go fmt to complete when embedded file is missing
+ 2021-01-19 0575e35e50 cmd/compile: require 'go 1.16' go.mod line for //go:embed
+ 2021-01-19 9423d50d53 [dev.regabi] cmd/compile: use '%q' for printing rune values less than 128
+ 2021-01-19 ccb2e90688 cmd/link: exit before Asmb2 if error
+ 2021-01-19 ca5774a5a5 embed: treat uninitialized FS as empty
+ 2021-01-19 d047c91a6c cmd/link,runtime: switch openbsd/amd64 to pthreads
+ 2021-01-19 61debffd97 runtime: factor out usesLibcall
+ 2021-01-19 9fed39d281 runtime: factor out mStackIsSystemAllocated
+ 2021-01-19 a2f825c542 [dev.regabi] cmd/compile: directly create go.map and go.track symbols
+ 2021-01-19 4a4212c0e5 [dev.regabi] cmd/compile: refactor Linksym creation
+ 2021-01-19 4f5c603c0f [dev.regabi] cmd/compile: cleanup callTargetLSym
+ 2021-01-18 dbab079835 runtime: free Windows event handles after last lock is dropped
+ 2021-01-18 5a8fbb0d2d os: do not close syscall.Stdin in TestReadStdin
+ 2021-01-18 422f38fb6c [dev.regabi] cmd/compile: move stack objects to liveness
+ 2021-01-18 6113db0bb4 [dev.regabi] cmd/compile: convert OPANIC argument to interface{} during typecheck
+ 2021-01-18 4c835f9169 [dev.regabi] cmd/compile: use LinksymOffsetExpr in TypePtr/ItabAddr
+ 2021-01-18 0ffa1ead6e [dev.regabi] cmd/compile: use *obj.LSym instead of *ir.Name for staticdata functions
+ 2021-01-17 7e0fa38aad [dev.regabi] cmd/compile: remove unneeded packages from ir.Pkgs
+ 2021-01-17 99a5db11ac [dev.regabi] cmd/compile: use LinksymOffsetExpr in walkConvInterface
+ 2021-01-17 87845d14f9 [dev.regabi] cmd/compile: add ir.TailCallStmt
+ 2021-01-17 e3027c6828 [dev.regabi] cmd/compile: fix linux-amd64-noopt builder
+ 2021-01-17 59ff93fe64 [dev.regabi] cmd/compile: rename NameOffsetExpr to LinksymOffsetExpr
+ 2021-01-17 82b9cae700 [dev.regabi] cmd/compile: change ir.NameOffsetExpr to use *obj.LSym instead of *Name
+ 2021-01-17 88956fc4b1 [dev.regabi] cmd/compile: stop analyze NameOffsetExpr.Name_ in escape analysis
+ 2021-01-17 7ce2a8383d [dev.regabi] cmd/compile: simplify stack temp initialization
+ 2021-01-17 ba0e8a92fa [dev.regabi] cmd/compile: refactor temp construction in walk
+ 2021-01-17 78e5aabcdb [dev.regabi] cmd/compile: replace Node.HasCall with walk.mayCall
+ 2021-01-16 6de9423445 [dev.regabi] cmd/compile: cleanup OAS2FUNC ordering
+ 2021-01-16 a956a0e909 [dev.regabi] cmd/compile, runtime: fix up comments/error messages from recent renames
+ 2021-01-16 ab3b67abfd [dev.regabi] cmd/compile: remove ONEWOBJ
+ 2021-01-16 c9b1445ac8 [dev.regabi] cmd/compile: remove TypeAssertExpr {Src,Dst}Type fields
+ 2021-01-15 682a1d2176 runtime: detect errors in DuplicateHandle
+ 2021-01-15 9f83418b83 cmd/link: remove GOROOT write in TestBuildForTvOS
+ 2021-01-15 ec9470162f cmd/compile: allow embed into any string or byte slice type
+ 2021-01-15 54198b04db cmd/compile: disallow embed of var inside func
+ 2021-01-15 b386c735e7 cmd/go: fix go generate docs
+ 2021-01-15 bb5075a525 syscall: remove RtlGenRandom and move it into internal/syscall
+ 2021-01-15 1deae0b597 os: invoke processKiller synchronously in testKillProcess
+ 2021-01-15 03a875137f [dev.regabi] cmd/compile: unexport reflectdata.WriteType
+ 2021-01-15 14537e6e54 [dev.regabi] cmd/compile: move stkobj symbol generation to SSA
+ 2021-01-15 ab523fc510 [dev.regabi] cmd/compile: don't promote Byval CaptureVars if Addrtaken
+ 2021-01-15 ff196c3e84 crypto/x509: update iOS bundled roots to version 55188.40.9
+ 2021-01-15 b7a698c73f [dev.regabi] test: disable test on windows because expected contains path separators.
+ 2021-01-15 4be7af23f9 [dev.regabi] cmd/compile: fix ICE during ir.Dump
+ 2021-01-14 e125ccd10e cmd/go: in 'go mod edit', validate versions given to -retract and -exclude
+ 2021-01-14 eb330020dc cmd/dist, cmd/go: pass -arch for C compilation on Darwin
+ 2021-01-14 84e8a06f62 cmd/cgo: remove unnecessary space in cgo export header
+ 2021-01-14 0c86b999c3 cmd/test2json: document passing -test.paniconexit0
+ 2021-01-14 9135795891 cmd/go/internal/load: report positions for embed errors
+ 2021-01-14 35b9c66601 [dev.regabi] cmd/compile,cmd/link: additional code review suggestions for CL 270863
+ 2021-01-14 d9b79e53bb cmd/compile: fix wrong complement for arm64 floating-point comparisons
+ 2021-01-14 c73232d08f cmd/go/internal/load: refactor setErrorPos to PackageError.setPos
+ 2021-01-14 6aa28d3e06 go/build: report positions for go:embed directives
+ 2021-01-14 9734fd482d [dev.regabi] cmd/compile: use node walked flag to prevent double walk for walkSwitch
+ 2021-01-14 f97983249a [dev.regabi] cmd/compile: move more PAUTOHEAP to SSA construction
+ 2021-01-14 4476300425 [dev.regabi] cmd/compile: use byte for CallExpr.Use
+ 2021-01-14 5a5ab24689 [dev.regabi] cmd/compile: do not rely on CallExpr.Rargs for detect already walked calls
+ 2021-01-14 983ac4b086 [dev.regabi] cmd/compile: fix ICE when initializing blank vars
+ 2021-01-13 7eb31d999c cmd/go: add hints to more missing sum error messages
+ 2021-01-13 d6d4673728 [dev.regabi] cmd/compile: fix GOEXPERIMENT=regabi builder
+ 2021-01-13 c41b999ad4 [dev.regabi] cmd/compile: refactor abiutils from "gc" into new "abi"
+ 2021-01-13 861707a8c8 [dev.regabi] cmd/compile: added limited //go:registerparams pragma for new ABI dev
+ 2021-01-13 c1370e918f [dev.regabi] cmd/compile: add code to support register ABI spills around morestack calls
+ 2021-01-13 2abd24f3b7 [dev.regabi] test: make run.go error messages slightly more informative
+ 2021-01-13 9a19481acb [dev.regabi] cmd/compile: make ordering for InvertFlags more stable
+ 2021-01-12 ba76567bc2 cmd/go/internal/modload: delete unused *mvsReqs.next method
+ 2021-01-12 665def2c11 encoding/asn1: document unmarshaling behavior for IMPLICIT string fields
+ 2021-01-11 81ea89adf3 cmd/go: fix non-script staleness checks interacting badly with GOFLAGS
+ 2021-01-11 759309029f doc: update editors.html for Go 1.16
+ 2021-01-11 c3b4c7093a cmd/internal/objfile: don't require runtime.symtab symbol for XCOFF
+ 2021-01-08 59bfc18e34 cmd/go: add hint to read 'go help vcs' to GOVCS errors
+ 2021-01-08 cd6f3a54e4 cmd/go: revise 'go help' documentation for modules
+ 2021-01-08 6192b98751 cmd/go: make hints in error messages more consistent
+ 2021-01-08 25886cf4bd cmd/go: preserve sums for indirect deps fetched by 'go mod download'
+ 2021-01-08 6250833911 runtime/metrics: mark histogram metrics as cumulative
+ 2021-01-08 8f6a9acbb3 runtime/metrics: remove unused StopTheWorld Description field
+ 2021-01-08 6598c65646 cmd/compile: fix exponential-time init-cycle reporting
+ 2021-01-08 fefad1dc85 test: fix timeout code for invoking compiler
+ 2021-01-08 6728118e0a cmd/go: pass signals forward during "go tool"
+ 2021-01-08 e65c543f3c go/build/constraint: add parser for build tag constraint expressions
+ 2021-01-08 0c5afc4fb7 testing/fstest,os: clarify racy behavior of TestFS
+ 2021-01-08 32afcc9436 runtime/metrics: change unit on *-by-size metrics to match bucket unit
+ 2021-01-08 c6513bca5a io/fs: minor corrections to Glob doc
+ 2021-01-08 304f769ffc cmd/compile: don't short-circuit copies whose source is volatile
+ 2021-01-08 ae97717133 runtime,runtime/metrics: use explicit histogram boundaries
+ 2021-01-08 a9ccd2d795 go/build: skip string literal while findEmbed
+ 2021-01-08 d92f8add32 archive/tar: fix typo in comment
+ 2021-01-08 cab1202183 cmd/link: accept extra blocks in TestFallocate
+ 2021-01-08 ee4d32249b io/fs: minor corrections to Glob release date
+ 2021-01-08 54bd1ccce2 cmd: update to latest golang.org/x/tools
+ 2021-01-07 9ec21a8f34 Revert "reflect: support multiple keys in struct tags"
+ 2021-01-07 091414b5b7 io/fs: correct WalkDirFunc documentation
+ 2021-01-07 9b55088d6b doc/go1.16: add release note for disallowing non-ASCII import paths
+ 2021-01-07 fa90aaca7d cmd/compile: fix late expand_calls leaf type for OpStructSelect/OpArraySelect
+ 2021-01-07 7cee66d4cb cmd/go: add documentation for Embed fields in go list output
+ 2021-01-07 e60cffa4ca html/template: attach functions to namespace
+ 2021-01-07 6da2d3b7d7 cmd/link: fix typo in asm.go
+ 2021-01-07 df81a15819 runtime: check mips64 VDSO clock_gettime return code
+ 2021-01-06 4787e906cf crypto/x509: rollback new CertificateRequest fields
+ 2021-01-06 c9658bee93 cmd/go: make module suggestion more friendly
+ 2021-01-06 4c668b25c6 runtime/metrics: fix panic message for Float64Histogram
+ 2021-01-06 d2131704a6 net/http/httputil: fix deadlock in DumpRequestOut
+ 2021-01-05 3e1e13ce6d cmd/go: set cfg.BuildMod to "readonly" by default with no module root
+ 2021-01-05 0b0d004983 cmd/go: pass embedcfg to gccgo if supported
+ 2021-01-05 1b85e7c057 cmd/go: don't scan gccgo standard library packages for imports
+ 2021-01-05 6b37b15d95 runtime: don't take allglock in tracebackothers
+ 2021-01-04 9eef49cfa6 math/rand: fix typo in comment
+ 2021-01-04 b01fb2af9e testing/fstest: fix typo in error message
+ 2021-01-01 3dd5867605 doc: 2021 is the Year of the Gopher
+ 2020-12-31 95ce805d14 io/fs: remove darwin/arm64 special condition
+ 2020-12-30 20d0991b86 lib/time, time/tzdata: update tzdata to 2020f
+ 2020-12-30 ed301733bb misc/cgo/testcarchive: remove special flags for Darwin/ARM
+ 2020-12-30 0ae2e032f2 misc/cgo/test: enable TestCrossPackageTests on darwin/arm64
+ 2020-12-29 780b4de16b misc/ios: fix wording for command line instructions
+ 2020-12-29 b4a71c95d2 doc/go1.16: reference misc/ios/README for how to build iOS programs
+ 2020-12-29 f83e0f6616 misc/ios: add to README how to build ios executables
+ 2020-12-28 4fd9455882 io/fs: fix typo in comment

Change-Id: If24bb93f1e1e7deb1d92ba223c85940ab93b2732
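For context on the //go:embed TODO mentioned above, here is a minimal sketch of the kind of file the missing check is meant to reject: it uses a //go:embed directive but never imports "embed". The file name, embedded file, and variable are invented for illustration; with the planned check in place the compiler should report an error here.

// sketch.go -- illustrative only; should be rejected once the TODO is resolved,
// because //go:embed is used without importing "embed".
package p

//go:embed hello.txt
var greeting string // would need: import _ "embed"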
Diffstat (limited to 'src/cmd/compile/internal')
-rw-r--r--  src/cmd/compile/internal/abi/abiutils.go (renamed from src/cmd/compile/internal/gc/abiutils.go) | 42
-rw-r--r--  src/cmd/compile/internal/arm64/ssa.go | 20
-rw-r--r--  src/cmd/compile/internal/base/base.go | 4
-rw-r--r--  src/cmd/compile/internal/base/flag.go | 2
-rw-r--r--  src/cmd/compile/internal/base/link.go | 36
-rw-r--r--  src/cmd/compile/internal/base/print.go | 2
-rw-r--r--  src/cmd/compile/internal/bitvec/bv.go | 2
-rw-r--r--  src/cmd/compile/internal/deadcode/deadcode.go | 2
-rw-r--r--  src/cmd/compile/internal/dwarfgen/dwarf.go | 36
-rw-r--r--  src/cmd/compile/internal/escape/escape.go | 192
-rw-r--r--  src/cmd/compile/internal/gc/compile.go | 21
-rw-r--r--  src/cmd/compile/internal/gc/main.go | 24
-rw-r--r--  src/cmd/compile/internal/gc/obj.go | 6
-rw-r--r--  src/cmd/compile/internal/inline/inl.go | 372
-rw-r--r--  src/cmd/compile/internal/ir/const.go | 2
-rw-r--r--  src/cmd/compile/internal/ir/expr.go | 47
-rw-r--r--  src/cmd/compile/internal/ir/fmt.go | 52
-rw-r--r--  src/cmd/compile/internal/ir/func.go | 9
-rw-r--r--  src/cmd/compile/internal/ir/mini.go | 7
-rw-r--r--  src/cmd/compile/internal/ir/name.go | 48
-rw-r--r--  src/cmd/compile/internal/ir/node.go | 41
-rw-r--r--  src/cmd/compile/internal/ir/node_gen.go | 65
-rw-r--r--  src/cmd/compile/internal/ir/op_string.go | 143
-rw-r--r--  src/cmd/compile/internal/ir/sizeof_test.go | 2
-rw-r--r--  src/cmd/compile/internal/ir/stmt.go | 34
-rw-r--r--  src/cmd/compile/internal/ir/symtab.go | 20
-rw-r--r--  src/cmd/compile/internal/liveness/bvset.go | 2
-rw-r--r--  src/cmd/compile/internal/liveness/plive.go | 59
-rw-r--r--  src/cmd/compile/internal/noder/decl.go | 8
-rw-r--r--  src/cmd/compile/internal/noder/import.go | 2
-rw-r--r--  src/cmd/compile/internal/noder/lex.go | 3
-rw-r--r--  src/cmd/compile/internal/noder/noder.go | 42
-rw-r--r--  src/cmd/compile/internal/objw/prog.go | 2
-rw-r--r--  src/cmd/compile/internal/pkginit/init.go | 4
-rw-r--r--  src/cmd/compile/internal/pkginit/initorder.go | 18
-rw-r--r--  src/cmd/compile/internal/reflectdata/alg.go | 2
-rw-r--r--  src/cmd/compile/internal/reflectdata/reflect.go | 147
-rw-r--r--  src/cmd/compile/internal/ssa/deadstore.go | 22
-rw-r--r--  src/cmd/compile/internal/ssa/expand_calls.go | 81
-rw-r--r--  src/cmd/compile/internal/ssa/func.go | 9
-rw-r--r--  src/cmd/compile/internal/ssa/gen/386.rules | 2
-rw-r--r--  src/cmd/compile/internal/ssa/gen/AMD64.rules | 2
-rw-r--r--  src/cmd/compile/internal/ssa/gen/ARM.rules | 2
-rw-r--r--  src/cmd/compile/internal/ssa/gen/ARM64.rules | 2
-rw-r--r--  src/cmd/compile/internal/ssa/gen/ARM64Ops.go | 32
-rw-r--r--  src/cmd/compile/internal/ssa/gen/PPC64.rules | 2
-rw-r--r--  src/cmd/compile/internal/ssa/gen/S390X.rules | 2
-rw-r--r--  src/cmd/compile/internal/ssa/gen/generic.rules | 4
-rw-r--r--  src/cmd/compile/internal/ssa/html.go | 2
-rw-r--r--  src/cmd/compile/internal/ssa/location.go | 26
-rw-r--r--  src/cmd/compile/internal/ssa/op.go | 10
-rw-r--r--  src/cmd/compile/internal/ssa/opGen.go | 40
-rw-r--r--  src/cmd/compile/internal/ssa/rewrite.go | 43
-rw-r--r--  src/cmd/compile/internal/ssa/rewrite386.go | 12
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteAMD64.go | 16
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteARM.go | 4
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteARM64.go | 8
-rw-r--r--  src/cmd/compile/internal/ssa/rewritePPC64.go | 16
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteS390X.go | 16
-rw-r--r--  src/cmd/compile/internal/ssa/rewritegeneric.go | 8
-rw-r--r--  src/cmd/compile/internal/ssagen/abi.go | 14
-rw-r--r--  src/cmd/compile/internal/ssagen/nowb.go | 4
-rw-r--r--  src/cmd/compile/internal/ssagen/pgen.go | 2
-rw-r--r--  src/cmd/compile/internal/ssagen/ssa.go | 465
-rw-r--r--  src/cmd/compile/internal/staticdata/data.go | 70
-rw-r--r--  src/cmd/compile/internal/staticdata/embed.go | 58
-rw-r--r--  src/cmd/compile/internal/staticinit/sched.go | 60
-rw-r--r--  src/cmd/compile/internal/test/abiutils_test.go (renamed from src/cmd/compile/internal/gc/abiutils_test.go) | 10
-rw-r--r--  src/cmd/compile/internal/test/abiutilsaux_test.go (renamed from src/cmd/compile/internal/gc/abiutilsaux_test.go) | 19
-rw-r--r--  src/cmd/compile/internal/test/testdata/reproducible/issue38068.go | 2
-rw-r--r--  src/cmd/compile/internal/typebits/typebits.go | 12
-rw-r--r--  src/cmd/compile/internal/typecheck/const.go | 2
-rw-r--r--  src/cmd/compile/internal/typecheck/dcl.go | 12
-rw-r--r--  src/cmd/compile/internal/typecheck/expr.go | 6
-rw-r--r--  src/cmd/compile/internal/typecheck/func.go | 44
-rw-r--r--  src/cmd/compile/internal/typecheck/iexport.go | 52
-rw-r--r--  src/cmd/compile/internal/typecheck/iimport.go | 92
-rw-r--r--  src/cmd/compile/internal/typecheck/stmt.go | 8
-rw-r--r--  src/cmd/compile/internal/typecheck/subr.go | 10
-rw-r--r--  src/cmd/compile/internal/typecheck/syms.go | 15
-rw-r--r--  src/cmd/compile/internal/typecheck/typecheck.go | 16
-rw-r--r--  src/cmd/compile/internal/types/alg.go | 4
-rw-r--r--  src/cmd/compile/internal/types/fmt.go | 2
-rw-r--r--  src/cmd/compile/internal/types/size.go | 41
-rw-r--r--  src/cmd/compile/internal/types/sym.go | 47
-rw-r--r--  src/cmd/compile/internal/types/type.go | 4
-rw-r--r--  src/cmd/compile/internal/types2/stdlib_test.go | 2
-rw-r--r--  src/cmd/compile/internal/walk/assign.go | 55
-rw-r--r--  src/cmd/compile/internal/walk/builtin.go | 37
-rw-r--r--  src/cmd/compile/internal/walk/closure.go | 2
-rw-r--r--  src/cmd/compile/internal/walk/compare.go | 4
-rw-r--r--  src/cmd/compile/internal/walk/complit.go | 46
-rw-r--r--  src/cmd/compile/internal/walk/convert.go | 42
-rw-r--r--  src/cmd/compile/internal/walk/expr.go | 60
-rw-r--r--  src/cmd/compile/internal/walk/order.go | 97
-rw-r--r--  src/cmd/compile/internal/walk/race.go | 9
-rw-r--r--  src/cmd/compile/internal/walk/range.go | 24
-rw-r--r--  src/cmd/compile/internal/walk/select.go | 15
-rw-r--r--  src/cmd/compile/internal/walk/stmt.go | 23
-rw-r--r--  src/cmd/compile/internal/walk/switch.go | 7
-rw-r--r--  src/cmd/compile/internal/walk/temp.go | 40
-rw-r--r--  src/cmd/compile/internal/walk/walk.go | 300
102 files changed, 1972 insertions, 1743 deletions
diff --git a/src/cmd/compile/internal/gc/abiutils.go b/src/cmd/compile/internal/abi/abiutils.go
index 5822c088f9..3ac59e6f75 100644
--- a/src/cmd/compile/internal/gc/abiutils.go
+++ b/src/cmd/compile/internal/abi/abiutils.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package abi
import (
"cmd/compile/internal/types"
@@ -28,7 +28,35 @@ type ABIParamResultInfo struct {
intSpillSlots int
floatSpillSlots int
offsetToSpillArea int64
- config ABIConfig // to enable String() method
+ config *ABIConfig // to enable String() method
+}
+
+func (a *ABIParamResultInfo) InParams() []ABIParamAssignment {
+ return a.inparams
+}
+
+func (a *ABIParamResultInfo) OutParams() []ABIParamAssignment {
+ return a.outparams
+}
+
+func (a *ABIParamResultInfo) InParam(i int) ABIParamAssignment {
+ return a.inparams[i]
+}
+
+func (a *ABIParamResultInfo) OutParam(i int) ABIParamAssignment {
+ return a.outparams[i]
+}
+
+func (a *ABIParamResultInfo) IntSpillCount() int {
+ return a.intSpillSlots
+}
+
+func (a *ABIParamResultInfo) FloatSpillCount() int {
+ return a.floatSpillSlots
+}
+
+func (a *ABIParamResultInfo) SpillAreaOffset() int64 {
+ return a.offsetToSpillArea
}
// RegIndex stores the index into the set of machine registers used by
@@ -66,11 +94,17 @@ type ABIConfig struct {
regAmounts RegAmounts
}
+// NewABIConfig returns a new ABI configuration for an architecture with
+// iRegsCount integer/pointer registers and fRegsCount floating point registers.
+func NewABIConfig(iRegsCount, fRegsCount int) *ABIConfig {
+ return &ABIConfig{RegAmounts{iRegsCount, fRegsCount}}
+}
+
// ABIAnalyze takes a function type 't' and an ABI rules description
// 'config' and analyzes the function to determine how its parameters
// and results will be passed (in registers or on the stack), returning
// an ABIParamResultInfo object that holds the results of the analysis.
-func ABIAnalyze(t *types.Type, config ABIConfig) ABIParamResultInfo {
+func ABIAnalyze(t *types.Type, config *ABIConfig) ABIParamResultInfo {
setup()
s := assignState{
rTotal: config.regAmounts,
@@ -124,7 +158,7 @@ func (c *RegAmounts) regString(r RegIndex) string {
// toString method renders an ABIParamAssignment in human-readable
// form, suitable for debugging or unit testing.
-func (ri *ABIParamAssignment) toString(config ABIConfig) string {
+func (ri *ABIParamAssignment) toString(config *ABIConfig) string {
regs := "R{"
for _, r := range ri.Registers {
regs += " " + config.regAmounts.regString(r)
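The hunks above make ABIConfig a pointer type and export accessors on ABIParamResultInfo. A rough caller-side sketch of how the exported API fits together; this is illustrative only, it is meaningful only inside cmd/compile, and the register counts and fnType below are placeholders rather than values taken from this patch.

// Hedged sketch, not part of the patch.
var fnType *types.Type               // placeholder: some function signature obtained elsewhere
cfg := abi.NewABIConfig(9, 15)       // illustrative integer/float register counts
info := abi.ABIAnalyze(fnType, cfg)  // decide register vs. stack for params and results
for _, p := range info.InParams() {  // assignment for each input parameter
	_ = p
}
_ = info.SpillAreaOffset()           // stack offset reserved for register spills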
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
index 8d25fa8592..73e74e1219 100644
--- a/src/cmd/compile/internal/arm64/ssa.go
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -1056,7 +1056,11 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
ssa.OpARM64LessThanF,
ssa.OpARM64LessEqualF,
ssa.OpARM64GreaterThanF,
- ssa.OpARM64GreaterEqualF:
+ ssa.OpARM64GreaterEqualF,
+ ssa.OpARM64NotLessThanF,
+ ssa.OpARM64NotLessEqualF,
+ ssa.OpARM64NotGreaterThanF,
+ ssa.OpARM64NotGreaterEqualF:
// generate boolean values using CSET
p := s.Prog(arm64.ACSET)
p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
@@ -1100,10 +1104,16 @@ var condBits = map[ssa.Op]int16{
ssa.OpARM64GreaterThanU: arm64.COND_HI,
ssa.OpARM64GreaterEqual: arm64.COND_GE,
ssa.OpARM64GreaterEqualU: arm64.COND_HS,
- ssa.OpARM64LessThanF: arm64.COND_MI,
- ssa.OpARM64LessEqualF: arm64.COND_LS,
- ssa.OpARM64GreaterThanF: arm64.COND_GT,
- ssa.OpARM64GreaterEqualF: arm64.COND_GE,
+ ssa.OpARM64LessThanF: arm64.COND_MI, // Less than
+ ssa.OpARM64LessEqualF: arm64.COND_LS, // Less than or equal to
+ ssa.OpARM64GreaterThanF: arm64.COND_GT, // Greater than
+ ssa.OpARM64GreaterEqualF: arm64.COND_GE, // Greater than or equal to
+
+ // The following condition codes have unordered to handle comparisons related to NaN.
+ ssa.OpARM64NotLessThanF: arm64.COND_PL, // Greater than, equal to, or unordered
+ ssa.OpARM64NotLessEqualF: arm64.COND_HI, // Greater than or unordered
+ ssa.OpARM64NotGreaterThanF: arm64.COND_LE, // Less than, equal to or unordered
+ ssa.OpARM64NotGreaterEqualF: arm64.COND_LT, // Less than or unordered
}
var blockJump = map[ssa.BlockKind]struct {
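The comment in the hunk above notes that the new Not* floating-point conditions must treat "unordered" (NaN) operands as true. A small standalone Go illustration of why the negated comparisons need that extra case; the PL/HI/LE/LT mapping it mentions is the one added in the table above.

package main

import (
	"fmt"
	"math"
)

func main() {
	nan := math.NaN()
	// Every ordered comparison involving NaN is false...
	fmt.Println(1.0 < nan, 1.0 > nan) // false false
	// ...so "not less than" must be true when the operands are unordered,
	// which is what the unordered-inclusive condition codes encode.
	fmt.Println(!(1.0 < nan)) // true
}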
diff --git a/src/cmd/compile/internal/base/base.go b/src/cmd/compile/internal/base/base.go
index 5a30fa6a33..3b9bc3a8af 100644
--- a/src/cmd/compile/internal/base/base.go
+++ b/src/cmd/compile/internal/base/base.go
@@ -6,12 +6,8 @@ package base
import (
"os"
-
- "cmd/internal/obj"
)
-var Ctxt *obj.Link
-
var atExitFuncs []func()
func AtExit(f func()) {
diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go
index 3602fb947d..d8ca9885cb 100644
--- a/src/cmd/compile/internal/base/flag.go
+++ b/src/cmd/compile/internal/base/flag.go
@@ -175,7 +175,7 @@ func ParseFlags() {
if (*Flag.Shared || *Flag.Dynlink || *Flag.LinkShared) && !Ctxt.Arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X) {
log.Fatalf("%s/%s does not support -shared", objabi.GOOS, objabi.GOARCH)
}
- parseSpectre(Flag.Spectre) // left as string for recordFlags
+ parseSpectre(Flag.Spectre) // left as string for RecordFlags
Ctxt.Flag_shared = Ctxt.Flag_dynlink || Ctxt.Flag_shared
Ctxt.Flag_optimize = Flag.N == 0
diff --git a/src/cmd/compile/internal/base/link.go b/src/cmd/compile/internal/base/link.go
new file mode 100644
index 0000000000..49fe4352b2
--- /dev/null
+++ b/src/cmd/compile/internal/base/link.go
@@ -0,0 +1,36 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "cmd/internal/obj"
+)
+
+var Ctxt *obj.Link
+
+// TODO(mdempsky): These should probably be obj.Link methods.
+
+// PkgLinksym returns the linker symbol for name within the given
+// package prefix. For user packages, prefix should be the package
+// path encoded with objabi.PathToPrefix.
+func PkgLinksym(prefix, name string, abi obj.ABI) *obj.LSym {
+ if name == "_" {
+ // TODO(mdempsky): Cleanup callers and Fatalf instead.
+ return linksym(prefix, "_", abi)
+ }
+ return linksym(prefix, prefix+"."+name, abi)
+}
+
+// Linkname returns the linker symbol for the given name as it might
+// appear within a //go:linkname directive.
+func Linkname(name string, abi obj.ABI) *obj.LSym {
+ return linksym("_", name, abi)
+}
+
+// linksym is an internal helper function for implementing the above
+// exported APIs.
+func linksym(pkg, name string, abi obj.ABI) *obj.LSym {
+ return Ctxt.LookupABIInit(name, abi, func(r *obj.LSym) { r.Pkg = pkg })
+}
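The new file above centralizes linker-symbol lookup in package base. A hedged sketch of how a caller inside cmd/compile might use the two exported helpers; the import path and symbol names are invented for illustration, and base.Ctxt must already be initialized.

// Hedged sketch, not part of the patch.
prefix := objabi.PathToPrefix("example.com/some/pkg")      // encode the import path
fooSym := base.PkgLinksym(prefix, "Foo", obj.ABIInternal)  // symbol for pkg.Foo
rawSym := base.Linkname("example_pkg.hidden", obj.ABI0)    // a //go:linkname-style name
_, _ = fooSym, rawSym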
diff --git a/src/cmd/compile/internal/base/print.go b/src/cmd/compile/internal/base/print.go
index 9855dfdad0..668c600d31 100644
--- a/src/cmd/compile/internal/base/print.go
+++ b/src/cmd/compile/internal/base/print.go
@@ -121,7 +121,7 @@ func ErrorfAt(pos src.XPos, format string, args ...interface{}) {
lasterror.syntax = pos
} else {
// only one of multiple equal non-syntax errors per line
- // (flusherrors shows only one of them, so we filter them
+ // (FlushErrors shows only one of them, so we filter them
// here as best as we can (they may not appear in order)
// so that we don't count them here and exit early, and
// then have nothing to show for.)
diff --git a/src/cmd/compile/internal/bitvec/bv.go b/src/cmd/compile/internal/bitvec/bv.go
index 1e084576d1..bcac1fe351 100644
--- a/src/cmd/compile/internal/bitvec/bv.go
+++ b/src/cmd/compile/internal/bitvec/bv.go
@@ -37,7 +37,7 @@ func NewBulk(nbit int32, count int32) Bulk {
nword := (nbit + wordBits - 1) / wordBits
size := int64(nword) * int64(count)
if int64(int32(size*4)) != size*4 {
- base.Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
+ base.Fatalf("NewBulk too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
}
return Bulk{
words: make([]uint32, size),
diff --git a/src/cmd/compile/internal/deadcode/deadcode.go b/src/cmd/compile/internal/deadcode/deadcode.go
index c409320fc4..520203787f 100644
--- a/src/cmd/compile/internal/deadcode/deadcode.go
+++ b/src/cmd/compile/internal/deadcode/deadcode.go
@@ -75,7 +75,7 @@ func stmts(nn *ir.Nodes) {
// might be the target of a goto. See issue 28616.
if body := body; len(body) != 0 {
switch body[(len(body) - 1)].Op() {
- case ir.ORETURN, ir.ORETJMP, ir.OPANIC:
+ case ir.ORETURN, ir.OTAILCALL, ir.OPANIC:
if i > lastLabel {
cut = true
}
diff --git a/src/cmd/compile/internal/dwarfgen/dwarf.go b/src/cmd/compile/internal/dwarfgen/dwarf.go
index ff249c1f4e..dd22c033cc 100644
--- a/src/cmd/compile/internal/dwarfgen/dwarf.go
+++ b/src/cmd/compile/internal/dwarfgen/dwarf.go
@@ -28,7 +28,7 @@ func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope,
if fn.Nname != nil {
expect := fn.Linksym()
if fnsym.ABI() == obj.ABI0 {
- expect = fn.Sym().LinksymABI0()
+ expect = fn.LinksymABI(obj.ABI0)
}
if fnsym != expect {
base.Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
@@ -136,7 +136,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir
// Collect a raw list of DWARF vars.
var vars []*dwarf.Var
var decls []*ir.Name
- var selected map[*ir.Name]bool
+ var selected ir.NameSet
if base.Ctxt.Flag_locationlists && base.Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK {
decls, vars, selected = createComplexVars(fnsym, fn)
} else {
@@ -161,7 +161,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir
// For non-SSA-able arguments, however, the correct information
// is known -- they have a single home on the stack.
for _, n := range dcl {
- if _, found := selected[n]; found {
+ if selected.Has(n) {
continue
}
c := n.Sym().Name[0]
@@ -186,19 +186,11 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir
isReturnValue := (n.Class == ir.PPARAMOUT)
if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
- } else if n.Class == ir.PAUTOHEAP {
- // If dcl in question has been promoted to heap, do a bit
- // of extra work to recover original class (auto or param);
- // see issue 30908. This insures that we get the proper
- // signature in the abstract function DIE, but leaves a
- // misleading location for the param (we want pointer-to-heap
- // and not stack).
+ }
+ if n.Esc() == ir.EscHeap {
+ // The variable in question has been promoted to the heap.
+ // Its address is in n.Heapaddr.
// TODO(thanm): generate a better location expression
- stackcopy := n.Stackcopy
- if stackcopy != nil && (stackcopy.Class == ir.PPARAM || stackcopy.Class == ir.PPARAMOUT) {
- abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
- isReturnValue = (stackcopy.Class == ir.PPARAMOUT)
- }
}
inlIndex := 0
if base.Flag.GenDwarfInl > 1 {
@@ -252,10 +244,10 @@ func preInliningDcls(fnsym *obj.LSym) []*ir.Name {
// createSimpleVars creates a DWARF entry for every variable declared in the
// function, claiming that they are permanently on the stack.
-func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, map[*ir.Name]bool) {
+func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, ir.NameSet) {
var vars []*dwarf.Var
var decls []*ir.Name
- selected := make(map[*ir.Name]bool)
+ var selected ir.NameSet
for _, n := range apDecls {
if ir.IsAutoTmp(n) {
continue
@@ -263,7 +255,7 @@ func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf
decls = append(decls, n)
vars = append(vars, createSimpleVar(fnsym, n))
- selected[n] = true
+ selected.Add(n)
}
return decls, vars, selected
}
@@ -320,19 +312,19 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var {
// createComplexVars creates recomposed DWARF vars with location lists,
// suitable for describing optimized code.
-func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, map[*ir.Name]bool) {
+func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, ir.NameSet) {
debugInfo := fn.DebugInfo.(*ssa.FuncDebug)
// Produce a DWARF variable entry for each user variable.
var decls []*ir.Name
var vars []*dwarf.Var
- ssaVars := make(map[*ir.Name]bool)
+ var ssaVars ir.NameSet
for varID, dvar := range debugInfo.Vars {
n := dvar
- ssaVars[n] = true
+ ssaVars.Add(n)
for _, slot := range debugInfo.VarSlots[varID] {
- ssaVars[debugInfo.Slots[slot].N] = true
+ ssaVars.Add(debugInfo.Slots[slot].N)
}
if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil {
diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go
index bee3878f10..883e68a730 100644
--- a/src/cmd/compile/internal/escape/escape.go
+++ b/src/cmd/compile/internal/escape/escape.go
@@ -218,6 +218,10 @@ func Batch(fns []*ir.Func, recursive bool) {
// Construct data-flow graph from syntax trees.
for _, fn := range fns {
+ if base.Flag.W > 1 {
+ s := fmt.Sprintf("\nbefore escape %v", fn)
+ ir.Dump(s, fn)
+ }
b.initFunc(fn)
}
for _, fn := range fns {
@@ -534,8 +538,8 @@ func (e *escape) stmt(n ir.Node) {
e.stmts(n.Call.Init())
e.call(nil, n.Call, n)
- case ir.ORETJMP:
- // TODO(mdempsky): What do? esc.go just ignores it.
+ case ir.OTAILCALL:
+ // TODO(mdempsky): Treat like a normal call? esc.go used to just ignore it.
}
}
@@ -585,7 +589,7 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) {
default:
base.Fatalf("unexpected expr: %v", n)
- case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OTYPE, ir.OMETHEXPR:
+ case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OTYPE, ir.OMETHEXPR, ir.OLINKSYMOFFSET:
// nop
case ir.ONAME:
@@ -598,10 +602,6 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) {
}
e.flow(k, e.oldLoc(n))
- case ir.ONAMEOFFSET:
- n := n.(*ir.NameOffsetExpr)
- e.expr(k, n.Name_)
-
case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
n := n.(*ir.UnaryExpr)
e.discard(n.X)
@@ -856,7 +856,7 @@ func (e *escape) discards(l ir.Nodes) {
}
}
-// addr evaluates an addressable expression n and returns an EscHole
+// addr evaluates an addressable expression n and returns a hole
// that represents storing into the represented location.
func (e *escape) addr(n ir.Node) hole {
if n == nil || ir.IsBlank(n) {
@@ -875,9 +875,8 @@ func (e *escape) addr(n ir.Node) hole {
break
}
k = e.oldLoc(n).asHole()
- case ir.ONAMEOFFSET:
- n := n.(*ir.NameOffsetExpr)
- k = e.addr(n.Name_)
+ case ir.OLINKSYMOFFSET:
+ break
case ir.ODOT:
n := n.(*ir.SelectorExpr)
k = e.addr(n.X)
@@ -1658,7 +1657,14 @@ func (b *batch) finish(fns []*ir.Func) {
// Update n.Esc based on escape analysis results.
if loc.escapes {
- if n.Op() != ir.ONAME {
+ if n.Op() == ir.ONAME {
+ if base.Flag.CompilingRuntime {
+ base.ErrorfAt(n.Pos(), "%v escapes to heap, not allowed in runtime", n)
+ }
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(n.Pos(), "moved to heap: %v", n)
+ }
+ } else {
if base.Flag.LowerM != 0 {
base.WarnfAt(n.Pos(), "%v escapes to heap", n)
}
@@ -1668,7 +1674,6 @@ func (b *batch) finish(fns []*ir.Func) {
}
}
n.SetEsc(ir.EscHeap)
- addrescapes(n)
} else {
if base.Flag.LowerM != 0 && n.Op() != ir.ONAME {
base.WarnfAt(n.Pos(), "%v does not escape", n)
@@ -1779,7 +1784,7 @@ func (l leaks) Encode() string {
return s
}
-// parseLeaks parses a binary string representing an EscLeaks.
+// parseLeaks parses a binary string representing a leaks
func parseLeaks(s string) leaks {
var l leaks
if !strings.HasPrefix(s, "esc:") {
@@ -2014,165 +2019,6 @@ func HeapAllocReason(n ir.Node) string {
return ""
}
-// addrescapes tags node n as having had its address taken
-// by "increasing" the "value" of n.Esc to EscHeap.
-// Storage is allocated as necessary to allow the address
-// to be taken.
-func addrescapes(n ir.Node) {
- switch n.Op() {
- default:
- // Unexpected Op, probably due to a previous type error. Ignore.
-
- case ir.ODEREF, ir.ODOTPTR:
- // Nothing to do.
-
- case ir.ONAME:
- n := n.(*ir.Name)
- if n == ir.RegFP {
- break
- }
-
- // if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping.
- // on PPARAM it means something different.
- if n.Class == ir.PAUTO && n.Esc() == ir.EscNever {
- break
- }
-
- // If a closure reference escapes, mark the outer variable as escaping.
- if n.IsClosureVar() {
- addrescapes(n.Defn)
- break
- }
-
- if n.Class != ir.PPARAM && n.Class != ir.PPARAMOUT && n.Class != ir.PAUTO {
- break
- }
-
- // This is a plain parameter or local variable that needs to move to the heap,
- // but possibly for the function outside the one we're compiling.
- // That is, if we have:
- //
- // func f(x int) {
- // func() {
- // global = &x
- // }
- // }
- //
- // then we're analyzing the inner closure but we need to move x to the
- // heap in f, not in the inner closure. Flip over to f before calling moveToHeap.
- oldfn := ir.CurFunc
- ir.CurFunc = n.Curfn
- ln := base.Pos
- base.Pos = ir.CurFunc.Pos()
- moveToHeap(n)
- ir.CurFunc = oldfn
- base.Pos = ln
-
- // ODOTPTR has already been introduced,
- // so these are the non-pointer ODOT and OINDEX.
- // In &x[0], if x is a slice, then x does not
- // escape--the pointer inside x does, but that
- // is always a heap pointer anyway.
- case ir.ODOT:
- n := n.(*ir.SelectorExpr)
- addrescapes(n.X)
- case ir.OINDEX:
- n := n.(*ir.IndexExpr)
- if !n.X.Type().IsSlice() {
- addrescapes(n.X)
- }
- case ir.OPAREN:
- n := n.(*ir.ParenExpr)
- addrescapes(n.X)
- case ir.OCONVNOP:
- n := n.(*ir.ConvExpr)
- addrescapes(n.X)
- }
-}
-
-// moveToHeap records the parameter or local variable n as moved to the heap.
-func moveToHeap(n *ir.Name) {
- if base.Flag.LowerR != 0 {
- ir.Dump("MOVE", n)
- }
- if base.Flag.CompilingRuntime {
- base.Errorf("%v escapes to heap, not allowed in runtime", n)
- }
- if n.Class == ir.PAUTOHEAP {
- ir.Dump("n", n)
- base.Fatalf("double move to heap")
- }
-
- // Allocate a local stack variable to hold the pointer to the heap copy.
- // temp will add it to the function declaration list automatically.
- heapaddr := typecheck.Temp(types.NewPtr(n.Type()))
- heapaddr.SetSym(typecheck.Lookup("&" + n.Sym().Name))
- heapaddr.SetPos(n.Pos())
-
- // Unset AutoTemp to persist the &foo variable name through SSA to
- // liveness analysis.
- // TODO(mdempsky/drchase): Cleaner solution?
- heapaddr.SetAutoTemp(false)
-
- // Parameters have a local stack copy used at function start/end
- // in addition to the copy in the heap that may live longer than
- // the function.
- if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
- if n.FrameOffset() == types.BADWIDTH {
- base.Fatalf("addrescapes before param assignment")
- }
-
- // We rewrite n below to be a heap variable (indirection of heapaddr).
- // Preserve a copy so we can still write code referring to the original,
- // and substitute that copy into the function declaration list
- // so that analyses of the local (on-stack) variables use it.
- stackcopy := typecheck.NewName(n.Sym())
- stackcopy.SetType(n.Type())
- stackcopy.SetFrameOffset(n.FrameOffset())
- stackcopy.Class = n.Class
- stackcopy.Heapaddr = heapaddr
- if n.Class == ir.PPARAMOUT {
- // Make sure the pointer to the heap copy is kept live throughout the function.
- // The function could panic at any point, and then a defer could recover.
- // Thus, we need the pointer to the heap copy always available so the
- // post-deferreturn code can copy the return value back to the stack.
- // See issue 16095.
- heapaddr.SetIsOutputParamHeapAddr(true)
- }
- n.Stackcopy = stackcopy
-
- // Substitute the stackcopy into the function variable list so that
- // liveness and other analyses use the underlying stack slot
- // and not the now-pseudo-variable n.
- found := false
- for i, d := range ir.CurFunc.Dcl {
- if d == n {
- ir.CurFunc.Dcl[i] = stackcopy
- found = true
- break
- }
- // Parameters are before locals, so can stop early.
- // This limits the search even in functions with many local variables.
- if d.Class == ir.PAUTO {
- break
- }
- }
- if !found {
- base.Fatalf("cannot find %v in local variable list", n)
- }
- ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n)
- }
-
- // Modify n in place so that uses of n now mean indirection of the heapaddr.
- n.Class = ir.PAUTOHEAP
- n.SetFrameOffset(0)
- n.Heapaddr = heapaddr
- n.SetEsc(ir.EscHeap)
- if base.Flag.LowerM != 0 {
- base.WarnfAt(n.Pos(), "moved to heap: %v", n)
- }
-}
-
// This special tag is applied to uintptr variables
// that we believe may hold unsafe.Pointers for
// calls into assembly functions.
diff --git a/src/cmd/compile/internal/gc/compile.go b/src/cmd/compile/internal/gc/compile.go
index 410b3e90ea..ba67c58c45 100644
--- a/src/cmd/compile/internal/gc/compile.go
+++ b/src/cmd/compile/internal/gc/compile.go
@@ -13,7 +13,6 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/liveness"
- "cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
@@ -73,7 +72,7 @@ func enqueueFunc(fn *ir.Func) {
func prepareFunc(fn *ir.Func) {
// Set up the function's LSym early to avoid data races with the assemblers.
// Do this before walk, as walk needs the LSym to set attributes/relocations
- // (e.g. in markTypeUsedInInterface).
+ // (e.g. in MarkTypeUsedInInterface).
ssagen.InitLSym(fn, true)
// Calculate parameter offsets.
@@ -84,24 +83,6 @@ func prepareFunc(fn *ir.Func) {
walk.Walk(fn)
ir.CurFunc = nil // enforce no further uses of CurFunc
typecheck.DeclContext = ir.PEXTERN
-
- // Make sure type syms are declared for all types that might
- // be types of stack objects. We need to do this here
- // because symbols must be allocated before the parallel
- // phase of the compiler.
- for _, n := range fn.Dcl {
- switch n.Class {
- case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO:
- if liveness.ShouldTrack(n) && n.Addrtaken() {
- reflectdata.WriteType(n.Type())
- // Also make sure we allocate a linker symbol
- // for the stack object data, for the same reason.
- if fn.LSym.Func().StackObjects == nil {
- fn.LSym.Func().StackObjects = base.Ctxt.Lookup(fn.LSym.Name + ".stkobj")
- }
- }
- }
- }
}
// compileFunctions compiles all functions in compilequeue.
diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
index 9ecdd510b1..726a0685d5 100644
--- a/src/cmd/compile/internal/gc/main.go
+++ b/src/cmd/compile/internal/gc/main.go
@@ -96,16 +96,6 @@ func Main(archInit func(*ssagen.ArchInfo)) {
ir.Pkgs.Itab = types.NewPkg("go.itab", "go.itab")
ir.Pkgs.Itab.Prefix = "go.itab" // not go%2eitab
- ir.Pkgs.Itablink = types.NewPkg("go.itablink", "go.itablink")
- ir.Pkgs.Itablink.Prefix = "go.itablink" // not go%2eitablink
-
- ir.Pkgs.Track = types.NewPkg("go.track", "go.track")
- ir.Pkgs.Track.Prefix = "go.track" // not go%2etrack
-
- // pseudo-package used for map zero values
- ir.Pkgs.Map = types.NewPkg("go.map", "go.map")
- ir.Pkgs.Map.Prefix = "go.map"
-
// pseudo-package used for methods with anonymous receivers
ir.Pkgs.Go = types.NewPkg("go", "")
@@ -121,7 +111,7 @@ func Main(archInit func(*ssagen.ArchInfo)) {
log.Fatalf("compiler not built with support for -t")
}
- // Enable inlining (after recordFlags, to avoid recording the rewritten -l). For now:
+ // Enable inlining (after RecordFlags, to avoid recording the rewritten -l). For now:
// default: inlining on. (Flag.LowerL == 1)
// -l: inlining off (Flag.LowerL == 0)
// -l=2, -l=3: inlining on again, with extra debugging (Flag.LowerL > 1)
@@ -160,12 +150,6 @@ func Main(archInit func(*ssagen.ArchInfo)) {
ssagen.Arch.LinkArch.Init(base.Ctxt)
startProfile()
- if base.Flag.Race {
- ir.Pkgs.Race = types.NewPkg("runtime/race", "")
- }
- if base.Flag.MSan {
- ir.Pkgs.Msan = types.NewPkg("runtime/msan", "")
- }
if base.Flag.Race || base.Flag.MSan {
base.Flag.Cfg.Instrumenting = true
}
@@ -193,7 +177,7 @@ func Main(archInit func(*ssagen.ArchInfo)) {
typecheck.Target = new(ir.Package)
typecheck.NeedITab = func(t, iface *types.Type) { reflectdata.ITabAddr(t, iface) }
- typecheck.NeedRuntimeType = reflectdata.NeedRuntimeType // TODO(rsc): typenamesym for lock?
+ typecheck.NeedRuntimeType = reflectdata.NeedRuntimeType // TODO(rsc): TypeSym for lock?
base.AutogeneratedPos = makePos(src.NewFileBase("<autogenerated>", "<autogenerated>"), 1, 0)
@@ -261,7 +245,7 @@ func Main(archInit func(*ssagen.ArchInfo)) {
escape.Funcs(typecheck.Target.Decls)
// Collect information for go:nowritebarrierrec
- // checking. This must happen before transformclosure.
+ // checking. This must happen before transforming closures during Walk
// We'll do the final check after write barriers are
// inserted.
if base.Flag.CompilingRuntime {
@@ -269,7 +253,7 @@ func Main(archInit func(*ssagen.ArchInfo)) {
}
// Prepare for SSA compilation.
- // This must be before peekitabs, because peekitabs
+ // This must be before CompileITabs, because CompileITabs
// can trigger function compilation.
typecheck.InitRuntime()
ssagen.InitConfig()
diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go
index 753db80f76..0472af7441 100644
--- a/src/cmd/compile/internal/gc/obj.go
+++ b/src/cmd/compile/internal/gc/obj.go
@@ -111,7 +111,6 @@ func dumpdata() {
numDecls := len(typecheck.Target.Decls)
dumpglobls(typecheck.Target.Externs)
- staticdata.WriteFuncSyms()
reflectdata.CollectPTabs()
numExports := len(typecheck.Target.Exports)
addsignats(typecheck.Target.Externs)
@@ -122,7 +121,7 @@ func dumpdata() {
reflectdata.WriteBasicTypes()
dumpembeds()
- // Calls to dumpsignats can generate functions,
+ // Calls to WriteRuntimeTypes can generate functions,
// like method wrappers and hash and equality routines.
// Compile any generated functions, process any new resulting types, repeat.
// This can't loop forever, because there is no way to generate an infinite
@@ -147,10 +146,11 @@ func dumpdata() {
dumpglobls(typecheck.Target.Externs[numExterns:])
if reflectdata.ZeroSize > 0 {
- zero := ir.Pkgs.Map.Lookup("zero").Linksym()
+ zero := base.PkgLinksym("go.map", "zero", obj.ABI0)
objw.Global(zero, int32(reflectdata.ZeroSize), obj.DUPOK|obj.RODATA)
}
+ staticdata.WriteFuncSyms()
addGCLocals()
if numExports != len(typecheck.Target.Exports) {
diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go
index 6f5f6499ce..f0be169f56 100644
--- a/src/cmd/compile/internal/inline/inl.go
+++ b/src/cmd/compile/internal/inline/inl.go
@@ -4,7 +4,7 @@
//
// The inlining facility makes 2 passes: first caninl determines which
// functions are suitable for inlining, and for those that are it
-// saves a copy of the body. Then inlcalls walks each function body to
+// saves a copy of the body. Then InlineCalls walks each function body to
// expand calls to inlinable functions.
//
// The Debug.l flag controls the aggressiveness. Note that main() swaps level 0 and 1,
@@ -27,7 +27,6 @@
package inline
import (
- "errors"
"fmt"
"go/constant"
"strings"
@@ -74,12 +73,12 @@ func InlinePackage() {
})
}
-// Caninl determines whether fn is inlineable.
+// CanInline determines whether fn is inlineable.
// If so, CanInline saves fn->nbody in fn->inl and substitutes it with a copy.
// fn and ->nbody will already have been typechecked.
func CanInline(fn *ir.Func) {
if fn.Nname == nil {
- base.Fatalf("caninl no nname %+v", fn)
+ base.Fatalf("CanInline no nname %+v", fn)
}
var reason string // reason, if any, that the function was not inlined
@@ -144,7 +143,7 @@ func CanInline(fn *ir.Func) {
}
if fn.Typecheck() == 0 {
- base.Fatalf("caninl on non-typechecked function %v", fn)
+ base.Fatalf("CanInline on non-typechecked function %v", fn)
}
n := fn.Nname
@@ -170,7 +169,6 @@ func CanInline(fn *ir.Func) {
visitor := hairyVisitor{
budget: inlineMaxBudget,
extraCallCost: cc,
- usedLocals: make(map[*ir.Name]bool),
}
if visitor.tooHairy(fn) {
reason = visitor.reason
@@ -180,7 +178,7 @@ func CanInline(fn *ir.Func) {
n.Func.Inl = &ir.Inline{
Cost: inlineMaxBudget - visitor.budget,
Dcl: pruneUnusedAutos(n.Defn.(*ir.Func).Dcl, &visitor),
- Body: ir.DeepCopyList(src.NoXPos, fn.Body),
+ Body: inlcopylist(fn.Body),
}
if base.Flag.LowerM > 1 {
@@ -200,11 +198,11 @@ func Inline_Flood(n *ir.Name, exportsym func(*ir.Name)) {
return
}
if n.Op() != ir.ONAME || n.Class != ir.PFUNC {
- base.Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op(), n.Class)
+ base.Fatalf("Inline_Flood: unexpected %v, %v, %v", n, n.Op(), n.Class)
}
fn := n.Func
if fn == nil {
- base.Fatalf("inlFlood: missing Func on %v", n)
+ base.Fatalf("Inline_Flood: missing Func on %v", n)
}
if fn.Inl == nil {
return
@@ -217,10 +215,8 @@ func Inline_Flood(n *ir.Name, exportsym func(*ir.Name)) {
typecheck.ImportedBody(fn)
- // Recursively identify all referenced functions for
- // reexport. We want to include even non-called functions,
- // because after inlining they might be callable.
- ir.VisitList(ir.Nodes(fn.Inl.Body), func(n ir.Node) {
+ var doFlood func(n ir.Node)
+ doFlood = func(n ir.Node) {
switch n.Op() {
case ir.OMETHEXPR, ir.ODOTMETH:
Inline_Flood(ir.MethodExprName(n), exportsym)
@@ -239,15 +235,16 @@ func Inline_Flood(n *ir.Name, exportsym func(*ir.Name)) {
// Okay, because we don't yet inline indirect
// calls to method values.
case ir.OCLOSURE:
- // If the closure is inlinable, we'll need to
- // flood it too. But today we don't support
- // inlining functions that contain closures.
- //
- // When we do, we'll probably want:
- // inlFlood(n.Func.Closure.Func.Nname)
- base.Fatalf("unexpected closure in inlinable function")
+ // VisitList doesn't visit closure bodies, so force a
+ // recursive call to VisitList on the body of the closure.
+ ir.VisitList(n.(*ir.ClosureExpr).Func.Body, doFlood)
}
- })
+ }
+
+ // Recursively identify all referenced functions for
+ // reexport. We want to include even non-called functions,
+ // because after inlining they might be callable.
+ ir.VisitList(ir.Nodes(fn.Inl.Body), doFlood)
}
// hairyVisitor visits a function body to determine its inlining
@@ -256,18 +253,13 @@ type hairyVisitor struct {
budget int32
reason string
extraCallCost int32
- usedLocals map[*ir.Name]bool
- do func(ir.Node) error
+ usedLocals ir.NameSet
+ do func(ir.Node) bool
}
-var errBudget = errors.New("too expensive")
-
func (v *hairyVisitor) tooHairy(fn *ir.Func) bool {
v.do = v.doNode // cache closure
-
- err := errChildren(fn, v.do)
- if err != nil {
- v.reason = err.Error()
+ if ir.DoChildren(fn, v.do) {
return true
}
if v.budget < 0 {
@@ -277,11 +269,10 @@ func (v *hairyVisitor) tooHairy(fn *ir.Func) bool {
return false
}
-func (v *hairyVisitor) doNode(n ir.Node) error {
+func (v *hairyVisitor) doNode(n ir.Node) bool {
if n == nil {
- return nil
+ return false
}
-
switch n.Op() {
// Call is okay if inlinable and we have the budget for the body.
case ir.OCALLFUNC:
@@ -295,7 +286,8 @@ func (v *hairyVisitor) doNode(n ir.Node) error {
if name.Class == ir.PFUNC && types.IsRuntimePkg(name.Sym().Pkg) {
fn := name.Sym().Name
if fn == "getcallerpc" || fn == "getcallersp" {
- return errors.New("call to " + fn)
+ v.reason = "call to " + fn
+ return true
}
if fn == "throw" {
v.budget -= inlineExtraThrowCost
@@ -346,38 +338,61 @@ func (v *hairyVisitor) doNode(n ir.Node) error {
v.budget -= v.extraCallCost
case ir.OPANIC:
+ n := n.(*ir.UnaryExpr)
+ if n.X.Op() == ir.OCONVIFACE && n.X.(*ir.ConvExpr).Implicit() {
+ // Hack to keep reflect.flag.mustBe inlinable for TestIntendedInlining.
+ // Before CL 284412, these conversions were introduced later in the
+ // compiler, so they didn't count against inlining budget.
+ v.budget++
+ }
v.budget -= inlineExtraPanicCost
case ir.ORECOVER:
// recover matches the argument frame pointer to find
// the right panic value, so it needs an argument frame.
- return errors.New("call to recover")
+ v.reason = "call to recover"
+ return true
+
+ case ir.OCLOSURE:
+ // TODO(danscales,mdempsky): Get working with -G.
+ // Probably after #43818 is fixed.
+ if base.Flag.G > 0 {
+ v.reason = "inlining closures not yet working with -G"
+ return true
+ }
- case ir.OCLOSURE,
- ir.ORANGE,
+ // TODO(danscales) - fix some bugs when budget is lowered below 30
+ // Maybe make budget proportional to number of closure variables, e.g.:
+ //v.budget -= int32(len(n.(*ir.ClosureExpr).Func.ClosureVars) * 3)
+ v.budget -= 30
+
+ case ir.ORANGE,
ir.OSELECT,
ir.OGO,
ir.ODEFER,
ir.ODCLTYPE, // can't print yet
- ir.ORETJMP:
- return errors.New("unhandled op " + n.Op().String())
+ ir.OTAILCALL:
+ v.reason = "unhandled op " + n.Op().String()
+ return true
case ir.OAPPEND:
v.budget -= inlineExtraAppendCost
case ir.ODCLCONST, ir.OFALL:
// These nodes don't produce code; omit from inlining budget.
- return nil
+ return false
case ir.OFOR, ir.OFORUNTIL:
n := n.(*ir.ForStmt)
if n.Label != nil {
- return errors.New("labeled control")
+ v.reason = "labeled control"
+ return true
}
case ir.OSWITCH:
n := n.(*ir.SwitchStmt)
if n.Label != nil {
- return errors.New("labeled control")
+ v.reason = "labeled control"
+ return true
}
// case ir.ORANGE, ir.OSELECT in "unhandled" above
@@ -393,22 +408,15 @@ func (v *hairyVisitor) doNode(n ir.Node) error {
if ir.IsConst(n.Cond, constant.Bool) {
// This if and the condition cost nothing.
// TODO(rsc): It seems strange that we visit the dead branch.
- if err := errList(n.Init(), v.do); err != nil {
- return err
- }
- if err := errList(n.Body, v.do); err != nil {
- return err
- }
- if err := errList(n.Else, v.do); err != nil {
- return err
- }
- return nil
+ return doList(n.Init(), v.do) ||
+ doList(n.Body, v.do) ||
+ doList(n.Else, v.do)
}
case ir.ONAME:
n := n.(*ir.Name)
if n.Class == ir.PAUTO {
- v.usedLocals[n] = true
+ v.usedLocals.Add(n)
}
case ir.OBLOCK:
@@ -428,10 +436,11 @@ func (v *hairyVisitor) doNode(n ir.Node) error {
// When debugging, don't stop early, to get full cost of inlining this function
if v.budget < 0 && base.Flag.LowerM < 2 && !logopt.Enabled() {
- return errBudget
+ v.reason = "too expensive"
+ return true
}
- return errChildren(n, v.do)
+ return ir.DoChildren(n, v.do)
}
func isBigFunc(fn *ir.Func) bool {
@@ -442,6 +451,52 @@ func isBigFunc(fn *ir.Func) bool {
})
}
+// inlcopylist (together with inlcopy) recursively copies a list of nodes, except
+// that it keeps the same ONAME, OTYPE, and OLITERAL nodes. It is used for copying
+// the body and dcls of an inlineable function.
+func inlcopylist(ll []ir.Node) []ir.Node {
+ s := make([]ir.Node, len(ll))
+ for i, n := range ll {
+ s[i] = inlcopy(n)
+ }
+ return s
+}
+
+// inlcopy is like DeepCopy(), but does extra work to copy closures.
+func inlcopy(n ir.Node) ir.Node {
+ var edit func(ir.Node) ir.Node
+ edit = func(x ir.Node) ir.Node {
+ switch x.Op() {
+ case ir.ONAME, ir.OTYPE, ir.OLITERAL, ir.ONIL:
+ return x
+ }
+ m := ir.Copy(x)
+ ir.EditChildren(m, edit)
+ if x.Op() == ir.OCLOSURE {
+ x := x.(*ir.ClosureExpr)
+ // Need to save/duplicate x.Func.Nname,
+ // x.Func.Nname.Ntype, x.Func.Dcl, x.Func.ClosureVars, and
+ // x.Func.Body for iexport and local inlining.
+ oldfn := x.Func
+ newfn := ir.NewFunc(oldfn.Pos())
+ if oldfn.ClosureCalled() {
+ newfn.SetClosureCalled(true)
+ }
+ m.(*ir.ClosureExpr).Func = newfn
+ newfn.Nname = ir.NewNameAt(oldfn.Nname.Pos(), oldfn.Nname.Sym())
+ // XXX OK to share fn.Type() ??
+ newfn.Nname.SetType(oldfn.Nname.Type())
+ newfn.Nname.Ntype = inlcopy(oldfn.Nname.Ntype).(ir.Ntype)
+ newfn.Body = inlcopylist(oldfn.Body)
+ // Make shallow copy of the Dcl and ClosureVar slices
+ newfn.Dcl = append([]*ir.Name(nil), oldfn.Dcl...)
+ newfn.ClosureVars = append([]*ir.Name(nil), oldfn.ClosureVars...)
+ }
+ return m
+ }
+ return edit(n)
+}
+
// Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any
// calls made to inlineable functions. This is the external entry point.
func InlineCalls(fn *ir.Func) {
@@ -762,13 +817,6 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
if ln.Class == ir.PPARAMOUT { // return values handled below.
continue
}
- if ir.IsParamStackCopy(ln) { // ignore the on-stack copy of a parameter that moved to the heap
- // TODO(mdempsky): Remove once I'm confident
- // this never actually happens. We currently
- // perform inlining before escape analysis, so
- // nothing should have moved to the heap yet.
- base.Fatalf("impossible: %v", ln)
- }
inlf := typecheck.Expr(inlvar(ln)).(*ir.Name)
inlvars[ln] = inlf
if base.Flag.GenDwarfInl > 0 {
@@ -925,6 +973,7 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
inlvars: inlvars,
bases: make(map[*src.PosBase]*src.PosBase),
newInlIndex: newIndex,
+ fn: fn,
}
subst.edit = subst.node
@@ -1031,6 +1080,12 @@ type inlsubst struct {
newInlIndex int
edit func(ir.Node) ir.Node // cached copy of subst.node method value closure
+
+ // If non-nil, we are inside a closure inside the inlined function, and
+ // newclofn is the Func of the new inlined closure.
+ newclofn *ir.Func
+
+ fn *ir.Func // For debug -- the func that is being inlined
}
// list inlines a list of nodes.
@@ -1042,6 +1097,157 @@ func (subst *inlsubst) list(ll ir.Nodes) []ir.Node {
return s
}
+// fields returns a list of the fields of a struct type representing receiver,
+// params, or results, after duplicating the field nodes and substituting the
+// Nname nodes inside the field nodes.
+func (subst *inlsubst) fields(oldt *types.Type) []*types.Field {
+ oldfields := oldt.FieldSlice()
+ newfields := make([]*types.Field, len(oldfields))
+ for i := range oldfields {
+ newfields[i] = oldfields[i].Copy()
+ if oldfields[i].Nname != nil {
+ newfields[i].Nname = subst.node(oldfields[i].Nname.(*ir.Name))
+ }
+ }
+ return newfields
+}
+
+// clovar creates a new ONAME node for a local variable or param of a closure
+// inside a function being inlined.
+func (subst *inlsubst) clovar(n *ir.Name) *ir.Name {
+ // TODO(danscales): want to get rid of this shallow copy, with code like the
+ // following, but it is hard to copy all the necessary flags in a maintainable way.
+ // m := ir.NewNameAt(n.Pos(), n.Sym())
+ // m.Class = n.Class
+ // m.SetType(n.Type())
+ // m.SetTypecheck(1)
+ //if n.IsClosureVar() {
+ // m.SetIsClosureVar(true)
+ //}
+ m := &ir.Name{}
+ *m = *n
+ m.Curfn = subst.newclofn
+ if n.Defn != nil && n.Defn.Op() == ir.ONAME {
+ if !n.IsClosureVar() {
+ base.FatalfAt(n.Pos(), "want closure variable, got: %+v", n)
+ }
+ if n.Sym().Pkg != types.LocalPkg {
+ // If the closure came from inlining a function from
+ // another package, must change package of captured
+ // variable to localpkg, so that the fields of the closure
+ // struct are local package and can be accessed even if
+ // name is not exported. If you disable this code, you can
+ // reproduce the problem by running 'go test
+ // go/internal/srcimporter'. TODO(mdempsky) - maybe change
+ // how we create closure structs?
+ m.SetSym(types.LocalPkg.Lookup(n.Sym().Name))
+ }
+ // Make sure any inlvar which is the Defn
+ // of an ONAME closure var is rewritten
+ // during inlining. Don't substitute
+ // if Defn node is outside inlined function.
+ if subst.inlvars[n.Defn.(*ir.Name)] != nil {
+ m.Defn = subst.node(n.Defn)
+ }
+ }
+ if n.Outer != nil {
+ // Either the outer variable is defined in the function being inlined,
+ // and we will replace it with the substituted variable, or it is
+ // defined outside the function being inlined, and we should just
+ // skip the outer variable (the closure variable of the function
+ // being inlined).
+ s := subst.node(n.Outer).(*ir.Name)
+ if s == n.Outer {
+ s = n.Outer.Outer
+ }
+ m.Outer = s
+ }
+ return m
+}
+
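A sketch of the cross-package situation the Sym rewrite above guards against (illustrative package and identifier names, not from the patch). If an inlinable function from another package closes over an unexported local, the captured variable's symbol is moved into the inlining package so the generated closure-struct field stays accessible there; per the comment above, 'go test go/internal/srcimporter' exercises this path.

	// Package lib is imported, and Counter inlined, from another package.
	package lib

	// Counter closes over the unexported local n. When Counter is inlined
	// elsewhere, clovar rewrites n's Sym into the inlining package so the
	// closure-struct field for n can be referenced from that package.
	func Counter() func() int {
		n := 0
		return func() int {
			n++
			return n
		}
	}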
+// closure does the necessary substitutions for a ClosureExpr n and returns the new
+// closure node.
+func (subst *inlsubst) closure(n *ir.ClosureExpr) ir.Node {
+ m := ir.Copy(n)
+ m.SetPos(subst.updatedPos(m.Pos()))
+ ir.EditChildren(m, subst.edit)
+
+ //fmt.Printf("Inlining func %v with closure into %v\n", subst.fn, ir.FuncName(ir.CurFunc))
+
+ // The following is similar to funcLit
+ oldfn := n.Func
+ newfn := ir.NewFunc(oldfn.Pos())
+ // These three lines are not strictly necessary, but make clear
+ // that the new function needs to redo typechecking and inlinability analysis.
+ newfn.SetTypecheck(0)
+ newfn.SetInlinabilityChecked(false)
+ newfn.Inl = nil
+ newfn.SetIsHiddenClosure(true)
+ newfn.Nname = ir.NewNameAt(n.Pos(), ir.BlankNode.Sym())
+ newfn.Nname.Func = newfn
+ newfn.Nname.Ntype = subst.node(oldfn.Nname.Ntype).(ir.Ntype)
+ newfn.Nname.Defn = newfn
+
+ m.(*ir.ClosureExpr).Func = newfn
+ newfn.OClosure = m.(*ir.ClosureExpr)
+
+ if subst.newclofn != nil {
+ //fmt.Printf("Inlining a closure with a nested closure\n")
+ }
+ prevxfunc := subst.newclofn
+
+ // Mark that we are now substituting within a closure (within the
+ // inlined function), and create new nodes for all the local
+ // vars/params inside this closure.
+ subst.newclofn = newfn
+ newfn.Dcl = nil
+ newfn.ClosureVars = nil
+ for _, oldv := range oldfn.Dcl {
+ newv := subst.clovar(oldv)
+ subst.inlvars[oldv] = newv
+ newfn.Dcl = append(newfn.Dcl, newv)
+ }
+ for _, oldv := range oldfn.ClosureVars {
+ newv := subst.clovar(oldv)
+ subst.inlvars[oldv] = newv
+ newfn.ClosureVars = append(newfn.ClosureVars, newv)
+ }
+
+ // Need to replace ONAME nodes in
+ // newfn.Type().FuncType().Receiver/Params/Results.FieldSlice().Nname
+ oldt := oldfn.Type()
+ newrecvs := subst.fields(oldt.Recvs())
+ var newrecv *types.Field
+ if len(newrecvs) > 0 {
+ newrecv = newrecvs[0]
+ }
+ newt := types.NewSignature(oldt.Pkg(), newrecv,
+ subst.fields(oldt.Params()), subst.fields(oldt.Results()))
+
+ newfn.Nname.SetType(newt)
+ newfn.Body = subst.list(oldfn.Body)
+
+ // Remove the nodes for the current closure from subst.inlvars
+ for _, oldv := range oldfn.Dcl {
+ delete(subst.inlvars, oldv)
+ }
+ for _, oldv := range oldfn.ClosureVars {
+ delete(subst.inlvars, oldv)
+ }
+ // Go back to previous closure func
+ subst.newclofn = prevxfunc
+
+ // Actually create the named function for the closure, now that
+ // the closure is inlined in a specific function.
+ m.SetTypecheck(0)
+ if oldfn.ClosureCalled() {
+ typecheck.Callee(m)
+ } else {
+ typecheck.Expr(m)
+ }
+ return m
+}
+
// node recursively copies a node from the saved pristine body of the
// inlined function, substituting references to input/output
// parameters with ones to the tmpnames, and substituting returns with
@@ -1056,13 +1262,17 @@ func (subst *inlsubst) node(n ir.Node) ir.Node {
n := n.(*ir.Name)
// Handle captured variables when inlining closures.
- if n.IsClosureVar() {
+ if n.IsClosureVar() && subst.newclofn == nil {
o := n.Outer
+ // Deal with the case where a sequence of closures is inlined.
+ // TODO(danscales) - write test case to see if we need to
+ // go up multiple levels.
+ if o.Curfn != ir.CurFunc {
+ o = o.Outer
+ }
+
// make sure the outer param matches the inlining location
- // NB: if we enabled inlining of functions containing OCLOSURE or refined
- // the reassigned check via some sort of copy propagation this would most
- // likely need to be changed to a loop to walk up to the correct Param
if o == nil || o.Curfn != ir.CurFunc {
base.Fatalf("%v: unresolvable capture %v\n", ir.Line(n), n)
}
@@ -1098,6 +1308,10 @@ func (subst *inlsubst) node(n ir.Node) ir.Node {
}
case ir.ORETURN:
+ if subst.newclofn != nil {
+ // Don't do special substitutions if inside a closure
+ break
+ }
// Since we are not inside a closure (checked above),
// this return is guaranteed to belong to the current inlined function.
n := n.(*ir.ReturnStmt)
@@ -1136,6 +1350,10 @@ func (subst *inlsubst) node(n ir.Node) ir.Node {
return m
case ir.OLABEL:
+ if subst.newclofn != nil {
+ // Don't do special substitutions if inside a closure
+ break
+ }
n := n.(*ir.LabelStmt)
m := ir.Copy(n).(*ir.LabelStmt)
m.SetPos(subst.updatedPos(m.Pos()))
@@ -1143,10 +1361,10 @@ func (subst *inlsubst) node(n ir.Node) ir.Node {
p := fmt.Sprintf("%s·%d", n.Label.Name, inlgen)
m.Label = typecheck.Lookup(p)
return m
- }
- if n.Op() == ir.OCLOSURE {
- base.Fatalf("cannot inline function containing closure: %+v", n)
+ case ir.OCLOSURE:
+ return subst.closure(n.(*ir.ClosureExpr))
+
}
m := ir.Copy(n)
@@ -1171,7 +1389,7 @@ func pruneUnusedAutos(ll []*ir.Name, vis *hairyVisitor) []*ir.Name {
s := make([]*ir.Name, 0, len(ll))
for _, n := range ll {
if n.Class == ir.PAUTO {
- if _, found := vis.usedLocals[n]; !found {
+ if !vis.usedLocals.Has(n) {
continue
}
}
@@ -1191,21 +1409,13 @@ func numNonClosures(list []*ir.Func) int {
return count
}
-// TODO(mdempsky): Update inl.go to use ir.DoChildren directly.
-func errChildren(n ir.Node, do func(ir.Node) error) (err error) {
- ir.DoChildren(n, func(x ir.Node) bool {
- err = do(x)
- return err != nil
- })
- return
-}
-func errList(list []ir.Node, do func(ir.Node) error) error {
+func doList(list []ir.Node, do func(ir.Node) bool) bool {
for _, x := range list {
if x != nil {
- if err := do(x); err != nil {
- return err
+ if do(x) {
+ return true
}
}
}
- return nil
+ return false
}
diff --git a/src/cmd/compile/internal/ir/const.go b/src/cmd/compile/internal/ir/const.go
index bfa0136232..eaa4d5b6b1 100644
--- a/src/cmd/compile/internal/ir/const.go
+++ b/src/cmd/compile/internal/ir/const.go
@@ -77,7 +77,7 @@ func ConstOverflow(v constant.Value, t *types.Type) bool {
ft := types.FloatForComplex(t)
return ConstOverflow(constant.Real(v), ft) || ConstOverflow(constant.Imag(v), ft)
}
- base.Fatalf("doesoverflow: %v, %v", v, t)
+ base.Fatalf("ConstOverflow: %v, %v", v, t)
panic("unreachable")
}
diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go
index 51425db42d..b32ed71260 100644
--- a/src/cmd/compile/internal/ir/expr.go
+++ b/src/cmd/compile/internal/ir/expr.go
@@ -8,6 +8,7 @@ import (
"bytes"
"cmd/compile/internal/base"
"cmd/compile/internal/types"
+ "cmd/internal/obj"
"cmd/internal/src"
"fmt"
"go/constant"
@@ -32,8 +33,7 @@ type miniExpr struct {
}
const (
- miniExprHasCall = 1 << iota
- miniExprNonNil
+ miniExprNonNil = 1 << iota
miniExprTransient
miniExprBounded
miniExprImplicit // for use by implementations; not supported by every Expr
@@ -44,8 +44,6 @@ func (*miniExpr) isExpr() {}
func (n *miniExpr) Type() *types.Type { return n.typ }
func (n *miniExpr) SetType(x *types.Type) { n.typ = x }
-func (n *miniExpr) HasCall() bool { return n.flags&miniExprHasCall != 0 }
-func (n *miniExpr) SetHasCall(b bool) { n.flags.set(miniExprHasCall, b) }
func (n *miniExpr) NonNil() bool { return n.flags&miniExprNonNil != 0 }
func (n *miniExpr) MarkNonNil() { n.flags |= miniExprNonNil }
func (n *miniExpr) Transient() bool { return n.flags&miniExprTransient != 0 }
@@ -145,7 +143,7 @@ func (n *BinaryExpr) SetOp(op Op) {
}
// A CallUse records how the result of the call is used:
-type CallUse int
+type CallUse byte
const (
_ CallUse = iota
@@ -161,7 +159,6 @@ type CallExpr struct {
origNode
X Node
Args Nodes
- Rargs Nodes // TODO(rsc): Delete.
KeepAlive []*Name // vars to be kept alive until call returns
IsDDD bool
Use CallUse
@@ -464,21 +461,35 @@ func NewResultExpr(pos src.XPos, typ *types.Type, offset int64) *ResultExpr {
return n
}
-// A NameOffsetExpr refers to an offset within a variable.
+// A LinksymOffsetExpr refers to an offset within a global variable.
// It is like a SelectorExpr but without the field name.
-type NameOffsetExpr struct {
+type LinksymOffsetExpr struct {
miniExpr
- Name_ *Name
+ Linksym *obj.LSym
Offset_ int64
}
-func NewNameOffsetExpr(pos src.XPos, name *Name, offset int64, typ *types.Type) *NameOffsetExpr {
- n := &NameOffsetExpr{Name_: name, Offset_: offset}
+func NewLinksymOffsetExpr(pos src.XPos, lsym *obj.LSym, offset int64, typ *types.Type) *LinksymOffsetExpr {
+ n := &LinksymOffsetExpr{Linksym: lsym, Offset_: offset}
n.typ = typ
- n.op = ONAMEOFFSET
+ n.op = OLINKSYMOFFSET
return n
}
+// NewLinksymExpr is NewLinksymOffsetExpr, but with offset fixed at 0.
+func NewLinksymExpr(pos src.XPos, lsym *obj.LSym, typ *types.Type) *LinksymOffsetExpr {
+ return NewLinksymOffsetExpr(pos, lsym, 0, typ)
+}
+
+// NewNameOffsetExpr is NewLinksymOffsetExpr, but taking a *Name
+// representing a global variable instead of an *obj.LSym directly.
+func NewNameOffsetExpr(pos src.XPos, name *Name, offset int64, typ *types.Type) *LinksymOffsetExpr {
+ if name == nil || IsBlank(name) || !(name.Op() == ONAME && name.Class == PEXTERN) {
+ base.FatalfAt(pos, "cannot take offset of nil, blank name or non-global variable: %v", name)
+ }
+ return NewLinksymOffsetExpr(pos, name.Linksym(), offset, typ)
+}
+
// A SelectorExpr is a selector expression X.Sel.
type SelectorExpr struct {
miniExpr
@@ -612,11 +623,9 @@ type TypeAssertExpr struct {
X Node
Ntype Ntype
- // Runtime type information provided by walkDotType.
- // Caution: These aren't always populated; see walkDotType.
- SrcType *AddrExpr `mknode:"-"` // *runtime._type for X's type
- DstType *AddrExpr `mknode:"-"` // *runtime._type for Type
- Itab *AddrExpr `mknode:"-"` // *runtime.itab for Type implementing X's type
+ // Runtime type information provided by walkDotType for
+ // assertions from non-empty interface to concrete type.
+ Itab *AddrExpr `mknode:"-"` // *runtime.itab for Type implementing X's type
}
func NewTypeAssertExpr(pos src.XPos, x Node, typ Ntype) *TypeAssertExpr {
@@ -656,7 +665,7 @@ func (n *UnaryExpr) SetOp(op Op) {
case OBITNOT, ONEG, ONOT, OPLUS, ORECV,
OALIGNOF, OCAP, OCLOSE, OIMAG, OLEN, ONEW,
OOFFSETOF, OPANIC, OREAL, OSIZEOF,
- OCHECKNIL, OCFUNC, OIDATA, OITAB, ONEWOBJ, OSPTR, OVARDEF, OVARKILL, OVARLIVE:
+ OCHECKNIL, OCFUNC, OIDATA, OITAB, OSPTR, OVARDEF, OVARKILL, OVARLIVE:
n.op = op
}
}
@@ -728,7 +737,7 @@ func IsAddressable(n Node) bool {
}
return true
- case ONAMEOFFSET:
+ case OLINKSYMOFFSET:
return true
}
diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go
index a4e769f508..1a05079dac 100644
--- a/src/cmd/compile/internal/ir/fmt.go
+++ b/src/cmd/compile/internal/ir/fmt.go
@@ -378,9 +378,9 @@ func stmtFmt(n Node, s fmt.State) {
n := n.(*ReturnStmt)
fmt.Fprintf(s, "return %.v", n.Results)
- case ORETJMP:
- n := n.(*BranchStmt)
- fmt.Fprintf(s, "retjmp %v", n.Label)
+ case OTAILCALL:
+ n := n.(*TailCallStmt)
+ fmt.Fprintf(s, "tailcall %v", n.Target)
case OINLMARK:
n := n.(*InlineMarkStmt)
@@ -589,20 +589,20 @@ func exprFmt(n Node, s fmt.State, prec int) {
}
if n.Type() == types.UntypedRune {
- switch x, ok := constant.Int64Val(n.Val()); {
+ switch x, ok := constant.Uint64Val(n.Val()); {
case !ok:
fallthrough
default:
fmt.Fprintf(s, "('\\x00' + %v)", n.Val())
- case ' ' <= x && x < utf8.RuneSelf && x != '\\' && x != '\'':
- fmt.Fprintf(s, "'%c'", int(x))
+ case x < utf8.RuneSelf:
+ fmt.Fprintf(s, "%q", x)
- case 0 <= x && x < 1<<16:
- fmt.Fprintf(s, "'\\u%04x'", uint(int(x)))
+ case x < 1<<16:
+ fmt.Fprintf(s, "'\\u%04x'", x)
- case 0 <= x && x <= utf8.MaxRune:
- fmt.Fprintf(s, "'\\U%08x'", uint64(x))
+ case x <= utf8.MaxRune:
+ fmt.Fprintf(s, "'\\U%08x'", x)
}
} else {
fmt.Fprint(s, types.FmtConst(n.Val(), s.Flag('#')))
@@ -632,9 +632,9 @@ func exprFmt(n Node, s fmt.State, prec int) {
case OPACK, ONONAME:
fmt.Fprint(s, n.Sym())
- case ONAMEOFFSET:
- n := n.(*NameOffsetExpr)
- fmt.Fprintf(s, "(%v)(%v@%d)", n.Type(), n.Name_, n.Offset_)
+ case OLINKSYMOFFSET:
+ n := n.(*LinksymOffsetExpr)
+ fmt.Fprintf(s, "(%v)(%s@%d)", n.Type(), n.Linksym.Name, n.Offset_)
case OTYPE:
if n.Type() == nil && n.Sym() != nil {
@@ -1020,6 +1020,15 @@ func dumpNodeHeader(w io.Writer, n Node) {
fmt.Fprintf(w, " defn(%p)", n.Name().Defn)
}
+ if base.Debug.DumpPtrs != 0 && n.Name() != nil && n.Name().Curfn != nil {
+ // Useful to see where Curfn is set and what node it points to
+ fmt.Fprintf(w, " curfn(%p)", n.Name().Curfn)
+ }
+ if base.Debug.DumpPtrs != 0 && n.Name() != nil && n.Name().Outer != nil {
+ // Useful to see where Outer is set and what node it points to
+ fmt.Fprintf(w, " outer(%p)", n.Name().Outer)
+ }
+
if EscFmt != nil {
if esc := EscFmt(n); esc != "" {
fmt.Fprintf(w, " %s", esc)
@@ -1119,6 +1128,11 @@ func dumpNode(w io.Writer, n Node, depth int) {
return
}
+ if n == nil {
+ fmt.Fprint(w, "NilIrNode")
+ return
+ }
+
if len(n.Init()) != 0 {
fmt.Fprintf(w, "%+v-init", n.Op())
dumpNodes(w, n.Init(), depth+1)
@@ -1182,6 +1196,18 @@ func dumpNode(w io.Writer, n Node, depth int) {
dumpNode(w, dcl, depth+1)
}
}
+ if len(fn.ClosureVars) > 0 {
+ indent(w, depth)
+ fmt.Fprintf(w, "%+v-ClosureVars", n.Op())
+ for _, cv := range fn.ClosureVars {
+ dumpNode(w, cv, depth+1)
+ }
+ }
+ if len(fn.Enter) > 0 {
+ indent(w, depth)
+ fmt.Fprintf(w, "%+v-Enter", n.Op())
+ dumpNodes(w, fn.Enter, depth+1)
+ }
if len(fn.Body) > 0 {
indent(w, depth)
fmt.Fprintf(w, "%+v-body", n.Op())
diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go
index 30cddd298e..0a9db92d96 100644
--- a/src/cmd/compile/internal/ir/func.go
+++ b/src/cmd/compile/internal/ir/func.go
@@ -63,7 +63,7 @@ type Func struct {
Exit Nodes
// ONAME nodes for all params/locals for this func/closure, does NOT
- // include closurevars until transformclosure runs.
+ // include closurevars until transforming closures during walk.
// Names must be listed PPARAMs, PPARAMOUTs, then PAUTOs,
// with PPARAMs and PPARAMOUTs in order corresponding to the function signature.
// However, as anonymous or blank PPARAMs are not actually declared,
@@ -133,9 +133,10 @@ func (n *Func) copy() Node { panic(n.no("copy")) }
func (n *Func) doChildren(do func(Node) bool) bool { return doNodes(n.Body, do) }
func (n *Func) editChildren(edit func(Node) Node) { editNodes(n.Body, edit) }
-func (f *Func) Type() *types.Type { return f.Nname.Type() }
-func (f *Func) Sym() *types.Sym { return f.Nname.Sym() }
-func (f *Func) Linksym() *obj.LSym { return f.Nname.Linksym() }
+func (f *Func) Type() *types.Type { return f.Nname.Type() }
+func (f *Func) Sym() *types.Sym { return f.Nname.Sym() }
+func (f *Func) Linksym() *obj.LSym { return f.Nname.Linksym() }
+func (f *Func) LinksymABI(abi obj.ABI) *obj.LSym { return f.Nname.LinksymABI(abi) }
// An Inline holds fields used for function bodies that can be inlined.
type Inline struct {
diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go
index 4dd9a8807a..a7ff4ac9c7 100644
--- a/src/cmd/compile/internal/ir/mini.go
+++ b/src/cmd/compile/internal/ir/mini.go
@@ -57,7 +57,7 @@ const (
miniWalkdefShift = 0 // TODO(mdempsky): Move to Name.flags.
miniTypecheckShift = 2
miniDiag = 1 << 4
- miniHasCall = 1 << 5 // for miniStmt
+ miniWalked = 1 << 5 // to prevent/catch re-walking
)
func (n *miniNode) Typecheck() uint8 { return n.bits.get2(miniTypecheckShift) }
@@ -71,6 +71,9 @@ func (n *miniNode) SetTypecheck(x uint8) {
func (n *miniNode) Diag() bool { return n.bits&miniDiag != 0 }
func (n *miniNode) SetDiag(x bool) { n.bits.set(miniDiag, x) }
+func (n *miniNode) Walked() bool { return n.bits&miniWalked != 0 }
+func (n *miniNode) SetWalked(x bool) { n.bits.set(miniWalked, x) }
+
// Empty, immutable graph structure.
func (n *miniNode) Init() Nodes { return Nodes{} }
@@ -85,7 +88,5 @@ func (n *miniNode) Name() *Name { return nil }
func (n *miniNode) Sym() *types.Sym { return nil }
func (n *miniNode) Val() constant.Value { panic(n.no("Val")) }
func (n *miniNode) SetVal(v constant.Value) { panic(n.no("SetVal")) }
-func (n *miniNode) HasCall() bool { return false }
-func (n *miniNode) SetHasCall(bool) { panic(n.no("SetHasCall")) }
func (n *miniNode) NonNil() bool { return false }
func (n *miniNode) MarkNonNil() { panic(n.no("MarkNonNil")) }
diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go
index 514b303893..fa0639600c 100644
--- a/src/cmd/compile/internal/ir/name.go
+++ b/src/cmd/compile/internal/ir/name.go
@@ -58,9 +58,6 @@ type Name struct {
Ntype Ntype
Heapaddr *Name // temp holding heap address of param
- // ONAME PAUTOHEAP
- Stackcopy *Name // the PPARAM/PPARAMOUT on-stack slot (moved func params only)
-
// ONAME closure linkage
// Consider:
//
@@ -150,12 +147,7 @@ func (n *Name) TypeDefn() *types.Type {
// RecordFrameOffset records the frame offset for the name.
// It is used by package types when laying out function arguments.
func (n *Name) RecordFrameOffset(offset int64) {
- if n.Stackcopy != nil {
- n.Stackcopy.SetFrameOffset(offset)
- n.SetFrameOffset(0)
- } else {
- n.SetFrameOffset(offset)
- }
+ n.SetFrameOffset(offset)
}
// NewNameAt returns a new ONAME Node associated with symbol s at position pos.
@@ -234,7 +226,8 @@ func (n *Name) SetWalkdef(x uint8) {
n.bits.set2(miniWalkdefShift, x)
}
-func (n *Name) Linksym() *obj.LSym { return n.sym.Linksym() }
+func (n *Name) Linksym() *obj.LSym { return n.sym.Linksym() }
+func (n *Name) LinksymABI(abi obj.ABI) *obj.LSym { return n.sym.LinksymABI(abi) }
func (*Name) CanBeNtype() {}
func (*Name) CanBeAnSSASym() {}
@@ -292,6 +285,21 @@ func (n *Name) SetInlLocal(b bool) { n.flags.set(nameInlLocal, b) }
func (n *Name) SetOpenDeferSlot(b bool) { n.flags.set(nameOpenDeferSlot, b) }
func (n *Name) SetLibfuzzerExtraCounter(b bool) { n.flags.set(nameLibfuzzerExtraCounter, b) }
+// OnStack reports whether variable n may reside on the stack.
+func (n *Name) OnStack() bool {
+ if n.Op() == ONAME {
+ switch n.Class {
+ case PPARAM, PPARAMOUT, PAUTO:
+ return n.Esc() != EscHeap
+ case PEXTERN, PAUTOHEAP:
+ return false
+ }
+ }
+ // Note: fmt.go:dumpNodeHeader calls all "func() bool"-typed
+ // methods, but it can only recover from panics, not Fatalf.
+ panic(fmt.Sprintf("%v: not a variable: %v", base.FmtPos(n.Pos()), n))
+}
+
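A rough illustration of the classification OnStack encodes, assuming the usual escape-analysis outcomes for code like this (not part of the patch):

	package p

	func leak() *int {
		x := 1 // &x is returned, so x escapes to the heap: OnStack reports false
		return &x
	}

	func local() int {
		y := 2 // y is an ordinary stack local (PAUTO): OnStack reports true
		return y
	}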
// MarkReadonly indicates that n is an ONAME with readonly contents.
func (n *Name) MarkReadonly() {
if n.Op() != ONAME {
@@ -501,24 +509,4 @@ func NewPkgName(pos src.XPos, sym *types.Sym, pkg *types.Pkg) *PkgName {
return p
}
-// IsParamStackCopy reports whether this is the on-stack copy of a
-// function parameter that moved to the heap.
-func IsParamStackCopy(n Node) bool {
- if n.Op() != ONAME {
- return false
- }
- name := n.(*Name)
- return (name.Class == PPARAM || name.Class == PPARAMOUT) && name.Heapaddr != nil
-}
-
-// IsParamHeapCopy reports whether this is the on-heap copy of
-// a function parameter that moved to the heap.
-func IsParamHeapCopy(n Node) bool {
- if n.Op() != ONAME {
- return false
- }
- name := n.(*Name)
- return name.Class == PAUTOHEAP && name.Stackcopy != nil
-}
-
var RegFP *Name
diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go
index a2b6e7203b..ffa7daf6b2 100644
--- a/src/cmd/compile/internal/ir/node.go
+++ b/src/cmd/compile/internal/ir/node.go
@@ -52,8 +52,6 @@ type Node interface {
SetTypecheck(x uint8)
NonNil() bool
MarkNonNil()
- HasCall() bool
- SetHasCall(x bool)
}
// Line returns n's position as a string. If n has been inlined,
@@ -216,7 +214,6 @@ const (
OAND // Left & Right
OANDNOT // Left &^ Right
ONEW // new(Left); corresponds to calls to new in source code
- ONEWOBJ // runtime.newobject(n.Type); introduced by walk; Left is type descriptor
ONOT // !Left
OBITNOT // ^Left
OPLUS // +Left
@@ -294,23 +291,27 @@ const (
OTSLICE // []int
// misc
- OINLCALL // intermediary representation of an inlined call.
- OEFACE // itable and data words of an empty-interface value.
- OITAB // itable word of an interface value.
- OIDATA // data word of an interface value in Left
- OSPTR // base pointer of a slice or string.
- OCFUNC // reference to c function pointer (not go func value)
- OCHECKNIL // emit code to ensure pointer/interface not nil
- OVARDEF // variable is about to be fully initialized
- OVARKILL // variable is dead
- OVARLIVE // variable is alive
- ORESULT // result of a function call; Xoffset is stack offset
- OINLMARK // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree.
- ONAMEOFFSET // offset within a name
+ // intermediate representation of an inlined call. Uses Init (assignments
+ // for the captured variables, parameters, retvars, & INLMARK op),
+ // Body (body of the inlined function), and ReturnVars (list of
+ // return values)
+ OINLCALL // intermediary representation of an inlined call.
+ OEFACE // itable and data words of an empty-interface value.
+ OITAB // itable word of an interface value.
+ OIDATA // data word of an interface value in Left
+ OSPTR // base pointer of a slice or string.
+ OCFUNC // reference to c function pointer (not go func value)
+ OCHECKNIL // emit code to ensure pointer/interface not nil
+ OVARDEF // variable is about to be fully initialized
+ OVARKILL // variable is dead
+ OVARLIVE // variable is alive
+ ORESULT // result of a function call; Xoffset is stack offset
+ OINLMARK // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree.
+ OLINKSYMOFFSET // offset within a name
// arch-specific opcodes
- ORETJMP // return to other function
- OGETG // runtime.getg() (read g pointer)
+ OTAILCALL // tail call to another function
+ OGETG // runtime.getg() (read g pointer)
OEND
)
@@ -452,6 +453,9 @@ const (
// Go command pragmas
GoBuildPragma
+
+ RegisterParams // TODO remove after register abi is working
+
)
func AsNode(n types.Object) Node {
@@ -542,7 +546,6 @@ func InitExpr(init []Node, expr Node) Node {
}
n.PtrInit().Prepend(init...)
- n.SetHasCall(true)
return n
}
diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go
index f1b0a21628..fe436867b2 100644
--- a/src/cmd/compile/internal/ir/node_gen.go
+++ b/src/cmd/compile/internal/ir/node_gen.go
@@ -250,7 +250,6 @@ func (n *CallExpr) copy() Node {
c := *n
c.init = copyNodes(c.init)
c.Args = copyNodes(c.Args)
- c.Rargs = copyNodes(c.Rargs)
c.KeepAlive = copyNames(c.KeepAlive)
return &c
}
@@ -264,9 +263,6 @@ func (n *CallExpr) doChildren(do func(Node) bool) bool {
if doNodes(n.Args, do) {
return true
}
- if doNodes(n.Rargs, do) {
- return true
- }
if doNames(n.KeepAlive, do) {
return true
}
@@ -278,7 +274,6 @@ func (n *CallExpr) editChildren(edit func(Node) Node) {
n.X = edit(n.X).(Node)
}
editNodes(n.Args, edit)
- editNodes(n.Rargs, edit)
editNames(n.KeepAlive, edit)
}
@@ -734,6 +729,22 @@ func (n *LabelStmt) editChildren(edit func(Node) Node) {
editNodes(n.init, edit)
}
+func (n *LinksymOffsetExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *LinksymOffsetExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *LinksymOffsetExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ return false
+}
+func (n *LinksymOffsetExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+
func (n *LogicalExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *LogicalExpr) copy() Node {
c := *n
@@ -815,28 +826,6 @@ func (n *MapType) editChildren(edit func(Node) Node) {
func (n *Name) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
-func (n *NameOffsetExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
-func (n *NameOffsetExpr) copy() Node {
- c := *n
- c.init = copyNodes(c.init)
- return &c
-}
-func (n *NameOffsetExpr) doChildren(do func(Node) bool) bool {
- if doNodes(n.init, do) {
- return true
- }
- if n.Name_ != nil && do(n.Name_) {
- return true
- }
- return false
-}
-func (n *NameOffsetExpr) editChildren(edit func(Node) Node) {
- editNodes(n.init, edit)
- if n.Name_ != nil {
- n.Name_ = edit(n.Name_).(*Name)
- }
-}
-
func (n *NilExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *NilExpr) copy() Node {
c := *n
@@ -1233,6 +1222,28 @@ func (n *SwitchStmt) editChildren(edit func(Node) Node) {
editNodes(n.Compiled, edit)
}
+func (n *TailCallStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *TailCallStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *TailCallStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Target != nil && do(n.Target) {
+ return true
+ }
+ return false
+}
+func (n *TailCallStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Target != nil {
+ n.Target = edit(n.Target).(*Name)
+ }
+}
+
func (n *TypeAssertExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *TypeAssertExpr) copy() Node {
c := *n
diff --git a/src/cmd/compile/internal/ir/op_string.go b/src/cmd/compile/internal/ir/op_string.go
index b54b4785a2..15c60baf44 100644
--- a/src/cmd/compile/internal/ir/op_string.go
+++ b/src/cmd/compile/internal/ir/op_string.go
@@ -91,81 +91,80 @@ func _() {
_ = x[OAND-80]
_ = x[OANDNOT-81]
_ = x[ONEW-82]
- _ = x[ONEWOBJ-83]
- _ = x[ONOT-84]
- _ = x[OBITNOT-85]
- _ = x[OPLUS-86]
- _ = x[ONEG-87]
- _ = x[OOROR-88]
- _ = x[OPANIC-89]
- _ = x[OPRINT-90]
- _ = x[OPRINTN-91]
- _ = x[OPAREN-92]
- _ = x[OSEND-93]
- _ = x[OSLICE-94]
- _ = x[OSLICEARR-95]
- _ = x[OSLICESTR-96]
- _ = x[OSLICE3-97]
- _ = x[OSLICE3ARR-98]
- _ = x[OSLICEHEADER-99]
- _ = x[ORECOVER-100]
- _ = x[ORECV-101]
- _ = x[ORUNESTR-102]
- _ = x[OSELRECV2-103]
- _ = x[OIOTA-104]
- _ = x[OREAL-105]
- _ = x[OIMAG-106]
- _ = x[OCOMPLEX-107]
- _ = x[OALIGNOF-108]
- _ = x[OOFFSETOF-109]
- _ = x[OSIZEOF-110]
- _ = x[OMETHEXPR-111]
- _ = x[OSTMTEXPR-112]
- _ = x[OBLOCK-113]
- _ = x[OBREAK-114]
- _ = x[OCASE-115]
- _ = x[OCONTINUE-116]
- _ = x[ODEFER-117]
- _ = x[OFALL-118]
- _ = x[OFOR-119]
- _ = x[OFORUNTIL-120]
- _ = x[OGOTO-121]
- _ = x[OIF-122]
- _ = x[OLABEL-123]
- _ = x[OGO-124]
- _ = x[ORANGE-125]
- _ = x[ORETURN-126]
- _ = x[OSELECT-127]
- _ = x[OSWITCH-128]
- _ = x[OTYPESW-129]
- _ = x[OTCHAN-130]
- _ = x[OTMAP-131]
- _ = x[OTSTRUCT-132]
- _ = x[OTINTER-133]
- _ = x[OTFUNC-134]
- _ = x[OTARRAY-135]
- _ = x[OTSLICE-136]
- _ = x[OINLCALL-137]
- _ = x[OEFACE-138]
- _ = x[OITAB-139]
- _ = x[OIDATA-140]
- _ = x[OSPTR-141]
- _ = x[OCFUNC-142]
- _ = x[OCHECKNIL-143]
- _ = x[OVARDEF-144]
- _ = x[OVARKILL-145]
- _ = x[OVARLIVE-146]
- _ = x[ORESULT-147]
- _ = x[OINLMARK-148]
- _ = x[ONAMEOFFSET-149]
- _ = x[ORETJMP-150]
- _ = x[OGETG-151]
- _ = x[OEND-152]
+ _ = x[ONOT-83]
+ _ = x[OBITNOT-84]
+ _ = x[OPLUS-85]
+ _ = x[ONEG-86]
+ _ = x[OOROR-87]
+ _ = x[OPANIC-88]
+ _ = x[OPRINT-89]
+ _ = x[OPRINTN-90]
+ _ = x[OPAREN-91]
+ _ = x[OSEND-92]
+ _ = x[OSLICE-93]
+ _ = x[OSLICEARR-94]
+ _ = x[OSLICESTR-95]
+ _ = x[OSLICE3-96]
+ _ = x[OSLICE3ARR-97]
+ _ = x[OSLICEHEADER-98]
+ _ = x[ORECOVER-99]
+ _ = x[ORECV-100]
+ _ = x[ORUNESTR-101]
+ _ = x[OSELRECV2-102]
+ _ = x[OIOTA-103]
+ _ = x[OREAL-104]
+ _ = x[OIMAG-105]
+ _ = x[OCOMPLEX-106]
+ _ = x[OALIGNOF-107]
+ _ = x[OOFFSETOF-108]
+ _ = x[OSIZEOF-109]
+ _ = x[OMETHEXPR-110]
+ _ = x[OSTMTEXPR-111]
+ _ = x[OBLOCK-112]
+ _ = x[OBREAK-113]
+ _ = x[OCASE-114]
+ _ = x[OCONTINUE-115]
+ _ = x[ODEFER-116]
+ _ = x[OFALL-117]
+ _ = x[OFOR-118]
+ _ = x[OFORUNTIL-119]
+ _ = x[OGOTO-120]
+ _ = x[OIF-121]
+ _ = x[OLABEL-122]
+ _ = x[OGO-123]
+ _ = x[ORANGE-124]
+ _ = x[ORETURN-125]
+ _ = x[OSELECT-126]
+ _ = x[OSWITCH-127]
+ _ = x[OTYPESW-128]
+ _ = x[OTCHAN-129]
+ _ = x[OTMAP-130]
+ _ = x[OTSTRUCT-131]
+ _ = x[OTINTER-132]
+ _ = x[OTFUNC-133]
+ _ = x[OTARRAY-134]
+ _ = x[OTSLICE-135]
+ _ = x[OINLCALL-136]
+ _ = x[OEFACE-137]
+ _ = x[OITAB-138]
+ _ = x[OIDATA-139]
+ _ = x[OSPTR-140]
+ _ = x[OCFUNC-141]
+ _ = x[OCHECKNIL-142]
+ _ = x[OVARDEF-143]
+ _ = x[OVARKILL-144]
+ _ = x[OVARLIVE-145]
+ _ = x[ORESULT-146]
+ _ = x[OINLMARK-147]
+ _ = x[OLINKSYMOFFSET-148]
+ _ = x[OTAILCALL-149]
+ _ = x[OGETG-150]
+ _ = x[OEND-151]
}
-const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRSTMTEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKNAMEOFFSETRETJMPGETGEND"
+const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFMETHEXPRSTMTEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKLINKSYMOFFSETTAILCALLGETGEND"
-var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 477, 480, 486, 490, 493, 497, 502, 507, 513, 518, 522, 527, 535, 543, 549, 558, 569, 576, 580, 587, 595, 599, 603, 607, 614, 621, 629, 635, 643, 651, 656, 661, 665, 673, 678, 682, 685, 693, 697, 699, 704, 706, 711, 717, 723, 729, 735, 740, 744, 751, 757, 762, 768, 774, 781, 786, 790, 795, 799, 804, 812, 818, 825, 832, 838, 845, 855, 861, 865, 868}
+var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 126, 129, 139, 146, 153, 160, 164, 168, 176, 184, 193, 201, 204, 209, 216, 223, 229, 238, 246, 254, 260, 264, 273, 280, 284, 287, 294, 302, 309, 315, 318, 324, 331, 339, 343, 350, 358, 360, 362, 364, 366, 368, 370, 375, 380, 388, 391, 400, 403, 407, 415, 422, 431, 444, 447, 450, 453, 456, 459, 462, 468, 471, 474, 480, 484, 487, 491, 496, 501, 507, 512, 516, 521, 529, 537, 543, 552, 563, 570, 574, 581, 589, 593, 597, 601, 608, 615, 623, 629, 637, 645, 650, 655, 659, 667, 672, 676, 679, 687, 691, 693, 698, 700, 705, 711, 717, 723, 729, 734, 738, 745, 751, 756, 762, 768, 775, 780, 784, 789, 793, 798, 806, 812, 819, 826, 832, 839, 852, 860, 864, 867}
func (i Op) String() string {
if i >= Op(len(_Op_index)-1) {
diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go
index 553dc53760..d8c1518b90 100644
--- a/src/cmd/compile/internal/ir/sizeof_test.go
+++ b/src/cmd/compile/internal/ir/sizeof_test.go
@@ -21,7 +21,7 @@ func TestSizeof(t *testing.T) {
_64bit uintptr // size on 64bit platforms
}{
{Func{}, 188, 328},
- {Name{}, 116, 208},
+ {Name{}, 112, 200},
}
for _, tt := range tests {
diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go
index b13c6b7795..c304867e1d 100644
--- a/src/cmd/compile/internal/ir/stmt.go
+++ b/src/cmd/compile/internal/ir/stmt.go
@@ -50,11 +50,9 @@ type miniStmt struct {
func (*miniStmt) isStmt() {}
-func (n *miniStmt) Init() Nodes { return n.init }
-func (n *miniStmt) SetInit(x Nodes) { n.init = x }
-func (n *miniStmt) PtrInit() *Nodes { return &n.init }
-func (n *miniStmt) HasCall() bool { return n.bits&miniHasCall != 0 }
-func (n *miniStmt) SetHasCall(b bool) { n.bits.set(miniHasCall, b) }
+func (n *miniStmt) Init() Nodes { return n.init }
+func (n *miniStmt) SetInit(x Nodes) { n.init = x }
+func (n *miniStmt) PtrInit() *Nodes { return &n.init }
// An AssignListStmt is an assignment statement with
// more than one item on at least one side: Lhs = Rhs.
@@ -146,9 +144,6 @@ func NewBlockStmt(pos src.XPos, list []Node) *BlockStmt {
}
// A BranchStmt is a break, continue, fallthrough, or goto statement.
-//
-// For back-end code generation, Op may also be RETJMP (return+jump),
-// in which case the label names another function entirely.
type BranchStmt struct {
miniStmt
Label *types.Sym // label if present
@@ -156,7 +151,7 @@ type BranchStmt struct {
func NewBranchStmt(pos src.XPos, op Op, label *types.Sym) *BranchStmt {
switch op {
- case OBREAK, OCONTINUE, OFALL, OGOTO, ORETJMP:
+ case OBREAK, OCONTINUE, OFALL, OGOTO:
// ok
default:
panic("NewBranch " + op.String())
@@ -343,7 +338,7 @@ type SelectStmt struct {
HasBreak bool
// TODO(rsc): Instead of recording here, replace with a block?
- Compiled Nodes // compiled form, after walkswitch
+ Compiled Nodes // compiled form, after walkSwitch
}
func NewSelectStmt(pos src.XPos, cases []*CommClause) *SelectStmt {
@@ -376,7 +371,7 @@ type SwitchStmt struct {
HasBreak bool
// TODO(rsc): Instead of recording here, replace with a block?
- Compiled Nodes // compiled form, after walkswitch
+ Compiled Nodes // compiled form, after walkSwitch
}
func NewSwitchStmt(pos src.XPos, tag Node, cases []*CaseClause) *SwitchStmt {
@@ -386,6 +381,23 @@ func NewSwitchStmt(pos src.XPos, tag Node, cases []*CaseClause) *SwitchStmt {
return n
}
+// A TailCallStmt is a tail call statement, which is used for back-end
+// code generation to jump directly to another function entirely.
+type TailCallStmt struct {
+ miniStmt
+ Target *Name
+}
+
+func NewTailCallStmt(pos src.XPos, target *Name) *TailCallStmt {
+ if target.Op() != ONAME || target.Class != PFUNC {
+ base.FatalfAt(pos, "tail call to non-func %v", target)
+ }
+ n := &TailCallStmt{Target: target}
+ n.pos = pos
+ n.op = OTAILCALL
+ return n
+}
+
// A TypeSwitchGuard is the [Name :=] X.(type) in a type switch.
type TypeSwitchGuard struct {
miniNode
diff --git a/src/cmd/compile/internal/ir/symtab.go b/src/cmd/compile/internal/ir/symtab.go
index df694f6c84..61727fb1c4 100644
--- a/src/cmd/compile/internal/ir/symtab.go
+++ b/src/cmd/compile/internal/ir/symtab.go
@@ -9,12 +9,6 @@ import (
"cmd/internal/obj"
)
-// Names holds known names.
-var Names struct {
- Staticuint64s *Name
- Zerobase *Name
-}
-
// Syms holds known symbols.
var Syms struct {
AssertE2I *obj.LSym
@@ -46,6 +40,7 @@ var Syms struct {
Racewriterange *obj.LSym
// Wasm
SigPanic *obj.LSym
+ Staticuint64s *obj.LSym
Typedmemclr *obj.LSym
Typedmemmove *obj.LSym
Udiv *obj.LSym
@@ -70,13 +65,8 @@ var Syms struct {
// Pkgs holds known packages.
var Pkgs struct {
- Go *types.Pkg
- Itab *types.Pkg
- Itablink *types.Pkg
- Map *types.Pkg
- Msan *types.Pkg
- Race *types.Pkg
- Runtime *types.Pkg
- Track *types.Pkg
- Unsafe *types.Pkg
+ Go *types.Pkg
+ Itab *types.Pkg
+ Runtime *types.Pkg
+ Unsafe *types.Pkg
}
diff --git a/src/cmd/compile/internal/liveness/bvset.go b/src/cmd/compile/internal/liveness/bvset.go
index 21bc1fee4d..3431f54ede 100644
--- a/src/cmd/compile/internal/liveness/bvset.go
+++ b/src/cmd/compile/internal/liveness/bvset.go
@@ -47,7 +47,7 @@ func (m *bvecSet) grow() {
m.index = newIndex
}
-// add adds bv to the set and returns its index in m.extractUniqe.
+// add adds bv to the set and returns its index in m.extractUnique.
// The caller must not modify bv after this.
func (m *bvecSet) add(bv bitvec.BitVec) int {
if len(m.uniq)*4 >= len(m.index) {
diff --git a/src/cmd/compile/internal/liveness/plive.go b/src/cmd/compile/internal/liveness/plive.go
index 8d1754c813..53ae797fce 100644
--- a/src/cmd/compile/internal/liveness/plive.go
+++ b/src/cmd/compile/internal/liveness/plive.go
@@ -17,12 +17,14 @@ package liveness
import (
"crypto/md5"
"fmt"
+ "sort"
"strings"
"cmd/compile/internal/base"
"cmd/compile/internal/bitvec"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
+ "cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssa"
"cmd/compile/internal/typebits"
"cmd/compile/internal/types"
@@ -174,14 +176,14 @@ type progeffectscache struct {
initialized bool
}
-// ShouldTrack reports whether the liveness analysis
+// shouldTrack reports whether the liveness analysis
// should track the variable n.
// We don't care about variables that have no pointers,
// nor do we care about non-local variables,
// nor do we care about empty structs (handled by the pointer check),
// nor do we care about the fake PAUTOHEAP variables.
-func ShouldTrack(n *ir.Name) bool {
- return (n.Class == ir.PAUTO || n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT) && n.Type().HasPointers()
+func shouldTrack(n *ir.Name) bool {
+ return (n.Class == ir.PAUTO && n.Esc() != ir.EscHeap || n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT) && n.Type().HasPointers()
}
// getvariables returns the list of on-stack variables that we need to track
@@ -189,7 +191,7 @@ func ShouldTrack(n *ir.Name) bool {
func getvariables(fn *ir.Func) ([]*ir.Name, map[*ir.Name]int32) {
var vars []*ir.Name
for _, n := range fn.Dcl {
- if ShouldTrack(n) {
+ if shouldTrack(n) {
vars = append(vars, n)
}
}
@@ -788,7 +790,7 @@ func (lv *liveness) epilogue() {
if n.Class == ir.PPARAM {
continue // ok
}
- base.Fatalf("bad live variable at entry of %v: %L", lv.fn.Nname, n)
+ base.FatalfAt(n.Pos(), "bad live variable at entry of %v: %L", lv.fn.Nname, n)
}
// Record live variables.
@@ -1060,7 +1062,7 @@ func (lv *liveness) printDebug() {
func (lv *liveness) emit() (argsSym, liveSym *obj.LSym) {
// Size args bitmaps to be just large enough to hold the largest pointer.
// First, find the largest Xoffset node we care about.
- // (Nodes without pointers aren't in lv.vars; see livenessShouldTrack.)
+ // (Nodes without pointers aren't in lv.vars; see shouldTrack.)
var maxArgNode *ir.Name
for _, n := range lv.vars {
switch n.Class {
@@ -1179,9 +1181,54 @@ func Compute(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *objw.Progs) Map
p.To.Name = obj.NAME_EXTERN
p.To.Sym = fninfo.GCLocals
+ if x := lv.emitStackObjects(); x != nil {
+ p := pp.Prog(obj.AFUNCDATA)
+ p.From.SetConst(objabi.FUNCDATA_StackObjects)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = x
+ }
+
return lv.livenessMap
}
+func (lv *liveness) emitStackObjects() *obj.LSym {
+ var vars []*ir.Name
+ for _, n := range lv.fn.Dcl {
+ if shouldTrack(n) && n.Addrtaken() && n.Esc() != ir.EscHeap {
+ vars = append(vars, n)
+ }
+ }
+ if len(vars) == 0 {
+ return nil
+ }
+
+ // Sort variables from lowest to highest address.
+ sort.Slice(vars, func(i, j int) bool { return vars[i].FrameOffset() < vars[j].FrameOffset() })
+
+ // Populate the stack object data.
+ // Format must match runtime/stack.go:stackObjectRecord.
+ x := base.Ctxt.Lookup(lv.fn.LSym.Name + ".stkobj")
+ lv.fn.LSym.Func().StackObjects = x
+ off := 0
+ off = objw.Uintptr(x, off, uint64(len(vars)))
+ for _, v := range vars {
+ // Note: arguments and return values have non-negative Xoffset,
+ // in which case the offset is relative to argp.
+ // Locals have a negative Xoffset, in which case the offset is relative to varp.
+ off = objw.Uintptr(x, off, uint64(v.FrameOffset()))
+ off = objw.SymPtr(x, off, reflectdata.TypeLinksym(v.Type()), 0)
+ }
+
+ if base.Flag.Live != 0 {
+ for _, v := range vars {
+ base.WarnfAt(v.Pos(), "stack object %v %v", v, v.Type())
+ }
+ }
+
+ return x
+}
+
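For illustration only (not from the patch), a local that would receive a stack object record under the rule above, assuming escape analysis keeps it on the stack: it contains a pointer, is address-taken, and does not escape. Building with the -live debug flag should then print a "stack object" note for it.

	package p

	import "strings"

	func Join(parts []string) string {
		var acc []string // slice header contains a pointer, so acc is tracked
		p := &acc        // acc is address-taken, but p never leaves the function
		for _, s := range parts {
			*p = append(*p, s)
		}
		return strings.Join(acc, ",")
	}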
// isfat reports whether a variable of type t needs multiple assignments to initialize.
// For example:
//
diff --git a/src/cmd/compile/internal/noder/decl.go b/src/cmd/compile/internal/noder/decl.go
index 4d20f410bc..c41b77c100 100644
--- a/src/cmd/compile/internal/noder/decl.go
+++ b/src/cmd/compile/internal/noder/decl.go
@@ -48,6 +48,9 @@ func (g *irgen) importDecl(p *noder, decl *syntax.ImportDecl) {
if ipkg == ir.Pkgs.Unsafe {
p.importedUnsafe = true
}
+ if ipkg.Path == "embed" {
+ p.importedEmbed = true
+ }
}
func (g *irgen) constDecl(out *ir.Nodes, decl *syntax.ConstDecl) {
@@ -164,9 +167,8 @@ func (g *irgen) varDecl(out *ir.Nodes, decl *syntax.VarDecl) {
if decl.Pragma != nil {
pragma := decl.Pragma.(*pragmas)
- if err := varEmbed(g.makeXPos, names[0], decl, pragma); err != nil {
- base.ErrorfAt(g.pos(decl), "%s", err.Error())
- }
+ // TODO(mdempsky): Plumb noder.importedEmbed through to here.
+ varEmbed(g.makeXPos, names[0], decl, pragma, true)
g.reportUnused(pragma)
}
diff --git a/src/cmd/compile/internal/noder/import.go b/src/cmd/compile/internal/noder/import.go
index ac7bc8bbf0..aa02c01cff 100644
--- a/src/cmd/compile/internal/noder/import.go
+++ b/src/cmd/compile/internal/noder/import.go
@@ -428,7 +428,7 @@ func clearImports() {
if types.IsDotAlias(s) {
// throw away top-level name left over
// from previous import . "x"
- // We'll report errors after type checking in checkDotImports.
+ // We'll report errors after type checking in CheckDotImports.
s.Def = nil
continue
}
diff --git a/src/cmd/compile/internal/noder/lex.go b/src/cmd/compile/internal/noder/lex.go
index 1095f3344a..cdca9e55f3 100644
--- a/src/cmd/compile/internal/noder/lex.go
+++ b/src/cmd/compile/internal/noder/lex.go
@@ -28,6 +28,7 @@ const (
ir.Nosplit |
ir.Noinline |
ir.NoCheckPtr |
+ ir.RegisterParams | // TODO remove after register abi is working
ir.CgoUnsafeArgs |
ir.UintptrEscapes |
ir.Systemstack |
@@ -79,6 +80,8 @@ func pragmaFlag(verb string) ir.PragmaFlag {
// in the argument list.
// Used in syscall/dll_windows.go.
return ir.UintptrEscapes
+ case "go:registerparams": // TODO remove after register abi is working
+ return ir.RegisterParams
case "go:notinheap":
return ir.NotInHeap
}
diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go
index e1ae2569e0..887205b9fb 100644
--- a/src/cmd/compile/internal/noder/noder.go
+++ b/src/cmd/compile/internal/noder/noder.go
@@ -5,7 +5,6 @@
package noder
import (
- "errors"
"fmt"
"go/constant"
"go/token"
@@ -136,7 +135,15 @@ func LoadPackage(filenames []string) {
for i := 0; i < len(typecheck.Target.Decls); i++ {
n := typecheck.Target.Decls[i]
if n.Op() == ir.ODCLFUNC {
+ if base.Flag.W > 1 {
+ s := fmt.Sprintf("\nbefore typecheck %v", n)
+ ir.Dump(s, n)
+ }
typecheck.FuncBody(n.(*ir.Func))
+ if base.Flag.W > 1 {
+ s := fmt.Sprintf("\nafter typecheck %v", n)
+ ir.Dump(s, n)
+ }
fcount++
}
}
@@ -385,9 +392,7 @@ func (p *noder) varDecl(decl *syntax.VarDecl) []ir.Node {
exprs := p.exprList(decl.Values)
if pragma, ok := decl.Pragma.(*pragmas); ok {
- if err := varEmbed(p.makeXPos, names[0], decl, pragma); err != nil {
- p.errorAt(decl.Pos(), "%s", err.Error())
- }
+ varEmbed(p.makeXPos, names[0], decl, pragma, p.importedEmbed)
p.checkUnused(pragma)
}
@@ -555,7 +560,7 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node {
}
} else {
f.Shortname = name
- name = ir.BlankNode.Sym() // filled in by typecheckfunc
+ name = ir.BlankNode.Sym() // filled in by tcFunc
}
f.Nname = ir.NewNameAt(p.pos(fun.Name), name)
@@ -1001,7 +1006,7 @@ func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []ir.Node {
if s == nil {
} else if s.Op() == ir.OBLOCK && len(s.(*ir.BlockStmt).List) > 0 {
// Inline non-empty block.
- // Empty blocks must be preserved for checkreturn.
+ // Empty blocks must be preserved for CheckReturn.
nodes = append(nodes, s.(*ir.BlockStmt).List...)
} else {
nodes = append(nodes, s)
@@ -1774,7 +1779,7 @@ func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node {
fn := ir.NewFunc(p.pos(expr))
fn.SetIsHiddenClosure(ir.CurFunc != nil)
- fn.Nname = ir.NewNameAt(p.pos(expr), ir.BlankNode.Sym()) // filled in by typecheckclosure
+ fn.Nname = ir.NewNameAt(p.pos(expr), ir.BlankNode.Sym()) // filled in by tcClosure
fn.Nname.Func = fn
fn.Nname.Ntype = xtype
fn.Nname.Defn = fn
@@ -1829,29 +1834,35 @@ func oldname(s *types.Sym) ir.Node {
return n
}
-func varEmbed(makeXPos func(syntax.Pos) src.XPos, name *ir.Name, decl *syntax.VarDecl, pragma *pragmas) error {
+func varEmbed(makeXPos func(syntax.Pos) src.XPos, name *ir.Name, decl *syntax.VarDecl, pragma *pragmas, haveEmbed bool) {
if pragma.Embeds == nil {
- return nil
+ return
}
pragmaEmbeds := pragma.Embeds
pragma.Embeds = nil
+ pos := makeXPos(pragmaEmbeds[0].Pos)
- if base.Flag.Cfg.Embed.Patterns == nil {
- return errors.New("invalid go:embed: build system did not supply embed configuration")
+ if !haveEmbed {
+ base.ErrorfAt(pos, "go:embed only allowed in Go files that import \"embed\"")
+ return
}
if len(decl.NameList) > 1 {
- return errors.New("go:embed cannot apply to multiple vars")
+ base.ErrorfAt(pos, "go:embed cannot apply to multiple vars")
+ return
}
if decl.Values != nil {
- return errors.New("go:embed cannot apply to var with initializer")
+ base.ErrorfAt(pos, "go:embed cannot apply to var with initializer")
+ return
}
if decl.Type == nil {
// Should not happen, since Values == nil now.
- return errors.New("go:embed cannot apply to var without type")
+ base.ErrorfAt(pos, "go:embed cannot apply to var without type")
+ return
}
if typecheck.DeclContext != ir.PEXTERN {
- return errors.New("go:embed cannot apply to var inside func")
+ base.ErrorfAt(pos, "go:embed cannot apply to var inside func")
+ return
}
var embeds []ir.Embed
@@ -1860,5 +1871,4 @@ func varEmbed(makeXPos func(syntax.Pos) src.XPos, name *ir.Name, decl *syntax.Va
}
typecheck.Target.Embeds = append(typecheck.Target.Embeds, name)
name.Embed = &embeds
- return nil
}
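A minimal file that satisfies the checks above (illustrative paths; assumes a templates/ directory next to the source file): a single package-scope var with no initializer, in a file that imports "embed". Placing //go:embed on a var inside a function, on a multi-variable declaration, on a var with an initializer, or in a file that does not import "embed" now produces the errors added here.

	package assets

	import "embed"

	//go:embed templates/*.html
	var Templates embed.FS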
diff --git a/src/cmd/compile/internal/objw/prog.go b/src/cmd/compile/internal/objw/prog.go
index 8d24f94aa5..b5ac4dda1e 100644
--- a/src/cmd/compile/internal/objw/prog.go
+++ b/src/cmd/compile/internal/objw/prog.go
@@ -205,7 +205,7 @@ func (pp *Progs) Append(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16,
func (pp *Progs) SetText(fn *ir.Func) {
if pp.Text != nil {
- base.Fatalf("Progs.settext called twice")
+ base.Fatalf("Progs.SetText called twice")
}
ptxt := pp.Prog(obj.ATEXT)
pp.Text = ptxt
diff --git a/src/cmd/compile/internal/pkginit/init.go b/src/cmd/compile/internal/pkginit/init.go
index 5bc66c7e1b..7cad262214 100644
--- a/src/cmd/compile/internal/pkginit/init.go
+++ b/src/cmd/compile/internal/pkginit/init.go
@@ -60,10 +60,10 @@ func Task() *ir.Name {
fns = append(fns, fn.Linksym())
}
if typecheck.InitTodoFunc.Dcl != nil {
- // We only generate temps using initTodo if there
+ // We only generate temps using InitTodoFunc if there
// are package-scope initialization statements, so
// something's weird if we get here.
- base.Fatalf("initTodo still has declarations")
+ base.Fatalf("InitTodoFunc still has declarations")
}
typecheck.InitTodoFunc = nil
diff --git a/src/cmd/compile/internal/pkginit/initorder.go b/src/cmd/compile/internal/pkginit/initorder.go
index bdefd594ff..97d69629fb 100644
--- a/src/cmd/compile/internal/pkginit/initorder.go
+++ b/src/cmd/compile/internal/pkginit/initorder.go
@@ -113,7 +113,7 @@ func initOrder(l []ir.Node) []ir.Node {
// first.
base.ExitIfErrors()
- o.findInitLoopAndExit(firstLHS(n), new([]*ir.Name))
+ o.findInitLoopAndExit(firstLHS(n), new([]*ir.Name), new(ir.NameSet))
base.Fatalf("initialization unfinished, but failed to identify loop")
}
}
@@ -184,10 +184,7 @@ func (o *InitOrder) flushReady(initialize func(ir.Node)) {
// path points to a slice used for tracking the sequence of
// variables/functions visited. Using a pointer to a slice allows the
// slice capacity to grow and limit reallocations.
-func (o *InitOrder) findInitLoopAndExit(n *ir.Name, path *[]*ir.Name) {
- // We implement a simple DFS loop-finding algorithm. This
- // could be faster, but initialization cycles are rare.
-
+func (o *InitOrder) findInitLoopAndExit(n *ir.Name, path *[]*ir.Name, ok *ir.NameSet) {
for i, x := range *path {
if x == n {
reportInitLoopAndExit((*path)[i:])
@@ -204,12 +201,19 @@ func (o *InitOrder) findInitLoopAndExit(n *ir.Name, path *[]*ir.Name) {
*path = append(*path, n)
for _, ref := range refers {
// Short-circuit variables that were initialized.
- if ref.Class == ir.PEXTERN && o.order[ref.Defn] == orderDone {
+ if ref.Class == ir.PEXTERN && o.order[ref.Defn] == orderDone || ok.Has(ref) {
continue
}
- o.findInitLoopAndExit(ref, path)
+ o.findInitLoopAndExit(ref, path, ok)
}
+
+ // n is not involved in a cycle.
+ // Record that fact to avoid checking it again when reached another way,
+ // or else this traversal will take exponential time traversing all paths
+ // through the part of the package's call graph implicated in the cycle.
+ ok.Add(n)
+
*path = (*path)[:len(*path)-1]
}
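A small illustration of the inputs this traversal handles (not from the patch). The new ok set matters when many package-level variables refer to one another: each name proven cycle-free is recorded and skipped on later paths, so the search stays roughly linear instead of re-exploring every path.

	// This package is rejected with an "initialization cycle" error,
	// which findInitLoopAndExit locates and reports.
	package p

	var a = b + 1
	var b = a + 1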
diff --git a/src/cmd/compile/internal/reflectdata/alg.go b/src/cmd/compile/internal/reflectdata/alg.go
index d576053753..fcd824f164 100644
--- a/src/cmd/compile/internal/reflectdata/alg.go
+++ b/src/cmd/compile/internal/reflectdata/alg.go
@@ -689,7 +689,7 @@ func EqString(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
// eqtab must be evaluated before eqdata, and shortcircuiting is required.
func EqInterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
if !types.Identical(s.Type(), t.Type()) {
- base.Fatalf("eqinterface %v %v", s.Type(), t.Type())
+ base.Fatalf("EqInterface %v %v", s.Type(), t.Type())
}
// func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
// func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go
index 30857fff6d..3ff14c87f4 100644
--- a/src/cmd/compile/internal/reflectdata/reflect.go
+++ b/src/cmd/compile/internal/reflectdata/reflect.go
@@ -32,7 +32,7 @@ type itabEntry struct {
// symbols of each method in
// the itab, sorted by byte offset;
- // filled in by peekitabs
+ // filled in by CompileITabs
entries []*obj.LSym
}
@@ -401,7 +401,7 @@ func dimportpath(p *types.Pkg) {
}
// If we are compiling the runtime package, there are two runtime packages around
- // -- localpkg and Runtimepkg. We don't want to produce import path symbols for
+ // -- localpkg and Pkgs.Runtime. We don't want to produce import path symbols for
// both of them, so just produce one for localpkg.
if base.Ctxt.Pkgpath == "runtime" && p == ir.Pkgs.Runtime {
return
@@ -562,7 +562,7 @@ func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int {
}
for _, a := range m {
- WriteType(a.type_)
+ writeType(a.type_)
}
ot = dgopkgpathOff(lsym, ot, typePkg(t))
@@ -613,7 +613,7 @@ func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int {
nsym := dname(a.name.Name, "", pkg, exported)
ot = objw.SymPtrOff(lsym, ot, nsym)
- ot = dmethodptrOff(lsym, ot, WriteType(a.mtype))
+ ot = dmethodptrOff(lsym, ot, writeType(a.mtype))
ot = dmethodptrOff(lsym, ot, a.isym)
ot = dmethodptrOff(lsym, ot, a.tsym)
}
@@ -690,7 +690,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
if t.Sym() != nil || methods(tptr) != nil {
sptrWeak = false
}
- sptr = WriteType(tptr)
+ sptr = writeType(tptr)
}
gcsym, useGCProg, ptrdata := dgcsym(t)
@@ -791,7 +791,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
// TrackSym returns the symbol for tracking use of field/method f, assumed
// to be a member of struct/interface type t.
func TrackSym(t *types.Type, f *types.Field) *obj.LSym {
- return ir.Pkgs.Track.Lookup(t.ShortString() + "." + f.Sym.Name).Linksym()
+ return base.PkgLinksym("go.track", t.ShortString()+"."+f.Sym.Name, obj.ABI0)
}
func TypeSymPrefix(prefix string, t *types.Type) *types.Sym {
@@ -811,7 +811,7 @@ func TypeSymPrefix(prefix string, t *types.Type) *types.Sym {
func TypeSym(t *types.Type) *types.Sym {
if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
- base.Fatalf("typenamesym %v", t)
+ base.Fatalf("TypeSym %v", t)
}
if t.Kind() == types.TFUNC && t.Recv() != nil {
base.Fatalf("misuse of method type: %v", t)
@@ -836,39 +836,22 @@ func TypeLinksym(t *types.Type) *obj.LSym {
}
func TypePtr(t *types.Type) *ir.AddrExpr {
- s := TypeSym(t)
- if s.Def == nil {
- n := ir.NewNameAt(src.NoXPos, s)
- n.SetType(types.Types[types.TUINT8])
- n.Class = ir.PEXTERN
- n.SetTypecheck(1)
- s.Def = n
- }
-
- n := typecheck.NodAddr(ir.AsNode(s.Def))
- n.SetType(types.NewPtr(s.Def.Type()))
- n.SetTypecheck(1)
- return n
+ n := ir.NewLinksymExpr(base.Pos, TypeLinksym(t), types.Types[types.TUINT8])
+ return typecheck.Expr(typecheck.NodAddr(n)).(*ir.AddrExpr)
}
func ITabAddr(t, itype *types.Type) *ir.AddrExpr {
if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
- base.Fatalf("itabname(%v, %v)", t, itype)
- }
- s := ir.Pkgs.Itab.Lookup(t.ShortString() + "," + itype.ShortString())
- if s.Def == nil {
- n := typecheck.NewName(s)
- n.SetType(types.Types[types.TUINT8])
- n.Class = ir.PEXTERN
- n.SetTypecheck(1)
- s.Def = n
- itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: n.Linksym()})
- }
-
- n := typecheck.NodAddr(ir.AsNode(s.Def))
- n.SetType(types.NewPtr(s.Def.Type()))
- n.SetTypecheck(1)
- return n
+ base.Fatalf("ITabAddr(%v, %v)", t, itype)
+ }
+ s, existed := ir.Pkgs.Itab.LookupOK(t.ShortString() + "," + itype.ShortString())
+ if !existed {
+ itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()})
+ }
+
+ lsym := s.Linksym()
+ n := ir.NewLinksymExpr(base.Pos, lsym, types.Types[types.TUINT8])
+ return typecheck.Expr(typecheck.NodAddr(n)).(*ir.AddrExpr)
}
// needkeyupdate reports whether map updates with t as a key
@@ -933,10 +916,10 @@ func formalType(t *types.Type) *types.Type {
return t
}
-func WriteType(t *types.Type) *obj.LSym {
+func writeType(t *types.Type) *obj.LSym {
t = formalType(t)
if t.IsUntyped() {
- base.Fatalf("dtypesym %v", t)
+ base.Fatalf("writeType %v", t)
}
s := types.TypeSym(t)
@@ -983,9 +966,9 @@ func WriteType(t *types.Type) *obj.LSym {
case types.TARRAY:
// ../../../../runtime/type.go:/arrayType
- s1 := WriteType(t.Elem())
+ s1 := writeType(t.Elem())
t2 := types.NewSlice(t.Elem())
- s2 := WriteType(t2)
+ s2 := writeType(t2)
ot = dcommontype(lsym, t)
ot = objw.SymPtr(lsym, ot, s1, 0)
ot = objw.SymPtr(lsym, ot, s2, 0)
@@ -994,14 +977,14 @@ func WriteType(t *types.Type) *obj.LSym {
case types.TSLICE:
// ../../../../runtime/type.go:/sliceType
- s1 := WriteType(t.Elem())
+ s1 := writeType(t.Elem())
ot = dcommontype(lsym, t)
ot = objw.SymPtr(lsym, ot, s1, 0)
ot = dextratype(lsym, ot, t, 0)
case types.TCHAN:
// ../../../../runtime/type.go:/chanType
- s1 := WriteType(t.Elem())
+ s1 := writeType(t.Elem())
ot = dcommontype(lsym, t)
ot = objw.SymPtr(lsym, ot, s1, 0)
ot = objw.Uintptr(lsym, ot, uint64(t.ChanDir()))
@@ -1009,15 +992,15 @@ func WriteType(t *types.Type) *obj.LSym {
case types.TFUNC:
for _, t1 := range t.Recvs().Fields().Slice() {
- WriteType(t1.Type)
+ writeType(t1.Type)
}
isddd := false
for _, t1 := range t.Params().Fields().Slice() {
isddd = t1.IsDDD()
- WriteType(t1.Type)
+ writeType(t1.Type)
}
for _, t1 := range t.Results().Fields().Slice() {
- WriteType(t1.Type)
+ writeType(t1.Type)
}
ot = dcommontype(lsym, t)
@@ -1037,20 +1020,20 @@ func WriteType(t *types.Type) *obj.LSym {
// Array of rtype pointers follows funcType.
for _, t1 := range t.Recvs().Fields().Slice() {
- ot = objw.SymPtr(lsym, ot, WriteType(t1.Type), 0)
+ ot = objw.SymPtr(lsym, ot, writeType(t1.Type), 0)
}
for _, t1 := range t.Params().Fields().Slice() {
- ot = objw.SymPtr(lsym, ot, WriteType(t1.Type), 0)
+ ot = objw.SymPtr(lsym, ot, writeType(t1.Type), 0)
}
for _, t1 := range t.Results().Fields().Slice() {
- ot = objw.SymPtr(lsym, ot, WriteType(t1.Type), 0)
+ ot = objw.SymPtr(lsym, ot, writeType(t1.Type), 0)
}
case types.TINTER:
m := imethods(t)
n := len(m)
for _, a := range m {
- WriteType(a.type_)
+ writeType(a.type_)
}
// ../../../../runtime/type.go:/interfaceType
@@ -1078,14 +1061,14 @@ func WriteType(t *types.Type) *obj.LSym {
nsym := dname(a.name.Name, "", pkg, exported)
ot = objw.SymPtrOff(lsym, ot, nsym)
- ot = objw.SymPtrOff(lsym, ot, WriteType(a.type_))
+ ot = objw.SymPtrOff(lsym, ot, writeType(a.type_))
}
// ../../../../runtime/type.go:/mapType
case types.TMAP:
- s1 := WriteType(t.Key())
- s2 := WriteType(t.Elem())
- s3 := WriteType(MapBucketType(t))
+ s1 := writeType(t.Key())
+ s2 := writeType(t.Elem())
+ s3 := writeType(MapBucketType(t))
hasher := genhash(t.Key())
ot = dcommontype(lsym, t)
@@ -1132,7 +1115,7 @@ func WriteType(t *types.Type) *obj.LSym {
}
// ../../../../runtime/type.go:/ptrType
- s1 := WriteType(t.Elem())
+ s1 := writeType(t.Elem())
ot = dcommontype(lsym, t)
ot = objw.SymPtr(lsym, ot, s1, 0)
@@ -1143,7 +1126,7 @@ func WriteType(t *types.Type) *obj.LSym {
case types.TSTRUCT:
fields := t.Fields().Slice()
for _, t1 := range fields {
- WriteType(t1.Type)
+ writeType(t1.Type)
}
// All non-exported struct field names within a struct
@@ -1171,7 +1154,7 @@ func WriteType(t *types.Type) *obj.LSym {
for _, f := range fields {
// ../../../../runtime/type.go:/structField
ot = dnameField(lsym, ot, spkg, f)
- ot = objw.SymPtr(lsym, ot, WriteType(f.Type), 0)
+ ot = objw.SymPtr(lsym, ot, writeType(f.Type), 0)
offsetAnon := uint64(f.Offset) << 1
if offsetAnon>>1 != uint64(f.Offset) {
base.Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
@@ -1275,7 +1258,7 @@ func genfun(t, it *types.Type) []*obj.LSym {
}
// ITabSym uses the information gathered in
-// peekitabs to de-virtualize interface methods.
+// CompileITabs to de-virtualize interface methods.
// Since this is called by the SSA backend, it shouldn't
// generate additional Nodes, Syms, etc.
func ITabSym(it *obj.LSym, offset int64) *obj.LSym {
@@ -1312,7 +1295,7 @@ func NeedRuntimeType(t *types.Type) {
}
func WriteRuntimeTypes() {
- // Process signatset. Use a loop, as dtypesym adds
+ // Process signatset. Use a loop, as writeType adds
// entries to signatset while it is being processed.
signats := make([]typeAndStr, len(signatslice))
for len(signatslice) > 0 {
@@ -1326,9 +1309,9 @@ func WriteRuntimeTypes() {
sort.Sort(typesByString(signats))
for _, ts := range signats {
t := ts.t
- WriteType(t)
+ writeType(t)
if t.Sym() != nil {
- WriteType(types.NewPtr(t))
+ writeType(types.NewPtr(t))
}
}
}
@@ -1345,8 +1328,8 @@ func WriteTabs() {
// _ [4]byte
// fun [1]uintptr // variable sized
// }
- o := objw.SymPtr(i.lsym, 0, WriteType(i.itype), 0)
- o = objw.SymPtr(i.lsym, o, WriteType(i.t), 0)
+ o := objw.SymPtr(i.lsym, 0, writeType(i.itype), 0)
+ o = objw.SymPtr(i.lsym, o, writeType(i.t), 0)
o = objw.Uint32(i.lsym, o, types.TypeHash(i.t)) // copy of type hash
o += 4 // skip unused field
for _, fn := range genfun(i.t, i.itype) {
@@ -1373,7 +1356,7 @@ func WriteTabs() {
if p.Class != ir.PFUNC {
t = types.NewPtr(t)
}
- tsym := WriteType(t)
+ tsym := writeType(t)
ot = objw.SymPtrOff(s, ot, nsym)
ot = objw.SymPtrOff(s, ot, tsym)
// Plugin exports symbols as interfaces. Mark their types
@@ -1407,16 +1390,16 @@ func WriteBasicTypes() {
// but using runtime means fewer copies in object files.
if base.Ctxt.Pkgpath == "runtime" {
for i := types.Kind(1); i <= types.TBOOL; i++ {
- WriteType(types.NewPtr(types.Types[i]))
+ writeType(types.NewPtr(types.Types[i]))
}
- WriteType(types.NewPtr(types.Types[types.TSTRING]))
- WriteType(types.NewPtr(types.Types[types.TUNSAFEPTR]))
+ writeType(types.NewPtr(types.Types[types.TSTRING]))
+ writeType(types.NewPtr(types.Types[types.TUNSAFEPTR]))
// emit type structs for error and func(error) string.
// The latter is the type of an auto-generated wrapper.
- WriteType(types.NewPtr(types.ErrorType))
+ writeType(types.NewPtr(types.ErrorType))
- WriteType(types.NewSignature(types.NoPkg, nil, []*types.Field{
+ writeType(types.NewSignature(types.NoPkg, nil, []*types.Field{
types.NewField(base.Pos, nil, types.ErrorType),
}, []*types.Field{
types.NewField(base.Pos, nil, types.Types[types.TSTRING]),
@@ -1426,11 +1409,12 @@ func WriteBasicTypes() {
dimportpath(ir.Pkgs.Runtime)
if base.Flag.Race {
- dimportpath(ir.Pkgs.Race)
+ dimportpath(types.NewPkg("runtime/race", ""))
}
if base.Flag.MSan {
- dimportpath(ir.Pkgs.Msan)
+ dimportpath(types.NewPkg("runtime/msan", ""))
}
+
dimportpath(types.NewPkg("main", ""))
}
}
@@ -1617,13 +1601,13 @@ func (p *gcProg) emit(t *types.Type, offset int64) {
}
switch t.Kind() {
default:
- base.Fatalf("GCProg.emit: unexpected type %v", t)
+ base.Fatalf("gcProg.emit: unexpected type %v", t)
case types.TSTRING:
p.w.Ptr(offset / int64(types.PtrSize))
case types.TINTER:
- // Note: the first word isn't a pointer. See comment in plive.go:onebitwalktype1.
+ // Note: the first word isn't a pointer. See comment in typebits.Set
p.w.Ptr(offset/int64(types.PtrSize) + 1)
case types.TSLICE:
@@ -1632,7 +1616,7 @@ func (p *gcProg) emit(t *types.Type, offset int64) {
case types.TARRAY:
if t.NumElem() == 0 {
// should have been handled by haspointers check above
- base.Fatalf("GCProg.emit: empty array")
+ base.Fatalf("gcProg.emit: empty array")
}
// Flatten array-of-array-of-array to just a big array by multiplying counts.
@@ -1670,18 +1654,9 @@ func ZeroAddr(size int64) ir.Node {
if ZeroSize < size {
ZeroSize = size
}
- s := ir.Pkgs.Map.Lookup("zero")
- if s.Def == nil {
- x := typecheck.NewName(s)
- x.SetType(types.Types[types.TUINT8])
- x.Class = ir.PEXTERN
- x.SetTypecheck(1)
- s.Def = x
- }
- z := typecheck.NodAddr(ir.AsNode(s.Def))
- z.SetType(types.NewPtr(types.Types[types.TUINT8]))
- z.SetTypecheck(1)
- return z
+ lsym := base.PkgLinksym("go.map", "zero", obj.ABI0)
+ x := ir.NewLinksymExpr(base.Pos, lsym, types.Types[types.TUINT8])
+ return typecheck.Expr(typecheck.NodAddr(x))
}
func CollectPTabs() {
@@ -1794,7 +1769,7 @@ func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym {
}
as := ir.NewAssignStmt(base.Pos, nthis, typecheck.ConvNop(left, rcvr))
fn.Body.Append(as)
- fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.ORETJMP, ir.MethodSym(methodrcvr, method.Sym)))
+ fn.Body.Append(ir.NewTailCallStmt(base.Pos, method.Nname.(*ir.Name)))
} else {
fn.SetWrapper(true) // ignore frame for panic+recover matching
call := ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil)
diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go
index 530918da4d..0cf9931dbc 100644
--- a/src/cmd/compile/internal/ssa/deadstore.go
+++ b/src/cmd/compile/internal/ssa/deadstore.go
@@ -139,7 +139,7 @@ func dse(f *Func) {
func elimDeadAutosGeneric(f *Func) {
addr := make(map[*Value]*ir.Name) // values that the address of the auto reaches
elim := make(map[*Value]*ir.Name) // values that could be eliminated if the auto is
- used := make(map[*ir.Name]bool) // used autos that must be kept
+ var used ir.NameSet // used autos that must be kept
// visit the value and report whether any of the maps are updated
visit := func(v *Value) (changed bool) {
@@ -178,8 +178,8 @@ func elimDeadAutosGeneric(f *Func) {
if !ok || n.Class != ir.PAUTO {
return
}
- if !used[n] {
- used[n] = true
+ if !used.Has(n) {
+ used.Add(n)
changed = true
}
return
@@ -212,8 +212,8 @@ func elimDeadAutosGeneric(f *Func) {
if v.Type.IsMemory() || v.Type.IsFlags() || v.Op == OpPhi || v.MemoryArg() != nil {
for _, a := range args {
if n, ok := addr[a]; ok {
- if !used[n] {
- used[n] = true
+ if !used.Has(n) {
+ used.Add(n)
changed = true
}
}
@@ -224,7 +224,7 @@ func elimDeadAutosGeneric(f *Func) {
// Propagate any auto addresses through v.
var node *ir.Name
for _, a := range args {
- if n, ok := addr[a]; ok && !used[n] {
+ if n, ok := addr[a]; ok && !used.Has(n) {
if node == nil {
node = n
} else if node != n {
@@ -233,7 +233,7 @@ func elimDeadAutosGeneric(f *Func) {
// multiple pointers (e.g. NeqPtr, Phi etc.).
// This is rare, so just propagate the first
// value to keep things simple.
- used[n] = true
+ used.Add(n)
changed = true
}
}
@@ -249,7 +249,7 @@ func elimDeadAutosGeneric(f *Func) {
}
if addr[v] != node {
// This doesn't happen in practice, but catch it just in case.
- used[node] = true
+ used.Add(node)
changed = true
}
return
@@ -269,8 +269,8 @@ func elimDeadAutosGeneric(f *Func) {
}
// keep the auto if its address reaches a control value
for _, c := range b.ControlValues() {
- if n, ok := addr[c]; ok && !used[n] {
- used[n] = true
+ if n, ok := addr[c]; ok && !used.Has(n) {
+ used.Add(n)
changed = true
}
}
@@ -282,7 +282,7 @@ func elimDeadAutosGeneric(f *Func) {
// Eliminate stores to unread autos.
for v, n := range elim {
- if used[n] {
+ if used.Has(n) {
continue
}
// replace with OpCopy
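The hunk above swaps a map[*ir.Name]bool for an ir.NameSet, whose zero value is immediately usable and which expresses membership rather than a bool lookup. Below is a minimal standalone sketch of such a set; the map-of-empty-struct representation is an assumption for illustration (the real type lives in cmd/compile/internal/ir), and only the Has/Add behavior is taken from the code above.

package main

import "fmt"

// Name stands in for *ir.Name, which is internal to the compiler.
type Name struct{ sym string }

// NameSet is a set of *Name. The zero value is an empty, usable set.
type NameSet map[*Name]struct{}

// Has reports whether the set contains n.
func (s NameSet) Has(n *Name) bool {
	_, ok := s[n]
	return ok
}

// Add adds n to the set, allocating the map on first use.
func (s *NameSet) Add(n *Name) {
	if *s == nil {
		*s = make(NameSet)
	}
	(*s)[n] = struct{}{}
}

func main() {
	var used NameSet // mirrors `var used ir.NameSet` in elimDeadAutosGeneric
	n := &Name{sym: "x"}
	if !used.Has(n) {
		used.Add(n)
	}
	fmt.Println(used.Has(n)) // true
}

Using struct{} values also rules out the half-initialized `used[n] = false` state that a bool map allows.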
diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go
index e1c657d4a4..af994d4b5b 100644
--- a/src/cmd/compile/internal/ssa/expand_calls.go
+++ b/src/cmd/compile/internal/ssa/expand_calls.go
@@ -24,6 +24,10 @@ type offsetKey struct {
pt *types.Type
}
+func isBlockMultiValueExit(b *Block) bool {
+ return (b.Kind == BlockRet || b.Kind == BlockRetJmp) && len(b.Controls) > 0 && b.Controls[0].Op == OpMakeResult
+}
+
// expandCalls converts LE (Late Expansion) calls that act like they receive value args into a lower-level form
// that is more oriented to a platform's ABI. The SelectN operations that extract results are rewritten into
// more appropriate forms, and any StructMake or ArrayMake inputs are decomposed until non-struct values are
@@ -194,7 +198,8 @@ func expandCalls(f *Func) {
}
break
}
- if leaf.Op == OpIData {
+ switch leaf.Op {
+ case OpIData, OpStructSelect, OpArraySelect:
leafType = removeTrivialWrapperTypes(leaf.Type)
}
aux := selector.Aux
@@ -624,6 +629,24 @@ func expandCalls(f *Func) {
return x
}
+ rewriteDereference := func(b *Block, base, a, mem *Value, offset, size int64, typ *types.Type, pos src.XPos) *Value {
+ source := a.Args[0]
+ dst := offsetFrom(base, offset, source.Type)
+ if a.Uses == 1 && a.Block == b {
+ a.reset(OpMove)
+ a.Pos = pos
+ a.Type = types.TypeMem
+ a.Aux = typ
+ a.AuxInt = size
+ a.SetArgs3(dst, source, mem)
+ mem = a
+ } else {
+ mem = b.NewValue3A(pos, OpMove, types.TypeMem, typ, dst, source, mem)
+ mem.AuxInt = size
+ }
+ return mem
+ }
+
// rewriteArgs removes all the Args from a call and converts the call args into appropriate
// stores (or later, register movement). Extra args for interface and closure calls are ignored,
// but removed.
@@ -631,7 +654,7 @@ func expandCalls(f *Func) {
// Thread the stores on the memory arg
aux := v.Aux.(*AuxCall)
pos := v.Pos.WithNotStmt()
- m0 := v.Args[len(v.Args)-1]
+ m0 := v.MemoryArg()
mem := m0
for i, a := range v.Args {
if i < firstArg {
@@ -647,20 +670,7 @@ func expandCalls(f *Func) {
}
// "Dereference" of addressed (probably not-SSA-eligible) value becomes Move
// TODO this will be more complicated with registers in the picture.
- source := a.Args[0]
- dst := f.ConstOffPtrSP(source.Type, aux.OffsetOfArg(auxI), sp)
- if a.Uses == 1 && a.Block == v.Block {
- a.reset(OpMove)
- a.Pos = pos
- a.Type = types.TypeMem
- a.Aux = aux.TypeOfArg(auxI)
- a.AuxInt = aux.SizeOfArg(auxI)
- a.SetArgs3(dst, source, mem)
- mem = a
- } else {
- mem = v.Block.NewValue3A(pos, OpMove, types.TypeMem, aux.TypeOfArg(auxI), dst, source, mem)
- mem.AuxInt = aux.SizeOfArg(auxI)
- }
+ mem = rewriteDereference(v.Block, sp, a, mem, aux.OffsetOfArg(auxI), aux.SizeOfArg(auxI), aux.TypeOfArg(auxI), pos)
} else {
if debug {
fmt.Printf("storeArg %s, %v, %d\n", a.LongString(), aux.TypeOfArg(auxI), aux.OffsetOfArg(auxI))
@@ -692,6 +702,45 @@ func expandCalls(f *Func) {
v.SetArgs2(code, mem)
}
}
+ if isBlockMultiValueExit(b) {
+ // Very similar to code in rewriteArgs, but results instead of args.
+ v := b.Controls[0]
+ m0 := v.MemoryArg()
+ mem := m0
+ aux := f.OwnAux
+ pos := v.Pos.WithNotStmt()
+ for j, a := range v.Args {
+ i := int64(j)
+ if a == m0 {
+ break
+ }
+ auxType := aux.TypeOfResult(i)
+ auxBase := b.NewValue2A(v.Pos, OpLocalAddr, types.NewPtr(auxType), aux.results[i].Name, sp, mem)
+ auxOffset := int64(0)
+ auxSize := aux.SizeOfResult(i)
+ if a.Op == OpDereference {
+ // Avoid a self-move, and if one is detected try to remove the already-inserted VarDef for the assignment that won't happen.
+ if dAddr, dMem := a.Args[0], a.Args[1]; dAddr.Op == OpLocalAddr && dAddr.Args[0].Op == OpSP &&
+ dAddr.Args[1] == dMem && dAddr.Aux == aux.results[i].Name {
+ if dMem.Op == OpVarDef && dMem.Aux == dAddr.Aux {
+ dMem.copyOf(dMem.MemoryArg()) // elide the VarDef
+ }
+ continue
+ }
+ mem = rewriteDereference(v.Block, auxBase, a, mem, auxOffset, auxSize, auxType, pos)
+ } else {
+ if a.Op == OpLoad && a.Args[0].Op == OpLocalAddr {
+ addr := a.Args[0]
+ if addr.MemoryArg() == a.MemoryArg() && addr.Aux == aux.results[i].Name {
+ continue
+ }
+ }
+ mem = storeArgOrLoad(v.Pos, b, auxBase, a, mem, aux.TypeOfResult(i), auxOffset)
+ }
+ }
+ b.SetControl(mem)
+ v.reset(OpInvalid) // otherwise it can have a mem operand which will fail check(), even though it is dead.
+ }
}
for i, name := range f.Names {
diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go
index e6c4798a78..de99a8d4af 100644
--- a/src/cmd/compile/internal/ssa/func.go
+++ b/src/cmd/compile/internal/ssa/func.go
@@ -58,6 +58,11 @@ type Func struct {
// of keys to make iteration order deterministic.
Names []LocalSlot
+ // RegArgs is a slice of register-memory pairs that must be spilled and unspilled in the uncommon path of function entry.
+ RegArgs []ArgPair
+ // AuxCall describing parameters and results for this function.
+ OwnAux *AuxCall
+
// WBLoads is a list of Blocks that branch on the write
// barrier flag. Safe-points are disabled from the OpLoad that
// reads the write-barrier flag until the control flow rejoins
@@ -771,7 +776,7 @@ func DebugNameMatch(evname, name string) bool {
}
func (f *Func) spSb() (sp, sb *Value) {
- initpos := f.Entry.Pos
+ initpos := src.NoXPos // These are originally created with no position in ssa.go; if they are optimized out then recreated, should be the same.
for _, v := range f.Entry.Values {
if v.Op == OpSB {
sb = v
@@ -780,7 +785,7 @@ func (f *Func) spSb() (sp, sb *Value) {
sp = v
}
if sb != nil && sp != nil {
- break
+ return
}
}
if sb == nil {
diff --git a/src/cmd/compile/internal/ssa/gen/386.rules b/src/cmd/compile/internal/ssa/gen/386.rules
index fbc12fd672..df03cb71a6 100644
--- a/src/cmd/compile/internal/ssa/gen/386.rules
+++ b/src/cmd/compile/internal/ssa/gen/386.rules
@@ -475,7 +475,7 @@
(CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)]))
// Canonicalize the order of arguments to comparisons - helps with CSE.
-(CMP(L|W|B) x y) && x.ID > y.ID => (InvertFlags (CMP(L|W|B) y x))
+(CMP(L|W|B) x y) && canonLessThan(x,y) => (InvertFlags (CMP(L|W|B) y x))
// strength reduction
// Assumes that the following costs from https://gmplib.org/~tege/x86-timing.pdf:
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index a866a967b9..7d46266411 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -916,7 +916,7 @@
(CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)]))
// Canonicalize the order of arguments to comparisons - helps with CSE.
-(CMP(Q|L|W|B) x y) && x.ID > y.ID => (InvertFlags (CMP(Q|L|W|B) y x))
+(CMP(Q|L|W|B) x y) && canonLessThan(x,y) => (InvertFlags (CMP(Q|L|W|B) y x))
// Using MOVZX instead of AND is cheaper.
(AND(Q|L)const [ 0xFF] x) => (MOVBQZX x)
diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules
index 11c36b5da3..de0df363e4 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM.rules
@@ -507,7 +507,7 @@
(TEQ x (MOVWconst [c])) => (TEQconst [c] x)
// Canonicalize the order of arguments to comparisons - helps with CSE.
-(CMP x y) && x.ID > y.ID => (InvertFlags (CMP y x))
+(CMP x y) && canonLessThan(x,y) => (InvertFlags (CMP y x))
// don't extend after proper load
// MOVWreg instruction is not emitted if src and dst registers are same, but it ensures the type.
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules
index 3f4d0c1c52..a0e2a0d5e2 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules
@@ -1151,7 +1151,7 @@
(CMPW (MOVDconst [c]) x) => (InvertFlags (CMPWconst [int32(c)] x))
// Canonicalize the order of arguments to comparisons - helps with CSE.
-((CMP|CMPW) x y) && x.ID > y.ID => (InvertFlags ((CMP|CMPW) y x))
+((CMP|CMPW) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW) y x))
// mul-neg => mneg
(NEG (MUL x y)) => (MNEG x y)
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
index 87db2b7c9d..b0bc9c78ff 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
@@ -478,20 +478,24 @@ func init() {
// pseudo-ops
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem.
- {name: "Equal", argLength: 1, reg: readflags}, // bool, true flags encode x==y false otherwise.
- {name: "NotEqual", argLength: 1, reg: readflags}, // bool, true flags encode x!=y false otherwise.
- {name: "LessThan", argLength: 1, reg: readflags}, // bool, true flags encode signed x<y false otherwise.
- {name: "LessEqual", argLength: 1, reg: readflags}, // bool, true flags encode signed x<=y false otherwise.
- {name: "GreaterThan", argLength: 1, reg: readflags}, // bool, true flags encode signed x>y false otherwise.
- {name: "GreaterEqual", argLength: 1, reg: readflags}, // bool, true flags encode signed x>=y false otherwise.
- {name: "LessThanU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x<y false otherwise.
- {name: "LessEqualU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x<=y false otherwise.
- {name: "GreaterThanU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x>y false otherwise.
- {name: "GreaterEqualU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x>=y false otherwise.
- {name: "LessThanF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x<y false otherwise.
- {name: "LessEqualF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x<=y false otherwise.
- {name: "GreaterThanF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x>y false otherwise.
- {name: "GreaterEqualF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x>=y false otherwise.
+ {name: "Equal", argLength: 1, reg: readflags}, // bool, true flags encode x==y false otherwise.
+ {name: "NotEqual", argLength: 1, reg: readflags}, // bool, true flags encode x!=y false otherwise.
+ {name: "LessThan", argLength: 1, reg: readflags}, // bool, true flags encode signed x<y false otherwise.
+ {name: "LessEqual", argLength: 1, reg: readflags}, // bool, true flags encode signed x<=y false otherwise.
+ {name: "GreaterThan", argLength: 1, reg: readflags}, // bool, true flags encode signed x>y false otherwise.
+ {name: "GreaterEqual", argLength: 1, reg: readflags}, // bool, true flags encode signed x>=y false otherwise.
+ {name: "LessThanU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x<y false otherwise.
+ {name: "LessEqualU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x<=y false otherwise.
+ {name: "GreaterThanU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x>y false otherwise.
+ {name: "GreaterEqualU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x>=y false otherwise.
+ {name: "LessThanF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x<y false otherwise.
+ {name: "LessEqualF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x<=y false otherwise.
+ {name: "GreaterThanF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x>y false otherwise.
+ {name: "GreaterEqualF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x>=y false otherwise.
+ {name: "NotLessThanF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x>=y || x is unordered with y, false otherwise.
+ {name: "NotLessEqualF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x>y || x is unordered with y, false otherwise.
+ {name: "NotGreaterThanF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x<=y || x is unordered with y, false otherwise.
+ {name: "NotGreaterEqualF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x<y || x is unordered with y, false otherwise.
// duffzero
// arg0 = address of memory to zero
// arg1 = mem
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules
index c064046172..a762be65d4 100644
--- a/src/cmd/compile/internal/ssa/gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules
@@ -1088,7 +1088,7 @@
(CMPWU (MOVDconst [c]) y) && isU16Bit(c) => (InvertFlags (CMPWUconst y [int32(c)]))
// Canonicalize the order of arguments to comparisons - helps with CSE.
-((CMP|CMPW|CMPU|CMPWU) x y) && x.ID > y.ID => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
+((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
// ISEL auxInt values 0=LT 1=GT 2=EQ arg2 ? arg0 : arg1
// ISEL auxInt values 4=GE 5=LE 6=NE arg2 ? arg1 : arg0
diff --git a/src/cmd/compile/internal/ssa/gen/S390X.rules b/src/cmd/compile/internal/ssa/gen/S390X.rules
index 384f2e807e..c3421da0a2 100644
--- a/src/cmd/compile/internal/ssa/gen/S390X.rules
+++ b/src/cmd/compile/internal/ssa/gen/S390X.rules
@@ -785,7 +785,7 @@
=> (RISBGZ x {s390x.NewRotateParams(r.Start, r.Start, -r.Start&63)})
// Canonicalize the order of arguments to comparisons - helps with CSE.
-((CMP|CMPW|CMPU|CMPWU) x y) && x.ID > y.ID => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
+((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
// Use sign/zero extend instead of RISBGZ.
(RISBGZ x {r}) && r == s390x.NewRotateParams(56, 63, 0) => (MOVBZreg x)
diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules
index 81568b7b7a..1784923224 100644
--- a/src/cmd/compile/internal/ssa/gen/generic.rules
+++ b/src/cmd/compile/internal/ssa/gen/generic.rules
@@ -2512,7 +2512,7 @@
(Move {t1} [s] dst tmp1 midmem:(Move {t2} [s] tmp2 src _))
&& t1.Compare(t2) == types.CMPeq
&& isSamePtr(tmp1, tmp2)
- && isStackPtr(src)
+ && isStackPtr(src) && !isVolatile(src)
&& disjoint(src, s, tmp2, s)
&& (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
=> (Move {t1} [s] dst src midmem)
@@ -2521,7 +2521,7 @@
(Move {t1} [s] dst tmp1 midmem:(VarDef (Move {t2} [s] tmp2 src _)))
&& t1.Compare(t2) == types.CMPeq
&& isSamePtr(tmp1, tmp2)
- && isStackPtr(src)
+ && isStackPtr(src) && !isVolatile(src)
&& disjoint(src, s, tmp2, s)
&& (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
=> (Move {t1} [s] dst src midmem)
diff --git a/src/cmd/compile/internal/ssa/html.go b/src/cmd/compile/internal/ssa/html.go
index a9d52fa4ee..c06b5808e1 100644
--- a/src/cmd/compile/internal/ssa/html.go
+++ b/src/cmd/compile/internal/ssa/html.go
@@ -9,9 +9,9 @@ import (
"cmd/internal/src"
"fmt"
"html"
+ exec "internal/execabs"
"io"
"os"
- "os/exec"
"path/filepath"
"strconv"
"strings"
diff --git a/src/cmd/compile/internal/ssa/location.go b/src/cmd/compile/internal/ssa/location.go
index 69f90d9ab4..4cd0ac8d77 100644
--- a/src/cmd/compile/internal/ssa/location.go
+++ b/src/cmd/compile/internal/ssa/location.go
@@ -87,3 +87,29 @@ func (t LocPair) String() string {
}
return fmt.Sprintf("<%s,%s>", n0, n1)
}
+
+type ArgPair struct {
+ reg *Register
+ mem LocalSlot
+}
+
+func (ap *ArgPair) Reg() int16 {
+ return ap.reg.objNum
+}
+
+func (ap *ArgPair) Type() *types.Type {
+ return ap.mem.Type
+}
+
+func (ap *ArgPair) Mem() *LocalSlot {
+ return &ap.mem
+}
+
+func (t ArgPair) String() string {
+ n0 := "nil"
+ if t.reg != nil {
+ n0 = t.reg.String()
+ }
+ n1 := t.mem.String()
+ return fmt.Sprintf("<%s,%s>", n0, n1)
+}
diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go
index 5e6ce2b508..c64b145107 100644
--- a/src/cmd/compile/internal/ssa/op.go
+++ b/src/cmd/compile/internal/ssa/op.go
@@ -5,6 +5,7 @@
package ssa
import (
+ "cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
"fmt"
@@ -70,7 +71,8 @@ type auxType int8
type Param struct {
Type *types.Type
- Offset int32 // TODO someday this will be a register
+ Offset int32 // Offset of Param if not in a register.
+ Name *ir.Name // For OwnAux, need to prepend stores with Vardefs
}
type AuxCall struct {
@@ -199,6 +201,12 @@ func ClosureAuxCall(args []Param, results []Param) *AuxCall {
func (*AuxCall) CanBeAnSSAAux() {}
+// OwnAuxCall returns a function's own AuxCall
+func OwnAuxCall(args []Param, results []Param) *AuxCall {
+ // TODO if this remains identical to ClosureAuxCall above after new ABI is done, should deduplicate.
+ return &AuxCall{Fn: nil, args: args, results: results}
+}
+
const (
auxNone auxType = iota
auxBool // auxInt is 0/1 for false/true
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index 83d35cf7e1..e590f6ba5d 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -1564,6 +1564,10 @@ const (
OpARM64LessEqualF
OpARM64GreaterThanF
OpARM64GreaterEqualF
+ OpARM64NotLessThanF
+ OpARM64NotLessEqualF
+ OpARM64NotGreaterThanF
+ OpARM64NotGreaterEqualF
OpARM64DUFFZERO
OpARM64LoweredZero
OpARM64DUFFCOPY
@@ -20799,6 +20803,42 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "NotLessThanF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NotLessEqualF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NotGreaterThanF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NotGreaterEqualF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
name: "DUFFZERO",
auxType: auxInt64,
argLen: 2,
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index 9abfe0938b..e82aa84cdf 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -521,6 +521,18 @@ func shiftIsBounded(v *Value) bool {
return v.AuxInt != 0
}
+// canonLessThan returns whether x is "ordered" less than y, for purposes of normalizing
+// generated code as much as possible.
+func canonLessThan(x, y *Value) bool {
+ if x.Op != y.Op {
+ return x.Op < y.Op
+ }
+ if !x.Pos.SameFileAndLine(y.Pos) {
+ return x.Pos.Before(y.Pos)
+ }
+ return x.ID < y.ID
+}
+
// truncate64Fto32F converts a float64 value to a float32 preserving the bit pattern
// of the mantissa. It will panic if the truncation results in lost information.
func truncate64Fto32F(f float64) float32 {
@@ -984,9 +996,10 @@ func flagArg(v *Value) *Value {
}
// arm64Negate finds the complement to an ARM64 condition code,
-// for example Equal -> NotEqual or LessThan -> GreaterEqual
+// for example !Equal -> NotEqual or !LessThan -> GreaterEqual
//
-// TODO: add floating-point conditions
+// For floating point, it's more subtle because NaN is unordered. We do
+// !LessThanF -> NotLessThanF, the latter takes care of NaNs.
func arm64Negate(op Op) Op {
switch op {
case OpARM64LessThan:
@@ -1010,13 +1023,21 @@ func arm64Negate(op Op) Op {
case OpARM64NotEqual:
return OpARM64Equal
case OpARM64LessThanF:
- return OpARM64GreaterEqualF
- case OpARM64GreaterThanF:
- return OpARM64LessEqualF
+ return OpARM64NotLessThanF
+ case OpARM64NotLessThanF:
+ return OpARM64LessThanF
case OpARM64LessEqualF:
+ return OpARM64NotLessEqualF
+ case OpARM64NotLessEqualF:
+ return OpARM64LessEqualF
+ case OpARM64GreaterThanF:
+ return OpARM64NotGreaterThanF
+ case OpARM64NotGreaterThanF:
return OpARM64GreaterThanF
case OpARM64GreaterEqualF:
- return OpARM64LessThanF
+ return OpARM64NotGreaterEqualF
+ case OpARM64NotGreaterEqualF:
+ return OpARM64GreaterEqualF
default:
panic("unreachable")
}
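The new NotLessThanF, NotLessEqualF, NotGreaterThanF and NotGreaterEqualF opcodes exist because IEEE floating-point comparisons involving NaN are all false, so the negation of a float "less than" is not "greater or equal". A plain-Go illustration of the asymmetry (ordinary user code, not compiler internals):

package main

import (
	"fmt"
	"math"
)

func main() {
	x, y := math.NaN(), 1.0
	fmt.Println(x < y)    // false: NaN is unordered with everything
	fmt.Println(!(x < y)) // true: the condition NotLessThanF must encode
	fmt.Println(x >= y)   // false: so GreaterEqualF is not the negation of LessThanF
}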
@@ -1027,8 +1048,6 @@ func arm64Negate(op Op) Op {
// that the same result would be produced if the arguments
// to the flag-generating instruction were reversed, e.g.
// (InvertFlags (CMP x y)) -> (CMP y x)
-//
-// TODO: add floating-point conditions
func arm64Invert(op Op) Op {
switch op {
case OpARM64LessThan:
@@ -1057,6 +1076,14 @@ func arm64Invert(op Op) Op {
return OpARM64GreaterEqualF
case OpARM64GreaterEqualF:
return OpARM64LessEqualF
+ case OpARM64NotLessThanF:
+ return OpARM64NotGreaterThanF
+ case OpARM64NotGreaterThanF:
+ return OpARM64NotLessThanF
+ case OpARM64NotLessEqualF:
+ return OpARM64NotGreaterEqualF
+ case OpARM64NotGreaterEqualF:
+ return OpARM64NotLessEqualF
default:
panic("unreachable")
}
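The canonLessThan helper added above replaces the bare x.ID > y.ID test used by the comparison-canonicalization rewrite rules; ordering first by opcode and source position, and only then by ID, presumably keeps the chosen operand orientation stable when unrelated changes shuffle value IDs. A toy model of the same ordering, using simplified stand-in fields rather than the real *ssa.Value:

package main

import "fmt"

// value is a stand-in for *ssa.Value with only the fields the ordering needs.
type value struct {
	op   int // opcode
	line int // source position, flattened to a line number for this sketch
	id   int // value ID, assigned in construction order
}

// canonLess mirrors the shape of canonLessThan: opcode, then position, then ID.
func canonLess(x, y value) bool {
	if x.op != y.op {
		return x.op < y.op
	}
	if x.line != y.line {
		return x.line < y.line
	}
	return x.id < y.id
}

func main() {
	a := value{op: 2, line: 10, id: 7}
	b := value{op: 2, line: 10, id: 3}
	// The rules rewrite (CMP x y) to (InvertFlags (CMP y x)) when canonLess(x, y):
	fmt.Println(canonLess(a, b)) // false: (CMP a b) is already canonical
	fmt.Println(canonLess(b, a)) // true:  (CMP b a) becomes InvertFlags (CMP a b)
	// Both forms now share the single value (CMP a b), which CSE can merge.
}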
diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go
index 2acdccd568..4e7fdb9e63 100644
--- a/src/cmd/compile/internal/ssa/rewrite386.go
+++ b/src/cmd/compile/internal/ssa/rewrite386.go
@@ -1785,12 +1785,12 @@ func rewriteValue386_Op386CMPB(v *Value) bool {
return true
}
// match: (CMPB x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPB y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(Op386InvertFlags)
@@ -2078,12 +2078,12 @@ func rewriteValue386_Op386CMPL(v *Value) bool {
return true
}
// match: (CMPL x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPL y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(Op386InvertFlags)
@@ -2386,12 +2386,12 @@ func rewriteValue386_Op386CMPW(v *Value) bool {
return true
}
// match: (CMPW x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPW y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(Op386InvertFlags)
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index 75d4ff7357..db2dc7a004 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -6749,12 +6749,12 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
return true
}
// match: (CMPB x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPB y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpAMD64InvertFlags)
@@ -7135,12 +7135,12 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
return true
}
// match: (CMPL x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPL y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpAMD64InvertFlags)
@@ -7544,12 +7544,12 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
return true
}
// match: (CMPQ x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPQ y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpAMD64InvertFlags)
@@ -8106,12 +8106,12 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
return true
}
// match: (CMPW x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPW y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpAMD64InvertFlags)
diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go
index d9d439fa63..c958aae2c4 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM.go
@@ -3728,12 +3728,12 @@ func rewriteValueARM_OpARMCMP(v *Value) bool {
return true
}
// match: (CMP x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMP y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpARMInvertFlags)
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index 5d5e526add..ff1156d901 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -2772,12 +2772,12 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool {
return true
}
// match: (CMP x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMP y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpARM64InvertFlags)
@@ -2941,12 +2941,12 @@ func rewriteValueARM64_OpARM64CMPW(v *Value) bool {
return true
}
// match: (CMPW x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPW y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpARM64InvertFlags)
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
index 455f9b1388..98f748e5fa 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -4777,12 +4777,12 @@ func rewriteValuePPC64_OpPPC64CMP(v *Value) bool {
return true
}
// match: (CMP x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMP y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpPPC64InvertFlags)
@@ -4834,12 +4834,12 @@ func rewriteValuePPC64_OpPPC64CMPU(v *Value) bool {
return true
}
// match: (CMPU x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPU y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpPPC64InvertFlags)
@@ -4964,12 +4964,12 @@ func rewriteValuePPC64_OpPPC64CMPW(v *Value) bool {
return true
}
// match: (CMPW x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPW y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpPPC64InvertFlags)
@@ -5045,12 +5045,12 @@ func rewriteValuePPC64_OpPPC64CMPWU(v *Value) bool {
return true
}
// match: (CMPWU x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPWU y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpPPC64InvertFlags)
diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go
index a9722b820c..b52a1b6745 100644
--- a/src/cmd/compile/internal/ssa/rewriteS390X.go
+++ b/src/cmd/compile/internal/ssa/rewriteS390X.go
@@ -6332,12 +6332,12 @@ func rewriteValueS390X_OpS390XCMP(v *Value) bool {
return true
}
// match: (CMP x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMP y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpS390XInvertFlags)
@@ -6389,12 +6389,12 @@ func rewriteValueS390X_OpS390XCMPU(v *Value) bool {
return true
}
// match: (CMPU x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPU y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpS390XInvertFlags)
@@ -6624,12 +6624,12 @@ func rewriteValueS390X_OpS390XCMPW(v *Value) bool {
return true
}
// match: (CMPW x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPW y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpS390XInvertFlags)
@@ -6721,12 +6721,12 @@ func rewriteValueS390X_OpS390XCMPWU(v *Value) bool {
return true
}
// match: (CMPWU x y)
- // cond: x.ID > y.ID
+ // cond: canonLessThan(x,y)
// result: (InvertFlags (CMPWU y x))
for {
x := v_0
y := v_1
- if !(x.ID > y.ID) {
+ if !(canonLessThan(x, y)) {
break
}
v.reset(OpS390XInvertFlags)
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
index 4cb9a8f328..958e24d29f 100644
--- a/src/cmd/compile/internal/ssa/rewritegeneric.go
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -13637,7 +13637,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool {
return true
}
// match: (Move {t1} [s] dst tmp1 midmem:(Move {t2} [s] tmp2 src _))
- // cond: t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
+ // cond: t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
// result: (Move {t1} [s] dst src midmem)
for {
s := auxIntToInt64(v.AuxInt)
@@ -13651,7 +13651,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool {
t2 := auxToType(midmem.Aux)
src := midmem.Args[1]
tmp2 := midmem.Args[0]
- if !(t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) {
+ if !(t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) {
break
}
v.reset(OpMove)
@@ -13661,7 +13661,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool {
return true
}
// match: (Move {t1} [s] dst tmp1 midmem:(VarDef (Move {t2} [s] tmp2 src _)))
- // cond: t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
+ // cond: t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
// result: (Move {t1} [s] dst src midmem)
for {
s := auxIntToInt64(v.AuxInt)
@@ -13679,7 +13679,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool {
t2 := auxToType(midmem_0.Aux)
src := midmem_0.Args[1]
tmp2 := midmem_0.Args[0]
- if !(t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) {
+ if !(t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) {
break
}
v.reset(OpMove)
diff --git a/src/cmd/compile/internal/ssagen/abi.go b/src/cmd/compile/internal/ssagen/abi.go
index dc27ec3a29..5bebce1db5 100644
--- a/src/cmd/compile/internal/ssagen/abi.go
+++ b/src/cmd/compile/internal/ssagen/abi.go
@@ -138,13 +138,12 @@ func ReadSymABIs(file, myimportpath string) {
// For body-less functions, we only create the LSym; for functions
// with bodies call a helper to setup up / populate the LSym.
func InitLSym(f *ir.Func, hasBody bool) {
- staticdata.NeedFuncSym(f.Sym())
-
// FIXME: for new-style ABI wrappers, we set up the lsym at the
// point the wrapper is created.
if f.LSym != nil && base.Flag.ABIWrap {
return
}
+ staticdata.NeedFuncSym(f.Sym())
selectLSym(f, hasBody)
if hasBody {
setupTextLSym(f, 0)
@@ -155,18 +154,18 @@ func InitLSym(f *ir.Func, hasBody bool) {
// makes calls to helpers to create ABI wrappers if needed.
func selectLSym(f *ir.Func, hasBody bool) {
if f.LSym != nil {
- base.FatalfAt(f.Pos(), "Func.initLSym called twice on %v", f)
+ base.FatalfAt(f.Pos(), "InitLSym called twice on %v", f)
}
if nam := f.Nname; !ir.IsBlank(nam) {
var wrapperABI obj.ABI
needABIWrapper := false
- defABI, hasDefABI := symabiDefs[nam.Sym().LinksymName()]
+ defABI, hasDefABI := symabiDefs[nam.Linksym().Name]
if hasDefABI && defABI == obj.ABI0 {
// Symbol is defined as ABI0. Create an
// Internal -> ABI0 wrapper.
- f.LSym = nam.Sym().LinksymABI0()
+ f.LSym = nam.LinksymABI(obj.ABI0)
needABIWrapper, wrapperABI = true, obj.ABIInternal
} else {
f.LSym = nam.Linksym()
@@ -302,8 +301,9 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) {
// extra work in typecheck/walk/ssa, might want to add a new node
// OTAILCALL or something to this effect.
var tail ir.Node
- if tfn.Type().NumResults() == 0 && tfn.Type().NumParams() == 0 && tfn.Type().NumRecvs() == 0 {
- tail = ir.NewBranchStmt(base.Pos, ir.ORETJMP, f.Nname.Sym())
+ if tfn.Type().NumResults() == 0 && tfn.Type().NumParams() == 0 && tfn.Type().NumRecvs() == 0 && !(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) {
+
+ tail = ir.NewTailCallStmt(base.Pos, f.Nname)
} else {
call := ir.NewCallExpr(base.Pos, ir.OCALL, f.Nname, nil)
call.Args = ir.ParamNames(tfn.Type())
diff --git a/src/cmd/compile/internal/ssagen/nowb.go b/src/cmd/compile/internal/ssagen/nowb.go
index 60cfb2f698..a2434366a0 100644
--- a/src/cmd/compile/internal/ssagen/nowb.go
+++ b/src/cmd/compile/internal/ssagen/nowb.go
@@ -45,7 +45,7 @@ type nowritebarrierrecCall struct {
}
// newNowritebarrierrecChecker creates a nowritebarrierrecChecker. It
-// must be called before transformclosure and walk.
+// must be called before walk
func newNowritebarrierrecChecker() *nowritebarrierrecChecker {
c := &nowritebarrierrecChecker{
extraCalls: make(map[*ir.Func][]nowritebarrierrecCall),
@@ -54,7 +54,7 @@ func newNowritebarrierrecChecker() *nowritebarrierrecChecker {
// Find all systemstack calls and record their targets. In
// general, flow analysis can't see into systemstack, but it's
// important to handle it for this check, so we model it
- // directly. This has to happen before transformclosure since
+ // directly. This has to happen before transforming closures in walk since
// it's a lot harder to work out the argument after.
for _, n := range typecheck.Target.Decls {
if n.Op() != ir.ODCLFUNC {
diff --git a/src/cmd/compile/internal/ssagen/pgen.go b/src/cmd/compile/internal/ssagen/pgen.go
index bbd319d735..182f8408cf 100644
--- a/src/cmd/compile/internal/ssagen/pgen.go
+++ b/src/cmd/compile/internal/ssagen/pgen.go
@@ -96,7 +96,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) {
if n, ok := v.Aux.(*ir.Name); ok {
switch n.Class {
case ir.PPARAM, ir.PPARAMOUT:
- // Don't modify nodfp; it is a global.
+ // Don't modify RegFP; it is a global.
if n != ir.RegFP {
n.SetUsed(true)
}
diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
index 54bde20f1c..ecf3294082 100644
--- a/src/cmd/compile/internal/ssagen/ssa.go
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -124,6 +124,7 @@ func InitConfig() {
ir.Syms.X86HasFMA = typecheck.LookupRuntimeVar("x86HasFMA") // bool
ir.Syms.ARMHasVFPv4 = typecheck.LookupRuntimeVar("armHasVFPv4") // bool
ir.Syms.ARM64HasATOMICS = typecheck.LookupRuntimeVar("arm64HasATOMICS") // bool
+ ir.Syms.Staticuint64s = typecheck.LookupRuntimeVar("staticuint64s")
ir.Syms.Typedmemclr = typecheck.LookupRuntimeFunc("typedmemclr")
ir.Syms.Typedmemmove = typecheck.LookupRuntimeFunc("typedmemmove")
ir.Syms.Udiv = typecheck.LookupRuntimeVar("udiv") // asm func with special ABI
@@ -356,6 +357,13 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
if fn.Pragma&ir.Nosplit != 0 {
s.f.NoSplit = true
}
+ if fn.Pragma&ir.RegisterParams != 0 { // TODO remove after register abi is working
+ if strings.Contains(name, ".") {
+ base.ErrorfAt(fn.Pos(), "Calls to //go:registerparams method %s won't work, remove the pragma from the declaration.", name)
+ }
+ s.f.Warnl(fn.Pos(), "declared function %v has register params", fn)
+ }
+
s.panics = map[funcLine]*ssa.Block{}
s.softFloat = s.config.SoftFloat
@@ -392,11 +400,20 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
}
if s.hasOpenDefers && len(s.curfn.Exit) > 0 {
// Skip doing open defers if there is any extra exit code (likely
- // copying heap-allocated return values or race detection), since
- // we will not generate that code in the case of the extra
- // deferreturn/ret segment.
+ // race detection), since we will not generate that code in the
+ // case of the extra deferreturn/ret segment.
s.hasOpenDefers = false
}
+ if s.hasOpenDefers {
+ // Similarly, skip if there are any heap-allocated result
+ // parameters that need to be copied back to their stack slots.
+ for _, f := range s.curfn.Type().Results().FieldSlice() {
+ if !f.Nname.(*ir.Name).OnStack() {
+ s.hasOpenDefers = false
+ break
+ }
+ }
+ }
if s.hasOpenDefers &&
s.curfn.NumReturns*s.curfn.NumDefers > 15 {
// Since we are generating defer calls at every exit for
@@ -442,24 +459,15 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
args = append(args, ssa.Param{Type: n.Type(), Offset: int32(n.FrameOffset())})
case ir.PPARAMOUT:
s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
- results = append(results, ssa.Param{Type: n.Type(), Offset: int32(n.FrameOffset())})
- if s.canSSA(n) {
- // Save ssa-able PPARAMOUT variables so we can
- // store them back to the stack at the end of
- // the function.
- s.returns = append(s.returns, n)
- }
+ results = append(results, ssa.Param{Type: n.Type(), Offset: int32(n.FrameOffset()), Name: n})
case ir.PAUTO:
// processed at each use, to prevent Addr coming
// before the decl.
- case ir.PAUTOHEAP:
- // moved to heap - already handled by frontend
- case ir.PFUNC:
- // local function - already handled by frontend
default:
s.Fatalf("local variable with class %v unimplemented", n.Class)
}
}
+ s.f.OwnAux = ssa.OwnAuxCall(args, results)
// Populate SSAable arguments.
for _, n := range fn.Dcl {
@@ -481,38 +489,35 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
}
offset = types.Rnd(offset, typ.Alignment())
- r := s.newValue1I(ssa.OpOffPtr, types.NewPtr(typ), offset, clo)
+ ptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(typ), offset, clo)
offset += typ.Size()
- if n.Byval() && TypeOK(n.Type()) {
- // If it is a small variable captured by value, downgrade it to PAUTO.
- r = s.load(n.Type(), r)
-
+ // If n is a small variable captured by value, promote
+ // it to PAUTO so it can be converted to SSA.
+ //
+ // Note: While we never capture a variable by value if
+ // the user took its address, we may have generated
+ // runtime calls that did (#43701). Since we don't
+ // convert Addrtaken variables to SSA anyway, no point
+ // in promoting them either.
+ if n.Byval() && !n.Addrtaken() && TypeOK(n.Type()) {
n.Class = ir.PAUTO
- } else {
- if !n.Byval() {
- r = s.load(typ, r)
- }
-
- // Declare variable holding address taken from closure.
- addr := ir.NewNameAt(fn.Pos(), &types.Sym{Name: "&" + n.Sym().Name, Pkg: types.LocalPkg})
- addr.SetType(types.NewPtr(n.Type()))
- addr.Class = ir.PAUTO
- addr.SetUsed(true)
- addr.Curfn = fn
- types.CalcSize(addr.Type())
-
- n.Heapaddr = addr
- n = addr
+ fn.Dcl = append(fn.Dcl, n)
+ s.assign(n, s.load(n.Type(), ptr), false, 0)
+ continue
}
- fn.Dcl = append(fn.Dcl, n)
- s.assign(n, r, false, 0)
+ if !n.Byval() {
+ ptr = s.load(typ, ptr)
+ }
+ s.setHeapaddr(fn.Pos(), n, ptr)
}
}
// Convert the AST-based IR to the SSA-based IR
s.stmtList(fn.Enter)
+ s.zeroResults()
+ s.paramsToHeap()
s.stmtList(fn.Body)
// fallthrough to exit
@@ -528,6 +533,8 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
}
}
+ s.f.HTMLWriter.WritePhase("before insert phis", "before insert phis")
+
s.insertPhis()
// Main call to ssa package to compile function
@@ -540,6 +547,100 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
return s.f
}
+// zeroResults zeros the return values at the start of the function.
+// We need to do this very early in the function. Defer might stop a
+// panic and show the return values as they exist at the time of
+// panic. For precise stacks, the garbage collector assumes results
+// are always live, so we need to zero them before any allocations,
+// even allocations to move params/results to the heap.
+func (s *state) zeroResults() {
+ for _, f := range s.curfn.Type().Results().FieldSlice() {
+ n := f.Nname.(*ir.Name)
+ if !n.OnStack() {
+ // The local which points to the return value is the
+ // thing that needs zeroing. This is already handled
+ // by a Needzero annotation in plive.go:(*liveness).epilogue.
+ continue
+ }
+ // Zero the stack location containing f.
+ if typ := n.Type(); TypeOK(typ) {
+ s.assign(n, s.zeroVal(typ), false, 0)
+ } else {
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
+ s.zero(n.Type(), s.decladdrs[n])
+ }
+ }
+}
+
+// paramsToHeap produces code to allocate memory for heap-escaped parameters
+// and to copy non-result parameters' values from the stack.
+func (s *state) paramsToHeap() {
+ do := func(params *types.Type) {
+ for _, f := range params.FieldSlice() {
+ if f.Nname == nil {
+ continue // anonymous or blank parameter
+ }
+ n := f.Nname.(*ir.Name)
+ if ir.IsBlank(n) || n.OnStack() {
+ continue
+ }
+ s.newHeapaddr(n)
+ if n.Class == ir.PPARAM {
+ s.move(n.Type(), s.expr(n.Heapaddr), s.decladdrs[n])
+ }
+ }
+ }
+
+ typ := s.curfn.Type()
+ do(typ.Recvs())
+ do(typ.Params())
+ do(typ.Results())
+}
+
+// newHeapaddr allocates heap memory for n and sets its heap address.
+func (s *state) newHeapaddr(n *ir.Name) {
+ s.setHeapaddr(n.Pos(), n, s.newObject(n.Type()))
+}
+
+// setHeapaddr allocates a new PAUTO variable to store ptr (which must be non-nil)
+// and then sets it as n's heap address.
+func (s *state) setHeapaddr(pos src.XPos, n *ir.Name, ptr *ssa.Value) {
+ if !ptr.Type.IsPtr() || !types.Identical(n.Type(), ptr.Type.Elem()) {
+ base.FatalfAt(n.Pos(), "setHeapaddr %L with type %v", n, ptr.Type)
+ }
+
+ // Declare variable to hold address.
+ addr := ir.NewNameAt(pos, &types.Sym{Name: "&" + n.Sym().Name, Pkg: types.LocalPkg})
+ addr.SetType(types.NewPtr(n.Type()))
+ addr.Class = ir.PAUTO
+ addr.SetUsed(true)
+ addr.Curfn = s.curfn
+ s.curfn.Dcl = append(s.curfn.Dcl, addr)
+ types.CalcSize(addr.Type())
+
+ if n.Class == ir.PPARAMOUT {
+ addr.SetIsOutputParamHeapAddr(true)
+ }
+
+ n.Heapaddr = addr
+ s.assign(addr, ptr, false, 0)
+}
+
+// newObject returns an SSA value denoting new(typ).
+func (s *state) newObject(typ *types.Type) *ssa.Value {
+ if typ.Size() == 0 {
+ return s.newValue1A(ssa.OpAddr, types.NewPtr(typ), ir.Syms.Zerobase, s.sb)
+ }
+ return s.rtcall(ir.Syms.Newobject, true, []*types.Type{types.NewPtr(typ)}, s.reflectType(typ))[0]
+}
+
+// reflectType returns an SSA value representing a pointer to typ's
+// reflection type descriptor.
+func (s *state) reflectType(typ *types.Type) *ssa.Value {
+ lsym := reflectdata.TypeLinksym(typ)
+ return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(types.Types[types.TUINT8]), lsym, s.sb)
+}
+
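The zeroResults comment above points out that a deferred call can observe the named results exactly as they are at panic time, which is why the results are zeroed before any other code, including the heap moves done by paramsToHeap. The user-visible behavior this protects looks like this in ordinary Go:

package main

import "fmt"

func f() (result int) {
	defer func() {
		if recover() != nil {
			// The deferred function sees the named result as it exists at
			// panic time; zeroing results at function entry is what makes
			// this reliably print 0 rather than leftover stack contents.
			fmt.Println("result seen by defer:", result)
		}
	}()
	panic("boom")
}

func main() { f() }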
func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *ir.Func) {
// Read sources of target function fn.
fname := base.Ctxt.PosTable.Pos(fn.Pos()).Filename()
@@ -675,7 +776,7 @@ type state struct {
// all defined variables at the end of each block. Indexed by block ID.
defvars []map[ir.Node]*ssa.Value
- // addresses of PPARAM and PPARAMOUT variables.
+ // addresses of PPARAM and PPARAMOUT variables on the stack.
decladdrs map[*ir.Name]*ssa.Value
// starting values. Memory, stack pointer, and globals pointer
@@ -695,9 +796,6 @@ type state struct {
// Used to deduplicate panic calls.
panics map[funcLine]*ssa.Block
- // list of PPARAMOUT (return) variables.
- returns []*ir.Name
-
cgoUnsafeArgs bool
hasdefer bool // whether the function contains a defer statement
softFloat bool
@@ -1283,8 +1381,8 @@ func (s *state) stmt(n ir.Node) {
case ir.ODCL:
n := n.(*ir.Decl)
- if n.X.Class == ir.PAUTOHEAP {
- s.Fatalf("DCL %v", n)
+ if v := n.X; v.Esc() == ir.EscHeap {
+ s.newHeapaddr(v)
}
case ir.OLABEL:
@@ -1414,10 +1512,10 @@ func (s *state) stmt(n ir.Node) {
// Currently doesn't really work because (*p)[:len(*p)] appears here as:
// tmp = len(*p)
// (*p)[:tmp]
- //if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
+ //if j != nil && (j.Op == OLEN && SameSafeExpr(j.Left, n.Left)) {
// j = nil
//}
- //if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
+ //if k != nil && (k.Op == OCAP && SameSafeExpr(k.Left, n.Left)) {
// k = nil
//}
if i == nil {
@@ -1486,11 +1584,11 @@ func (s *state) stmt(n ir.Node) {
b := s.exit()
b.Pos = s.lastPos.WithIsStmt()
- case ir.ORETJMP:
- n := n.(*ir.BranchStmt)
+ case ir.OTAILCALL:
+ n := n.(*ir.TailCallStmt)
b := s.exit()
b.Kind = ssa.BlockRetJmp // override BlockRet
- b.Aux = callTargetLSym(n.Label, s.curfn.LSym)
+ b.Aux = callTargetLSym(n.Target, s.curfn.LSym)
case ir.OCONTINUE, ir.OBREAK:
n := n.(*ir.BranchStmt)
@@ -1704,6 +1802,7 @@ const shareDeferExits = false
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
+ lateResultLowering := s.f.DebugTest && ssa.LateCallExpansionEnabledWithin(s.f)
if s.hasdefer {
if s.hasOpenDefers {
if shareDeferExits && s.lastDeferExit != nil && len(s.openDefers) == s.lastDeferCount {
@@ -1720,24 +1819,61 @@ func (s *state) exit() *ssa.Block {
}
}
- // Run exit code. Typically, this code copies heap-allocated PPARAMOUT
- // variables back to the stack.
- s.stmtList(s.curfn.Exit)
+ var b *ssa.Block
+ var m *ssa.Value
+ // Do actual return.
+ // These currently turn into self-copies (in many cases).
+ if lateResultLowering {
+ resultFields := s.curfn.Type().Results().FieldSlice()
+ results := make([]*ssa.Value, len(resultFields)+1, len(resultFields)+1)
+ m = s.newValue0(ssa.OpMakeResult, s.f.OwnAux.LateExpansionResultType())
+ // Store SSAable and heap-escaped PPARAMOUT variables back to stack locations.
+ for i, f := range resultFields {
+ n := f.Nname.(*ir.Name)
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
+ if s.canSSA(n) { // result is in some SSA variable
+ results[i] = s.variable(n, n.Type())
+ } else if !n.OnStack() { // result is actually heap allocated
+ ha := s.expr(n.Heapaddr)
+ s.instrumentFields(n.Type(), ha, instrumentRead)
+ results[i] = s.newValue2(ssa.OpDereference, n.Type(), ha, s.mem())
+ } else { // result is not SSA-able; not escaped, so not on heap, but too large for SSA.
+ // Before register ABI this ought to be a self-move, home=dest,
+ // With register ABI, it's still a self-move if parameter is on stack (i.e., too big or overflowed)
+ results[i] = s.newValue2(ssa.OpDereference, n.Type(), s.addr(n), s.mem())
+ }
+ }
- // Store SSAable PPARAMOUT variables back to stack locations.
- for _, n := range s.returns {
- addr := s.decladdrs[n]
- val := s.variable(n, n.Type())
- s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
- s.store(n.Type(), addr, val)
- // TODO: if val is ever spilled, we'd like to use the
- // PPARAMOUT slot for spilling it. That won't happen
- // currently.
- }
+ // Run exit code. Today, this is just racefuncexit, in -race mode.
+ // TODO this seems risky here with a register-ABI, but not clear it is right to do it earlier either.
+ // Spills in register allocation might just fix it.
+ s.stmtList(s.curfn.Exit)
- // Do actual return.
- m := s.mem()
- b := s.endBlock()
+ results[len(results)-1] = s.mem()
+ m.AddArgs(results...)
+ } else {
+ // Store SSAable and heap-escaped PPARAMOUT variables back to stack locations.
+ for _, f := range s.curfn.Type().Results().FieldSlice() {
+ n := f.Nname.(*ir.Name)
+ if s.canSSA(n) {
+ val := s.variable(n, n.Type())
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
+ s.store(n.Type(), s.decladdrs[n], val)
+ } else if !n.OnStack() {
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
+ s.move(n.Type(), s.decladdrs[n], s.expr(n.Heapaddr))
+ } // else, on stack but too large to SSA, the result is already in its destination by construction, so no store needed.
+
+ // TODO: if (SSA) val is ever spilled, we'd like to use the PPARAMOUT slot for spilling it. That won't happen currently.
+ }
+
+ // Run exit code. Today, this is just racefuncexit, in -race mode.
+ s.stmtList(s.curfn.Exit)
+
+ // Do actual return.
+ m = s.mem()
+ }
+ b = s.endBlock()
b.Kind = ssa.BlockRet
b.SetControl(m)
if s.hasdefer && s.hasOpenDefers {
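
The rewritten exit() above now has two return paths: the legacy path stores each SSA-able or heap-escaped result back to its PPARAMOUT slot before returning the memory state, while the late-expansion path gathers every result plus the final memory into a single OpMakeResult aggregate. A toy sketch of that contrast, using plain strings in place of *ssa.Value (legacyReturn and lateReturn are illustrative names, not compiler code):

	package main

	import "fmt"

	type value struct{ desc string }

	// legacyReturn: one store per result, then the final memory state feeds the return block.
	func legacyReturn(results []value) (stores []string, mem string) {
		for _, r := range results {
			stores = append(stores, "store "+r.desc+" -> its PPARAMOUT slot")
		}
		return stores, "mem"
	}

	// lateReturn: no stores; results and the memory state become operands of a
	// single aggregate value consumed by the return block (the OpMakeResult idea).
	func lateReturn(results []value) []string {
		args := make([]string, 0, len(results)+1)
		for _, r := range results {
			args = append(args, r.desc)
		}
		return append(args, "mem")
	}

	func main() {
		rs := []value{{"r0"}, {"r1"}}
		stores, mem := legacyReturn(rs)
		fmt.Println(stores, mem)
		fmt.Println(lateReturn(rs))
	}
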
@@ -2159,15 +2295,10 @@ func (s *state) expr(n ir.Node) *ssa.Value {
if s.canSSA(n) {
return s.variable(n, n.Type())
}
- addr := s.addr(n)
- return s.load(n.Type(), addr)
- case ir.ONAMEOFFSET:
- n := n.(*ir.NameOffsetExpr)
- if s.canSSAName(n.Name_) && TypeOK(n.Type()) {
- return s.variable(n, n.Type())
- }
- addr := s.addr(n)
- return s.load(n.Type(), addr)
+ return s.load(n.Type(), s.addr(n))
+ case ir.OLINKSYMOFFSET:
+ n := n.(*ir.LinksymOffsetExpr)
+ return s.load(n.Type(), s.addr(n))
case ir.ONIL:
n := n.(*ir.NilExpr)
t := n.Type()
@@ -2936,14 +3067,9 @@ func (s *state) expr(n ir.Node) *ssa.Value {
}
return s.zeroVal(n.Type())
- case ir.ONEWOBJ:
+ case ir.ONEW:
n := n.(*ir.UnaryExpr)
- if n.Type().Elem().Size() == 0 {
- return s.newValue1A(ssa.OpAddr, n.Type(), ir.Syms.Zerobase, s.sb)
- }
- typ := s.expr(n.X)
- vv := s.rtcall(ir.Syms.Newobject, true, []*types.Type{n.Type()}, typ)
- return vv[0]
+ return s.newObject(n.Type().Elem())
default:
s.Fatalf("unhandled expr %v", n.Op())
@@ -3260,7 +3386,7 @@ func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask
// If this assignment clobbers an entire local variable, then emit
// OpVarDef so liveness analysis knows the variable is redefined.
- if base, ok := clobberBase(left).(*ir.Name); ok && base.Op() == ir.ONAME && base.Class != ir.PEXTERN && base.Class != ir.PAUTOHEAP && skip == 0 {
+ if base, ok := clobberBase(left).(*ir.Name); ok && base.OnStack() && skip == 0 {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !ir.IsAutoTmp(base))
}
@@ -4366,30 +4492,8 @@ func (s *state) intrinsicCall(n *ir.CallExpr) *ssa.Value {
// intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value {
- // Construct map of temps; see comments in s.call about the structure of n.
- temps := map[ir.Node]*ssa.Value{}
- for _, a := range n.Args {
- if a.Op() != ir.OAS {
- s.Fatalf("non-assignment as a temp function argument %v", a.Op())
- }
- a := a.(*ir.AssignStmt)
- l, r := a.X, a.Y
- if l.Op() != ir.ONAME {
- s.Fatalf("non-ONAME temp function argument %v", a.Op())
- }
- // Evaluate and store to "temporary".
- // Walk ensures these temporaries are dead outside of n.
- temps[l] = s.expr(r)
- }
- args := make([]*ssa.Value, len(n.Rargs))
- for i, n := range n.Rargs {
- // Store a value to an argument slot.
- if x, ok := temps[n]; ok {
- // This is a previously computed temporary.
- args[i] = x
- continue
- }
- // This is an explicit value; evaluate it.
+ args := make([]*ssa.Value, len(n.Args))
+ for i, n := range n.Args {
args[i] = s.expr(n)
}
return args
@@ -4402,13 +4506,6 @@ func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value {
// (as well as the deferBits variable), and this will enable us to run the proper
// defer calls during panics.
func (s *state) openDeferRecord(n *ir.CallExpr) {
- // Do any needed expression evaluation for the args (including the
- // receiver, if any). This may be evaluating something like 'autotmp_3 =
- // once.mutex'. Such a statement will create a mapping in s.vars[] from
- // the autotmp name to the evaluated SSA arg value, but won't do any
- // stores to the stack.
- s.stmtList(n.Args)
-
var args []*ssa.Value
var argNodes []*ir.Name
@@ -4441,7 +4538,7 @@ func (s *state) openDeferRecord(n *ir.CallExpr) {
opendefer.closureNode = opendefer.closure.Aux.(*ir.Name)
opendefer.rcvrNode = opendefer.rcvr.Aux.(*ir.Name)
}
- for _, argn := range n.Rargs {
+ for _, argn := range n.Args {
var v *ssa.Value
if TypeOK(argn.Type()) {
v = s.openDeferSave(nil, argn.Type(), s.expr(argn))
@@ -4667,7 +4764,7 @@ func (s *state) callAddr(n *ir.CallExpr, k callKind) *ssa.Value {
// Returns the address of the return value (or nil if none).
func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Value {
s.prevCall = nil
- var sym *types.Sym // target symbol (if static)
+ var callee *ir.Name // target function (if static)
var closure *ssa.Value // ptr to closure to run (if dynamic)
var codeptr *ssa.Value // ptr to target code (if dynamic)
var rcvr *ssa.Value // receiver to set
@@ -4685,13 +4782,21 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
}
testLateExpansion := false
+ inRegisters := false
switch n.Op() {
case ir.OCALLFUNC:
testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
if k == callNormal && fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC {
fn := fn.(*ir.Name)
- sym = fn.Sym()
+ callee = fn
+ // TODO remove after register abi is working
+ inRegistersImported := fn.Pragma()&ir.RegisterParams != 0
+ inRegistersSamePackage := fn.Func != nil && fn.Func.Pragma&ir.RegisterParams != 0
+ inRegisters = inRegistersImported || inRegistersSamePackage
+ if inRegisters {
+ s.f.Warnl(n.Pos(), "called function %v has register params", callee)
+ }
break
}
closure = s.expr(fn)
@@ -4719,11 +4824,6 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
types.CalcSize(fn.Type())
stksize := fn.Type().ArgWidth() // includes receiver, args, and results
- // Run all assignments of temps.
- // The temps are introduced to avoid overwriting argument
- // slots when arguments themselves require function calls.
- s.stmtList(n.Args)
-
var call *ssa.Value
if k == callDeferStack {
testLateExpansion = ssa.LateCallExpansionEnabledWithin(s.f)
@@ -4757,7 +4857,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
// Then, store all the arguments of the defer call.
ft := fn.Type()
off := t.FieldOff(12)
- args := n.Rargs
+ args := n.Args
// Set receiver (for interface calls). Always a pointer.
if rcvr != nil {
@@ -4832,7 +4932,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
// Write args.
t := n.X.Type()
- args := n.Rargs
+ args := n.Args
if n.Op() == ir.OCALLMETH {
base.Fatalf("OCALLMETH missed by walkCall")
}
@@ -4885,13 +4985,13 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
} else {
call = s.newValue2A(ssa.OpInterCall, types.TypeMem, ssa.InterfaceAuxCall(ACArgs, ACResults), codeptr, s.mem())
}
- case sym != nil:
+ case callee != nil:
if testLateExpansion {
- aux := ssa.StaticAuxCall(callTargetLSym(sym, s.curfn.LSym), ACArgs, ACResults)
+ aux := ssa.StaticAuxCall(callTargetLSym(callee, s.curfn.LSym), ACArgs, ACResults)
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...)
} else {
- call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(callTargetLSym(sym, s.curfn.LSym), ACArgs, ACResults), s.mem())
+ call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(callTargetLSym(callee, s.curfn.LSym), ACArgs, ACResults), s.mem())
}
default:
s.Fatalf("bad call type %v %v", n.Op(), n)
@@ -4987,24 +5087,27 @@ func (s *state) addr(n ir.Node) *ssa.Value {
}
t := types.NewPtr(n.Type())
- var offset int64
+ linksymOffset := func(lsym *obj.LSym, offset int64) *ssa.Value {
+ v := s.entryNewValue1A(ssa.OpAddr, t, lsym, s.sb)
+ // TODO: Make OpAddr use AuxInt as well as Aux.
+ if offset != 0 {
+ v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, offset, v)
+ }
+ return v
+ }
switch n.Op() {
- case ir.ONAMEOFFSET:
- no := n.(*ir.NameOffsetExpr)
- offset = no.Offset_
- n = no.Name_
- fallthrough
+ case ir.OLINKSYMOFFSET:
+ no := n.(*ir.LinksymOffsetExpr)
+ return linksymOffset(no.Linksym, no.Offset_)
case ir.ONAME:
n := n.(*ir.Name)
+ if n.Heapaddr != nil {
+ return s.expr(n.Heapaddr)
+ }
switch n.Class {
case ir.PEXTERN:
// global variable
- v := s.entryNewValue1A(ssa.OpAddr, t, n.Linksym(), s.sb)
- // TODO: Make OpAddr use AuxInt as well as Aux.
- if offset != 0 {
- v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, offset, v)
- }
- return v
+ return linksymOffset(n.Linksym(), 0)
case ir.PPARAM:
// parameter slot
v := s.decladdrs[n]
@@ -5024,8 +5127,6 @@ func (s *state) addr(n ir.Node) *ssa.Value {
// ensure that we reuse symbols for out parameters so
// that cse works on their addresses
return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true)
- case ir.PAUTOHEAP:
- return s.expr(n.Heapaddr)
default:
s.Fatalf("variable address class %v not implemented", n.Class)
return nil
@@ -5126,15 +5227,10 @@ func (s *state) canSSA(n ir.Node) bool {
}
func (s *state) canSSAName(name *ir.Name) bool {
- if name.Addrtaken() {
- return false
- }
- if ir.IsParamHeapCopy(name) {
+ if name.Addrtaken() || !name.OnStack() {
return false
}
switch name.Class {
- case ir.PEXTERN, ir.PAUTOHEAP:
- return false
case ir.PPARAMOUT:
if s.hasdefer {
// TODO: handle this case? Named return values must be
@@ -5160,7 +5256,7 @@ func (s *state) canSSAName(name *ir.Name) bool {
// TODO: try to make more variables SSAable?
}
-// canSSA reports whether variables of type t are SSA-able.
+// TypeOK reports whether variables of type t are SSA-able.
func TypeOK(t *types.Type) bool {
types.CalcSize(t)
if t.Width > int64(4*types.PtrSize) {
@@ -6013,8 +6109,8 @@ func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *
// commaok indicates whether to panic or return a bool.
// If commaok is false, resok will be nil.
func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
- iface := s.expr(n.X) // input interface
- target := s.expr(n.DstType) // target type
+ iface := s.expr(n.X) // input interface
+ target := s.reflectType(n.Type()) // target type
byteptr := s.f.Config.Types.BytePtr
if n.Type().IsInterface() {
@@ -6148,7 +6244,7 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
if !commaok {
// on failure, panic by calling panicdottype
s.startBlock(bFail)
- taddr := s.expr(n.SrcType)
+ taddr := s.reflectType(n.X.Type())
if n.X.Type().IsEmptyInterface() {
s.rtcall(ir.Syms.PanicdottypeE, false, nil, itab, target, taddr)
} else {
@@ -6365,7 +6461,7 @@ func (s *State) DebugFriendlySetPosFrom(v *ssa.Value) {
// in the generated code.
if p.IsStmt() != src.PosIsStmt {
p = p.WithNotStmt()
- // Calls use the pos attached to v, but copy the statement mark from SSAGenState
+ // Calls use the pos attached to v, but copy the statement mark from State
}
s.SetPos(p)
} else {
@@ -6374,57 +6470,6 @@ func (s *State) DebugFriendlySetPosFrom(v *ssa.Value) {
}
}
-// byXoffset implements sort.Interface for []*ir.Name using Xoffset as the ordering.
-type byXoffset []*ir.Name
-
-func (s byXoffset) Len() int { return len(s) }
-func (s byXoffset) Less(i, j int) bool { return s[i].FrameOffset() < s[j].FrameOffset() }
-func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-func emitStackObjects(e *ssafn, pp *objw.Progs) {
- var vars []*ir.Name
- for _, n := range e.curfn.Dcl {
- if liveness.ShouldTrack(n) && n.Addrtaken() {
- vars = append(vars, n)
- }
- }
- if len(vars) == 0 {
- return
- }
-
- // Sort variables from lowest to highest address.
- sort.Sort(byXoffset(vars))
-
- // Populate the stack object data.
- // Format must match runtime/stack.go:stackObjectRecord.
- x := e.curfn.LSym.Func().StackObjects
- off := 0
- off = objw.Uintptr(x, off, uint64(len(vars)))
- for _, v := range vars {
- // Note: arguments and return values have non-negative Xoffset,
- // in which case the offset is relative to argp.
- // Locals have a negative Xoffset, in which case the offset is relative to varp.
- off = objw.Uintptr(x, off, uint64(v.FrameOffset()))
- if !types.TypeSym(v.Type()).Siggen() {
- e.Fatalf(v.Pos(), "stack object's type symbol not generated for type %s", v.Type())
- }
- off = objw.SymPtr(x, off, reflectdata.WriteType(v.Type()), 0)
- }
-
- // Emit a funcdata pointing at the stack object data.
- p := pp.Prog(obj.AFUNCDATA)
- p.From.SetConst(objabi.FUNCDATA_StackObjects)
- p.To.Type = obj.TYPE_MEM
- p.To.Name = obj.NAME_EXTERN
- p.To.Sym = x
-
- if base.Flag.Live != 0 {
- for _, v := range vars {
- base.WarnfAt(v.Pos(), "stack object %v %s", v, v.Type().String())
- }
- }
-}
-
// genssa appends entries to pp for each instruction in f.
func genssa(f *ssa.Func, pp *objw.Progs) {
var s State
@@ -6432,7 +6477,6 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
e := f.Frontend().(*ssafn)
s.livenessMap = liveness.Compute(e.curfn, f, e.stkptrsize, pp)
- emitStackObjects(e, pp)
openDeferInfo := e.curfn.LSym.Func().OpenCodedDeferInfo
if openDeferInfo != nil {
@@ -7165,7 +7209,7 @@ func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot
if n.Type().IsEmptyInterface() {
f = ".type"
}
- c := e.SplitSlot(&name, f, 0, u) // see comment in plive.go:onebitwalktype1.
+ c := e.SplitSlot(&name, f, 0, u) // see comment in typebits.Set
d := e.SplitSlot(&name, ".data", u.Size(), t)
return c, d
}
@@ -7345,31 +7389,26 @@ func clobberBase(n ir.Node) ir.Node {
//
// 3. in all other cases, want the regular ABIInternal linksym
//
-func callTargetLSym(callee *types.Sym, callerLSym *obj.LSym) *obj.LSym {
+func callTargetLSym(callee *ir.Name, callerLSym *obj.LSym) *obj.LSym {
lsym := callee.Linksym()
if !base.Flag.ABIWrap {
return lsym
}
- if ir.AsNode(callee.Def) == nil {
- return lsym
- }
- defn := ir.AsNode(callee.Def).Name().Defn
- if defn == nil {
+ fn := callee.Func
+ if fn == nil {
return lsym
}
- ndclfunc := defn.(*ir.Func)
// check for case 1 above
if callerLSym.ABIWrapper() {
- if nlsym := ndclfunc.LSym; nlsym != nil {
+ if nlsym := fn.LSym; nlsym != nil {
lsym = nlsym
}
} else {
// check for case 2 above
- nam := ndclfunc.Nname
- defABI, hasDefABI := symabiDefs[nam.Sym().LinksymName()]
+ defABI, hasDefABI := symabiDefs[lsym.Name]
if hasDefABI && defABI == obj.ABI0 {
- lsym = nam.Sym().LinksymABI0()
+ lsym = callee.LinksymABI(obj.ABI0)
}
}
return lsym
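
The callTargetLSym hunk above now receives the callee as an *ir.Name and chooses between its ABIInternal and ABI0 linksyms: ABI-wrapper callers keep calling the underlying symbol, and callees recorded in symabiDefs as ABI0 (assembly definitions) are called through their ABI0 symbol. A standalone sketch of that selection with simplified stand-in types (abi, funcInfo, and pickCalleeSym are invented for illustration, not compiler API):

	package main

	import "fmt"

	type abi int

	const (
		abi0 abi = iota
		abiInternal
	)

	// funcInfo stands in for what callTargetLSym can see about the callee:
	// whether it has a Go definition, and any ABI recorded for an assembly
	// definition (symabiDefs in the diff).
	type funcInfo struct {
		name      string
		hasGoFunc bool // callee.Func != nil
		hasAsmDef bool
		asmABI    abi
	}

	// pickCalleeSym mirrors the three cases listed above callTargetLSym:
	// 1. an ABI-wrapper caller keeps calling the callee's own symbol,
	// 2. a callee defined in assembly as ABI0 is called through its ABI0 symbol,
	// 3. everything else uses the regular ABIInternal linksym.
	func pickCalleeSym(callee funcInfo, callerIsABIWrapper bool) string {
		lsym := callee.name // stands in for callee.Linksym()
		if !callee.hasGoFunc {
			return lsym
		}
		if callerIsABIWrapper {
			return lsym // case 1
		}
		if callee.hasAsmDef && callee.asmABI == abi0 {
			return lsym + "<ABI0>" // case 2: callee.LinksymABI(obj.ABI0)
		}
		return lsym // case 3
	}

	func main() {
		fmt.Println(pickCalleeSym(funcInfo{name: "runtime.memhash", hasGoFunc: true, hasAsmDef: true, asmABI: abi0}, false))
		fmt.Println(pickCalleeSym(funcInfo{name: "main.work", hasGoFunc: true}, true))
	}
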
diff --git a/src/cmd/compile/internal/staticdata/data.go b/src/cmd/compile/internal/staticdata/data.go
index 4b12590fde..b06fd7aa4b 100644
--- a/src/cmd/compile/internal/staticdata/data.go
+++ b/src/cmd/compile/internal/staticdata/data.go
@@ -25,55 +25,38 @@ import (
"cmd/internal/src"
)
-// InitAddr writes the static address of a to n. a must be an ONAME.
-// Neither n nor a is modified.
-func InitAddr(n *ir.Name, noff int64, a *ir.Name, aoff int64) {
+// InitAddrOffset writes the address of the static name symbol lsym to n; it does not modify n.
+// It's the caller's responsibility to make sure lsym comes from an ONAME/PEXTERN node.
+func InitAddrOffset(n *ir.Name, noff int64, lsym *obj.LSym, off int64) {
if n.Op() != ir.ONAME {
- base.Fatalf("addrsym n op %v", n.Op())
+ base.Fatalf("InitAddr n op %v", n.Op())
}
if n.Sym() == nil {
- base.Fatalf("addrsym nil n sym")
- }
- if a.Op() != ir.ONAME {
- base.Fatalf("addrsym a op %v", a.Op())
+ base.Fatalf("InitAddr nil n sym")
}
s := n.Linksym()
- s.WriteAddr(base.Ctxt, noff, types.PtrSize, a.Linksym(), aoff)
+ s.WriteAddr(base.Ctxt, noff, types.PtrSize, lsym, off)
}
-// InitFunc writes the static address of f to n. f must be a global function.
-// Neither n nor f is modified.
-func InitFunc(n *ir.Name, noff int64, f *ir.Name) {
- if n.Op() != ir.ONAME {
- base.Fatalf("pfuncsym n op %v", n.Op())
- }
- if n.Sym() == nil {
- base.Fatalf("pfuncsym nil n sym")
- }
- if f.Class != ir.PFUNC {
- base.Fatalf("pfuncsym class not PFUNC %d", f.Class)
- }
- s := n.Linksym()
- s.WriteAddr(base.Ctxt, noff, types.PtrSize, FuncLinksym(f), 0)
+// InitAddr is InitAddrOffset, with offset fixed to 0.
+func InitAddr(n *ir.Name, noff int64, lsym *obj.LSym) {
+ InitAddrOffset(n, noff, lsym, 0)
}
-// InitSlice writes a static slice symbol {&arr, lencap, lencap} to n+noff.
-// InitSlice does not modify n.
-func InitSlice(n *ir.Name, noff int64, arr *ir.Name, lencap int64) {
+// InitSlice writes a static slice symbol {lsym, lencap, lencap} to n+noff; it does not modify n.
+// It's the caller's responsibility to make sure lsym comes from an ONAME node.
+func InitSlice(n *ir.Name, noff int64, lsym *obj.LSym, lencap int64) {
s := n.Linksym()
- if arr.Op() != ir.ONAME {
- base.Fatalf("slicesym non-name arr %v", arr)
- }
- s.WriteAddr(base.Ctxt, noff, types.PtrSize, arr.Linksym(), 0)
+ s.WriteAddr(base.Ctxt, noff, types.PtrSize, lsym, 0)
s.WriteInt(base.Ctxt, noff+types.SliceLenOffset, types.PtrSize, lencap)
s.WriteInt(base.Ctxt, noff+types.SliceCapOffset, types.PtrSize, lencap)
}
func InitSliceBytes(nam *ir.Name, off int64, s string) {
if nam.Op() != ir.ONAME {
- base.Fatalf("slicebytes %v", nam)
+ base.Fatalf("InitSliceBytes %v", nam)
}
- InitSlice(nam, off, slicedata(nam.Pos(), s), int64(len(s)))
+ InitSlice(nam, off, slicedata(nam.Pos(), s).Linksym(), int64(len(s)))
}
const (
@@ -243,14 +226,14 @@ func FuncSym(s *types.Sym) *types.Sym {
// except for the types package, which is protected separately.
// Reusing funcsymsmu to also cover this package lookup
// avoids a general, broader, expensive package lookup mutex.
- // Note makefuncsym also does package look-up of func sym names,
+ // Note NeedFuncSym also does package look-up of func sym names,
// but that it is only called serially, from the front end.
funcsymsmu.Lock()
sf, existed := s.Pkg.LookupOK(ir.FuncSymName(s))
// Don't export s·f when compiling for dynamic linking.
// When dynamically linking, the necessary function
- // symbols will be created explicitly with makefuncsym.
- // See the makefuncsym comment for details.
+ // symbols will be created explicitly with NeedFuncSym.
+ // See the NeedFuncSym comment for details.
if !base.Ctxt.Flag_dynlink && !existed {
funcsyms = append(funcsyms, s)
}
@@ -265,6 +248,13 @@ func FuncLinksym(n *ir.Name) *obj.LSym {
return FuncSym(n.Sym()).Linksym()
}
+func GlobalLinksym(n *ir.Name) *obj.LSym {
+ if n.Op() != ir.ONAME || n.Class != ir.PEXTERN {
+ base.Fatalf("expected global variable: %v", n)
+ }
+ return n.Linksym()
+}
+
// NeedFuncSym ensures that s·f is exported, if needed.
// It is only used with -dynlink.
// When not compiling for dynamic linking,
@@ -297,7 +287,7 @@ func NeedFuncSym(s *types.Sym) {
func WriteFuncSyms() {
sort.Slice(funcsyms, func(i, j int) bool {
- return funcsyms[i].LinksymName() < funcsyms[j].LinksymName()
+ return funcsyms[i].Linksym().Name < funcsyms[j].Linksym().Name
})
for _, s := range funcsyms {
sf := s.Pkg.Lookup(ir.FuncSymName(s)).Linksym()
@@ -310,16 +300,16 @@ func WriteFuncSyms() {
// Neither n nor c is modified.
func InitConst(n *ir.Name, noff int64, c ir.Node, wid int) {
if n.Op() != ir.ONAME {
- base.Fatalf("litsym n op %v", n.Op())
+ base.Fatalf("InitConst n op %v", n.Op())
}
if n.Sym() == nil {
- base.Fatalf("litsym nil n sym")
+ base.Fatalf("InitConst nil n sym")
}
if c.Op() == ir.ONIL {
return
}
if c.Op() != ir.OLITERAL {
- base.Fatalf("litsym c op %v", c.Op())
+ base.Fatalf("InitConst c op %v", c.Op())
}
s := n.Linksym()
switch u := c.Val(); u.Kind() {
@@ -358,6 +348,6 @@ func InitConst(n *ir.Name, noff int64, c ir.Node, wid int) {
s.WriteInt(base.Ctxt, noff+int64(types.PtrSize), types.PtrSize, int64(len(i)))
default:
- base.Fatalf("litsym unhandled OLITERAL %v", c)
+ base.Fatalf("InitConst unhandled OLITERAL %v", c)
}
}
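
InitSlice above emits a slice header directly into the symbol's data: the backing-array address at n+noff, followed by len and cap (both lencap) at the slice length and capacity offsets. A minimal sketch of that layout using a byte buffer in place of the linker symbol, assuming a 64-bit little-endian target (ptrSize, the offset constants, and writeSliceHeader are illustrative, not the staticdata API):

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	const (
		ptrSize        = 8           // assumption: 64-bit target
		sliceLenOffset = ptrSize     // plays the role of types.SliceLenOffset
		sliceCapOffset = 2 * ptrSize // plays the role of types.SliceCapOffset
	)

	// writeSliceHeader lays out {&arr, lencap, lencap} at data[noff:], the same
	// shape InitSlice emits via s.WriteAddr and s.WriteInt in the hunk above.
	func writeSliceHeader(data []byte, noff int64, arrAddr uint64, lencap int64) {
		binary.LittleEndian.PutUint64(data[noff:], arrAddr) // address of the backing-array symbol
		binary.LittleEndian.PutUint64(data[noff+sliceLenOffset:], uint64(lencap))
		binary.LittleEndian.PutUint64(data[noff+sliceCapOffset:], uint64(lencap))
	}

	func main() {
		buf := make([]byte, 3*ptrSize)
		writeSliceHeader(buf, 0, 0x1000, 5)
		fmt.Printf("% x\n", buf)
	}
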
diff --git a/src/cmd/compile/internal/staticdata/embed.go b/src/cmd/compile/internal/staticdata/embed.go
index 2e551f0b2c..8936c4f5b4 100644
--- a/src/cmd/compile/internal/staticdata/embed.go
+++ b/src/cmd/compile/internal/staticdata/embed.go
@@ -23,13 +23,7 @@ const (
embedFiles
)
-func embedFileList(v *ir.Name) []string {
- kind := embedKind(v.Type())
- if kind == embedUnknown {
- base.ErrorfAt(v.Pos(), "go:embed cannot apply to var of type %v", v.Type())
- return nil
- }
-
+func embedFileList(v *ir.Name, kind int) []string {
// Build list of files to store.
have := make(map[string]bool)
var list []string
@@ -71,38 +65,15 @@ func embedFileList(v *ir.Name) []string {
return list
}
-// embedKindApprox determines the kind of embedding variable, approximately.
-// The match is approximate because we haven't done scope resolution yet and
-// can't tell whether "string" and "byte" really mean "string" and "byte".
-// The result must be confirmed later, after type checking, using embedKind.
-func embedKindApprox(typ ir.Node) int {
- if typ.Sym() != nil && typ.Sym().Name == "FS" && (typ.Sym().Pkg.Path == "embed" || (typ.Sym().Pkg == types.LocalPkg && base.Ctxt.Pkgpath == "embed")) {
- return embedFiles
- }
- // These are not guaranteed to match only string and []byte -
- // maybe the local package has redefined one of those words.
- // But it's the best we can do now during the noder.
- // The stricter check happens later, in initEmbed calling embedKind.
- if typ.Sym() != nil && typ.Sym().Name == "string" && typ.Sym().Pkg == types.LocalPkg {
- return embedString
- }
- if typ, ok := typ.(*ir.SliceType); ok {
- if sym := typ.Elem.Sym(); sym != nil && sym.Name == "byte" && sym.Pkg == types.LocalPkg {
- return embedBytes
- }
- }
- return embedUnknown
-}
-
// embedKind determines the kind of embedding variable.
func embedKind(typ *types.Type) int {
if typ.Sym() != nil && typ.Sym().Name == "FS" && (typ.Sym().Pkg.Path == "embed" || (typ.Sym().Pkg == types.LocalPkg && base.Ctxt.Pkgpath == "embed")) {
return embedFiles
}
- if typ == types.Types[types.TSTRING] {
+ if typ.Kind() == types.TSTRING {
return embedString
}
- if typ.Sym() == nil && typ.IsSlice() && typ.Elem() == types.ByteType {
+ if typ.Sym() == nil && typ.IsSlice() && typ.Elem().Kind() == types.TUINT8 {
return embedBytes
}
return embedUnknown
@@ -134,11 +105,28 @@ func embedFileLess(x, y string) bool {
// WriteEmbed emits the init data for a //go:embed variable,
// which is either a string, a []byte, or an embed.FS.
func WriteEmbed(v *ir.Name) {
- files := embedFileList(v)
- switch kind := embedKind(v.Type()); kind {
- case embedUnknown:
+ // TODO(mdempsky): User errors should be reported by the frontend.
+
+ commentPos := (*v.Embed)[0].Pos
+ if !types.AllowsGoVersion(types.LocalPkg, 1, 16) {
+ prevPos := base.Pos
+ base.Pos = commentPos
+ base.ErrorfVers("go1.16", "go:embed")
+ base.Pos = prevPos
+ return
+ }
+ if base.Flag.Cfg.Embed.Patterns == nil {
+ base.ErrorfAt(commentPos, "invalid go:embed: build system did not supply embed configuration")
+ return
+ }
+ kind := embedKind(v.Type())
+ if kind == embedUnknown {
base.ErrorfAt(v.Pos(), "go:embed cannot apply to var of type %v", v.Type())
+ return
+ }
+ files := embedFileList(v, kind)
+ switch kind {
case embedString, embedBytes:
file := files[0]
fsym, size, err := fileStringSym(v.Pos(), base.Flag.Cfg.Embed.Files[file], kind == embedString, nil)
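
The embedKind change above stops comparing against the universe string type and instead matches on kind, so named string types qualify, and []byte is recognized as an unnamed slice whose element kind is TUINT8. A rough standalone restatement of that classification with a simplified type descriptor in place of *types.Type (typeInfo and its fields are invented for illustration):

	package main

	import "fmt"

	const (
		embedUnknown = iota
		embedString
		embedBytes
		embedFiles
	)

	// typeInfo is an illustrative stand-in for the parts of *types.Type that
	// embedKind inspects.
	type typeInfo struct {
		symName  string // "" for unnamed types
		pkgPath  string
		kind     string // "string", "slice", ...
		isSlice  bool
		elemKind string // element kind for slices, e.g. "uint8"
	}

	func embedKind(t typeInfo) int {
		if t.symName == "FS" && t.pkgPath == "embed" {
			return embedFiles
		}
		if t.kind == "string" { // kind check: named string types qualify too
			return embedString
		}
		if t.symName == "" && t.isSlice && t.elemKind == "uint8" { // unnamed []byte
			return embedBytes
		}
		return embedUnknown
	}

	func main() {
		fmt.Println(embedKind(typeInfo{symName: "FS", pkgPath: "embed"}))
		fmt.Println(embedKind(typeInfo{kind: "string"}))
		fmt.Println(embedKind(typeInfo{kind: "slice", isSlice: true, elemKind: "uint8"}))
	}
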
diff --git a/src/cmd/compile/internal/staticinit/sched.go b/src/cmd/compile/internal/staticinit/sched.go
index ac0b6cd87e..f3ad82e7b6 100644
--- a/src/cmd/compile/internal/staticinit/sched.go
+++ b/src/cmd/compile/internal/staticinit/sched.go
@@ -15,6 +15,7 @@ import (
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
+ "cmd/internal/src"
)
type Entry struct {
@@ -80,7 +81,7 @@ func (s *Schedule) tryStaticInit(nn ir.Node) bool {
func (s *Schedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Type) bool {
if rn.Class == ir.PFUNC {
// TODO if roff != 0 { panic }
- staticdata.InitFunc(l, loff, rn)
+ staticdata.InitAddr(l, loff, staticdata.FuncLinksym(rn))
return true
}
if rn.Class != ir.PEXTERN || rn.Sym().Pkg != types.LocalPkg {
@@ -137,9 +138,8 @@ func (s *Schedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Ty
case ir.OADDR:
r := r.(*ir.AddrExpr)
- if a := r.X; a.Op() == ir.ONAME {
- a := a.(*ir.Name)
- staticdata.InitAddr(l, loff, a, 0)
+ if a, ok := r.X.(*ir.Name); ok && a.Op() == ir.ONAME {
+ staticdata.InitAddr(l, loff, staticdata.GlobalLinksym(a))
return true
}
@@ -148,14 +148,14 @@ func (s *Schedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Ty
switch r.X.Op() {
case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT, ir.OMAPLIT:
// copy pointer
- staticdata.InitAddr(l, loff, s.Temps[r], 0)
+ staticdata.InitAddr(l, loff, staticdata.GlobalLinksym(s.Temps[r]))
return true
}
case ir.OSLICELIT:
r := r.(*ir.CompLitExpr)
// copy slice
- staticdata.InitSlice(l, loff, s.Temps[r], r.Len)
+ staticdata.InitSlice(l, loff, staticdata.GlobalLinksym(s.Temps[r]), r.Len)
return true
case ir.OARRAYLIT, ir.OSTRUCTLIT:
@@ -199,6 +199,20 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty
r = r.(*ir.ConvExpr).X
}
+ assign := func(pos src.XPos, a *ir.Name, aoff int64, v ir.Node) {
+ if s.StaticAssign(a, aoff, v, v.Type()) {
+ return
+ }
+ var lhs ir.Node
+ if ir.IsBlank(a) {
+ // Don't use NameOffsetExpr with blank (#43677).
+ lhs = ir.BlankNode
+ } else {
+ lhs = ir.NewNameOffsetExpr(pos, a, aoff, v.Type())
+ }
+ s.append(ir.NewAssignStmt(pos, lhs, v))
+ }
+
switch r.Op() {
case ir.ONAME:
r := r.(*ir.Name)
@@ -220,8 +234,8 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty
case ir.OADDR:
r := r.(*ir.AddrExpr)
- if name, offset, ok := StaticLoc(r.X); ok {
- staticdata.InitAddr(l, loff, name, offset)
+ if name, offset, ok := StaticLoc(r.X); ok && name.Class == ir.PEXTERN {
+ staticdata.InitAddrOffset(l, loff, name.Linksym(), offset)
return true
}
fallthrough
@@ -234,12 +248,10 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty
a := StaticName(r.X.Type())
s.Temps[r] = a
- staticdata.InitAddr(l, loff, a, 0)
+ staticdata.InitAddr(l, loff, a.Linksym())
// Init underlying literal.
- if !s.StaticAssign(a, 0, r.X, a.Type()) {
- s.append(ir.NewAssignStmt(base.Pos, a, r.X))
- }
+ assign(base.Pos, a, 0, r.X)
return true
}
//dump("not static ptrlit", r);
@@ -260,7 +272,7 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty
ta.SetNoalg(true)
a := StaticName(ta)
s.Temps[r] = a
- staticdata.InitSlice(l, loff, a, r.Len)
+ staticdata.InitSlice(l, loff, a.Linksym(), r.Len)
// Fall through to init underlying array.
l = a
loff = 0
@@ -278,10 +290,7 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty
continue
}
ir.SetPos(e.Expr)
- if !s.StaticAssign(l, loff+e.Xoffset, e.Expr, e.Expr.Type()) {
- a := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, e.Expr.Type())
- s.append(ir.NewAssignStmt(base.Pos, a, e.Expr))
- }
+ assign(base.Pos, l, loff+e.Xoffset, e.Expr)
}
return true
@@ -298,7 +307,7 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty
// Closures with no captured variables are globals,
// so the assignment can be done at link time.
// TODO if roff != 0 { panic }
- staticdata.InitFunc(l, loff, r.Func.Nname)
+ staticdata.InitAddr(l, loff, staticdata.FuncLinksym(r.Func.Nname))
return true
}
ir.ClosureDebugRuntimeCheck(r)
@@ -335,7 +344,7 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty
// Create a copy of l to modify while we emit data.
// Emit itab, advance offset.
- staticdata.InitAddr(l, loff, itab.X.(*ir.Name), 0)
+ staticdata.InitAddr(l, loff, itab.X.(*ir.LinksymOffsetExpr).Linksym)
// Emit data.
if types.IsDirectIface(val.Type()) {
@@ -345,18 +354,13 @@ func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Ty
}
// Copy val directly into n.
ir.SetPos(val)
- if !s.StaticAssign(l, loff+int64(types.PtrSize), val, val.Type()) {
- a := ir.NewNameOffsetExpr(base.Pos, l, loff+int64(types.PtrSize), val.Type())
- s.append(ir.NewAssignStmt(base.Pos, a, val))
- }
+ assign(base.Pos, l, loff+int64(types.PtrSize), val)
} else {
// Construct temp to hold val, write pointer to temp into n.
a := StaticName(val.Type())
s.Temps[val] = a
- if !s.StaticAssign(a, 0, val, val.Type()) {
- s.append(ir.NewAssignStmt(base.Pos, a, val))
- }
- staticdata.InitAddr(l, loff+int64(types.PtrSize), a, 0)
+ assign(base.Pos, a, 0, val)
+ staticdata.InitAddr(l, loff+int64(types.PtrSize), a.Linksym())
}
return true
@@ -450,7 +454,7 @@ var statuniqgen int // name generator for static temps
// StaticName returns a name backed by a (writable) static data symbol.
// Use readonlystaticname for read-only node.
func StaticName(t *types.Type) *ir.Name {
- // Don't use lookupN; it interns the resulting string, but these are all unique.
+ // Don't use LookupNum; it interns the resulting string, but these are all unique.
n := typecheck.NewName(typecheck.Lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen)))
statuniqgen++
typecheck.Declare(n, ir.PEXTERN)
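
The new assign helper above folds a pattern that previously appeared three times in StaticAssign: try to emit the value statically, and only on failure queue an ordinary assignment for the generated init code, substituting ir.BlankNode for a blank destination instead of building a NameOffsetExpr (#43677). A schematic restatement of that control flow with placeholder types standing in for the ir nodes (initState, tryStatic, and the string-based assign are invented for illustration):

	package main

	import "fmt"

	// initState stands in for *staticinit.Schedule: it either resolves an
	// initializer at compile time or queues a statement for init().
	type initState struct {
		queued []string
	}

	// tryStatic plays the role of s.StaticAssign: report whether the value
	// could be written directly into the data section.
	func tryStatic(value string) bool {
		return value == "constant"
	}

	// assign mirrors the helper in the hunk above: static if possible, otherwise
	// append a runtime assignment, avoiding an offset expression for blanks.
	func (s *initState) assign(dest string, offset int64, isBlank bool, value string) {
		if tryStatic(value) {
			return // fully initialized at link time, nothing to queue
		}
		lhs := fmt.Sprintf("%s+%d", dest, offset)
		if isBlank {
			lhs = "_" // don't build an offset expression for blank (#43677)
		}
		s.queued = append(s.queued, fmt.Sprintf("%s = %s", lhs, value))
	}

	func main() {
		var s initState
		s.assign("globalA", 0, false, "constant")
		s.assign("globalA", 8, false, "runtimeCall()")
		s.assign("_", 0, true, "runtimeCall()")
		fmt.Println(s.queued)
	}
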
diff --git a/src/cmd/compile/internal/gc/abiutils_test.go b/src/cmd/compile/internal/test/abiutils_test.go
index 6fd0af1b1f..ae7d484062 100644
--- a/src/cmd/compile/internal/gc/abiutils_test.go
+++ b/src/cmd/compile/internal/test/abiutils_test.go
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package test
import (
"bufio"
+ "cmd/compile/internal/abi"
"cmd/compile/internal/base"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/typecheck"
@@ -20,12 +21,7 @@ import (
// AMD64 registers available:
// - integer: RAX, RBX, RCX, RDI, RSI, R8, R9, r10, R11
// - floating point: X0 - X14
-var configAMD64 = ABIConfig{
- regAmounts: RegAmounts{
- intRegs: 9,
- floatRegs: 15,
- },
-}
+var configAMD64 = abi.NewABIConfig(9,15)
func TestMain(m *testing.M) {
ssagen.Arch.LinkArch = &x86.Linkamd64
diff --git a/src/cmd/compile/internal/gc/abiutilsaux_test.go b/src/cmd/compile/internal/test/abiutilsaux_test.go
index 9386b554b0..10fb668745 100644
--- a/src/cmd/compile/internal/gc/abiutilsaux_test.go
+++ b/src/cmd/compile/internal/test/abiutilsaux_test.go
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package test
// This file contains utility routines and harness infrastructure used
// by the ABI tests in "abiutils_test.go".
import (
+ "cmd/compile/internal/abi"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
@@ -75,7 +76,7 @@ func tokenize(src string) []string {
return res
}
-func verifyParamResultOffset(t *testing.T, f *types.Field, r ABIParamAssignment, which string, idx int) int {
+func verifyParamResultOffset(t *testing.T, f *types.Field, r abi.ABIParamAssignment, which string, idx int) int {
n := ir.AsNode(f.Nname).(*ir.Name)
if n.FrameOffset() != int64(r.Offset) {
t.Errorf("%s %d: got offset %d wanted %d t=%v",
@@ -110,7 +111,7 @@ func abitest(t *testing.T, ft *types.Type, exp expectedDump) {
types.CalcSize(ft)
// Analyze with full set of registers.
- regRes := ABIAnalyze(ft, configAMD64)
+ regRes := abi.ABIAnalyze(ft, configAMD64)
regResString := strings.TrimSpace(regRes.String())
// Check results.
@@ -121,12 +122,12 @@ func abitest(t *testing.T, ft *types.Type, exp expectedDump) {
}
// Analyze again with empty register set.
- empty := ABIConfig{}
- emptyRes := ABIAnalyze(ft, empty)
+ empty := &abi.ABIConfig{}
+ emptyRes := abi.ABIAnalyze(ft, empty)
emptyResString := emptyRes.String()
// Walk the results and make sure the offsets assigned match
- // up with those assiged by dowidth. This checks to make sure that
+ // up with those assigned by CalcSize. This checks to make sure that
// when we have no available registers the ABI assignment degenerates
// back to the original ABI0.
@@ -135,18 +136,18 @@ func abitest(t *testing.T, ft *types.Type, exp expectedDump) {
rfsl := ft.Recvs().Fields().Slice()
poff := 0
if len(rfsl) != 0 {
- failed |= verifyParamResultOffset(t, rfsl[0], emptyRes.inparams[0], "receiver", 0)
+ failed |= verifyParamResultOffset(t, rfsl[0], emptyRes.InParams()[0], "receiver", 0)
poff = 1
}
// params
pfsl := ft.Params().Fields().Slice()
for k, f := range pfsl {
- verifyParamResultOffset(t, f, emptyRes.inparams[k+poff], "param", k)
+ verifyParamResultOffset(t, f, emptyRes.InParams()[k+poff], "param", k)
}
// results
ofsl := ft.Results().Fields().Slice()
for k, f := range ofsl {
- failed |= verifyParamResultOffset(t, f, emptyRes.outparams[k], "result", k)
+ failed |= verifyParamResultOffset(t, f, emptyRes.OutParams()[k], "result", k)
}
if failed != 0 {
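
The refactored test above still checks one invariant: analyzed with an empty register set, ABIAnalyze must hand out the same stack offsets that a sequential size/alignment walk (CalcSize) would, i.e. it degenerates to ABI0. A small sketch of that degenerate stack layout, assuming simple size/alignment pairs (param and stackOffsets are illustrative, not the abi package API):

	package main

	import "fmt"

	type param struct {
		name  string
		size  int64
		align int64
	}

	// stackOffsets assigns each parameter the next suitably aligned offset,
	// which is what the ABI assignment falls back to with no registers available.
	func stackOffsets(params []param) map[string]int64 {
		offs := make(map[string]int64, len(params))
		var off int64
		for _, p := range params {
			off = (off + p.align - 1) &^ (p.align - 1) // round up to the field's alignment
			offs[p.name] = off
			off += p.size
		}
		return offs
	}

	func main() {
		fmt.Println(stackOffsets([]param{
			{"a", 1, 1},
			{"b", 8, 8},
			{"c", 4, 4},
		}))
	}
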
diff --git a/src/cmd/compile/internal/test/testdata/reproducible/issue38068.go b/src/cmd/compile/internal/test/testdata/reproducible/issue38068.go
index db5ca7dcbe..b87daed8e9 100644
--- a/src/cmd/compile/internal/test/testdata/reproducible/issue38068.go
+++ b/src/cmd/compile/internal/test/testdata/reproducible/issue38068.go
@@ -53,7 +53,7 @@ func G(x *A, n int) {
return
}
// Address-taken local of type A, which will insure that the
- // compiler's dtypesym() routine will create a method wrapper.
+ // compiler's writeType() routine will create a method wrapper.
var a, b A
a.next = x
a.prev = &b
diff --git a/src/cmd/compile/internal/typebits/typebits.go b/src/cmd/compile/internal/typebits/typebits.go
index 63a2bb3ffa..1c1b077423 100644
--- a/src/cmd/compile/internal/typebits/typebits.go
+++ b/src/cmd/compile/internal/typebits/typebits.go
@@ -15,7 +15,7 @@ import (
// on future calls with the same type t.
func Set(t *types.Type, off int64, bv bitvec.BitVec) {
if t.Align > 0 && off&int64(t.Align-1) != 0 {
- base.Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off)
+ base.Fatalf("typebits.Set: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off)
}
if !t.HasPointers() {
// Note: this case ensures that pointers to go:notinheap types
@@ -26,14 +26,14 @@ func Set(t *types.Type, off int64, bv bitvec.BitVec) {
switch t.Kind() {
case types.TPTR, types.TUNSAFEPTR, types.TFUNC, types.TCHAN, types.TMAP:
if off&int64(types.PtrSize-1) != 0 {
- base.Fatalf("onebitwalktype1: invalid alignment, %v", t)
+ base.Fatalf("typebits.Set: invalid alignment, %v", t)
}
bv.Set(int32(off / int64(types.PtrSize))) // pointer
case types.TSTRING:
// struct { byte *str; intgo len; }
if off&int64(types.PtrSize-1) != 0 {
- base.Fatalf("onebitwalktype1: invalid alignment, %v", t)
+ base.Fatalf("typebits.Set: invalid alignment, %v", t)
}
bv.Set(int32(off / int64(types.PtrSize))) //pointer in first slot
@@ -42,7 +42,7 @@ func Set(t *types.Type, off int64, bv bitvec.BitVec) {
// or, when isnilinter(t)==true:
// struct { Type *type; void *data; }
if off&int64(types.PtrSize-1) != 0 {
- base.Fatalf("onebitwalktype1: invalid alignment, %v", t)
+ base.Fatalf("typebits.Set: invalid alignment, %v", t)
}
// The first word of an interface is a pointer, but we don't
// treat it as such.
@@ -61,7 +61,7 @@ func Set(t *types.Type, off int64, bv bitvec.BitVec) {
case types.TSLICE:
// struct { byte *array; uintgo len; uintgo cap; }
if off&int64(types.PtrSize-1) != 0 {
- base.Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t)
+ base.Fatalf("typebits.Set: invalid TARRAY alignment, %v", t)
}
bv.Set(int32(off / int64(types.PtrSize))) // pointer in first slot (BitsPointer)
@@ -82,6 +82,6 @@ func Set(t *types.Type, off int64, bv bitvec.BitVec) {
}
default:
- base.Fatalf("onebitwalktype1: unexpected type, %v", t)
+ base.Fatalf("typebits.Set: unexpected type, %v", t)
}
}
diff --git a/src/cmd/compile/internal/typecheck/const.go b/src/cmd/compile/internal/typecheck/const.go
index d6bf101974..1a8e58383a 100644
--- a/src/cmd/compile/internal/typecheck/const.go
+++ b/src/cmd/compile/internal/typecheck/const.go
@@ -623,7 +623,7 @@ func OrigInt(n ir.Node, v int64) ir.Node {
return OrigConst(n, constant.MakeInt64(v))
}
-// defaultlit on both nodes simultaneously;
+// DefaultLit on both nodes simultaneously;
// if they're both ideal going in they better
// get the same type going out.
// force means must assign concrete (non-ideal) type.
diff --git a/src/cmd/compile/internal/typecheck/dcl.go b/src/cmd/compile/internal/typecheck/dcl.go
index bd54919c93..eab0bb09b2 100644
--- a/src/cmd/compile/internal/typecheck/dcl.go
+++ b/src/cmd/compile/internal/typecheck/dcl.go
@@ -41,7 +41,7 @@ func Declare(n *ir.Name, ctxt ir.Class) {
s := n.Sym()
- // kludgy: typecheckok means we're past parsing. Eg genwrapper may declare out of package names later.
+ // kludgy: TypecheckAllowed means we're past parsing. Eg reflectdata.methodWrapper may declare out of package names later.
if !inimport && !TypecheckAllowed && s.Pkg != types.LocalPkg {
base.ErrorfAt(n.Pos(), "cannot declare name %v", s)
}
@@ -311,7 +311,7 @@ func FakeRecv() *types.Field {
var fakeRecvField = FakeRecv
-var funcStack []funcStackEnt // stack of previous values of Curfn/dclcontext
+var funcStack []funcStackEnt // stack of previous values of ir.CurFunc/DeclContext
type funcStackEnt struct {
curfn *ir.Func
@@ -401,14 +401,14 @@ func Temp(t *types.Type) *ir.Name {
// make a new Node off the books
func TempAt(pos src.XPos, curfn *ir.Func, t *types.Type) *ir.Name {
if curfn == nil {
- base.Fatalf("no curfn for tempAt")
+ base.Fatalf("no curfn for TempAt")
}
if curfn.Op() == ir.OCLOSURE {
- ir.Dump("tempAt", curfn)
- base.Fatalf("adding tempAt to wrong closure function")
+ ir.Dump("TempAt", curfn)
+ base.Fatalf("adding TempAt to wrong closure function")
}
if t == nil {
- base.Fatalf("tempAt called with nil type")
+ base.Fatalf("TempAt called with nil type")
}
if t.Kind() == types.TFUNC && t.Recv() != nil {
base.Fatalf("misuse of method type: %v", t)
diff --git a/src/cmd/compile/internal/typecheck/expr.go b/src/cmd/compile/internal/typecheck/expr.go
index 12bfae67a8..339fb00aa4 100644
--- a/src/cmd/compile/internal/typecheck/expr.go
+++ b/src/cmd/compile/internal/typecheck/expr.go
@@ -68,7 +68,7 @@ func tcShift(n, l, r ir.Node) (ir.Node, ir.Node, *types.Type) {
return l, r, nil
}
- // no defaultlit for left
+ // no DefaultLit for left
// the outer context gives the type
t = l.Type()
if (l.Type() == types.UntypedFloat || l.Type() == types.UntypedComplex) && r.Op() == ir.OLITERAL {
@@ -201,7 +201,7 @@ func tcArith(n ir.Node, op ir.Op, l, r ir.Node) (ir.Node, ir.Node, *types.Type)
// n.Left = tcCompLit(n.Left)
func tcCompLit(n *ir.CompLitExpr) (res ir.Node) {
if base.EnableTrace && base.Flag.LowerT {
- defer tracePrint("typecheckcomplit", n)(&res)
+ defer tracePrint("tcCompLit", n)(&res)
}
lno := base.Pos
@@ -838,7 +838,7 @@ func tcStar(n *ir.StarExpr, top int) ir.Node {
}
if l.Op() == ir.OTYPE {
n.SetOTYPE(types.NewPtr(l.Type()))
- // Ensure l.Type gets dowidth'd for the backend. Issue 20174.
+ // Ensure l.Type gets CalcSize'd for the backend. Issue 20174.
types.CheckSize(l.Type())
return n
}
diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go
index 766eb8bae9..7ab5f68ce3 100644
--- a/src/cmd/compile/internal/typecheck/func.go
+++ b/src/cmd/compile/internal/typecheck/func.go
@@ -100,7 +100,7 @@ func PartialCallType(n *ir.SelectorExpr) *types.Type {
return t
}
-// Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck
+// Lazy typechecking of imported bodies. For local functions, CanInline will set ->typecheck
// because they're a copy of an already checked body.
func ImportedBody(fn *ir.Func) {
lno := ir.SetPos(fn.Nname)
@@ -122,14 +122,14 @@ func ImportedBody(fn *ir.Func) {
ImportBody(fn)
- // typecheckinl is only for imported functions;
+ // Stmts(fn.Inl.Body) below is only for imported functions;
// their bodies may refer to unsafe as long as the package
// was marked safe during import (which was checked then).
- // the ->inl of a local function has been typechecked before caninl copied it.
+ // the ->inl of a local function has been typechecked before CanInline copied it.
pkg := fnpkg(fn.Nname)
if pkg == types.LocalPkg || pkg == nil {
- return // typecheckinl on local function
+ return // ImportedBody on local function
}
if base.Flag.LowerM > 2 || base.Debug.Export != 0 {
@@ -141,11 +141,11 @@ func ImportedBody(fn *ir.Func) {
Stmts(fn.Inl.Body)
ir.CurFunc = savefn
- // During expandInline (which imports fn.Func.Inl.Body),
- // declarations are added to fn.Func.Dcl by funcHdr(). Move them
+ // During ImportBody (which imports fn.Func.Inl.Body),
+ // declarations are added to fn.Func.Dcl by funcBody(). Move them
// to fn.Func.Inl.Dcl for consistency with how local functions
- // behave. (Append because typecheckinl may be called multiple
- // times.)
+ // behave. (Append because ImportedBody may be called multiple
+ // times on same fn.)
fn.Inl.Dcl = append(fn.Inl.Dcl, fn.Dcl...)
fn.Dcl = nil
@@ -296,15 +296,22 @@ func tcClosure(clo *ir.ClosureExpr, top int) {
fn.SetClosureCalled(top&ctxCallee != 0)
// Do not typecheck fn twice, otherwise, we will end up pushing
- // fn to Target.Decls multiple times, causing initLSym called twice.
+ // fn to Target.Decls multiple times, causing InitLSym called twice.
// See #30709
if fn.Typecheck() == 1 {
clo.SetType(fn.Type())
return
}
- fn.Nname.SetSym(ClosureName(ir.CurFunc))
- ir.MarkFunc(fn.Nname)
+ // Don't give a name and add to xtop if we are typechecking an inlined
+ // body in ImportedBody(), since we only want to create the named function
+ // when the closure is actually inlined (and then we force a typecheck
+ // explicitly in (*inlsubst).node()).
+ inTypeCheckInl := ir.CurFunc != nil && ir.CurFunc.Body == nil
+ if !inTypeCheckInl {
+ fn.Nname.SetSym(ClosureName(ir.CurFunc))
+ ir.MarkFunc(fn.Nname)
+ }
Func(fn)
clo.SetType(fn.Type())
@@ -338,15 +345,22 @@ func tcClosure(clo *ir.ClosureExpr, top int) {
}
fn.ClosureVars = fn.ClosureVars[:out]
- Target.Decls = append(Target.Decls, fn)
+ if base.Flag.W > 1 {
+ s := fmt.Sprintf("New closure func: %s", ir.FuncName(fn))
+ ir.Dump(s, fn)
+ }
+ if !inTypeCheckInl {
+ // Add function to xtop once only when we give it a name
+ Target.Decls = append(Target.Decls, fn)
+ }
}
// type check function definition
// To be called by typecheck, not directly.
-// (Call typecheckFunc instead.)
+// (Call typecheck.Func instead.)
func tcFunc(n *ir.Func) {
if base.EnableTrace && base.Flag.LowerT {
- defer tracePrint("typecheckfunc", n)(nil)
+ defer tracePrint("tcFunc", n)(nil)
}
n.Nname = AssignExpr(n.Nname).(*ir.Name)
@@ -896,7 +910,7 @@ func tcNew(n *ir.UnaryExpr) ir.Node {
// tcPanic typechecks an OPANIC node.
func tcPanic(n *ir.UnaryExpr) ir.Node {
n.X = Expr(n.X)
- n.X = DefaultLit(n.X, types.Types[types.TINTER])
+ n.X = AssignConv(n.X, types.Types[types.TINTER], "argument to panic")
if n.X.Type() == nil {
n.SetType(nil)
return n
diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go
index 4d48b80346..be4a689836 100644
--- a/src/cmd/compile/internal/typecheck/iexport.go
+++ b/src/cmd/compile/internal/typecheck/iexport.go
@@ -423,9 +423,13 @@ type exportWriter struct {
prevLine int64
prevColumn int64
- // dclIndex maps function-scoped declarations to their index
- // within their respective Func's Dcl list.
- dclIndex map[*ir.Name]int
+ // dclIndex maps function-scoped declarations to an int used to refer to
+ // them later in the function. For local variables/params, the int is
+ // non-negative and in order of their appearance in the Func's Dcl list. For
+ // closure variables, the index is negative starting at -2.
+ dclIndex map[*ir.Name]int
+ maxDclIndex int
+ maxClosureVarIndex int
}
func (p *iexporter) doDecl(n *ir.Name) {
@@ -976,6 +980,9 @@ func (w *exportWriter) funcExt(n *ir.Name) {
w.linkname(n.Sym())
w.symIdx(n.Sym())
+ // TODO remove after register abi is working.
+ w.uint64(uint64(n.Func.Pragma))
+
// Escape analysis.
for _, fs := range &types.RecvsParams {
for _, f := range fs(n.Type()).FieldSlice() {
@@ -1035,14 +1042,19 @@ func (w *exportWriter) typeExt(t *types.Type) {
// Inline bodies.
-func (w *exportWriter) funcBody(fn *ir.Func) {
- w.int64(int64(len(fn.Inl.Dcl)))
- for i, n := range fn.Inl.Dcl {
+func (w *exportWriter) writeNames(dcl []*ir.Name) {
+ w.int64(int64(len(dcl)))
+ for i, n := range dcl {
w.pos(n.Pos())
w.localIdent(n.Sym())
w.typ(n.Type())
- w.dclIndex[n] = i
+ w.dclIndex[n] = w.maxDclIndex + i
}
+ w.maxDclIndex += len(dcl)
+}
+
+func (w *exportWriter) funcBody(fn *ir.Func) {
+ w.writeNames(fn.Inl.Dcl)
w.stmtList(fn.Inl.Body)
}
@@ -1312,8 +1324,30 @@ func (w *exportWriter) expr(n ir.Node) {
// case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
// should have been resolved by typechecking - handled by default case
- // case OCLOSURE:
- // unimplemented - handled by default case
+ case ir.OCLOSURE:
+ n := n.(*ir.ClosureExpr)
+ w.op(ir.OCLOSURE)
+ w.pos(n.Pos())
+ w.signature(n.Type())
+
+ // Write out id for the Outer of each captured variable. The
+ // captured variable itself for this closure will be re-created
+ // during import.
+ w.int64(int64(len(n.Func.ClosureVars)))
+ for i, cv := range n.Func.ClosureVars {
+ w.pos(cv.Pos())
+ w.localName(cv.Outer)
+ // Closure variable (which will be re-created during
+ // import) is given via a negative id, starting at -2,
+ // which is used to refer to it later in the function
+ // during export. -1 represents blanks.
+ w.dclIndex[cv] = -(i + 2) - w.maxClosureVarIndex
+ }
+ w.maxClosureVarIndex += len(n.Func.ClosureVars)
+
+ // like w.funcBody(n.Func), but not for .Inl
+ w.writeNames(n.Func.Dcl)
+ w.stmtList(n.Func.Body)
// case OCOMPLIT:
// should have been resolved by typechecking - handled by default case
diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go
index c9effabce0..f2682257f3 100644
--- a/src/cmd/compile/internal/typecheck/iimport.go
+++ b/src/cmd/compile/internal/typecheck/iimport.go
@@ -37,7 +37,7 @@ var (
// and offset where that identifier's declaration can be read.
DeclImporter = map[*types.Sym]iimporterAndOffset{}
- // inlineImporter is like declImporter, but for inline bodies
+ // inlineImporter is like DeclImporter, but for inline bodies
// for function and method symbols.
inlineImporter = map[*types.Sym]iimporterAndOffset{}
)
@@ -265,6 +265,9 @@ type importReader struct {
// curfn is the current function we're importing into.
curfn *ir.Func
+ // Slice of all dcls for function, including any interior closures
+ allDcls []*ir.Name
+ allClosureVars []*ir.Name
}
func (p *iimporter) newReader(off uint64, pkg *types.Pkg) *importReader {
@@ -334,7 +337,7 @@ func (r *importReader) doDecl(sym *types.Sym) *ir.Name {
recv := r.param()
mtyp := r.signature(recv)
- // methodSym already marked m.Sym as a function.
+ // MethodSym already marked m.Sym as a function.
m := ir.NewNameAt(mpos, ir.MethodSym(recv.Type, msym))
m.Class = ir.PFUNC
m.SetType(mtyp)
@@ -647,6 +650,9 @@ func (r *importReader) funcExt(n *ir.Name) {
r.linkname(n.Sym())
r.symIdx(n.Sym())
+ // TODO remove after register abi is working
+ n.SetPragma(ir.PragmaFlag(r.uint64()))
+
// Escape analysis.
for _, fs := range &types.RecvsParams {
for _, f := range fs(n.Type()).FieldSlice() {
@@ -718,6 +724,7 @@ func (r *importReader) doInline(fn *ir.Func) {
base.Fatalf("%v already has inline body", fn)
}
+ //fmt.Printf("Importing %v\n", n)
r.funcBody(fn)
importlist = append(importlist, fn)
@@ -751,6 +758,24 @@ func (r *importReader) funcBody(fn *ir.Func) {
r.curfn = fn
// Import local declarations.
+ fn.Inl.Dcl = r.readFuncDcls(fn)
+
+ // Import function body.
+ body := r.stmtList()
+ if body == nil {
+ // Make sure empty body is not interpreted as
+ // no inlineable body (see also parser.fnbody)
+ // (not doing so can cause significant performance
+ // degradation due to unnecessary calls to empty
+ // functions).
+ body = []ir.Node{}
+ }
+ fn.Inl.Body = body
+
+ r.curfn = outerfn
+}
+
+func (r *importReader) readNames(fn *ir.Func) []*ir.Name {
dcls := make([]*ir.Name, r.int64())
for i := range dcls {
n := ir.NewDeclNameAt(r.pos(), ir.ONAME, r.localIdent())
@@ -759,7 +784,12 @@ func (r *importReader) funcBody(fn *ir.Func) {
n.SetType(r.typ())
dcls[i] = n
}
- fn.Inl.Dcl = dcls
+ r.allDcls = append(r.allDcls, dcls...)
+ return dcls
+}
+
+func (r *importReader) readFuncDcls(fn *ir.Func) []*ir.Name {
+ dcls := r.readNames(fn)
// Fixup parameter classes and associate with their
// signature's type fields.
@@ -784,28 +814,18 @@ func (r *importReader) funcBody(fn *ir.Func) {
for _, f := range typ.Results().FieldSlice() {
fix(f, ir.PPARAMOUT)
}
-
- // Import function body.
- body := r.stmtList()
- if body == nil {
- // Make sure empty body is not interpreted as
- // no inlineable body (see also parser.fnbody)
- // (not doing so can cause significant performance
- // degradation due to unnecessary calls to empty
- // functions).
- body = []ir.Node{}
- }
- fn.Inl.Body = body
-
- r.curfn = outerfn
+ return dcls
}
func (r *importReader) localName() *ir.Name {
i := r.int64()
- if i < 0 {
+ if i == -1 {
return ir.BlankNode.(*ir.Name)
}
- return r.curfn.Inl.Dcl[i]
+ if i < 0 {
+ return r.allClosureVars[-i-2]
+ }
+ return r.allDcls[i]
}
func (r *importReader) stmtList() []ir.Node {
@@ -921,8 +941,38 @@ func (r *importReader) node() ir.Node {
// case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
// unreachable - should have been resolved by typechecking
- // case OCLOSURE:
- // unimplemented
+ case ir.OCLOSURE:
+ //println("Importing CLOSURE")
+ pos := r.pos()
+ typ := r.signature(nil)
+
+ // All the remaining code below is similar to (*noder).funcLit(), but
+ // with Dcls and ClosureVars lists already set up
+ fn := ir.NewFunc(pos)
+ fn.SetIsHiddenClosure(true)
+ fn.Nname = ir.NewNameAt(pos, ir.BlankNode.Sym())
+ fn.Nname.Func = fn
+ fn.Nname.Ntype = ir.TypeNode(typ)
+ fn.Nname.Defn = fn
+ fn.Nname.SetType(typ)
+
+ cvars := make([]*ir.Name, r.int64())
+ for i := range cvars {
+ cvars[i] = ir.CaptureName(r.pos(), fn, r.localName().Canonical())
+ }
+ fn.ClosureVars = cvars
+ r.allClosureVars = append(r.allClosureVars, cvars...)
+
+ fn.Dcl = r.readFuncDcls(fn)
+ body := r.stmtList()
+ ir.FinishCaptureNames(pos, r.curfn, fn)
+
+ clo := ir.NewClosureExpr(pos, fn)
+ fn.OClosure = clo
+
+ fn.Body = body
+
+ return clo
// case OPTRLIT:
// unreachable - mapped to case OADDR below by exporter
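
Taken together, the iexport.go and iimport.go hunks above define a small index scheme for naming locals inside exported function bodies: locals and params get non-negative indices that keep growing across nested closures (maxDclIndex), blank is -1, and closure variables get ids -2, -3, ... (offset by maxClosureVarIndex); the importer keeps matching allDcls and allClosureVars slices and reverses the mapping in localName. The sketch below restates that encode/decode pair on plain slices (encoder, decoder, and their fields are invented for illustration, not the exporter/importer types):

	package main

	import "fmt"

	type name struct{ ident string }

	var blank = &name{"_"}

	// encoder mirrors exportWriter's bookkeeping: one shared index space for
	// all Dcl lists written so far, and a separate negative space, starting at
	// -2, for closure variables.
	type encoder struct {
		dclIndex           map[*name]int
		maxDclIndex        int
		maxClosureVarIndex int
	}

	func (e *encoder) writeNames(dcl []*name) {
		for i, n := range dcl {
			e.dclIndex[n] = e.maxDclIndex + i
		}
		e.maxDclIndex += len(dcl)
	}

	func (e *encoder) writeClosureVars(cvars []*name) {
		for i, cv := range cvars {
			e.dclIndex[cv] = -(i + 2) - e.maxClosureVarIndex
		}
		e.maxClosureVarIndex += len(cvars)
	}

	func (e *encoder) localNameID(n *name) int {
		if n == blank {
			return -1
		}
		return e.dclIndex[n]
	}

	// decoder mirrors importReader: allDcls collects every Dcl read so far,
	// allClosureVars collects closure variables, and localName reverses the ids.
	type decoder struct {
		allDcls        []*name
		allClosureVars []*name
	}

	func (d *decoder) localName(i int) *name {
		if i == -1 {
			return blank
		}
		if i < 0 {
			return d.allClosureVars[-i-2]
		}
		return d.allDcls[i]
	}

	func main() {
		x, y, cv := &name{"x"}, &name{"y"}, &name{"cv"}

		e := &encoder{dclIndex: map[*name]int{}}
		e.writeNames([]*name{x}) // outer function Dcl
		e.writeClosureVars([]*name{cv})
		e.writeNames([]*name{y}) // nested closure Dcl; indices keep counting up

		d := &decoder{allDcls: []*name{x, y}, allClosureVars: []*name{cv}}
		for _, n := range []*name{x, y, cv, blank} {
			id := e.localNameID(n)
			fmt.Println(n.ident, id, d.localName(id).ident)
		}
	}
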
diff --git a/src/cmd/compile/internal/typecheck/stmt.go b/src/cmd/compile/internal/typecheck/stmt.go
index 8baa5dda78..14ed175be9 100644
--- a/src/cmd/compile/internal/typecheck/stmt.go
+++ b/src/cmd/compile/internal/typecheck/stmt.go
@@ -25,7 +25,7 @@ func typecheckrangeExpr(n *ir.RangeStmt) {
}
t := RangeExprType(n.X.Type())
- // delicate little dance. see typecheckas2
+ // delicate little dance. see tcAssignList
if n.Key != nil && !ir.DeclaredBy(n.Key, n) {
n.Key = AssignExpr(n.Key)
}
@@ -90,7 +90,7 @@ func typecheckrangeExpr(n *ir.RangeStmt) {
// fill in the var's type.
func tcAssign(n *ir.AssignStmt) {
if base.EnableTrace && base.Flag.LowerT {
- defer tracePrint("typecheckas", n)(nil)
+ defer tracePrint("tcAssign", n)(nil)
}
if n.Y == nil {
@@ -110,7 +110,7 @@ func tcAssign(n *ir.AssignStmt) {
func tcAssignList(n *ir.AssignListStmt) {
if base.EnableTrace && base.Flag.LowerT {
- defer tracePrint("typecheckas2", n)(nil)
+ defer tracePrint("tcAssignList", n)(nil)
}
assign(n, n.Lhs, n.Rhs)
@@ -119,7 +119,7 @@ func tcAssignList(n *ir.AssignListStmt) {
func assign(stmt ir.Node, lhs, rhs []ir.Node) {
// delicate little dance.
// the definition of lhs may refer to this assignment
- // as its definition, in which case it will call typecheckas.
+ // as its definition, in which case it will call tcAssign.
// in that case, do not call typecheck back, or it will cycle.
// if the variable has a type (ntype) then typechecking
// will not look at defn, so it is okay (and desirable,
diff --git a/src/cmd/compile/internal/typecheck/subr.go b/src/cmd/compile/internal/typecheck/subr.go
index a640d105d1..b88a9f2283 100644
--- a/src/cmd/compile/internal/typecheck/subr.go
+++ b/src/cmd/compile/internal/typecheck/subr.go
@@ -81,7 +81,7 @@ func markAddrOf(n ir.Node) ir.Node {
// main typecheck has completed.
// The argument to OADDR needs to be typechecked because &x[i] takes
// the address of x if x is an array, but not if x is a slice.
- // Note: outervalue doesn't work correctly until n is typechecked.
+ // Note: OuterValue doesn't work correctly until n is typechecked.
n = typecheck(n, ctxExpr)
if x := ir.OuterValue(n); x.Op() == ir.ONAME {
x.Name().SetAddrtaken(true)
@@ -367,10 +367,10 @@ func assignop(src, dst *types.Type) (ir.Op, string) {
var missing, have *types.Field
var ptr int
if implements(src, dst, &missing, &have, &ptr) {
- // Call itabname so that (src, dst)
+ // Call NeedITab/ITabAddr so that (src, dst)
// gets added to itabs early, which allows
// us to de-virtualize calls through this
- // type/interface pair later. See peekitabs in reflect.go
+ // type/interface pair later. See CompileITabs in reflect.go
if types.IsDirectIface(src) && !dst.IsEmptyInterface() {
NeedITab(src, dst)
}
@@ -440,7 +440,7 @@ func assignop(src, dst *types.Type) (ir.Op, string) {
}
}
- // 6. rule about untyped constants - already converted by defaultlit.
+ // 6. rule about untyped constants - already converted by DefaultLit.
// 7. Any typed value can be assigned to the blank identifier.
if dst.Kind() == types.TBLANK {
@@ -834,7 +834,7 @@ func lookdot0(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool)
var slist []symlink
// Code to help generate trampoline functions for methods on embedded
-// types. These are approx the same as the corresponding adddot
+// types. These are approx the same as the corresponding AddImplicitDots
// routines except that they expect to be called with unique tasks and
// they return the actual methods.
diff --git a/src/cmd/compile/internal/typecheck/syms.go b/src/cmd/compile/internal/typecheck/syms.go
index 28db40db91..202a932e6c 100644
--- a/src/cmd/compile/internal/typecheck/syms.go
+++ b/src/cmd/compile/internal/typecheck/syms.go
@@ -15,7 +15,7 @@ import (
func LookupRuntime(name string) *ir.Name {
s := ir.Pkgs.Runtime.Lookup(name)
if s == nil || s.Def == nil {
- base.Fatalf("syslook: can't find runtime.%s", name)
+ base.Fatalf("LookupRuntime: can't find runtime.%s", name)
}
return ir.AsNode(s.Def).(*ir.Name)
}
@@ -33,7 +33,7 @@ func SubstArgTypes(old *ir.Name, types_ ...*types.Type) *ir.Name {
n.Class = old.Class
n.SetType(types.SubstAny(old.Type(), &types_))
if len(types_) > 0 {
- base.Fatalf("substArgTypes: too many argument types")
+ base.Fatalf("SubstArgTypes: too many argument types")
}
return n
}
@@ -86,14 +86,17 @@ func InitRuntime() {
// LookupRuntimeFunc looks up Go function name in package runtime. This function
// must follow the internal calling convention.
func LookupRuntimeFunc(name string) *obj.LSym {
- s := ir.Pkgs.Runtime.Lookup(name)
- s.SetFunc(true)
- return s.Linksym()
+ return LookupRuntimeABI(name, obj.ABIInternal)
}
// LookupRuntimeVar looks up a variable (or assembly function) name in package
// runtime. If this is a function, it may have a special calling
// convention.
func LookupRuntimeVar(name string) *obj.LSym {
- return ir.Pkgs.Runtime.Lookup(name).Linksym()
+ return LookupRuntimeABI(name, obj.ABI0)
+}
+
+// LookupRuntimeABI looks up a name in package runtime using the given ABI.
+func LookupRuntimeABI(name string, abi obj.ABI) *obj.LSym {
+ return base.PkgLinksym("runtime", name, abi)
}
diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go
index d5100021a2..cb434578dd 100644
--- a/src/cmd/compile/internal/typecheck/typecheck.go
+++ b/src/cmd/compile/internal/typecheck/typecheck.go
@@ -456,7 +456,7 @@ func typecheck(n ir.Node, top int) (res ir.Node) {
}
// indexlit implements typechecking of untyped values as
-// array/slice indexes. It is almost equivalent to defaultlit
+// array/slice indexes. It is almost equivalent to DefaultLit
// but also accepts untyped numeric values representable as
// value of type int (see also checkmake for comparison).
// The result of indexlit MUST be assigned back to n, e.g.
@@ -521,7 +521,7 @@ func typecheck1(n ir.Node, top int) ir.Node {
}
return n
- case ir.ONAMEOFFSET:
+ case ir.OLINKSYMOFFSET:
// type already set
return n
@@ -857,8 +857,8 @@ func typecheck1(n ir.Node, top int) ir.Node {
n := n.(*ir.ReturnStmt)
return tcReturn(n)
- case ir.ORETJMP:
- n := n.(*ir.BranchStmt)
+ case ir.OTAILCALL:
+ n := n.(*ir.TailCallStmt)
return n
case ir.OSELECT:
@@ -938,7 +938,7 @@ func typecheckargs(n ir.InitNode) {
// If we're outside of function context, then this call will
// be executed during the generated init function. However,
// init.go hasn't yet created it. Instead, associate the
- // temporary variables with initTodo for now, and init.go
+ // temporary variables with InitTodoFunc for now, and init.go
// will reassociate them later when it's appropriate.
static := ir.CurFunc == nil
if static {
@@ -1890,7 +1890,7 @@ func checkmake(t *types.Type, arg string, np *ir.Node) bool {
return false
}
- // Do range checks for constants before defaultlit
+ // Do range checks for constants before DefaultLit
// to avoid redundant "constant NNN overflows int" errors.
if n.Op() == ir.OLITERAL {
v := toint(n.Val())
@@ -1904,7 +1904,7 @@ func checkmake(t *types.Type, arg string, np *ir.Node) bool {
}
}
- // defaultlit is necessary for non-constants too: n might be 1.1<<k.
+ // DefaultLit is necessary for non-constants too: n might be 1.1<<k.
// TODO(gri) The length argument requirements for (array/slice) make
// are the same as for index expressions. Factor the code better;
// for instance, indexlit might be called here and incorporate some
@@ -2023,7 +2023,7 @@ func isTermNode(n ir.Node) bool {
n := n.(*ir.BlockStmt)
return isTermNodes(n.List)
- case ir.OGOTO, ir.ORETURN, ir.ORETJMP, ir.OPANIC, ir.OFALL:
+ case ir.OGOTO, ir.ORETURN, ir.OTAILCALL, ir.OPANIC, ir.OFALL:
return true
case ir.OFOR, ir.OFORUNTIL:
diff --git a/src/cmd/compile/internal/types/alg.go b/src/cmd/compile/internal/types/alg.go
index 6091ee249c..2c2700f345 100644
--- a/src/cmd/compile/internal/types/alg.go
+++ b/src/cmd/compile/internal/types/alg.go
@@ -132,7 +132,7 @@ func AlgType(t *Type) (AlgKind, *Type) {
return ret, nil
}
- base.Fatalf("algtype: unexpected type %v", t)
+ base.Fatalf("AlgType: unexpected type %v", t)
return 0, nil
}
@@ -163,7 +163,7 @@ func IncomparableField(t *Type) *Field {
// by padding.
func IsPaddedField(t *Type, i int) bool {
if !t.IsStruct() {
- base.Fatalf("ispaddedfield called non-struct %v", t)
+ base.Fatalf("IsPaddedField called non-struct %v", t)
}
end := t.Width
if i+1 < t.NumFields() {
diff --git a/src/cmd/compile/internal/types/fmt.go b/src/cmd/compile/internal/types/fmt.go
index cd0679f6b9..da224d4019 100644
--- a/src/cmd/compile/internal/types/fmt.go
+++ b/src/cmd/compile/internal/types/fmt.go
@@ -44,7 +44,7 @@ func OrigSym(s *Sym) *Sym {
}
if strings.HasPrefix(s.Name, ".anon") {
- // originally an unnamed or _ name (see subr.go: structargs)
+ // originally an unnamed or _ name (see subr.go: NewFuncParams)
return nil
}
diff --git a/src/cmd/compile/internal/types/size.go b/src/cmd/compile/internal/types/size.go
index a54c086ded..98540eefb6 100644
--- a/src/cmd/compile/internal/types/size.go
+++ b/src/cmd/compile/internal/types/size.go
@@ -58,7 +58,7 @@ func typePos(t *Type) src.XPos {
var MaxWidth int64
// CalcSizeDisabled indicates whether it is safe
-// to calculate Types' widths and alignments. See dowidth.
+// to calculate Types' widths and alignments. See CalcSize.
var CalcSizeDisabled bool
// machine size and rounding alignment is dictated around
@@ -135,7 +135,7 @@ func expandiface(t *Type) {
m.Offset = int64(i) * int64(PtrSize)
}
- // Access fields directly to avoid recursively calling dowidth
+ // Access fields directly to avoid recursively calling CalcSize
// within Type.Fields().
t.Extra.(*Interface).Fields.Set(methods)
}
@@ -164,7 +164,7 @@ func calcStructOffset(errtype *Type, t *Type, o int64, flag int) int64 {
f.Offset = o
if f.Nname != nil {
// addrescapes has similar code to update these offsets.
- // Usually addrescapes runs after widstruct,
+ // Usually addrescapes runs after calcStructOffset,
// in which case we could drop this,
// but function closure functions are the exception.
// NOTE(rsc): This comment may be stale.
@@ -306,17 +306,16 @@ func reportTypeLoop(t *Type) {
}
// CalcSize calculates and stores the size and alignment for t.
-// If sizeCalculationDisabled is set, and the size/alignment
+// If CalcSizeDisabled is set, and the size/alignment
// have not already been calculated, it calls Fatal.
// This is used to prevent data races in the back end.
func CalcSize(t *Type) {
- // Calling dowidth when typecheck tracing enabled is not safe.
+ // Calling CalcSize when typecheck tracing enabled is not safe.
// See issue #33658.
if base.EnableTrace && SkipSizeForTracing {
return
}
if PtrSize == 0 {
-
// Assume this is a test.
return
}
@@ -351,7 +350,7 @@ func CalcSize(t *Type) {
return
}
- // defer checkwidth calls until after we're done
+ // defer CheckSize calls until after we're done
DeferCheckSize()
lno := base.Pos
@@ -367,7 +366,7 @@ func CalcSize(t *Type) {
case TFUNC, TCHAN, TMAP, TSTRING:
break
- // simtype == 0 during bootstrap
+ // SimType == 0 during bootstrap
default:
if SimType[t.Kind()] != 0 {
et = SimType[t.Kind()]
@@ -377,7 +376,7 @@ func CalcSize(t *Type) {
var w int64
switch et {
default:
- base.Fatalf("dowidth: unknown type: %v", t)
+ base.Fatalf("CalcSize: unknown type: %v", t)
// compiler-specific stuff
case TINT8, TUINT8, TBOOL:
@@ -443,11 +442,11 @@ func CalcSize(t *Type) {
case TANY:
// not a real type; should be replaced before use.
- base.Fatalf("dowidth any")
+ base.Fatalf("CalcSize any")
case TSTRING:
if StringSize == 0 {
- base.Fatalf("early dowidth string")
+ base.Fatalf("early CalcSize string")
}
w = StringSize
t.Align = uint8(PtrSize)
@@ -477,7 +476,7 @@ func CalcSize(t *Type) {
case TSTRUCT:
if t.IsFuncArgStruct() {
- base.Fatalf("dowidth fn struct %v", t)
+ base.Fatalf("CalcSize fn struct %v", t)
}
w = calcStructOffset(t, t, 0, 1)
@@ -526,7 +525,7 @@ func CalcStructSize(s *Type) {
s.Width = calcStructOffset(s, s, 0, 1) // sets align
}
-// when a type's width should be known, we call checkwidth
+// when a type's width should be known, we call CheckSize
// to compute it. during a declaration like
//
// type T *struct { next T }
@@ -535,11 +534,11 @@ func CalcStructSize(s *Type) {
// until after T has been initialized to be a pointer to that struct.
// similarly, during import processing structs may be used
// before their definition. in those situations, calling
-// defercheckwidth() stops width calculations until
-// resumecheckwidth() is called, at which point all the
-// checkwidths that were deferred are executed.
-// dowidth should only be called when the type's size
-// is needed immediately. checkwidth makes sure the
+// DeferCheckSize() stops width calculations until
+// ResumeCheckSize() is called, at which point all the
+// CalcSizes that were deferred are executed.
+// CalcSize should only be called when the type's size
+// is needed immediately. CheckSize makes sure the
// size is evaluated eventually.
var deferredTypeStack []*Type
@@ -552,7 +551,7 @@ func CheckSize(t *Type) {
// function arg structs should not be checked
// outside of the enclosing function.
if t.IsFuncArgStruct() {
- base.Fatalf("checkwidth %v", t)
+ base.Fatalf("CheckSize %v", t)
}
if defercalc == 0 {
@@ -606,7 +605,7 @@ func PtrDataSize(t *Type) int64 {
case TINTER:
// struct { Itab *tab; void *data; } or
// struct { Type *type; void *data; }
- // Note: see comment in plive.go:onebitwalktype1.
+ // Note: see comment in typebits.Set
return 2 * int64(PtrSize)
case TSLICE:
@@ -628,7 +627,7 @@ func PtrDataSize(t *Type) int64 {
return lastPtrField.Offset + PtrDataSize(lastPtrField.Type)
default:
- base.Fatalf("typeptrdata: unexpected type, %v", t)
+ base.Fatalf("PtrDataSize: unexpected type, %v", t)
return 0
}
}
diff --git a/src/cmd/compile/internal/types/sym.go b/src/cmd/compile/internal/types/sym.go
index 2914e2ed3f..0e66ed348b 100644
--- a/src/cmd/compile/internal/types/sym.go
+++ b/src/cmd/compile/internal/types/sym.go
@@ -64,53 +64,30 @@ func (sym *Sym) IsBlank() bool {
return sym != nil && sym.Name == "_"
}
-func (sym *Sym) LinksymName() string {
- if sym.IsBlank() {
- return "_"
- }
- if sym.Linkname != "" {
- return sym.Linkname
- }
- return sym.Pkg.Prefix + "." + sym.Name
-}
-
// Deprecated: This method should not be used directly. Instead, use a
// higher-level abstraction that directly returns the linker symbol
// for a named object. For example, reflectdata.TypeLinksym(t) instead
// of reflectdata.TypeSym(t).Linksym().
func (sym *Sym) Linksym() *obj.LSym {
- if sym == nil {
- return nil
- }
- initPkg := func(r *obj.LSym) {
- if sym.Linkname != "" {
- r.Pkg = "_"
- } else {
- r.Pkg = sym.Pkg.Prefix
- }
- }
+ abi := obj.ABI0
if sym.Func() {
- // This is a function symbol. Mark it as "internal ABI".
- return base.Ctxt.LookupABIInit(sym.LinksymName(), obj.ABIInternal, initPkg)
+ abi = obj.ABIInternal
}
- return base.Ctxt.LookupInit(sym.LinksymName(), initPkg)
+ return sym.LinksymABI(abi)
}
-// LinksymABI0 looks up or creates an ABI0 linker symbol for "sym",
-// in cases where we want to specifically select the ABI0 version of
-// a symbol (typically used only for ABI wrappers).
-func (sym *Sym) LinksymABI0() *obj.LSym {
+// Deprecated: This method should not be used directly. Instead, use a
+// higher-level abstraction that directly returns the linker symbol
+// for a named object. For example, (*ir.Name).LinksymABI(abi) instead
+// of (*ir.Name).Sym().LinksymABI(abi).
+func (sym *Sym) LinksymABI(abi obj.ABI) *obj.LSym {
if sym == nil {
- return nil
+ base.Fatalf("nil symbol")
}
- initPkg := func(r *obj.LSym) {
- if sym.Linkname != "" {
- r.Pkg = "_"
- } else {
- r.Pkg = sym.Pkg.Prefix
- }
+ if sym.Linkname != "" {
+ return base.Linkname(sym.Linkname, abi)
}
- return base.Ctxt.LookupABIInit(sym.LinksymName(), obj.ABI0, initPkg)
+ return base.PkgLinksym(sym.Pkg.Prefix, sym.Name, abi)
}
// Less reports whether symbol a is ordered before symbol b.
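
As the deprecation notes above spell out, callers are meant to go through the named object rather than the Sym. A minimal sketch of the preferred versus deprecated spelling, assuming n is an *ir.Name (illustrative only):

	// Preferred: resolve the linker symbol via the named object.
	lsym := n.LinksymABI(obj.ABI0)

	// Deprecated equivalent that these doc comments steer away from:
	lsym = n.Sym().LinksymABI(obj.ABI0)
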
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
index 5176b96c02..0dfbef8af1 100644
--- a/src/cmd/compile/internal/types/type.go
+++ b/src/cmd/compile/internal/types/type.go
@@ -107,7 +107,7 @@ const (
// Types stores pointers to predeclared named types.
//
// It also stores pointers to several special types:
-// - Types[TANY] is the placeholder "any" type recognized by substArgTypes.
+// - Types[TANY] is the placeholder "any" type recognized by SubstArgTypes.
// - Types[TBLANK] represents the blank variable's type.
// - Types[TNIL] represents the predeclared "nil" value's type.
// - Types[TUNSAFEPTR] is package unsafe's Pointer type.
@@ -643,7 +643,7 @@ func SubstAny(t *Type, types *[]*Type) *Type {
case TANY:
if len(*types) == 0 {
- base.Fatalf("substArgTypes: not enough argument types")
+ base.Fatalf("SubstArgTypes: not enough argument types")
}
t = (*types)[0]
*types = (*types)[1:]
diff --git a/src/cmd/compile/internal/types2/stdlib_test.go b/src/cmd/compile/internal/types2/stdlib_test.go
index ae573a4ec8..ffd423be27 100644
--- a/src/cmd/compile/internal/types2/stdlib_test.go
+++ b/src/cmd/compile/internal/types2/stdlib_test.go
@@ -162,6 +162,8 @@ func TestStdTest(t *testing.T) {
testTestDir(t, filepath.Join(runtime.GOROOT(), "test"),
"cmplxdivide.go", // also needs file cmplxdivide1.go - ignore
"directive.go", // tests compiler rejection of bad directive placement - ignore
+ "embedfunc.go", // tests //go:embed
+ "embedvers.go", // tests //go:embed
"linkname2.go", // types2 doesn't check validity of //go:xxx directives
)
}
diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go
index 3fe810ac4e..6e8075a35f 100644
--- a/src/cmd/compile/internal/walk/assign.go
+++ b/src/cmd/compile/internal/walk/assign.go
@@ -248,18 +248,6 @@ func walkReturn(n *ir.ReturnStmt) ir.Node {
return n
}
-// fncall reports whether assigning an rvalue of type rt to an lvalue l might involve a function call.
-func fncall(l ir.Node, rt *types.Type) bool {
- if l.HasCall() || l.Op() == ir.OINDEXMAP {
- return true
- }
- if types.Identical(l.Type(), rt) {
- return false
- }
- // There might be a conversion required, which might involve a runtime call.
- return true
-}
-
// check assign type list to
// an expression list. called in
// expr-list = func()
@@ -268,21 +256,17 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node {
base.Fatalf("ascompatet: assignment count mismatch: %d = %d", len(nl), nr.NumFields())
}
- var nn, mm ir.Nodes
+ var nn ir.Nodes
for i, l := range nl {
if ir.IsBlank(l) {
continue
}
r := nr.Field(i)
- // Any assignment to an lvalue that might cause a function call must be
- // deferred until all the returned values have been read.
- if fncall(l, r.Type) {
- tmp := ir.Node(typecheck.Temp(r.Type))
- tmp = typecheck.Expr(tmp)
- a := convas(ir.NewAssignStmt(base.Pos, l, tmp), &mm)
- mm.Append(a)
- l = tmp
+ // Order should have created autotemps of the appropriate type for
+ // us to store results into.
+ if tmp, ok := l.(*ir.Name); !ok || !tmp.AutoTemp() || !types.Identical(tmp.Type(), r.Type) {
+ base.FatalfAt(l.Pos(), "assigning %v to %+v", r.Type, l)
}
res := ir.NewResultExpr(base.Pos, nil, types.BADWIDTH)
@@ -290,16 +274,9 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node {
res.SetType(r.Type)
res.SetTypecheck(1)
- a := convas(ir.NewAssignStmt(base.Pos, l, res), &nn)
- updateHasCall(a)
- if a.HasCall() {
- ir.Dump("ascompatet ucount", a)
- base.Fatalf("ascompatet: too many function calls evaluating parameters")
- }
-
- nn.Append(a)
+ nn.Append(ir.NewAssignStmt(base.Pos, l, res))
}
- return append(nn, mm...)
+ return nn
}
// check assign expression list to
@@ -392,11 +369,7 @@ func ascompatee(op ir.Op, nl, nr []ir.Node) []ir.Node {
appendWalkStmt(&late, convas(ir.NewAssignStmt(base.Pos, lorig, r), &late))
- if name == nil || name.Addrtaken() || name.Class == ir.PEXTERN || name.Class == ir.PAUTOHEAP {
- memWrite = true
- continue
- }
- if ir.IsBlank(name) {
+ if name != nil && ir.IsBlank(name) {
// We can ignore assignments to blank.
continue
}
@@ -405,7 +378,12 @@ func ascompatee(op ir.Op, nl, nr []ir.Node) []ir.Node {
// parameters. These can't appear in expressions anyway.
continue
}
- assigned.Add(name)
+
+ if name != nil && name.OnStack() && !name.Addrtaken() {
+ assigned.Add(name)
+ } else {
+ memWrite = true
+ }
}
early.Append(late.Take()...)
@@ -418,7 +396,10 @@ func readsMemory(n ir.Node) bool {
switch n.Op() {
case ir.ONAME:
n := n.(*ir.Name)
- return n.Class == ir.PEXTERN || n.Class == ir.PAUTOHEAP || n.Addrtaken()
+ if n.Class == ir.PFUNC {
+ return false
+ }
+ return n.Addrtaken() || !n.OnStack()
case ir.OADD,
ir.OAND,
diff --git a/src/cmd/compile/internal/walk/builtin.go b/src/cmd/compile/internal/walk/builtin.go
index a061181e2f..97f9de9c1d 100644
--- a/src/cmd/compile/internal/walk/builtin.go
+++ b/src/cmd/compile/internal/walk/builtin.go
@@ -48,10 +48,10 @@ func walkAppend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
nsrc := n.Args[0]
- // walkexprlistsafe will leave OINDEX (s[n]) alone if both s
+ // walkExprListSafe will leave OINDEX (s[n]) alone if both s
// and n are name or literal, but those may index the slice we're
// modifying here. Fix explicitly.
- // Using cheapexpr also makes sure that the evaluation
+ // Using cheapExpr also makes sure that the evaluation
// of all arguments (and especially any panics) happen
// before we begin to modify the slice in a visible way.
ls := n.Args[1:]
@@ -277,10 +277,8 @@ func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
// Allocate hmap on stack.
// var hv hmap
- hv := typecheck.Temp(hmapType)
- init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, hv, nil)))
// h = &hv
- h = typecheck.NodAddr(hv)
+ h = stackTempAddr(init, hmapType)
// Allocate one bucket pointed to by hmap.buckets on stack if hint
// is not larger than BUCKETSIZE. In case hint is larger than
@@ -303,11 +301,8 @@ func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
nif.Likely = true
// var bv bmap
- bv := typecheck.Temp(reflectdata.MapBucketType(t))
- nif.Body.Append(ir.NewAssignStmt(base.Pos, bv, nil))
-
// b = &bv
- b := typecheck.NodAddr(bv)
+ b := stackTempAddr(&nif.Body, reflectdata.MapBucketType(t))
// h.buckets = b
bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
@@ -388,7 +383,7 @@ func walkMakeSlice(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
// n = arr[:l]
i := typecheck.IndexConst(r)
if i < 0 {
- base.Fatalf("walkexpr: invalid index %v", r)
+ base.Fatalf("walkExpr: invalid index %v", r)
}
// cap is constrained to [0,2^31) or [0,2^63) depending on whether
@@ -501,18 +496,19 @@ func walkMakeSliceCopy(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
// walkNew walks an ONEW node.
func walkNew(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
- if n.Type().Elem().NotInHeap() {
+ t := n.Type().Elem()
+ if t.NotInHeap() {
base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type().Elem())
}
if n.Esc() == ir.EscNone {
- if n.Type().Elem().Width >= ir.MaxImplicitStackVarSize {
+ if t.Size() >= ir.MaxImplicitStackVarSize {
base.Fatalf("large ONEW with EscNone: %v", n)
}
- r := typecheck.Temp(n.Type().Elem())
- init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, nil))) // zero temp
- return typecheck.Expr(typecheck.NodAddr(r))
+ return stackTempAddr(init, t)
}
- return callnew(n.Type().Elem())
+ types.CalcSize(t)
+ n.MarkNonNil()
+ return n
}
// generate code for print
@@ -678,15 +674,6 @@ func badtype(op ir.Op, tl, tr *types.Type) {
base.Errorf("illegal types for operand: %v%s", op, s)
}
-func callnew(t *types.Type) ir.Node {
- types.CalcSize(t)
- n := ir.NewUnaryExpr(base.Pos, ir.ONEWOBJ, reflectdata.TypePtr(t))
- n.SetType(types.NewPtr(t))
- n.SetTypecheck(1)
- n.MarkNonNil()
- return n
-}
-
func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node {
fn := typecheck.LookupRuntime(name)
fn = typecheck.SubstArgTypes(fn, l, r)
diff --git a/src/cmd/compile/internal/walk/closure.go b/src/cmd/compile/internal/walk/closure.go
index 694aa99940..1d1cbc2054 100644
--- a/src/cmd/compile/internal/walk/closure.go
+++ b/src/cmd/compile/internal/walk/closure.go
@@ -159,7 +159,7 @@ func walkCallPart(n *ir.SelectorExpr, init *ir.Nodes) ir.Node {
//
// clos = &struct{F uintptr; R T}{T.M·f, x}
//
- // Like walkclosure above.
+ // Like walkClosure above.
if n.X.Type().IsInterface() {
// Trigger panic for method on nil interface now.
diff --git a/src/cmd/compile/internal/walk/compare.go b/src/cmd/compile/internal/walk/compare.go
index a4ea31bf55..7c385c0e0d 100644
--- a/src/cmd/compile/internal/walk/compare.go
+++ b/src/cmd/compile/internal/walk/compare.go
@@ -418,7 +418,7 @@ func eqFor(t *types.Type) (n ir.Node, needsize bool) {
// Should only arrive here with large memory or
// a struct/array containing a non-memory field/element.
// Small memory is handled inline, and single non-memory
- // is handled by walkcompare.
+ // is handled by walkCompare.
switch a, _ := types.AlgType(t); a {
case types.AMEM:
n := typecheck.LookupRuntime("memequal")
@@ -436,7 +436,7 @@ func eqFor(t *types.Type) (n ir.Node, needsize bool) {
}))
return n, false
}
- base.Fatalf("eqfor %v", t)
+ base.Fatalf("eqFor %v", t)
return nil, false
}
diff --git a/src/cmd/compile/internal/walk/complit.go b/src/cmd/compile/internal/walk/complit.go
index 8a77bba2ad..73442dc404 100644
--- a/src/cmd/compile/internal/walk/complit.go
+++ b/src/cmd/compile/internal/walk/complit.go
@@ -64,11 +64,11 @@ func readonlystaticname(t *types.Type) *ir.Name {
}
func isSimpleName(nn ir.Node) bool {
- if nn.Op() != ir.ONAME {
+ if nn.Op() != ir.ONAME || ir.IsBlank(nn) {
return false
}
n := nn.(*ir.Name)
- return n.Class != ir.PAUTOHEAP && n.Class != ir.PEXTERN
+ return n.OnStack()
}
func litas(l ir.Node, r ir.Node, init *ir.Nodes) {
@@ -297,7 +297,7 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes)
if !ok || name.Class != ir.PEXTERN {
base.Fatalf("slicelit: %v", var_)
}
- staticdata.InitSlice(name, offset, vstat, t.NumElem())
+ staticdata.InitSlice(name, offset, vstat.Linksym(), t.NumElem())
return
}
@@ -344,37 +344,18 @@ func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes)
if !types.Identical(t, x.Type()) {
panic("dotdotdot base type does not match order's assigned type")
}
-
- if vstat == nil {
- a = ir.NewAssignStmt(base.Pos, x, nil)
- a = typecheck.Stmt(a)
- init.Append(a) // zero new temp
- } else {
- // Declare that we're about to initialize all of x.
- // (Which happens at the *vauto = vstat below.)
- init.Append(ir.NewUnaryExpr(base.Pos, ir.OVARDEF, x))
- }
-
- a = typecheck.NodAddr(x)
+ a = initStackTemp(init, x, vstat)
} else if n.Esc() == ir.EscNone {
- a = typecheck.Temp(t)
- if vstat == nil {
- a = ir.NewAssignStmt(base.Pos, typecheck.Temp(t), nil)
- a = typecheck.Stmt(a)
- init.Append(a) // zero new temp
- a = a.(*ir.AssignStmt).X
- } else {
- init.Append(ir.NewUnaryExpr(base.Pos, ir.OVARDEF, a))
- }
-
- a = typecheck.NodAddr(a)
+ a = initStackTemp(init, typecheck.Temp(t), vstat)
} else {
a = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(t))
}
appendWalkStmt(init, ir.NewAssignStmt(base.Pos, vauto, a))
- if vstat != nil {
- // copy static to heap (4)
+ if vstat != nil && n.Prealloc == nil && n.Esc() != ir.EscNone {
+ // If we allocated on the heap with ONEW, copy the static to the
+ // heap (4). We skip this for stack temporaries, because
+ // initStackTemp already handled the copy.
a = ir.NewStarExpr(base.Pos, vauto)
appendWalkStmt(init, ir.NewAssignStmt(base.Pos, a, vstat))
}
@@ -550,9 +531,8 @@ func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) {
var r ir.Node
if n.Prealloc != nil {
- // n.Right is stack temporary used as backing store.
- appendWalkStmt(init, ir.NewAssignStmt(base.Pos, n.Prealloc, nil)) // zero backing store, just in case (#18410)
- r = typecheck.NodAddr(n.Prealloc)
+ // n.Prealloc is stack temporary used as backing store.
+ r = initStackTemp(init, n.Prealloc, nil)
} else {
r = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(n.X.Type()))
r.SetEsc(n.Esc())
@@ -667,7 +647,7 @@ func genAsStatic(as *ir.AssignStmt) {
return
case ir.OMETHEXPR:
r := r.(*ir.SelectorExpr)
- staticdata.InitFunc(name, offset, r.FuncName())
+ staticdata.InitAddr(name, offset, staticdata.FuncLinksym(r.FuncName()))
return
case ir.ONAME:
r := r.(*ir.Name)
@@ -675,7 +655,7 @@ func genAsStatic(as *ir.AssignStmt) {
base.Fatalf("genAsStatic %+v", as)
}
if r.Class == ir.PFUNC {
- staticdata.InitFunc(name, offset, r)
+ staticdata.InitAddr(name, offset, staticdata.FuncLinksym(r))
return
}
}
diff --git a/src/cmd/compile/internal/walk/convert.go b/src/cmd/compile/internal/walk/convert.go
index 85459fd92f..fa8e2c0bb8 100644
--- a/src/cmd/compile/internal/walk/convert.go
+++ b/src/cmd/compile/internal/walk/convert.go
@@ -66,17 +66,6 @@ func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
return l
}
- if ir.Names.Staticuint64s == nil {
- ir.Names.Staticuint64s = typecheck.NewName(ir.Pkgs.Runtime.Lookup("staticuint64s"))
- ir.Names.Staticuint64s.Class = ir.PEXTERN
- // The actual type is [256]uint64, but we use [256*8]uint8 so we can address
- // individual bytes.
- ir.Names.Staticuint64s.SetType(types.NewArray(types.Types[types.TUINT8], 256*8))
- ir.Names.Zerobase = typecheck.NewName(ir.Pkgs.Runtime.Lookup("zerobase"))
- ir.Names.Zerobase.Class = ir.PEXTERN
- ir.Names.Zerobase.SetType(types.Types[types.TUINTPTR])
- }
-
// Optimize convT2{E,I} for many cases in which T is not pointer-shaped,
// by using an existing addressable value identical to n.Left
// or creating one on the stack.
@@ -85,7 +74,7 @@ func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
case fromType.Size() == 0:
// n.Left is zero-sized. Use zerobase.
cheapExpr(n.X, init) // Evaluate n.Left for side-effects. See issue 19246.
- value = ir.Names.Zerobase
+ value = ir.NewLinksymExpr(base.Pos, ir.Syms.Zerobase, types.Types[types.TUINTPTR])
case fromType.IsBoolean() || (fromType.Size() == 1 && fromType.IsInteger()):
// n.Left is a bool/byte. Use staticuint64s[n.Left * 8] on little-endian
// and staticuint64s[n.Left * 8 + 7] on big-endian.
@@ -95,7 +84,10 @@ func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
if ssagen.Arch.LinkArch.ByteOrder == binary.BigEndian {
index = ir.NewBinaryExpr(base.Pos, ir.OADD, index, ir.NewInt(7))
}
- xe := ir.NewIndexExpr(base.Pos, ir.Names.Staticuint64s, index)
+ // The actual type is [256]uint64, but we use [256*8]uint8 so we can address
+ // individual bytes.
+ staticuint64s := ir.NewLinksymExpr(base.Pos, ir.Syms.Staticuint64s, types.NewArray(types.Types[types.TUINT8], 256*8))
+ xe := ir.NewIndexExpr(base.Pos, staticuint64s, index)
xe.SetBounded(true)
value = xe
case n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class == ir.PEXTERN && n.X.(*ir.Name).Readonly():
@@ -198,8 +190,7 @@ func walkBytesRunesToString(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
a := typecheck.NodNil()
if n.Esc() == ir.EscNone {
// Create temporary buffer for string on stack.
- t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
- a = typecheck.NodAddr(typecheck.Temp(t))
+ a = stackBufAddr(tmpstringbufsize, types.Types[types.TUINT8])
}
if n.Op() == ir.ORUNES2STR {
// slicerunetostring(*[32]byte, []rune) string
@@ -229,8 +220,7 @@ func walkBytesToStringTemp(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
func walkRuneToString(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
a := typecheck.NodNil()
if n.Esc() == ir.EscNone {
- t := types.NewArray(types.Types[types.TUINT8], 4)
- a = typecheck.NodAddr(typecheck.Temp(t))
+ a = stackBufAddr(4, types.Types[types.TUINT8])
}
// intstring(*[4]byte, rune)
return mkcall("intstring", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TINT64]))
@@ -246,9 +236,13 @@ func walkStringToBytes(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
t := types.NewArray(types.Types[types.TUINT8], int64(len(sc)))
var a ir.Node
if n.Esc() == ir.EscNone && len(sc) <= int(ir.MaxImplicitStackVarSize) {
- a = typecheck.NodAddr(typecheck.Temp(t))
+ a = stackBufAddr(t.NumElem(), t.Elem())
} else {
- a = callnew(t)
+ types.CalcSize(t)
+ a = ir.NewUnaryExpr(base.Pos, ir.ONEW, nil)
+ a.SetType(types.NewPtr(t))
+ a.SetTypecheck(1)
+ a.MarkNonNil()
}
p := typecheck.Temp(t.PtrTo()) // *[n]byte
init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, p, a)))
@@ -269,8 +263,7 @@ func walkStringToBytes(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
a := typecheck.NodNil()
if n.Esc() == ir.EscNone {
// Create temporary buffer for slice on stack.
- t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
- a = typecheck.NodAddr(typecheck.Temp(t))
+ a = stackBufAddr(tmpstringbufsize, types.Types[types.TUINT8])
}
// stringtoslicebyte(*32[byte], string) []byte
return mkcall("stringtoslicebyte", n.Type(), init, a, typecheck.Conv(s, types.Types[types.TSTRING]))
@@ -294,8 +287,7 @@ func walkStringToRunes(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
a := typecheck.NodNil()
if n.Esc() == ir.EscNone {
// Create temporary buffer for slice on stack.
- t := types.NewArray(types.Types[types.TINT32], tmpstringbufsize)
- a = typecheck.NodAddr(typecheck.Temp(t))
+ a = stackBufAddr(tmpstringbufsize, types.Types[types.TINT32])
}
// stringtoslicerune(*[32]rune, string) []rune
return mkcall("stringtoslicerune", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TSTRING]))
@@ -438,8 +430,8 @@ func walkCheckPtrAlignment(n *ir.ConvExpr, init *ir.Nodes, count ir.Node) ir.Nod
}
func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
- // Calling cheapexpr(n, init) below leads to a recursive call to
- // walkexpr, which leads us back here again. Use n.Checkptr to
+ // Calling cheapExpr(n, init) below leads to a recursive call to
+ // walkExpr, which leads us back here again. Use n.Checkptr to
// prevent infinite loops.
if n.CheckPtr() {
return n
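
The staticuint64s/zerobase change above only swaps how the compiler refers to those runtime symbols (ir.Syms linksyms instead of lazily built ir.Names); the user-visible optimization is unchanged. Roughly, for source code like the following (illustrative):

	var b byte = 42
	var i interface{} = b // no allocation: the data word can point into
	                      // runtime.staticuint64s (index b*8, or b*8+7 on
	                      // big-endian), per the comments above

	var z struct{}
	var j interface{} = z // zero-sized value: data word points at runtime.zerobase
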
diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go
index 893a95f403..d7a20206c8 100644
--- a/src/cmd/compile/internal/walk/expr.go
+++ b/src/cmd/compile/internal/walk/expr.go
@@ -30,7 +30,7 @@ func walkExpr(n ir.Node, init *ir.Nodes) ir.Node {
// not okay to use n->ninit when walking n,
// because we might replace n with some other node
// and would lose the init list.
- base.Fatalf("walkexpr init == &n->ninit")
+ base.Fatalf("walkExpr init == &n->ninit")
}
if len(n.Init()) != 0 {
@@ -67,8 +67,6 @@ func walkExpr(n ir.Node, init *ir.Nodes) ir.Node {
_ = staticdata.StringSym(n.Pos(), constant.StringVal(n.Val()))
}
- updateHasCall(n)
-
if base.Flag.LowerW != 0 && n != nil {
ir.Dump("after walk expr", n)
}
@@ -81,17 +79,17 @@ func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node {
switch n.Op() {
default:
ir.Dump("walk", n)
- base.Fatalf("walkexpr: switch 1 unknown op %+v", n.Op())
+ base.Fatalf("walkExpr: switch 1 unknown op %+v", n.Op())
panic("unreachable")
- case ir.ONONAME, ir.OGETG, ir.ONEWOBJ:
+ case ir.ONONAME, ir.OGETG:
return n
- case ir.OTYPE, ir.ONAME, ir.OLITERAL, ir.ONIL, ir.ONAMEOFFSET:
+ case ir.OTYPE, ir.ONAME, ir.OLITERAL, ir.ONIL, ir.OLINKSYMOFFSET:
// TODO(mdempsky): Just return n; see discussion on CL 38655.
// Perhaps refactor to use Node.mayBeShared for these instead.
// If these return early, make sure to still call
- // stringsym for constant strings.
+ // StringSym for constant strings.
return n
case ir.OMETHEXPR:
@@ -221,7 +219,7 @@ func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node {
return walkIndexMap(n, init)
case ir.ORECV:
- base.Fatalf("walkexpr ORECV") // should see inside OAS only
+ base.Fatalf("walkExpr ORECV") // should see inside OAS only
panic("unreachable")
case ir.OSLICEHEADER:
@@ -359,7 +357,7 @@ func safeExpr(n ir.Node, init *ir.Nodes) ir.Node {
}
switch n.Op() {
- case ir.ONAME, ir.OLITERAL, ir.ONIL, ir.ONAMEOFFSET:
+ case ir.ONAME, ir.OLITERAL, ir.ONIL, ir.OLINKSYMOFFSET:
return n
case ir.OLEN, ir.OCAP:
@@ -413,7 +411,7 @@ func safeExpr(n ir.Node, init *ir.Nodes) ir.Node {
// make a copy; must not be used as an lvalue
if ir.IsAddressable(n) {
- base.Fatalf("missing lvalue case in safeexpr: %v", n)
+ base.Fatalf("missing lvalue case in safeExpr: %v", n)
}
return cheapExpr(n, init)
}
@@ -428,7 +426,7 @@ func walkAddString(n *ir.AddStringExpr, init *ir.Nodes) ir.Node {
c := len(n.List)
if c < 2 {
- base.Fatalf("addstr count %d too small", c)
+ base.Fatalf("walkAddString count %d too small", c)
}
buf := typecheck.NodNil()
@@ -443,8 +441,7 @@ func walkAddString(n *ir.AddStringExpr, init *ir.Nodes) ir.Node {
// Don't allocate the buffer if the result won't fit.
if sz < tmpstringbufsize {
// Create temporary buffer for result string on stack.
- t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
- buf = typecheck.NodAddr(typecheck.Temp(t))
+ buf = stackBufAddr(tmpstringbufsize, types.Types[types.TUINT8])
}
}
@@ -497,9 +494,10 @@ func walkCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {
}
func walkCall1(n *ir.CallExpr, init *ir.Nodes) {
- if len(n.Rargs) != 0 {
+ if n.Walked() {
return // already walked
}
+ n.SetWalked(true)
// If this is a method call t.M(...),
// rewrite into a function call T.M(t, ...).
@@ -523,27 +521,26 @@ func walkCall1(n *ir.CallExpr, init *ir.Nodes) {
n.X = walkExpr(n.X, init)
walkExprList(args, init)
- // For any argument whose evaluation might require a function call,
- // store that argument into a temporary variable,
- // to prevent that calls from clobbering arguments already on the stack.
- // When instrumenting, all arguments might require function calls.
- var tempAssigns []ir.Node
for i, arg := range args {
- updateHasCall(arg)
- // Determine param type.
- t := params.Field(i).Type
- if base.Flag.Cfg.Instrumenting || fncall(arg, t) {
- // make assignment of fncall to tempAt
- tmp := typecheck.Temp(t)
- a := convas(ir.NewAssignStmt(base.Pos, tmp, arg), init)
- tempAssigns = append(tempAssigns, a)
+ // Validate argument and parameter types match.
+ param := params.Field(i)
+ if !types.Identical(arg.Type(), param.Type) {
+ base.FatalfAt(n.Pos(), "assigning %L to parameter %v (type %v)", arg, param.Sym, param.Type)
+ }
+
+ // For any argument whose evaluation might require a function call,
+ // store that argument into a temporary variable,
+ // to prevent that calls from clobbering arguments already on the stack.
+ if mayCall(arg) {
+ // assignment of arg to Temp
+ tmp := typecheck.Temp(param.Type)
+ init.Append(convas(typecheck.Stmt(ir.NewAssignStmt(base.Pos, tmp, arg)).(*ir.AssignStmt), init))
// replace arg with temp
args[i] = tmp
}
}
- n.Args = tempAssigns
- n.Rargs = args
+ n.Args = args
}
// walkDivMod walks an ODIV or OMOD node.
@@ -618,11 +615,6 @@ func walkDot(n *ir.SelectorExpr, init *ir.Nodes) ir.Node {
func walkDotType(n *ir.TypeAssertExpr, init *ir.Nodes) ir.Node {
n.X = walkExpr(n.X, init)
// Set up interface type addresses for back end.
-
- n.DstType = reflectdata.TypePtr(n.Type())
- if n.Op() == ir.ODOTTYPE {
- n.SrcType = reflectdata.TypePtr(n.X.Type())
- }
if !n.Type().IsInterface() && !n.X.Type().IsEmptyInterface() {
n.Itab = reflectdata.ITabAddr(n.Type(), n.X.Type())
}
diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go
index 38a9bec6e3..fe0b6a0eff 100644
--- a/src/cmd/compile/internal/walk/order.go
+++ b/src/cmd/compile/internal/walk/order.go
@@ -555,10 +555,6 @@ func (o *orderState) mapAssign(n ir.Node) {
n.Y = o.safeMapRHS(n.Y)
}
o.out = append(o.out, n)
-
- case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2MAPR, ir.OAS2FUNC:
- n := n.(*ir.AssignListStmt)
- o.out = append(o.out, n)
}
}
@@ -637,7 +633,7 @@ func (o *orderState) stmt(n ir.Node) {
t := o.markTemp()
o.exprList(n.Lhs)
o.exprList(n.Rhs)
- o.mapAssign(n)
+ o.out = append(o.out, n)
o.cleanTemp(t)
// Special: avoid copy of func call n.Right
@@ -647,7 +643,7 @@ func (o *orderState) stmt(n ir.Node) {
o.exprList(n.Lhs)
o.init(n.Rhs[0])
o.call(n.Rhs[0])
- o.as2(n)
+ o.as2func(n)
o.cleanTemp(t)
// Special: use temporary variables to hold result,
@@ -679,7 +675,7 @@ func (o *orderState) stmt(n ir.Node) {
base.Fatalf("order.stmt: %v", r.Op())
}
- o.okAs2(n)
+ o.as2ok(n)
o.cleanTemp(t)
// Special: does not save n onto out.
@@ -696,7 +692,7 @@ func (o *orderState) stmt(n ir.Node) {
ir.OFALL,
ir.OGOTO,
ir.OLABEL,
- ir.ORETJMP:
+ ir.OTAILCALL:
o.out = append(o.out, n)
// Special: handle call arguments.
@@ -772,14 +768,12 @@ func (o *orderState) stmt(n ir.Node) {
orderBlock(&n.Else, o.free)
o.out = append(o.out, n)
- // Special: argument will be converted to interface using convT2E
- // so make sure it is an addressable temporary.
case ir.OPANIC:
n := n.(*ir.UnaryExpr)
t := o.markTemp()
n.X = o.expr(n.X, nil)
- if !n.X.Type().IsInterface() {
- n.X = o.addrTemp(n.X)
+ if !n.X.Type().IsEmptyInterface() {
+ base.FatalfAt(n.Pos(), "bad argument to panic: %L", n.X)
}
o.out = append(o.out, n)
o.cleanTemp(t)
@@ -849,7 +843,7 @@ func (o *orderState) stmt(n ir.Node) {
n.X = o.copyExpr(r)
// n.Prealloc is the temp for the iterator.
- // hiter contains pointers and needs to be zeroed.
+ // MapIterType contains pointers and needs to be zeroed.
n.Prealloc = o.newTemp(reflectdata.MapIterType(xt), true)
}
n.Key = o.exprInPlace(n.Key)
@@ -962,7 +956,7 @@ func (o *orderState) stmt(n ir.Node) {
cas.Body.Prepend(o.cleanTempNoPop(t)...)
// TODO(mdempsky): Is this actually necessary?
- // walkselect appears to walk Ninit.
+ // walkSelect appears to walk Ninit.
cas.Body.Prepend(ir.TakeInit(cas)...)
}
@@ -986,7 +980,7 @@ func (o *orderState) stmt(n ir.Node) {
o.cleanTemp(t)
// TODO(rsc): Clean temporaries more aggressively.
- // Note that because walkswitch will rewrite some of the
+ // Note that because walkSwitch will rewrite some of the
// switch into a binary search, this is not as easy as it looks.
// (If we ran that code here we could invoke order.stmt on
// the if-else chain instead.)
@@ -1390,57 +1384,54 @@ func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
// No return - type-assertions above. Each case must return for itself.
}
-// as2 orders OAS2XXXX nodes. It creates temporaries to ensure left-to-right assignment.
-// The caller should order the right-hand side of the assignment before calling order.as2.
+// as2func orders OAS2FUNC nodes. It creates temporaries to ensure left-to-right assignment.
+// The caller should order the right-hand side of the assignment before calling order.as2func.
// It rewrites,
-// a, b, a = ...
+// a, b, a = ...
// as
// tmp1, tmp2, tmp3 = ...
-// a, b, a = tmp1, tmp2, tmp3
+// a, b, a = tmp1, tmp2, tmp3
// This is necessary to ensure left to right assignment order.
-func (o *orderState) as2(n *ir.AssignListStmt) {
- tmplist := []ir.Node{}
- left := []ir.Node{}
- for ni, l := range n.Lhs {
- if !ir.IsBlank(l) {
- tmp := o.newTemp(l.Type(), l.Type().HasPointers())
- n.Lhs[ni] = tmp
- tmplist = append(tmplist, tmp)
- left = append(left, l)
+func (o *orderState) as2func(n *ir.AssignListStmt) {
+ results := n.Rhs[0].Type()
+ as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil)
+ for i, nl := range n.Lhs {
+ if !ir.IsBlank(nl) {
+ typ := results.Field(i).Type
+ tmp := o.newTemp(typ, typ.HasPointers())
+ n.Lhs[i] = tmp
+ as.Lhs = append(as.Lhs, nl)
+ as.Rhs = append(as.Rhs, tmp)
}
}
o.out = append(o.out, n)
-
- as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
- as.Lhs = left
- as.Rhs = tmplist
o.stmt(typecheck.Stmt(as))
}
-// okAs2 orders OAS2XXX with ok.
-// Just like as2, this also adds temporaries to ensure left-to-right assignment.
-func (o *orderState) okAs2(n *ir.AssignListStmt) {
- var tmp1, tmp2 ir.Node
- if !ir.IsBlank(n.Lhs[0]) {
- typ := n.Rhs[0].Type()
- tmp1 = o.newTemp(typ, typ.HasPointers())
+// as2ok orders OAS2XXX with ok.
+// Just like as2func, this also adds temporaries to ensure left-to-right assignment.
+func (o *orderState) as2ok(n *ir.AssignListStmt) {
+ as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil)
+
+ do := func(i int, typ *types.Type) {
+ if nl := n.Lhs[i]; !ir.IsBlank(nl) {
+ var tmp ir.Node = o.newTemp(typ, typ.HasPointers())
+ n.Lhs[i] = tmp
+ as.Lhs = append(as.Lhs, nl)
+ if i == 1 {
+ // The "ok" result is an untyped boolean according to the Go
+ // spec. We need to explicitly convert it to the LHS type in
+ // case the latter is a defined boolean type (#8475).
+ tmp = typecheck.Conv(tmp, nl.Type())
+ }
+ as.Rhs = append(as.Rhs, tmp)
+ }
}
- if !ir.IsBlank(n.Lhs[1]) {
- tmp2 = o.newTemp(types.Types[types.TBOOL], false)
- }
+ do(0, n.Rhs[0].Type())
+ do(1, types.Types[types.TBOOL])
o.out = append(o.out, n)
-
- if tmp1 != nil {
- r := ir.NewAssignStmt(base.Pos, n.Lhs[0], tmp1)
- o.mapAssign(typecheck.Stmt(r))
- n.Lhs[0] = tmp1
- }
- if tmp2 != nil {
- r := ir.NewAssignStmt(base.Pos, n.Lhs[1], typecheck.Conv(tmp2, n.Lhs[1].Type()))
- o.mapAssign(typecheck.Stmt(r))
- n.Lhs[1] = tmp2
- }
+ o.stmt(typecheck.Stmt(as))
}
diff --git a/src/cmd/compile/internal/walk/race.go b/src/cmd/compile/internal/walk/race.go
index 77cabe50c6..47cd2fdc22 100644
--- a/src/cmd/compile/internal/walk/race.go
+++ b/src/cmd/compile/internal/walk/race.go
@@ -26,10 +26,9 @@ func instrument(fn *ir.Func) {
if base.Flag.Race {
lno := base.Pos
base.Pos = src.NoXPos
-
if ssagen.Arch.LinkArch.Arch.Family != sys.AMD64 {
- fn.Enter.Prepend(mkcall("racefuncenterfp", nil, nil))
- fn.Exit.Append(mkcall("racefuncexit", nil, nil))
+ fn.Enter.Prepend(mkcallstmt("racefuncenterfp"))
+ fn.Exit.Append(mkcallstmt("racefuncexit"))
} else {
// nodpc is the PC of the caller as extracted by
@@ -44,8 +43,8 @@ func instrument(fn *ir.Func) {
nodpc.SetType(types.Types[types.TUINTPTR])
nodpc.SetFrameOffset(int64(-types.PtrSize))
fn.Dcl = append(fn.Dcl, nodpc)
- fn.Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc))
- fn.Exit.Append(mkcall("racefuncexit", nil, nil))
+ fn.Enter.Prepend(mkcallstmt("racefuncenter", nodpc))
+ fn.Exit.Append(mkcallstmt("racefuncexit"))
}
base.Pos = lno
}
diff --git a/src/cmd/compile/internal/walk/range.go b/src/cmd/compile/internal/walk/range.go
index 9225c429f0..5ab24b2188 100644
--- a/src/cmd/compile/internal/walk/range.go
+++ b/src/cmd/compile/internal/walk/range.go
@@ -71,7 +71,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
}
if v1 == nil && v2 != nil {
- base.Fatalf("walkrange: v2 != nil while v1 == nil")
+ base.Fatalf("walkRange: v2 != nil while v1 == nil")
}
var ifGuard *ir.IfStmt
@@ -80,7 +80,7 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
var init []ir.Node
switch t.Kind() {
default:
- base.Fatalf("walkrange")
+ base.Fatalf("walkRange")
case types.TARRAY, types.TSLICE:
if nn := arrayClear(nrange, v1, v2, a); nn != nil {
@@ -168,18 +168,18 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
hit := nrange.Prealloc
th := hit.Type()
- keysym := th.Field(0).Sym // depends on layout of iterator struct. See reflect.go:hiter
+ keysym := th.Field(0).Sym // depends on layout of iterator struct. See reflect.go:MapIterType
elemsym := th.Field(1).Sym // ditto
fn := typecheck.LookupRuntime("mapiterinit")
fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), th)
- init = append(init, mkcall1(fn, nil, nil, reflectdata.TypePtr(t), ha, typecheck.NodAddr(hit)))
+ init = append(init, mkcallstmt1(fn, reflectdata.TypePtr(t), ha, typecheck.NodAddr(hit)))
nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), typecheck.NodNil())
fn = typecheck.LookupRuntime("mapiternext")
fn = typecheck.SubstArgTypes(fn, th)
- nfor.Post = mkcall1(fn, nil, nil, typecheck.NodAddr(hit))
+ nfor.Post = mkcallstmt1(fn, typecheck.NodAddr(hit))
key := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym))
if v1 == nil {
@@ -269,12 +269,14 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
// } else {
eif := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
- nif.Else = []ir.Node{eif}
// hv2, hv1 = decoderune(ha, hv1)
eif.Lhs = []ir.Node{hv2, hv1}
fn := typecheck.LookupRuntime("decoderune")
- eif.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), nil, ha, hv1)}
+ var fnInit ir.Nodes
+ eif.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), &fnInit, ha, hv1)}
+ fnInit.Append(eif)
+ nif.Else = fnInit
body = append(body, nif)
@@ -374,7 +376,7 @@ func mapClear(m ir.Node) ir.Node {
// instantiate mapclear(typ *type, hmap map[any]any)
fn := typecheck.LookupRuntime("mapclear")
fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem())
- n := mkcall1(fn, nil, nil, reflectdata.TypePtr(t), m)
+ n := mkcallstmt1(fn, reflectdata.TypePtr(t), m)
return walkStmt(typecheck.Stmt(n))
}
@@ -388,7 +390,7 @@ func mapClear(m ir.Node) ir.Node {
//
// in which the evaluation of a is side-effect-free.
//
-// Parameters are as in walkrange: "for v1, v2 = range a".
+// Parameters are as in walkRange: "for v1, v2 = range a".
func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node {
if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
return nil
@@ -449,10 +451,10 @@ func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node {
if a.Type().Elem().HasPointers() {
// memclrHasPointers(hp, hn)
ir.CurFunc.SetWBPos(stmt.Pos())
- fn = mkcall("memclrHasPointers", nil, nil, hp, hn)
+ fn = mkcallstmt("memclrHasPointers", hp, hn)
} else {
// memclrNoHeapPointers(hp, hn)
- fn = mkcall("memclrNoHeapPointers", nil, nil, hp, hn)
+ fn = mkcallstmt("memclrNoHeapPointers", hp, hn)
}
n.Body.Append(fn)
diff --git a/src/cmd/compile/internal/walk/select.go b/src/cmd/compile/internal/walk/select.go
index 776b020155..873be289dc 100644
--- a/src/cmd/compile/internal/walk/select.go
+++ b/src/cmd/compile/internal/walk/select.go
@@ -13,9 +13,10 @@ import (
func walkSelect(sel *ir.SelectStmt) {
lno := ir.SetPos(sel)
- if len(sel.Compiled) != 0 {
- base.Fatalf("double walkselect")
+ if sel.Walked() {
+ base.Fatalf("double walkSelect")
}
+ sel.SetWalked(true)
init := ir.TakeInit(sel)
@@ -34,7 +35,7 @@ func walkSelectCases(cases []*ir.CommClause) []ir.Node {
// optimization: zero-case select
if ncas == 0 {
- return []ir.Node{mkcall("block", nil, nil)}
+ return []ir.Node{mkcallstmt("block")}
}
// optimization: one-case select: single op.
@@ -213,12 +214,12 @@ func walkSelectCases(cases []*ir.CommClause) []ir.Node {
// TODO(mdempsky): There should be a cleaner way to
// handle this.
if base.Flag.Race {
- r := mkcall("selectsetpc", nil, nil, typecheck.NodAddr(ir.NewIndexExpr(base.Pos, pcs, ir.NewInt(int64(i)))))
+ r := mkcallstmt("selectsetpc", typecheck.NodAddr(ir.NewIndexExpr(base.Pos, pcs, ir.NewInt(int64(i)))))
init = append(init, r)
}
}
if nsends+nrecvs != ncas {
- base.Fatalf("walkselectcases: miscount: %v + %v != %v", nsends, nrecvs, ncas)
+ base.Fatalf("walkSelectCases: miscount: %v + %v != %v", nsends, nrecvs, ncas)
}
// run the select
@@ -228,7 +229,9 @@ func walkSelectCases(cases []*ir.CommClause) []ir.Node {
r := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
r.Lhs = []ir.Node{chosen, recvOK}
fn := typecheck.LookupRuntime("selectgo")
- r.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, ir.NewInt(int64(nsends)), ir.NewInt(int64(nrecvs)), ir.NewBool(dflt == nil))}
+ var fnInit ir.Nodes
+ r.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), &fnInit, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, ir.NewInt(int64(nsends)), ir.NewInt(int64(nrecvs)), ir.NewBool(dflt == nil))}
+ init = append(init, fnInit...)
init = append(init, typecheck.Stmt(r))
// selv and order are no longer alive after selectgo.
diff --git a/src/cmd/compile/internal/walk/stmt.go b/src/cmd/compile/internal/walk/stmt.go
index 1df491bd4e..46a621c2ba 100644
--- a/src/cmd/compile/internal/walk/stmt.go
+++ b/src/cmd/compile/internal/walk/stmt.go
@@ -86,6 +86,7 @@ func walkStmt(n ir.Node) ir.Node {
ir.OFALL,
ir.OGOTO,
ir.OLABEL,
+ ir.ODCL,
ir.ODCLCONST,
ir.ODCLTYPE,
ir.OCHECKNIL,
@@ -94,10 +95,6 @@ func walkStmt(n ir.Node) ir.Node {
ir.OVARLIVE:
return n
- case ir.ODCL:
- n := n.(*ir.Decl)
- return walkDecl(n)
-
case ir.OBLOCK:
n := n.(*ir.BlockStmt)
walkStmtList(n.List)
@@ -139,8 +136,8 @@ func walkStmt(n ir.Node) ir.Node {
n := n.(*ir.ReturnStmt)
return walkReturn(n)
- case ir.ORETJMP:
- n := n.(*ir.BranchStmt)
+ case ir.OTAILCALL:
+ n := n.(*ir.TailCallStmt)
return n
case ir.OINLMARK:
@@ -173,20 +170,6 @@ func walkStmtList(s []ir.Node) {
}
}
-// walkDecl walks an ODCL node.
-func walkDecl(n *ir.Decl) ir.Node {
- v := n.X
- if v.Class == ir.PAUTOHEAP {
- if base.Flag.CompilingRuntime {
- base.Errorf("%v escapes to heap, not allowed in runtime", v)
- }
- nn := ir.NewAssignStmt(base.Pos, v.Heapaddr, callnew(v.Type()))
- nn.Def = true
- return walkStmt(typecheck.Stmt(nn))
- }
- return n
-}
-
// walkFor walks an OFOR or OFORUNTIL node.
func walkFor(n *ir.ForStmt) ir.Node {
if n.Cond != nil {
diff --git a/src/cmd/compile/internal/walk/switch.go b/src/cmd/compile/internal/walk/switch.go
index 59446ef3db..162de018f6 100644
--- a/src/cmd/compile/internal/walk/switch.go
+++ b/src/cmd/compile/internal/walk/switch.go
@@ -19,9 +19,10 @@ import (
// walkSwitch walks a switch statement.
func walkSwitch(sw *ir.SwitchStmt) {
// Guard against double walk, see #25776.
- if len(sw.Cases) == 0 && len(sw.Compiled) > 0 {
+ if sw.Walked() {
return // Was fatal, but eliminating every possible source of double-walking is hard
}
+ sw.SetWalked(true)
if sw.Tag != nil && sw.Tag.Op() == ir.OTYPESW {
walkSwitchType(sw)
@@ -48,8 +49,8 @@ func walkSwitchExpr(sw *ir.SwitchStmt) {
// Given "switch string(byteslice)",
// with all cases being side-effect free,
// use a zero-cost alias of the byte slice.
- // Do this before calling walkexpr on cond,
- // because walkexpr will lower the string
+ // Do this before calling walkExpr on cond,
+ // because walkExpr will lower the string
// conversion into a runtime call.
// See issue 24937 for more discussion.
if cond.Op() == ir.OBYTES2STR && allCaseExprsAreSideEffectFree(sw) {
diff --git a/src/cmd/compile/internal/walk/temp.go b/src/cmd/compile/internal/walk/temp.go
new file mode 100644
index 0000000000..9879a6c69d
--- /dev/null
+++ b/src/cmd/compile/internal/walk/temp.go
@@ -0,0 +1,40 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+)
+
+// initStackTemp appends statements to init to initialize the given
+// temporary variable to val, and then returns the expression &tmp.
+func initStackTemp(init *ir.Nodes, tmp *ir.Name, val ir.Node) *ir.AddrExpr {
+ if val != nil && !types.Identical(tmp.Type(), val.Type()) {
+ base.Fatalf("bad initial value for %L: %L", tmp, val)
+ }
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmp, val))
+ return typecheck.Expr(typecheck.NodAddr(tmp)).(*ir.AddrExpr)
+}
+
+// stackTempAddr returns the expression &tmp, where tmp is a newly
+// allocated temporary variable of the given type. Statements to
+// zero-initialize tmp are appended to init.
+func stackTempAddr(init *ir.Nodes, typ *types.Type) *ir.AddrExpr {
+ return initStackTemp(init, typecheck.Temp(typ), nil)
+}
+
+// stackBufAddr returns the expression &tmp, where tmp is a newly
+// allocated temporary variable of type [len]elem. This variable is
+// initialized, and elem must not contain pointers.
+func stackBufAddr(len int64, elem *types.Type) *ir.AddrExpr {
+ if elem.HasPointers() {
+ base.FatalfAt(base.Pos, "%v has pointers", elem)
+ }
+ tmp := typecheck.Temp(types.NewArray(elem, len))
+ return typecheck.Expr(typecheck.NodAddr(tmp)).(*ir.AddrExpr)
+}
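
These helpers are what the earlier hunks in walk/builtin.go, walk/complit.go, and walk/convert.go now call; a condensed sketch of their use (taken from those hunks, not new API):

	// Zero-initialized stack temporary, returning &tmp:
	h := stackTempAddr(init, hmapType)

	// Stack temporary seeded from a static initializer (or zeroed when vstat is nil):
	a := initStackTemp(init, x, vstat)

	// Small pointer-free scratch buffer, e.g. for string conversions:
	buf := stackBufAddr(tmpstringbufsize, types.Types[types.TUINT8])
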
diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go
index 4271772fb7..b47d96dc4c 100644
--- a/src/cmd/compile/internal/walk/walk.go
+++ b/src/cmd/compile/internal/walk/walk.go
@@ -7,7 +7,6 @@ package walk
import (
"errors"
"fmt"
- "strings"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
@@ -47,13 +46,6 @@ func Walk(fn *ir.Func) {
ir.DumpList(s, ir.CurFunc.Body)
}
- zeroResults()
- heapmoves()
- if base.Flag.W != 0 && len(ir.CurFunc.Enter) > 0 {
- s := fmt.Sprintf("enter %v", ir.CurFunc.Sym())
- ir.DumpList(s, ir.CurFunc.Enter)
- }
-
if base.Flag.Cfg.Instrumenting {
instrument(fn)
}
@@ -64,23 +56,6 @@ func Walk(fn *ir.Func) {
}
}
-func paramoutheap(fn *ir.Func) bool {
- for _, ln := range fn.Dcl {
- switch ln.Class {
- case ir.PPARAMOUT:
- if ir.IsParamStackCopy(ln) || ln.Addrtaken() {
- return true
- }
-
- case ir.PAUTO:
- // stop early - parameters are over
- return false
- }
- }
-
- return false
-}
-
// walkRecv walks an ORECV node.
func walkRecv(n *ir.UnaryExpr) ir.Node {
if n.Typecheck() == 0 {
@@ -97,8 +72,6 @@ func convas(n *ir.AssignStmt, init *ir.Nodes) *ir.AssignStmt {
if n.Op() != ir.OAS {
base.Fatalf("convas: not OAS %v", n.Op())
}
- defer updateHasCall(n)
-
n.SetTypecheck(1)
if n.X == nil || n.Y == nil {
@@ -127,93 +100,10 @@ func convas(n *ir.AssignStmt, init *ir.Nodes) *ir.AssignStmt {
var stop = errors.New("stop")
-// paramstoheap returns code to allocate memory for heap-escaped parameters
-// and to copy non-result parameters' values from the stack.
-func paramstoheap(params *types.Type) []ir.Node {
- var nn []ir.Node
- for _, t := range params.Fields().Slice() {
- v := ir.AsNode(t.Nname)
- if v != nil && v.Sym() != nil && strings.HasPrefix(v.Sym().Name, "~r") { // unnamed result
- v = nil
- }
- if v == nil {
- continue
- }
-
- if stackcopy := v.Name().Stackcopy; stackcopy != nil {
- nn = append(nn, walkStmt(ir.NewDecl(base.Pos, ir.ODCL, v.(*ir.Name))))
- if stackcopy.Class == ir.PPARAM {
- nn = append(nn, walkStmt(typecheck.Stmt(ir.NewAssignStmt(base.Pos, v, stackcopy))))
- }
- }
- }
-
- return nn
-}
-
-// zeroResults zeros the return values at the start of the function.
-// We need to do this very early in the function. Defer might stop a
-// panic and show the return values as they exist at the time of
-// panic. For precise stacks, the garbage collector assumes results
-// are always live, so we need to zero them before any allocations,
-// even allocations to move params/results to the heap.
-// The generated code is added to Curfn's Enter list.
-func zeroResults() {
- for _, f := range ir.CurFunc.Type().Results().Fields().Slice() {
- v := ir.AsNode(f.Nname)
- if v != nil && v.Name().Heapaddr != nil {
- // The local which points to the return value is the
- // thing that needs zeroing. This is already handled
- // by a Needzero annotation in plive.go:livenessepilogue.
- continue
- }
- if ir.IsParamHeapCopy(v) {
- // TODO(josharian/khr): Investigate whether we can switch to "continue" here,
- // and document more in either case.
- // In the review of CL 114797, Keith wrote (roughly):
- // I don't think the zeroing below matters.
- // The stack return value will never be marked as live anywhere in the function.
- // It is not written to until deferreturn returns.
- v = v.Name().Stackcopy
- }
- // Zero the stack location containing f.
- ir.CurFunc.Enter.Append(ir.NewAssignStmt(ir.CurFunc.Pos(), v, nil))
- }
-}
-
-// returnsfromheap returns code to copy values for heap-escaped parameters
-// back to the stack.
-func returnsfromheap(params *types.Type) []ir.Node {
- var nn []ir.Node
- for _, t := range params.Fields().Slice() {
- v := ir.AsNode(t.Nname)
- if v == nil {
- continue
- }
- if stackcopy := v.Name().Stackcopy; stackcopy != nil && stackcopy.Class == ir.PPARAMOUT {
- nn = append(nn, walkStmt(typecheck.Stmt(ir.NewAssignStmt(base.Pos, stackcopy, v))))
- }
- }
-
- return nn
-}
-
-// heapmoves generates code to handle migrating heap-escaped parameters
-// between the stack and the heap. The generated code is added to Curfn's
-// Enter and Exit lists.
-func heapmoves() {
- lno := base.Pos
- base.Pos = ir.CurFunc.Pos()
- nn := paramstoheap(ir.CurFunc.Type().Recvs())
- nn = append(nn, paramstoheap(ir.CurFunc.Type().Params())...)
- nn = append(nn, paramstoheap(ir.CurFunc.Type().Results())...)
- ir.CurFunc.Enter.Append(nn...)
- base.Pos = ir.CurFunc.Endlineno
- ir.CurFunc.Exit.Append(returnsfromheap(ir.CurFunc.Type().Results())...)
- base.Pos = lno
-}
-
func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) *ir.CallExpr {
+ if init == nil {
+ base.Fatalf("mkcall with nil init: %v", fn)
+ }
if fn.Type() == nil || fn.Type().Kind() != types.TFUNC {
base.Fatalf("mkcall %v %v", fn, fn.Type())
}
@@ -233,10 +123,24 @@ func mkcall(name string, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.Cal
return vmkcall(typecheck.LookupRuntime(name), t, init, args)
}
+func mkcallstmt(name string, args ...ir.Node) ir.Node {
+ return mkcallstmt1(typecheck.LookupRuntime(name), args...)
+}
+
func mkcall1(fn ir.Node, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr {
return vmkcall(fn, t, init, args)
}
+func mkcallstmt1(fn ir.Node, args ...ir.Node) ir.Node {
+ var init ir.Nodes
+ n := vmkcall(fn, nil, &init, args)
+ if len(init) == 0 {
+ return n
+ }
+ init.Append(n)
+ return ir.NewBlockStmt(n.Pos(), init)
+}
+
func chanfn(name string, n int, t *types.Type) ir.Node {
if !t.IsChan() {
base.Fatalf("chanfn %v", t)
@@ -324,7 +228,7 @@ func mapfast(t *types.Type) int {
func walkAppendArgs(n *ir.CallExpr, init *ir.Nodes) {
walkExprListSafe(n.Args, init)
- // walkexprlistsafe will leave OINDEX (s[n]) alone if both s
+ // walkExprListSafe will leave OINDEX (s[n]) alone if both s
// and n are name or literal, but those may index the slice we're
// modifying here. Fix explicitly.
ls := n.Args
@@ -356,8 +260,8 @@ func appendWalkStmt(init *ir.Nodes, stmt ir.Node) {
op := stmt.Op()
n := typecheck.Stmt(stmt)
if op == ir.OAS || op == ir.OAS2 {
- // If the assignment has side effects, walkexpr will append them
- // directly to init for us, while walkstmt will wrap it in an OBLOCK.
+ // If the assignment has side effects, walkExpr will append them
+ // directly to init for us, while walkStmt will wrap it in an OBLOCK.
// We need to append them directly.
// TODO(rsc): Clean this up.
n = walkExpr(n, init)
@@ -372,7 +276,7 @@ func appendWalkStmt(init *ir.Nodes, stmt ir.Node) {
const maxOpenDefers = 8
// backingArrayPtrLen extracts the pointer and length from a slice or string.
-// This constructs two nodes referring to n, so n must be a cheapexpr.
+// This constructs two nodes referring to n, so n must be a cheapExpr.
func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) {
var init ir.Nodes
c := cheapExpr(n, &init)
@@ -390,123 +294,71 @@ func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) {
return ptr, length
}
-// updateHasCall checks whether expression n contains any function
-// calls and sets the n.HasCall flag if so.
-func updateHasCall(n ir.Node) {
- if n == nil {
- return
- }
- n.SetHasCall(calcHasCall(n))
-}
-
-func calcHasCall(n ir.Node) bool {
- if len(n.Init()) != 0 {
- // TODO(mdempsky): This seems overly conservative.
+// mayCall reports whether evaluating expression n may require
+// function calls, which could clobber function call arguments/results
+// currently on the stack.
+func mayCall(n ir.Node) bool {
+ // When instrumenting, any expression might require function calls.
+ if base.Flag.Cfg.Instrumenting {
return true
}
- switch n.Op() {
- default:
- base.Fatalf("calcHasCall %+v", n)
- panic("unreachable")
+ isSoftFloat := func(typ *types.Type) bool {
+ return types.IsFloat[typ.Kind()] || types.IsComplex[typ.Kind()]
+ }
- case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OTYPE, ir.ONAMEOFFSET:
- if n.HasCall() {
- base.Fatalf("OLITERAL/ONAME/OTYPE should never have calls: %+v", n)
- }
- return false
- case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
- return true
- case ir.OANDAND, ir.OOROR:
- // hard with instrumented code
- n := n.(*ir.LogicalExpr)
- if base.Flag.Cfg.Instrumenting {
- return true
+ return ir.Any(n, func(n ir.Node) bool {
+ // walk should have already moved any Init blocks off of
+ // expressions.
+ if len(n.Init()) != 0 {
+ base.FatalfAt(n.Pos(), "mayCall %+v", n)
}
- return n.X.HasCall() || n.Y.HasCall()
- case ir.OINDEX, ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR,
- ir.ODEREF, ir.ODOTPTR, ir.ODOTTYPE, ir.ODIV, ir.OMOD:
- // These ops might panic, make sure they are done
- // before we start marshaling args for a call. See issue 16760.
- return true
- // When using soft-float, these ops might be rewritten to function calls
- // so we ensure they are evaluated first.
- case ir.OADD, ir.OSUB, ir.OMUL:
- n := n.(*ir.BinaryExpr)
- if ssagen.Arch.SoftFloat && (types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) {
- return true
- }
- return n.X.HasCall() || n.Y.HasCall()
- case ir.ONEG:
- n := n.(*ir.UnaryExpr)
- if ssagen.Arch.SoftFloat && (types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) {
- return true
- }
- return n.X.HasCall()
- case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
- n := n.(*ir.BinaryExpr)
- if ssagen.Arch.SoftFloat && (types.IsFloat[n.X.Type().Kind()] || types.IsComplex[n.X.Type().Kind()]) {
+ switch n.Op() {
+ default:
+ base.FatalfAt(n.Pos(), "mayCall %+v", n)
+
+ case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
return true
- }
- return n.X.HasCall() || n.Y.HasCall()
- case ir.OCONV:
- n := n.(*ir.ConvExpr)
- if ssagen.Arch.SoftFloat && ((types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) || (types.IsFloat[n.X.Type().Kind()] || types.IsComplex[n.X.Type().Kind()])) {
+
+ case ir.OINDEX, ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR,
+ ir.ODEREF, ir.ODOTPTR, ir.ODOTTYPE, ir.ODIV, ir.OMOD:
+ // These ops might panic, make sure they are done
+ // before we start marshaling args for a call. See issue 16760.
return true
+
+ case ir.OANDAND, ir.OOROR:
+ n := n.(*ir.LogicalExpr)
+ // The RHS expression may have init statements that
+ // should only execute conditionally, and so cannot be
+ // pulled out to the top-level init list. We could try
+ // to be more precise here.
+ return len(n.Y.Init()) != 0
+
+ // When using soft-float, these ops might be rewritten to function calls
+ // so we ensure they are evaluated first.
+ case ir.OADD, ir.OSUB, ir.OMUL, ir.ONEG:
+ return ssagen.Arch.SoftFloat && isSoftFloat(n.Type())
+ case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
+ n := n.(*ir.BinaryExpr)
+ return ssagen.Arch.SoftFloat && isSoftFloat(n.X.Type())
+ case ir.OCONV:
+ n := n.(*ir.ConvExpr)
+ return ssagen.Arch.SoftFloat && (isSoftFloat(n.Type()) || isSoftFloat(n.X.Type()))
+
+ case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OLINKSYMOFFSET, ir.OMETHEXPR,
+ ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOMPLEX, ir.OEFACE,
+ ir.OADDR, ir.OBITNOT, ir.ONOT, ir.OPLUS,
+ ir.OCAP, ir.OIMAG, ir.OLEN, ir.OREAL,
+ ir.OCONVNOP, ir.ODOT,
+ ir.OCFUNC, ir.OIDATA, ir.OITAB, ir.OSPTR,
+ ir.OBYTES2STRTMP, ir.OGETG, ir.OSLICEHEADER:
+ // ok: operations that don't require function calls.
+ // Expand as needed.
}
- return n.X.HasCall()
-
- case ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOPY, ir.OCOMPLEX, ir.OEFACE:
- n := n.(*ir.BinaryExpr)
- return n.X.HasCall() || n.Y.HasCall()
-
- case ir.OAS:
- n := n.(*ir.AssignStmt)
- return n.X.HasCall() || n.Y != nil && n.Y.HasCall()
-
- case ir.OADDR:
- n := n.(*ir.AddrExpr)
- return n.X.HasCall()
- case ir.OPAREN:
- n := n.(*ir.ParenExpr)
- return n.X.HasCall()
- case ir.OBITNOT, ir.ONOT, ir.OPLUS, ir.ORECV,
- ir.OALIGNOF, ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.ONEW,
- ir.OOFFSETOF, ir.OPANIC, ir.OREAL, ir.OSIZEOF,
- ir.OCHECKNIL, ir.OCFUNC, ir.OIDATA, ir.OITAB, ir.ONEWOBJ, ir.OSPTR, ir.OVARDEF, ir.OVARKILL, ir.OVARLIVE:
- n := n.(*ir.UnaryExpr)
- return n.X.HasCall()
- case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
- n := n.(*ir.SelectorExpr)
- return n.X.HasCall()
-
- case ir.OGETG, ir.OMETHEXPR:
- return false
- // TODO(rsc): These look wrong in various ways but are what calcHasCall has always done.
- case ir.OADDSTR:
- // TODO(rsc): This used to check left and right, which are not part of OADDSTR.
- return false
- case ir.OBLOCK:
- // TODO(rsc): Surely the block's statements matter.
return false
- case ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.OBYTES2STRTMP, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2BYTESTMP, ir.OSTR2RUNES, ir.ORUNESTR:
- // TODO(rsc): Some conversions are themselves calls, no?
- n := n.(*ir.ConvExpr)
- return n.X.HasCall()
- case ir.ODOTTYPE2:
- // TODO(rsc): Shouldn't this be up with ODOTTYPE above?
- n := n.(*ir.TypeAssertExpr)
- return n.X.HasCall()
- case ir.OSLICEHEADER:
- // TODO(rsc): What about len and cap?
- n := n.(*ir.SliceHeaderExpr)
- return n.Ptr.HasCall()
- case ir.OAS2DOTTYPE, ir.OAS2FUNC:
- // TODO(rsc): Surely we need to check List and Rlist.
- return false
- }
+ })
}
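// A sketch of how mayCall is intended to be used (hypothetical caller, not
// this CL's exact code): when marshaling call arguments, any argument whose
// evaluation may itself call a function is first copied into a temporary,
// so evaluating it cannot clobber argument/result slots already on the
// stack.
//
//	for i, arg := range callArgs {
//		if mayCall(arg) {
//			// copyExpr is assumed to allocate a temp, append the
//			// assignment to init, and return the temp.
//			callArgs[i] = copyExpr(arg, arg.Type(), init)
//		}
//	}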
// itabType loads the _type field from a runtime.itab struct.
@@ -539,7 +391,7 @@ func runtimeField(name string, offset int64, typ *types.Type) *types.Field {
// ifaceData loads the data field from an interface.
// The concrete type must be known to have type t.
-// It follows the pointer if !isdirectiface(t).
+// It follows the pointer if !IsDirectIface(t).
func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node {
if t.IsInterface() {
base.Fatalf("ifaceData interface: %v", t)