-rw-r--r--codereview.cfg3
-rw-r--r--misc/cgo/errors/testdata/err2.go12
-rw-r--r--src/cmd/compile/abi-internal.md122
-rw-r--r--src/cmd/compile/internal/amd64/galign.go3
-rw-r--r--src/cmd/compile/internal/amd64/ggen.go10
-rw-r--r--src/cmd/compile/internal/amd64/ssa.go47
-rw-r--r--src/cmd/compile/internal/arm/galign.go1
-rw-r--r--src/cmd/compile/internal/arm64/galign.go3
-rw-r--r--src/cmd/compile/internal/arm64/ssa.go62
-rw-r--r--src/cmd/compile/internal/base/bootstrap_false.go11
-rw-r--r--src/cmd/compile/internal/base/bootstrap_true.go11
-rw-r--r--src/cmd/compile/internal/base/debug.go3
-rw-r--r--src/cmd/compile/internal/base/flag.go4
-rw-r--r--src/cmd/compile/internal/base/mapfile_mmap.go (renamed from src/cmd/compile/internal/typecheck/mapfile_mmap.go)4
-rw-r--r--src/cmd/compile/internal/base/mapfile_read.go (renamed from src/cmd/compile/internal/typecheck/mapfile_read.go)4
-rw-r--r--src/cmd/compile/internal/base/print.go21
-rw-r--r--src/cmd/compile/internal/escape/assign.go120
-rw-r--r--src/cmd/compile/internal/escape/call.go428
-rw-r--r--src/cmd/compile/internal/escape/desugar.go37
-rw-r--r--src/cmd/compile/internal/escape/escape.go1772
-rw-r--r--src/cmd/compile/internal/escape/expr.go335
-rw-r--r--src/cmd/compile/internal/escape/graph.go324
-rw-r--r--src/cmd/compile/internal/escape/leaks.go106
-rw-r--r--src/cmd/compile/internal/escape/solve.go289
-rw-r--r--src/cmd/compile/internal/escape/stmt.go207
-rw-r--r--src/cmd/compile/internal/escape/utils.go215
-rw-r--r--src/cmd/compile/internal/gc/export.go116
-rw-r--r--src/cmd/compile/internal/gc/main.go39
-rw-r--r--src/cmd/compile/internal/gc/obj.go10
-rw-r--r--src/cmd/compile/internal/importer/exportdata.go1
-rw-r--r--src/cmd/compile/internal/importer/gcimporter.go3
-rw-r--r--src/cmd/compile/internal/importer/gcimporter_test.go23
-rw-r--r--src/cmd/compile/internal/importer/iimport.go277
-rw-r--r--src/cmd/compile/internal/importer/support.go4
-rw-r--r--src/cmd/compile/internal/inline/inl.go518
-rw-r--r--src/cmd/compile/internal/ir/expr.go133
-rw-r--r--src/cmd/compile/internal/ir/fmt.go45
-rw-r--r--src/cmd/compile/internal/ir/func.go139
-rw-r--r--src/cmd/compile/internal/ir/name.go69
-rw-r--r--src/cmd/compile/internal/ir/node.go41
-rw-r--r--src/cmd/compile/internal/ir/node_gen.go72
-rw-r--r--src/cmd/compile/internal/ir/op_string.go143
-rw-r--r--src/cmd/compile/internal/ir/package.go3
-rw-r--r--src/cmd/compile/internal/ir/scc.go2
-rw-r--r--src/cmd/compile/internal/ir/type.go27
-rw-r--r--src/cmd/compile/internal/ir/val.go2
-rw-r--r--src/cmd/compile/internal/liveness/plive.go4
-rw-r--r--src/cmd/compile/internal/logopt/logopt_test.go6
-rw-r--r--src/cmd/compile/internal/mips/galign.go1
-rw-r--r--src/cmd/compile/internal/mips64/galign.go1
-rw-r--r--src/cmd/compile/internal/noder/codes.go124
-rw-r--r--src/cmd/compile/internal/noder/decl.go57
-rw-r--r--src/cmd/compile/internal/noder/decoder.go301
-rw-r--r--src/cmd/compile/internal/noder/encoder.go284
-rw-r--r--src/cmd/compile/internal/noder/export.go65
-rw-r--r--src/cmd/compile/internal/noder/expr.go204
-rw-r--r--src/cmd/compile/internal/noder/frames_go1.go20
-rw-r--r--src/cmd/compile/internal/noder/frames_go17.go24
-rw-r--r--src/cmd/compile/internal/noder/helpers.go143
-rw-r--r--src/cmd/compile/internal/noder/import.go302
-rw-r--r--src/cmd/compile/internal/noder/irgen.go112
-rw-r--r--src/cmd/compile/internal/noder/linker.go296
-rw-r--r--src/cmd/compile/internal/noder/noder.go158
-rw-r--r--src/cmd/compile/internal/noder/object.go35
-rw-r--r--src/cmd/compile/internal/noder/quirks.go450
-rw-r--r--src/cmd/compile/internal/noder/reader.go2390
-rw-r--r--src/cmd/compile/internal/noder/reader2.go510
-rw-r--r--src/cmd/compile/internal/noder/reloc.go42
-rw-r--r--src/cmd/compile/internal/noder/stencil.go2098
-rw-r--r--src/cmd/compile/internal/noder/stmt.go26
-rw-r--r--src/cmd/compile/internal/noder/sync.go187
-rw-r--r--src/cmd/compile/internal/noder/syncmarker_string.go155
-rw-r--r--src/cmd/compile/internal/noder/transform.go182
-rw-r--r--src/cmd/compile/internal/noder/types.go206
-rw-r--r--src/cmd/compile/internal/noder/unified.go340
-rw-r--r--src/cmd/compile/internal/noder/unified_test.go153
-rw-r--r--src/cmd/compile/internal/noder/validate.go10
-rw-r--r--src/cmd/compile/internal/noder/writer.go1885
-rw-r--r--src/cmd/compile/internal/pkginit/initorder.go2
-rw-r--r--src/cmd/compile/internal/ppc64/galign.go1
-rw-r--r--src/cmd/compile/internal/ppc64/ggen.go27
-rw-r--r--src/cmd/compile/internal/reflectdata/alg.go6
-rw-r--r--src/cmd/compile/internal/reflectdata/reflect.go496
-rw-r--r--src/cmd/compile/internal/riscv64/galign.go1
-rw-r--r--src/cmd/compile/internal/s390x/galign.go1
-rw-r--r--src/cmd/compile/internal/ssa/config.go16
-rw-r--r--src/cmd/compile/internal/ssa/expand_calls.go14
-rw-r--r--src/cmd/compile/internal/ssa/export_test.go2
-rw-r--r--src/cmd/compile/internal/ssa/gen/AMD64.rules2
-rw-r--r--src/cmd/compile/internal/ssa/gen/ARM64.rules9
-rw-r--r--src/cmd/compile/internal/ssa/gen/ARM64Ops.go28
-rw-r--r--src/cmd/compile/internal/ssa/opGen.go10
-rw-r--r--src/cmd/compile/internal/ssa/rewrite.go36
-rw-r--r--src/cmd/compile/internal/ssa/rewriteAMD64.go5
-rw-r--r--src/cmd/compile/internal/ssa/rewriteARM64.go30
-rw-r--r--src/cmd/compile/internal/ssa/schedule.go2
-rw-r--r--src/cmd/compile/internal/ssa/softfloat.go1
-rw-r--r--src/cmd/compile/internal/ssagen/abi.go3
-rw-r--r--src/cmd/compile/internal/ssagen/arch.go11
-rw-r--r--src/cmd/compile/internal/ssagen/pgen.go5
-rw-r--r--src/cmd/compile/internal/ssagen/ssa.go524
-rw-r--r--src/cmd/compile/internal/staticdata/embed.go7
-rw-r--r--src/cmd/compile/internal/staticinit/sched.go4
-rw-r--r--src/cmd/compile/internal/syntax/parser.go31
-rw-r--r--src/cmd/compile/internal/syntax/positions.go4
-rw-r--r--src/cmd/compile/internal/syntax/testdata/go2/linalg.go216
-rw-r--r--src/cmd/compile/internal/syntax/testdata/go2/smoketest.go26
-rw-r--r--src/cmd/compile/internal/syntax/testdata/go2/typeinst2.go210
-rw-r--r--src/cmd/compile/internal/syntax/testdata/go2/typeparams.go268
-rw-r--r--src/cmd/compile/internal/syntax/testdata/interface.go246
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue46558.src14
-rw-r--r--src/cmd/compile/internal/syntax/walk.go74
-rw-r--r--src/cmd/compile/internal/test/abiutils_test.go12
-rw-r--r--src/cmd/compile/internal/test/inl_test.go4
-rw-r--r--src/cmd/compile/internal/typecheck/bexport.go3
-rw-r--r--src/cmd/compile/internal/typecheck/builtin.go423
-rw-r--r--src/cmd/compile/internal/typecheck/builtin/runtime.go19
-rw-r--r--src/cmd/compile/internal/typecheck/const.go2
-rw-r--r--src/cmd/compile/internal/typecheck/crawler.go231
-rw-r--r--src/cmd/compile/internal/typecheck/dcl.go50
-rw-r--r--src/cmd/compile/internal/typecheck/export.go26
-rw-r--r--src/cmd/compile/internal/typecheck/expr.go159
-rw-r--r--src/cmd/compile/internal/typecheck/func.go243
-rw-r--r--src/cmd/compile/internal/typecheck/iexport.go336
-rw-r--r--src/cmd/compile/internal/typecheck/iimport.go589
-rw-r--r--src/cmd/compile/internal/typecheck/stmt.go41
-rw-r--r--src/cmd/compile/internal/typecheck/subr.go551
-rw-r--r--src/cmd/compile/internal/typecheck/syms.go4
-rw-r--r--src/cmd/compile/internal/typecheck/typecheck.go55
-rw-r--r--src/cmd/compile/internal/typecheck/universe.go15
-rw-r--r--src/cmd/compile/internal/types/fmt.go95
-rw-r--r--src/cmd/compile/internal/types/identity.go9
-rw-r--r--src/cmd/compile/internal/types/kind_string.go23
-rw-r--r--src/cmd/compile/internal/types/pkg.go4
-rw-r--r--src/cmd/compile/internal/types/size.go44
-rw-r--r--src/cmd/compile/internal/types/sizeof_test.go2
-rw-r--r--src/cmd/compile/internal/types/sort.go13
-rw-r--r--src/cmd/compile/internal/types/sym.go8
-rw-r--r--src/cmd/compile/internal/types/type.go202
-rw-r--r--src/cmd/compile/internal/types2/api.go12
-rw-r--r--src/cmd/compile/internal/types2/api_test.go90
-rw-r--r--src/cmd/compile/internal/types2/array.go25
-rw-r--r--src/cmd/compile/internal/types2/assignments.go2
-rw-r--r--src/cmd/compile/internal/types2/basic.go82
-rw-r--r--src/cmd/compile/internal/types2/builtins.go208
-rw-r--r--src/cmd/compile/internal/types2/builtins_test.go13
-rw-r--r--src/cmd/compile/internal/types2/call.go68
-rw-r--r--src/cmd/compile/internal/types2/chan.go35
-rw-r--r--src/cmd/compile/internal/types2/check.go18
-rw-r--r--src/cmd/compile/internal/types2/conversions.go6
-rw-r--r--src/cmd/compile/internal/types2/decl.go238
-rw-r--r--src/cmd/compile/internal/types2/errorcalls_test.go2
-rw-r--r--src/cmd/compile/internal/types2/errors.go6
-rw-r--r--src/cmd/compile/internal/types2/expr.go101
-rw-r--r--src/cmd/compile/internal/types2/index.go125
-rw-r--r--src/cmd/compile/internal/types2/infer.go60
-rw-r--r--src/cmd/compile/internal/types2/instantiate.go206
-rw-r--r--src/cmd/compile/internal/types2/interface.go226
-rw-r--r--src/cmd/compile/internal/types2/issues_test.go5
-rw-r--r--src/cmd/compile/internal/types2/labels.go3
-rw-r--r--src/cmd/compile/internal/types2/lookup.go115
-rw-r--r--src/cmd/compile/internal/types2/map.go24
-rw-r--r--src/cmd/compile/internal/types2/named.go278
-rw-r--r--src/cmd/compile/internal/types2/object.go80
-rw-r--r--src/cmd/compile/internal/types2/object_test.go2
-rw-r--r--src/cmd/compile/internal/types2/operand.go45
-rw-r--r--src/cmd/compile/internal/types2/package.go22
-rw-r--r--src/cmd/compile/internal/types2/pointer.go19
-rw-r--r--src/cmd/compile/internal/types2/predicates.go144
-rw-r--r--src/cmd/compile/internal/types2/resolver.go31
-rw-r--r--src/cmd/compile/internal/types2/resolver_test.go4
-rw-r--r--src/cmd/compile/internal/types2/sanitize.go202
-rw-r--r--src/cmd/compile/internal/types2/scope.go97
-rw-r--r--src/cmd/compile/internal/types2/signature.go393
-rw-r--r--src/cmd/compile/internal/types2/sizeof_test.go14
-rw-r--r--src/cmd/compile/internal/types2/sizes.go10
-rw-r--r--src/cmd/compile/internal/types2/slice.go19
-rw-r--r--src/cmd/compile/internal/types2/stmt.go82
-rw-r--r--src/cmd/compile/internal/types2/struct.go213
-rw-r--r--src/cmd/compile/internal/types2/subst.go271
-rw-r--r--src/cmd/compile/internal/types2/termlist.go167
-rw-r--r--src/cmd/compile/internal/types2/termlist_test.go313
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/builtins.go2230
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/builtins.src2
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/const0.src2
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/cycles4.src15
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/decls0.src10
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/decls1.src2
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/expr1.src4
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/expr2.src2
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/expr3.src2
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/issues.go248
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/issues.src12
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/linalg.go216
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/map2.go22
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/mtypeparams.go22
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/stmt0.src12
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/tinference.go258
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/typeinst2.go243
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/typeparams.go2251
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/unions.go266
-rw-r--r--src/cmd/compile/internal/types2/testdata/examples/constraints.go291
-rw-r--r--src/cmd/compile/internal/types2/testdata/examples/functions.go230
-rw-r--r--src/cmd/compile/internal/types2/testdata/examples/inference.go210
-rw-r--r--src/cmd/compile/internal/types2/testdata/examples/methods.go217
-rw-r--r--src/cmd/compile/internal/types2/testdata/examples/operations.go229
-rw-r--r--src/cmd/compile/internal/types2/testdata/examples/types.go270
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39634.go217
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39680.go28
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39693.go217
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39699.go22
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39711.go24
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39723.go22
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39725.go24
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39755.go24
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39938.go24
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39948.go22
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39976.go22
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue40038.go22
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue40056.go22
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue40301.go24
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue40684.go24
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue40789.go237
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue41124.go218
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue42758.go22
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue43671.go258
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue45548.go22
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue45635.go25
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue45639.go212
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue45985.go22
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue46090.go29
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue46275.go226
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue46583.src28
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue47031.go220
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue47115.go240
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue47127.go237
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue47411.go226
-rw-r--r--src/cmd/compile/internal/types2/tuple.go36
-rw-r--r--src/cmd/compile/internal/types2/type.go930
-rw-r--r--src/cmd/compile/internal/types2/typeparam.go143
-rw-r--r--src/cmd/compile/internal/types2/types_test.go9
-rw-r--r--src/cmd/compile/internal/types2/typeset.go392
-rw-r--r--src/cmd/compile/internal/types2/typeset_test.go15
-rw-r--r--src/cmd/compile/internal/types2/typestring.go212
-rw-r--r--src/cmd/compile/internal/types2/typestring_test.go57
-rw-r--r--src/cmd/compile/internal/types2/typeterm.go166
-rw-r--r--src/cmd/compile/internal/types2/typeterm_test.go239
-rw-r--r--src/cmd/compile/internal/types2/typexpr.go844
-rw-r--r--src/cmd/compile/internal/types2/unify.go56
-rw-r--r--src/cmd/compile/internal/types2/union.go150
-rw-r--r--src/cmd/compile/internal/types2/universe.go81
-rw-r--r--src/cmd/compile/internal/walk/assign.go1
-rw-r--r--src/cmd/compile/internal/walk/builtin.go13
-rw-r--r--src/cmd/compile/internal/walk/closure.go96
-rw-r--r--src/cmd/compile/internal/walk/complit.go4
-rw-r--r--src/cmd/compile/internal/walk/convert.go296
-rw-r--r--src/cmd/compile/internal/walk/expr.go123
-rw-r--r--src/cmd/compile/internal/walk/order.go460
-rw-r--r--src/cmd/compile/internal/walk/stmt.go150
-rw-r--r--src/cmd/compile/internal/walk/switch.go45
-rw-r--r--src/cmd/compile/internal/walk/walk.go7
-rw-r--r--src/cmd/compile/internal/wasm/ssa.go7
-rw-r--r--src/cmd/compile/internal/x86/galign.go1
-rw-r--r--src/cmd/go.mod4
-rw-r--r--src/cmd/go.sum27
-rw-r--r--src/cmd/go/go_test.go2
-rw-r--r--src/cmd/go/internal/work/gc.go19
-rw-r--r--src/cmd/gofmt/gofmt_test.go7
-rw-r--r--src/cmd/gofmt/testdata/typeparams.golden2
-rw-r--r--src/cmd/gofmt/testdata/typeparams.input2
-rw-r--r--src/cmd/internal/dwarf/dwarf.go13
-rw-r--r--src/cmd/internal/goobj/builtinlist.go7
-rw-r--r--src/cmd/internal/goobj/mkbuiltin.go4
-rw-r--r--src/cmd/internal/obj/arm/asm5.go11
-rw-r--r--src/cmd/internal/obj/arm64/obj7.go100
-rw-r--r--src/cmd/internal/obj/objfile.go5
-rw-r--r--src/cmd/internal/obj/wasm/wasmobj.go36
-rw-r--r--src/cmd/internal/obj/x86/asm6.go1
-rw-r--r--src/cmd/internal/obj/x86/obj6.go4
-rw-r--r--src/cmd/internal/objabi/funcid.go3
-rw-r--r--src/cmd/link/internal/ld/deadcode.go3
-rw-r--r--src/cmd/link/internal/ld/dwarf_test.go7
-rw-r--r--src/cmd/link/internal/ld/pcln.go9
-rw-r--r--src/cmd/link/internal/ld/symtab.go1
-rw-r--r--src/cmd/link/link_test.go10
-rw-r--r--src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go37
-rw-r--r--src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go28
-rw-r--r--src/cmd/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go10
-rw-r--r--src/cmd/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go2
-rw-r--r--src/cmd/vendor/golang.org/x/tools/internal/lsp/fuzzy/input.go37
-rw-r--r--src/cmd/vendor/golang.org/x/tools/internal/lsp/fuzzy/matcher.go23
-rw-r--r--src/cmd/vendor/golang.org/x/tools/internal/lsp/fuzzy/symbol.go224
-rw-r--r--src/cmd/vendor/golang.org/x/tools/internal/typeparams/common.go25
-rw-r--r--src/cmd/vendor/golang.org/x/tools/internal/typeparams/notypeparams.go93
-rw-r--r--src/cmd/vendor/golang.org/x/tools/internal/typeparams/typeparams.go115
-rw-r--r--src/cmd/vendor/modules.txt3
-rw-r--r--src/cmd/vet/testdata/print/print.go8
-rw-r--r--src/go.mod2
-rw-r--r--src/go.sum7
-rw-r--r--src/go/ast/ast.go30
-rw-r--r--src/go/ast/ast_notypeparams.go28
-rw-r--r--src/go/ast/ast_typeparams.go51
-rw-r--r--src/go/ast/walk.go18
-rw-r--r--src/go/ast/walk_notypeparams.go17
-rw-r--r--src/go/ast/walk_typeparams.go36
-rw-r--r--src/go/build/deps_test.go10
-rw-r--r--src/go/constant/kind_string.go28
-rw-r--r--src/go/constant/value.go2
-rw-r--r--src/go/internal/gcimporter/gcimporter_test.go1
-rw-r--r--src/go/internal/gcimporter/iimport.go46
-rw-r--r--src/go/internal/gcimporter/support.go3
-rw-r--r--src/go/internal/typeparams/notypeparams.go40
-rw-r--r--src/go/internal/typeparams/typeparams.go63
-rw-r--r--src/go/parser/error_test.go16
-rw-r--r--src/go/parser/parser.go115
-rw-r--r--src/go/parser/resolver_test.go6
-rw-r--r--src/go/parser/short_test.go14
-rw-r--r--src/go/parser/testdata/interface.go280
-rw-r--r--src/go/printer/nodes.go20
-rw-r--r--src/go/printer/printer_test.go4
-rw-r--r--src/go/scanner/scanner.go2
-rw-r--r--src/go/scanner/scanner_test.go3
-rw-r--r--src/go/token/token.go11
-rw-r--r--src/go/types/api.go112
-rw-r--r--src/go/types/api_notypeparams.go104
-rw-r--r--src/go/types/api_test.go170
-rw-r--r--src/go/types/api_typeparams.go53
-rw-r--r--src/go/types/api_typeparams_test.go139
-rw-r--r--src/go/types/array.go25
-rw-r--r--src/go/types/assignments.go2
-rw-r--r--src/go/types/basic.go82
-rw-r--r--src/go/types/builtins.go163
-rw-r--r--src/go/types/builtins_test.go7
-rw-r--r--src/go/types/call.go110
-rw-r--r--src/go/types/chan.go35
-rw-r--r--src/go/types/check.go25
-rw-r--r--src/go/types/check_test.go114
-rw-r--r--src/go/types/conversions.go6
-rw-r--r--src/go/types/decl.go233
-rw-r--r--src/go/types/errorcodes.go13
-rw-r--r--src/go/types/expr.go114
-rw-r--r--src/go/types/exprstring.go8
-rw-r--r--src/go/types/exprstring_test.go34
-rw-r--r--src/go/types/index.go154
-rw-r--r--src/go/types/infer.go71
-rw-r--r--src/go/types/instantiate.go248
-rw-r--r--src/go/types/interface.go280
-rw-r--r--src/go/types/issues_test.go6
-rw-r--r--src/go/types/labels.go3
-rw-r--r--src/go/types/lookup.go109
-rw-r--r--src/go/types/map.go24
-rw-r--r--src/go/types/methodset.go15
-rw-r--r--src/go/types/methodset_test.go20
-rw-r--r--src/go/types/named.go296
-rw-r--r--src/go/types/object.go11
-rw-r--r--src/go/types/object_test.go2
-rw-r--r--src/go/types/operand.go45
-rw-r--r--src/go/types/pointer.go19
-rw-r--r--src/go/types/predicates.go150
-rw-r--r--src/go/types/resolver.go31
-rw-r--r--src/go/types/sanitize.go206
-rw-r--r--src/go/types/scope.go97
-rw-r--r--src/go/types/signature.go352
-rw-r--r--src/go/types/sizeof_test.go16
-rw-r--r--src/go/types/sizes.go10
-rw-r--r--src/go/types/slice.go19
-rw-r--r--src/go/types/stdlib_test.go3
-rw-r--r--src/go/types/stmt.go49
-rw-r--r--src/go/types/struct.go202
-rw-r--r--src/go/types/subst.go290
-rw-r--r--src/go/types/testdata/check/builtins.go2207
-rw-r--r--src/go/types/testdata/check/builtins.src2
-rw-r--r--src/go/types/testdata/check/const0.src2
-rw-r--r--src/go/types/testdata/check/cycles4.src15
-rw-r--r--src/go/types/testdata/check/decls0.src10
-rw-r--r--src/go/types/testdata/check/decls1.src2
-rw-r--r--src/go/types/testdata/check/expr1.src4
-rw-r--r--src/go/types/testdata/check/expr2.src2
-rw-r--r--src/go/types/testdata/check/expr3.src2
-rw-r--r--src/go/types/testdata/check/issues.go244
-rw-r--r--src/go/types/testdata/check/issues.src2
-rw-r--r--src/go/types/testdata/check/linalg.go216
-rw-r--r--src/go/types/testdata/check/stmt0.src12
-rw-r--r--src/go/types/testdata/check/tinference.go258
-rw-r--r--src/go/types/testdata/check/tmp.go217
-rw-r--r--src/go/types/testdata/check/typeinst.go26
-rw-r--r--src/go/types/testdata/check/typeinst2.go229
-rw-r--r--src/go/types/testdata/check/typeparams.go2101
-rw-r--r--src/go/types/testdata/examples/constraints.go260
-rw-r--r--src/go/types/testdata/examples/functions.go22
-rw-r--r--src/go/types/testdata/examples/inference.go26
-rw-r--r--src/go/types/testdata/examples/methods.go217
-rw-r--r--src/go/types/testdata/examples/operations.go229
-rw-r--r--src/go/types/testdata/examples/types.go256
-rw-r--r--src/go/types/testdata/fixedbugs/issue28251.src2
-rw-r--r--src/go/types/testdata/fixedbugs/issue39634.go211
-rw-r--r--src/go/types/testdata/fixedbugs/issue39680.go28
-rw-r--r--src/go/types/testdata/fixedbugs/issue39693.go217
-rw-r--r--src/go/types/testdata/fixedbugs/issue39699.go22
-rw-r--r--src/go/types/testdata/fixedbugs/issue39711.go24
-rw-r--r--src/go/types/testdata/fixedbugs/issue39723.go22
-rw-r--r--src/go/types/testdata/fixedbugs/issue39755.go24
-rw-r--r--src/go/types/testdata/fixedbugs/issue39938.go24
-rw-r--r--src/go/types/testdata/fixedbugs/issue39948.go22
-rw-r--r--src/go/types/testdata/fixedbugs/issue40301.go24
-rw-r--r--src/go/types/testdata/fixedbugs/issue40789.go237
-rw-r--r--src/go/types/testdata/fixedbugs/issue41124.go24
-rw-r--r--src/go/types/testdata/fixedbugs/issue42758.go22
-rw-r--r--src/go/types/testdata/fixedbugs/issue43671.go258
-rw-r--r--src/go/types/testdata/fixedbugs/issue45548.go22
-rw-r--r--src/go/types/testdata/fixedbugs/issue45635.go27
-rw-r--r--src/go/types/testdata/fixedbugs/issue45639.go212
-rw-r--r--src/go/types/testdata/fixedbugs/issue45985.go22
-rw-r--r--src/go/types/testdata/fixedbugs/issue46090.go29
-rw-r--r--src/go/types/testdata/fixedbugs/issue46275.go227
-rw-r--r--src/go/types/testdata/fixedbugs/issue47031.go220
-rw-r--r--src/go/types/testdata/fixedbugs/issue47115.go240
-rw-r--r--src/go/types/testdata/fixedbugs/issue47127.go237
-rw-r--r--src/go/types/testdata/fixedbugs/issue47411.go226
-rw-r--r--src/go/types/testdata/manual.go29
-rw-r--r--src/go/types/tuple.go36
-rw-r--r--src/go/types/type.go922
-rw-r--r--src/go/types/typeparam.go136
-rw-r--r--src/go/types/types_test.go15
-rw-r--r--src/go/types/typeset.go298
-rw-r--r--src/go/types/typestring.go110
-rw-r--r--src/go/types/typestring_test.go6
-rw-r--r--src/go/types/typeterm.go166
-rw-r--r--src/go/types/typeterm_test.go205
-rw-r--r--src/go/types/typexpr.go793
-rw-r--r--src/go/types/unify.go52
-rw-r--r--src/go/types/union.go271
-rw-r--r--src/go/types/universe.go82
-rw-r--r--src/internal/abi/abi.go45
-rw-r--r--src/internal/abi/abi_amd64.go3
-rw-r--r--src/internal/abi/abi_arm64.go20
-rw-r--r--src/internal/abi/abi_generic.go4
-rw-r--r--src/internal/buildcfg/exp.go26
-rw-r--r--src/internal/bytealg/compare_amd64.s32
-rw-r--r--src/internal/bytealg/compare_arm64.s113
-rw-r--r--src/internal/bytealg/equal_amd64.s51
-rw-r--r--src/internal/bytealg/equal_arm64.s104
-rw-r--r--src/internal/goarch/gengoarch.go59
-rw-r--r--src/internal/goarch/goarch.go (renamed from src/runtime/internal/sys/arch.go)16
-rw-r--r--src/internal/goarch/goarch_386.go (renamed from src/runtime/internal/sys/arch_386.go)2
-rw-r--r--src/internal/goarch/goarch_amd64.go (renamed from src/runtime/internal/sys/arch_amd64.go)2
-rw-r--r--src/internal/goarch/goarch_arm.go (renamed from src/runtime/internal/sys/arch_arm.go)2
-rw-r--r--src/internal/goarch/goarch_arm64.go (renamed from src/runtime/internal/sys/arch_arm64.go)2
-rw-r--r--src/internal/goarch/goarch_mips.go (renamed from src/runtime/internal/sys/arch_mips.go)2
-rw-r--r--src/internal/goarch/goarch_mips64.go (renamed from src/runtime/internal/sys/arch_mips64le.go)2
-rw-r--r--src/internal/goarch/goarch_mips64le.go (renamed from src/runtime/internal/sys/arch_mips64.go)2
-rw-r--r--src/internal/goarch/goarch_mipsle.go (renamed from src/runtime/internal/sys/arch_mipsle.go)2
-rw-r--r--src/internal/goarch/goarch_ppc64.go (renamed from src/runtime/internal/sys/arch_ppc64.go)2
-rw-r--r--src/internal/goarch/goarch_ppc64le.go (renamed from src/runtime/internal/sys/arch_ppc64le.go)2
-rw-r--r--src/internal/goarch/goarch_riscv64.go (renamed from src/runtime/internal/sys/arch_riscv64.go)2
-rw-r--r--src/internal/goarch/goarch_s390x.go (renamed from src/runtime/internal/sys/arch_s390x.go)2
-rw-r--r--src/internal/goarch/goarch_wasm.go (renamed from src/runtime/internal/sys/arch_wasm.go)2
-rw-r--r--src/internal/goarch/zgoarch_386.go33
-rw-r--r--src/internal/goarch/zgoarch_amd64.go33
-rw-r--r--src/internal/goarch/zgoarch_arm.go33
-rw-r--r--src/internal/goarch/zgoarch_arm64.go33
-rw-r--r--src/internal/goarch/zgoarch_arm64be.go33
-rw-r--r--src/internal/goarch/zgoarch_armbe.go33
-rw-r--r--src/internal/goarch/zgoarch_loong64.go33
-rw-r--r--src/internal/goarch/zgoarch_mips.go33
-rw-r--r--src/internal/goarch/zgoarch_mips64.go33
-rw-r--r--src/internal/goarch/zgoarch_mips64le.go33
-rw-r--r--src/internal/goarch/zgoarch_mips64p32.go33
-rw-r--r--src/internal/goarch/zgoarch_mips64p32le.go33
-rw-r--r--src/internal/goarch/zgoarch_mipsle.go33
-rw-r--r--src/internal/goarch/zgoarch_ppc.go33
-rw-r--r--src/internal/goarch/zgoarch_ppc64.go33
-rw-r--r--src/internal/goarch/zgoarch_ppc64le.go33
-rw-r--r--src/internal/goarch/zgoarch_riscv.go33
-rw-r--r--src/internal/goarch/zgoarch_riscv64.go33
-rw-r--r--src/internal/goarch/zgoarch_s390.go33
-rw-r--r--src/internal/goarch/zgoarch_s390x.go33
-rw-r--r--src/internal/goarch/zgoarch_sparc.go33
-rw-r--r--src/internal/goarch/zgoarch_sparc64.go33
-rw-r--r--src/internal/goarch/zgoarch_wasm.go33
-rw-r--r--src/internal/goexperiment/exp_regabi_off.go9
-rw-r--r--src/internal/goexperiment/exp_regabi_on.go9
-rw-r--r--src/internal/goexperiment/exp_regabidefer_off.go9
-rw-r--r--src/internal/goexperiment/exp_regabidefer_on.go9
-rw-r--r--src/internal/goexperiment/exp_regabig_off.go9
-rw-r--r--src/internal/goexperiment/exp_regabig_on.go9
-rw-r--r--src/internal/goexperiment/exp_unified_off.go9
-rw-r--r--src/internal/goexperiment/exp_unified_on.go9
-rw-r--r--src/internal/goexperiment/flags.go19
-rw-r--r--src/internal/goos/gengoos.go (renamed from src/runtime/internal/sys/gengoos.go)43
-rw-r--r--src/internal/goos/goos.go12
-rw-r--r--src/internal/goos/zgoos_aix.go26
-rw-r--r--src/internal/goos/zgoos_android.go26
-rw-r--r--src/internal/goos/zgoos_darwin.go26
-rw-r--r--src/internal/goos/zgoos_dragonfly.go26
-rw-r--r--src/internal/goos/zgoos_freebsd.go26
-rw-r--r--src/internal/goos/zgoos_hurd.go26
-rw-r--r--src/internal/goos/zgoos_illumos.go26
-rw-r--r--src/internal/goos/zgoos_ios.go26
-rw-r--r--src/internal/goos/zgoos_js.go26
-rw-r--r--src/internal/goos/zgoos_linux.go26
-rw-r--r--src/internal/goos/zgoos_netbsd.go26
-rw-r--r--src/internal/goos/zgoos_openbsd.go26
-rw-r--r--src/internal/goos/zgoos_plan9.go26
-rw-r--r--src/internal/goos/zgoos_solaris.go26
-rw-r--r--src/internal/goos/zgoos_windows.go26
-rw-r--r--src/internal/goos/zgoos_zos.go26
-rw-r--r--src/internal/reflectlite/swapper.go3
-rw-r--r--src/internal/reflectlite/value.go5
-rw-r--r--src/reflect/abi.go23
-rw-r--r--src/reflect/abi_test.go4
-rw-r--r--src/reflect/all_test.go75
-rw-r--r--src/reflect/asm_amd64.s16
-rw-r--r--src/reflect/asm_arm64.s61
-rw-r--r--src/reflect/export_test.go5
-rw-r--r--src/reflect/makefunc.go12
-rw-r--r--src/reflect/swapper.go3
-rw-r--r--src/reflect/type.go47
-rw-r--r--src/reflect/value.go20
-rw-r--r--src/runtime/alg.go14
-rw-r--r--src/runtime/asm.s2
-rw-r--r--src/runtime/asm_386.s22
-rw-r--r--src/runtime/asm_amd64.s245
-rw-r--r--src/runtime/asm_arm.s23
-rw-r--r--src/runtime/asm_arm64.s428
-rw-r--r--src/runtime/asm_mips64x.s23
-rw-r--r--src/runtime/asm_mipsx.s23
-rw-r--r--src/runtime/asm_ppc64x.s31
-rw-r--r--src/runtime/asm_riscv64.s22
-rw-r--r--src/runtime/asm_s390x.s22
-rw-r--r--src/runtime/asm_wasm.s32
-rw-r--r--src/runtime/cgocall.go5
-rw-r--r--src/runtime/cgocheck.go14
-rw-r--r--src/runtime/chan.go5
-rw-r--r--src/runtime/cpuprof.go9
-rw-r--r--src/runtime/debugcall.go2
-rw-r--r--src/runtime/defs_plan9_386.go2
-rw-r--r--src/runtime/defs_plan9_amd64.go2
-rw-r--r--src/runtime/duff_arm64.s4
-rw-r--r--src/runtime/export_debug_test.go10
-rw-r--r--src/runtime/export_test.go30
-rw-r--r--src/runtime/extern.go9
-rw-r--r--src/runtime/gc_test.go1
-rw-r--r--src/runtime/heapdump.go23
-rw-r--r--src/runtime/iface.go108
-rw-r--r--src/runtime/internal/atomic/atomic_test.go4
-rw-r--r--src/runtime/internal/math/math.go4
-rw-r--r--src/runtime/internal/sys/consts.go34
-rw-r--r--src/runtime/internal/sys/sys.go8
-rw-r--r--src/runtime/internal/sys/zgoarch_386.go33
-rw-r--r--src/runtime/internal/sys/zgoarch_amd64.go33
-rw-r--r--src/runtime/internal/sys/zgoarch_arm.go33
-rw-r--r--src/runtime/internal/sys/zgoarch_arm64.go33
-rw-r--r--src/runtime/internal/sys/zgoarch_arm64be.go33
-rw-r--r--src/runtime/internal/sys/zgoarch_armbe.go33
-rw-r--r--src/runtime/internal/sys/zgoarch_loong64.go33
-rw-r--r--src/runtime/internal/sys/zgoarch_mips.go33
-rw-r--r--src/runtime/internal/sys/zgoarch_mips64.go33
-rw-r--r--src/runtime/internal/sys/zgoarch_mips64le.go33
-rw-r--r--src/runtime/internal/sys/zgoarch_mips64p32.go33
-rw-r--r--src/runtime/internal/sys/zgoarch_mips64p32le.go33
-rw-r--r--src/runtime/internal/sys/zgoarch_mipsle.go33
-rw-r--r--src/runtime/internal/sys/zgoarch_ppc.go33
-rw-r--r--src/runtime/internal/sys/zgoarch_ppc64.go33
-rw-r--r--src/runtime/internal/sys/zgoarch_ppc64le.go33
-rw-r--r--src/runtime/internal/sys/zgoarch_riscv.go33
-rw-r--r--src/runtime/internal/sys/zgoarch_riscv64.go33
-rw-r--r--src/runtime/internal/sys/zgoarch_s390.go33
-rw-r--r--src/runtime/internal/sys/zgoarch_s390x.go33
-rw-r--r--src/runtime/internal/sys/zgoarch_sparc.go33
-rw-r--r--src/runtime/internal/sys/zgoarch_sparc64.go33
-rw-r--r--src/runtime/internal/sys/zgoarch_wasm.go33
-rw-r--r--src/runtime/internal/sys/zgoos_aix.go26
-rw-r--r--src/runtime/internal/sys/zgoos_android.go26
-rw-r--r--src/runtime/internal/sys/zgoos_darwin.go26
-rw-r--r--src/runtime/internal/sys/zgoos_dragonfly.go26
-rw-r--r--src/runtime/internal/sys/zgoos_freebsd.go26
-rw-r--r--src/runtime/internal/sys/zgoos_hurd.go26
-rw-r--r--src/runtime/internal/sys/zgoos_illumos.go26
-rw-r--r--src/runtime/internal/sys/zgoos_ios.go26
-rw-r--r--src/runtime/internal/sys/zgoos_js.go26
-rw-r--r--src/runtime/internal/sys/zgoos_linux.go26
-rw-r--r--src/runtime/internal/sys/zgoos_netbsd.go26
-rw-r--r--src/runtime/internal/sys/zgoos_openbsd.go26
-rw-r--r--src/runtime/internal/sys/zgoos_plan9.go26
-rw-r--r--src/runtime/internal/sys/zgoos_solaris.go26
-rw-r--r--src/runtime/internal/sys/zgoos_windows.go26
-rw-r--r--src/runtime/internal/sys/zgoos_zos.go26
-rw-r--r--src/runtime/malloc.go43
-rw-r--r--src/runtime/map.go37
-rw-r--r--src/runtime/map_fast32.go17
-rw-r--r--src/runtime/map_fast64.go17
-rw-r--r--src/runtime/map_faststr.go65
-rw-r--r--src/runtime/map_test.go4
-rw-r--r--src/runtime/mbarrier.go16
-rw-r--r--src/runtime/mbitmap.go147
-rw-r--r--src/runtime/mcheckmark.go4
-rw-r--r--src/runtime/memclr_amd64.s8
-rw-r--r--src/runtime/memclr_arm64.s4
-rw-r--r--src/runtime/memmove_amd64.s10
-rw-r--r--src/runtime/memmove_arm64.s4
-rw-r--r--src/runtime/mfinal.go18
-rw-r--r--src/runtime/mgc.go33
-rw-r--r--src/runtime/mgcmark.go53
-rw-r--r--src/runtime/mgcscavenge.go7
-rw-r--r--src/runtime/mgcstack.go4
-rw-r--r--src/runtime/mgcsweep.go4
-rw-r--r--src/runtime/mgcwork.go4
-rw-r--r--src/runtime/mheap.go8
-rw-r--r--src/runtime/mkduff.go4
-rw-r--r--src/runtime/mkpreempt.go3
-rw-r--r--src/runtime/mprof.go5
-rw-r--r--src/runtime/mranges.go8
-rw-r--r--src/runtime/mspanset.go14
-rw-r--r--src/runtime/mstats.go4
-rw-r--r--src/runtime/mwbbuf.go4
-rw-r--r--src/runtime/norace_linux_test.go3
-rw-r--r--src/runtime/os3_plan9.go9
-rw-r--r--src/runtime/os3_solaris.go14
-rw-r--r--src/runtime/os_aix.go3
-rw-r--r--src/runtime/os_darwin.go2
-rw-r--r--src/runtime/os_dragonfly.go13
-rw-r--r--src/runtime/os_freebsd.go15
-rw-r--r--src/runtime/os_freebsd2.go6
-rw-r--r--src/runtime/os_freebsd_amd64.go8
-rw-r--r--src/runtime/os_linux.go17
-rw-r--r--src/runtime/os_netbsd.go11
-rw-r--r--src/runtime/os_netbsd_386.go7
-rw-r--r--src/runtime/os_netbsd_amd64.go7
-rw-r--r--src/runtime/os_netbsd_arm.go7
-rw-r--r--src/runtime/os_netbsd_arm64.go7
-rw-r--r--src/runtime/os_openbsd.go5
-rw-r--r--src/runtime/os_openbsd_libc.go3
-rw-r--r--src/runtime/os_openbsd_syscall.go7
-rw-r--r--src/runtime/os_plan9.go3
-rw-r--r--src/runtime/os_solaris.go3
-rw-r--r--src/runtime/os_windows.go11
-rw-r--r--src/runtime/panic.go439
-rw-r--r--src/runtime/pprof/pprof.go3
-rw-r--r--src/runtime/pprof/pprof_test.go3
-rw-r--r--src/runtime/pprof/proto.go8
-rw-r--r--src/runtime/pprof/proto_test.go9
-rw-r--r--src/runtime/preempt.go9
-rw-r--r--src/runtime/preempt_386.s3
-rw-r--r--src/runtime/preempt_amd64.s3
-rw-r--r--src/runtime/preempt_arm.s3
-rw-r--r--src/runtime/preempt_arm64.s3
-rw-r--r--src/runtime/preempt_mips64x.s3
-rw-r--r--src/runtime/preempt_mipsx.s3
-rw-r--r--src/runtime/preempt_ppc64x.s3
-rw-r--r--src/runtime/preempt_riscv64.s3
-rw-r--r--src/runtime/preempt_s390x.s3
-rw-r--r--src/runtime/preempt_wasm.s3
-rw-r--r--src/runtime/print.go4
-rw-r--r--src/runtime/proc.go128
-rw-r--r--src/runtime/race.go3
-rw-r--r--src/runtime/race/output_test.go6
-rw-r--r--src/runtime/race_amd64.s41
-rw-r--r--src/runtime/race_arm64.s34
-rw-r--r--src/runtime/runtime1.go8
-rw-r--r--src/runtime/runtime2.go29
-rw-r--r--src/runtime/select.go5
-rw-r--r--src/runtime/signal_386.go9
-rw-r--r--src/runtime/signal_aix_ppc64.go4
-rw-r--r--src/runtime/signal_amd64.go9
-rw-r--r--src/runtime/signal_arm.go7
-rw-r--r--src/runtime/signal_arm64.go3
-rw-r--r--src/runtime/signal_linux_386.go4
-rw-r--r--src/runtime/signal_linux_amd64.go4
-rw-r--r--src/runtime/signal_linux_arm.go4
-rw-r--r--src/runtime/signal_linux_arm64.go4
-rw-r--r--src/runtime/signal_linux_mips64x.go4
-rw-r--r--src/runtime/signal_linux_ppc64x.go4
-rw-r--r--src/runtime/signal_linux_riscv64.go4
-rw-r--r--src/runtime/signal_linux_s390x.go6
-rw-r--r--src/runtime/signal_mips64x.go7
-rw-r--r--src/runtime/signal_mipsx.go3
-rw-r--r--src/runtime/signal_ppc64x.go5
-rw-r--r--src/runtime/signal_riscv64.go9
-rw-r--r--src/runtime/signal_unix.go11
-rw-r--r--src/runtime/signal_windows.go13
-rw-r--r--src/runtime/slice.go22
-rw-r--r--src/runtime/softfloat64.go36
-rw-r--r--src/runtime/stack.go43
-rw-r--r--src/runtime/string.go11
-rw-r--r--src/runtime/stubs.go12
-rw-r--r--src/runtime/stubs_arm64.go7
-rw-r--r--src/runtime/symtab.go12
-rw-r--r--src/runtime/sys_darwin_arm64.go9
-rw-r--r--src/runtime/sys_linux_amd64.s23
-rw-r--r--src/runtime/sys_openbsd.go15
-rw-r--r--src/runtime/sys_openbsd1.go13
-rw-r--r--src/runtime/sys_openbsd2.go53
-rw-r--r--src/runtime/sys_openbsd3.go25
-rw-r--r--src/runtime/sys_openbsd_amd64.s2
-rw-r--r--src/runtime/sys_plan9_386.s4
-rw-r--r--src/runtime/sys_plan9_amd64.s4
-rw-r--r--src/runtime/sys_wasm.go3
-rw-r--r--src/runtime/sys_windows_386.s14
-rw-r--r--src/runtime/sys_windows_amd64.s12
-rw-r--r--src/runtime/sys_windows_arm.s12
-rw-r--r--src/runtime/sys_windows_arm64.s18
-rw-r--r--src/runtime/sys_x86.go4
-rw-r--r--src/runtime/syscall_solaris.go10
-rw-r--r--src/runtime/syscall_windows.go22
-rw-r--r--src/runtime/time.go3
-rw-r--r--src/runtime/time_linux_amd64.s10
-rw-r--r--src/runtime/trace.go9
-rw-r--r--src/runtime/traceback.go58
-rw-r--r--src/runtime/type.go7
-rw-r--r--src/runtime/wincallback.go6
-rw-r--r--src/runtime/zcallback_windows.s2
-rw-r--r--src/runtime/zcallback_windows_arm.s2
-rw-r--r--src/runtime/zcallback_windows_arm64.s2
-rw-r--r--test/chan/perm.go2
-rw-r--r--test/codegen/arithmetic.go4
-rw-r--r--test/codegen/clobberdead.go7
-rw-r--r--test/complit1.go14
-rw-r--r--test/ddd1.go10
-rw-r--r--test/devirt.go3
-rw-r--r--test/escape2.go56
-rw-r--r--test/escape2n.go56
-rw-r--r--test/escape5.go15
-rw-r--r--test/escape_array.go16
-rw-r--r--test/escape_calls.go2
-rw-r--r--test/escape_closure.go8
-rw-r--r--test/escape_goto.go6
-rw-r--r--test/escape_param.go10
-rw-r--r--test/escape_runtime_atomic.go4
-rw-r--r--test/escape_slice.go4
-rw-r--r--test/escape_struct_return.go4
-rw-r--r--test/escape_unsafe.go10
-rw-r--r--test/fixedbugs/bug195.go2
-rw-r--r--test/fixedbugs/bug248.dir/bug2.go4
-rw-r--r--test/fixedbugs/bug267.go2
-rw-r--r--test/fixedbugs/bug345.dir/main.go4
-rw-r--r--test/fixedbugs/bug460.dir/b.go10
-rw-r--r--test/fixedbugs/issue10975.go2
-rw-r--r--test/fixedbugs/issue11614.go2
-rw-r--r--test/fixedbugs/issue12006.go6
-rw-r--r--test/fixedbugs/issue12588.go6
-rw-r--r--test/fixedbugs/issue14652.go2
-rw-r--r--test/fixedbugs/issue14999.go4
-rw-r--r--test/fixedbugs/issue20250.go2
-rw-r--r--test/fixedbugs/issue22076.go4
-rw-r--r--test/fixedbugs/issue24651a.go2
-rw-r--r--test/fixedbugs/issue24651b.go4
-rw-r--r--test/fixedbugs/issue26163.go2
-rw-r--r--test/fixedbugs/issue27557.go6
-rw-r--r--test/fixedbugs/issue28688.go2
-rw-r--r--test/fixedbugs/issue30862.dir/a/a.go (renamed from test/fixedbugs/issue30862.dir/a.go)0
-rw-r--r--test/fixedbugs/issue30862.dir/b/b.go (renamed from test/fixedbugs/issue30862.dir/b.go)2
-rw-r--r--test/fixedbugs/issue30862.dir/main.go2
-rw-r--r--test/fixedbugs/issue30862.go4
-rw-r--r--test/fixedbugs/issue30898.go2
-rw-r--r--test/fixedbugs/issue31573.go30
-rw-r--r--test/fixedbugs/issue41500.go8
-rw-r--r--test/fixedbugs/issue42284.dir/a.go2
-rw-r--r--test/fixedbugs/issue43762.go6
-rw-r--r--test/fixedbugs/issue44432.go4
-rw-r--r--test/fixedbugs/issue45258.go2
-rw-r--r--test/fixedbugs/issue46556.go16
-rw-r--r--test/fixedbugs/issue46749.go10
-rw-r--r--test/fixedbugs/issue46903.go32
-rw-r--r--test/fixedbugs/issue47227.go23
-rw-r--r--test/fixedbugs/issue4909b.go2
-rw-r--r--test/fixedbugs/issue8042.go6
-rw-r--r--test/fixedbugs/issue8761.go6
-rw-r--r--test/inline.go10
-rw-r--r--test/inline_big.go2
-rw-r--r--test/inline_variadic.go2
-rw-r--r--test/live.go16
-rw-r--r--test/live_regabi.go10
-rw-r--r--test/reflectmethod8.go26
-rw-r--r--test/run.go581
-rw-r--r--test/typeparam/absdiff.go25
-rw-r--r--test/typeparam/absdiffimp.dir/a.go75
-rw-r--r--test/typeparam/absdiffimp.dir/main.go29
-rw-r--r--test/typeparam/absdiffimp.go7
-rw-r--r--test/typeparam/adder.go10
-rw-r--r--test/typeparam/aliasimp.dir/a.go9
-rw-r--r--test/typeparam/aliasimp.dir/main.go38
-rw-r--r--test/typeparam/aliasimp.go7
-rw-r--r--test/typeparam/boundmethod.go106
-rw-r--r--test/typeparam/builtins.go111
-rw-r--r--test/typeparam/chans.go2
-rw-r--r--test/typeparam/chansimp.dir/a.go232
-rw-r--r--test/typeparam/chansimp.dir/main.go189
-rw-r--r--test/typeparam/chansimp.go7
-rw-r--r--test/typeparam/combine.go54
-rw-r--r--test/typeparam/cons.go48
-rw-r--r--test/typeparam/dedup.dir/a.go10
-rw-r--r--test/typeparam/dedup.dir/b.go14
-rw-r--r--test/typeparam/dedup.dir/c.go14
-rw-r--r--test/typeparam/dedup.dir/main.go15
-rw-r--r--test/typeparam/dedup.go12
-rw-r--r--test/typeparam/dedup.out4
-rw-r--r--test/typeparam/dictionaryCapture-noinline.go126
-rw-r--r--test/typeparam/dictionaryCapture.go203
-rw-r--r--test/typeparam/dottype.go86
-rw-r--r--test/typeparam/dottype.out10
-rw-r--r--test/typeparam/double.go12
-rw-r--r--test/typeparam/equal.go69
-rw-r--r--test/typeparam/fact.go8
-rw-r--r--test/typeparam/factimp.dir/a.go12
-rw-r--r--test/typeparam/factimp.dir/main.go26
-rw-r--r--test/typeparam/factimp.go7
-rw-r--r--test/typeparam/graph.go1
-rw-r--r--test/typeparam/ifaceconv.go83
-rw-r--r--test/typeparam/index.go43
-rw-r--r--test/typeparam/interfacearg.go12
-rw-r--r--test/typeparam/issue39755.go27
-rw-r--r--test/typeparam/issue44688.go148
-rw-r--r--test/typeparam/issue45547.go2
-rw-r--r--test/typeparam/issue45817.go26
-rw-r--r--test/typeparam/issue46472.go20
-rw-r--r--test/typeparam/issue47258.go32
-rw-r--r--test/typeparam/issue47514.go20
-rw-r--r--test/typeparam/issue47514b.go19
-rw-r--r--test/typeparam/list.go26
-rw-r--r--test/typeparam/list2.go13
-rw-r--r--test/typeparam/listimp.dir/a.go53
-rw-r--r--test/typeparam/listimp.dir/main.go52
-rw-r--r--test/typeparam/listimp.go7
-rw-r--r--test/typeparam/listimp2.dir/a.go298
-rw-r--r--test/typeparam/listimp2.dir/main.go315
-rw-r--r--test/typeparam/listimp2.go7
-rw-r--r--test/typeparam/lockable.go22
-rw-r--r--test/typeparam/mapimp.dir/a.go15
-rw-r--r--test/typeparam/mapimp.dir/main.go28
-rw-r--r--test/typeparam/mapimp.go7
-rw-r--r--test/typeparam/mapsimp.dir/a.go108
-rw-r--r--test/typeparam/mapsimp.dir/main.go156
-rw-r--r--test/typeparam/mapsimp.go7
-rw-r--r--test/typeparam/mdempsky/1.dir/a.go9
-rw-r--r--test/typeparam/mdempsky/1.dir/b.go9
-rw-r--r--test/typeparam/mdempsky/1.go7
-rw-r--r--test/typeparam/mdempsky/10.dir/a.go7
-rw-r--r--test/typeparam/mdempsky/10.dir/b.go17
-rw-r--r--test/typeparam/mdempsky/10.go7
-rw-r--r--test/typeparam/mdempsky/11.go16
-rw-r--r--test/typeparam/mdempsky/12.dir/a.go11
-rw-r--r--test/typeparam/mdempsky/12.dir/main.go13
-rw-r--r--test/typeparam/mdempsky/12.go9
-rw-r--r--test/typeparam/mdempsky/13.go84
-rw-r--r--test/typeparam/mdempsky/14.go40
-rw-r--r--test/typeparam/mdempsky/15.go69
-rw-r--r--test/typeparam/mdempsky/2.go20
-rw-r--r--test/typeparam/mdempsky/3.dir/a.go7
-rw-r--r--test/typeparam/mdempsky/3.dir/b.go9
-rw-r--r--test/typeparam/mdempsky/3.go7
-rw-r--r--test/typeparam/mdempsky/4.dir/a.go12
-rw-r--r--test/typeparam/mdempsky/4.dir/b.go9
-rw-r--r--test/typeparam/mdempsky/4.go7
-rw-r--r--test/typeparam/mdempsky/5.go15
-rw-r--r--test/typeparam/mdempsky/6.go11
-rw-r--r--test/typeparam/mdempsky/7.dir/a.go9
-rw-r--r--test/typeparam/mdempsky/7.dir/b.go9
-rw-r--r--test/typeparam/mdempsky/7.go7
-rw-r--r--test/typeparam/mdempsky/8.dir/a.go7
-rw-r--r--test/typeparam/mdempsky/8.dir/b.go (renamed from src/cmd/gofmt/gofmt_typeparams_test.go)7
-rw-r--r--test/typeparam/mdempsky/8.go7
-rw-r--r--test/typeparam/mdempsky/9.go11
-rw-r--r--test/typeparam/min.go11
-rw-r--r--test/typeparam/mincheck.dir/a.go16
-rw-r--r--test/typeparam/mincheck.dir/main.go38
-rw-r--r--test/typeparam/mincheck.go7
-rw-r--r--test/typeparam/minimp.dir/a.go16
-rw-r--r--test/typeparam/minimp.dir/main.go38
-rw-r--r--test/typeparam/minimp.go7
-rw-r--r--test/typeparam/mutualimp.dir/a.go12
-rw-r--r--test/typeparam/mutualimp.dir/b.go12
-rw-r--r--test/typeparam/mutualimp.go7
-rw-r--r--test/typeparam/nested.go134
-rw-r--r--test/typeparam/nested.out4
-rw-r--r--test/typeparam/ordered.go14
-rw-r--r--test/typeparam/orderedmap.go10
-rw-r--r--test/typeparam/orderedmapsimp.dir/a.go226
-rw-r--r--test/typeparam/orderedmapsimp.dir/main.go64
-rw-r--r--test/typeparam/orderedmapsimp.go7
-rw-r--r--test/typeparam/pair.go6
-rw-r--r--test/typeparam/pairimp.dir/a.go10
-rw-r--r--test/typeparam/pairimp.dir/main.go30
-rw-r--r--test/typeparam/pairimp.go7
-rw-r--r--test/typeparam/sets.go2
-rw-r--r--test/typeparam/setsimp.dir/a.go128
-rw-r--r--test/typeparam/setsimp.dir/main.go156
-rw-r--r--test/typeparam/setsimp.go7
-rw-r--r--test/typeparam/settable.go11
-rw-r--r--test/typeparam/shape1.go50
-rw-r--r--test/typeparam/shape1.out4
-rw-r--r--test/typeparam/sliceimp.dir/a.go141
-rw-r--r--test/typeparam/sliceimp.dir/main.go179
-rw-r--r--test/typeparam/sliceimp.go7
-rw-r--r--test/typeparam/slices.go90
-rw-r--r--test/typeparam/smallest.go14
-rw-r--r--test/typeparam/smoketest.go12
-rw-r--r--test/typeparam/stringable.go10
-rw-r--r--test/typeparam/stringerimp.dir/a.go16
-rw-r--r--test/typeparam/stringerimp.dir/main.go38
-rw-r--r--test/typeparam/stringerimp.go7
-rw-r--r--test/typeparam/struct.go40
-rw-r--r--test/typeparam/subdict.go41
-rw-r--r--test/typeparam/sum.go16
-rw-r--r--test/typeparam/tparam1.go32
-rw-r--r--test/typeparam/typelist.go50
-rw-r--r--test/typeparam/typeswitch1.go29
-rw-r--r--test/typeparam/typeswitch1.out5
-rw-r--r--test/typeparam/typeswitch2.go31
-rw-r--r--test/typeparam/typeswitch2.out5
-rw-r--r--test/typeparam/typeswitch3.go35
-rw-r--r--test/typeparam/typeswitch3.out3
-rw-r--r--test/typeparam/typeswitch4.go33
-rw-r--r--test/typeparam/typeswitch4.out3
-rw-r--r--test/typeparam/typeswitch5.go28
-rw-r--r--test/typeparam/typeswitch5.out4
-rw-r--r--test/typeparam/typeswitch6.go30
-rw-r--r--test/typeparam/typeswitch6.out5
-rw-r--r--test/typeparam/typeswitch7.go37
-rw-r--r--test/typeparam/typeswitch7.out3
-rw-r--r--test/typeparam/valimp.dir/a.go32
-rw-r--r--test/typeparam/valimp.dir/main.go55
-rw-r--r--test/typeparam/valimp.go7
-rw-r--r--test/typeparam/value.go6
-rw-r--r--test/uintptrescapes2.go2
925 files changed, 37184 insertions, 17330 deletions
diff --git a/codereview.cfg b/codereview.cfg
index 77a74f108e..1f58fdbeb2 100644
--- a/codereview.cfg
+++ b/codereview.cfg
@@ -1 +1,2 @@
-branch: master
+branch: dev.typeparams
+parent-branch: master
diff --git a/misc/cgo/errors/testdata/err2.go b/misc/cgo/errors/testdata/err2.go
index a90598fe35..aa941584c3 100644
--- a/misc/cgo/errors/testdata/err2.go
+++ b/misc/cgo/errors/testdata/err2.go
@@ -91,10 +91,18 @@ func main() {
// issue 26745
_ = func(i int) int {
- return C.i + 1 // ERROR HERE: 14
+ // typecheck reports at column 14 ('+'), but types2 reports at
+ // column 10 ('C').
+ // TODO(mdempsky): Investigate why, and see if types2 can be
+ // updated to match typecheck behavior.
+ return C.i + 1 // ERROR HERE: \b(10|14)\b
}
_ = func(i int) {
- C.fi(i) // ERROR HERE: 7
+ // typecheck reports at column 7 ('('), but types2 reports at
+ // column 8 ('i'). The types2 position is more correct, but
+ // updating typecheck here is fundamentally challenging because of
+ // IR limitations.
+ C.fi(i) // ERROR HERE: \b(7|8)\b
}
C.fi = C.fi // ERROR HERE
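
The \b(10|14)\b and \b(7|8)\b patterns above accept whichever column the active frontend reports. As a rough illustration outside this patch (the error strings below are hypothetical; only the column number matters), Go's regexp package evaluates the first pattern like this:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        re := regexp.MustCompile(`\b(10|14)\b`)
        fmt.Println(re.MatchString("err2.go:94:14: invalid operation")) // true  (typecheck column)
        fmt.Println(re.MatchString("err2.go:94:10: invalid operation")) // true  (types2 column)
        fmt.Println(re.MatchString("err2.go:94:12: invalid operation")) // false (neither column)
    }
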
diff --git a/src/cmd/compile/abi-internal.md b/src/cmd/compile/abi-internal.md
index 2bb4055083..3619aea4aa 100644
--- a/src/cmd/compile/abi-internal.md
+++ b/src/cmd/compile/abi-internal.md
@@ -505,6 +505,128 @@ control bits specified by the ELF AMD64 ABI.
The x87 floating-point control word is not used by Go on amd64.
+### arm64 architecture
+
+The arm64 architecture uses R0 – R15 for integer arguments and results.
+
+It uses F0 – F15 for floating-point arguments and results.
+
+*Rationale*: 16 integer registers and 16 floating-point registers are
+more than enough for passing arguments and results for practically all
+functions (see Appendix). While there are more registers available,
+using more registers provides little benefit. Additionally, it will add
+overhead on code paths where the number of arguments is not statically
+known (e.g. reflect call), and will consume more stack space when there
+is only limited stack space available to fit in the nosplit limit.
+
+Registers R16 and R17 are permanent scratch registers. They are also
+used as scratch registers by the linker (Go linker and external
+linker) in trampolines.
+
+Register R18 is reserved and never used. It is reserved for the OS
+on some platforms (e.g. macOS).
+
+Registers R19 – R25 are permanent scratch registers. In addition,
+R27 is a permanent scratch register used by the assembler when
+expanding instructions.
+
+Floating-point registers F16 – F31 are also permanent scratch
+registers.
+
+Special-purpose registers are as follows:
+
+| Register | Call meaning | Return meaning | Body meaning |
+| --- | --- | --- | --- |
+| RSP | Stack pointer | Same | Same |
+| R30 | Link register | Same | Scratch (non-leaf functions) |
+| R29 | Frame pointer | Same | Same |
+| R28 | Current goroutine | Same | Same |
+| R27 | Scratch | Scratch | Scratch |
+| R26 | Closure context pointer | Scratch | Scratch |
+| R18 | Reserved (not used) | Same | Same |
+| ZR | Zero value | Same | Same |
+
+*Rationale*: These register meanings are compatible with Go’s
+stack-based calling convention.
+
+*Rationale*: The link register, R30, holds the function return
+address at the function entry. For functions that have frames
+(including most non-leaf functions), R30 is saved to stack in the
+function prologue and restored in the epilogue. Within the function
+body, R30 can be used as a scratch register.
+
+*Implementation note*: Registers with fixed meaning at calls but not
+in function bodies must be initialized by "injected" calls such as
+signal-based panics.
+
+#### Stack layout
+
+The stack pointer, RSP, grows down and is always aligned to 16 bytes.
+
+*Rationale*: The arm64 architecture requires the stack pointer to be
+16-byte aligned.
+
+A function's stack frame, after the frame is created, is laid out as
+follows:
+
+    +------------------------------+
+    | ... locals ...               |
+    | ... outgoing arguments ...   |
+    | return PC                    | ← RSP points to
+    | frame pointer on entry       |
+    +------------------------------+ ↓ lower addresses
+
+The "return PC" is loaded to the link register, R30, as part of the
+arm64 `CALL` operation.
+
+On entry, a function subtracts from RSP to open its stack frame, and
+saves the values of R30 and R29 at the bottom of the frame.
+Specifically, R30 is saved at 0(RSP) and R29 is saved at -8(RSP),
+after RSP is updated.
+
+A leaf function that does not require any stack space may omit the
+saved R30 and R29.
+
+The Go ABI's use of R29 as a frame pointer register is compatible with
+the arm64 architecture requirement, so that Go can interoperate with
+platform debuggers and profilers.
+
+This stack layout is used by both register-based (ABIInternal) and
+stack-based (ABI0) calling conventions.
+
+#### Flags
+
+The arithmetic status flags (NZCV) are treated like scratch registers
+and not preserved across calls.
+All other bits in PSTATE are system flags and are not modified by Go.
+
+The floating-point status register (FPSR) is treated like a scratch
+register and is not preserved across calls.
+
+At calls, the floating-point control register (FPCR) bits are always
+set as follows:
+
+| Flag | Bit | Value | Meaning |
+| --- | --- | --- | --- |
+| DN | 25 | 0 | Propagate NaN operands |
+| FZ | 24 | 0 | Do not flush to zero |
+| RC | 23/22 | 0 (RN) | Round to nearest, choose even if tied |
+| IDE | 15 | 0 | Denormal operations trap disabled |
+| IXE | 12 | 0 | Inexact trap disabled |
+| UFE | 11 | 0 | Underflow trap disabled |
+| OFE | 10 | 0 | Overflow trap disabled |
+| DZE | 9 | 0 | Divide-by-zero trap disabled |
+| IOE | 8 | 0 | Invalid operations trap disabled |
+| NEP | 2 | 0 | Scalar operations do not affect higher elements in vector registers |
+| AH | 1 | 0 | No alternate handling of de-normal inputs |
+| FIZ | 0 | 0 | Do not zero de-normals |
+
+*Rationale*: Having a fixed FPCR control configuration allows Go
+functions to use floating-point and vector (SIMD) operations without
+modifying or saving the FPCR.
+Functions are allowed to modify it between calls (as long as they
+restore it), but as of this writing Go code never does.
+
## Future directions
### Spill path improvements
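
The FPCR defaults in the table above (round to nearest even, no flush-to-zero) are observable from ordinary Go code. A minimal sketch, separate from the patch itself and relying only on the IEEE-754 float64 behavior the Go spec already requires:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        // Round to nearest, ties to even: 2^-53 is half an ulp of 1.0,
        // so adding it rounds back down to 1.0 (the "even" neighbor).
        half := 1.0 / (1 << 53)
        fmt.Println(1.0+half == 1.0) // true

        // FZ = 0: halving the smallest normal float64 yields a subnormal
        // (denormal) value instead of being flushed to zero.
        smallestNormal := math.Float64frombits(0x0010000000000000)
        fmt.Println(smallestNormal/2 != 0) // true
    }
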
diff --git a/src/cmd/compile/internal/amd64/galign.go b/src/cmd/compile/internal/amd64/galign.go
index 2785aa0336..ca44263afc 100644
--- a/src/cmd/compile/internal/amd64/galign.go
+++ b/src/cmd/compile/internal/amd64/galign.go
@@ -18,11 +18,10 @@ func Init(arch *ssagen.ArchInfo) {
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
- arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = ssaMarkMoves
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
- arch.LoadRegResults = loadRegResults
+ arch.LoadRegResult = loadRegResult
arch.SpillArgReg = spillArgReg
}
diff --git a/src/cmd/compile/internal/amd64/ggen.go b/src/cmd/compile/internal/amd64/ggen.go
index 1484ad5404..b8dce81a92 100644
--- a/src/cmd/compile/internal/amd64/ggen.go
+++ b/src/cmd/compile/internal/amd64/ggen.go
@@ -57,7 +57,6 @@ func dzDI(b int64) int64 {
func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
const (
r13 = 1 << iota // if R13 is already zeroed.
- x15 // if X15 is already zeroed. Note: in new ABI, X15 is always zero.
)
if cnt == 0 {
@@ -85,11 +84,6 @@ func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.
}
p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R13, 0, obj.TYPE_MEM, x86.REG_SP, off)
} else if !isPlan9 && cnt <= int64(8*types.RegSize) {
- if !buildcfg.Experiment.RegabiG && *state&x15 == 0 {
- p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_REG, x86.REG_X15, 0)
- *state |= x15
- }
-
for i := int64(0); i < cnt/16; i++ {
p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
}
@@ -98,10 +92,6 @@ func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.
p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
}
} else if !isPlan9 && (cnt <= int64(128*types.RegSize)) {
- if !buildcfg.Experiment.RegabiG && *state&x15 == 0 {
- p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_REG, x86.REG_X15, 0)
- *state |= x15
- }
// Save DI to r12. With the amd64 Go register abi, DI can contain
// an incoming parameter, whereas R12 is always scratch.
p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_DI, 0, obj.TYPE_REG, x86.REG_R12, 0)
diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index ca5f36e775..30dba057d0 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -823,7 +823,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Reg = v.Args[0].Reg()
ssagen.AddAux2(&p.To, v, sc.Off64())
case ssa.OpAMD64MOVOstorezero:
- if !buildcfg.Experiment.RegabiG || s.ABI != obj.ABIInternal {
+ if s.ABI != obj.ABIInternal {
// zero X15 manually
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
}
@@ -914,7 +914,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64DUFFZERO:
- if !buildcfg.Experiment.RegabiG || s.ABI != obj.ABIInternal {
+ if s.ABI != obj.ABIInternal {
// zero X15 manually
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
}
@@ -997,22 +997,26 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// Closure pointer is DX.
ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpAMD64LoweredGetG:
- if buildcfg.Experiment.RegabiG && s.ABI == obj.ABIInternal {
+ if s.ABI == obj.ABIInternal {
v.Fatalf("LoweredGetG should not appear in ABIInternal")
}
r := v.Reg()
getgFromTLS(s, r)
case ssa.OpAMD64CALLstatic:
- if buildcfg.Experiment.RegabiG && s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal {
+ if s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal {
// zeroing X15 when entering ABIInternal from ABI0
- opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ if buildcfg.GOOS != "plan9" { // do not use SSE on Plan 9
+ opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ }
// set G register from TLS
getgFromTLS(s, x86.REG_R14)
}
s.Call(v)
- if buildcfg.Experiment.RegabiG && s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 {
+ if s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 {
// zeroing X15 when entering ABIInternal from ABI0
- opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ if buildcfg.GOOS != "plan9" { // do not use SSE on Plan 9
+ opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ }
// set G register from TLS
getgFromTLS(s, x86.REG_R14)
}
@@ -1304,9 +1308,11 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
case ssa.BlockRet:
s.Prog(obj.ARET)
case ssa.BlockRetJmp:
- if buildcfg.Experiment.RegabiG && s.ABI == obj.ABI0 && b.Aux.(*obj.LSym).ABI() == obj.ABIInternal {
+ if s.ABI == obj.ABI0 && b.Aux.(*obj.LSym).ABI() == obj.ABIInternal {
// zeroing X15 when entering ABIInternal from ABI0
- opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ if buildcfg.GOOS != "plan9" { // do not use SSE on Plan 9
+ opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ }
// set G register from TLS
getgFromTLS(s, x86.REG_R14)
}
@@ -1348,20 +1354,15 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
}
}
-func loadRegResults(s *ssagen.State, f *ssa.Func) {
- for _, o := range f.OwnAux.ABIInfo().OutParams() {
- n := o.Name.(*ir.Name)
- rts, offs := o.RegisterTypesAndOffsets()
- for i := range o.Registers {
- p := s.Prog(loadByType(rts[i]))
- p.From.Type = obj.TYPE_MEM
- p.From.Name = obj.NAME_AUTO
- p.From.Sym = n.Linksym()
- p.From.Offset = n.FrameOffset() + offs[i]
- p.To.Type = obj.TYPE_REG
- p.To.Reg = ssa.ObjRegForAbiReg(o.Registers[i], f.Config)
- }
- }
+func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p := s.Prog(loadByType(t))
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_AUTO
+ p.From.Sym = n.Linksym()
+ p.From.Offset = n.FrameOffset() + off
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = reg
+ return p
}
func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
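The old loadRegResults helper above walked all ABI out-params itself; the refactor replaces it with a per-register loadRegResult hook (registered via the arch hooks, as the arm64 galign.go change below shows), so the iteration can live in architecture-independent ssagen code. As a hedged sketch, a caller-side loop could look roughly like the following, modeled directly on the deleted loop above; the function name emitLoadRegResults is invented for illustration, while the field and helper names are taken from the deleted code:

    // emitLoadRegResults is a hypothetical architecture-independent caller:
    // it walks the ABI out-params and delegates each per-register load to
    // the new per-arch loadRegResult hook.
    func emitLoadRegResults(s *ssagen.State, f *ssa.Func) {
    	for _, o := range f.OwnAux.ABIInfo().OutParams() {
    		n := o.Name.(*ir.Name)
    		rts, offs := o.RegisterTypesAndOffsets()
    		for i := range o.Registers {
    			reg := ssa.ObjRegForAbiReg(o.Registers[i], f.Config)
    			loadRegResult(s, f, rts[i], reg, n, offs[i])
    		}
    	}
    }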
diff --git a/src/cmd/compile/internal/arm/galign.go b/src/cmd/compile/internal/arm/galign.go
index d68500280d..23e52bacbf 100644
--- a/src/cmd/compile/internal/arm/galign.go
+++ b/src/cmd/compile/internal/arm/galign.go
@@ -18,7 +18,6 @@ func Init(arch *ssagen.ArchInfo) {
arch.SoftFloat = buildcfg.GOARM == 5
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
- arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
diff --git a/src/cmd/compile/internal/arm64/galign.go b/src/cmd/compile/internal/arm64/galign.go
index d3db37e16f..3ebd860de8 100644
--- a/src/cmd/compile/internal/arm64/galign.go
+++ b/src/cmd/compile/internal/arm64/galign.go
@@ -18,9 +18,10 @@ func Init(arch *ssagen.ArchInfo) {
arch.PadFrame = padframe
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
- arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
+ arch.LoadRegResult = loadRegResult
+ arch.SpillArgReg = spillArgReg
}
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
index 0c997bc4b3..c3319f9491 100644
--- a/src/cmd/compile/internal/arm64/ssa.go
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -10,6 +10,7 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
+ "cmd/compile/internal/objw"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
@@ -161,6 +162,18 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
ssagen.AddrAuto(&p.To, v)
+ case ssa.OpArgIntReg, ssa.OpArgFloatReg:
+ // The assembler needs to wrap the entry safepoint/stack growth code
+ // with spill/unspill of the register arguments. The loop below runs
+ // only once per function: RegArgs is cleared after the first visit.
+ for _, a := range v.Block.Func.RegArgs {
+ // Pass the spill/unspill information along to the assembler, offset by size of
+ // the saved LR slot.
+ addr := ssagen.SpillSlotAddr(a, arm64.REGSP, base.Ctxt.FixedFrameSize())
+ s.FuncInfo().AddSpill(
+ obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type), Spill: storeByType(a.Type)})
+ }
+ v.Block.Func.RegArgs = nil
+ ssagen.CheckArgReg(v)
case ssa.OpARM64ADD,
ssa.OpARM64SUB,
ssa.OpARM64AND,
@@ -1101,8 +1114,34 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
v.Fatalf("FlagConstant op should never make it to codegen %v", v.LongString())
case ssa.OpARM64InvertFlags:
v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
- case ssa.OpClobber, ssa.OpClobberReg:
- // TODO: implement for clobberdead experiment. Nop is ok for now.
+ case ssa.OpClobber:
+ // MOVW $0xdeaddead, REGTMP
+ // MOVW REGTMP, (slot)
+ // MOVW REGTMP, 4(slot)
+ p := s.Prog(arm64.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0xdeaddead
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REGTMP
+ p = s.Prog(arm64.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm64.REGTMP
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arm64.REGSP
+ ssagen.AddAux(&p.To, v)
+ p = s.Prog(arm64.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm64.REGTMP
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arm64.REGSP
+ ssagen.AddAux2(&p.To, v, v.AuxInt+4)
+ case ssa.OpClobberReg:
+ x := uint64(0xdeaddeaddeaddead)
+ p := s.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(x)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
default:
v.Fatalf("genValue not implemented: %s", v.LongString())
}
@@ -1266,3 +1305,22 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
b.Fatalf("branch not implemented: %s", b.LongString())
}
}
+
+func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p := s.Prog(loadByType(t))
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_AUTO
+ p.From.Sym = n.Linksym()
+ p.From.Offset = n.FrameOffset() + off
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = reg
+ return p
+}
+
+func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
+ p.To.Name = obj.NAME_PARAM
+ p.To.Sym = n.Linksym()
+ p.Pos = p.Pos.WithNotStmt()
+ return p
+}
diff --git a/src/cmd/compile/internal/base/bootstrap_false.go b/src/cmd/compile/internal/base/bootstrap_false.go
new file mode 100644
index 0000000000..de86644527
--- /dev/null
+++ b/src/cmd/compile/internal/base/bootstrap_false.go
@@ -0,0 +1,11 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !compiler_bootstrap
+
+package base
+
+// CompilerBootstrap reports whether the current compiler binary was
+// built with -tags=compiler_bootstrap.
+const CompilerBootstrap = false
diff --git a/src/cmd/compile/internal/base/bootstrap_true.go b/src/cmd/compile/internal/base/bootstrap_true.go
new file mode 100644
index 0000000000..81a17e1f6e
--- /dev/null
+++ b/src/cmd/compile/internal/base/bootstrap_true.go
@@ -0,0 +1,11 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build compiler_bootstrap
+
+package base
+
+// CompilerBootstrap reports whether the current compiler binary was
+// built with -tags=compiler_bootstrap.
+const CompilerBootstrap = true
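The pair of build-tagged files gives the rest of the compiler a single constant to branch on. A minimal sketch of how such a constant might be consulted, with a hypothetical caller; the gated behavior shown is illustrative, not taken from this change:

    // maybeUseNewCodePath is a hypothetical example: code built with
    // -tags=compiler_bootstrap stays on a conservative path, since the
    // bootstrap toolchain may lack newer language features or packages.
    func maybeUseNewCodePath() bool {
    	if base.CompilerBootstrap {
    		return false // bootstrap build: keep things simple
    	}
    	return true
    }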
diff --git a/src/cmd/compile/internal/base/debug.go b/src/cmd/compile/internal/base/debug.go
index 71712ab1a5..e2245e1c26 100644
--- a/src/cmd/compile/internal/base/debug.go
+++ b/src/cmd/compile/internal/base/debug.go
@@ -44,8 +44,11 @@ type DebugFlags struct {
Panic int `help:"show all compiler panics"`
Slice int `help:"print information about slice compilation"`
SoftFloat int `help:"force compiler to emit soft-float code"`
+ SyncFrames int `help:"how many writer stack frames to include at sync points in unified export data"`
TypeAssert int `help:"print information about type assertion inlining"`
TypecheckInl int `help:"eager typechecking of inline function bodies"`
+ Unified int `help:"enable unified IR construction"`
+ UnifiedQuirks int `help:"enable unified IR construction's quirks mode"`
WB int `help:"print information about write barriers"`
ABIWrap int `help:"print information about ABI wrapper generation"`
diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go
index 42c0c1b94b..b8b205f412 100644
--- a/src/cmd/compile/internal/base/flag.go
+++ b/src/cmd/compile/internal/base/flag.go
@@ -159,7 +159,11 @@ func ParseFlags() {
Flag.LinkShared = &Ctxt.Flag_linkshared
Flag.Shared = &Ctxt.Flag_shared
Flag.WB = true
+
Debug.InlFuncsWithClosures = 1
+ if buildcfg.Experiment.Unified {
+ Debug.Unified = 1
+ }
Debug.Checkptr = -1 // so we can tell whether it is set explicitly
diff --git a/src/cmd/compile/internal/typecheck/mapfile_mmap.go b/src/cmd/compile/internal/base/mapfile_mmap.go
index 298b385bcb..c1616db8e9 100644
--- a/src/cmd/compile/internal/typecheck/mapfile_mmap.go
+++ b/src/cmd/compile/internal/base/mapfile_mmap.go
@@ -5,7 +5,7 @@
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd
// +build darwin dragonfly freebsd linux netbsd openbsd
-package typecheck
+package base
import (
"os"
@@ -19,7 +19,7 @@ import (
// mapFile returns length bytes from the file starting at the
// specified offset as a string.
-func mapFile(f *os.File, offset, length int64) (string, error) {
+func MapFile(f *os.File, offset, length int64) (string, error) {
// POSIX mmap: "The implementation may require that off is a
// multiple of the page size."
x := offset & int64(os.Getpagesize()-1)
diff --git a/src/cmd/compile/internal/typecheck/mapfile_read.go b/src/cmd/compile/internal/base/mapfile_read.go
index 9637ab97ab..01796a9bab 100644
--- a/src/cmd/compile/internal/typecheck/mapfile_read.go
+++ b/src/cmd/compile/internal/base/mapfile_read.go
@@ -5,14 +5,14 @@
//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
-package typecheck
+package base
import (
"io"
"os"
)
-func mapFile(f *os.File, offset, length int64) (string, error) {
+func MapFile(f *os.File, offset, length int64) (string, error) {
buf := make([]byte, length)
_, err := io.ReadFull(io.NewSectionReader(f, offset, length), buf)
if err != nil {
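With mapFile moved from typecheck into base and exported as MapFile, other packages (for example the importers that read export data) can share one implementation: mmap where available, a plain read fallback elsewhere. A small usage sketch with a hypothetical caller; only the MapFile signature comes from this change (imports assumed: "os", "cmd/compile/internal/base"):

    // readSection returns length bytes of the named file starting at off,
    // using the newly exported base.MapFile.
    func readSection(path string, off, length int64) (string, error) {
    	f, err := os.Open(path)
    	if err != nil {
    		return "", err
    	}
    	defer f.Close()
    	return base.MapFile(f, off, length)
    }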
diff --git a/src/cmd/compile/internal/base/print.go b/src/cmd/compile/internal/base/print.go
index b095fd704d..4afe2eb9ee 100644
--- a/src/cmd/compile/internal/base/print.go
+++ b/src/cmd/compile/internal/base/print.go
@@ -233,6 +233,27 @@ func FatalfAt(pos src.XPos, format string, args ...interface{}) {
ErrorExit()
}
+// Assert reports "assertion failed" with Fatalf, unless b is true.
+func Assert(b bool) {
+ if !b {
+ Fatalf("assertion failed")
+ }
+}
+
+// Assertf reports a fatal error with Fatalf, unless b is true.
+func Assertf(b bool, format string, args ...interface{}) {
+ if !b {
+ Fatalf(format, args...)
+ }
+}
+
+// AssertfAt reports a fatal error with FatalfAt, unless b is true.
+func AssertfAt(b bool, pos src.XPos, format string, args ...interface{}) {
+ if !b {
+ FatalfAt(pos, format, args...)
+ }
+}
+
// hcrash crashes the compiler when -h is set, to find out where a message is generated.
func hcrash() {
if Flag.LowerH != 0 {
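The new Assert helpers wrap Fatalf/FatalfAt so callers can state invariants in one line. A hedged sketch of typical call sites; the helper name, conditions, and messages are illustrative only:

    // checkResults is a hypothetical helper showing the three assertion forms.
    func checkResults(call *ir.CallExpr, ks, results []int) {
    	base.Assert(len(ks) == len(results))
    	base.Assertf(call != nil, "missing call expression")
    	base.AssertfAt(call.Op() == ir.OCALLFUNC, call.Pos(), "unexpected op %v", call.Op())
    }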
diff --git a/src/cmd/compile/internal/escape/assign.go b/src/cmd/compile/internal/escape/assign.go
new file mode 100644
index 0000000000..80697bf37b
--- /dev/null
+++ b/src/cmd/compile/internal/escape/assign.go
@@ -0,0 +1,120 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+)
+
+// addr evaluates an addressable expression n and returns a hole
+// that represents storing into the represented location.
+func (e *escape) addr(n ir.Node) hole {
+ if n == nil || ir.IsBlank(n) {
+ // Can happen in select case, range, maybe others.
+ return e.discardHole()
+ }
+
+ k := e.heapHole()
+
+ switch n.Op() {
+ default:
+ base.Fatalf("unexpected addr: %v", n)
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ if n.Class == ir.PEXTERN {
+ break
+ }
+ k = e.oldLoc(n).asHole()
+ case ir.OLINKSYMOFFSET:
+ break
+ case ir.ODOT:
+ n := n.(*ir.SelectorExpr)
+ k = e.addr(n.X)
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ e.discard(n.Index)
+ if n.X.Type().IsArray() {
+ k = e.addr(n.X)
+ } else {
+ e.discard(n.X)
+ }
+ case ir.ODEREF, ir.ODOTPTR:
+ e.discard(n)
+ case ir.OINDEXMAP:
+ n := n.(*ir.IndexExpr)
+ e.discard(n.X)
+ e.assignHeap(n.Index, "key of map put", n)
+ }
+
+ return k
+}
+
+func (e *escape) addrs(l ir.Nodes) []hole {
+ var ks []hole
+ for _, n := range l {
+ ks = append(ks, e.addr(n))
+ }
+ return ks
+}
+
+func (e *escape) assignHeap(src ir.Node, why string, where ir.Node) {
+ e.expr(e.heapHole().note(where, why), src)
+}
+
+// assignList evaluates the assignment dsts... = srcs....
+func (e *escape) assignList(dsts, srcs []ir.Node, why string, where ir.Node) {
+ ks := e.addrs(dsts)
+ for i, k := range ks {
+ var src ir.Node
+ if i < len(srcs) {
+ src = srcs[i]
+ }
+
+ if dst := dsts[i]; dst != nil {
+ // Detect implicit conversion of uintptr to unsafe.Pointer when
+ // storing into reflect.{Slice,String}Header.
+ if dst.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(dst) {
+ e.unsafeValue(e.heapHole().note(where, why), src)
+ continue
+ }
+
+ // Filter out some no-op assignments for escape analysis.
+ if src != nil && isSelfAssign(dst, src) {
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(where.Pos(), "%v ignoring self-assignment in %v", e.curfn, where)
+ }
+ k = e.discardHole()
+ }
+ }
+
+ e.expr(k.note(where, why), src)
+ }
+
+ e.reassigned(ks, where)
+}
+
+// reassigned marks the locations associated with the given holes as
+// reassigned, unless the location represents a variable declared and
+// assigned exactly once by where.
+func (e *escape) reassigned(ks []hole, where ir.Node) {
+ if as, ok := where.(*ir.AssignStmt); ok && as.Op() == ir.OAS && as.Y == nil {
+ if dst, ok := as.X.(*ir.Name); ok && dst.Op() == ir.ONAME && dst.Defn == nil {
+ // Zero-value assignment for variable declared without an
+ // explicit initial value. Assume this is its initialization
+ // statement.
+ return
+ }
+ }
+
+ for _, k := range ks {
+ loc := k.dst
+ // Variables declared by range statements are assigned on every iteration.
+ if n, ok := loc.n.(*ir.Name); ok && n.Defn == where && where.Op() != ir.ORANGE {
+ continue
+ }
+ loc.reassigned = true
+ }
+}
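For context on the isSelfAssign filter used in assignList above: it skips assignments whose source and destination denote the same storage, so no new flow edge is recorded. The snippet below is an illustration of the kind of pattern this targets, assuming (per the historical bytes.Buffer motivation) that in-place re-slicing of the same field is among the recognized forms; the exact set of patterns is decided by isSelfAssign, which is defined outside this hunk:

    // ring is illustrative user code, not part of the compiler.
    type ring struct {
    	buf []byte
    }

    // trim re-slices r.buf in place. The source and destination refer to the
    // same field of the same base, so escape analysis can treat the
    // assignment as a no-op rather than a new flow of r.buf.
    func (r *ring) trim(n int) {
    	r.buf = r.buf[:n]
    }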
diff --git a/src/cmd/compile/internal/escape/call.go b/src/cmd/compile/internal/escape/call.go
new file mode 100644
index 0000000000..9e5abed591
--- /dev/null
+++ b/src/cmd/compile/internal/escape/call.go
@@ -0,0 +1,428 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// call evaluates a call expression, including builtin calls. ks
+// should contain the holes representing where the function callee's
+// results flow.
+func (e *escape) call(ks []hole, call ir.Node) {
+ var init ir.Nodes
+ e.callCommon(ks, call, &init, nil)
+ if len(init) != 0 {
+ call.(*ir.CallExpr).PtrInit().Append(init...)
+ }
+}
+
+func (e *escape) callCommon(ks []hole, call ir.Node, init *ir.Nodes, wrapper *ir.Func) {
+
+ // argumentFunc handles escape analysis of argument *argp to the
+ // given hole. If the function callee is statically known, fn is
+ // its *ir.Name; otherwise fn is nil.
+ argumentFunc := func(fn *ir.Name, k hole, argp *ir.Node) {
+ e.rewriteArgument(argp, init, call, fn, wrapper)
+
+ e.expr(k.note(call, "call parameter"), *argp)
+ }
+
+ argument := func(k hole, argp *ir.Node) {
+ argumentFunc(nil, k, argp)
+ }
+
+ switch call.Op() {
+ default:
+ ir.Dump("esc", call)
+ base.Fatalf("unexpected call op: %v", call.Op())
+
+ case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
+ call := call.(*ir.CallExpr)
+ typecheck.FixVariadicCall(call)
+ typecheck.FixMethodCall(call)
+
+ // Pick out the function callee, if statically known.
+ //
+ // TODO(mdempsky): Change fn from *ir.Name to *ir.Func, but some
+ // functions (e.g., runtime builtins, method wrappers, generated
+ // eq/hash functions) don't have it set. Investigate whether
+ // that's a concern.
+ var fn *ir.Name
+ switch call.Op() {
+ case ir.OCALLFUNC:
+ // If we have a direct call to a closure (not just one we were
+ // able to statically resolve with ir.StaticValue), mark it as
+ // such so batch.outlives can optimize the flow results.
+ if call.X.Op() == ir.OCLOSURE {
+ call.X.(*ir.ClosureExpr).Func.SetClosureCalled(true)
+ }
+
+ switch v := ir.StaticValue(call.X); v.Op() {
+ case ir.ONAME:
+ if v := v.(*ir.Name); v.Class == ir.PFUNC {
+ fn = v
+ }
+ case ir.OCLOSURE:
+ fn = v.(*ir.ClosureExpr).Func.Nname
+ case ir.OMETHEXPR:
+ fn = ir.MethodExprName(v)
+ }
+ case ir.OCALLMETH:
+ base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
+ }
+
+ fntype := call.X.Type()
+ if fn != nil {
+ fntype = fn.Type()
+ }
+
+ if ks != nil && fn != nil && e.inMutualBatch(fn) {
+ for i, result := range fn.Type().Results().FieldSlice() {
+ e.expr(ks[i], ir.AsNode(result.Nname))
+ }
+ }
+
+ var recvp *ir.Node
+ if call.Op() == ir.OCALLFUNC {
+ // Evaluate callee function expression.
+ //
+ // Note: We use argument and not argumentFunc, because while
+ // call.X here may be an argument to runtime.{new,defer}proc,
+ // it's not an argument to fn itself.
+ argument(e.discardHole(), &call.X)
+ } else {
+ recvp = &call.X.(*ir.SelectorExpr).X
+ }
+
+ args := call.Args
+ if recv := fntype.Recv(); recv != nil {
+ if recvp == nil {
+ // Function call using method expression. Receiver argument is
+ // at the front of the regular arguments list.
+ recvp = &args[0]
+ args = args[1:]
+ }
+
+ argumentFunc(fn, e.tagHole(ks, fn, recv), recvp)
+ }
+
+ for i, param := range fntype.Params().FieldSlice() {
+ argumentFunc(fn, e.tagHole(ks, fn, param), &args[i])
+ }
+
+ case ir.OINLCALL:
+ call := call.(*ir.InlinedCallExpr)
+ e.stmts(call.Body)
+ for i, result := range call.ReturnVars {
+ k := e.discardHole()
+ if ks != nil {
+ k = ks[i]
+ }
+ e.expr(k, result)
+ }
+
+ case ir.OAPPEND:
+ call := call.(*ir.CallExpr)
+ args := call.Args
+
+ // Appendee slice may flow directly to the result, if
+ // it has enough capacity. Alternatively, a new heap
+ // slice might be allocated, and all slice elements
+ // might flow to heap.
+ appendeeK := ks[0]
+ if args[0].Type().Elem().HasPointers() {
+ appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
+ }
+ argument(appendeeK, &args[0])
+
+ if call.IsDDD {
+ appendedK := e.discardHole()
+ if args[1].Type().IsSlice() && args[1].Type().Elem().HasPointers() {
+ appendedK = e.heapHole().deref(call, "appended slice...")
+ }
+ argument(appendedK, &args[1])
+ } else {
+ for i := 1; i < len(args); i++ {
+ argument(e.heapHole(), &args[i])
+ }
+ }
+
+ case ir.OCOPY:
+ call := call.(*ir.BinaryExpr)
+ argument(e.discardHole(), &call.X)
+
+ copiedK := e.discardHole()
+ if call.Y.Type().IsSlice() && call.Y.Type().Elem().HasPointers() {
+ copiedK = e.heapHole().deref(call, "copied slice")
+ }
+ argument(copiedK, &call.Y)
+
+ case ir.OPANIC:
+ call := call.(*ir.UnaryExpr)
+ argument(e.heapHole(), &call.X)
+
+ case ir.OCOMPLEX:
+ call := call.(*ir.BinaryExpr)
+ argument(e.discardHole(), &call.X)
+ argument(e.discardHole(), &call.Y)
+
+ case ir.ODELETE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+ call := call.(*ir.CallExpr)
+ fixRecoverCall(call)
+ for i := range call.Args {
+ argument(e.discardHole(), &call.Args[i])
+ }
+
+ case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE:
+ call := call.(*ir.UnaryExpr)
+ argument(e.discardHole(), &call.X)
+
+ case ir.OUNSAFEADD, ir.OUNSAFESLICE:
+ call := call.(*ir.BinaryExpr)
+ argument(ks[0], &call.X)
+ argument(e.discardHole(), &call.Y)
+ }
+}
+
+// goDeferStmt analyzes a "go" or "defer" statement.
+//
+// In the process, it also normalizes the statement to always use a
+// simple function call with no arguments and no results. For example,
+// it rewrites:
+//
+// defer f(x, y)
+//
+// into:
+//
+// x1, y1 := x, y
+// defer func() { f(x1, y1) }()
+func (e *escape) goDeferStmt(n *ir.GoDeferStmt) {
+ k := e.heapHole()
+ if n.Op() == ir.ODEFER && e.loopDepth == 1 {
+ // Top-level defer arguments don't escape to the heap,
+ // but they do need to last until they're invoked.
+ k = e.later(e.discardHole())
+
+ // force stack allocation of defer record, unless
+ // open-coded defers are used (see ssa.go)
+ n.SetEsc(ir.EscNever)
+ }
+
+ call := n.Call
+
+ init := n.PtrInit()
+ init.Append(ir.TakeInit(call)...)
+ e.stmts(*init)
+
+ // If the function is already a zero argument/result function call,
+ // just escape analyze it normally.
+ if call, ok := call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC {
+ if sig := call.X.Type(); sig.NumParams()+sig.NumResults() == 0 {
+ if clo, ok := call.X.(*ir.ClosureExpr); ok && n.Op() == ir.OGO {
+ clo.IsGoWrap = true
+ }
+ e.expr(k, call.X)
+ return
+ }
+ }
+
+ // Create a new no-argument function that we'll hand off to defer.
+ fn := ir.NewClosureFunc(n.Pos(), true)
+ fn.SetWrapper(true)
+ fn.Nname.SetType(types.NewSignature(types.LocalPkg, nil, nil, nil, nil))
+ fn.Body = []ir.Node{call}
+
+ clo := fn.OClosure
+ if n.Op() == ir.OGO {
+ clo.IsGoWrap = true
+ }
+
+ e.callCommon(nil, call, init, fn)
+ e.closures = append(e.closures, closure{e.spill(k, clo), clo})
+
+ // Create new top level call to closure.
+ n.Call = ir.NewCallExpr(call.Pos(), ir.OCALL, clo, nil)
+ ir.WithFunc(e.curfn, func() {
+ typecheck.Stmt(n.Call)
+ })
+}
+
+// rewriteArgument rewrites the argument *argp of the given call expression.
+// fn is the static callee function, if known.
+// wrapper is the go/defer wrapper function for call, if any.
+func (e *escape) rewriteArgument(argp *ir.Node, init *ir.Nodes, call ir.Node, fn *ir.Name, wrapper *ir.Func) {
+ var pragma ir.PragmaFlag
+ if fn != nil && fn.Func != nil {
+ pragma = fn.Func.Pragma
+ }
+
+ // unsafeUintptr rewrites "uintptr(ptr)" arguments to syscall-like
+ // functions, so that ptr is kept alive and/or escaped as
+ // appropriate. unsafeUintptr also reports whether it modified arg0.
+ unsafeUintptr := func(arg0 ir.Node) bool {
+ if pragma&(ir.UintptrKeepAlive|ir.UintptrEscapes) == 0 {
+ return false
+ }
+
+ // If the argument is really a pointer being converted to uintptr,
+ // arrange for the pointer to be kept alive until the call returns,
+ // by copying it into a temp and marking that temp
+ // still alive when we pop the temp stack.
+ if arg0.Op() != ir.OCONVNOP || !arg0.Type().IsUintptr() {
+ return false
+ }
+ arg := arg0.(*ir.ConvExpr)
+
+ if !arg.X.Type().IsUnsafePtr() {
+ return false
+ }
+
+ // Create and declare a new pointer-typed temp variable.
+ tmp := e.wrapExpr(arg.Pos(), &arg.X, init, call, wrapper)
+
+ if pragma&ir.UintptrEscapes != 0 {
+ e.flow(e.heapHole().note(arg, "//go:uintptrescapes"), e.oldLoc(tmp))
+ }
+
+ if pragma&ir.UintptrKeepAlive != 0 {
+ call := call.(*ir.CallExpr)
+
+ // SSA implements CallExpr.KeepAlive using OpVarLive, which
+ // doesn't support PAUTOHEAP variables. I tried changing it to
+ // use OpKeepAlive, but that ran into issues of its own.
+ // For now, the easy solution is to explicitly copy to (yet
+ // another) new temporary variable.
+ keep := tmp
+ if keep.Class == ir.PAUTOHEAP {
+ keep = e.copyExpr(arg.Pos(), tmp, call.PtrInit(), wrapper, false)
+ }
+
+ keep.SetAddrtaken(true) // ensure SSA keeps the tmp variable
+ call.KeepAlive = append(call.KeepAlive, keep)
+ }
+
+ return true
+ }
+
+ visit := func(pos src.XPos, argp *ir.Node) {
+ // Optimize a few common constant expressions. By leaving these
+ // untouched in the call expression, we let the wrapper handle
+ // evaluating them, rather than taking up closure context space.
+ switch arg := *argp; arg.Op() {
+ case ir.OLITERAL, ir.ONIL, ir.OMETHEXPR:
+ return
+ case ir.ONAME:
+ if arg.(*ir.Name).Class == ir.PFUNC {
+ return
+ }
+ }
+
+ if unsafeUintptr(*argp) {
+ return
+ }
+
+ if wrapper != nil {
+ e.wrapExpr(pos, argp, init, call, wrapper)
+ }
+ }
+
+ // Peel away any slice lits.
+ if arg := *argp; arg.Op() == ir.OSLICELIT {
+ list := arg.(*ir.CompLitExpr).List
+ for i := range list {
+ visit(arg.Pos(), &list[i])
+ }
+ } else {
+ visit(call.Pos(), argp)
+ }
+}
+
+// wrapExpr replaces *exprp with a temporary variable copy. If wrapper
+// is non-nil, the variable will be captured for use within that
+// function.
+func (e *escape) wrapExpr(pos src.XPos, exprp *ir.Node, init *ir.Nodes, call ir.Node, wrapper *ir.Func) *ir.Name {
+ tmp := e.copyExpr(pos, *exprp, init, e.curfn, true)
+
+ if wrapper != nil {
+ // Currently for "defer i.M()" if i is nil it panics at the point
+ // of defer statement, not when deferred function is called. We
+ // need to do the nil check outside of the wrapper.
+ if call.Op() == ir.OCALLINTER && exprp == &call.(*ir.CallExpr).X.(*ir.SelectorExpr).X {
+ check := ir.NewUnaryExpr(pos, ir.OCHECKNIL, ir.NewUnaryExpr(pos, ir.OITAB, tmp))
+ init.Append(typecheck.Stmt(check))
+ }
+
+ e.oldLoc(tmp).captured = true
+
+ tmp = ir.NewClosureVar(pos, wrapper, tmp)
+ }
+
+ *exprp = tmp
+ return tmp
+}
+
+// copyExpr creates and returns a new temporary variable within fn;
+// appends statements to init to declare and initialize it to expr;
+// and escape analyzes the data flow if analyze is true.
+func (e *escape) copyExpr(pos src.XPos, expr ir.Node, init *ir.Nodes, fn *ir.Func, analyze bool) *ir.Name {
+ if ir.HasUniquePos(expr) {
+ pos = expr.Pos()
+ }
+
+ tmp := typecheck.TempAt(pos, fn, expr.Type())
+
+ stmts := []ir.Node{
+ ir.NewDecl(pos, ir.ODCL, tmp),
+ ir.NewAssignStmt(pos, tmp, expr),
+ }
+ typecheck.Stmts(stmts)
+ init.Append(stmts...)
+
+ if analyze {
+ e.newLoc(tmp, false)
+ e.stmts(stmts)
+ }
+
+ return tmp
+}
+
+// tagHole returns a hole for evaluating an argument passed to param.
+// ks should contain the holes representing where the function
+// callee's results flow. fn is the statically-known callee function,
+// if any.
+func (e *escape) tagHole(ks []hole, fn *ir.Name, param *types.Field) hole {
+ // If this is a dynamic call, we can't rely on param.Note.
+ if fn == nil {
+ return e.heapHole()
+ }
+
+ if e.inMutualBatch(fn) {
+ return e.addr(ir.AsNode(param.Nname))
+ }
+
+ // Call to previously tagged function.
+
+ var tagKs []hole
+
+ esc := parseLeaks(param.Note)
+ if x := esc.Heap(); x >= 0 {
+ tagKs = append(tagKs, e.heapHole().shift(x))
+ }
+
+ if ks != nil {
+ for i := 0; i < numEscResults; i++ {
+ if x := esc.Result(i); x >= 0 {
+ tagKs = append(tagKs, ks[i].shift(x))
+ }
+ }
+ }
+
+ return e.teeHole(tagKs...)
+}
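A user-level sketch of the normalization described in goDeferStmt's doc comment above. Both forms print the values x and y held at the point of the defer statement; copying into the wrapper's captured variables is what preserves defer's evaluate-arguments-now semantics. This is plain illustrative Go, not compiler output:

    package main

    import "fmt"

    func f(a, b int) { fmt.Println(a, b) }

    // before is the form as written by the user.
    func before() {
    	x, y := 1, 2
    	defer f(x, y) // arguments are evaluated here
    	x, y = 0, 0   // later mutations do not affect the deferred call
    }

    // after is roughly what goDeferStmt rewrites it into: a zero-argument
    // wrapper over copies taken at the defer statement.
    func after() {
    	x, y := 1, 2
    	x1, y1 := x, y
    	defer func() { f(x1, y1) }()
    	x, y = 0, 0
    }

    func main() {
    	before() // prints 1 2
    	after()  // prints 1 2
    }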
diff --git a/src/cmd/compile/internal/escape/desugar.go b/src/cmd/compile/internal/escape/desugar.go
new file mode 100644
index 0000000000..8b3cc25cf9
--- /dev/null
+++ b/src/cmd/compile/internal/escape/desugar.go
@@ -0,0 +1,37 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+)
+
+// TODO(mdempsky): Desugaring doesn't belong during escape analysis,
+// but for now it's the most convenient place for some rewrites.
+
+// fixRecoverCall rewrites an ORECOVER call into ORECOVERFP,
+// adding an explicit frame pointer argument.
+// If call is not an ORECOVER call, it's left unmodified.
+func fixRecoverCall(call *ir.CallExpr) {
+ if call.Op() != ir.ORECOVER {
+ return
+ }
+
+ pos := call.Pos()
+
+ // FP is equal to caller's SP plus FixedFrameSize().
+ var fp ir.Node = ir.NewCallExpr(pos, ir.OGETCALLERSP, nil, nil)
+ if off := base.Ctxt.FixedFrameSize(); off != 0 {
+ fp = ir.NewBinaryExpr(fp.Pos(), ir.OADD, fp, ir.NewInt(off))
+ }
+ // TODO(mdempsky): Replace *int32 with unsafe.Pointer, without upsetting checkptr.
+ fp = ir.NewConvExpr(pos, ir.OCONVNOP, types.NewPtr(types.Types[types.TINT32]), fp)
+
+ call.SetOp(ir.ORECOVERFP)
+ call.Args = []ir.Node{typecheck.Expr(fp)}
+}
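fixRecoverCall only changes how the compiler lowers recover; at the source level the construct is unchanged. Below is a small runnable example of the kind of call whose ORECOVER node now becomes ORECOVERFP with an explicit frame-pointer argument; the example is ordinary user code, independent of this change:

    package main

    import "fmt"

    // safeDiv recovers from the divide-by-zero panic in the deferred closure;
    // the recover() call is the ORECOVER node that fixRecoverCall rewrites.
    func safeDiv(a, b int) (q int, err error) {
    	defer func() {
    		if r := recover(); r != nil {
    			err = fmt.Errorf("recovered: %v", r)
    		}
    	}()
    	return a / b, nil
    }

    func main() {
    	fmt.Println(safeDiv(6, 3)) // 2 <nil>
    	fmt.Println(safeDiv(1, 0)) // 0 recovered: runtime error: integer divide by zero
    }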
diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go
index cd56f07b61..61e0121a40 100644
--- a/src/cmd/compile/internal/escape/escape.go
+++ b/src/cmd/compile/internal/escape/escape.go
@@ -6,15 +6,11 @@ package escape
import (
"fmt"
- "math"
- "strings"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
- "cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
- "cmd/internal/src"
)
// Escape analysis.
@@ -118,90 +114,8 @@ type escape struct {
loopDepth int
}
-// An location represents an abstract location that stores a Go
-// variable.
-type location struct {
- n ir.Node // represented variable or expression, if any
- curfn *ir.Func // enclosing function
- edges []edge // incoming edges
- loopDepth int // loopDepth at declaration
-
- // resultIndex records the tuple index (starting at 1) for
- // PPARAMOUT variables within their function's result type.
- // For non-PPARAMOUT variables it's 0.
- resultIndex int
-
- // derefs and walkgen are used during walkOne to track the
- // minimal dereferences from the walk root.
- derefs int // >= -1
- walkgen uint32
-
- // dst and dstEdgeindex track the next immediate assignment
- // destination location during walkone, along with the index
- // of the edge pointing back to this location.
- dst *location
- dstEdgeIdx int
-
- // queued is used by walkAll to track whether this location is
- // in the walk queue.
- queued bool
-
- // escapes reports whether the represented variable's address
- // escapes; that is, whether the variable must be heap
- // allocated.
- escapes bool
-
- // transient reports whether the represented expression's
- // address does not outlive the statement; that is, whether
- // its storage can be immediately reused.
- transient bool
-
- // paramEsc records the represented parameter's leak set.
- paramEsc leaks
-
- captured bool // has a closure captured this variable?
- reassigned bool // has this variable been reassigned?
- addrtaken bool // has this variable's address been taken?
-}
-
-// An edge represents an assignment edge between two Go variables.
-type edge struct {
- src *location
- derefs int // >= -1
- notes *note
-}
-
-// Fmt is called from node printing to print information about escape analysis results.
-func Fmt(n ir.Node) string {
- text := ""
- switch n.Esc() {
- case ir.EscUnknown:
- break
-
- case ir.EscHeap:
- text = "esc(h)"
-
- case ir.EscNone:
- text = "esc(no)"
-
- case ir.EscNever:
- text = "esc(N)"
-
- default:
- text = fmt.Sprintf("esc(%d)", n.Esc())
- }
-
- if n.Op() == ir.ONAME {
- n := n.(*ir.Name)
- if loc, ok := n.Opt.(*location); ok && loc.loopDepth != 0 {
- if text != "" {
- text += " "
- }
- text += fmt.Sprintf("ld(%d)", loc.loopDepth)
- }
- }
-
- return text
+func Funcs(all []ir.Node) {
+ ir.VisitFuncsBottomUp(all, Batch)
}
// Batch performs escape analysis on a minimal batch of
@@ -269,8 +183,14 @@ func (b *batch) initFunc(fn *ir.Func) {
// Allocate locations for local variables.
for _, n := range fn.Dcl {
- if n.Op() == ir.ONAME {
- e.newLoc(n, false)
+ e.newLoc(n, false)
+ }
+
+ // Also for hidden parameters (e.g., the ".this" parameter to a
+ // method value wrapper).
+ if fn.OClosure == nil {
+ for _, n := range fn.ClosureVars {
+ e.newLoc(n.Canonical(), false)
}
}
@@ -342,1316 +262,6 @@ func (b *batch) flowClosure(k hole, clo *ir.ClosureExpr) {
}
}
-// Below we implement the methods for walking the AST and recording
-// data flow edges. Note that because a sub-expression might have
-// side-effects, it's important to always visit the entire AST.
-//
-// For example, write either:
-//
-// if x {
-// e.discard(n.Left)
-// } else {
-// e.value(k, n.Left)
-// }
-//
-// or
-//
-// if x {
-// k = e.discardHole()
-// }
-// e.value(k, n.Left)
-//
-// Do NOT write:
-//
-// // BAD: possibly loses side-effects within n.Left
-// if !x {
-// e.value(k, n.Left)
-// }
-
-// stmt evaluates a single Go statement.
-func (e *escape) stmt(n ir.Node) {
- if n == nil {
- return
- }
-
- lno := ir.SetPos(n)
- defer func() {
- base.Pos = lno
- }()
-
- if base.Flag.LowerM > 2 {
- fmt.Printf("%v:[%d] %v stmt: %v\n", base.FmtPos(base.Pos), e.loopDepth, e.curfn, n)
- }
-
- e.stmts(n.Init())
-
- switch n.Op() {
- default:
- base.Fatalf("unexpected stmt: %v", n)
-
- case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL, ir.OINLMARK:
- // nop
-
- case ir.OBREAK, ir.OCONTINUE, ir.OGOTO:
- // TODO(mdempsky): Handle dead code?
-
- case ir.OBLOCK:
- n := n.(*ir.BlockStmt)
- e.stmts(n.List)
-
- case ir.ODCL:
- // Record loop depth at declaration.
- n := n.(*ir.Decl)
- if !ir.IsBlank(n.X) {
- e.dcl(n.X)
- }
-
- case ir.OLABEL:
- n := n.(*ir.LabelStmt)
- switch e.labels[n.Label] {
- case nonlooping:
- if base.Flag.LowerM > 2 {
- fmt.Printf("%v:%v non-looping label\n", base.FmtPos(base.Pos), n)
- }
- case looping:
- if base.Flag.LowerM > 2 {
- fmt.Printf("%v: %v looping label\n", base.FmtPos(base.Pos), n)
- }
- e.loopDepth++
- default:
- base.Fatalf("label missing tag")
- }
- delete(e.labels, n.Label)
-
- case ir.OIF:
- n := n.(*ir.IfStmt)
- e.discard(n.Cond)
- e.block(n.Body)
- e.block(n.Else)
-
- case ir.OFOR, ir.OFORUNTIL:
- n := n.(*ir.ForStmt)
- e.loopDepth++
- e.discard(n.Cond)
- e.stmt(n.Post)
- e.block(n.Body)
- e.loopDepth--
-
- case ir.ORANGE:
- // for Key, Value = range X { Body }
- n := n.(*ir.RangeStmt)
-
- // X is evaluated outside the loop.
- tmp := e.newLoc(nil, false)
- e.expr(tmp.asHole(), n.X)
-
- e.loopDepth++
- ks := e.addrs([]ir.Node{n.Key, n.Value})
- if n.X.Type().IsArray() {
- e.flow(ks[1].note(n, "range"), tmp)
- } else {
- e.flow(ks[1].deref(n, "range-deref"), tmp)
- }
- e.reassigned(ks, n)
-
- e.block(n.Body)
- e.loopDepth--
-
- case ir.OSWITCH:
- n := n.(*ir.SwitchStmt)
-
- if guard, ok := n.Tag.(*ir.TypeSwitchGuard); ok {
- var ks []hole
- if guard.Tag != nil {
- for _, cas := range n.Cases {
- cv := cas.Var
- k := e.dcl(cv) // type switch variables have no ODCL.
- if cv.Type().HasPointers() {
- ks = append(ks, k.dotType(cv.Type(), cas, "switch case"))
- }
- }
- }
- e.expr(e.teeHole(ks...), n.Tag.(*ir.TypeSwitchGuard).X)
- } else {
- e.discard(n.Tag)
- }
-
- for _, cas := range n.Cases {
- e.discards(cas.List)
- e.block(cas.Body)
- }
-
- case ir.OSELECT:
- n := n.(*ir.SelectStmt)
- for _, cas := range n.Cases {
- e.stmt(cas.Comm)
- e.block(cas.Body)
- }
- case ir.ORECV:
- // TODO(mdempsky): Consider e.discard(n.Left).
- n := n.(*ir.UnaryExpr)
- e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit
- case ir.OSEND:
- n := n.(*ir.SendStmt)
- e.discard(n.Chan)
- e.assignHeap(n.Value, "send", n)
-
- case ir.OAS:
- n := n.(*ir.AssignStmt)
- e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n)
- case ir.OASOP:
- n := n.(*ir.AssignOpStmt)
- // TODO(mdempsky): Worry about OLSH/ORSH?
- e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n)
- case ir.OAS2:
- n := n.(*ir.AssignListStmt)
- e.assignList(n.Lhs, n.Rhs, "assign-pair", n)
-
- case ir.OAS2DOTTYPE: // v, ok = x.(type)
- n := n.(*ir.AssignListStmt)
- e.assignList(n.Lhs, n.Rhs, "assign-pair-dot-type", n)
- case ir.OAS2MAPR: // v, ok = m[k]
- n := n.(*ir.AssignListStmt)
- e.assignList(n.Lhs, n.Rhs, "assign-pair-mapr", n)
- case ir.OAS2RECV, ir.OSELRECV2: // v, ok = <-ch
- n := n.(*ir.AssignListStmt)
- e.assignList(n.Lhs, n.Rhs, "assign-pair-receive", n)
-
- case ir.OAS2FUNC:
- n := n.(*ir.AssignListStmt)
- e.stmts(n.Rhs[0].Init())
- ks := e.addrs(n.Lhs)
- e.call(ks, n.Rhs[0], nil)
- e.reassigned(ks, n)
- case ir.ORETURN:
- n := n.(*ir.ReturnStmt)
- results := e.curfn.Type().Results().FieldSlice()
- dsts := make([]ir.Node, len(results))
- for i, res := range results {
- dsts[i] = res.Nname.(*ir.Name)
- }
- e.assignList(dsts, n.Results, "return", n)
- case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
- e.call(nil, n, nil)
- case ir.OGO, ir.ODEFER:
- n := n.(*ir.GoDeferStmt)
- e.stmts(n.Call.Init())
- e.call(nil, n.Call, n)
-
- case ir.OTAILCALL:
- // TODO(mdempsky): Treat like a normal call? esc.go used to just ignore it.
- }
-}
-
-func (e *escape) stmts(l ir.Nodes) {
- for _, n := range l {
- e.stmt(n)
- }
-}
-
-// block is like stmts, but preserves loopDepth.
-func (e *escape) block(l ir.Nodes) {
- old := e.loopDepth
- e.stmts(l)
- e.loopDepth = old
-}
-
-// expr models evaluating an expression n and flowing the result into
-// hole k.
-func (e *escape) expr(k hole, n ir.Node) {
- if n == nil {
- return
- }
- e.stmts(n.Init())
- e.exprSkipInit(k, n)
-}
-
-func (e *escape) exprSkipInit(k hole, n ir.Node) {
- if n == nil {
- return
- }
-
- lno := ir.SetPos(n)
- defer func() {
- base.Pos = lno
- }()
-
- uintptrEscapesHack := k.uintptrEscapesHack
- k.uintptrEscapesHack = false
-
- if uintptrEscapesHack && n.Op() == ir.OCONVNOP && n.(*ir.ConvExpr).X.Type().IsUnsafePtr() {
- // nop
- } else if k.derefs >= 0 && !n.Type().HasPointers() {
- k.dst = &e.blankLoc
- }
-
- switch n.Op() {
- default:
- base.Fatalf("unexpected expr: %s %v", n.Op().String(), n)
-
- case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OTYPE, ir.OMETHEXPR, ir.OLINKSYMOFFSET:
- // nop
-
- case ir.ONAME:
- n := n.(*ir.Name)
- if n.Class == ir.PFUNC || n.Class == ir.PEXTERN {
- return
- }
- if n.IsClosureVar() && n.Defn == nil {
- return // ".this" from method value wrapper
- }
- e.flow(k, e.oldLoc(n))
-
- case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
- n := n.(*ir.UnaryExpr)
- e.discard(n.X)
- case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
- n := n.(*ir.BinaryExpr)
- e.discard(n.X)
- e.discard(n.Y)
- case ir.OANDAND, ir.OOROR:
- n := n.(*ir.LogicalExpr)
- e.discard(n.X)
- e.discard(n.Y)
- case ir.OADDR:
- n := n.(*ir.AddrExpr)
- e.expr(k.addr(n, "address-of"), n.X) // "address-of"
- case ir.ODEREF:
- n := n.(*ir.StarExpr)
- e.expr(k.deref(n, "indirection"), n.X) // "indirection"
- case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
- n := n.(*ir.SelectorExpr)
- e.expr(k.note(n, "dot"), n.X)
- case ir.ODOTPTR:
- n := n.(*ir.SelectorExpr)
- e.expr(k.deref(n, "dot of pointer"), n.X) // "dot of pointer"
- case ir.ODOTTYPE, ir.ODOTTYPE2:
- n := n.(*ir.TypeAssertExpr)
- e.expr(k.dotType(n.Type(), n, "dot"), n.X)
- case ir.OINDEX:
- n := n.(*ir.IndexExpr)
- if n.X.Type().IsArray() {
- e.expr(k.note(n, "fixed-array-index-of"), n.X)
- } else {
- // TODO(mdempsky): Fix why reason text.
- e.expr(k.deref(n, "dot of pointer"), n.X)
- }
- e.discard(n.Index)
- case ir.OINDEXMAP:
- n := n.(*ir.IndexExpr)
- e.discard(n.X)
- e.discard(n.Index)
- case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR:
- n := n.(*ir.SliceExpr)
- e.expr(k.note(n, "slice"), n.X)
- e.discard(n.Low)
- e.discard(n.High)
- e.discard(n.Max)
-
- case ir.OCONV, ir.OCONVNOP:
- n := n.(*ir.ConvExpr)
- if ir.ShouldCheckPtr(e.curfn, 2) && n.Type().IsUnsafePtr() && n.X.Type().IsPtr() {
- // When -d=checkptr=2 is enabled, treat
- // conversions to unsafe.Pointer as an
- // escaping operation. This allows better
- // runtime instrumentation, since we can more
- // easily detect object boundaries on the heap
- // than the stack.
- e.assignHeap(n.X, "conversion to unsafe.Pointer", n)
- } else if n.Type().IsUnsafePtr() && n.X.Type().IsUintptr() {
- e.unsafeValue(k, n.X)
- } else {
- e.expr(k, n.X)
- }
- case ir.OCONVIFACE:
- n := n.(*ir.ConvExpr)
- if !n.X.Type().IsInterface() && !types.IsDirectIface(n.X.Type()) {
- k = e.spill(k, n)
- }
- e.expr(k.note(n, "interface-converted"), n.X)
- case ir.OSLICE2ARRPTR:
- // the slice pointer flows directly to the result
- n := n.(*ir.ConvExpr)
- e.expr(k, n.X)
- case ir.ORECV:
- n := n.(*ir.UnaryExpr)
- e.discard(n.X)
-
- case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OLEN, ir.OCAP, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY, ir.OUNSAFEADD, ir.OUNSAFESLICE:
- e.call([]hole{k}, n, nil)
-
- case ir.ONEW:
- n := n.(*ir.UnaryExpr)
- e.spill(k, n)
-
- case ir.OMAKESLICE:
- n := n.(*ir.MakeExpr)
- e.spill(k, n)
- e.discard(n.Len)
- e.discard(n.Cap)
- case ir.OMAKECHAN:
- n := n.(*ir.MakeExpr)
- e.discard(n.Len)
- case ir.OMAKEMAP:
- n := n.(*ir.MakeExpr)
- e.spill(k, n)
- e.discard(n.Len)
-
- case ir.ORECOVER:
- // nop
-
- case ir.OCALLPART:
- // Flow the receiver argument to both the closure and
- // to the receiver parameter.
-
- n := n.(*ir.SelectorExpr)
- closureK := e.spill(k, n)
-
- m := n.Selection
-
- // We don't know how the method value will be called
- // later, so conservatively assume the result
- // parameters all flow to the heap.
- //
- // TODO(mdempsky): Change ks into a callback, so that
- // we don't have to create this slice?
- var ks []hole
- for i := m.Type.NumResults(); i > 0; i-- {
- ks = append(ks, e.heapHole())
- }
- name, _ := m.Nname.(*ir.Name)
- paramK := e.tagHole(ks, name, m.Type.Recv())
-
- e.expr(e.teeHole(paramK, closureK), n.X)
-
- case ir.OPTRLIT:
- n := n.(*ir.AddrExpr)
- e.expr(e.spill(k, n), n.X)
-
- case ir.OARRAYLIT:
- n := n.(*ir.CompLitExpr)
- for _, elt := range n.List {
- if elt.Op() == ir.OKEY {
- elt = elt.(*ir.KeyExpr).Value
- }
- e.expr(k.note(n, "array literal element"), elt)
- }
-
- case ir.OSLICELIT:
- n := n.(*ir.CompLitExpr)
- k = e.spill(k, n)
- k.uintptrEscapesHack = uintptrEscapesHack // for ...uintptr parameters
-
- for _, elt := range n.List {
- if elt.Op() == ir.OKEY {
- elt = elt.(*ir.KeyExpr).Value
- }
- e.expr(k.note(n, "slice-literal-element"), elt)
- }
-
- case ir.OSTRUCTLIT:
- n := n.(*ir.CompLitExpr)
- for _, elt := range n.List {
- e.expr(k.note(n, "struct literal element"), elt.(*ir.StructKeyExpr).Value)
- }
-
- case ir.OMAPLIT:
- n := n.(*ir.CompLitExpr)
- e.spill(k, n)
-
- // Map keys and values are always stored in the heap.
- for _, elt := range n.List {
- elt := elt.(*ir.KeyExpr)
- e.assignHeap(elt.Key, "map literal key", n)
- e.assignHeap(elt.Value, "map literal value", n)
- }
-
- case ir.OCLOSURE:
- n := n.(*ir.ClosureExpr)
- k = e.spill(k, n)
- e.closures = append(e.closures, closure{k, n})
-
- if fn := n.Func; fn.IsHiddenClosure() {
- for _, cv := range fn.ClosureVars {
- if loc := e.oldLoc(cv); !loc.captured {
- loc.captured = true
-
- // Ignore reassignments to the variable in straightline code
- // preceding the first capture by a closure.
- if loc.loopDepth == e.loopDepth {
- loc.reassigned = false
- }
- }
- }
-
- for _, n := range fn.Dcl {
- // Add locations for local variables of the
- // closure, if needed, in case we're not including
- // the closure func in the batch for escape
- // analysis (happens for escape analysis called
- // from reflectdata.methodWrapper)
- if n.Op() == ir.ONAME && n.Opt == nil {
- e.with(fn).newLoc(n, false)
- }
- }
- e.walkFunc(fn)
- }
-
- case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR:
- n := n.(*ir.ConvExpr)
- e.spill(k, n)
- e.discard(n.X)
-
- case ir.OADDSTR:
- n := n.(*ir.AddStringExpr)
- e.spill(k, n)
-
- // Arguments of OADDSTR never escape;
- // runtime.concatstrings makes sure of that.
- e.discards(n.List)
- }
-}
-
-// unsafeValue evaluates a uintptr-typed arithmetic expression looking
-// for conversions from an unsafe.Pointer.
-func (e *escape) unsafeValue(k hole, n ir.Node) {
- if n.Type().Kind() != types.TUINTPTR {
- base.Fatalf("unexpected type %v for %v", n.Type(), n)
- }
- if k.addrtaken {
- base.Fatalf("unexpected addrtaken")
- }
-
- e.stmts(n.Init())
-
- switch n.Op() {
- case ir.OCONV, ir.OCONVNOP:
- n := n.(*ir.ConvExpr)
- if n.X.Type().IsUnsafePtr() {
- e.expr(k, n.X)
- } else {
- e.discard(n.X)
- }
- case ir.ODOTPTR:
- n := n.(*ir.SelectorExpr)
- if ir.IsReflectHeaderDataField(n) {
- e.expr(k.deref(n, "reflect.Header.Data"), n.X)
- } else {
- e.discard(n.X)
- }
- case ir.OPLUS, ir.ONEG, ir.OBITNOT:
- n := n.(*ir.UnaryExpr)
- e.unsafeValue(k, n.X)
- case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OAND, ir.OANDNOT:
- n := n.(*ir.BinaryExpr)
- e.unsafeValue(k, n.X)
- e.unsafeValue(k, n.Y)
- case ir.OLSH, ir.ORSH:
- n := n.(*ir.BinaryExpr)
- e.unsafeValue(k, n.X)
- // RHS need not be uintptr-typed (#32959) and can't meaningfully
- // flow pointers anyway.
- e.discard(n.Y)
- default:
- e.exprSkipInit(e.discardHole(), n)
- }
-}
-
-// discard evaluates an expression n for side-effects, but discards
-// its value.
-func (e *escape) discard(n ir.Node) {
- e.expr(e.discardHole(), n)
-}
-
-func (e *escape) discards(l ir.Nodes) {
- for _, n := range l {
- e.discard(n)
- }
-}
-
-// addr evaluates an addressable expression n and returns a hole
-// that represents storing into the represented location.
-func (e *escape) addr(n ir.Node) hole {
- if n == nil || ir.IsBlank(n) {
- // Can happen in select case, range, maybe others.
- return e.discardHole()
- }
-
- k := e.heapHole()
-
- switch n.Op() {
- default:
- base.Fatalf("unexpected addr: %v", n)
- case ir.ONAME:
- n := n.(*ir.Name)
- if n.Class == ir.PEXTERN {
- break
- }
- k = e.oldLoc(n).asHole()
- case ir.OLINKSYMOFFSET:
- break
- case ir.ODOT:
- n := n.(*ir.SelectorExpr)
- k = e.addr(n.X)
- case ir.OINDEX:
- n := n.(*ir.IndexExpr)
- e.discard(n.Index)
- if n.X.Type().IsArray() {
- k = e.addr(n.X)
- } else {
- e.discard(n.X)
- }
- case ir.ODEREF, ir.ODOTPTR:
- e.discard(n)
- case ir.OINDEXMAP:
- n := n.(*ir.IndexExpr)
- e.discard(n.X)
- e.assignHeap(n.Index, "key of map put", n)
- }
-
- return k
-}
-
-func (e *escape) addrs(l ir.Nodes) []hole {
- var ks []hole
- for _, n := range l {
- ks = append(ks, e.addr(n))
- }
- return ks
-}
-
-// reassigned marks the locations associated with the given holes as
-// reassigned, unless the location represents a variable declared and
-// assigned exactly once by where.
-func (e *escape) reassigned(ks []hole, where ir.Node) {
- if as, ok := where.(*ir.AssignStmt); ok && as.Op() == ir.OAS && as.Y == nil {
- if dst, ok := as.X.(*ir.Name); ok && dst.Op() == ir.ONAME && dst.Defn == nil {
- // Zero-value assignment for variable declared without an
- // explicit initial value. Assume this is its initialization
- // statement.
- return
- }
- }
-
- for _, k := range ks {
- loc := k.dst
- // Variables declared by range statements are assigned on every iteration.
- if n, ok := loc.n.(*ir.Name); ok && n.Defn == where && where.Op() != ir.ORANGE {
- continue
- }
- loc.reassigned = true
- }
-}
-
-// assignList evaluates the assignment dsts... = srcs....
-func (e *escape) assignList(dsts, srcs []ir.Node, why string, where ir.Node) {
- ks := e.addrs(dsts)
- for i, k := range ks {
- var src ir.Node
- if i < len(srcs) {
- src = srcs[i]
- }
-
- if dst := dsts[i]; dst != nil {
- // Detect implicit conversion of uintptr to unsafe.Pointer when
- // storing into reflect.{Slice,String}Header.
- if dst.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(dst) {
- e.unsafeValue(e.heapHole().note(where, why), src)
- continue
- }
-
- // Filter out some no-op assignments for escape analysis.
- if src != nil && isSelfAssign(dst, src) {
- if base.Flag.LowerM != 0 {
- base.WarnfAt(where.Pos(), "%v ignoring self-assignment in %v", e.curfn, where)
- }
- k = e.discardHole()
- }
- }
-
- e.expr(k.note(where, why), src)
- }
-
- e.reassigned(ks, where)
-}
-
-func (e *escape) assignHeap(src ir.Node, why string, where ir.Node) {
- e.expr(e.heapHole().note(where, why), src)
-}
-
-// call evaluates a call expressions, including builtin calls. ks
-// should contain the holes representing where the function callee's
-// results flows; where is the OGO/ODEFER context of the call, if any.
-func (e *escape) call(ks []hole, call, where ir.Node) {
- topLevelDefer := where != nil && where.Op() == ir.ODEFER && e.loopDepth == 1
- if topLevelDefer {
- // force stack allocation of defer record, unless
- // open-coded defers are used (see ssa.go)
- where.SetEsc(ir.EscNever)
- }
-
- argument := func(k hole, arg ir.Node) {
- if topLevelDefer {
- // Top level defers arguments don't escape to
- // heap, but they do need to last until end of
- // function.
- k = e.later(k)
- } else if where != nil {
- k = e.heapHole()
- }
-
- e.expr(k.note(call, "call parameter"), arg)
- }
-
- switch call.Op() {
- default:
- ir.Dump("esc", call)
- base.Fatalf("unexpected call op: %v", call.Op())
-
- case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
- call := call.(*ir.CallExpr)
- typecheck.FixVariadicCall(call)
-
- // Pick out the function callee, if statically known.
- var fn *ir.Name
- switch call.Op() {
- case ir.OCALLFUNC:
- switch v := ir.StaticValue(call.X); {
- case v.Op() == ir.ONAME && v.(*ir.Name).Class == ir.PFUNC:
- fn = v.(*ir.Name)
- case v.Op() == ir.OCLOSURE:
- fn = v.(*ir.ClosureExpr).Func.Nname
- }
- case ir.OCALLMETH:
- fn = ir.MethodExprName(call.X)
- }
-
- fntype := call.X.Type()
- if fn != nil {
- fntype = fn.Type()
- }
-
- if ks != nil && fn != nil && e.inMutualBatch(fn) {
- for i, result := range fn.Type().Results().FieldSlice() {
- e.expr(ks[i], ir.AsNode(result.Nname))
- }
- }
-
- if r := fntype.Recv(); r != nil {
- argument(e.tagHole(ks, fn, r), call.X.(*ir.SelectorExpr).X)
- } else {
- // Evaluate callee function expression.
- argument(e.discardHole(), call.X)
- }
-
- args := call.Args
- for i, param := range fntype.Params().FieldSlice() {
- argument(e.tagHole(ks, fn, param), args[i])
- }
-
- case ir.OAPPEND:
- call := call.(*ir.CallExpr)
- args := call.Args
-
- // Appendee slice may flow directly to the result, if
- // it has enough capacity. Alternatively, a new heap
- // slice might be allocated, and all slice elements
- // might flow to heap.
- appendeeK := ks[0]
- if args[0].Type().Elem().HasPointers() {
- appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
- }
- argument(appendeeK, args[0])
-
- if call.IsDDD {
- appendedK := e.discardHole()
- if args[1].Type().IsSlice() && args[1].Type().Elem().HasPointers() {
- appendedK = e.heapHole().deref(call, "appended slice...")
- }
- argument(appendedK, args[1])
- } else {
- for _, arg := range args[1:] {
- argument(e.heapHole(), arg)
- }
- }
-
- case ir.OCOPY:
- call := call.(*ir.BinaryExpr)
- argument(e.discardHole(), call.X)
-
- copiedK := e.discardHole()
- if call.Y.Type().IsSlice() && call.Y.Type().Elem().HasPointers() {
- copiedK = e.heapHole().deref(call, "copied slice")
- }
- argument(copiedK, call.Y)
-
- case ir.OPANIC:
- call := call.(*ir.UnaryExpr)
- argument(e.heapHole(), call.X)
-
- case ir.OCOMPLEX:
- call := call.(*ir.BinaryExpr)
- argument(e.discardHole(), call.X)
- argument(e.discardHole(), call.Y)
- case ir.ODELETE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
- call := call.(*ir.CallExpr)
- for _, arg := range call.Args {
- argument(e.discardHole(), arg)
- }
- case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE:
- call := call.(*ir.UnaryExpr)
- argument(e.discardHole(), call.X)
-
- case ir.OUNSAFEADD, ir.OUNSAFESLICE:
- call := call.(*ir.BinaryExpr)
- argument(ks[0], call.X)
- argument(e.discardHole(), call.Y)
- }
-}
-
-// tagHole returns a hole for evaluating an argument passed to param.
-// ks should contain the holes representing where the function
-// callee's results flows. fn is the statically-known callee function,
-// if any.
-func (e *escape) tagHole(ks []hole, fn *ir.Name, param *types.Field) hole {
- // If this is a dynamic call, we can't rely on param.Note.
- if fn == nil {
- return e.heapHole()
- }
-
- if e.inMutualBatch(fn) {
- return e.addr(ir.AsNode(param.Nname))
- }
-
- // Call to previously tagged function.
-
- if param.Note == UintptrEscapesNote {
- k := e.heapHole()
- k.uintptrEscapesHack = true
- return k
- }
-
- var tagKs []hole
-
- esc := parseLeaks(param.Note)
- if x := esc.Heap(); x >= 0 {
- tagKs = append(tagKs, e.heapHole().shift(x))
- }
-
- if ks != nil {
- for i := 0; i < numEscResults; i++ {
- if x := esc.Result(i); x >= 0 {
- tagKs = append(tagKs, ks[i].shift(x))
- }
- }
- }
-
- return e.teeHole(tagKs...)
-}
-
-// inMutualBatch reports whether function fn is in the batch of
-// mutually recursive functions being analyzed. When this is true,
-// fn has not yet been analyzed, so its parameters and results
-// should be incorporated directly into the flow graph instead of
-// relying on its escape analysis tagging.
-func (e *escape) inMutualBatch(fn *ir.Name) bool {
- if fn.Defn != nil && fn.Defn.Esc() < escFuncTagged {
- if fn.Defn.Esc() == escFuncUnknown {
- base.Fatalf("graph inconsistency: %v", fn)
- }
- return true
- }
- return false
-}
-
-// An hole represents a context for evaluation a Go
-// expression. E.g., when evaluating p in "x = **p", we'd have a hole
-// with dst==x and derefs==2.
-type hole struct {
- dst *location
- derefs int // >= -1
- notes *note
-
- // addrtaken indicates whether this context is taking the address of
- // the expression, independent of whether the address will actually
- // be stored into a variable.
- addrtaken bool
-
- // uintptrEscapesHack indicates this context is evaluating an
- // argument for a //go:uintptrescapes function.
- uintptrEscapesHack bool
-}
-
-type note struct {
- next *note
- where ir.Node
- why string
-}
-
-func (k hole) note(where ir.Node, why string) hole {
- if where == nil || why == "" {
- base.Fatalf("note: missing where/why")
- }
- if base.Flag.LowerM >= 2 || logopt.Enabled() {
- k.notes = &note{
- next: k.notes,
- where: where,
- why: why,
- }
- }
- return k
-}
-
-func (k hole) shift(delta int) hole {
- k.derefs += delta
- if k.derefs < -1 {
- base.Fatalf("derefs underflow: %v", k.derefs)
- }
- k.addrtaken = delta < 0
- return k
-}
-
-func (k hole) deref(where ir.Node, why string) hole { return k.shift(1).note(where, why) }
-func (k hole) addr(where ir.Node, why string) hole { return k.shift(-1).note(where, why) }
-
-func (k hole) dotType(t *types.Type, where ir.Node, why string) hole {
- if !t.IsInterface() && !types.IsDirectIface(t) {
- k = k.shift(1)
- }
- return k.note(where, why)
-}
-
-// teeHole returns a new hole that flows into each hole of ks,
-// similar to the Unix tee(1) command.
-func (e *escape) teeHole(ks ...hole) hole {
- if len(ks) == 0 {
- return e.discardHole()
- }
- if len(ks) == 1 {
- return ks[0]
- }
- // TODO(mdempsky): Optimize if there's only one non-discard hole?
-
- // Given holes "l1 = _", "l2 = **_", "l3 = *_", ..., create a
- // new temporary location ltmp, wire it into place, and return
- // a hole for "ltmp = _".
- loc := e.newLoc(nil, true)
- for _, k := range ks {
- // N.B., "p = &q" and "p = &tmp; tmp = q" are not
- // semantically equivalent. To combine holes like "l1
- // = _" and "l2 = &_", we'd need to wire them as "l1 =
- // *ltmp" and "l2 = ltmp" and return "ltmp = &_"
- // instead.
- if k.derefs < 0 {
- base.Fatalf("teeHole: negative derefs")
- }
-
- e.flow(k, loc)
- }
- return loc.asHole()
-}
-
-func (e *escape) dcl(n *ir.Name) hole {
- if n.Curfn != e.curfn || n.IsClosureVar() {
- base.Fatalf("bad declaration of %v", n)
- }
- loc := e.oldLoc(n)
- loc.loopDepth = e.loopDepth
- return loc.asHole()
-}
-
-// spill allocates a new location associated with expression n, flows
-// its address to k, and returns a hole that flows values to it. It's
-// intended for use with most expressions that allocate storage.
-func (e *escape) spill(k hole, n ir.Node) hole {
- loc := e.newLoc(n, true)
- e.flow(k.addr(n, "spill"), loc)
- return loc.asHole()
-}
-
-// later returns a new hole that flows into k, but some time later.
-// Its main effect is to prevent immediate reuse of temporary
-// variables introduced during Order.
-func (e *escape) later(k hole) hole {
- loc := e.newLoc(nil, false)
- e.flow(k, loc)
- return loc.asHole()
-}
-
-func (e *escape) newLoc(n ir.Node, transient bool) *location {
- if e.curfn == nil {
- base.Fatalf("e.curfn isn't set")
- }
- if n != nil && n.Type() != nil && n.Type().NotInHeap() {
- base.ErrorfAt(n.Pos(), "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type())
- }
-
- if n != nil && n.Op() == ir.ONAME {
- n = n.(*ir.Name).Canonical()
- }
- loc := &location{
- n: n,
- curfn: e.curfn,
- loopDepth: e.loopDepth,
- transient: transient,
- }
- e.allLocs = append(e.allLocs, loc)
- if n != nil {
- if n.Op() == ir.ONAME {
- n := n.(*ir.Name)
- if n.Curfn != e.curfn {
- base.Fatalf("curfn mismatch: %v != %v for %v", n.Curfn, e.curfn, n)
- }
-
- if n.Opt != nil {
- base.Fatalf("%v already has a location", n)
- }
- n.Opt = loc
- }
- }
- return loc
-}
-
-func (b *batch) oldLoc(n *ir.Name) *location {
- if n.Canonical().Opt == nil {
- base.Fatalf("%v has no location", n)
- }
- return n.Canonical().Opt.(*location)
-}
-
-func (l *location) asHole() hole {
- return hole{dst: l}
-}
-
-func (b *batch) flow(k hole, src *location) {
- if k.addrtaken {
- src.addrtaken = true
- }
-
- dst := k.dst
- if dst == &b.blankLoc {
- return
- }
- if dst == src && k.derefs >= 0 { // dst = dst, dst = *dst, ...
- return
- }
- if dst.escapes && k.derefs < 0 { // dst = &src
- if base.Flag.LowerM >= 2 || logopt.Enabled() {
- pos := base.FmtPos(src.n.Pos())
- if base.Flag.LowerM >= 2 {
- fmt.Printf("%s: %v escapes to heap:\n", pos, src.n)
- }
- explanation := b.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{})
- if logopt.Enabled() {
- var e_curfn *ir.Func // TODO(mdempsky): Fix.
- logopt.LogOpt(src.n.Pos(), "escapes", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation)
- }
-
- }
- src.escapes = true
- return
- }
-
- // TODO(mdempsky): Deduplicate edges?
- dst.edges = append(dst.edges, edge{src: src, derefs: k.derefs, notes: k.notes})
-}
-
-func (b *batch) heapHole() hole { return b.heapLoc.asHole() }
-func (b *batch) discardHole() hole { return b.blankLoc.asHole() }
-
-// walkAll computes the minimal dereferences between all pairs of
-// locations.
-func (b *batch) walkAll() {
- // We use a work queue to keep track of locations that we need
- // to visit, and repeatedly walk until we reach a fixed point.
- //
- // We walk once from each location (including the heap), and
- // then re-enqueue each location on its transition from
- // transient->!transient and !escapes->escapes, which can each
- // happen at most once. So we take Θ(len(e.allLocs)) walks.
-
- // LIFO queue, has enough room for e.allLocs and e.heapLoc.
- todo := make([]*location, 0, len(b.allLocs)+1)
- enqueue := func(loc *location) {
- if !loc.queued {
- todo = append(todo, loc)
- loc.queued = true
- }
- }
-
- for _, loc := range b.allLocs {
- enqueue(loc)
- }
- enqueue(&b.heapLoc)
-
- var walkgen uint32
- for len(todo) > 0 {
- root := todo[len(todo)-1]
- todo = todo[:len(todo)-1]
- root.queued = false
-
- walkgen++
- b.walkOne(root, walkgen, enqueue)
- }
-}
-
-// walkOne computes the minimal number of dereferences from root to
-// all other locations.
-func (b *batch) walkOne(root *location, walkgen uint32, enqueue func(*location)) {
- // The data flow graph has negative edges (from addressing
- // operations), so we use the Bellman-Ford algorithm. However,
- // we don't have to worry about infinite negative cycles since
- // we bound intermediate dereference counts to 0.
-
- root.walkgen = walkgen
- root.derefs = 0
- root.dst = nil
-
- todo := []*location{root} // LIFO queue
- for len(todo) > 0 {
- l := todo[len(todo)-1]
- todo = todo[:len(todo)-1]
-
- derefs := l.derefs
-
- // If l.derefs < 0, then l's address flows to root.
- addressOf := derefs < 0
- if addressOf {
- // For a flow path like "root = &l; l = x",
- // l's address flows to root, but x's does
- // not. We recognize this by lower bounding
- // derefs at 0.
- derefs = 0
-
- // If l's address flows to a non-transient
- // location, then l can't be transiently
- // allocated.
- if !root.transient && l.transient {
- l.transient = false
- enqueue(l)
- }
- }
-
- if b.outlives(root, l) {
- // l's value flows to root. If l is a function
- // parameter and root is the heap or a
- // corresponding result parameter, then record
- // that value flow for tagging the function
- // later.
- if l.isName(ir.PPARAM) {
- if (logopt.Enabled() || base.Flag.LowerM >= 2) && !l.escapes {
- if base.Flag.LowerM >= 2 {
- fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, b.explainLoc(root), derefs)
- }
- explanation := b.explainPath(root, l)
- if logopt.Enabled() {
- var e_curfn *ir.Func // TODO(mdempsky): Fix.
- logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e_curfn),
- fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, b.explainLoc(root), derefs), explanation)
- }
- }
- l.leakTo(root, derefs)
- }
-
- // If l's address flows somewhere that
- // outlives it, then l needs to be heap
- // allocated.
- if addressOf && !l.escapes {
- if logopt.Enabled() || base.Flag.LowerM >= 2 {
- if base.Flag.LowerM >= 2 {
- fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos()), l.n)
- }
- explanation := b.explainPath(root, l)
- if logopt.Enabled() {
- var e_curfn *ir.Func // TODO(mdempsky): Fix.
- logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation)
- }
- }
- l.escapes = true
- enqueue(l)
- continue
- }
- }
-
- for i, edge := range l.edges {
- if edge.src.escapes {
- continue
- }
- d := derefs + edge.derefs
- if edge.src.walkgen != walkgen || edge.src.derefs > d {
- edge.src.walkgen = walkgen
- edge.src.derefs = d
- edge.src.dst = l
- edge.src.dstEdgeIdx = i
- todo = append(todo, edge.src)
- }
- }
- }
-}
-
-// explainPath prints an explanation of how src flows to the walk root.
-func (b *batch) explainPath(root, src *location) []*logopt.LoggedOpt {
- visited := make(map[*location]bool)
- pos := base.FmtPos(src.n.Pos())
- var explanation []*logopt.LoggedOpt
- for {
- // Prevent infinite loop.
- if visited[src] {
- if base.Flag.LowerM >= 2 {
- fmt.Printf("%s: warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos)
- }
- break
- }
- visited[src] = true
- dst := src.dst
- edge := &dst.edges[src.dstEdgeIdx]
- if edge.src != src {
- base.Fatalf("path inconsistency: %v != %v", edge.src, src)
- }
-
- explanation = b.explainFlow(pos, dst, src, edge.derefs, edge.notes, explanation)
-
- if dst == root {
- break
- }
- src = dst
- }
-
- return explanation
-}
-
-func (b *batch) explainFlow(pos string, dst, srcloc *location, derefs int, notes *note, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt {
- ops := "&"
- if derefs >= 0 {
- ops = strings.Repeat("*", derefs)
- }
- print := base.Flag.LowerM >= 2
-
- flow := fmt.Sprintf(" flow: %s = %s%v:", b.explainLoc(dst), ops, b.explainLoc(srcloc))
- if print {
- fmt.Printf("%s:%s\n", pos, flow)
- }
- if logopt.Enabled() {
- var epos src.XPos
- if notes != nil {
- epos = notes.where.Pos()
- } else if srcloc != nil && srcloc.n != nil {
- epos = srcloc.n.Pos()
- }
- var e_curfn *ir.Func // TODO(mdempsky): Fix.
- explanation = append(explanation, logopt.NewLoggedOpt(epos, "escflow", "escape", ir.FuncName(e_curfn), flow))
- }
-
- for note := notes; note != nil; note = note.next {
- if print {
- fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos()))
- }
- if logopt.Enabled() {
- var e_curfn *ir.Func // TODO(mdempsky): Fix.
- explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos(), "escflow", "escape", ir.FuncName(e_curfn),
- fmt.Sprintf(" from %v (%v)", note.where, note.why)))
- }
- }
- return explanation
-}
-
-func (b *batch) explainLoc(l *location) string {
- if l == &b.heapLoc {
- return "{heap}"
- }
- if l.n == nil {
- // TODO(mdempsky): Omit entirely.
- return "{temp}"
- }
- if l.n.Op() == ir.ONAME {
- return fmt.Sprintf("%v", l.n)
- }
- return fmt.Sprintf("{storage for %v}", l.n)
-}
-
-// outlives reports whether values stored in l may survive beyond
-// other's lifetime if stack allocated.
-func (b *batch) outlives(l, other *location) bool {
- // The heap outlives everything.
- if l.escapes {
- return true
- }
-
- // We don't know what callers do with returned values, so
- // pessimistically we need to assume they flow to the heap and
- // outlive everything too.
- if l.isName(ir.PPARAMOUT) {
- // Exception: Directly called closures can return
- // locations allocated outside of them without forcing
- // them to the heap. For example:
- //
- // var u int // okay to stack allocate
- // *(func() *int { return &u }()) = 42
- if containsClosure(other.curfn, l.curfn) && l.curfn.ClosureCalled() {
- return false
- }
-
- return true
- }
-
- // If l and other are within the same function, then l
- // outlives other if it was declared outside other's loop
- // scope. For example:
- //
- // var l *int
- // for {
- // l = new(int)
- // }
- if l.curfn == other.curfn && l.loopDepth < other.loopDepth {
- return true
- }
-
- // If other is declared within a child closure of where l is
- // declared, then l outlives it. For example:
- //
- // var l *int
- // func() {
- // l = new(int)
- // }
- if containsClosure(l.curfn, other.curfn) {
- return true
- }
-
- return false
-}
-
-// containsClosure reports whether c is a closure contained within f.
-func containsClosure(f, c *ir.Func) bool {
- // Common case.
- if f == c {
- return false
- }
-
- // Closures within function Foo are named like "Foo.funcN..."
- // TODO(mdempsky): Better way to recognize this.
- fn := f.Sym().Name
- cn := c.Sym().Name
- return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
-}
-
-// leak records that parameter l leaks to sink.
-func (l *location) leakTo(sink *location, derefs int) {
- // If sink is a result parameter that doesn't escape (#44614)
- // and we can fit return bits into the escape analysis tag,
- // then record as a result leak.
- if !sink.escapes && sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn {
- ri := sink.resultIndex - 1
- if ri < numEscResults {
- // Leak to result parameter.
- l.paramEsc.AddResult(ri, derefs)
- return
- }
- }
-
- // Otherwise, record as heap leak.
- l.paramEsc.AddHeap(derefs)
-}
-
func (b *batch) finish(fns []*ir.Func) {
// Record parameter tags for package export data.
for _, fn := range fns {
@@ -1678,6 +288,11 @@ func (b *batch) finish(fns []*ir.Func) {
// Update n.Esc based on escape analysis results.
+ // Omit escape diagnostics for go/defer wrappers, at least for now.
+ // Historically, we haven't printed them, and test cases don't expect them.
+ // TODO(mdempsky): Update tests to expect this.
+ goDeferWrapper := n.Op() == ir.OCLOSURE && n.(*ir.ClosureExpr).Func.Wrapper()
+
if loc.escapes {
if n.Op() == ir.ONAME {
if base.Flag.CompilingRuntime {
@@ -1687,7 +302,7 @@ func (b *batch) finish(fns []*ir.Func) {
base.WarnfAt(n.Pos(), "moved to heap: %v", n)
}
} else {
- if base.Flag.LowerM != 0 {
+ if base.Flag.LowerM != 0 && !goDeferWrapper {
base.WarnfAt(n.Pos(), "%v escapes to heap", n)
}
if logopt.Enabled() {
@@ -1697,7 +312,7 @@ func (b *batch) finish(fns []*ir.Func) {
}
n.SetEsc(ir.EscHeap)
} else {
- if base.Flag.LowerM != 0 && n.Op() != ir.ONAME {
+ if base.Flag.LowerM != 0 && n.Op() != ir.ONAME && !goDeferWrapper {
base.WarnfAt(n.Pos(), "%v does not escape", n)
}
n.SetEsc(ir.EscNone)
@@ -1706,7 +321,7 @@ func (b *batch) finish(fns []*ir.Func) {
case ir.OCLOSURE:
n := n.(*ir.ClosureExpr)
n.SetTransient(true)
- case ir.OCALLPART:
+ case ir.OMETHVALUE:
n := n.(*ir.SelectorExpr)
n.SetTransient(true)
case ir.OSLICELIT:
@@ -1718,107 +333,19 @@ func (b *batch) finish(fns []*ir.Func) {
}
}
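
For orientation, these are roughly the diagnostics the branches above gate when building with -gcflags=-m: named variables whose address escapes are reported as "moved to heap", other allocations as "escapes to heap" or "does not escape". A minimal sketch (the messages in the comments are the usual output for code of this shape, not output captured from this CL):

	package p

	var sink *int

	func addr() {
		v := 0    // -m: "moved to heap: v"
		sink = &v // v's address outlives the frame via the global
	}

	func local() int {
		p := new(int) // -m: "new(int) does not escape"
		*p = 41
		return *p + 1
	}
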
-func (l *location) isName(c ir.Class) bool {
- return l.n != nil && l.n.Op() == ir.ONAME && l.n.(*ir.Name).Class == c
-}
-
-const numEscResults = 7
-
-// A leaks value represents a set of assignment flows from a parameter
-// to the heap or to any of its function's (first numEscResults)
-// result parameters.
-type leaks [1 + numEscResults]uint8
-
-// Empty reports whether l is an empty set (i.e., no assignment flows).
-func (l leaks) Empty() bool { return l == leaks{} }
-
-// Heap returns the minimum deref count of any assignment flow from l
-// to the heap. If no such flows exist, Heap returns -1.
-func (l leaks) Heap() int { return l.get(0) }
-
-// Result returns the minimum deref count of any assignment flow from
-// l to its function's i'th result parameter. If no such flows exist,
-// Result returns -1.
-func (l leaks) Result(i int) int { return l.get(1 + i) }
-
-// AddHeap adds an assignment flow from l to the heap.
-func (l *leaks) AddHeap(derefs int) { l.add(0, derefs) }
-
-// AddResult adds an assignment flow from l to its function's i'th
-// result parameter.
-func (l *leaks) AddResult(i, derefs int) { l.add(1+i, derefs) }
-
-func (l *leaks) setResult(i, derefs int) { l.set(1+i, derefs) }
-
-func (l leaks) get(i int) int { return int(l[i]) - 1 }
-
-func (l *leaks) add(i, derefs int) {
- if old := l.get(i); old < 0 || derefs < old {
- l.set(i, derefs)
- }
-}
-
-func (l *leaks) set(i, derefs int) {
- v := derefs + 1
- if v < 0 {
- base.Fatalf("invalid derefs count: %v", derefs)
- }
- if v > math.MaxUint8 {
- v = math.MaxUint8
- }
-
- l[i] = uint8(v)
-}
-
-// Optimize removes result flow paths that are equal in length or
-// longer than the shortest heap flow path.
-func (l *leaks) Optimize() {
- // If we have a path to the heap, then there's no use in
- // keeping equal or longer paths elsewhere.
- if x := l.Heap(); x >= 0 {
- for i := 0; i < numEscResults; i++ {
- if l.Result(i) >= x {
- l.setResult(i, -1)
- }
+// inMutualBatch reports whether function fn is in the batch of
+// mutually recursive functions being analyzed. When this is true,
+// fn has not yet been analyzed, so its parameters and results
+// should be incorporated directly into the flow graph instead of
+// relying on its escape analysis tagging.
+func (e *escape) inMutualBatch(fn *ir.Name) bool {
+ if fn.Defn != nil && fn.Defn.Esc() < escFuncTagged {
+ if fn.Defn.Esc() == escFuncUnknown {
+ base.Fatalf("graph inconsistency: %v", fn)
}
+ return true
}
-}
-
-var leakTagCache = map[leaks]string{}
-
-// Encode converts l into a binary string for export data.
-func (l leaks) Encode() string {
- if l.Heap() == 0 {
- // Space optimization: empty string encodes more
- // efficiently in export data.
- return ""
- }
- if s, ok := leakTagCache[l]; ok {
- return s
- }
-
- n := len(l)
- for n > 0 && l[n-1] == 0 {
- n--
- }
- s := "esc:" + string(l[:n])
- leakTagCache[l] = s
- return s
-}
-
-// parseLeaks parses a binary string representing a leaks set.
-func parseLeaks(s string) leaks {
- var l leaks
- if !strings.HasPrefix(s, "esc:") {
- l.AddHeap(0)
- return l
- }
- copy(l[:], s[4:])
- return l
-}
-
-func Funcs(all []ir.Node) {
- ir.VisitFuncsBottomUp(all, Batch)
+ return false
}
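
A hedged illustration of the mutual-recursion case this handles: the two functions below form one strongly connected component, so neither has been tagged when the other is walked, and their parameters and results are wired directly into the shared flow graph rather than read back from tags.

	package p

	// While analyzing f, g's Esc() is still below escFuncTagged, so
	// inMutualBatch reports true and p's flow through the recursion
	// is tracked precisely instead of pessimistically.
	func f(p *int, n int) *int {
		if n == 0 {
			return p
		}
		return g(p, n-1)
	}

	func g(p *int, n int) *int {
		return f(p, n-1)
	}
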
const (
@@ -1836,220 +363,6 @@ const (
nonlooping
)
-func isSliceSelfAssign(dst, src ir.Node) bool {
- // Detect the following special case.
- //
- // func (b *Buffer) Foo() {
- // n, m := ...
- // b.buf = b.buf[n:m]
- // }
- //
-	// This assignment is a no-op for escape analysis;
- // it does not store any new pointers into b that were not already there.
- // However, without this special case b will escape, because we assign to OIND/ODOTPTR.
- // Here we assume that the statement will not contain calls,
- // that is, that order will move any calls to init.
- // Otherwise base ONAME value could change between the moments
- // when we evaluate it for dst and for src.
-
- // dst is ONAME dereference.
- var dstX ir.Node
- switch dst.Op() {
- default:
- return false
- case ir.ODEREF:
- dst := dst.(*ir.StarExpr)
- dstX = dst.X
- case ir.ODOTPTR:
- dst := dst.(*ir.SelectorExpr)
- dstX = dst.X
- }
- if dstX.Op() != ir.ONAME {
- return false
- }
- // src is a slice operation.
- switch src.Op() {
- case ir.OSLICE, ir.OSLICE3, ir.OSLICESTR:
- // OK.
- case ir.OSLICEARR, ir.OSLICE3ARR:
- // Since arrays are embedded into containing object,
- // slice of non-pointer array will introduce a new pointer into b that was not already there
- // (pointer to b itself). After such assignment, if b contents escape,
- // b escapes as well. If we ignore such OSLICEARR, we will conclude
- // that b does not escape when b contents do.
- //
- // Pointer to an array is OK since it's not stored inside b directly.
- // For slicing an array (not pointer to array), there is an implicit OADDR.
- // We check that to determine non-pointer array slicing.
- src := src.(*ir.SliceExpr)
- if src.X.Op() == ir.OADDR {
- return false
- }
- default:
- return false
- }
- // slice is applied to ONAME dereference.
- var baseX ir.Node
- switch base := src.(*ir.SliceExpr).X; base.Op() {
- default:
- return false
- case ir.ODEREF:
- base := base.(*ir.StarExpr)
- baseX = base.X
- case ir.ODOTPTR:
- base := base.(*ir.SelectorExpr)
- baseX = base.X
- }
- if baseX.Op() != ir.ONAME {
- return false
- }
- // dst and src reference the same base ONAME.
- return dstX.(*ir.Name) == baseX.(*ir.Name)
-}
-
-// isSelfAssign reports whether assignment from src to dst can
-// be ignored by the escape analysis as it's effectively a self-assignment.
-func isSelfAssign(dst, src ir.Node) bool {
- if isSliceSelfAssign(dst, src) {
- return true
- }
-
- // Detect trivial assignments that assign back to the same object.
- //
- // It covers these cases:
- // val.x = val.y
- // val.x[i] = val.y[j]
- // val.x1.x2 = val.x1.y2
- // ... etc
- //
- // These assignments do not change assigned object lifetime.
-
- if dst == nil || src == nil || dst.Op() != src.Op() {
- return false
- }
-
- // The expression prefix must be both "safe" and identical.
- switch dst.Op() {
- case ir.ODOT, ir.ODOTPTR:
- // Safe trailing accessors that are permitted to differ.
- dst := dst.(*ir.SelectorExpr)
- src := src.(*ir.SelectorExpr)
- return ir.SameSafeExpr(dst.X, src.X)
- case ir.OINDEX:
- dst := dst.(*ir.IndexExpr)
- src := src.(*ir.IndexExpr)
- if mayAffectMemory(dst.Index) || mayAffectMemory(src.Index) {
- return false
- }
- return ir.SameSafeExpr(dst.X, src.X)
- default:
- return false
- }
-}
-
-// mayAffectMemory reports whether evaluation of n may affect the program's
-// memory state. If the expression can't affect memory state, then it can be
-// safely ignored by the escape analysis.
-func mayAffectMemory(n ir.Node) bool {
- // We may want to use a list of "memory safe" ops instead of generally
- // "side-effect free", which would include all calls and other ops that can
- // allocate or change global state. For now, it's safer to start with the latter.
- //
- // We're ignoring things like division by zero, index out of range,
- // and nil pointer dereference here.
-
- // TODO(rsc): It seems like it should be possible to replace this with
- // an ir.Any looking for any op that's not the ones in the case statement.
- // But that produces changes in the compiled output detected by buildall.
- switch n.Op() {
- case ir.ONAME, ir.OLITERAL, ir.ONIL:
- return false
-
- case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
- n := n.(*ir.BinaryExpr)
- return mayAffectMemory(n.X) || mayAffectMemory(n.Y)
-
- case ir.OINDEX:
- n := n.(*ir.IndexExpr)
- return mayAffectMemory(n.X) || mayAffectMemory(n.Index)
-
- case ir.OCONVNOP, ir.OCONV:
- n := n.(*ir.ConvExpr)
- return mayAffectMemory(n.X)
-
- case ir.OLEN, ir.OCAP, ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
- n := n.(*ir.UnaryExpr)
- return mayAffectMemory(n.X)
-
- case ir.ODOT, ir.ODOTPTR:
- n := n.(*ir.SelectorExpr)
- return mayAffectMemory(n.X)
-
- case ir.ODEREF:
- n := n.(*ir.StarExpr)
- return mayAffectMemory(n.X)
-
- default:
- return true
- }
-}
-
-// HeapAllocReason returns the reason the given Node must be heap
-// allocated, or the empty string if it does not need to be.
-func HeapAllocReason(n ir.Node) string {
- if n == nil || n.Type() == nil {
- return ""
- }
-
- // Parameters are always passed via the stack.
- if n.Op() == ir.ONAME {
- n := n.(*ir.Name)
- if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
- return ""
- }
- }
-
- if n.Type().Width > ir.MaxStackVarSize {
- return "too large for stack"
- }
-
- if (n.Op() == ir.ONEW || n.Op() == ir.OPTRLIT) && n.Type().Elem().Width > ir.MaxImplicitStackVarSize {
- return "too large for stack"
- }
-
- if n.Op() == ir.OCLOSURE && typecheck.ClosureType(n.(*ir.ClosureExpr)).Size() > ir.MaxImplicitStackVarSize {
- return "too large for stack"
- }
- if n.Op() == ir.OCALLPART && typecheck.PartialCallType(n.(*ir.SelectorExpr)).Size() > ir.MaxImplicitStackVarSize {
- return "too large for stack"
- }
-
- if n.Op() == ir.OMAKESLICE {
- n := n.(*ir.MakeExpr)
- r := n.Cap
- if r == nil {
- r = n.Len
- }
- if !ir.IsSmallIntConst(r) {
- return "non-constant size"
- }
- if t := n.Type(); t.Elem().Width != 0 && ir.Int64Val(r) > ir.MaxImplicitStackVarSize/t.Elem().Width {
- return "too large for stack"
- }
- }
-
- return ""
-}
-
-// This special tag is applied to uintptr variables
-// that we believe may hold unsafe.Pointers for
-// calls into assembly functions.
-const UnsafeUintptrNote = "unsafe-uintptr"
-
-// This special tag is applied to uintptr parameters of functions
-// marked go:uintptrescapes.
-const UintptrEscapesNote = "uintptr-escapes"
-
func (b *batch) paramTag(fn *ir.Func, narg int, f *types.Field) string {
name := func() string {
if f.Sym != nil {
@@ -2058,6 +371,11 @@ func (b *batch) paramTag(fn *ir.Func, narg int, f *types.Field) string {
return fmt.Sprintf("arg#%d", narg)
}
+ // Only report diagnostics for user code;
+ // not for wrappers generated around them.
+ // TODO(mdempsky): Generalize this.
+ diagnose := base.Flag.LowerM != 0 && !(fn.Wrapper() || fn.Dupok())
+
if len(fn.Body) == 0 {
// Assume that uintptr arguments must be held live across the call.
// This is most important for syscall.Syscall.
@@ -2065,11 +383,13 @@ func (b *batch) paramTag(fn *ir.Func, narg int, f *types.Field) string {
// This really doesn't have much to do with escape analysis per se,
// but we are reusing the ability to annotate an individual function
// argument and pass those annotations along to importing code.
+ fn.Pragma |= ir.UintptrKeepAlive
+
if f.Type.IsUintptr() {
- if base.Flag.LowerM != 0 {
+ if diagnose {
base.WarnfAt(f.Pos, "assuming %v is unsafe uintptr", name())
}
- return UnsafeUintptrNote
+ return ""
}
if !f.Type.HasPointers() { // don't bother tagging for scalars
@@ -2081,11 +401,11 @@ func (b *batch) paramTag(fn *ir.Func, narg int, f *types.Field) string {
// External functions are assumed unsafe, unless
// //go:noescape is given before the declaration.
if fn.Pragma&ir.Noescape != 0 {
- if base.Flag.LowerM != 0 && f.Sym != nil {
+ if diagnose && f.Sym != nil {
base.WarnfAt(f.Pos, "%v does not escape", name())
}
} else {
- if base.Flag.LowerM != 0 && f.Sym != nil {
+ if diagnose && f.Sym != nil {
base.WarnfAt(f.Pos, "leaking param: %v", name())
}
esc.AddHeap(0)
@@ -2095,18 +415,20 @@ func (b *batch) paramTag(fn *ir.Func, narg int, f *types.Field) string {
}
if fn.Pragma&ir.UintptrEscapes != 0 {
+ fn.Pragma |= ir.UintptrKeepAlive
+
if f.Type.IsUintptr() {
- if base.Flag.LowerM != 0 {
+ if diagnose {
base.WarnfAt(f.Pos, "marking %v as escaping uintptr", name())
}
- return UintptrEscapesNote
+ return ""
}
if f.IsDDD() && f.Type.Elem().IsUintptr() {
// final argument is ...uintptr.
- if base.Flag.LowerM != 0 {
+ if diagnose {
base.WarnfAt(f.Pos, "marking %v as escaping ...uintptr", name())
}
- return UintptrEscapesNote
+ return ""
}
}
@@ -2125,7 +447,7 @@ func (b *batch) paramTag(fn *ir.Func, narg int, f *types.Field) string {
esc := loc.paramEsc
esc.Optimize()
- if base.Flag.LowerM != 0 && !loc.escapes {
+ if diagnose && !loc.escapes {
if esc.Empty() {
base.WarnfAt(f.Pos, "%v does not escape", name())
}
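
The bodyless-function and pragma branches above are easiest to read against declarations like these (an illustrative fragment only: the names are invented and the bodies would have to live in assembly or behind //go:linkname):

	// No body, no pragma: pointer parameters are tagged as leaking,
	// and uintptr parameters are assumed to hold live pointers for
	// the duration of the call (UintptrKeepAlive is now set).
	func rawhash(p *byte, n int) uint64

	// A //go:noescape promise means the parameter is reported as
	// "does not escape" instead.
	//
	//go:noescape
	func copyinto(dst *[64]byte, src *byte, n int)

	// //go:uintptrescapes marks uintptr arguments as escaping and
	// keeps them alive; after this change that fact is carried by
	// the pragma rather than a special parameter tag.
	//
	//go:uintptrescapes
	func trap(fn, a1, a2, a3 uintptr) uintptr
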
diff --git a/src/cmd/compile/internal/escape/expr.go b/src/cmd/compile/internal/escape/expr.go
new file mode 100644
index 0000000000..62afb5b928
--- /dev/null
+++ b/src/cmd/compile/internal/escape/expr.go
@@ -0,0 +1,335 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+)
+
+// expr models evaluating an expression n and flowing the result into
+// hole k.
+func (e *escape) expr(k hole, n ir.Node) {
+ if n == nil {
+ return
+ }
+ e.stmts(n.Init())
+ e.exprSkipInit(k, n)
+}
+
+func (e *escape) exprSkipInit(k hole, n ir.Node) {
+ if n == nil {
+ return
+ }
+
+ lno := ir.SetPos(n)
+ defer func() {
+ base.Pos = lno
+ }()
+
+ if k.derefs >= 0 && !n.Type().HasPointers() {
+ k.dst = &e.blankLoc
+ }
+
+ switch n.Op() {
+ default:
+ base.Fatalf("unexpected expr: %s %v", n.Op().String(), n)
+
+ case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OGETCALLERPC, ir.OGETCALLERSP, ir.OTYPE, ir.OMETHEXPR, ir.OLINKSYMOFFSET:
+ // nop
+
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ if n.Class == ir.PFUNC || n.Class == ir.PEXTERN {
+ return
+ }
+ e.flow(k, e.oldLoc(n))
+
+ case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
+ n := n.(*ir.UnaryExpr)
+ e.discard(n.X)
+ case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+ n := n.(*ir.BinaryExpr)
+ e.discard(n.X)
+ e.discard(n.Y)
+ case ir.OANDAND, ir.OOROR:
+ n := n.(*ir.LogicalExpr)
+ e.discard(n.X)
+ e.discard(n.Y)
+ case ir.OADDR:
+ n := n.(*ir.AddrExpr)
+ e.expr(k.addr(n, "address-of"), n.X) // "address-of"
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ e.expr(k.deref(n, "indirection"), n.X) // "indirection"
+ case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
+ n := n.(*ir.SelectorExpr)
+ e.expr(k.note(n, "dot"), n.X)
+ case ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ e.expr(k.deref(n, "dot of pointer"), n.X) // "dot of pointer"
+ case ir.ODOTTYPE, ir.ODOTTYPE2:
+ n := n.(*ir.TypeAssertExpr)
+ e.expr(k.dotType(n.Type(), n, "dot"), n.X)
+ case ir.ODYNAMICDOTTYPE, ir.ODYNAMICDOTTYPE2:
+ n := n.(*ir.DynamicTypeAssertExpr)
+ e.expr(k.dotType(n.Type(), n, "dot"), n.X)
+ // n.T doesn't need to be tracked; it always points to read-only storage.
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ if n.X.Type().IsArray() {
+ e.expr(k.note(n, "fixed-array-index-of"), n.X)
+ } else {
+ // TODO(mdempsky): Fix why reason text.
+ e.expr(k.deref(n, "dot of pointer"), n.X)
+ }
+ e.discard(n.Index)
+ case ir.OINDEXMAP:
+ n := n.(*ir.IndexExpr)
+ e.discard(n.X)
+ e.discard(n.Index)
+ case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR:
+ n := n.(*ir.SliceExpr)
+ e.expr(k.note(n, "slice"), n.X)
+ e.discard(n.Low)
+ e.discard(n.High)
+ e.discard(n.Max)
+
+ case ir.OCONV, ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ if ir.ShouldCheckPtr(e.curfn, 2) && n.Type().IsUnsafePtr() && n.X.Type().IsPtr() {
+ // When -d=checkptr=2 is enabled, treat
+ // conversions to unsafe.Pointer as an
+ // escaping operation. This allows better
+ // runtime instrumentation, since we can more
+ // easily detect object boundaries on the heap
+ // than the stack.
+ e.assignHeap(n.X, "conversion to unsafe.Pointer", n)
+ } else if n.Type().IsUnsafePtr() && n.X.Type().IsUintptr() {
+ e.unsafeValue(k, n.X)
+ } else {
+ e.expr(k, n.X)
+ }
+ case ir.OCONVIFACE, ir.OCONVIDATA:
+ n := n.(*ir.ConvExpr)
+ if !n.X.Type().IsInterface() && !types.IsDirectIface(n.X.Type()) {
+ k = e.spill(k, n)
+ }
+ e.expr(k.note(n, "interface-converted"), n.X)
+ case ir.OEFACE:
+ n := n.(*ir.BinaryExpr)
+ // Note: n.X is not needed because it can never point to memory that might escape.
+ e.expr(k, n.Y)
+ case ir.OIDATA, ir.OSPTR:
+ n := n.(*ir.UnaryExpr)
+ e.expr(k, n.X)
+ case ir.OSLICE2ARRPTR:
+ // the slice pointer flows directly to the result
+ n := n.(*ir.ConvExpr)
+ e.expr(k, n.X)
+ case ir.ORECV:
+ n := n.(*ir.UnaryExpr)
+ e.discard(n.X)
+
+ case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OINLCALL, ir.OLEN, ir.OCAP, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY, ir.ORECOVER, ir.OUNSAFEADD, ir.OUNSAFESLICE:
+ e.call([]hole{k}, n)
+
+ case ir.ONEW:
+ n := n.(*ir.UnaryExpr)
+ e.spill(k, n)
+
+ case ir.OMAKESLICE:
+ n := n.(*ir.MakeExpr)
+ e.spill(k, n)
+ e.discard(n.Len)
+ e.discard(n.Cap)
+ case ir.OMAKECHAN:
+ n := n.(*ir.MakeExpr)
+ e.discard(n.Len)
+ case ir.OMAKEMAP:
+ n := n.(*ir.MakeExpr)
+ e.spill(k, n)
+ e.discard(n.Len)
+
+ case ir.OMETHVALUE:
+ // Flow the receiver argument to both the closure and
+ // to the receiver parameter.
+
+ n := n.(*ir.SelectorExpr)
+ closureK := e.spill(k, n)
+
+ m := n.Selection
+
+ // We don't know how the method value will be called
+ // later, so conservatively assume the result
+ // parameters all flow to the heap.
+ //
+ // TODO(mdempsky): Change ks into a callback, so that
+ // we don't have to create this slice?
+ var ks []hole
+ for i := m.Type.NumResults(); i > 0; i-- {
+ ks = append(ks, e.heapHole())
+ }
+ name, _ := m.Nname.(*ir.Name)
+ paramK := e.tagHole(ks, name, m.Type.Recv())
+
+ e.expr(e.teeHole(paramK, closureK), n.X)
+
+ case ir.OPTRLIT:
+ n := n.(*ir.AddrExpr)
+ e.expr(e.spill(k, n), n.X)
+
+ case ir.OARRAYLIT:
+ n := n.(*ir.CompLitExpr)
+ for _, elt := range n.List {
+ if elt.Op() == ir.OKEY {
+ elt = elt.(*ir.KeyExpr).Value
+ }
+ e.expr(k.note(n, "array literal element"), elt)
+ }
+
+ case ir.OSLICELIT:
+ n := n.(*ir.CompLitExpr)
+ k = e.spill(k, n)
+
+ for _, elt := range n.List {
+ if elt.Op() == ir.OKEY {
+ elt = elt.(*ir.KeyExpr).Value
+ }
+ e.expr(k.note(n, "slice-literal-element"), elt)
+ }
+
+ case ir.OSTRUCTLIT:
+ n := n.(*ir.CompLitExpr)
+ for _, elt := range n.List {
+ e.expr(k.note(n, "struct literal element"), elt.(*ir.StructKeyExpr).Value)
+ }
+
+ case ir.OMAPLIT:
+ n := n.(*ir.CompLitExpr)
+ e.spill(k, n)
+
+ // Map keys and values are always stored in the heap.
+ for _, elt := range n.List {
+ elt := elt.(*ir.KeyExpr)
+ e.assignHeap(elt.Key, "map literal key", n)
+ e.assignHeap(elt.Value, "map literal value", n)
+ }
+
+ case ir.OCLOSURE:
+ n := n.(*ir.ClosureExpr)
+ k = e.spill(k, n)
+ e.closures = append(e.closures, closure{k, n})
+
+ if fn := n.Func; fn.IsHiddenClosure() {
+ for _, cv := range fn.ClosureVars {
+ if loc := e.oldLoc(cv); !loc.captured {
+ loc.captured = true
+
+ // Ignore reassignments to the variable in straightline code
+ // preceding the first capture by a closure.
+ if loc.loopDepth == e.loopDepth {
+ loc.reassigned = false
+ }
+ }
+ }
+
+ for _, n := range fn.Dcl {
+ // Add locations for local variables of the
+ // closure, if needed, in case we're not including
+ // the closure func in the batch for escape
+ // analysis (happens for escape analysis called
+ // from reflectdata.methodWrapper)
+ if n.Op() == ir.ONAME && n.Opt == nil {
+ e.with(fn).newLoc(n, false)
+ }
+ }
+ e.walkFunc(fn)
+ }
+
+ case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR:
+ n := n.(*ir.ConvExpr)
+ e.spill(k, n)
+ e.discard(n.X)
+
+ case ir.OADDSTR:
+ n := n.(*ir.AddStringExpr)
+ e.spill(k, n)
+
+ // Arguments of OADDSTR never escape;
+ // runtime.concatstrings makes sure of that.
+ e.discards(n.List)
+
+ case ir.ODYNAMICTYPE:
+ // Nothing to do - argument is a *runtime._type (+ maybe a *runtime.itab) pointing to static data section
+ }
+}
+
+// unsafeValue evaluates a uintptr-typed arithmetic expression looking
+// for conversions from an unsafe.Pointer.
+func (e *escape) unsafeValue(k hole, n ir.Node) {
+ if n.Type().Kind() != types.TUINTPTR {
+ base.Fatalf("unexpected type %v for %v", n.Type(), n)
+ }
+ if k.addrtaken {
+ base.Fatalf("unexpected addrtaken")
+ }
+
+ e.stmts(n.Init())
+
+ switch n.Op() {
+ case ir.OCONV, ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ if n.X.Type().IsUnsafePtr() {
+ e.expr(k, n.X)
+ } else {
+ e.discard(n.X)
+ }
+ case ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ if ir.IsReflectHeaderDataField(n) {
+ e.expr(k.deref(n, "reflect.Header.Data"), n.X)
+ } else {
+ e.discard(n.X)
+ }
+ case ir.OPLUS, ir.ONEG, ir.OBITNOT:
+ n := n.(*ir.UnaryExpr)
+ e.unsafeValue(k, n.X)
+ case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OAND, ir.OANDNOT:
+ n := n.(*ir.BinaryExpr)
+ e.unsafeValue(k, n.X)
+ e.unsafeValue(k, n.Y)
+ case ir.OLSH, ir.ORSH:
+ n := n.(*ir.BinaryExpr)
+ e.unsafeValue(k, n.X)
+ // RHS need not be uintptr-typed (#32959) and can't meaningfully
+ // flow pointers anyway.
+ e.discard(n.Y)
+ default:
+ e.exprSkipInit(e.discardHole(), n)
+ }
+}
+
+// discard evaluates an expression n for side-effects, but discards
+// its value.
+func (e *escape) discard(n ir.Node) {
+ e.expr(e.discardHole(), n)
+}
+
+func (e *escape) discards(l ir.Nodes) {
+ for _, n := range l {
+ e.discard(n)
+ }
+}
+
+// spill allocates a new location associated with expression n, flows
+// its address to k, and returns a hole that flows values to it. It's
+// intended for use with most expressions that allocate storage.
+func (e *escape) spill(k hole, n ir.Node) hole {
+ loc := e.newLoc(n, true)
+ e.flow(k.addr(n, "spill"), loc)
+ return loc.asHole()
+}
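
To see how these expression holes compose, consider a pair of functions (a sketch of the usual outcome, not output from this CL): ONEW spills a fresh location, and whether that location escapes then depends only on where its address flows.

	package p

	// The new(int) location's address flows into the result
	// parameter, which outlives the frame, so it is heap-allocated.
	func leak() *int {
		return new(int)
	}

	// Here the address never leaves the frame, so the same spill can
	// typically stay on the stack.
	func stay() int {
		p := new(int)
		*p = 1
		return *p
	}
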
diff --git a/src/cmd/compile/internal/escape/graph.go b/src/cmd/compile/internal/escape/graph.go
new file mode 100644
index 0000000000..d3ae1da693
--- /dev/null
+++ b/src/cmd/compile/internal/escape/graph.go
@@ -0,0 +1,324 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/types"
+ "fmt"
+)
+
+// Below we implement the methods for walking the AST and recording
+// data flow edges. Note that because a sub-expression might have
+// side-effects, it's important to always visit the entire AST.
+//
+// For example, write either:
+//
+// if x {
+// e.discard(n.Left)
+// } else {
+// e.value(k, n.Left)
+// }
+//
+// or
+//
+// if x {
+// k = e.discardHole()
+// }
+// e.value(k, n.Left)
+//
+// Do NOT write:
+//
+// // BAD: possibly loses side-effects within n.Left
+// if !x {
+// e.value(k, n.Left)
+// }
+
+// A location represents an abstract location that stores a Go
+// variable.
+type location struct {
+ n ir.Node // represented variable or expression, if any
+ curfn *ir.Func // enclosing function
+ edges []edge // incoming edges
+ loopDepth int // loopDepth at declaration
+
+ // resultIndex records the tuple index (starting at 1) for
+ // PPARAMOUT variables within their function's result type.
+ // For non-PPARAMOUT variables it's 0.
+ resultIndex int
+
+ // derefs and walkgen are used during walkOne to track the
+ // minimal dereferences from the walk root.
+ derefs int // >= -1
+ walkgen uint32
+
+	// dst and dstEdgeIdx track the next immediate assignment
+	// destination location during walkOne, along with the index
+ // of the edge pointing back to this location.
+ dst *location
+ dstEdgeIdx int
+
+ // queued is used by walkAll to track whether this location is
+ // in the walk queue.
+ queued bool
+
+ // escapes reports whether the represented variable's address
+ // escapes; that is, whether the variable must be heap
+ // allocated.
+ escapes bool
+
+ // transient reports whether the represented expression's
+ // address does not outlive the statement; that is, whether
+ // its storage can be immediately reused.
+ transient bool
+
+ // paramEsc records the represented parameter's leak set.
+ paramEsc leaks
+
+ captured bool // has a closure captured this variable?
+ reassigned bool // has this variable been reassigned?
+ addrtaken bool // has this variable's address been taken?
+}
+
+// An edge represents an assignment edge between two Go variables.
+type edge struct {
+ src *location
+ derefs int // >= -1
+ notes *note
+}
+
+func (l *location) asHole() hole {
+ return hole{dst: l}
+}
+
+// leak records that parameter l leaks to sink.
+func (l *location) leakTo(sink *location, derefs int) {
+ // If sink is a result parameter that doesn't escape (#44614)
+ // and we can fit return bits into the escape analysis tag,
+ // then record as a result leak.
+ if !sink.escapes && sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn {
+ ri := sink.resultIndex - 1
+ if ri < numEscResults {
+ // Leak to result parameter.
+ l.paramEsc.AddResult(ri, derefs)
+ return
+ }
+ }
+
+ // Otherwise, record as heap leak.
+ l.paramEsc.AddHeap(derefs)
+}
+
+func (l *location) isName(c ir.Class) bool {
+ return l.n != nil && l.n.Op() == ir.ONAME && l.n.(*ir.Name).Class == c
+}
+
+// A hole represents a context for evaluating a Go
+// expression. E.g., when evaluating p in "x = **p", we'd have a hole
+// with dst==x and derefs==2.
+type hole struct {
+ dst *location
+ derefs int // >= -1
+ notes *note
+
+ // addrtaken indicates whether this context is taking the address of
+ // the expression, independent of whether the address will actually
+ // be stored into a variable.
+ addrtaken bool
+}
+
+type note struct {
+ next *note
+ where ir.Node
+ why string
+}
+
+func (k hole) note(where ir.Node, why string) hole {
+ if where == nil || why == "" {
+ base.Fatalf("note: missing where/why")
+ }
+ if base.Flag.LowerM >= 2 || logopt.Enabled() {
+ k.notes = &note{
+ next: k.notes,
+ where: where,
+ why: why,
+ }
+ }
+ return k
+}
+
+func (k hole) shift(delta int) hole {
+ k.derefs += delta
+ if k.derefs < -1 {
+ base.Fatalf("derefs underflow: %v", k.derefs)
+ }
+ k.addrtaken = delta < 0
+ return k
+}
+
+func (k hole) deref(where ir.Node, why string) hole { return k.shift(1).note(where, why) }
+func (k hole) addr(where ir.Node, why string) hole { return k.shift(-1).note(where, why) }
+
+func (k hole) dotType(t *types.Type, where ir.Node, why string) hole {
+ if !t.IsInterface() && !types.IsDirectIface(t) {
+ k = k.shift(1)
+ }
+ return k.note(where, why)
+}
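
Reading the helpers above together, and mirroring the comment on hole: evaluating p in "x = **p" uses a hole with dst == x and derefs == 2, while evaluating y in "x = &y" uses derefs == -1 with addrtaken set; shift never lets derefs fall below -1. A small worked fragment:

	package p

	func holes(p **int, y int) {
		var x int
		x = **p // p's hole: derefs == 2 (two dereferences between p and x)
		_ = x

		var q *int
		q = &y // y's hole: derefs == -1, addrtaken == true
		_ = q
	}
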
+
+func (b *batch) flow(k hole, src *location) {
+ if k.addrtaken {
+ src.addrtaken = true
+ }
+
+ dst := k.dst
+ if dst == &b.blankLoc {
+ return
+ }
+ if dst == src && k.derefs >= 0 { // dst = dst, dst = *dst, ...
+ return
+ }
+ if dst.escapes && k.derefs < 0 { // dst = &src
+ if base.Flag.LowerM >= 2 || logopt.Enabled() {
+ pos := base.FmtPos(src.n.Pos())
+ if base.Flag.LowerM >= 2 {
+ fmt.Printf("%s: %v escapes to heap:\n", pos, src.n)
+ }
+ explanation := b.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{})
+ if logopt.Enabled() {
+ var e_curfn *ir.Func // TODO(mdempsky): Fix.
+ logopt.LogOpt(src.n.Pos(), "escapes", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation)
+ }
+
+ }
+ src.escapes = true
+ return
+ }
+
+ // TODO(mdempsky): Deduplicate edges?
+ dst.edges = append(dst.edges, edge{src: src, derefs: k.derefs, notes: k.notes})
+}
+
+func (b *batch) heapHole() hole { return b.heapLoc.asHole() }
+func (b *batch) discardHole() hole { return b.blankLoc.asHole() }
+
+func (b *batch) oldLoc(n *ir.Name) *location {
+ if n.Canonical().Opt == nil {
+ base.Fatalf("%v has no location", n)
+ }
+ return n.Canonical().Opt.(*location)
+}
+
+func (e *escape) newLoc(n ir.Node, transient bool) *location {
+ if e.curfn == nil {
+ base.Fatalf("e.curfn isn't set")
+ }
+ if n != nil && n.Type() != nil && n.Type().NotInHeap() {
+ base.ErrorfAt(n.Pos(), "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type())
+ }
+
+ if n != nil && n.Op() == ir.ONAME {
+ if canon := n.(*ir.Name).Canonical(); n != canon {
+ base.Fatalf("newLoc on non-canonical %v (canonical is %v)", n, canon)
+ }
+ }
+ loc := &location{
+ n: n,
+ curfn: e.curfn,
+ loopDepth: e.loopDepth,
+ transient: transient,
+ }
+ e.allLocs = append(e.allLocs, loc)
+ if n != nil {
+ if n.Op() == ir.ONAME {
+ n := n.(*ir.Name)
+ if n.Class == ir.PPARAM && n.Curfn == nil {
+ // ok; hidden parameter
+ } else if n.Curfn != e.curfn {
+ base.Fatalf("curfn mismatch: %v != %v for %v", n.Curfn, e.curfn, n)
+ }
+
+ if n.Opt != nil {
+ base.Fatalf("%v already has a location", n)
+ }
+ n.Opt = loc
+ }
+ }
+ return loc
+}
+
+// teeHole returns a new hole that flows into each hole of ks,
+// similar to the Unix tee(1) command.
+func (e *escape) teeHole(ks ...hole) hole {
+ if len(ks) == 0 {
+ return e.discardHole()
+ }
+ if len(ks) == 1 {
+ return ks[0]
+ }
+ // TODO(mdempsky): Optimize if there's only one non-discard hole?
+
+ // Given holes "l1 = _", "l2 = **_", "l3 = *_", ..., create a
+ // new temporary location ltmp, wire it into place, and return
+ // a hole for "ltmp = _".
+ loc := e.newLoc(nil, true)
+ for _, k := range ks {
+ // N.B., "p = &q" and "p = &tmp; tmp = q" are not
+ // semantically equivalent. To combine holes like "l1
+ // = _" and "l2 = &_", we'd need to wire them as "l1 =
+ // *ltmp" and "l2 = ltmp" and return "ltmp = &_"
+ // instead.
+ if k.derefs < 0 {
+ base.Fatalf("teeHole: negative derefs")
+ }
+
+ e.flow(k, loc)
+ }
+ return loc.asHole()
+}
+
+// later returns a new hole that flows into k, but some time later.
+// Its main effect is to prevent immediate reuse of temporary
+// variables introduced during Order.
+func (e *escape) later(k hole) hole {
+ loc := e.newLoc(nil, false)
+ e.flow(k, loc)
+ return loc.asHole()
+}
+
+// Fmt is called from node printing to print information about escape analysis results.
+func Fmt(n ir.Node) string {
+ text := ""
+ switch n.Esc() {
+ case ir.EscUnknown:
+ break
+
+ case ir.EscHeap:
+ text = "esc(h)"
+
+ case ir.EscNone:
+ text = "esc(no)"
+
+ case ir.EscNever:
+ text = "esc(N)"
+
+ default:
+ text = fmt.Sprintf("esc(%d)", n.Esc())
+ }
+
+ if n.Op() == ir.ONAME {
+ n := n.(*ir.Name)
+ if loc, ok := n.Opt.(*location); ok && loc.loopDepth != 0 {
+ if text != "" {
+ text += " "
+ }
+ text += fmt.Sprintf("ld(%d)", loc.loopDepth)
+ }
+ }
+
+ return text
+}
diff --git a/src/cmd/compile/internal/escape/leaks.go b/src/cmd/compile/internal/escape/leaks.go
new file mode 100644
index 0000000000..4c848a5ee7
--- /dev/null
+++ b/src/cmd/compile/internal/escape/leaks.go
@@ -0,0 +1,106 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "math"
+ "strings"
+)
+
+const numEscResults = 7
+
+// A leaks value represents a set of assignment flows from a parameter
+// to the heap or to any of its function's (first numEscResults)
+// result parameters.
+type leaks [1 + numEscResults]uint8
+
+// Empty reports whether l is an empty set (i.e., no assignment flows).
+func (l leaks) Empty() bool { return l == leaks{} }
+
+// Heap returns the minimum deref count of any assignment flow from l
+// to the heap. If no such flows exist, Heap returns -1.
+func (l leaks) Heap() int { return l.get(0) }
+
+// Result returns the minimum deref count of any assignment flow from
+// l to its function's i'th result parameter. If no such flows exist,
+// Result returns -1.
+func (l leaks) Result(i int) int { return l.get(1 + i) }
+
+// AddHeap adds an assignment flow from l to the heap.
+func (l *leaks) AddHeap(derefs int) { l.add(0, derefs) }
+
+// AddResult adds an assignment flow from l to its function's i'th
+// result parameter.
+func (l *leaks) AddResult(i, derefs int) { l.add(1+i, derefs) }
+
+func (l *leaks) setResult(i, derefs int) { l.set(1+i, derefs) }
+
+func (l leaks) get(i int) int { return int(l[i]) - 1 }
+
+func (l *leaks) add(i, derefs int) {
+ if old := l.get(i); old < 0 || derefs < old {
+ l.set(i, derefs)
+ }
+}
+
+func (l *leaks) set(i, derefs int) {
+ v := derefs + 1
+ if v < 0 {
+ base.Fatalf("invalid derefs count: %v", derefs)
+ }
+ if v > math.MaxUint8 {
+ v = math.MaxUint8
+ }
+
+ l[i] = uint8(v)
+}
+
+// Optimize removes result flow paths that are equal in length or
+// longer than the shortest heap flow path.
+func (l *leaks) Optimize() {
+ // If we have a path to the heap, then there's no use in
+ // keeping equal or longer paths elsewhere.
+ if x := l.Heap(); x >= 0 {
+ for i := 0; i < numEscResults; i++ {
+ if l.Result(i) >= x {
+ l.setResult(i, -1)
+ }
+ }
+ }
+}
+
+var leakTagCache = map[leaks]string{}
+
+// Encode converts l into a binary string for export data.
+func (l leaks) Encode() string {
+ if l.Heap() == 0 {
+ // Space optimization: empty string encodes more
+ // efficiently in export data.
+ return ""
+ }
+ if s, ok := leakTagCache[l]; ok {
+ return s
+ }
+
+ n := len(l)
+ for n > 0 && l[n-1] == 0 {
+ n--
+ }
+ s := "esc:" + string(l[:n])
+ leakTagCache[l] = s
+ return s
+}
+
+// parseLeaks parses a binary string representing a leaks set.
+func parseLeaks(s string) leaks {
+ var l leaks
+ if !strings.HasPrefix(s, "esc:") {
+ l.AddHeap(0)
+ return l
+ }
+ copy(l[:], s[4:])
+ return l
+}
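
A self-contained sketch of the tag layout (it copies the byte scheme above rather than importing the unexported type; the particular flows are invented for illustration):

	package main

	import "fmt"

	const numEscResults = 7

	// Byte 0 is the heap flow and bytes 1..7 the first result flows;
	// each byte stores derefs+1, with 0 meaning "no flow".
	type leaks [1 + numEscResults]uint8

	func main() {
		var l leaks
		l[0] = 2 + 1   // leaks to the heap behind two dereferences
		l[1+0] = 0 + 1 // leaks directly to result #0

		// Trailing zero bytes are trimmed before adding the prefix.
		n := len(l)
		for n > 0 && l[n-1] == 0 {
			n--
		}
		fmt.Printf("tag = %q\n", "esc:"+string(l[:n])) // "esc:\x03\x01"

		// A direct heap leak (derefs == 0) would instead encode as
		// the empty string, the space optimization in Encode above.
	}
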
diff --git a/src/cmd/compile/internal/escape/solve.go b/src/cmd/compile/internal/escape/solve.go
new file mode 100644
index 0000000000..77d6b27dd7
--- /dev/null
+++ b/src/cmd/compile/internal/escape/solve.go
@@ -0,0 +1,289 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/internal/src"
+ "fmt"
+ "strings"
+)
+
+// walkAll computes the minimal dereferences between all pairs of
+// locations.
+func (b *batch) walkAll() {
+ // We use a work queue to keep track of locations that we need
+ // to visit, and repeatedly walk until we reach a fixed point.
+ //
+ // We walk once from each location (including the heap), and
+ // then re-enqueue each location on its transition from
+ // transient->!transient and !escapes->escapes, which can each
+ // happen at most once. So we take Θ(len(e.allLocs)) walks.
+
+ // LIFO queue, has enough room for e.allLocs and e.heapLoc.
+ todo := make([]*location, 0, len(b.allLocs)+1)
+ enqueue := func(loc *location) {
+ if !loc.queued {
+ todo = append(todo, loc)
+ loc.queued = true
+ }
+ }
+
+ for _, loc := range b.allLocs {
+ enqueue(loc)
+ }
+ enqueue(&b.heapLoc)
+
+ var walkgen uint32
+ for len(todo) > 0 {
+ root := todo[len(todo)-1]
+ todo = todo[:len(todo)-1]
+ root.queued = false
+
+ walkgen++
+ b.walkOne(root, walkgen, enqueue)
+ }
+}
+
+// walkOne computes the minimal number of dereferences from root to
+// all other locations.
+func (b *batch) walkOne(root *location, walkgen uint32, enqueue func(*location)) {
+ // The data flow graph has negative edges (from addressing
+ // operations), so we use the Bellman-Ford algorithm. However,
+ // we don't have to worry about infinite negative cycles since
+ // we bound intermediate dereference counts to 0.
+
+ root.walkgen = walkgen
+ root.derefs = 0
+ root.dst = nil
+
+ todo := []*location{root} // LIFO queue
+ for len(todo) > 0 {
+ l := todo[len(todo)-1]
+ todo = todo[:len(todo)-1]
+
+ derefs := l.derefs
+
+ // If l.derefs < 0, then l's address flows to root.
+ addressOf := derefs < 0
+ if addressOf {
+ // For a flow path like "root = &l; l = x",
+ // l's address flows to root, but x's does
+ // not. We recognize this by lower bounding
+ // derefs at 0.
+ derefs = 0
+
+ // If l's address flows to a non-transient
+ // location, then l can't be transiently
+ // allocated.
+ if !root.transient && l.transient {
+ l.transient = false
+ enqueue(l)
+ }
+ }
+
+ if b.outlives(root, l) {
+ // l's value flows to root. If l is a function
+ // parameter and root is the heap or a
+ // corresponding result parameter, then record
+ // that value flow for tagging the function
+ // later.
+ if l.isName(ir.PPARAM) {
+ if (logopt.Enabled() || base.Flag.LowerM >= 2) && !l.escapes {
+ if base.Flag.LowerM >= 2 {
+ fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, b.explainLoc(root), derefs)
+ }
+ explanation := b.explainPath(root, l)
+ if logopt.Enabled() {
+ var e_curfn *ir.Func // TODO(mdempsky): Fix.
+ logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e_curfn),
+ fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, b.explainLoc(root), derefs), explanation)
+ }
+ }
+ l.leakTo(root, derefs)
+ }
+
+ // If l's address flows somewhere that
+ // outlives it, then l needs to be heap
+ // allocated.
+ if addressOf && !l.escapes {
+ if logopt.Enabled() || base.Flag.LowerM >= 2 {
+ if base.Flag.LowerM >= 2 {
+ fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos()), l.n)
+ }
+ explanation := b.explainPath(root, l)
+ if logopt.Enabled() {
+ var e_curfn *ir.Func // TODO(mdempsky): Fix.
+ logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation)
+ }
+ }
+ l.escapes = true
+ enqueue(l)
+ continue
+ }
+ }
+
+ for i, edge := range l.edges {
+ if edge.src.escapes {
+ continue
+ }
+ d := derefs + edge.derefs
+ if edge.src.walkgen != walkgen || edge.src.derefs > d {
+ edge.src.walkgen = walkgen
+ edge.src.derefs = d
+ edge.src.dst = l
+ edge.src.dstEdgeIdx = i
+ todo = append(todo, edge.src)
+ }
+ }
+ }
+}
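
A stripped-down model of this traversal (a sketch under stated assumptions: it keeps only derefs and incoming edges, and floors intermediate counts at 0 exactly as above, which is what keeps the negative address-of edges from compounding into negative cycles):

	package main

	import "fmt"

	type edge struct {
		src    *loc
		derefs int // >= -1; -1 means "address of"
	}

	type loc struct {
		name  string
		edges []edge // incoming assignments, as in the real graph
	}

	// shortest returns the minimal dereference count from root to
	// every reachable location, bounding counts below by 0 before
	// following further edges, like walkOne.
	func shortest(root *loc) map[*loc]int {
		dist := map[*loc]int{root: 0}
		todo := []*loc{root}
		for len(todo) > 0 {
			l := todo[len(todo)-1]
			todo = todo[:len(todo)-1]
			d := dist[l]
			if d < 0 {
				d = 0
			}
			for _, e := range l.edges {
				nd := d + e.derefs
				if old, ok := dist[e.src]; !ok || nd < old {
					dist[e.src] = nd
					todo = append(todo, e.src)
				}
			}
		}
		return dist
	}

	func main() {
		// root = &l; l = x, the example from the comment above.
		x := &loc{name: "x"}
		l := &loc{name: "l", edges: []edge{{x, 0}}}
		root := &loc{name: "root", edges: []edge{{l, -1}}}

		d := shortest(root)
		fmt.Println(d[l], d[x]) // -1 0: l's address reaches root; x's does not
	}
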
+
+// explainPath prints an explanation of how src flows to the walk root.
+func (b *batch) explainPath(root, src *location) []*logopt.LoggedOpt {
+ visited := make(map[*location]bool)
+ pos := base.FmtPos(src.n.Pos())
+ var explanation []*logopt.LoggedOpt
+ for {
+ // Prevent infinite loop.
+ if visited[src] {
+ if base.Flag.LowerM >= 2 {
+ fmt.Printf("%s: warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos)
+ }
+ break
+ }
+ visited[src] = true
+ dst := src.dst
+ edge := &dst.edges[src.dstEdgeIdx]
+ if edge.src != src {
+ base.Fatalf("path inconsistency: %v != %v", edge.src, src)
+ }
+
+ explanation = b.explainFlow(pos, dst, src, edge.derefs, edge.notes, explanation)
+
+ if dst == root {
+ break
+ }
+ src = dst
+ }
+
+ return explanation
+}
+
+func (b *batch) explainFlow(pos string, dst, srcloc *location, derefs int, notes *note, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt {
+ ops := "&"
+ if derefs >= 0 {
+ ops = strings.Repeat("*", derefs)
+ }
+ print := base.Flag.LowerM >= 2
+
+ flow := fmt.Sprintf(" flow: %s = %s%v:", b.explainLoc(dst), ops, b.explainLoc(srcloc))
+ if print {
+ fmt.Printf("%s:%s\n", pos, flow)
+ }
+ if logopt.Enabled() {
+ var epos src.XPos
+ if notes != nil {
+ epos = notes.where.Pos()
+ } else if srcloc != nil && srcloc.n != nil {
+ epos = srcloc.n.Pos()
+ }
+ var e_curfn *ir.Func // TODO(mdempsky): Fix.
+ explanation = append(explanation, logopt.NewLoggedOpt(epos, "escflow", "escape", ir.FuncName(e_curfn), flow))
+ }
+
+ for note := notes; note != nil; note = note.next {
+ if print {
+ fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos()))
+ }
+ if logopt.Enabled() {
+ var e_curfn *ir.Func // TODO(mdempsky): Fix.
+ explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos(), "escflow", "escape", ir.FuncName(e_curfn),
+ fmt.Sprintf(" from %v (%v)", note.where, note.why)))
+ }
+ }
+ return explanation
+}
+
+func (b *batch) explainLoc(l *location) string {
+ if l == &b.heapLoc {
+ return "{heap}"
+ }
+ if l.n == nil {
+ // TODO(mdempsky): Omit entirely.
+ return "{temp}"
+ }
+ if l.n.Op() == ir.ONAME {
+ return fmt.Sprintf("%v", l.n)
+ }
+ return fmt.Sprintf("{storage for %v}", l.n)
+}
+
+// outlives reports whether values stored in l may survive beyond
+// other's lifetime if stack allocated.
+func (b *batch) outlives(l, other *location) bool {
+ // The heap outlives everything.
+ if l.escapes {
+ return true
+ }
+
+ // We don't know what callers do with returned values, so
+ // pessimistically we need to assume they flow to the heap and
+ // outlive everything too.
+ if l.isName(ir.PPARAMOUT) {
+ // Exception: Directly called closures can return
+ // locations allocated outside of them without forcing
+ // them to the heap. For example:
+ //
+ // var u int // okay to stack allocate
+ // *(func() *int { return &u }()) = 42
+ if containsClosure(other.curfn, l.curfn) && l.curfn.ClosureCalled() {
+ return false
+ }
+
+ return true
+ }
+
+ // If l and other are within the same function, then l
+ // outlives other if it was declared outside other's loop
+ // scope. For example:
+ //
+ // var l *int
+ // for {
+ // l = new(int)
+ // }
+ if l.curfn == other.curfn && l.loopDepth < other.loopDepth {
+ return true
+ }
+
+ // If other is declared within a child closure of where l is
+ // declared, then l outlives it. For example:
+ //
+ // var l *int
+ // func() {
+ // l = new(int)
+ // }
+ if containsClosure(l.curfn, other.curfn) {
+ return true
+ }
+
+ return false
+}
+
+// containsClosure reports whether c is a closure contained within f.
+func containsClosure(f, c *ir.Func) bool {
+ // Common case.
+ if f == c {
+ return false
+ }
+
+ // Closures within function Foo are named like "Foo.funcN..."
+ // TODO(mdempsky): Better way to recognize this.
+ fn := f.Sym().Name
+ cn := c.Sym().Name
+ return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
+}
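
The containment test reduces to a symbol-name prefix check; the predicate on its own looks like this (the TODO above notes the name-based approach is a stopgap):

	package p

	// Closures generated inside Foo get symbol names like
	// "Foo.func1", "Foo.func1.2", ..., so c is contained in f
	// exactly when c's name extends f's name with a '.':
	//
	//	contained("Foo", "Foo.func1")    == true
	//	contained("Foo", "Foobar.func1") == false
	func contained(fn, cn string) bool {
		return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
	}
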
diff --git a/src/cmd/compile/internal/escape/stmt.go b/src/cmd/compile/internal/escape/stmt.go
new file mode 100644
index 0000000000..c71848b8a1
--- /dev/null
+++ b/src/cmd/compile/internal/escape/stmt.go
@@ -0,0 +1,207 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "fmt"
+)
+
+// stmt evaluates a single Go statement.
+func (e *escape) stmt(n ir.Node) {
+ if n == nil {
+ return
+ }
+
+ lno := ir.SetPos(n)
+ defer func() {
+ base.Pos = lno
+ }()
+
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v:[%d] %v stmt: %v\n", base.FmtPos(base.Pos), e.loopDepth, e.curfn, n)
+ }
+
+ e.stmts(n.Init())
+
+ switch n.Op() {
+ default:
+ base.Fatalf("unexpected stmt: %v", n)
+
+ case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL, ir.OINLMARK:
+ // nop
+
+ case ir.OBREAK, ir.OCONTINUE, ir.OGOTO:
+ // TODO(mdempsky): Handle dead code?
+
+ case ir.OBLOCK:
+ n := n.(*ir.BlockStmt)
+ e.stmts(n.List)
+
+ case ir.ODCL:
+ // Record loop depth at declaration.
+ n := n.(*ir.Decl)
+ if !ir.IsBlank(n.X) {
+ e.dcl(n.X)
+ }
+
+ case ir.OLABEL:
+ n := n.(*ir.LabelStmt)
+ switch e.labels[n.Label] {
+ case nonlooping:
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v:%v non-looping label\n", base.FmtPos(base.Pos), n)
+ }
+ case looping:
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v: %v looping label\n", base.FmtPos(base.Pos), n)
+ }
+ e.loopDepth++
+ default:
+ base.Fatalf("label missing tag")
+ }
+ delete(e.labels, n.Label)
+
+ case ir.OIF:
+ n := n.(*ir.IfStmt)
+ e.discard(n.Cond)
+ e.block(n.Body)
+ e.block(n.Else)
+
+ case ir.OFOR, ir.OFORUNTIL:
+ n := n.(*ir.ForStmt)
+ e.loopDepth++
+ e.discard(n.Cond)
+ e.stmt(n.Post)
+ e.block(n.Body)
+ e.loopDepth--
+
+ case ir.ORANGE:
+ // for Key, Value = range X { Body }
+ n := n.(*ir.RangeStmt)
+
+ // X is evaluated outside the loop.
+ tmp := e.newLoc(nil, false)
+ e.expr(tmp.asHole(), n.X)
+
+ e.loopDepth++
+ ks := e.addrs([]ir.Node{n.Key, n.Value})
+ if n.X.Type().IsArray() {
+ e.flow(ks[1].note(n, "range"), tmp)
+ } else {
+ e.flow(ks[1].deref(n, "range-deref"), tmp)
+ }
+ e.reassigned(ks, n)
+
+ e.block(n.Body)
+ e.loopDepth--
+
+ case ir.OSWITCH:
+ n := n.(*ir.SwitchStmt)
+
+ if guard, ok := n.Tag.(*ir.TypeSwitchGuard); ok {
+ var ks []hole
+ if guard.Tag != nil {
+ for _, cas := range n.Cases {
+ cv := cas.Var
+ k := e.dcl(cv) // type switch variables have no ODCL.
+ if cv.Type().HasPointers() {
+ ks = append(ks, k.dotType(cv.Type(), cas, "switch case"))
+ }
+ }
+ }
+ e.expr(e.teeHole(ks...), n.Tag.(*ir.TypeSwitchGuard).X)
+ } else {
+ e.discard(n.Tag)
+ }
+
+ for _, cas := range n.Cases {
+ e.discards(cas.List)
+ e.block(cas.Body)
+ }
+
+ case ir.OSELECT:
+ n := n.(*ir.SelectStmt)
+ for _, cas := range n.Cases {
+ e.stmt(cas.Comm)
+ e.block(cas.Body)
+ }
+ case ir.ORECV:
+ // TODO(mdempsky): Consider e.discard(n.Left).
+ n := n.(*ir.UnaryExpr)
+ e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit
+ case ir.OSEND:
+ n := n.(*ir.SendStmt)
+ e.discard(n.Chan)
+ e.assignHeap(n.Value, "send", n)
+
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
+ e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n)
+ case ir.OASOP:
+ n := n.(*ir.AssignOpStmt)
+ // TODO(mdempsky): Worry about OLSH/ORSH?
+ e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n)
+ case ir.OAS2:
+ n := n.(*ir.AssignListStmt)
+ e.assignList(n.Lhs, n.Rhs, "assign-pair", n)
+
+ case ir.OAS2DOTTYPE: // v, ok = x.(type)
+ n := n.(*ir.AssignListStmt)
+ e.assignList(n.Lhs, n.Rhs, "assign-pair-dot-type", n)
+ case ir.OAS2MAPR: // v, ok = m[k]
+ n := n.(*ir.AssignListStmt)
+ e.assignList(n.Lhs, n.Rhs, "assign-pair-mapr", n)
+ case ir.OAS2RECV, ir.OSELRECV2: // v, ok = <-ch
+ n := n.(*ir.AssignListStmt)
+ e.assignList(n.Lhs, n.Rhs, "assign-pair-receive", n)
+
+ case ir.OAS2FUNC:
+ n := n.(*ir.AssignListStmt)
+ e.stmts(n.Rhs[0].Init())
+ ks := e.addrs(n.Lhs)
+ e.call(ks, n.Rhs[0])
+ e.reassigned(ks, n)
+ case ir.ORETURN:
+ n := n.(*ir.ReturnStmt)
+ results := e.curfn.Type().Results().FieldSlice()
+ dsts := make([]ir.Node, len(results))
+ for i, res := range results {
+ dsts[i] = res.Nname.(*ir.Name)
+ }
+ e.assignList(dsts, n.Results, "return", n)
+ case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OINLCALL, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+ e.call(nil, n)
+ case ir.OGO, ir.ODEFER:
+ n := n.(*ir.GoDeferStmt)
+ e.goDeferStmt(n)
+
+ case ir.OTAILCALL:
+ // TODO(mdempsky): Treat like a normal call? esc.go used to just ignore it.
+ }
+}
+
+func (e *escape) stmts(l ir.Nodes) {
+ for _, n := range l {
+ e.stmt(n)
+ }
+}
+
+// block is like stmts, but preserves loopDepth.
+func (e *escape) block(l ir.Nodes) {
+ old := e.loopDepth
+ e.stmts(l)
+ e.loopDepth = old
+}
+
+func (e *escape) dcl(n *ir.Name) hole {
+ if n.Curfn != e.curfn || n.IsClosureVar() {
+ base.Fatalf("bad declaration of %v", n)
+ }
+ loc := e.oldLoc(n)
+ loc.loopDepth = e.loopDepth
+ return loc.asHole()
+}
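
The loopDepth bookkeeping above is what lets outlives (in solve.go) separate declarations inside and outside a loop; the usual consequence, matching the example in outlives' comment, looks like this:

	package p

	func grow() {
		var l *int // declared at the outer loop depth
		for i := 0; i < 3; i++ {
			l = new(int) // l outlives each iteration's allocation,
			*l = i       // so the new(int) is heap-allocated
		}
	}
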
diff --git a/src/cmd/compile/internal/escape/utils.go b/src/cmd/compile/internal/escape/utils.go
new file mode 100644
index 0000000000..5f462ef570
--- /dev/null
+++ b/src/cmd/compile/internal/escape/utils.go
@@ -0,0 +1,215 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+)
+
+func isSliceSelfAssign(dst, src ir.Node) bool {
+ // Detect the following special case.
+ //
+ // func (b *Buffer) Foo() {
+ // n, m := ...
+ // b.buf = b.buf[n:m]
+ // }
+ //
+	// This assignment is a no-op for escape analysis;
+ // it does not store any new pointers into b that were not already there.
+ // However, without this special case b will escape, because we assign to OIND/ODOTPTR.
+ // Here we assume that the statement will not contain calls,
+ // that is, that order will move any calls to init.
+ // Otherwise base ONAME value could change between the moments
+ // when we evaluate it for dst and for src.
+
+ // dst is ONAME dereference.
+ var dstX ir.Node
+ switch dst.Op() {
+ default:
+ return false
+ case ir.ODEREF:
+ dst := dst.(*ir.StarExpr)
+ dstX = dst.X
+ case ir.ODOTPTR:
+ dst := dst.(*ir.SelectorExpr)
+ dstX = dst.X
+ }
+ if dstX.Op() != ir.ONAME {
+ return false
+ }
+ // src is a slice operation.
+ switch src.Op() {
+ case ir.OSLICE, ir.OSLICE3, ir.OSLICESTR:
+ // OK.
+ case ir.OSLICEARR, ir.OSLICE3ARR:
+ // Since arrays are embedded in their containing object,
+ // slicing a non-pointer array introduces a new pointer into b that was not already there
+ // (a pointer to b itself). After such an assignment, if b's contents escape,
+ // b escapes as well. If we ignored such OSLICEARR, we would conclude
+ // that b does not escape when its contents do.
+ //
+ // Pointer to an array is OK since it's not stored inside b directly.
+ // For slicing an array (not pointer to array), there is an implicit OADDR.
+ // We check that to determine non-pointer array slicing.
+ src := src.(*ir.SliceExpr)
+ if src.X.Op() == ir.OADDR {
+ return false
+ }
+ default:
+ return false
+ }
+ // slice is applied to ONAME dereference.
+ var baseX ir.Node
+ switch base := src.(*ir.SliceExpr).X; base.Op() {
+ default:
+ return false
+ case ir.ODEREF:
+ base := base.(*ir.StarExpr)
+ baseX = base.X
+ case ir.ODOTPTR:
+ base := base.(*ir.SelectorExpr)
+ baseX = base.X
+ }
+ if baseX.Op() != ir.ONAME {
+ return false
+ }
+ // dst and src reference the same base ONAME.
+ return dstX.(*ir.Name) == baseX.(*ir.Name)
+}
+
+// isSelfAssign reports whether assignment from src to dst can
+// be ignored by the escape analysis as it's effectively a self-assignment.
+func isSelfAssign(dst, src ir.Node) bool {
+ if isSliceSelfAssign(dst, src) {
+ return true
+ }
+
+ // Detect trivial assignments that assign back to the same object.
+ //
+ // It covers these cases:
+ // val.x = val.y
+ // val.x[i] = val.y[j]
+ // val.x1.x2 = val.x1.y2
+ // ... etc
+ //
+ // These assignments do not change assigned object lifetime.
+
+ if dst == nil || src == nil || dst.Op() != src.Op() {
+ return false
+ }
+
+ // The expression prefix must be both "safe" and identical.
+ switch dst.Op() {
+ case ir.ODOT, ir.ODOTPTR:
+ // Safe trailing accessors that are permitted to differ.
+ dst := dst.(*ir.SelectorExpr)
+ src := src.(*ir.SelectorExpr)
+ return ir.SameSafeExpr(dst.X, src.X)
+ case ir.OINDEX:
+ dst := dst.(*ir.IndexExpr)
+ src := src.(*ir.IndexExpr)
+ if mayAffectMemory(dst.Index) || mayAffectMemory(src.Index) {
+ return false
+ }
+ return ir.SameSafeExpr(dst.X, src.X)
+ default:
+ return false
+ }
+}
+
+// mayAffectMemory reports whether evaluation of n may affect the program's
+// memory state. If the expression can't affect memory state, then it can be
+// safely ignored by the escape analysis.
+func mayAffectMemory(n ir.Node) bool {
+ // We may want to use a list of "memory safe" ops instead of generally
+ // "side-effect free", which would include all calls and other ops that can
+ // allocate or change global state. For now, it's safer to start with the latter.
+ //
+ // We're ignoring things like division by zero, index out of range,
+ // and nil pointer dereference here.
+
+ // TODO(rsc): It seems like it should be possible to replace this with
+ // an ir.Any looking for any op that's not the ones in the case statement.
+ // But that produces changes in the compiled output detected by buildall.
+ switch n.Op() {
+ case ir.ONAME, ir.OLITERAL, ir.ONIL:
+ return false
+
+ case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
+ n := n.(*ir.BinaryExpr)
+ return mayAffectMemory(n.X) || mayAffectMemory(n.Y)
+
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ return mayAffectMemory(n.X) || mayAffectMemory(n.Index)
+
+ case ir.OCONVNOP, ir.OCONV:
+ n := n.(*ir.ConvExpr)
+ return mayAffectMemory(n.X)
+
+ case ir.OLEN, ir.OCAP, ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
+ n := n.(*ir.UnaryExpr)
+ return mayAffectMemory(n.X)
+
+ case ir.ODOT, ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ return mayAffectMemory(n.X)
+
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ return mayAffectMemory(n.X)
+
+ default:
+ return true
+ }
+}
+
+// HeapAllocReason returns the reason the given Node must be heap
+// allocated, or the empty string if it does not need to be.
+func HeapAllocReason(n ir.Node) string {
+ if n == nil || n.Type() == nil {
+ return ""
+ }
+
+ // Parameters are always passed via the stack.
+ if n.Op() == ir.ONAME {
+ n := n.(*ir.Name)
+ if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
+ return ""
+ }
+ }
+
+ if n.Type().Width > ir.MaxStackVarSize {
+ return "too large for stack"
+ }
+
+ if (n.Op() == ir.ONEW || n.Op() == ir.OPTRLIT) && n.Type().Elem().Width > ir.MaxImplicitStackVarSize {
+ return "too large for stack"
+ }
+
+ if n.Op() == ir.OCLOSURE && typecheck.ClosureType(n.(*ir.ClosureExpr)).Size() > ir.MaxImplicitStackVarSize {
+ return "too large for stack"
+ }
+ if n.Op() == ir.OMETHVALUE && typecheck.MethodValueType(n.(*ir.SelectorExpr)).Size() > ir.MaxImplicitStackVarSize {
+ return "too large for stack"
+ }
+
+ if n.Op() == ir.OMAKESLICE {
+ n := n.(*ir.MakeExpr)
+ r := n.Cap
+ if r == nil {
+ r = n.Len
+ }
+ if !ir.IsSmallIntConst(r) {
+ return "non-constant size"
+ }
+ if t := n.Type(); t.Elem().Width != 0 && ir.Int64Val(r) > ir.MaxImplicitStackVarSize/t.Elem().Width {
+ return "too large for stack"
+ }
+ }
+
+ return ""
+}
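
isSliceSelfAssign above is aimed at the bytes.Buffer-style pattern where a slice field is resliced in place. A small standalone sketch of the kind of code it recognizes (the Buffer type here is illustrative, not the compiler's own test case); the assignment stores only a pointer already reachable from *b, so the receiver need not escape:

// selfassign.go: standalone illustration of the b.buf = b.buf[n:m] pattern.
package main

import "fmt"

// Buffer is an illustrative stand-in for types like bytes.Buffer.
type Buffer struct {
	buf []byte
}

// Truncate reslices b.buf in place. The assignment stores only a pointer
// that was already reachable from *b, which is the pattern that
// isSliceSelfAssign lets escape analysis ignore.
func (b *Buffer) Truncate(n int) {
	b.buf = b.buf[:n]
}

func main() {
	b := &Buffer{buf: []byte("hello world")}
	b.Truncate(5)
	fmt.Println(string(b.buf))
}
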
diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go
index 2137f1d196..9bf3c7240a 100644
--- a/src/cmd/compile/internal/gc/export.go
+++ b/src/cmd/compile/internal/gc/export.go
@@ -5,46 +5,16 @@
package gc
import (
+ "fmt"
+ "go/constant"
+
"cmd/compile/internal/base"
- "cmd/compile/internal/inline"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/bio"
- "fmt"
- "go/constant"
)
-func exportf(bout *bio.Writer, format string, args ...interface{}) {
- fmt.Fprintf(bout, format, args...)
- if base.Debug.Export != 0 {
- fmt.Printf(format, args...)
- }
-}
-
-func dumpexport(bout *bio.Writer) {
- p := &exporter{marked: make(map[*types.Type]bool)}
- for _, n := range typecheck.Target.Exports {
- // Must catch it here rather than Export(), because the type can be
- // not fully set (still TFORW) when Export() is called.
- if n.Type() != nil && n.Type().HasTParam() {
- base.Fatalf("Cannot (yet) export a generic type: %v", n)
- }
- p.markObject(n)
- }
-
- // The linker also looks for the $$ marker - use char after $$ to distinguish format.
- exportf(bout, "\n$$B\n") // indicate binary export format
- off := bout.Offset()
- typecheck.WriteExports(bout.Writer)
- size := bout.Offset() - off
- exportf(bout, "\n$$\n")
-
- if base.Debug.Export != 0 {
- fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, size)
- }
-}
-
func dumpasmhdr() {
b, err := bio.Create(base.Flag.AsmHdr)
if err != nil {
@@ -79,83 +49,3 @@ func dumpasmhdr() {
b.Close()
}
-
-type exporter struct {
- marked map[*types.Type]bool // types already seen by markType
-}
-
-// markObject visits a reachable object.
-func (p *exporter) markObject(n ir.Node) {
- if n.Op() == ir.ONAME {
- n := n.(*ir.Name)
- if n.Class == ir.PFUNC {
- inline.Inline_Flood(n, typecheck.Export)
- }
- }
-
- p.markType(n.Type())
-}
-
-// markType recursively visits types reachable from t to identify
-// functions whose inline bodies may be needed.
-func (p *exporter) markType(t *types.Type) {
- if p.marked[t] {
- return
- }
- p.marked[t] = true
-
- // If this is a named type, mark all of its associated
- // methods. Skip interface types because t.Methods contains
- // only their unexpanded method set (i.e., exclusive of
- // interface embeddings), and the switch statement below
- // handles their full method set.
- if t.Sym() != nil && t.Kind() != types.TINTER {
- for _, m := range t.Methods().Slice() {
- if types.IsExported(m.Sym.Name) {
- p.markObject(ir.AsNode(m.Nname))
- }
- }
- }
-
- // Recursively mark any types that can be produced given a
- // value of type t: dereferencing a pointer; indexing or
- // iterating over an array, slice, or map; receiving from a
- // channel; accessing a struct field or interface method; or
- // calling a function.
- //
- // Notably, we don't mark function parameter types, because
- // the user already needs some way to construct values of
- // those types.
- switch t.Kind() {
- case types.TPTR, types.TARRAY, types.TSLICE:
- p.markType(t.Elem())
-
- case types.TCHAN:
- if t.ChanDir().CanRecv() {
- p.markType(t.Elem())
- }
-
- case types.TMAP:
- p.markType(t.Key())
- p.markType(t.Elem())
-
- case types.TSTRUCT:
- for _, f := range t.FieldSlice() {
- if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
- p.markType(f.Type)
- }
- }
-
- case types.TFUNC:
- for _, f := range t.Results().FieldSlice() {
- p.markType(f.Type)
- }
-
- case types.TINTER:
- for _, f := range t.AllMethods().Slice() {
- if types.IsExported(f.Sym.Name) {
- p.markType(f.Type)
- }
- }
- }
-}
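
The deleted exporter.markType above is a memoized reachability walk: it records each visited type in a marked map and recurses only into types that a value of t can produce. A toy standalone sketch of that traversal pattern, using a hypothetical simplified Type in place of the compiler's *types.Type:

// marktypes.go: toy sketch of a memoized type-reachability walk.
package main

import "fmt"

type Kind int

const (
	Basic Kind = iota
	Pointer
	Slice
	Map
)

// Type is a hypothetical, simplified type representation.
type Type struct {
	Name      string
	Kind      Kind
	Key, Elem *Type
}

type marker struct {
	marked map[*Type]bool // types already seen, like exporter.marked
}

// markType visits t at most once, then recurses into types reachable
// from it, mirroring the structure of the deleted exporter.markType.
func (m *marker) markType(t *Type) {
	if t == nil || m.marked[t] {
		return
	}
	m.marked[t] = true
	fmt.Println("marked", t.Name)

	switch t.Kind {
	case Pointer, Slice:
		m.markType(t.Elem)
	case Map:
		m.markType(t.Key)
		m.markType(t.Elem)
	}
}

func main() {
	intT := &Type{Name: "int", Kind: Basic}
	strT := &Type{Name: "string", Kind: Basic}
	ptrT := &Type{Name: "*int", Kind: Pointer, Elem: intT}
	sliceT := &Type{Name: "[]*int", Kind: Slice, Elem: ptrT}
	mapT := &Type{Name: "map[string][]*int", Kind: Map, Key: strT, Elem: sliceT}

	m := &marker{marked: make(map[*Type]bool)}
	m.markType(mapT)
	m.markType(mapT) // second call is a no-op thanks to the marked map
}
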
diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
index ce50cbb4c2..6a373ce33d 100644
--- a/src/cmd/compile/internal/gc/main.go
+++ b/src/cmd/compile/internal/gc/main.go
@@ -32,6 +32,7 @@ import (
"log"
"os"
"runtime"
+ "sort"
)
func hidePanic() {
@@ -159,9 +160,6 @@ func Main(archInit func(*ssagen.ArchInfo)) {
dwarf.EnableLogging(base.Debug.DwarfInl != 0)
}
if base.Debug.SoftFloat != 0 {
- if buildcfg.Experiment.RegabiArgs {
- log.Fatalf("softfloat mode with GOEXPERIMENT=regabiargs not implemented ")
- }
ssagen.Arch.SoftFloat = true
}
@@ -181,23 +179,41 @@ func Main(archInit func(*ssagen.ArchInfo)) {
typecheck.Target = new(ir.Package)
- typecheck.NeedITab = func(t, iface *types.Type) { reflectdata.ITabAddr(t, iface) }
typecheck.NeedRuntimeType = reflectdata.NeedRuntimeType // TODO(rsc): TypeSym for lock?
base.AutogeneratedPos = makePos(src.NewFileBase("<autogenerated>", "<autogenerated>"), 1, 0)
typecheck.InitUniverse()
+ typecheck.InitRuntime()
// Parse and typecheck input.
noder.LoadPackage(flag.Args())
dwarfgen.RecordPackageName()
+ // Prepare for backend processing. This must happen before pkginit,
+ // because it generates itabs for initializing global variables.
+ ssagen.InitConfig()
+
// Build init task.
if initTask := pkginit.Task(); initTask != nil {
typecheck.Export(initTask)
}
+ // Stability quirk: sort top-level declarations, so we're not
+ // sensitive to the order that functions are added. In particular,
+ // the order that noder+typecheck add function closures is very
+ // subtle, and not important to reproduce.
+ //
+ // Note: This needs to happen after pkginit.Task, otherwise it risks
+ // changing the order in which top-level variables are initialized.
+ if base.Debug.UnifiedQuirks != 0 {
+ s := typecheck.Target.Decls
+ sort.SliceStable(s, func(i, j int) bool {
+ return s[i].Pos().Before(s[j].Pos())
+ })
+ }
+
// Eliminate some obviously dead code.
// Must happen after typechecking.
for _, n := range typecheck.Target.Decls {
@@ -252,6 +268,11 @@ func Main(archInit func(*ssagen.ArchInfo)) {
base.Timer.Start("fe", "escapes")
escape.Funcs(typecheck.Target.Decls)
+ // TODO(mdempsky): This is a hack. We need a proper, global work
+ // queue for scheduling function compilation so components don't
+ // need to adjust their behavior depending on when they're called.
+ reflectdata.AfterGlobalEscapeAnalysis = true
+
// Collect information for go:nowritebarrierrec
// checking. This must happen before transforming closures during Walk
// We'll do the final check after write barriers are
@@ -260,17 +281,7 @@ func Main(archInit func(*ssagen.ArchInfo)) {
ssagen.EnableNoWriteBarrierRecCheck()
}
- // Prepare for SSA compilation.
- // This must be before CompileITabs, because CompileITabs
- // can trigger function compilation.
- typecheck.InitRuntime()
- ssagen.InitConfig()
-
- // Just before compilation, compile itabs found on
- // the right side of OCONVIFACE so that methods
- // can be de-virtualized during compilation.
ir.CurFunc = nil
- reflectdata.CompileITabs()
// Compile top level functions.
// Don't use range--walk can add functions to Target.Decls.
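
The UnifiedQuirks block added above stabilizes output by ordering top-level declarations with a stable sort on source position, so declarations that share a position keep their original relative order. A standalone sketch of the same sort.SliceStable usage over a hypothetical decl type:

// sortdecls.go: sketch of the stable sort-by-position quirk.
package main

import (
	"fmt"
	"sort"
)

// decl is a stand-in for a top-level declaration; Pos stands in for a
// source position.
type decl struct {
	Name string
	Pos  int
}

func main() {
	decls := []decl{
		{"closure#2", 30},
		{"init", 10},
		{"main", 20},
		{"closure#1", 30}, // same position: stable sort keeps relative order
	}

	// Order declarations by position, preserving the existing order of
	// declarations at the same position.
	sort.SliceStable(decls, func(i, j int) bool {
		return decls[i].Pos < decls[j].Pos
	})

	fmt.Println(decls)
}
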
diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go
index 474d718525..c86bf5f084 100644
--- a/src/cmd/compile/internal/gc/obj.go
+++ b/src/cmd/compile/internal/gc/obj.go
@@ -7,6 +7,7 @@ package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/noder"
"cmd/compile/internal/objw"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/staticdata"
@@ -103,7 +104,7 @@ func finishArchiveEntry(bout *bio.Writer, start int64, name string) {
func dumpCompilerObj(bout *bio.Writer) {
printObjHeader(bout)
- dumpexport(bout)
+ noder.WriteExports(bout)
}
func dumpdata() {
@@ -116,7 +117,7 @@ func dumpdata() {
addsignats(typecheck.Target.Externs)
reflectdata.WriteRuntimeTypes()
reflectdata.WriteTabs()
- numPTabs, numITabs := reflectdata.CountTabs()
+ numPTabs := reflectdata.CountPTabs()
reflectdata.WriteImportStrings()
reflectdata.WriteBasicTypes()
dumpembeds()
@@ -157,13 +158,10 @@ func dumpdata() {
if numExports != len(typecheck.Target.Exports) {
base.Fatalf("Target.Exports changed after compile functions loop")
}
- newNumPTabs, newNumITabs := reflectdata.CountTabs()
+ newNumPTabs := reflectdata.CountPTabs()
if newNumPTabs != numPTabs {
base.Fatalf("ptabs changed after compile functions loop")
}
- if newNumITabs != numITabs {
- base.Fatalf("itabs changed after compile functions loop")
- }
}
func dumpLinkerObj(bout *bio.Writer) {
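
The dumpdata change above keeps only the ptab half of the consistency check, since itabs are no longer counted here: the count is taken before the compile-functions loop and re-checked afterwards. A minimal standalone sketch of that count-then-recheck invariant pattern (the ptab slice and phase function are stand-ins):

// invariant.go: sketch of the count-before/count-after check in dumpdata.
package main

import (
	"fmt"
	"log"
)

var ptabs []string // stand-in for reflectdata's ptab table

func countPTabs() int { return len(ptabs) }

// compileFunctions stands in for the compile loop; it may add symbols,
// but it must not add new ptabs.
func compileFunctions() {}

func main() {
	numPTabs := countPTabs()
	compileFunctions()
	if newNumPTabs := countPTabs(); newNumPTabs != numPTabs {
		log.Fatalf("ptabs changed after compile functions loop: %d -> %d", numPTabs, newNumPTabs)
	}
	fmt.Println("ptab count unchanged:", numPTabs)
}
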
diff --git a/src/cmd/compile/internal/importer/exportdata.go b/src/cmd/compile/internal/importer/exportdata.go
index 3925a64314..6a672be9c1 100644
--- a/src/cmd/compile/internal/importer/exportdata.go
+++ b/src/cmd/compile/internal/importer/exportdata.go
@@ -1,4 +1,3 @@
-// UNREVIEWED
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/src/cmd/compile/internal/importer/gcimporter.go b/src/cmd/compile/internal/importer/gcimporter.go
index feb18cf2c9..ff40be65bb 100644
--- a/src/cmd/compile/internal/importer/gcimporter.go
+++ b/src/cmd/compile/internal/importer/gcimporter.go
@@ -1,4 +1,3 @@
-// UNREVIEWED
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -156,7 +155,7 @@ func Import(packages map[string]*types2.Package, path, srcDir string, lookup fun
// binary export format starts with a 'c', 'd', or 'v'
// (from "version"). Select appropriate importer.
if len(data) > 0 && data[0] == 'i' {
- _, pkg, err = iImportData(packages, data[1:], id)
+ pkg, err = ImportData(packages, string(data[1:]), id)
} else {
err = fmt.Errorf("import %q: old binary export format no longer supported (recompile library)", path)
}
diff --git a/src/cmd/compile/internal/importer/gcimporter_test.go b/src/cmd/compile/internal/importer/gcimporter_test.go
index 7fb8fed59c..44c5e06cd6 100644
--- a/src/cmd/compile/internal/importer/gcimporter_test.go
+++ b/src/cmd/compile/internal/importer/gcimporter_test.go
@@ -1,4 +1,3 @@
-// UNREVIEWED
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -10,7 +9,6 @@ import (
"cmd/compile/internal/types2"
"fmt"
"internal/testenv"
- "io/ioutil"
"os"
"os/exec"
"path/filepath"
@@ -64,7 +62,7 @@ const maxTime = 30 * time.Second
func testDir(t *testing.T, dir string, endTime time.Time) (nimports int) {
dirname := filepath.Join(runtime.GOROOT(), "pkg", runtime.GOOS+"_"+runtime.GOARCH, dir)
- list, err := ioutil.ReadDir(dirname)
+ list, err := os.ReadDir(dirname)
if err != nil {
t.Fatalf("testDir(%s): %s", dirname, err)
}
@@ -92,7 +90,7 @@ func testDir(t *testing.T, dir string, endTime time.Time) (nimports int) {
}
func mktmpdir(t *testing.T) string {
- tmpdir, err := ioutil.TempDir("", "gcimporter_test")
+ tmpdir, err := os.MkdirTemp("", "gcimporter_test")
if err != nil {
t.Fatal("mktmpdir:", err)
}
@@ -142,7 +140,7 @@ func TestVersionHandling(t *testing.T) {
}
const dir = "./testdata/versions"
- list, err := ioutil.ReadDir(dir)
+ list, err := os.ReadDir(dir)
if err != nil {
t.Fatal(err)
}
@@ -195,7 +193,7 @@ func TestVersionHandling(t *testing.T) {
// create file with corrupted export data
// 1) read file
- data, err := ioutil.ReadFile(filepath.Join(dir, name))
+ data, err := os.ReadFile(filepath.Join(dir, name))
if err != nil {
t.Fatal(err)
}
@@ -212,7 +210,7 @@ func TestVersionHandling(t *testing.T) {
// 4) write the file
pkgpath += "_corrupted"
filename := filepath.Join(corruptdir, pkgpath) + ".a"
- ioutil.WriteFile(filename, data, 0666)
+ os.WriteFile(filename, data, 0666)
// test that importing the corrupted file results in an error
_, err = Import(make(map[string]*types2.Package), pkgpath, corruptdir, nil)
@@ -261,8 +259,7 @@ var importedObjectTests = []struct {
{"io.Reader", "type Reader interface{Read(p []byte) (n int, err error)}"},
{"io.ReadWriter", "type ReadWriter interface{Reader; Writer}"},
{"go/ast.Node", "type Node interface{End() go/token.Pos; Pos() go/token.Pos}"},
- // go/types.Type has grown much larger - excluded for now
- // {"go/types.Type", "type Type interface{String() string; Underlying() Type}"},
+ {"go/types.Type", "type Type interface{String() string; Underlying() Type}"},
}
func TestImportedTypes(t *testing.T) {
@@ -457,17 +454,17 @@ func TestIssue13898(t *testing.T) {
t.Fatal("go/types not found")
}
- // look for go/types2.Object type
+ // look for go/types.Object type
obj := lookupObj(t, goTypesPkg.Scope(), "Object")
typ, ok := obj.Type().(*types2.Named)
if !ok {
- t.Fatalf("go/types2.Object type is %v; wanted named type", typ)
+ t.Fatalf("go/types.Object type is %v; wanted named type", typ)
}
- // lookup go/types2.Object.Pkg method
+ // lookup go/types.Object.Pkg method
m, index, indirect := types2.LookupFieldOrMethod(typ, false, nil, "Pkg")
if m == nil {
- t.Fatalf("go/types2.Object.Pkg not found (index = %v, indirect = %v)", index, indirect)
+ t.Fatalf("go/types.Object.Pkg not found (index = %v, indirect = %v)", index, indirect)
}
// the method must belong to go/types
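
The test changes above replace the deprecated io/ioutil helpers with their os equivalents. For reference, a small standalone program exercising the same replacements (temporary paths and contents are arbitrary):

// osio.go: the io/ioutil -> os replacements used in the test changes.
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

func main() {
	// os.MkdirTemp replaces ioutil.TempDir.
	tmpdir, err := os.MkdirTemp("", "example")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(tmpdir)

	// os.WriteFile and os.ReadFile replace ioutil.WriteFile and ioutil.ReadFile.
	name := filepath.Join(tmpdir, "data.txt")
	if err := os.WriteFile(name, []byte("hello"), 0666); err != nil {
		log.Fatal(err)
	}
	data, err := os.ReadFile(name)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data))

	// os.ReadDir replaces ioutil.ReadDir; it returns []os.DirEntry
	// rather than []os.FileInfo.
	entries, err := os.ReadDir(tmpdir)
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Println(e.Name())
	}
}
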
diff --git a/src/cmd/compile/internal/importer/iimport.go b/src/cmd/compile/internal/importer/iimport.go
index 8ab0b7b989..a317dfc34a 100644
--- a/src/cmd/compile/internal/importer/iimport.go
+++ b/src/cmd/compile/internal/importer/iimport.go
@@ -1,4 +1,3 @@
-// UNREVIEWED
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -9,7 +8,6 @@
package importer
import (
- "bytes"
"cmd/compile/internal/syntax"
"cmd/compile/internal/types2"
"encoding/binary"
@@ -19,10 +17,11 @@ import (
"io"
"math/big"
"sort"
+ "strings"
)
type intReader struct {
- *bytes.Reader
+ *strings.Reader
path string
}
@@ -42,6 +41,21 @@ func (r *intReader) uint64() uint64 {
return i
}
+// Keep this in sync with constants in iexport.go.
+const (
+ iexportVersionGo1_11 = 0
+ iexportVersionPosCol = 1
+ // TODO: before release, change this back to 2.
+ iexportVersionGenerics = iexportVersionPosCol
+
+ iexportVersionCurrent = iexportVersionGenerics
+)
+
+type ident struct {
+ pkg string
+ name string
+}
+
const predeclReserved = 32
type itag uint64
@@ -57,6 +71,9 @@ const (
signatureType
structType
interfaceType
+ typeParamType
+ instType
+ unionType
)
const io_SeekCurrent = 1 // io.SeekCurrent (not defined in Go 1.4)
@@ -65,8 +82,8 @@ const io_SeekCurrent = 1 // io.SeekCurrent (not defined in Go 1.4)
// and returns the number of bytes consumed and a reference to the package.
// If the export data version is not recognized or the format is otherwise
// compromised, an error is returned.
-func iImportData(imports map[string]*types2.Package, data []byte, path string) (_ int, pkg *types2.Package, err error) {
- const currentVersion = 1
+func ImportData(imports map[string]*types2.Package, data, path string) (pkg *types2.Package, err error) {
+ const currentVersion = iexportVersionCurrent
version := int64(-1)
defer func() {
if e := recover(); e != nil {
@@ -78,13 +95,17 @@ func iImportData(imports map[string]*types2.Package, data []byte, path string) (
}
}()
- r := &intReader{bytes.NewReader(data), path}
+ r := &intReader{strings.NewReader(data), path}
version = int64(r.uint64())
switch version {
- case currentVersion, 0:
+ case /* iexportVersionGenerics, */ iexportVersionPosCol, iexportVersionGo1_11:
default:
- errorf("unknown iexport format version %d", version)
+ if version > iexportVersionGenerics {
+ errorf("unstable iexport format version %d, just rebuild compiler and std library", version)
+ } else {
+ errorf("unknown iexport format version %d", version)
+ }
}
sLen := int64(r.uint64())
@@ -96,16 +117,20 @@ func iImportData(imports map[string]*types2.Package, data []byte, path string) (
r.Seek(sLen+dLen, io_SeekCurrent)
p := iimporter{
- ipath: path,
- version: int(version),
+ exportVersion: version,
+ ipath: path,
+ version: int(version),
- stringData: stringData,
- stringCache: make(map[uint64]string),
- pkgCache: make(map[uint64]*types2.Package),
+ stringData: stringData,
+ pkgCache: make(map[uint64]*types2.Package),
+ posBaseCache: make(map[uint64]*syntax.PosBase),
declData: declData,
pkgIndex: make(map[*types2.Package]map[string]uint64),
typCache: make(map[uint64]types2.Type),
+ // Separate map for typeparams, keyed by their package and unique
+ // name (name with subscript).
+ tparamIndex: make(map[ident]types2.Type),
}
for i, pt := range predeclared {
@@ -117,17 +142,22 @@ func iImportData(imports map[string]*types2.Package, data []byte, path string) (
pkgPathOff := r.uint64()
pkgPath := p.stringAt(pkgPathOff)
pkgName := p.stringAt(r.uint64())
- _ = r.uint64() // package height; unused by go/types
+ pkgHeight := int(r.uint64())
if pkgPath == "" {
pkgPath = path
}
pkg := imports[pkgPath]
if pkg == nil {
- pkg = types2.NewPackage(pkgPath, pkgName)
+ pkg = types2.NewPackageHeight(pkgPath, pkgName, pkgHeight)
imports[pkgPath] = pkg
- } else if pkg.Name() != pkgName {
- errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
+ } else {
+ if pkg.Name() != pkgName {
+ errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
+ }
+ if pkg.Height() != pkgHeight {
+ errorf("conflicting heights %v and %v for package %q", pkg.Height(), pkgHeight, path)
+ }
}
p.pkgCache[pkgPathOff] = pkg
@@ -153,10 +183,6 @@ func iImportData(imports map[string]*types2.Package, data []byte, path string) (
p.doDecl(localpkg, name)
}
- for _, typ := range p.interfaceList {
- typ.Complete()
- }
-
// record all referenced packages as imports
list := append(([]*types2.Package)(nil), pkgList[1:]...)
sort.Sort(byPath(list))
@@ -165,21 +191,22 @@ func iImportData(imports map[string]*types2.Package, data []byte, path string) (
// package was imported completely and without errors
localpkg.MarkComplete()
- consumed, _ := r.Seek(0, io_SeekCurrent)
- return int(consumed), localpkg, nil
+ return localpkg, nil
}
type iimporter struct {
- ipath string
- version int
+ exportVersion int64
+ ipath string
+ version int
- stringData []byte
- stringCache map[uint64]string
- pkgCache map[uint64]*types2.Package
+ stringData string
+ pkgCache map[uint64]*types2.Package
+ posBaseCache map[uint64]*syntax.PosBase
- declData []byte
- pkgIndex map[*types2.Package]map[string]uint64
- typCache map[uint64]types2.Type
+ declData string
+ pkgIndex map[*types2.Package]map[string]uint64
+ typCache map[uint64]types2.Type
+ tparamIndex map[ident]types2.Type
interfaceList []*types2.Interface
}
@@ -199,24 +226,21 @@ func (p *iimporter) doDecl(pkg *types2.Package, name string) {
// Reader.Reset is not available in Go 1.4.
// Use bytes.NewReader for now.
// r.declReader.Reset(p.declData[off:])
- r.declReader = *bytes.NewReader(p.declData[off:])
+ r.declReader = *strings.NewReader(p.declData[off:])
r.obj(name)
}
func (p *iimporter) stringAt(off uint64) string {
- if s, ok := p.stringCache[off]; ok {
- return s
- }
+ var x [binary.MaxVarintLen64]byte
+ n := copy(x[:], p.stringData[off:])
- slen, n := binary.Uvarint(p.stringData[off:])
+ slen, n := binary.Uvarint(x[:n])
if n <= 0 {
errorf("varint failed")
}
spos := off + uint64(n)
- s := string(p.stringData[spos : spos+slen])
- p.stringCache[off] = s
- return s
+ return p.stringData[spos : spos+slen]
}
func (p *iimporter) pkgAt(off uint64) *types2.Package {
@@ -228,6 +252,16 @@ func (p *iimporter) pkgAt(off uint64) *types2.Package {
return nil
}
+func (p *iimporter) posBaseAt(off uint64) *syntax.PosBase {
+ if posBase, ok := p.posBaseCache[off]; ok {
+ return posBase
+ }
+ filename := p.stringAt(off)
+ posBase := syntax.NewFileBase(filename)
+ p.posBaseCache[off] = posBase
+ return posBase
+}
+
func (p *iimporter) typAt(off uint64, base *types2.Named) types2.Type {
if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) {
return t
@@ -241,7 +275,7 @@ func (p *iimporter) typAt(off uint64, base *types2.Named) types2.Type {
// Reader.Reset is not available in Go 1.4.
// Use bytes.NewReader for now.
// r.declReader.Reset(p.declData[off-predeclReserved:])
- r.declReader = *bytes.NewReader(p.declData[off-predeclReserved:])
+ r.declReader = *strings.NewReader(p.declData[off-predeclReserved:])
t := r.doType(base)
if base == nil || !isInterface(t) {
@@ -251,12 +285,12 @@ func (p *iimporter) typAt(off uint64, base *types2.Named) types2.Type {
}
type importReader struct {
- p *iimporter
- declReader bytes.Reader
- currPkg *types2.Package
- prevFile string
- prevLine int64
- prevColumn int64
+ p *iimporter
+ declReader strings.Reader
+ currPkg *types2.Package
+ prevPosBase *syntax.PosBase
+ prevLine int64
+ prevColumn int64
}
func (r *importReader) obj(name string) {
@@ -274,16 +308,28 @@ func (r *importReader) obj(name string) {
r.declare(types2.NewConst(pos, r.currPkg, name, typ, val))
- case 'F':
+ case 'F', 'G':
+ var tparams []*types2.TypeName
+ if tag == 'G' {
+ tparams = r.tparamList()
+ }
sig := r.signature(nil)
-
+ sig.SetTParams(tparams)
r.declare(types2.NewFunc(pos, r.currPkg, name, sig))
- case 'T':
+ case 'T', 'U':
+ var tparams []*types2.TypeName
+ if tag == 'U' {
+ tparams = r.tparamList()
+ }
+
// Types can be recursive. We need to setup a stub
// declaration before recursing.
obj := types2.NewTypeName(pos, r.currPkg, name, nil)
named := types2.NewNamed(obj, nil, nil)
+ if tag == 'U' {
+ named.SetTParams(tparams)
+ }
r.declare(obj)
underlying := r.p.typAt(r.uint64(), named).Underlying()
@@ -296,10 +342,43 @@ func (r *importReader) obj(name string) {
recv := r.param()
msig := r.signature(recv)
+ // If the receiver has any targs, set those as the
+ // rparams of the method (since those are the
+ // typeparams being used in the method sig/body).
+ targs := baseType(msig.Recv().Type()).TArgs()
+ if len(targs) > 0 {
+ rparams := make([]*types2.TypeName, len(targs))
+ for i, targ := range targs {
+ rparams[i] = types2.AsTypeParam(targ).Obj()
+ }
+ msig.SetRParams(rparams)
+ }
+
named.AddMethod(types2.NewFunc(mpos, r.currPkg, mname, msig))
}
}
+ case 'P':
+ // We need to "declare" a typeparam in order to have a name that
+ // can be referenced recursively (if needed) in the type param's
+ // bound.
+ if r.p.exportVersion < iexportVersionGenerics {
+ errorf("unexpected type param type")
+ }
+ name0, sub := parseSubscript(name)
+ tn := types2.NewTypeName(pos, r.currPkg, name0, nil)
+ t := (*types2.Checker)(nil).NewTypeParam(tn, nil)
+ if sub == 0 {
+ errorf("missing subscript")
+ }
+ t.SetId(sub)
+ // To handle recursive references to the typeparam within its
+ // bound, save the partial type in tparamIndex before reading the bounds.
+ id := ident{r.currPkg.Name(), name}
+ r.p.tparamIndex[id] = t
+
+ t.SetConstraint(r.typ())
+
case 'V':
typ := r.typ()
@@ -439,12 +518,11 @@ func (r *importReader) pos() syntax.Pos {
r.posv0()
}
- if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 {
+ if (r.prevPosBase == nil || r.prevPosBase.Filename() == "") && r.prevLine == 0 && r.prevColumn == 0 {
return syntax.Pos{}
}
- // TODO(gri) fix this
- // return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn))
- return syntax.Pos{}
+
+ return syntax.MakePos(r.prevPosBase, uint(r.prevLine), uint(r.prevColumn))
}
func (r *importReader) posv0() {
@@ -454,7 +532,7 @@ func (r *importReader) posv0() {
} else if l := r.int64(); l == -1 {
r.prevLine += deltaNewFile
} else {
- r.prevFile = r.string()
+ r.prevPosBase = r.posBase()
r.prevLine = l
}
}
@@ -466,7 +544,7 @@ func (r *importReader) posv1() {
delta = r.int64()
r.prevLine += delta >> 1
if delta&1 != 0 {
- r.prevFile = r.string()
+ r.prevPosBase = r.posBase()
}
}
}
@@ -480,8 +558,9 @@ func isInterface(t types2.Type) bool {
return ok
}
-func (r *importReader) pkg() *types2.Package { return r.p.pkgAt(r.uint64()) }
-func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
+func (r *importReader) pkg() *types2.Package { return r.p.pkgAt(r.uint64()) }
+func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
+func (r *importReader) posBase() *syntax.PosBase { return r.p.posBaseAt(r.uint64()) }
func (r *importReader) doType(base *types2.Named) types2.Type {
switch k := r.kind(); k {
@@ -554,6 +633,47 @@ func (r *importReader) doType(base *types2.Named) types2.Type {
typ := types2.NewInterfaceType(methods, embeddeds)
r.p.interfaceList = append(r.p.interfaceList, typ)
return typ
+
+ case typeParamType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ errorf("unexpected type param type")
+ }
+ pkg, name := r.qualifiedIdent()
+ id := ident{pkg.Name(), name}
+ if t, ok := r.p.tparamIndex[id]; ok {
+ // We're already in the process of importing this typeparam.
+ return t
+ }
+ // Otherwise, import the definition of the typeparam now.
+ r.p.doDecl(pkg, name)
+ return r.p.tparamIndex[id]
+
+ case instType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ errorf("unexpected instantiation type")
+ }
+ pos := r.pos()
+ len := r.uint64()
+ targs := make([]types2.Type, len)
+ for i := range targs {
+ targs[i] = r.typ()
+ }
+ baseType := r.typ()
+ // The imported instantiated type doesn't include any methods, so
+ // we must always use the methods of the base (orig) type.
+ var check *types2.Checker // TODO provide a non-nil *Checker
+ t := check.Instantiate(pos, baseType, targs, nil, false)
+ return t
+
+ case unionType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ errorf("unexpected instantiation type")
+ }
+ terms := make([]*types2.Term, r.uint64())
+ for i := range terms {
+ terms[i] = types2.NewTerm(r.bool(), r.typ())
+ }
+ return types2.NewUnion(terms)
}
}
@@ -568,6 +688,19 @@ func (r *importReader) signature(recv *types2.Var) *types2.Signature {
return types2.NewSignature(recv, params, results, variadic)
}
+func (r *importReader) tparamList() []*types2.TypeName {
+ n := r.uint64()
+ if n == 0 {
+ return nil
+ }
+ xs := make([]*types2.TypeName, n)
+ for i := range xs {
+ typ := r.typ()
+ xs[i] = types2.AsTypeParam(typ).Obj()
+ }
+ return xs
+}
+
func (r *importReader) paramList() *types2.Tuple {
xs := make([]*types2.Var, r.uint64())
for i := range xs {
@@ -610,3 +743,33 @@ func (r *importReader) byte() byte {
}
return x
}
+
+func baseType(typ types2.Type) *types2.Named {
+ // pointer receivers are never types2.Named types
+ if p, _ := typ.(*types2.Pointer); p != nil {
+ typ = p.Elem()
+ }
+ // receiver base types are always (possibly generic) types2.Named types
+ n, _ := typ.(*types2.Named)
+ return n
+}
+
+func parseSubscript(name string) (string, uint64) {
+ // Extract the subscript value from the type param name. We export
+ // and import the subscript value, so that all type params have
+ // unique names.
+ sub := uint64(0)
+ startsub := -1
+ for i, r := range name {
+ if '₀' <= r && r < '₀'+10 {
+ if startsub == -1 {
+ startsub = i
+ }
+ sub = sub*10 + uint64(r-'₀')
+ }
+ }
+ if startsub >= 0 {
+ name = name[:startsub]
+ }
+ return name, sub
+}
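
The rewritten stringAt above decodes a uvarint length prefix out of a string by copying at most binary.MaxVarintLen64 bytes into a scratch array, then returns a slice of the string data directly instead of caching copies. A standalone sketch of that encoding and decoding, using a simplified buffer layout rather than the real export-data format:

// varintstr.go: sketch of uvarint length-prefixed strings as read by stringAt.
package main

import (
	"encoding/binary"
	"fmt"
	"log"
)

// appendString appends len(s) as a uvarint followed by the bytes of s.
func appendString(buf []byte, s string) []byte {
	var tmp [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(tmp[:], uint64(len(s)))
	return append(append(buf, tmp[:n]...), s...)
}

// stringAt mirrors the importer's approach: the data is held as a string,
// so copy at most MaxVarintLen64 bytes into a scratch array to let
// binary.Uvarint (which wants a []byte) read the length prefix, then
// slice the string itself for the payload.
func stringAt(data string, off uint64) string {
	var x [binary.MaxVarintLen64]byte
	n := copy(x[:], data[off:])
	slen, n := binary.Uvarint(x[:n])
	if n <= 0 {
		log.Fatal("varint failed")
	}
	spos := off + uint64(n)
	return data[spos : spos+slen]
}

func main() {
	var buf []byte
	buf = appendString(buf, "hello")
	buf = appendString(buf, "export data")

	data := string(buf)
	first := stringAt(data, 0)
	second := stringAt(data, uint64(1+len(first))) // 1-byte uvarint for short strings
	fmt.Println(first, "/", second)
}
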
diff --git a/src/cmd/compile/internal/importer/support.go b/src/cmd/compile/internal/importer/support.go
index 40b9c7c958..6ceb413601 100644
--- a/src/cmd/compile/internal/importer/support.go
+++ b/src/cmd/compile/internal/importer/support.go
@@ -1,4 +1,3 @@
-// UNREVIEWED
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -120,6 +119,9 @@ var predeclared = []types2.Type{
// used internally by gc; never used by this package or in .a files
anyType{},
+
+ // comparable
+ types2.Universe.Lookup("comparable").Type(),
}
type anyType struct{}
diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go
index d6b4ced4e1..45a533fcaf 100644
--- a/src/cmd/compile/internal/inline/inl.go
+++ b/src/cmd/compile/internal/inline/inl.go
@@ -179,6 +179,8 @@ func CanInline(fn *ir.Func) {
Cost: inlineMaxBudget - visitor.budget,
Dcl: pruneUnusedAutos(n.Defn.(*ir.Func).Dcl, &visitor),
Body: inlcopylist(fn.Body),
+
+ CanDelayResults: canDelayResults(fn),
}
if base.Flag.LowerM > 1 {
@@ -191,60 +193,36 @@ func CanInline(fn *ir.Func) {
}
}
-// Inline_Flood marks n's inline body for export and recursively ensures
-// all called functions are marked too.
-func Inline_Flood(n *ir.Name, exportsym func(*ir.Name)) {
- if n == nil {
- return
- }
- if n.Op() != ir.ONAME || n.Class != ir.PFUNC {
- base.Fatalf("Inline_Flood: unexpected %v, %v, %v", n, n.Op(), n.Class)
- }
- fn := n.Func
- if fn == nil {
- base.Fatalf("Inline_Flood: missing Func on %v", n)
- }
- if fn.Inl == nil {
- return
- }
-
- if fn.ExportInline() {
- return
- }
- fn.SetExportInline(true)
-
- typecheck.ImportedBody(fn)
-
- var doFlood func(n ir.Node)
- doFlood = func(n ir.Node) {
- switch n.Op() {
- case ir.OMETHEXPR, ir.ODOTMETH:
- Inline_Flood(ir.MethodExprName(n), exportsym)
+// canDelayResults reports whether inlined calls to fn can delay
+// declaring the result parameter until the "return" statement.
+func canDelayResults(fn *ir.Func) bool {
+ // We can delay declaring+initializing result parameters if:
+ // (1) there's exactly one "return" statement in the inlined function;
+ // (2) it's not an empty return statement (#44355); and
+ // (3) the result parameters aren't named.
- case ir.ONAME:
- n := n.(*ir.Name)
- switch n.Class {
- case ir.PFUNC:
- Inline_Flood(n, exportsym)
- exportsym(n)
- case ir.PEXTERN:
- exportsym(n)
+ nreturns := 0
+ ir.VisitList(fn.Body, func(n ir.Node) {
+ if n, ok := n.(*ir.ReturnStmt); ok {
+ nreturns++
+ if len(n.Results) == 0 {
+ nreturns++ // empty return statement (case 2)
}
+ }
+ })
- case ir.OCALLPART:
- // Okay, because we don't yet inline indirect
- // calls to method values.
- case ir.OCLOSURE:
- // VisitList doesn't visit closure bodies, so force a
- // recursive call to VisitList on the body of the closure.
- ir.VisitList(n.(*ir.ClosureExpr).Func.Body, doFlood)
+ if nreturns != 1 {
+ return false // not exactly one return statement (case 1)
+ }
+
+ // Result parameters must not be named (case 3).
+ for _, param := range fn.Type().Results().FieldSlice() {
+ if sym := types.OrigSym(param.Sym); sym != nil && !sym.IsBlank() {
+ return false // found a named result parameter (case 3)
}
}
- // Recursively identify all referenced functions for
- // reexport. We want to include even non-called functions,
- // because after inlining they might be callable.
- ir.VisitList(ir.Nodes(fn.Inl.Body), doFlood)
+ return true
}
// hairyVisitor visits a function body to determine its inlining
@@ -295,6 +273,19 @@ func (v *hairyVisitor) doNode(n ir.Node) bool {
}
}
}
+ if n.X.Op() == ir.OMETHEXPR {
+ if meth := ir.MethodExprName(n.X); meth != nil {
+ fn := meth.Func
+ if fn != nil && types.IsRuntimePkg(fn.Sym().Pkg) && fn.Sym().Name == "heapBits.nextArena" {
+ // Special case: explicitly allow
+ // mid-stack inlining of
+ // runtime.heapBits.next even though
+ // it calls slow-path
+ // runtime.heapBits.nextArena.
+ break
+ }
+ }
+ }
if ir.IsIntrinsicCall(n) {
// Treat like any other node.
@@ -309,28 +300,8 @@ func (v *hairyVisitor) doNode(n ir.Node) bool {
// Call cost for non-leaf inlining.
v.budget -= v.extraCallCost
- // Call is okay if inlinable and we have the budget for the body.
case ir.OCALLMETH:
- n := n.(*ir.CallExpr)
- t := n.X.Type()
- if t == nil {
- base.Fatalf("no function type for [%p] %+v\n", n.X, n.X)
- }
- fn := ir.MethodExprName(n.X).Func
- if types.IsRuntimePkg(fn.Sym().Pkg) && fn.Sym().Name == "heapBits.nextArena" {
- // Special case: explicitly allow
- // mid-stack inlining of
- // runtime.heapBits.next even though
- // it calls slow-path
- // runtime.heapBits.nextArena.
- break
- }
- if fn.Inl != nil {
- v.budget -= fn.Inl.Cost
- break
- }
- // Call cost for non-leaf inlining.
- v.budget -= v.extraCallCost
+ base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
// Things that are too hairy, irrespective of the budget
case ir.OCALL, ir.OCALLINTER:
@@ -445,7 +416,7 @@ func (v *hairyVisitor) doNode(n ir.Node) bool {
// and don't charge for the OBLOCK itself. The ++ undoes the -- below.
v.budget++
- case ir.OCALLPART, ir.OSLICELIT:
+ case ir.OMETHVALUE, ir.OSLICELIT:
v.budget-- // Hack for toolstash -cmp.
case ir.OMETHEXPR:
@@ -499,9 +470,6 @@ func inlcopy(n ir.Node) ir.Node {
// x.Func.Body for iexport and local inlining.
oldfn := x.Func
newfn := ir.NewFunc(oldfn.Pos())
- if oldfn.ClosureCalled() {
- newfn.SetClosureCalled(true)
- }
m.(*ir.ClosureExpr).Func = newfn
newfn.Nname = ir.NewNameAt(oldfn.Nname.Pos(), oldfn.Nname.Sym())
// XXX OK to share fn.Type() ??
@@ -544,37 +512,6 @@ func InlineCalls(fn *ir.Func) {
ir.CurFunc = savefn
}
-// Turn an OINLCALL into a statement.
-func inlconv2stmt(inlcall *ir.InlinedCallExpr) ir.Node {
- n := ir.NewBlockStmt(inlcall.Pos(), nil)
- n.List = inlcall.Init()
- n.List.Append(inlcall.Body.Take()...)
- return n
-}
-
-// Turn an OINLCALL into a single valued expression.
-// The result of inlconv2expr MUST be assigned back to n, e.g.
-// n.Left = inlconv2expr(n.Left)
-func inlconv2expr(n *ir.InlinedCallExpr) ir.Node {
- r := n.ReturnVars[0]
- return ir.InitExpr(append(n.Init(), n.Body...), r)
-}
-
-// Turn the rlist (with the return values) of the OINLCALL in
-// n into an expression list lumping the ninit and body
-// containing the inlined statements on the first list element so
-// order will be preserved. Used in return, oas2func and call
-// statements.
-func inlconv2list(n *ir.InlinedCallExpr) []ir.Node {
- if n.Op() != ir.OINLCALL || len(n.ReturnVars) == 0 {
- base.Fatalf("inlconv2list %+v\n", n)
- }
-
- s := n.ReturnVars
- s[0] = ir.InitExpr(append(n.Init(), n.Body...), s[0])
- return s
-}
-
// inlnode recurses over the tree to find inlineable calls, which will
// be turned into OINLCALLs by mkinlcall. When the recursion comes
// back up will examine left, right, list, rlist, ninit, ntest, nincr,
@@ -597,7 +534,9 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No
case ir.ODEFER, ir.OGO:
n := n.(*ir.GoDeferStmt)
switch call := n.Call; call.Op() {
- case ir.OCALLFUNC, ir.OCALLMETH:
+ case ir.OCALLMETH:
+ base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
+ case ir.OCALLFUNC:
call := call.(*ir.CallExpr)
call.NoInline = true
}
@@ -607,11 +546,18 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No
case ir.OCLOSURE:
return n
case ir.OCALLMETH:
- // Prevent inlining some reflect.Value methods when using checkptr,
- // even when package reflect was compiled without it (#35073).
+ base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
+ case ir.OCALLFUNC:
n := n.(*ir.CallExpr)
- if s := ir.MethodExprName(n.X).Sym(); base.Debug.Checkptr != 0 && types.IsReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") {
- return n
+ if n.X.Op() == ir.OMETHEXPR {
+ // Prevent inlining some reflect.Value methods when using checkptr,
+ // even when package reflect was compiled without it (#35073).
+ if meth := ir.MethodExprName(n.X); meth != nil {
+ s := meth.Sym()
+ if base.Debug.Checkptr != 0 && types.IsReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") {
+ return n
+ }
+ }
}
}
@@ -619,31 +565,18 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No
ir.EditChildren(n, edit)
- if as := n; as.Op() == ir.OAS2FUNC {
- as := as.(*ir.AssignListStmt)
- if as.Rhs[0].Op() == ir.OINLCALL {
- as.Rhs = inlconv2list(as.Rhs[0].(*ir.InlinedCallExpr))
- as.SetOp(ir.OAS2)
- as.SetTypecheck(0)
- n = typecheck.Stmt(as)
- }
- }
-
// with all the branches out of the way, it is now time to
// transmogrify this node itself unless inhibited by the
// switch at the top of this function.
switch n.Op() {
- case ir.OCALLFUNC, ir.OCALLMETH:
- n := n.(*ir.CallExpr)
- if n.NoInline {
- return n
- }
- }
+ case ir.OCALLMETH:
+ base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
- var call *ir.CallExpr
- switch n.Op() {
case ir.OCALLFUNC:
- call = n.(*ir.CallExpr)
+ call := n.(*ir.CallExpr)
+ if call.NoInline {
+ break
+ }
if base.Flag.LowerM > 3 {
fmt.Printf("%v:call to func %+v\n", ir.Line(n), call.X)
}
@@ -653,38 +586,10 @@ func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.No
if fn := inlCallee(call.X); fn != nil && fn.Inl != nil {
n = mkinlcall(call, fn, maxCost, inlMap, edit)
}
-
- case ir.OCALLMETH:
- call = n.(*ir.CallExpr)
- if base.Flag.LowerM > 3 {
- fmt.Printf("%v:call to meth %v\n", ir.Line(n), call.X.(*ir.SelectorExpr).Sel)
- }
-
- // typecheck should have resolved ODOTMETH->type, whose nname points to the actual function.
- if call.X.Type() == nil {
- base.Fatalf("no function type for [%p] %+v\n", call.X, call.X)
- }
-
- n = mkinlcall(call, ir.MethodExprName(call.X).Func, maxCost, inlMap, edit)
}
base.Pos = lno
- if n.Op() == ir.OINLCALL {
- ic := n.(*ir.InlinedCallExpr)
- switch call.Use {
- default:
- ir.Dump("call", call)
- base.Fatalf("call missing use")
- case ir.CallUseExpr:
- n = inlconv2expr(ic)
- case ir.CallUseStmt:
- n = inlconv2stmt(ic)
- case ir.CallUseList:
- // leave for caller to convert
- }
- }
-
return n
}
@@ -740,7 +645,12 @@ var inlgen int
// when producing output for debugging the compiler itself.
var SSADumpInline = func(*ir.Func) {}
-// If n is a call node (OCALLFUNC or OCALLMETH), and fn is an ONAME node for a
+// NewInline allows the inliner implementation to be overridden.
+// If it returns nil, the legacy inliner will handle this call
+// instead.
+var NewInline = func(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr { return nil }
+
+// If n is a OCALLFUNC node, and fn is an ONAME node for a
// function with an inlinable body, return an OINLCALL node that can replace n.
// The returned node's Ninit has the parameter assignments, the Nbody is the
// inlined function body, and (List, Rlist) contain the (input, output)
@@ -793,38 +703,90 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
defer func() {
inlMap[fn] = false
}()
- if base.Debug.TypecheckInl == 0 {
- typecheck.ImportedBody(fn)
+
+ typecheck.FixVariadicCall(n)
+
+ parent := base.Ctxt.PosTable.Pos(n.Pos()).Base().InliningIndex()
+
+ sym := fn.Linksym()
+ inlIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), sym)
+
+ if base.Flag.GenDwarfInl > 0 {
+ if !sym.WasInlined() {
+ base.Ctxt.DwFixups.SetPrecursorFunc(sym, fn)
+ sym.Set(obj.AttrWasInlined, true)
+ }
}
- // We have a function node, and it has an inlineable body.
- if base.Flag.LowerM > 1 {
- fmt.Printf("%v: inlining call to %v %v { %v }\n", ir.Line(n), fn.Sym(), fn.Type(), ir.Nodes(fn.Inl.Body))
- } else if base.Flag.LowerM != 0 {
+ if base.Flag.LowerM != 0 {
fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn)
}
if base.Flag.LowerM > 2 {
fmt.Printf("%v: Before inlining: %+v\n", ir.Line(n), n)
}
- SSADumpInline(fn)
+ res := NewInline(n, fn, inlIndex)
+ if res == nil {
+ res = oldInline(n, fn, inlIndex)
+ }
+
+ // transitive inlining
+ // might be nice to do this before exporting the body,
+ // but can't emit the body with inlining expanded.
+ // instead we emit the things that the body needs
+ // and each use must redo the inlining.
+ // luckily these are small.
+ ir.EditChildren(res, edit)
- ninit := n.Init()
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v: After inlining %+v\n\n", ir.Line(res), res)
+ }
- // For normal function calls, the function callee expression
- // may contain side effects (e.g., added by addinit during
- // inlconv2expr or inlconv2list). Make sure to preserve these,
- // if necessary (#42703).
- if n.Op() == ir.OCALLFUNC {
- callee := n.X
- for callee.Op() == ir.OCONVNOP {
+ return res
+}
+
+// CalleeEffects appends any side effects from evaluating callee to init.
+func CalleeEffects(init *ir.Nodes, callee ir.Node) {
+ for {
+ switch callee.Op() {
+ case ir.ONAME, ir.OCLOSURE, ir.OMETHEXPR:
+ return // done
+
+ case ir.OCONVNOP:
conv := callee.(*ir.ConvExpr)
- ninit.Append(ir.TakeInit(conv)...)
+ init.Append(ir.TakeInit(conv)...)
callee = conv.X
+
+ case ir.OINLCALL:
+ ic := callee.(*ir.InlinedCallExpr)
+ init.Append(ir.TakeInit(ic)...)
+ init.Append(ic.Body.Take()...)
+ callee = ic.SingleResult()
+
+ default:
+ base.FatalfAt(callee.Pos(), "unexpected callee expression: %v", callee)
}
- if callee.Op() != ir.ONAME && callee.Op() != ir.OCLOSURE && callee.Op() != ir.OMETHEXPR {
- base.Fatalf("unexpected callee expression: %v", callee)
- }
+ }
+}
+
+// oldInline creates an InlinedCallExpr to replace the given call
+// expression. fn is the callee function to be inlined. inlIndex is
+// the inlining tree position index, for use with src.NewInliningBase
+// when rewriting positions.
+func oldInline(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr {
+ if base.Debug.TypecheckInl == 0 {
+ typecheck.ImportedBody(fn)
+ }
+
+ SSADumpInline(fn)
+
+ ninit := call.Init()
+
+ // For normal function calls, the function callee expression
+ // may contain side effects. Make sure to preserve these,
+ // if necessary (#42703).
+ if call.Op() == ir.OCALLFUNC {
+ CalleeEffects(&ninit, call.X)
}
// Make temp names to use instead of the originals.
@@ -854,25 +816,6 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
}
// We can delay declaring+initializing result parameters if:
- // (1) there's exactly one "return" statement in the inlined function;
- // (2) it's not an empty return statement (#44355); and
- // (3) the result parameters aren't named.
- delayretvars := true
-
- nreturns := 0
- ir.VisitList(ir.Nodes(fn.Inl.Body), func(n ir.Node) {
- if n, ok := n.(*ir.ReturnStmt); ok {
- nreturns++
- if len(n.Results) == 0 {
- delayretvars = false // empty return statement (case 2)
- }
- }
- })
-
- if nreturns != 1 {
- delayretvars = false // not exactly one return statement (case 1)
- }
-
// temporaries for return values.
var retvars []ir.Node
for i, t := range fn.Type().Results().Fields().Slice() {
@@ -882,7 +825,6 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
m = inlvar(n)
m = typecheck.Expr(m).(*ir.Name)
inlvars[n] = m
- delayretvars = false // found a named result parameter (case 3)
} else {
// anonymous return values, synthesize names for use in assignment that replaces return
m = retvar(t, i)
@@ -905,61 +847,23 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
// Assign arguments to the parameters' temp names.
as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
as.Def = true
- if n.Op() == ir.OCALLMETH {
- sel := n.X.(*ir.SelectorExpr)
- if sel.X == nil {
- base.Fatalf("method call without receiver: %+v", n)
- }
- as.Rhs.Append(sel.X)
+ if call.Op() == ir.OCALLMETH {
+ base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
}
- as.Rhs.Append(n.Args...)
-
- // For non-dotted calls to variadic functions, we assign the
- // variadic parameter's temp name separately.
- var vas *ir.AssignStmt
+ as.Rhs.Append(call.Args...)
if recv := fn.Type().Recv(); recv != nil {
as.Lhs.Append(inlParam(recv, as, inlvars))
}
for _, param := range fn.Type().Params().Fields().Slice() {
- // For ordinary parameters or variadic parameters in
- // dotted calls, just add the variable to the
- // assignment list, and we're done.
- if !param.IsDDD() || n.IsDDD {
- as.Lhs.Append(inlParam(param, as, inlvars))
- continue
- }
-
- // Otherwise, we need to collect the remaining values
- // to pass as a slice.
-
- x := len(as.Lhs)
- for len(as.Lhs) < len(as.Rhs) {
- as.Lhs.Append(argvar(param.Type, len(as.Lhs)))
- }
- varargs := as.Lhs[x:]
-
- vas = ir.NewAssignStmt(base.Pos, nil, nil)
- vas.X = inlParam(param, vas, inlvars)
- if len(varargs) == 0 {
- vas.Y = typecheck.NodNil()
- vas.Y.SetType(param.Type)
- } else {
- lit := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(param.Type), nil)
- lit.List = varargs
- vas.Y = lit
- }
+ as.Lhs.Append(inlParam(param, as, inlvars))
}
if len(as.Rhs) != 0 {
ninit.Append(typecheck.Stmt(as))
}
- if vas != nil {
- ninit.Append(typecheck.Stmt(vas))
- }
-
- if !delayretvars {
+ if !fn.Inl.CanDelayResults {
// Zero the return parameters.
for _, n := range retvars {
ninit.Append(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name)))
@@ -972,40 +876,21 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
inlgen++
- parent := -1
- if b := base.Ctxt.PosTable.Pos(n.Pos()).Base(); b != nil {
- parent = b.InliningIndex()
- }
-
- sym := fn.Linksym()
- newIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), sym)
-
// Add an inline mark just before the inlined body.
// This mark is inline in the code so that it's a reasonable spot
// to put a breakpoint. Not sure if that's really necessary or not
// (in which case it could go at the end of the function instead).
// Note issue 28603.
- inlMark := ir.NewInlineMarkStmt(base.Pos, types.BADWIDTH)
- inlMark.SetPos(n.Pos().WithIsStmt())
- inlMark.Index = int64(newIndex)
- ninit.Append(inlMark)
-
- if base.Flag.GenDwarfInl > 0 {
- if !sym.WasInlined() {
- base.Ctxt.DwFixups.SetPrecursorFunc(sym, fn)
- sym.Set(obj.AttrWasInlined, true)
- }
- }
+ ninit.Append(ir.NewInlineMarkStmt(call.Pos().WithIsStmt(), int64(inlIndex)))
subst := inlsubst{
- retlabel: retlabel,
- retvars: retvars,
- delayretvars: delayretvars,
- inlvars: inlvars,
- defnMarker: ir.NilExpr{},
- bases: make(map[*src.PosBase]*src.PosBase),
- newInlIndex: newIndex,
- fn: fn,
+ retlabel: retlabel,
+ retvars: retvars,
+ inlvars: inlvars,
+ defnMarker: ir.NilExpr{},
+ bases: make(map[*src.PosBase]*src.PosBase),
+ newInlIndex: inlIndex,
+ fn: fn,
}
subst.edit = subst.node
@@ -1026,26 +911,11 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
//dumplist("ninit post", ninit);
- call := ir.NewInlinedCallExpr(base.Pos, nil, nil)
- *call.PtrInit() = ninit
- call.Body = body
- call.ReturnVars = retvars
- call.SetType(n.Type())
- call.SetTypecheck(1)
-
- // transitive inlining
- // might be nice to do this before exporting the body,
- // but can't emit the body with inlining expanded.
- // instead we emit the things that the body needs
- // and each use must redo the inlining.
- // luckily these are small.
- ir.EditChildren(call, edit)
-
- if base.Flag.LowerM > 2 {
- fmt.Printf("%v: After inlining %+v\n\n", ir.Line(call), call)
- }
-
- return call
+ res := ir.NewInlinedCallExpr(base.Pos, body, retvars)
+ res.SetInit(ninit)
+ res.SetType(call.Type())
+ res.SetTypecheck(1)
+ return res
}
// Every time we expand a function we generate a new set of tmpnames,
@@ -1058,8 +928,10 @@ func inlvar(var_ *ir.Name) *ir.Name {
n := typecheck.NewName(var_.Sym())
n.SetType(var_.Type())
+ n.SetTypecheck(1)
n.Class = ir.PAUTO
n.SetUsed(true)
+ n.SetAutoTemp(var_.AutoTemp())
n.Curfn = ir.CurFunc // the calling function, not the called one
n.SetAddrtaken(var_.Addrtaken())
@@ -1071,18 +943,7 @@ func inlvar(var_ *ir.Name) *ir.Name {
func retvar(t *types.Field, i int) *ir.Name {
n := typecheck.NewName(typecheck.LookupNum("~R", i))
n.SetType(t.Type)
- n.Class = ir.PAUTO
- n.SetUsed(true)
- n.Curfn = ir.CurFunc // the calling function, not the called one
- ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n)
- return n
-}
-
-// Synthesize a variable to store the inlined function's arguments
-// when they come from a multiple return call.
-func argvar(t *types.Type, i int) ir.Node {
- n := typecheck.NewName(typecheck.LookupNum("~arg", i))
- n.SetType(t.Elem())
+ n.SetTypecheck(1)
n.Class = ir.PAUTO
n.SetUsed(true)
n.Curfn = ir.CurFunc // the calling function, not the called one
@@ -1099,10 +960,6 @@ type inlsubst struct {
// Temporary result variables.
retvars []ir.Node
- // Whether result variables should be initialized at the
- // "return" statement.
- delayretvars bool
-
inlvars map[*ir.Name]*ir.Name
// defnMarker is used to mark a Node for reassignment.
// inlsubst.clovar set this during creating new ONAME.
@@ -1157,17 +1014,21 @@ func (subst *inlsubst) fields(oldt *types.Type) []*types.Field {
// clovar creates a new ONAME node for a local variable or param of a closure
// inside a function being inlined.
func (subst *inlsubst) clovar(n *ir.Name) *ir.Name {
- // TODO(danscales): want to get rid of this shallow copy, with code like the
- // following, but it is hard to copy all the necessary flags in a maintainable way.
- // m := ir.NewNameAt(n.Pos(), n.Sym())
- // m.Class = n.Class
- // m.SetType(n.Type())
- // m.SetTypecheck(1)
- //if n.IsClosureVar() {
- // m.SetIsClosureVar(true)
- //}
- m := &ir.Name{}
- *m = *n
+ m := ir.NewNameAt(n.Pos(), n.Sym())
+ m.Class = n.Class
+ m.SetType(n.Type())
+ m.SetTypecheck(1)
+ if n.IsClosureVar() {
+ m.SetIsClosureVar(true)
+ }
+ if n.Addrtaken() {
+ m.SetAddrtaken(true)
+ }
+ if n.Used() {
+ m.SetUsed(true)
+ }
+ m.Defn = n.Defn
+
m.Curfn = subst.newclofn
switch defn := n.Defn.(type) {
@@ -1222,8 +1083,6 @@ func (subst *inlsubst) clovar(n *ir.Name) *ir.Name {
// closure does the necessary substitions for a ClosureExpr n and returns the new
// closure node.
func (subst *inlsubst) closure(n *ir.ClosureExpr) ir.Node {
- m := ir.Copy(n)
-
// Prior to the subst edit, set a flag in the inlsubst to
// indicated that we don't want to update the source positions in
// the new closure. If we do this, it will appear that the closure
@@ -1231,29 +1090,16 @@ func (subst *inlsubst) closure(n *ir.ClosureExpr) ir.Node {
// issue #46234 for more details.
defer func(prev bool) { subst.noPosUpdate = prev }(subst.noPosUpdate)
subst.noPosUpdate = true
- ir.EditChildren(m, subst.edit)
//fmt.Printf("Inlining func %v with closure into %v\n", subst.fn, ir.FuncName(ir.CurFunc))
- // The following is similar to funcLit
oldfn := n.Func
- newfn := ir.NewFunc(oldfn.Pos())
- // These three lines are not strictly necessary, but just to be clear
- // that new function needs to redo typechecking and inlinability.
- newfn.SetTypecheck(0)
- newfn.SetInlinabilityChecked(false)
- newfn.Inl = nil
- newfn.SetIsHiddenClosure(true)
- newfn.Nname = ir.NewNameAt(n.Pos(), ir.BlankNode.Sym())
- newfn.Nname.Func = newfn
+ newfn := ir.NewClosureFunc(oldfn.Pos(), true)
+
// Ntype can be nil for -G=3 mode.
if oldfn.Nname.Ntype != nil {
newfn.Nname.Ntype = subst.node(oldfn.Nname.Ntype).(ir.Ntype)
}
- newfn.Nname.Defn = newfn
-
- m.(*ir.ClosureExpr).Func = newfn
- newfn.OClosure = m.(*ir.ClosureExpr)
if subst.newclofn != nil {
//fmt.Printf("Inlining a closure with a nested closure\n")
@@ -1303,13 +1149,9 @@ func (subst *inlsubst) closure(n *ir.ClosureExpr) ir.Node {
// Actually create the named function for the closure, now that
// the closure is inlined in a specific function.
- m.SetTypecheck(0)
- if oldfn.ClosureCalled() {
- typecheck.Callee(m)
- } else {
- typecheck.Expr(m)
- }
- return m
+ newclo := newfn.OClosure
+ newclo.SetInit(subst.list(n.Init()))
+ return typecheck.Expr(newclo)
}
// node recursively copies a node from the saved pristine body of the
@@ -1391,7 +1233,7 @@ func (subst *inlsubst) node(n ir.Node) ir.Node {
}
as.Rhs = subst.list(n.Results)
- if subst.delayretvars {
+ if subst.fn.Inl.CanDelayResults {
for _, n := range as.Lhs {
as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name)))
n.Name().Defn = as
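
canDelayResults above centralizes the three conditions that previously lived inline in mkinlcall: exactly one return statement, not an empty return (#44355), and no named result parameters. A toy standalone restatement of those rules over a simplified function summary (not the compiler's ir.Func):

// candelay.go: toy restatement of the canDelayResults rules.
package main

import "fmt"

// returnStmt summarizes a single return statement.
type returnStmt struct {
	numResults int // number of operands in the return statement
}

// funcSummary is a hypothetical stand-in for the information the inliner
// inspects on an ir.Func.
type funcSummary struct {
	returns      []returnStmt
	namedResults bool
}

// canDelayResults mirrors the three conditions in the inliner:
// (1) exactly one return statement, (2) it is not an empty "return",
// and (3) the result parameters are unnamed.
func canDelayResults(fn funcSummary) bool {
	if len(fn.returns) != 1 {
		return false // case 1: not exactly one return statement
	}
	if fn.returns[0].numResults == 0 {
		return false // case 2: empty return statement (#44355)
	}
	if fn.namedResults {
		return false // case 3: named result parameters
	}
	return true
}

func main() {
	fmt.Println(canDelayResults(funcSummary{returns: []returnStmt{{numResults: 1}}}))         // true
	fmt.Println(canDelayResults(funcSummary{returns: []returnStmt{{numResults: 0}}}))         // false: empty return
	fmt.Println(canDelayResults(funcSummary{returns: []returnStmt{{1}, {1}}}))                // false: two returns
	fmt.Println(canDelayResults(funcSummary{returns: []returnStmt{{1}}, namedResults: true})) // false: named results
}
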
diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go
index f70645f079..dc28483907 100644
--- a/src/cmd/compile/internal/ir/expr.go
+++ b/src/cmd/compile/internal/ir/expr.go
@@ -142,28 +142,15 @@ func (n *BinaryExpr) SetOp(op Op) {
}
}
-// A CallUse records how the result of the call is used:
-type CallUse byte
-
-const (
- _ CallUse = iota
-
- CallUseExpr // single expression result is used
- CallUseList // list of results are used
- CallUseStmt // results not used - call is a statement
-)
-
// A CallExpr is a function call X(Args).
type CallExpr struct {
miniExpr
origNode
- X Node
- Args Nodes
- KeepAlive []*Name // vars to be kept alive until call returns
- IsDDD bool
- Use CallUse
- NoInline bool
- PreserveClosure bool // disable directClosureCall for this call
+ X Node
+ Args Nodes
+ KeepAlive []*Name // vars to be kept alive until call returns
+ IsDDD bool
+ NoInline bool
}
func NewCallExpr(pos src.XPos, op Op, fun Node, args []Node) *CallExpr {
@@ -181,8 +168,12 @@ func (n *CallExpr) SetOp(op Op) {
switch op {
default:
panic(n.no("SetOp " + op.String()))
- case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH,
- OAPPEND, ODELETE, OGETG, OMAKE, OPRINT, OPRINTN, ORECOVER:
+ case OAPPEND,
+ OCALL, OCALLFUNC, OCALLINTER, OCALLMETH,
+ ODELETE,
+ OGETG, OGETCALLERPC, OGETCALLERSP,
+ OMAKE, OPRINT, OPRINTN,
+ ORECOVER, ORECOVERFP:
n.op = op
}
}
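With the Use and PreserveClosure fields gone, building a call node is just the constructor plus the remaining flags. A minimal sketch under the same caveats as above (pos, fn, and arg are placeholders; typechecking later rewrites OCALL to OCALLFUNC/OCALLMETH/OCALLINTER as appropriate):

func mkCallSketch(pos src.XPos, fn, arg ir.Node) *ir.CallExpr {
	call := ir.NewCallExpr(pos, ir.OCALL, fn, []ir.Node{arg})
	call.IsDDD = false // fixed arity; how the result is used is no longer recorded on the node
	return call
}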
@@ -192,8 +183,10 @@ type ClosureExpr struct {
miniExpr
Func *Func `mknode:"-"`
Prealloc *Name
+ IsGoWrap bool // whether this is the wrapper closure of a go statement
}
+// Deprecated: Use NewClosureFunc instead.
func NewClosureExpr(pos src.XPos, fn *Func) *ClosureExpr {
n := &ClosureExpr{Func: fn}
n.op = OCLOSURE
@@ -277,7 +270,7 @@ func (n *ConvExpr) SetOp(op Op) {
switch op {
default:
panic(n.no("SetOp " + op.String()))
- case OCONV, OCONVIFACE, OCONVNOP, OBYTES2STR, OBYTES2STRTMP, ORUNES2STR, OSTR2BYTES, OSTR2BYTESTMP, OSTR2RUNES, ORUNESTR, OSLICE2ARRPTR:
+ case OCONV, OCONVIFACE, OCONVIDATA, OCONVNOP, OBYTES2STR, OBYTES2STRTMP, ORUNES2STR, OSTR2BYTES, OSTR2BYTESTMP, OSTR2RUNES, ORUNESTR, OSLICE2ARRPTR:
n.op = op
}
}
@@ -323,26 +316,24 @@ func NewKeyExpr(pos src.XPos, key, value Node) *KeyExpr {
// A StructKeyExpr is a Field: Value composite literal key.
type StructKeyExpr struct {
miniExpr
- Field *types.Sym
- Value Node
- Offset int64
+ Field *types.Field
+ Value Node
}
-func NewStructKeyExpr(pos src.XPos, field *types.Sym, value Node) *StructKeyExpr {
+func NewStructKeyExpr(pos src.XPos, field *types.Field, value Node) *StructKeyExpr {
n := &StructKeyExpr{Field: field, Value: value}
n.pos = pos
n.op = OSTRUCTKEY
- n.Offset = types.BADWIDTH
return n
}
-func (n *StructKeyExpr) Sym() *types.Sym { return n.Field }
+func (n *StructKeyExpr) Sym() *types.Sym { return n.Field.Sym }
// An InlinedCallExpr is an inlined function call.
type InlinedCallExpr struct {
miniExpr
Body Nodes
- ReturnVars Nodes
+ ReturnVars Nodes // must be side-effect free
}
func NewInlinedCallExpr(pos src.XPos, body, retvars []Node) *InlinedCallExpr {
@@ -354,6 +345,21 @@ func NewInlinedCallExpr(pos src.XPos, body, retvars []Node) *InlinedCallExpr {
return n
}
+func (n *InlinedCallExpr) SingleResult() Node {
+ if have := len(n.ReturnVars); have != 1 {
+ base.FatalfAt(n.Pos(), "inlined call has %v results, expected 1", have)
+ }
+ if !n.Type().HasShape() && n.ReturnVars[0].Type().HasShape() {
+ // If the type of the call is not a shape, but the type of the return value
+ // is a shape, we need to do an implicit conversion, so the real type
+ // of n is maintained.
+ r := NewConvExpr(n.Pos(), OCONVNOP, n.Type(), n.ReturnVars[0])
+ r.SetTypecheck(1)
+ return r
+ }
+ return n.ReturnVars[0]
+}
+
// A LogicalExpr is an expression X Op Y where Op is && or ||.
// It is separate from BinaryExpr to make room for statements
// that must be executed before Y but after X.
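The SingleResult accessor added above is what lets expression-position consumers treat a one-result OINLCALL like the value it produces; the OINLCALL case added to StaticValue later in this file uses it exactly this way. A small caller-side sketch (n is a placeholder node; cmd/compile-internal packages assumed):

func unwrapInlinedSketch(n ir.Node) ir.Node {
	if n.Op() == ir.OINLCALL {
		// SingleResult Fatalfs unless there is exactly one result; the
		// OCONVNOP it may insert preserves the non-shape static type.
		return n.(*ir.InlinedCallExpr).SingleResult()
	}
	return n
}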
@@ -448,6 +454,20 @@ func (n *ParenExpr) SetOTYPE(t *types.Type) {
t.SetNod(n)
}
+// A RawOrigExpr represents an arbitrary Go expression as a string value.
+// When printed in diagnostics, the string value is written out exactly as-is.
+type RawOrigExpr struct {
+ miniExpr
+ Raw string
+}
+
+func NewRawOrigExpr(pos src.XPos, op Op, raw string) *RawOrigExpr {
+ n := &RawOrigExpr{Raw: raw}
+ n.pos = pos
+ n.op = op
+ return n
+}
+
// A ResultExpr represents a direct access to a result.
type ResultExpr struct {
miniExpr
@@ -494,10 +514,15 @@ func NewNameOffsetExpr(pos src.XPos, name *Name, offset int64, typ *types.Type)
// A SelectorExpr is a selector expression X.Sel.
type SelectorExpr struct {
miniExpr
- X Node
- Sel *types.Sym
+ X Node
+ // Sel is the name of the field or method being selected, without (in the
+ // case of methods) any preceding type specifier. If the field/method is
+ // exported, then the Sym uses the local package regardless of the package
+ // of the containing type.
+ Sel *types.Sym
+ // The actual selected field - may not be filled in until typechecking.
Selection *types.Field
- Prealloc *Name // preallocated storage for OCALLPART, if any
+ Prealloc *Name // preallocated storage for OMETHVALUE, if any
}
func NewSelectorExpr(pos src.XPos, op Op, x Node, sel *types.Sym) *SelectorExpr {
@@ -511,7 +536,7 @@ func (n *SelectorExpr) SetOp(op Op) {
switch op {
default:
panic(n.no("SetOp " + op.String()))
- case OXDOT, ODOT, ODOTPTR, ODOTMETH, ODOTINTER, OCALLPART, OMETHEXPR:
+ case OXDOT, ODOT, ODOTPTR, ODOTMETH, ODOTINTER, OMETHVALUE, OMETHEXPR:
n.op = op
}
}
@@ -652,6 +677,38 @@ func (n *TypeAssertExpr) SetOp(op Op) {
}
}
+// A DynamicTypeAssertExpr asserts that X is of dynamic type T.
+type DynamicTypeAssertExpr struct {
+ miniExpr
+ X Node
+ // N = not an interface
+ // E = empty interface
+ // I = nonempty interface
+ // For E->N, T is a *runtime.type for N
+ // For I->N, T is a *runtime.itab for N+I
+ // For E->I, T is a *runtime.type for I
+ // For I->I, ditto
+ // For I->E, T is a *runtime.type for interface{} (unnecessary, but just to fill in the slot)
+ // For E->E, ditto
+ T Node
+}
+
+func NewDynamicTypeAssertExpr(pos src.XPos, op Op, x, t Node) *DynamicTypeAssertExpr {
+ n := &DynamicTypeAssertExpr{X: x, T: t}
+ n.pos = pos
+ n.op = op
+ return n
+}
+
+func (n *DynamicTypeAssertExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case ODYNAMICDOTTYPE, ODYNAMICDOTTYPE2:
+ n.op = op
+ }
+}
+
// A UnaryExpr is a unary expression Op X,
// or Op(X) for a builtin function that does not end up being a call.
type UnaryExpr struct {
@@ -678,6 +735,11 @@ func (n *UnaryExpr) SetOp(op Op) {
}
}
+// Probably temporary: using Implicit() flag to mark generic function nodes that
+// are called to make getGfInfo analysis easier in one pre-order pass.
+func (n *InstExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
+func (n *InstExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+
// An InstExpr is a generic function or type instantiation.
type InstExpr struct {
miniExpr
@@ -773,6 +835,11 @@ func StaticValue(n Node) Node {
continue
}
+ if n.Op() == OINLCALL {
+ n = n.(*InlinedCallExpr).SingleResult()
+ continue
+ }
+
n1 := staticValue1(n)
if n1 == nil {
return n
@@ -1071,7 +1138,7 @@ func MethodExprName(n Node) *Name {
// MethodExprFunc is like MethodExprName, but returns the types.Field instead.
func MethodExprFunc(n Node) *types.Field {
switch n.Op() {
- case ODOTMETH, OMETHEXPR, OCALLPART:
+ case ODOTMETH, OMETHEXPR, OMETHVALUE:
return n.(*SelectorExpr).Selection
}
base.Fatalf("unexpected node: %v (%v)", n, n.Op())
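A hedged sketch of how the new DynamicTypeAssertExpr is meant to be constructed. x is an interface-valued expression and rtype is assumed to be whatever node materializes the *runtime._type (or *runtime.itab) described in the comment on the type above; producing that node is outside this file:

func dynAssertSketch(pos src.XPos, x, rtype ir.Node, typ *types.Type) ir.Node {
	n := ir.NewDynamicTypeAssertExpr(pos, ir.ODYNAMICDOTTYPE, x, rtype)
	n.SetType(typ) // static result type of the assertion
	n.SetTypecheck(1)
	return n
}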
diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go
index f2ae0f7606..d19fe453ef 100644
--- a/src/cmd/compile/internal/ir/fmt.go
+++ b/src/cmd/compile/internal/ir/fmt.go
@@ -185,6 +185,7 @@ var OpPrec = []int{
OCLOSE: 8,
OCOMPLIT: 8,
OCONVIFACE: 8,
+ OCONVIDATA: 8,
OCONVNOP: 8,
OCONV: 8,
OCOPY: 8,
@@ -237,7 +238,7 @@ var OpPrec = []int{
ODOTTYPE: 8,
ODOT: 8,
OXDOT: 8,
- OCALLPART: 8,
+ OMETHVALUE: 8,
OMETHEXPR: 8,
OPLUS: 7,
ONOT: 7,
@@ -546,7 +547,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
n = nn.X
continue
}
- case OCONV, OCONVNOP, OCONVIFACE:
+ case OCONV, OCONVNOP, OCONVIFACE, OCONVIDATA:
nn := nn.(*ConvExpr)
if nn.Implicit() {
n = nn.X
@@ -567,6 +568,11 @@ func exprFmt(n Node, s fmt.State, prec int) {
return
}
+ if n, ok := n.(*RawOrigExpr); ok {
+ fmt.Fprint(s, n.Raw)
+ return
+ }
+
switch n.Op() {
case OPAREN:
n := n.(*ParenExpr)
@@ -709,6 +715,10 @@ func exprFmt(n Node, s fmt.State, prec int) {
fmt.Fprintf(s, "... argument")
return
}
+ if typ := n.Type(); typ != nil {
+ fmt.Fprintf(s, "%v{%s}", typ, ellipsisIf(len(n.List) != 0))
+ return
+ }
if n.Ntype != nil {
fmt.Fprintf(s, "%v{%s}", n.Ntype, ellipsisIf(len(n.List) != 0))
return
@@ -752,7 +762,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
n := n.(*StructKeyExpr)
fmt.Fprintf(s, "%v:%v", n.Field, n.Value)
- case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH, OCALLPART, OMETHEXPR:
+ case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH, OMETHVALUE, OMETHEXPR:
n := n.(*SelectorExpr)
exprFmt(n.X, s, nprec)
if n.Sel == nil {
@@ -804,6 +814,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
case OCONV,
OCONVIFACE,
+ OCONVIDATA,
OCONVNOP,
OBYTES2STR,
ORUNES2STR,
@@ -854,6 +865,15 @@ func exprFmt(n Node, s fmt.State, prec int) {
}
fmt.Fprintf(s, "(%.v)", n.Args)
+ case OINLCALL:
+ n := n.(*InlinedCallExpr)
+ // TODO(mdempsky): Print Init and/or Body?
+ if len(n.ReturnVars) == 1 {
+ fmt.Fprintf(s, "%v", n.ReturnVars[0])
+ return
+ }
+ fmt.Fprintf(s, "(.%v)", n.ReturnVars)
+
case OMAKEMAP, OMAKECHAN, OMAKESLICE:
n := n.(*MakeExpr)
if n.Cap != nil {
@@ -986,7 +1006,7 @@ func (l Nodes) Format(s fmt.State, verb rune) {
// Dump prints the message s followed by a debug dump of n.
func Dump(s string, n Node) {
- fmt.Printf("%s [%p]%+v\n", s, n, n)
+ fmt.Printf("%s%+v\n", s, n)
}
// DumpList prints the message s followed by a debug dump of each node in the list.
@@ -1114,16 +1134,21 @@ func dumpNodeHeader(w io.Writer, n Node) {
}
if n.Pos().IsKnown() {
- pfx := ""
+ fmt.Fprint(w, " # ")
switch n.Pos().IsStmt() {
case src.PosNotStmt:
- pfx = "_" // "-" would be confusing
+ fmt.Fprint(w, "_") // "-" would be confusing
case src.PosIsStmt:
- pfx = "+"
+ fmt.Fprint(w, "+")
+ }
+ for i, pos := range base.Ctxt.AllPos(n.Pos(), nil) {
+ if i > 0 {
+ fmt.Fprint(w, ",")
+ }
+ // TODO(mdempsky): Print line pragma details too.
+ file := filepath.Base(pos.Filename())
+ fmt.Fprintf(w, "%s:%d:%d", file, pos.Line(), pos.Col())
}
- pos := base.Ctxt.PosTable.Pos(n.Pos())
- file := filepath.Base(pos.Filename())
- fmt.Fprintf(w, " # %s%s:%d", pfx, file, pos.Line())
}
}
diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go
index 20fe965711..269b6f14ec 100644
--- a/src/cmd/compile/internal/ir/func.go
+++ b/src/cmd/compile/internal/ir/func.go
@@ -9,6 +9,7 @@ import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
+ "fmt"
)
// A Func corresponds to a single function in a Go program
@@ -39,14 +40,14 @@ import (
// constructs a fresh node.
//
// A method value (t.M) is represented by ODOTMETH/ODOTINTER
-// when it is called directly and by OCALLPART otherwise.
+// when it is called directly and by OMETHVALUE otherwise.
// These are like method expressions, except that for ODOTMETH/ODOTINTER,
// the method name is stored in Sym instead of Right.
-// Each OCALLPART ends up being implemented as a new
+// Each OMETHVALUE ends up being implemented as a new
// function, a bit like a closure, with its own ODCLFUNC.
-// The OCALLPART uses n.Func to record the linkage to
+// The OMETHVALUE uses n.Func to record the linkage to
// the generated ODCLFUNC, but there is no
-// pointer from the Func back to the OCALLPART.
+// pointer from the Func back to the OMETHVALUE.
type Func struct {
miniNode
Body Nodes
@@ -166,6 +167,11 @@ type Inline struct {
// another package is imported.
Dcl []*Name
Body []Node
+
+ // CanDelayResults reports whether it's safe for the inliner to delay
+ // initializing the result parameters until immediately before the
+ // "return" statement.
+ CanDelayResults bool
}
// A Mark represents a scope boundary.
@@ -196,7 +202,7 @@ const (
funcExportInline // include inline body in export data
funcInstrumentBody // add race/msan instrumentation during SSA construction
funcOpenCodedDeferDisallowed // can't do open-coded defers
- funcClosureCalled // closure is only immediately called
+ funcClosureCalled // closure is only immediately called; used by escape analysis
)
type SymAndPos struct {
@@ -272,6 +278,17 @@ func PkgFuncName(f *Func) string {
var CurFunc *Func
+// WithFunc invokes do with CurFunc and base.Pos set to curfn and
+// curfn.Pos(), respectively, and then restores their previous values
+// before returning.
+func WithFunc(curfn *Func, do func()) {
+ oldfn, oldpos := CurFunc, base.Pos
+ defer func() { CurFunc, base.Pos = oldfn, oldpos }()
+
+ CurFunc, base.Pos = curfn, curfn.Pos()
+ do()
+}
+
func FuncSymName(s *types.Sym) string {
return s.Name + "·f"
}
@@ -279,7 +296,7 @@ func FuncSymName(s *types.Sym) string {
// MarkFunc marks a node as a function.
func MarkFunc(n *Name) {
if n.Op() != ONAME || n.Class != Pxxx {
- base.Fatalf("expected ONAME/Pxxx node, got %v", n)
+ base.FatalfAt(n.Pos(), "expected ONAME/Pxxx node, got %v (%v/%v)", n, n.Op(), n.Class)
}
n.Class = PFUNC
@@ -296,8 +313,8 @@ func ClosureDebugRuntimeCheck(clo *ClosureExpr) {
base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func.ClosureVars)
}
}
- if base.Flag.CompilingRuntime && clo.Esc() == EscHeap {
- base.ErrorfAt(clo.Pos(), "heap-allocated closure, not allowed in runtime")
+ if base.Flag.CompilingRuntime && clo.Esc() == EscHeap && !clo.IsGoWrap {
+ base.ErrorfAt(clo.Pos(), "heap-allocated closure %s, not allowed in runtime", FuncName(clo.Func))
}
}
@@ -306,3 +323,109 @@ func ClosureDebugRuntimeCheck(clo *ClosureExpr) {
func IsTrivialClosure(clo *ClosureExpr) bool {
return len(clo.Func.ClosureVars) == 0
}
+
+// globClosgen is like Func.Closgen, but for the global scope.
+var globClosgen int32
+
+// closureName generates a new unique name for a closure within outerfn.
+func closureName(outerfn *Func) *types.Sym {
+ pkg := types.LocalPkg
+ outer := "glob."
+ prefix := "func"
+ gen := &globClosgen
+
+ if outerfn != nil {
+ if outerfn.OClosure != nil {
+ prefix = ""
+ }
+
+ pkg = outerfn.Sym().Pkg
+ outer = FuncName(outerfn)
+
+ // There may be multiple functions named "_". In those
+ // cases, we can't use their individual Closgens as it
+ // would lead to name clashes.
+ if !IsBlank(outerfn.Nname) {
+ gen = &outerfn.Closgen
+ }
+ }
+
+ *gen++
+ return pkg.Lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen))
+}
+
+// NewClosureFunc creates a new Func to represent a function literal.
+// If hidden is true, then the closure is marked hidden (i.e., as a
+// function literal contained within another function, rather than a
+// package-scope variable initialization expression).
+func NewClosureFunc(pos src.XPos, hidden bool) *Func {
+ fn := NewFunc(pos)
+ fn.SetIsHiddenClosure(hidden)
+
+ fn.Nname = NewNameAt(pos, BlankNode.Sym())
+ fn.Nname.Func = fn
+ fn.Nname.Defn = fn
+
+ fn.OClosure = NewClosureExpr(pos, fn)
+
+ return fn
+}
+
+// NameClosure generates a unique name for the given function literal,
+// which must have appeared within outerfn.
+func NameClosure(clo *ClosureExpr, outerfn *Func) {
+ fn := clo.Func
+ if fn.IsHiddenClosure() != (outerfn != nil) {
+ base.FatalfAt(clo.Pos(), "closure naming inconsistency: hidden %v, but outer %v", fn.IsHiddenClosure(), outerfn)
+ }
+
+ name := fn.Nname
+ if !IsBlank(name) {
+ base.FatalfAt(clo.Pos(), "closure already named: %v", name)
+ }
+
+ name.SetSym(closureName(outerfn))
+ MarkFunc(name)
+}
+
+// UseClosure checks that the given function literal has been set up
+// correctly, and then returns it as an expression.
+// It must be called after clo.Func.ClosureVars has been set.
+func UseClosure(clo *ClosureExpr, pkg *Package) Node {
+ fn := clo.Func
+ name := fn.Nname
+
+ if IsBlank(name) {
+ base.FatalfAt(fn.Pos(), "unnamed closure func: %v", fn)
+ }
+ // Caution: clo.Typecheck() is still 0 when UseClosure is called by
+ // tcClosure.
+ if fn.Typecheck() != 1 || name.Typecheck() != 1 {
+ base.FatalfAt(fn.Pos(), "missed typecheck: %v", fn)
+ }
+ if clo.Type() == nil || name.Type() == nil {
+ base.FatalfAt(fn.Pos(), "missing types: %v", fn)
+ }
+ if !types.Identical(clo.Type(), name.Type()) {
+ base.FatalfAt(fn.Pos(), "mismatched types: %v", fn)
+ }
+
+ if base.Flag.W > 1 {
+ s := fmt.Sprintf("new closure func: %v", fn)
+ Dump(s, fn)
+ }
+
+ if pkg != nil {
+ pkg.Decls = append(pkg.Decls, fn)
+ }
+
+ if false && IsTrivialClosure(clo) {
+ // TODO(mdempsky): Investigate if we can/should optimize this
+ // case. walkClosure already handles it later, but it could be
+ // useful to recognize earlier (e.g., it might allow multiple
+ // inlined calls to a function to share a common trivial closure
+ // func, rather than cloning it for each inlined call).
+ }
+
+ return clo
+}
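Taken together, the three new helpers define a small life cycle for function literals. Roughly, and only as a sketch (outerfn may be nil for a package-scope literal, pkg collects declarations, and the elided steps remain the caller's responsibility):

func buildClosureSketch(pos src.XPos, outerfn *ir.Func, pkg *ir.Package) ir.Node {
	fn := ir.NewClosureFunc(pos, outerfn != nil) // blank-named Func with OClosure pre-wired
	// ...populate fn.Body, fn.Dcl, fn.ClosureVars...
	clo := fn.OClosure
	ir.NameClosure(clo, outerfn) // pick the unique "outer.funcN" name, mark Nname as PFUNC
	// ...typecheck fn and clo so fn.Typecheck() == 1 and their types agree...
	return ir.UseClosure(clo, pkg) // consistency checks; queues fn on pkg.Decls
}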
diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go
index b6c68bc5e0..a2eec05013 100644
--- a/src/cmd/compile/internal/ir/name.go
+++ b/src/cmd/compile/internal/ir/name.go
@@ -358,39 +358,74 @@ func (n *Name) Byval() bool {
return n.Canonical().flags&nameByval != 0
}
+// NewClosureVar returns a new closure variable for fn to refer to
+// outer variable n.
+func NewClosureVar(pos src.XPos, fn *Func, n *Name) *Name {
+ c := NewNameAt(pos, n.Sym())
+ c.Curfn = fn
+ c.Class = PAUTOHEAP
+ c.SetIsClosureVar(true)
+ c.Defn = n.Canonical()
+ c.Outer = n
+
+ c.SetType(n.Type())
+ c.SetTypecheck(n.Typecheck())
+
+ fn.ClosureVars = append(fn.ClosureVars, c)
+
+ return c
+}
+
+// NewHiddenParam returns a new hidden parameter for fn with the given
+// name and type.
+func NewHiddenParam(pos src.XPos, fn *Func, sym *types.Sym, typ *types.Type) *Name {
+ if fn.OClosure != nil {
+ base.FatalfAt(fn.Pos(), "cannot add hidden parameters to closures")
+ }
+
+ fn.SetNeedctxt(true)
+
+ // Create a fake parameter, disassociated from any real function, to
+ // pretend to capture.
+ fake := NewNameAt(pos, sym)
+ fake.Class = PPARAM
+ fake.SetType(typ)
+ fake.SetByval(true)
+
+ return NewClosureVar(pos, fn, fake)
+}
+
// CaptureName returns a Name suitable for referring to n from within function
// fn or from the package block if fn is nil. If n is a free variable declared
-// within a function that encloses fn, then CaptureName returns a closure
-// variable that refers to n and adds it to fn.ClosureVars. Otherwise, it simply
-// returns n.
+// within a function that encloses fn, then CaptureName returns the closure
+// variable that refers to n within fn, creating it if necessary.
+// Otherwise, it simply returns n.
func CaptureName(pos src.XPos, fn *Func, n *Name) *Name {
- if n.IsClosureVar() {
- base.FatalfAt(pos, "misuse of CaptureName on closure variable: %v", n)
- }
- if n.Op() != ONAME || n.Curfn == nil || n.Curfn == fn {
+ if n.Op() != ONAME || n.Curfn == nil {
return n // okay to use directly
}
- if fn == nil {
- base.FatalfAt(pos, "package-block reference to %v, declared in %v", n, n.Curfn)
+ if n.IsClosureVar() {
+ base.FatalfAt(pos, "misuse of CaptureName on closure variable: %v", n)
}
c := n.Innermost
- if c != nil && c.Curfn == fn {
+ if c == nil {
+ c = n
+ }
+ if c.Curfn == fn {
return c
}
+ if fn == nil {
+ base.FatalfAt(pos, "package-block reference to %v, declared in %v", n, n.Curfn)
+ }
+
// Do not have a closure var for the active closure yet; make one.
- c = NewNameAt(pos, n.Sym())
- c.Curfn = fn
- c.Class = PAUTOHEAP
- c.SetIsClosureVar(true)
- c.Defn = n
+ c = NewClosureVar(pos, fn, c)
// Link into list of active closure variables.
// Popped from list in FinishCaptureNames.
- c.Outer = n.Innermost
n.Innermost = c
- fn.ClosureVars = append(fn.ClosureVars, c)
return c
}
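Under the revised contract, CaptureName does the Innermost bookkeeping itself and also tolerates being handed a closure variable from an enclosing closure, so the caller's side reduces to the sketch below (fn and n are placeholders; ir.FinishCaptureNames, not shown in this hunk, still pops the Innermost links afterwards):

func refOuterSketch(pos src.XPos, fn *ir.Func, n *ir.Name) *ir.Name {
	// Returns n unchanged for globals and for locals of fn itself; otherwise
	// returns (creating it if needed) the closure variable for n within fn.
	return ir.CaptureName(pos, fn, n)
}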
diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go
index af559cc082..f071cb78ce 100644
--- a/src/cmd/compile/internal/ir/node.go
+++ b/src/cmd/compile/internal/ir/node.go
@@ -159,7 +159,6 @@ const (
OCALLFUNC // X(Args) (function call f(args))
OCALLMETH // X(Args) (direct method call x.Method(args))
OCALLINTER // X(Args) (interface method call x.Method(args))
- OCALLPART // X.Sel (method expression x.Method, not called)
OCAP // cap(X)
OCLOSE // close(X)
OCLOSURE // func Type { Func.Closure.Body } (func literal)
@@ -171,6 +170,7 @@ const (
OPTRLIT // &X (X is composite literal)
OCONV // Type(X) (type conversion)
OCONVIFACE // Type(X) (type conversion, to interface)
+ OCONVIDATA // Builds a data word to store X in an interface. Equivalent to IDATA(CONVIFACE(X)). Is an ir.ConvExpr.
OCONVNOP // Type(X) (type conversion, no effect)
OCOPY // copy(X, Y)
ODCL // var X (declares X of type X.Type)
@@ -237,6 +237,7 @@ const (
OSLICE3ARR // X[Low : High : Max] (X is pointer to array)
OSLICEHEADER // sliceheader{Ptr, Len, Cap} (Ptr is unsafe.Pointer, Len is length, Cap is capacity)
ORECOVER // recover()
+ ORECOVERFP // recover(Args) w/ explicit FP argument
ORECV // <-X
ORUNESTR // Type(X) (Type is string, X is rune)
OSELRECV2 // like OAS2: Lhs = Rhs where len(Lhs)=2, len(Rhs)=1, Rhs[0].Op = ORECV (appears as .Var of OCASE)
@@ -249,14 +250,16 @@ const (
OSIZEOF // unsafe.Sizeof(X)
OUNSAFEADD // unsafe.Add(X, Y)
OUNSAFESLICE // unsafe.Slice(X, Y)
- OMETHEXPR // method expression
+ OMETHEXPR // X(Args) (method expression T.Method(args), first argument is the method receiver)
+ OMETHVALUE // X.Sel (method value t.Method, not called)
// statements
OBLOCK // { List } (block of code)
OBREAK // break [Label]
// OCASE: case List: Body (List==nil means default)
// For OTYPESW, List is a OTYPE node for the specified type (or OLITERAL
- // for nil), and, if a type-switch variable is specified, Rlist is an
+ // for nil) or an ODYNAMICTYPE indicating a runtime type for generics.
+ // If a type-switch variable is specified, Var is an
// ONAME for the version of the type-switch variable with the specified
// type.
OCASE
@@ -317,9 +320,16 @@ const (
OINLMARK // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree.
OLINKSYMOFFSET // offset within a name
+ // opcodes for generics
+ ODYNAMICDOTTYPE // x = i.(T) where T is a type parameter (or derived from a type parameter)
+ ODYNAMICDOTTYPE2 // x, ok = i.(T) where T is a type parameter (or derived from a type parameter)
+ ODYNAMICTYPE // a type node for type switches (represents a dynamic target type for a type switch)
+
// arch-specific opcodes
- OTAILCALL // tail call to another function
- OGETG // runtime.getg() (read g pointer)
+ OTAILCALL // tail call to another function
+ OGETG // runtime.getg() (read g pointer)
+ OGETCALLERPC // runtime.getcallerpc() (continuation PC in caller frame)
+ OGETCALLERSP // runtime.getcallersp() (stack pointer in caller frame)
OEND
)
@@ -436,18 +446,19 @@ func (s NameSet) Sorted(less func(*Name, *Name) bool) []*Name {
return res
}
-type PragmaFlag int16
+type PragmaFlag uint16
const (
// Func pragmas.
- Nointerface PragmaFlag = 1 << iota
- Noescape // func parameters don't escape
- Norace // func must not have race detector annotations
- Nosplit // func should not execute on separate stack
- Noinline // func should not be inlined
- NoCheckPtr // func should not be instrumented by checkptr
- CgoUnsafeArgs // treat a pointer to one arg as a pointer to them all
- UintptrEscapes // pointers converted to uintptr escape
+ Nointerface PragmaFlag = 1 << iota
+ Noescape // func parameters don't escape
+ Norace // func must not have race detector annotations
+ Nosplit // func should not execute on separate stack
+ Noinline // func should not be inlined
+ NoCheckPtr // func should not be instrumented by checkptr
+ CgoUnsafeArgs // treat a pointer to one arg as a pointer to them all
+ UintptrKeepAlive // pointers converted to uintptr must be kept alive (compiler internal only)
+ UintptrEscapes // pointers converted to uintptr escape
// Runtime-only func pragmas.
// See ../../../../runtime/README.md for detailed descriptions.
@@ -563,7 +574,7 @@ func OuterValue(n Node) Node {
for {
switch nn := n; nn.Op() {
case OXDOT:
- base.Fatalf("OXDOT in walk")
+ base.FatalfAt(n.Pos(), "OXDOT in walk: %v", n)
case ODOT:
nn := nn.(*SelectorExpr)
n = nn.X
diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go
index 22855d7163..aa41c03beb 100644
--- a/src/cmd/compile/internal/ir/node_gen.go
+++ b/src/cmd/compile/internal/ir/node_gen.go
@@ -463,6 +463,62 @@ func (n *Decl) editChildren(edit func(Node) Node) {
}
}
+func (n *DynamicType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *DynamicType) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *DynamicType) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if n.ITab != nil && do(n.ITab) {
+ return true
+ }
+ return false
+}
+func (n *DynamicType) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.ITab != nil {
+ n.ITab = edit(n.ITab).(Node)
+ }
+}
+
+func (n *DynamicTypeAssertExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *DynamicTypeAssertExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *DynamicTypeAssertExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if n.T != nil && do(n.T) {
+ return true
+ }
+ return false
+}
+func (n *DynamicTypeAssertExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.T != nil {
+ n.T = edit(n.T).(Node)
+ }
+}
+
func (n *ForStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *ForStmt) copy() Node {
c := *n
@@ -947,6 +1003,22 @@ func (n *RangeStmt) editChildren(edit func(Node) Node) {
}
}
+func (n *RawOrigExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *RawOrigExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *RawOrigExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ return false
+}
+func (n *RawOrigExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+
func (n *ResultExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *ResultExpr) copy() Node {
c := *n
diff --git a/src/cmd/compile/internal/ir/op_string.go b/src/cmd/compile/internal/ir/op_string.go
index 405a0c6b3c..b8cee71818 100644
--- a/src/cmd/compile/internal/ir/op_string.go
+++ b/src/cmd/compile/internal/ir/op_string.go
@@ -41,18 +41,18 @@ func _() {
_ = x[OCALLFUNC-30]
_ = x[OCALLMETH-31]
_ = x[OCALLINTER-32]
- _ = x[OCALLPART-33]
- _ = x[OCAP-34]
- _ = x[OCLOSE-35]
- _ = x[OCLOSURE-36]
- _ = x[OCOMPLIT-37]
- _ = x[OMAPLIT-38]
- _ = x[OSTRUCTLIT-39]
- _ = x[OARRAYLIT-40]
- _ = x[OSLICELIT-41]
- _ = x[OPTRLIT-42]
- _ = x[OCONV-43]
- _ = x[OCONVIFACE-44]
+ _ = x[OCAP-33]
+ _ = x[OCLOSE-34]
+ _ = x[OCLOSURE-35]
+ _ = x[OCOMPLIT-36]
+ _ = x[OMAPLIT-37]
+ _ = x[OSTRUCTLIT-38]
+ _ = x[OARRAYLIT-39]
+ _ = x[OSLICELIT-40]
+ _ = x[OPTRLIT-41]
+ _ = x[OCONV-42]
+ _ = x[OCONVIFACE-43]
+ _ = x[OCONVIDATA-44]
_ = x[OCONVNOP-45]
_ = x[OCOPY-46]
_ = x[ODCL-47]
@@ -109,65 +109,72 @@ func _() {
_ = x[OSLICE3ARR-98]
_ = x[OSLICEHEADER-99]
_ = x[ORECOVER-100]
- _ = x[ORECV-101]
- _ = x[ORUNESTR-102]
- _ = x[OSELRECV2-103]
- _ = x[OIOTA-104]
- _ = x[OREAL-105]
- _ = x[OIMAG-106]
- _ = x[OCOMPLEX-107]
- _ = x[OALIGNOF-108]
- _ = x[OOFFSETOF-109]
- _ = x[OSIZEOF-110]
- _ = x[OUNSAFEADD-111]
- _ = x[OUNSAFESLICE-112]
- _ = x[OMETHEXPR-113]
- _ = x[OBLOCK-114]
- _ = x[OBREAK-115]
- _ = x[OCASE-116]
- _ = x[OCONTINUE-117]
- _ = x[ODEFER-118]
- _ = x[OFALL-119]
- _ = x[OFOR-120]
- _ = x[OFORUNTIL-121]
- _ = x[OGOTO-122]
- _ = x[OIF-123]
- _ = x[OLABEL-124]
- _ = x[OGO-125]
- _ = x[ORANGE-126]
- _ = x[ORETURN-127]
- _ = x[OSELECT-128]
- _ = x[OSWITCH-129]
- _ = x[OTYPESW-130]
- _ = x[OFUNCINST-131]
- _ = x[OTCHAN-132]
- _ = x[OTMAP-133]
- _ = x[OTSTRUCT-134]
- _ = x[OTINTER-135]
- _ = x[OTFUNC-136]
- _ = x[OTARRAY-137]
- _ = x[OTSLICE-138]
- _ = x[OINLCALL-139]
- _ = x[OEFACE-140]
- _ = x[OITAB-141]
- _ = x[OIDATA-142]
- _ = x[OSPTR-143]
- _ = x[OCFUNC-144]
- _ = x[OCHECKNIL-145]
- _ = x[OVARDEF-146]
- _ = x[OVARKILL-147]
- _ = x[OVARLIVE-148]
- _ = x[ORESULT-149]
- _ = x[OINLMARK-150]
- _ = x[OLINKSYMOFFSET-151]
- _ = x[OTAILCALL-152]
- _ = x[OGETG-153]
- _ = x[OEND-154]
+ _ = x[ORECOVERFP-101]
+ _ = x[ORECV-102]
+ _ = x[ORUNESTR-103]
+ _ = x[OSELRECV2-104]
+ _ = x[OIOTA-105]
+ _ = x[OREAL-106]
+ _ = x[OIMAG-107]
+ _ = x[OCOMPLEX-108]
+ _ = x[OALIGNOF-109]
+ _ = x[OOFFSETOF-110]
+ _ = x[OSIZEOF-111]
+ _ = x[OUNSAFEADD-112]
+ _ = x[OUNSAFESLICE-113]
+ _ = x[OMETHEXPR-114]
+ _ = x[OMETHVALUE-115]
+ _ = x[OBLOCK-116]
+ _ = x[OBREAK-117]
+ _ = x[OCASE-118]
+ _ = x[OCONTINUE-119]
+ _ = x[ODEFER-120]
+ _ = x[OFALL-121]
+ _ = x[OFOR-122]
+ _ = x[OFORUNTIL-123]
+ _ = x[OGOTO-124]
+ _ = x[OIF-125]
+ _ = x[OLABEL-126]
+ _ = x[OGO-127]
+ _ = x[ORANGE-128]
+ _ = x[ORETURN-129]
+ _ = x[OSELECT-130]
+ _ = x[OSWITCH-131]
+ _ = x[OTYPESW-132]
+ _ = x[OFUNCINST-133]
+ _ = x[OTCHAN-134]
+ _ = x[OTMAP-135]
+ _ = x[OTSTRUCT-136]
+ _ = x[OTINTER-137]
+ _ = x[OTFUNC-138]
+ _ = x[OTARRAY-139]
+ _ = x[OTSLICE-140]
+ _ = x[OINLCALL-141]
+ _ = x[OEFACE-142]
+ _ = x[OITAB-143]
+ _ = x[OIDATA-144]
+ _ = x[OSPTR-145]
+ _ = x[OCFUNC-146]
+ _ = x[OCHECKNIL-147]
+ _ = x[OVARDEF-148]
+ _ = x[OVARKILL-149]
+ _ = x[OVARLIVE-150]
+ _ = x[ORESULT-151]
+ _ = x[OINLMARK-152]
+ _ = x[OLINKSYMOFFSET-153]
+ _ = x[ODYNAMICDOTTYPE-154]
+ _ = x[ODYNAMICDOTTYPE2-155]
+ _ = x[ODYNAMICTYPE-156]
+ _ = x[OTAILCALL-157]
+ _ = x[OGETG-158]
+ _ = x[OGETCALLERPC-159]
+ _ = x[OGETCALLERSP-160]
+ _ = x[OEND-161]
}
-const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESSLICE2ARRPTRASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFUNSAFEADDUNSAFESLICEMETHEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWFUNCINSTTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKLINKSYMOFFSETTAILCALLGETGEND"
+const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESSLICE2ARRPTRASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVIDATACONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECOVERFPRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFUNSAFEADDUNSAFESLICEMETHEXPRMETHVALUEBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWFUNCINSTTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKLINKSYMOFFSETDYNAMICDOTTYPEDYNAMICDOTTYPE2DYNAMICTYPETAILCALLGETGGETCALLERPCGETCALLERSPEND"
-var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 136, 138, 141, 151, 158, 165, 172, 176, 180, 188, 196, 205, 213, 216, 221, 228, 235, 241, 250, 258, 266, 272, 276, 285, 292, 296, 299, 306, 314, 321, 327, 330, 336, 343, 351, 355, 362, 370, 372, 374, 376, 378, 380, 382, 387, 392, 400, 403, 412, 415, 419, 427, 434, 443, 456, 459, 462, 465, 468, 471, 474, 480, 483, 486, 492, 496, 499, 503, 508, 513, 519, 524, 528, 533, 541, 549, 555, 564, 575, 582, 586, 593, 601, 605, 609, 613, 620, 627, 635, 641, 650, 661, 669, 674, 679, 683, 691, 696, 700, 703, 711, 715, 717, 722, 724, 729, 735, 741, 747, 753, 761, 766, 770, 777, 783, 788, 794, 800, 807, 812, 816, 821, 825, 830, 838, 844, 851, 858, 864, 871, 884, 892, 896, 899}
+var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 136, 138, 141, 151, 158, 165, 172, 176, 180, 188, 196, 205, 208, 213, 220, 227, 233, 242, 250, 258, 264, 268, 277, 286, 293, 297, 300, 307, 315, 322, 328, 331, 337, 344, 352, 356, 363, 371, 373, 375, 377, 379, 381, 383, 388, 393, 401, 404, 413, 416, 420, 428, 435, 444, 457, 460, 463, 466, 469, 472, 475, 481, 484, 487, 493, 497, 500, 504, 509, 514, 520, 525, 529, 534, 542, 550, 556, 565, 576, 583, 592, 596, 603, 611, 615, 619, 623, 630, 637, 645, 651, 660, 671, 679, 688, 693, 698, 702, 710, 715, 719, 722, 730, 734, 736, 741, 743, 748, 754, 760, 766, 772, 780, 785, 789, 796, 802, 807, 813, 819, 826, 831, 835, 840, 844, 849, 857, 863, 870, 877, 883, 890, 903, 917, 932, 943, 951, 955, 966, 977, 980}
func (i Op) String() string {
if i >= Op(len(_Op_index)-1) {
diff --git a/src/cmd/compile/internal/ir/package.go b/src/cmd/compile/internal/ir/package.go
index e4b93d113e..3896e2b91b 100644
--- a/src/cmd/compile/internal/ir/package.go
+++ b/src/cmd/compile/internal/ir/package.go
@@ -32,7 +32,4 @@ type Package struct {
// Exported (or re-exported) symbols.
Exports []*Name
-
- // Map from function names of stencils to already-created stencils.
- Stencils map[*types.Sym]*Func
}
diff --git a/src/cmd/compile/internal/ir/scc.go b/src/cmd/compile/internal/ir/scc.go
index 83c6074170..2cfceaa1f6 100644
--- a/src/cmd/compile/internal/ir/scc.go
+++ b/src/cmd/compile/internal/ir/scc.go
@@ -90,7 +90,7 @@ func (v *bottomUpVisitor) visit(n *Func) uint32 {
if n := n.(*Name); n.Class == PFUNC {
do(n.Defn)
}
- case ODOTMETH, OCALLPART, OMETHEXPR:
+ case ODOTMETH, OMETHVALUE, OMETHEXPR:
if fn := MethodExprName(n); fn != nil {
do(fn.Defn)
}
diff --git a/src/cmd/compile/internal/ir/type.go b/src/cmd/compile/internal/ir/type.go
index a903ea8cd4..63dd673dcd 100644
--- a/src/cmd/compile/internal/ir/type.go
+++ b/src/cmd/compile/internal/ir/type.go
@@ -300,11 +300,36 @@ func (n *typeNode) CanBeNtype() {}
// TypeNode returns the Node representing the type t.
func TypeNode(t *types.Type) Ntype {
+ return TypeNodeAt(src.NoXPos, t)
+}
+
+// TypeNodeAt is like TypeNode, but allows specifying the position
+// information if a new OTYPE needs to be constructed.
+//
+// Deprecated: Use TypeNode instead. For typical use, the position for
+// an anonymous OTYPE node should not matter. However, TypeNodeAt is
+// available for use with toolstash -cmp to refactor existing code
+// that is sensitive to OTYPE position.
+func TypeNodeAt(pos src.XPos, t *types.Type) Ntype {
if n := t.Obj(); n != nil {
if n.Type() != t {
base.Fatalf("type skew: %v has type %v, but expected %v", n, n.Type(), t)
}
return n.(Ntype)
}
- return newTypeNode(src.NoXPos, t)
+ return newTypeNode(pos, t)
+}
+
+// A DynamicType represents the target type in a type switch.
+type DynamicType struct {
+ miniExpr
+ X Node // a *runtime._type for the targeted type
+ ITab Node // for type switches from nonempty interfaces to non-interfaces, this is the itab for that pair.
+}
+
+func NewDynamicType(pos src.XPos, x Node) *DynamicType {
+ n := &DynamicType{X: x}
+ n.pos = pos
+ n.op = ODYNAMICTYPE
+ return n
}
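A minimal sketch of the ODYNAMICTYPE case arm the new node represents; rtype is assumed to be a node that evaluates to the *runtime._type for typ, and obtaining it is not part of this file:

func dynCaseSketch(pos src.XPos, typ *types.Type, rtype ir.Node) *ir.DynamicType {
	dt := ir.NewDynamicType(pos, rtype)
	dt.SetType(typ) // the static type this case matches
	return dt
}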
diff --git a/src/cmd/compile/internal/ir/val.go b/src/cmd/compile/internal/ir/val.go
index 03c320e205..bfe7d2bb43 100644
--- a/src/cmd/compile/internal/ir/val.go
+++ b/src/cmd/compile/internal/ir/val.go
@@ -66,7 +66,7 @@ func Float64Val(v constant.Value) float64 {
func AssertValidTypeForConst(t *types.Type, v constant.Value) {
if !ValidTypeForConst(t, v) {
- base.Fatalf("%v does not represent %v", t, v)
+ base.Fatalf("%v (%v) does not represent %v (%v)", t, t.Kind(), v, v.Kind())
}
}
diff --git a/src/cmd/compile/internal/liveness/plive.go b/src/cmd/compile/internal/liveness/plive.go
index f5c2ef7709..2705eac4f7 100644
--- a/src/cmd/compile/internal/liveness/plive.go
+++ b/src/cmd/compile/internal/liveness/plive.go
@@ -1082,6 +1082,10 @@ func (lv *liveness) showlive(v *ssa.Value, live bitvec.BitVec) {
if base.Flag.Live == 0 || ir.FuncName(lv.fn) == "init" || strings.HasPrefix(ir.FuncName(lv.fn), ".") {
return
}
+ if lv.fn.Wrapper() || lv.fn.Dupok() {
+ // Skip reporting liveness information for compiler-generated wrappers.
+ return
+ }
if !(v == nil || v.Op.IsCall()) {
// Historically we only printed this information at
// calls. Keep doing so.
diff --git a/src/cmd/compile/internal/logopt/logopt_test.go b/src/cmd/compile/internal/logopt/logopt_test.go
index 71976174b0..902cbc8091 100644
--- a/src/cmd/compile/internal/logopt/logopt_test.go
+++ b/src/cmd/compile/internal/logopt/logopt_test.go
@@ -209,7 +209,7 @@ func s15a8(x *[15]int64) [15]int64 {
want(t, slogged, `{"range":{"start":{"line":11,"character":6},"end":{"line":11,"character":6}},"severity":3,"code":"isInBounds","source":"go compiler","message":""}`)
want(t, slogged, `{"range":{"start":{"line":7,"character":6},"end":{"line":7,"character":6}},"severity":3,"code":"canInlineFunction","source":"go compiler","message":"cost: 35"}`)
// escape analysis explanation
- want(t, slogged, `{"range":{"start":{"line":7,"character":13},"end":{"line":7,"character":13}},"severity":3,"code":"leak","source":"go compiler","message":"parameter z leaks to ~r2 with derefs=0",`+
+ want(t, slogged, `{"range":{"start":{"line":7,"character":13},"end":{"line":7,"character":13}},"severity":3,"code":"leak","source":"go compiler","message":"parameter z leaks to ~r0 with derefs=0",`+
`"relatedInformation":[`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: flow: y = z:"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from y := z (assign-pair)"},`+
@@ -220,8 +220,8 @@ func s15a8(x *[15]int64) [15]int64 {
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from \u0026y.b (address-of)"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":9},"end":{"line":4,"character":9}}},"message":"inlineLoc"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from ~R0 = \u0026y.b (assign-pair)"},`+
- `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: flow: ~r2 = ~R0:"},`+
- `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: from return (*int)(~R0) (return)"}]}`)
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: flow: ~r0 = ~R0:"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: from return ~R0 (return)"}]}`)
})
}
diff --git a/src/cmd/compile/internal/mips/galign.go b/src/cmd/compile/internal/mips/galign.go
index f892923ba0..4e6897042e 100644
--- a/src/cmd/compile/internal/mips/galign.go
+++ b/src/cmd/compile/internal/mips/galign.go
@@ -21,7 +21,6 @@ func Init(arch *ssagen.ArchInfo) {
arch.SoftFloat = (buildcfg.GOMIPS == "softfloat")
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
- arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
diff --git a/src/cmd/compile/internal/mips64/galign.go b/src/cmd/compile/internal/mips64/galign.go
index af81366e51..412bc71aab 100644
--- a/src/cmd/compile/internal/mips64/galign.go
+++ b/src/cmd/compile/internal/mips64/galign.go
@@ -21,7 +21,6 @@ func Init(arch *ssagen.ArchInfo) {
arch.SoftFloat = buildcfg.GOMIPS64 == "softfloat"
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
- arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
diff --git a/src/cmd/compile/internal/noder/codes.go b/src/cmd/compile/internal/noder/codes.go
new file mode 100644
index 0000000000..f8cb7729ac
--- /dev/null
+++ b/src/cmd/compile/internal/noder/codes.go
@@ -0,0 +1,124 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+type code interface {
+ marker() syncMarker
+ value() int
+}
+
+type codeVal int
+
+func (c codeVal) marker() syncMarker { return syncVal }
+func (c codeVal) value() int { return int(c) }
+
+const (
+ valBool codeVal = iota
+ valString
+ valInt64
+ valBigInt
+ valBigRat
+ valBigFloat
+)
+
+type codeType int
+
+func (c codeType) marker() syncMarker { return syncType }
+func (c codeType) value() int { return int(c) }
+
+const (
+ typeBasic codeType = iota
+ typeNamed
+ typePointer
+ typeSlice
+ typeArray
+ typeChan
+ typeMap
+ typeSignature
+ typeStruct
+ typeInterface
+ typeUnion
+ typeTypeParam
+)
+
+type codeObj int
+
+func (c codeObj) marker() syncMarker { return syncCodeObj }
+func (c codeObj) value() int { return int(c) }
+
+const (
+ objAlias codeObj = iota
+ objConst
+ objType
+ objFunc
+ objVar
+ objStub
+)
+
+type codeStmt int
+
+func (c codeStmt) marker() syncMarker { return syncStmt1 }
+func (c codeStmt) value() int { return int(c) }
+
+const (
+ stmtEnd codeStmt = iota
+ stmtLabel
+ stmtBlock
+ stmtExpr
+ stmtSend
+ stmtAssign
+ stmtAssignOp
+ stmtIncDec
+ stmtBranch
+ stmtCall
+ stmtReturn
+ stmtIf
+ stmtFor
+ stmtSwitch
+ stmtSelect
+
+ // TODO(mdempsky): Remove after we don't care about toolstash -cmp.
+ stmtTypeDeclHack
+)
+
+type codeExpr int
+
+func (c codeExpr) marker() syncMarker { return syncExpr }
+func (c codeExpr) value() int { return int(c) }
+
+// TODO(mdempsky): Split expr into addr, for lvalues.
+const (
+ exprNone codeExpr = iota
+ exprConst
+ exprType // type expression
+ exprLocal // local variable
+ exprName // global variable or function
+ exprBlank
+ exprCompLit
+ exprFuncLit
+ exprSelector
+ exprIndex
+ exprSlice
+ exprAssert
+ exprUnaryOp
+ exprBinaryOp
+ exprCall
+ exprConvert
+)
+
+type codeDecl int
+
+func (c codeDecl) marker() syncMarker { return syncDecl }
+func (c codeDecl) value() int { return int(c) }
+
+const (
+ declEnd codeDecl = iota
+ declFunc
+ declMethod
+ declVar
+ declOther
+)
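These tag types pair with the encoder/decoder added later in this change: the writer emits the tag's sync marker followed by its small integer value, and the reader checks the marker and converts the integer back. A sketch, assuming the encoder and decoder types defined in encoder.go and decoder.go below:

func writeValTagSketch(w *encoder)        { w.code(valBool) }                 // emits syncVal, then 0
func readValTagSketch(r *decoder) codeVal { return codeVal(r.code(syncVal)) } // expects syncVal back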
diff --git a/src/cmd/compile/internal/noder/decl.go b/src/cmd/compile/internal/noder/decl.go
index 4ca2eb4740..429c8a14c8 100644
--- a/src/cmd/compile/internal/noder/decl.go
+++ b/src/cmd/compile/internal/noder/decl.go
@@ -41,20 +41,27 @@ func (g *irgen) decls(decls []syntax.Decl) []ir.Node {
}
func (g *irgen) importDecl(p *noder, decl *syntax.ImportDecl) {
- // TODO(mdempsky): Merge with gcimports so we don't have to import
- // packages twice.
-
g.pragmaFlags(decl.Pragma, 0)
- ipkg := importfile(decl)
- if ipkg == ir.Pkgs.Unsafe {
+ // Get the imported package's path, as resolved already by types2
+ // and gcimporter. This is the same path as would be computed by
+ // parseImportPath.
+ switch pkgNameOf(g.info, decl).Imported().Path() {
+ case "unsafe":
p.importedUnsafe = true
- }
- if ipkg.Path == "embed" {
+ case "embed":
p.importedEmbed = true
}
}
+// pkgNameOf returns the PkgName associated with the given ImportDecl.
+func pkgNameOf(info *types2.Info, decl *syntax.ImportDecl) *types2.PkgName {
+ if name := decl.LocalPkgName; name != nil {
+ return info.Defs[name].(*types2.PkgName)
+ }
+ return info.Implicits[decl].(*types2.PkgName)
+}
+
func (g *irgen) constDecl(out *ir.Nodes, decl *syntax.ConstDecl) {
g.pragmaFlags(decl.Pragma, 0)
@@ -95,7 +102,21 @@ func (g *irgen) funcDecl(out *ir.Nodes, decl *syntax.FuncDecl) {
g.target.Inits = append(g.target.Inits, fn)
}
+ if fn.Type().HasTParam() {
+ g.topFuncIsGeneric = true
+ }
g.funcBody(fn, decl.Recv, decl.Type, decl.Body)
+ g.topFuncIsGeneric = false
+ if fn.Type().HasTParam() && fn.Body != nil {
+ // Set pointers to the dcls/body of a generic function/method in
+ // the Inl struct, so it is marked for export, is available for
+ // stenciling, and works with Inline_Flood().
+ fn.Inl = &ir.Inline{
+ Cost: 1,
+ Dcl: fn.Dcl,
+ Body: fn.Body,
+ }
+ }
out.Append(fn)
}
@@ -104,13 +125,7 @@ func (g *irgen) typeDecl(out *ir.Nodes, decl *syntax.TypeDecl) {
if decl.Alias {
name, _ := g.def(decl.Name)
g.pragmaFlags(decl.Pragma, 0)
-
- // TODO(mdempsky): This matches how typecheckdef marks aliases for
- // export, but this won't generalize to exporting function-scoped
- // type aliases. We should maybe just use n.Alias() instead.
- if ir.CurFunc == nil {
- name.Sym().Def = ir.TypeNode(name.Type())
- }
+ assert(name.Alias()) // should be set by irgen.obj
out.Append(ir.NewDecl(g.pos(decl), ir.ODCLTYPE, name))
return
@@ -154,11 +169,15 @@ func (g *irgen) typeDecl(out *ir.Nodes, decl *syntax.TypeDecl) {
// [mdempsky: Subtleties like these are why I always vehemently
// object to new type pragmas.]
ntyp.SetUnderlying(g.typeExpr(decl.Type))
- if len(decl.TParamList) > 0 {
- // Set HasTParam if there are any tparams, even if no tparams are
- // used in the type itself (e.g., if it is an empty struct, or no
- // fields in the struct use the tparam).
- ntyp.SetHasTParam(true)
+
+ tparams := otyp.(*types2.Named).TParams()
+ if n := tparams.Len(); n > 0 {
+ rparams := make([]*types.Type, n)
+ for i := range rparams {
+ rparams[i] = g.typ(tparams.At(i).Type())
+ }
+ // This will set hasTParam flag if any rparams are not concrete types.
+ ntyp.SetRParams(rparams)
}
types.ResumeCheckSize()
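For context on the importDecl change above: pkgNameOf hands irgen the types2 object for an import whether or not the declaration renames the package, so the path check reduces to a one-liner like the sketch below (info and decl stand in for what irgen already carries):

func importedPathSketch(info *types2.Info, decl *syntax.ImportDecl) string {
	return pkgNameOf(info, decl).Imported().Path() // e.g. "unsafe" or "embed"
}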
diff --git a/src/cmd/compile/internal/noder/decoder.go b/src/cmd/compile/internal/noder/decoder.go
new file mode 100644
index 0000000000..3dc61c6a69
--- /dev/null
+++ b/src/cmd/compile/internal/noder/decoder.go
@@ -0,0 +1,301 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "math/big"
+ "os"
+ "runtime"
+ "strings"
+
+ "cmd/compile/internal/base"
+)
+
+type pkgDecoder struct {
+ pkgPath string
+
+ elemEndsEnds [numRelocs]uint32
+ elemEnds []uint32
+ elemData string
+}
+
+func newPkgDecoder(pkgPath, input string) pkgDecoder {
+ pr := pkgDecoder{
+ pkgPath: pkgPath,
+ }
+
+ // TODO(mdempsky): Implement direct indexing of input string to
+ // avoid copying the position information.
+
+ r := strings.NewReader(input)
+
+ assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil)
+
+ pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1])
+ assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil)
+
+ pos, err := r.Seek(0, os.SEEK_CUR)
+ assert(err == nil)
+
+ pr.elemData = input[pos:]
+ assert(len(pr.elemData) == int(pr.elemEnds[len(pr.elemEnds)-1]))
+
+ return pr
+}
+
+func (pr *pkgDecoder) numElems(k reloc) int {
+ count := int(pr.elemEndsEnds[k])
+ if k > 0 {
+ count -= int(pr.elemEndsEnds[k-1])
+ }
+ return count
+}
+
+func (pr *pkgDecoder) totalElems() int {
+ return len(pr.elemEnds)
+}
+
+func (pr *pkgDecoder) absIdx(k reloc, idx int) int {
+ absIdx := idx
+ if k > 0 {
+ absIdx += int(pr.elemEndsEnds[k-1])
+ }
+ if absIdx >= int(pr.elemEndsEnds[k]) {
+ base.Fatalf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
+ }
+ return absIdx
+}
+
+func (pr *pkgDecoder) dataIdx(k reloc, idx int) string {
+ absIdx := pr.absIdx(k, idx)
+
+ var start uint32
+ if absIdx > 0 {
+ start = pr.elemEnds[absIdx-1]
+ }
+ end := pr.elemEnds[absIdx]
+
+ return pr.elemData[start:end]
+}
+
+func (pr *pkgDecoder) stringIdx(idx int) string {
+ return pr.dataIdx(relocString, idx)
+}
+
+func (pr *pkgDecoder) newDecoder(k reloc, idx int, marker syncMarker) decoder {
+ r := pr.newDecoderRaw(k, idx)
+ r.sync(marker)
+ return r
+}
+
+func (pr *pkgDecoder) newDecoderRaw(k reloc, idx int) decoder {
+ r := decoder{
+ common: pr,
+ k: k,
+ idx: idx,
+ }
+
+ // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved.
+ r.data = *strings.NewReader(pr.dataIdx(k, idx))
+
+ r.sync(syncRelocs)
+ r.relocs = make([]relocEnt, r.len())
+ for i := range r.relocs {
+ r.sync(syncReloc)
+ r.relocs[i] = relocEnt{reloc(r.len()), r.len()}
+ }
+
+ return r
+}
+
+type decoder struct {
+ common *pkgDecoder
+
+ relocs []relocEnt
+ data strings.Reader
+
+ k reloc
+ idx int
+}
+
+func (r *decoder) checkErr(err error) {
+ if err != nil {
+ base.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func (r *decoder) rawUvarint() uint64 {
+ x, err := binary.ReadUvarint(&r.data)
+ r.checkErr(err)
+ return x
+}
+
+func (r *decoder) rawVarint() int64 {
+ ux := r.rawUvarint()
+
+ // Zig-zag decode.
+ x := int64(ux >> 1)
+ if ux&1 != 0 {
+ x = ^x
+ }
+ return x
+}
+
+func (r *decoder) rawReloc(k reloc, idx int) int {
+ e := r.relocs[idx]
+ assert(e.kind == k)
+ return e.idx
+}
+
+func (r *decoder) sync(mWant syncMarker) {
+ if !enableSync {
+ return
+ }
+
+ pos, _ := r.data.Seek(0, os.SEEK_CUR) // TODO(mdempsky): io.SeekCurrent after #44505 is resolved
+ mHave := syncMarker(r.rawUvarint())
+ writerPCs := make([]int, r.rawUvarint())
+ for i := range writerPCs {
+ writerPCs[i] = int(r.rawUvarint())
+ }
+
+ if mHave == mWant {
+ return
+ }
+
+ // There's some tension here between printing:
+ //
+ // (1) full file paths that tools can recognize (e.g., so emacs
+ // hyperlinks the "file:line" text for easy navigation), or
+ //
+ // (2) short file paths that are easier for humans to read (e.g., by
+ // omitting redundant or irrelevant details, so it's easier to
+ // focus on the useful bits that remain).
+ //
+ // The current formatting favors the former, as it seems more
+ // helpful in practice. But perhaps the formatting could be improved
+ // to better address both concerns. For example, use relative file
+ // paths if they would be shorter, or rewrite file paths to contain
+ // "$GOROOT" (like objabi.AbsFile does) if tools can be taught how
+ // to reliably expand that again.
+
+ fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.idx, pos)
+
+ fmt.Printf("\nfound %v, written at:\n", mHave)
+ if len(writerPCs) == 0 {
+ fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath)
+ }
+ for _, pc := range writerPCs {
+ fmt.Printf("\t%s\n", r.common.stringIdx(r.rawReloc(relocString, pc)))
+ }
+
+ fmt.Printf("\nexpected %v, reading at:\n", mWant)
+ var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size?
+ n := runtime.Callers(2, readerPCs[:])
+ for _, pc := range fmtFrames(readerPCs[:n]...) {
+ fmt.Printf("\t%s\n", pc)
+ }
+
+ // We already printed a stack trace for the reader, so now we can
+ // simply exit. Printing a second one with panic or base.Fatalf
+ // would just be noise.
+ os.Exit(1)
+}
+
+func (r *decoder) bool() bool {
+ r.sync(syncBool)
+ x, err := r.data.ReadByte()
+ r.checkErr(err)
+ assert(x < 2)
+ return x != 0
+}
+
+func (r *decoder) int64() int64 {
+ r.sync(syncInt64)
+ return r.rawVarint()
+}
+
+func (r *decoder) uint64() uint64 {
+ r.sync(syncUint64)
+ return r.rawUvarint()
+}
+
+func (r *decoder) len() int { x := r.uint64(); v := int(x); assert(uint64(v) == x); return v }
+func (r *decoder) int() int { x := r.int64(); v := int(x); assert(int64(v) == x); return v }
+func (r *decoder) uint() uint { x := r.uint64(); v := uint(x); assert(uint64(v) == x); return v }
+
+func (r *decoder) code(mark syncMarker) int {
+ r.sync(mark)
+ return r.len()
+}
+
+func (r *decoder) reloc(k reloc) int {
+ r.sync(syncUseReloc)
+ return r.rawReloc(k, r.len())
+}
+
+func (r *decoder) string() string {
+ r.sync(syncString)
+ return r.common.stringIdx(r.reloc(relocString))
+}
+
+func (r *decoder) strings() []string {
+ res := make([]string, r.len())
+ for i := range res {
+ res[i] = r.string()
+ }
+ return res
+}
+
+func (r *decoder) rawValue() constant.Value {
+ isComplex := r.bool()
+ val := r.scalar()
+ if isComplex {
+ val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar()))
+ }
+ return val
+}
+
+func (r *decoder) scalar() constant.Value {
+ switch tag := codeVal(r.code(syncVal)); tag {
+ default:
+ panic(fmt.Sprintf("unexpected scalar tag: %v", tag))
+
+ case valBool:
+ return constant.MakeBool(r.bool())
+ case valString:
+ return constant.MakeString(r.string())
+ case valInt64:
+ return constant.MakeInt64(r.int64())
+ case valBigInt:
+ return constant.Make(r.bigInt())
+ case valBigRat:
+ num := r.bigInt()
+ denom := r.bigInt()
+ return constant.Make(new(big.Rat).SetFrac(num, denom))
+ case valBigFloat:
+ return constant.Make(r.bigFloat())
+ }
+}
+
+func (r *decoder) bigInt() *big.Int {
+ v := new(big.Int).SetBytes([]byte(r.string()))
+ if r.bool() {
+ v.Neg(v)
+ }
+ return v
+}
+
+func (r *decoder) bigFloat() *big.Float {
+ v := new(big.Float).SetPrec(512)
+ assert(v.UnmarshalText([]byte(r.string())) == nil)
+ return v
+}
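The rawVarint/rawUvarint pair above uses the usual zig-zag trick (mirrored in encoder.go below) so that values of small magnitude, negative or positive, encode in few bytes. A standalone, runnable illustration of just the arithmetic, independent of the compiler packages:

package main

import "fmt"

func zigzag(x int64) uint64 {
	ux := uint64(x) << 1
	if x < 0 {
		ux = ^ux
	}
	return ux
}

func unzigzag(ux uint64) int64 {
	x := int64(ux >> 1)
	if ux&1 != 0 {
		x = ^x
	}
	return x
}

func main() {
	for _, x := range []int64{0, -1, 1, -2, 2, -64, 63} {
		// -1 -> 1, 1 -> 2, -2 -> 3, ...: magnitude stays small either way.
		fmt.Printf("%d -> %d -> %d\n", x, zigzag(x), unzigzag(zigzag(x)))
	}
}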
diff --git a/src/cmd/compile/internal/noder/encoder.go b/src/cmd/compile/internal/noder/encoder.go
new file mode 100644
index 0000000000..d8ab0f6255
--- /dev/null
+++ b/src/cmd/compile/internal/noder/encoder.go
@@ -0,0 +1,284 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "io"
+ "math/big"
+ "runtime"
+
+ "cmd/compile/internal/base"
+)
+
+type pkgEncoder struct {
+ elems [numRelocs][]string
+
+ stringsIdx map[string]int
+}
+
+func newPkgEncoder() pkgEncoder {
+ return pkgEncoder{
+ stringsIdx: make(map[string]int),
+ }
+}
+
+func (pw *pkgEncoder) dump(out io.Writer) {
+ writeUint32 := func(x uint32) {
+ assert(binary.Write(out, binary.LittleEndian, x) == nil)
+ }
+
+ var sum uint32
+ for _, elems := range &pw.elems {
+ sum += uint32(len(elems))
+ writeUint32(sum)
+ }
+
+ sum = 0
+ for _, elems := range &pw.elems {
+ for _, elem := range elems {
+ sum += uint32(len(elem))
+ writeUint32(sum)
+ }
+ }
+
+ for _, elems := range &pw.elems {
+ for _, elem := range elems {
+ _, err := io.WriteString(out, elem)
+ assert(err == nil)
+ }
+ }
+}
+
+func (pw *pkgEncoder) stringIdx(s string) int {
+ if idx, ok := pw.stringsIdx[s]; ok {
+ assert(pw.elems[relocString][idx] == s)
+ return idx
+ }
+
+ idx := len(pw.elems[relocString])
+ pw.elems[relocString] = append(pw.elems[relocString], s)
+ pw.stringsIdx[s] = idx
+ return idx
+}
+
+func (pw *pkgEncoder) newEncoder(k reloc, marker syncMarker) encoder {
+ e := pw.newEncoderRaw(k)
+ e.sync(marker)
+ return e
+}
+
+func (pw *pkgEncoder) newEncoderRaw(k reloc) encoder {
+ idx := len(pw.elems[k])
+ pw.elems[k] = append(pw.elems[k], "") // placeholder
+
+ return encoder{
+ p: pw,
+ k: k,
+ idx: idx,
+ }
+}
+
+// Encoders
+
+type encoder struct {
+ p *pkgEncoder
+
+ relocs []relocEnt
+ data bytes.Buffer
+
+ encodingRelocHeader bool
+
+ k reloc
+ idx int
+}
+
+func (w *encoder) flush() int {
+ var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
+
+ // Back up the data so we write the relocations at the front.
+ var tmp bytes.Buffer
+ io.Copy(&tmp, &w.data)
+
+ // TODO(mdempsky): Consider writing these out separately so they're
+ // easier to strip, along with function bodies, so that we can prune
+ // down to just the data that's relevant to go/types.
+ if w.encodingRelocHeader {
+ base.Fatalf("encodingRelocHeader already true; recursive flush?")
+ }
+ w.encodingRelocHeader = true
+ w.sync(syncRelocs)
+ w.len(len(w.relocs))
+ for _, rent := range w.relocs {
+ w.sync(syncReloc)
+ w.len(int(rent.kind))
+ w.len(rent.idx)
+ }
+
+ io.Copy(&sb, &w.data)
+ io.Copy(&sb, &tmp)
+ w.p.elems[w.k][w.idx] = sb.String()
+
+ return w.idx
+}
+
+func (w *encoder) checkErr(err error) {
+ if err != nil {
+ base.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func (w *encoder) rawUvarint(x uint64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(buf[:], x)
+ _, err := w.data.Write(buf[:n])
+ w.checkErr(err)
+}
+
+func (w *encoder) rawVarint(x int64) {
+ // Zig-zag encode.
+ ux := uint64(x) << 1
+ if x < 0 {
+ ux = ^ux
+ }
+
+ w.rawUvarint(ux)
+}
+
+func (w *encoder) rawReloc(r reloc, idx int) int {
+ // TODO(mdempsky): Use map for lookup.
+ for i, rent := range w.relocs {
+ if rent.kind == r && rent.idx == idx {
+ return i
+ }
+ }
+
+ i := len(w.relocs)
+ w.relocs = append(w.relocs, relocEnt{r, idx})
+ return i
+}
+
+func (w *encoder) sync(m syncMarker) {
+ if !enableSync {
+ return
+ }
+
+ // Writing out stack frame string references requires working
+ // relocations, but writing out the relocations themselves involves
+ // sync markers. To prevent infinite recursion, we simply trim the
+ // stack frame for sync markers within the relocation header.
+ var frames []string
+ if !w.encodingRelocHeader && base.Debug.SyncFrames > 0 {
+ pcs := make([]uintptr, base.Debug.SyncFrames)
+ n := runtime.Callers(2, pcs)
+ frames = fmtFrames(pcs[:n]...)
+ }
+
+ // TODO(mdempsky): Save space by writing out stack frames as a
+ // linked list so we can share common stack frames.
+ w.rawUvarint(uint64(m))
+ w.rawUvarint(uint64(len(frames)))
+ for _, frame := range frames {
+ w.rawUvarint(uint64(w.rawReloc(relocString, w.p.stringIdx(frame))))
+ }
+}
+
+func (w *encoder) bool(b bool) bool {
+ w.sync(syncBool)
+ var x byte
+ if b {
+ x = 1
+ }
+ err := w.data.WriteByte(x)
+ w.checkErr(err)
+ return b
+}
+
+func (w *encoder) int64(x int64) {
+ w.sync(syncInt64)
+ w.rawVarint(x)
+}
+
+func (w *encoder) uint64(x uint64) {
+ w.sync(syncUint64)
+ w.rawUvarint(x)
+}
+
+func (w *encoder) len(x int) { assert(x >= 0); w.uint64(uint64(x)) }
+func (w *encoder) int(x int) { w.int64(int64(x)) }
+func (w *encoder) uint(x uint) { w.uint64(uint64(x)) }
+
+func (w *encoder) reloc(r reloc, idx int) {
+ w.sync(syncUseReloc)
+ w.len(w.rawReloc(r, idx))
+}
+
+func (w *encoder) code(c code) {
+ w.sync(c.marker())
+ w.len(c.value())
+}
+
+func (w *encoder) string(s string) {
+ w.sync(syncString)
+ w.reloc(relocString, w.p.stringIdx(s))
+}
+
+func (w *encoder) strings(ss []string) {
+ w.len(len(ss))
+ for _, s := range ss {
+ w.string(s)
+ }
+}
+
+func (w *encoder) rawValue(val constant.Value) {
+ if w.bool(val.Kind() == constant.Complex) {
+ w.scalar(constant.Real(val))
+ w.scalar(constant.Imag(val))
+ } else {
+ w.scalar(val)
+ }
+}
+
+func (w *encoder) scalar(val constant.Value) {
+ switch v := constant.Val(val).(type) {
+ default:
+ panic(fmt.Sprintf("unhandled %v (%v)", val, val.Kind()))
+ case bool:
+ w.code(valBool)
+ w.bool(v)
+ case string:
+ w.code(valString)
+ w.string(v)
+ case int64:
+ w.code(valInt64)
+ w.int64(v)
+ case *big.Int:
+ w.code(valBigInt)
+ w.bigInt(v)
+ case *big.Rat:
+ w.code(valBigRat)
+ w.bigInt(v.Num())
+ w.bigInt(v.Denom())
+ case *big.Float:
+ w.code(valBigFloat)
+ w.bigFloat(v)
+ }
+}
+
+func (w *encoder) bigInt(v *big.Int) {
+ b := v.Bytes()
+ w.string(string(b)) // TODO: More efficient encoding.
+ w.bool(v.Sign() < 0)
+}
+
+func (w *encoder) bigFloat(v *big.Float) {
+ b := v.Append(nil, 'p', -1)
+ w.string(string(b)) // TODO: More efficient encoding.
+}
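
Aside (not part of the patch): rawVarint pairs zig-zag encoding with rawUvarint so small negative values stay short on the wire; the standard library's binary.PutVarint uses the same scheme. A minimal standalone sketch of the round trip, with illustrative names zigzag/unzigzag:

package main

import (
	"encoding/binary"
	"fmt"
)

// zigzag mirrors encoder.rawVarint: signed values are zig-zag encoded
// so small negative numbers stay small when uvarint-encoded.
func zigzag(x int64) uint64 {
	ux := uint64(x) << 1
	if x < 0 {
		ux = ^ux
	}
	return ux
}

// unzigzag inverts zigzag, as the matching decoder would.
func unzigzag(ux uint64) int64 {
	x := int64(ux >> 1)
	if ux&1 != 0 {
		x = ^x
	}
	return x
}

func main() {
	var buf [binary.MaxVarintLen64]byte
	for _, x := range []int64{0, -1, 1, -64, 63, 1 << 40} {
		n := binary.PutUvarint(buf[:], zigzag(x))
		fmt.Printf("%d -> % x -> %d\n", x, buf[:n], unzigzag(zigzag(x)))
	}
}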
diff --git a/src/cmd/compile/internal/noder/export.go b/src/cmd/compile/internal/noder/export.go
new file mode 100644
index 0000000000..1a296e22c8
--- /dev/null
+++ b/src/cmd/compile/internal/noder/export.go
@@ -0,0 +1,65 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/typecheck"
+ "cmd/internal/bio"
+)
+
+// writeNewExportFunc is a hook that can be added to append extra
+// export data after the normal export data section. It allows
+// experimenting with new export data format designs without requiring
+// immediate support in the go/internal or x/tools importers.
+var writeNewExportFunc func(out io.Writer)
+
+func WriteExports(out *bio.Writer) {
+ // When unified IR exports are enabled, we simply append it to the
+ // end of the normal export data (with compiler extensions
+ // disabled), and write an extra header giving its size.
+ //
+ // If the compiler sees this header, it knows to read the new data
+ // instead; meanwhile the go/types importers will silently ignore it
+ // and continue processing the old export instead.
+ //
+ // This allows us to experiment with changes to the new export data
+ // format without needing to update the go/internal/gcimporter or
+ // (worse) x/tools/go/gcexportdata.
+
+ useNewExport := writeNewExportFunc != nil
+
+ var old, new bytes.Buffer
+
+ typecheck.WriteExports(&old, !useNewExport)
+
+ if useNewExport {
+ writeNewExportFunc(&new)
+ }
+
+ oldLen := old.Len()
+ newLen := new.Len()
+
+ if useNewExport {
+ fmt.Fprintf(out, "\nnewexportsize %v\n", newLen)
+ }
+
+ // The linker also looks for the $$ marker - use char after $$ to distinguish format.
+ out.WriteString("\n$$B\n") // indicate binary export format
+ io.Copy(out, &old)
+ out.WriteString("\n$$\n")
+ io.Copy(out, &new)
+
+ if base.Debug.Export != 0 {
+ fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, oldLen)
+ if useNewExport {
+ fmt.Printf("BenchmarkNewExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, newLen)
+ }
+ }
+}
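
For illustration only (not from this CL): the payload layout WriteExports produces can be pictured with a small sketch; buildExportPayload is a hypothetical helper, not compiler API.

package main

import (
	"bytes"
	"fmt"
)

// buildExportPayload mimics the layout WriteExports emits: an optional
// "newexportsize" header, the legacy export data between the "$$B" and
// "$$" markers, and the unified IR payload appended after the closing "$$".
func buildExportPayload(legacy, unified []byte) []byte {
	var out bytes.Buffer
	if len(unified) > 0 {
		fmt.Fprintf(&out, "\nnewexportsize %v\n", len(unified))
	}
	out.WriteString("\n$$B\n") // binary export format marker
	out.Write(legacy)
	out.WriteString("\n$$\n")
	out.Write(unified)
	return out.Bytes()
}

func main() {
	fmt.Printf("%q\n", buildExportPayload([]byte("old"), []byte("new")))
}

An importer that does not know about the extra header still sees a well-formed legacy section between the $$B and $$ markers, which is what lets the go/types importers keep working unchanged.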
diff --git a/src/cmd/compile/internal/noder/expr.go b/src/cmd/compile/internal/noder/expr.go
index c7695ed920..3e3c352a32 100644
--- a/src/cmd/compile/internal/noder/expr.go
+++ b/src/cmd/compile/internal/noder/expr.go
@@ -5,6 +5,8 @@
package noder
import (
+ "fmt"
+
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
@@ -15,6 +17,8 @@ import (
)
func (g *irgen) expr(expr syntax.Expr) ir.Node {
+ expr = unparen(expr) // skip parens; unneeded after parse+typecheck
+
if expr == nil {
return nil
}
@@ -67,14 +71,16 @@ func (g *irgen) expr(expr syntax.Expr) ir.Node {
// Constant expression.
if tv.Value != nil {
- return Const(g.pos(expr), g.typ(typ), tv.Value)
+ typ := g.typ(typ)
+ value := FixValue(typ, tv.Value)
+ return OrigConst(g.pos(expr), typ, value, constExprOp(expr), syntax.String(expr))
}
n := g.expr0(typ, expr)
if n.Typecheck() != 1 && n.Typecheck() != 3 {
base.FatalfAt(g.pos(expr), "missed typecheck: %+v", n)
}
- if !g.match(n.Type(), typ, tv.HasOk()) {
+ if n.Op() != ir.OFUNCINST && !g.match(n.Type(), typ, tv.HasOk()) {
base.FatalfAt(g.pos(expr), "expected %L to have type %v", n, typ)
}
return n
@@ -82,6 +88,11 @@ func (g *irgen) expr(expr syntax.Expr) ir.Node {
func (g *irgen) expr0(typ types2.Type, expr syntax.Expr) ir.Node {
pos := g.pos(expr)
+ assert(pos.IsKnown())
+
+ // Set base.Pos for transformation code that still uses base.Pos, rather than
+ // the pos of the node being converted.
+ base.Pos = pos
switch expr := expr.(type) {
case *syntax.Name:
@@ -105,23 +116,30 @@ func (g *irgen) expr0(typ types2.Type, expr syntax.Expr) ir.Node {
// The key for the Inferred map is the CallExpr (if inferring
// types required the function arguments) or the IndexExpr below
// (if types could be inferred without the function arguments).
- if inferred, ok := g.info.Inferred[expr]; ok && len(inferred.Targs) > 0 {
+ if inferred, ok := g.info.Inferred[expr]; ok && len(inferred.TArgs) > 0 {
// This is the case where inferring types required the
// types of the function arguments.
- targs := make([]ir.Node, len(inferred.Targs))
- for i, targ := range inferred.Targs {
+ targs := make([]ir.Node, len(inferred.TArgs))
+ for i, targ := range inferred.TArgs {
targs[i] = ir.TypeNode(g.typ(targ))
}
if fun.Op() == ir.OFUNCINST {
// Replace explicit type args with the full list that
- // includes the additional inferred type args
+ // includes the additional inferred type args.
+ // Substitute the type args for the type params in
+ // the generic function's type.
fun.(*ir.InstExpr).Targs = targs
+ newt := g.substType(fun.Type(), fun.Type().TParams(), targs)
+ typed(newt, fun)
} else {
- // Create a function instantiation here, given
- // there are only inferred type args (e.g.
- // min(5,6), where min is a generic function)
+ // Create a function instantiation here, given there
+ // are only inferred type args (e.g. min(5,6), where
+ // min is a generic function). Substitute the type
+ // args for the type params in the generic function's
+ // type.
inst := ir.NewInstExpr(pos, ir.OFUNCINST, fun, targs)
- typed(fun.Type(), inst)
+ newt := g.substType(fun.Type(), fun.Type().TParams(), targs)
+ typed(newt, inst)
fun = inst
}
@@ -131,12 +149,12 @@ func (g *irgen) expr0(typ types2.Type, expr syntax.Expr) ir.Node {
case *syntax.IndexExpr:
var targs []ir.Node
- if inferred, ok := g.info.Inferred[expr]; ok && len(inferred.Targs) > 0 {
+ if inferred, ok := g.info.Inferred[expr]; ok && len(inferred.TArgs) > 0 {
// This is the partial type inference case where the types
// can be inferred from other type arguments without using
// the types of the function arguments.
- targs = make([]ir.Node, len(inferred.Targs))
- for i, targ := range inferred.Targs {
+ targs = make([]ir.Node, len(inferred.TArgs))
+ for i, targ := range inferred.TArgs {
targs[i] = ir.TypeNode(g.typ(targ))
}
} else if _, ok := expr.Index.(*syntax.ListExpr); ok {
@@ -158,12 +176,16 @@ func (g *irgen) expr0(typ types2.Type, expr syntax.Expr) ir.Node {
panic("Incorrect argument for generic func instantiation")
}
n := ir.NewInstExpr(pos, ir.OFUNCINST, x, targs)
- typed(g.typ(typ), n)
+ newt := g.typ(typ)
+ // Substitute the type args for the type params in the uninstantiated
+ // function's type. If there aren't enough type args, then the rest
+ // will be inferred at the call node, so don't try the substitution yet.
+ if x.Type().TParams().NumFields() == len(targs) {
+ newt = g.substType(g.typ(typ), x.Type().TParams(), targs)
+ }
+ typed(newt, n)
return n
- case *syntax.ParenExpr:
- return g.expr(expr.X) // skip parens; unneeded after parse+typecheck
-
case *syntax.SelectorExpr:
// Qualified identifier.
if name, ok := expr.X.(*syntax.Name); ok {
@@ -193,7 +215,29 @@ func (g *irgen) expr0(typ types2.Type, expr syntax.Expr) ir.Node {
}
}
-// selectorExpr resolves the choice of ODOT, ODOTPTR, OCALLPART (eventually
+// substType does a normal type substitution, but tparams is in the form of a field
+// list, and targs is a slice of type nodes. substType records any newly
+// instantiated types into g.instTypeList.
+func (g *irgen) substType(typ *types.Type, tparams *types.Type, targs []ir.Node) *types.Type {
+ fields := tparams.FieldSlice()
+ tparams1 := make([]*types.Type, len(fields))
+ for i, f := range fields {
+ tparams1[i] = f.Type
+ }
+ targs1 := make([]*types.Type, len(targs))
+ for i, n := range targs {
+ targs1[i] = n.Type()
+ }
+ ts := typecheck.Tsubster{
+ Tparams: tparams1,
+ Targs: targs1,
+ }
+ newt := ts.Typ(typ)
+ g.instTypeList = append(g.instTypeList, ts.InstTypeList...)
+ return newt
+}
+
+// selectorExpr resolves the choice of ODOT, ODOTPTR, OMETHVALUE (eventually
// ODOTMETH & ODOTINTER), and OMETHEXPR and deals with embedded fields here rather
// than in typecheck.go.
func (g *irgen) selectorExpr(pos src.XPos, typ types2.Type, expr *syntax.SelectorExpr) ir.Node {
@@ -203,6 +247,44 @@ func (g *irgen) selectorExpr(pos src.XPos, typ types2.Type, expr *syntax.Selecto
// only be fully transformed once it has an instantiated type.
n := ir.NewSelectorExpr(pos, ir.OXDOT, x, typecheck.Lookup(expr.Sel.Value))
typed(g.typ(typ), n)
+
+ // Fill in n.Selection for a generic method reference or a bound
+ // interface method, even though we won't use it directly, since it
+ // is useful for analysis. Specifically do not fill in for fields or
+ // other interface methods (method call on an interface value), so
+ // n.Selection being non-nil means a method reference for a generic
+ // type or a method reference due to a bound.
+ obj2 := g.info.Selections[expr].Obj()
+ sig := types2.AsSignature(obj2.Type())
+ if sig == nil || sig.Recv() == nil {
+ return n
+ }
+ index := g.info.Selections[expr].Index()
+ last := index[len(index)-1]
+ // recvType is the receiver of the method being called. Because of the
+ // way methods are imported, g.obj(obj2) doesn't work across
+ // packages, so we have to look up the method via the receiver type.
+ recvType := deref2(sig.Recv().Type())
+ if types2.AsInterface(recvType.Underlying()) != nil {
+ fieldType := n.X.Type()
+ for _, ix := range index[:len(index)-1] {
+ fieldType = fieldType.Field(ix).Type
+ }
+ if fieldType.Kind() == types.TTYPEPARAM {
+ n.Selection = fieldType.Bound().AllMethods().Index(last)
+ //fmt.Printf(">>>>> %v: Bound call %v\n", base.FmtPos(pos), n.Sel)
+ } else {
+ assert(fieldType.Kind() == types.TINTER)
+ //fmt.Printf(">>>>> %v: Interface call %v\n", base.FmtPos(pos), n.Sel)
+ }
+ return n
+ }
+
+ recvObj := types2.AsNamed(recvType).Obj()
+ recv := g.pkg(recvObj.Pkg()).Lookup(recvObj.Name()).Def
+ n.Selection = recv.Type().Methods().Index(last)
+ //fmt.Printf(">>>>> %v: Method call %v\n", base.FmtPos(pos), n.Sel)
+
return n
}
@@ -259,14 +341,18 @@ func (g *irgen) selectorExpr(pos src.XPos, typ types2.Type, expr *syntax.Selecto
if wantPtr {
recvType2Base = types2.AsPointer(recvType2).Elem()
}
- if len(types2.AsNamed(recvType2Base).TParams()) > 0 {
+ if types2.AsNamed(recvType2Base).TParams().Len() > 0 {
// recvType2 is the original generic type that is
// instantiated for this method call.
// selinfo.Recv() is the instantiated type
recvType2 = recvType2Base
- // method is the generic method associated with the gen type
- method := g.obj(types2.AsNamed(recvType2).Method(last))
- n = ir.NewSelectorExpr(pos, ir.OCALLPART, x, method.Sym())
+ recvTypeSym := g.pkg(method2.Pkg()).Lookup(recvType2.(*types2.Named).Obj().Name())
+ recvType := recvTypeSym.Def.(*ir.Name).Type()
+ // method is the generic method associated with
+ // the base generic type. The instantiated type may not
+ // have method bodies filled in, if it was imported.
+ method := recvType.Methods().Index(last).Nname.(*ir.Name)
+ n = ir.NewSelectorExpr(pos, ir.OMETHVALUE, x, typecheck.Lookup(expr.Sel.Value))
n.(*ir.SelectorExpr).Selection = types.NewField(pos, method.Sym(), method.Type())
n.(*ir.SelectorExpr).Selection.Nname = method
typed(method.Type(), n)
@@ -301,10 +387,7 @@ func (g *irgen) selectorExpr(pos src.XPos, typ types2.Type, expr *syntax.Selecto
// getTargs gets the targs associated with the receiver of a selected method
func getTargs(selinfo *types2.Selection) []types2.Type {
- r := selinfo.Recv()
- if p := types2.AsPointer(r); p != nil {
- r = p.Elem()
- }
+ r := deref2(selinfo.Recv())
n := types2.AsNamed(r)
if n == nil {
base.Fatalf("Incorrect type for selinfo %v", selinfo)
@@ -313,13 +396,17 @@ func getTargs(selinfo *types2.Selection) []types2.Type {
}
func (g *irgen) exprList(expr syntax.Expr) []ir.Node {
+ return g.exprs(unpackListExpr(expr))
+}
+
+func unpackListExpr(expr syntax.Expr) []syntax.Expr {
switch expr := expr.(type) {
case nil:
return nil
case *syntax.ListExpr:
- return g.exprs(expr.ElemList)
+ return expr.ElemList
default:
- return []ir.Node{g.expr(expr)}
+ return []syntax.Expr{expr}
}
}
@@ -344,11 +431,13 @@ func (g *irgen) compLit(typ types2.Type, lit *syntax.CompositeLit) ir.Node {
for i, elem := range lit.ElemList {
switch elem := elem.(type) {
case *syntax.KeyValueExpr:
+ var key ir.Node
if isStruct {
- exprs[i] = ir.NewStructKeyExpr(g.pos(elem), g.name(elem.Key.(*syntax.Name)), g.expr(elem.Value))
+ key = ir.NewIdent(g.pos(elem.Key), g.name(elem.Key.(*syntax.Name)))
} else {
- exprs[i] = ir.NewKeyExpr(g.pos(elem), g.expr(elem.Key), g.expr(elem.Value))
+ key = g.expr(elem.Key)
}
+ exprs[i] = ir.NewKeyExpr(g.pos(elem), key, g.expr(elem.Value))
default:
exprs[i] = g.expr(elem)
}
@@ -360,19 +449,13 @@ func (g *irgen) compLit(typ types2.Type, lit *syntax.CompositeLit) ir.Node {
}
func (g *irgen) funcLit(typ2 types2.Type, expr *syntax.FuncLit) ir.Node {
- fn := ir.NewFunc(g.pos(expr))
- fn.SetIsHiddenClosure(ir.CurFunc != nil)
+ fn := ir.NewClosureFunc(g.pos(expr), ir.CurFunc != nil)
+ ir.NameClosure(fn.OClosure, ir.CurFunc)
- fn.Nname = ir.NewNameAt(g.pos(expr), typecheck.ClosureName(ir.CurFunc))
- ir.MarkFunc(fn.Nname)
typ := g.typ(typ2)
- fn.Nname.Func = fn
- fn.Nname.Defn = fn
typed(typ, fn.Nname)
- fn.SetTypecheck(1)
-
- fn.OClosure = ir.NewClosureExpr(g.pos(expr), fn)
typed(typ, fn.OClosure)
+ fn.SetTypecheck(1)
g.funcBody(fn, nil, expr.Type, expr.Body)
@@ -386,9 +469,14 @@ func (g *irgen) funcLit(typ2 types2.Type, expr *syntax.FuncLit) ir.Node {
cv.SetWalkdef(1)
}
- g.target.Decls = append(g.target.Decls, fn)
-
- return fn.OClosure
+ if g.topFuncIsGeneric {
+ // Don't add any closure inside a generic function/method to the
+ // g.target.Decls list, even though it may not be generic itself.
+ // See issue #47514.
+ return ir.UseClosure(fn.OClosure, nil)
+ } else {
+ return ir.UseClosure(fn.OClosure, g.target)
+ }
}
func (g *irgen) typeExpr(typ syntax.Expr) *types.Type {
@@ -398,3 +486,35 @@ func (g *irgen) typeExpr(typ syntax.Expr) *types.Type {
}
return n.Type()
}
+
+// constExprOp returns an ir.Op that represents the outermost
+// operation of the given constant expression. It's intended for use
+// with ir.RawOrigExpr.
+func constExprOp(expr syntax.Expr) ir.Op {
+ switch expr := expr.(type) {
+ default:
+ panic(fmt.Sprintf("%s: unexpected expression: %T", expr.Pos(), expr))
+
+ case *syntax.BasicLit:
+ return ir.OLITERAL
+ case *syntax.Name, *syntax.SelectorExpr:
+ return ir.ONAME
+ case *syntax.CallExpr:
+ return ir.OCALL
+ case *syntax.Operation:
+ if expr.Y == nil {
+ return unOps[expr.Op]
+ }
+ return binOps[expr.Op]
+ }
+}
+
+func unparen(expr syntax.Expr) syntax.Expr {
+ for {
+ paren, ok := expr.(*syntax.ParenExpr)
+ if !ok {
+ return expr
+ }
+ expr = paren.X
+ }
+}
diff --git a/src/cmd/compile/internal/noder/frames_go1.go b/src/cmd/compile/internal/noder/frames_go1.go
new file mode 100644
index 0000000000..2958efd622
--- /dev/null
+++ b/src/cmd/compile/internal/noder/frames_go1.go
@@ -0,0 +1,20 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+// TODO(mdempsky): Remove after #44505 is resolved
+
+package noder
+
+import "runtime"
+
+func walkFrames(pcs []uintptr, visit frameVisitor) {
+ for _, pc := range pcs {
+ fn := runtime.FuncForPC(pc)
+ file, line := fn.FileLine(pc)
+
+ visit(file, line, fn.Name(), pc-fn.Entry())
+ }
+}
diff --git a/src/cmd/compile/internal/noder/frames_go17.go b/src/cmd/compile/internal/noder/frames_go17.go
new file mode 100644
index 0000000000..273217e39a
--- /dev/null
+++ b/src/cmd/compile/internal/noder/frames_go17.go
@@ -0,0 +1,24 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package noder
+
+import "runtime"
+
+func walkFrames(pcs []uintptr, visit frameVisitor) {
+ if len(pcs) == 0 {
+ return
+ }
+
+ frames := runtime.CallersFrames(pcs)
+ for {
+ frame, more := frames.Next()
+ visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
+ if !more {
+ return
+ }
+ }
+}
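
As a rough sketch of how these visitors are driven (the frameVisitor type the two files above reference lives elsewhere in the noder package; the signature below is inferred from the calls in walkFrames):

package main

import (
	"fmt"
	"runtime"
)

// visitor matches the shape walkFrames expects: one call per frame with
// file, line, function name, and the PC offset within the function.
type visitor func(file string, line int, name string, offset uintptr)

func walk(pcs []uintptr, visit visitor) {
	if len(pcs) == 0 {
		return
	}
	frames := runtime.CallersFrames(pcs)
	for {
		frame, more := frames.Next()
		visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
		if !more {
			return
		}
	}
}

func main() {
	pcs := make([]uintptr, 8)
	n := runtime.Callers(1, pcs) // skip runtime.Callers itself
	walk(pcs[:n], func(file string, line int, name string, _ uintptr) {
		fmt.Printf("%s:%d %s\n", file, line, name)
	})
}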
diff --git a/src/cmd/compile/internal/noder/helpers.go b/src/cmd/compile/internal/noder/helpers.go
index 9da0e49300..b9dbd030af 100644
--- a/src/cmd/compile/internal/noder/helpers.go
+++ b/src/cmd/compile/internal/noder/helpers.go
@@ -43,6 +43,32 @@ func Const(pos src.XPos, typ *types.Type, val constant.Value) ir.Node {
return typed(typ, ir.NewBasicLit(pos, val))
}
+func OrigConst(pos src.XPos, typ *types.Type, val constant.Value, op ir.Op, raw string) ir.Node {
+ orig := ir.NewRawOrigExpr(pos, op, raw)
+ return ir.NewConstExpr(val, typed(typ, orig))
+}
+
+// FixValue returns val after converting and truncating it as
+// appropriate for typ.
+func FixValue(typ *types.Type, val constant.Value) constant.Value {
+ assert(typ.Kind() != types.TFORW)
+ switch {
+ case typ.IsInteger():
+ val = constant.ToInt(val)
+ case typ.IsFloat():
+ val = constant.ToFloat(val)
+ case typ.IsComplex():
+ val = constant.ToComplex(val)
+ }
+ if !typ.IsUntyped() {
+ val = typecheck.DefaultLit(ir.NewBasicLit(src.NoXPos, val), typ).Val()
+ }
+ if !typ.IsTypeParam() {
+ ir.AssertValidTypeForConst(typ, val)
+ }
+ return val
+}
+
func Nil(pos src.XPos, typ *types.Type) ir.Node {
return typed(typ, ir.NewNilExpr(pos))
}
@@ -87,15 +113,15 @@ func Binary(pos src.XPos, op ir.Op, typ *types.Type, x, y ir.Node) ir.Node {
func Call(pos src.XPos, typ *types.Type, fun ir.Node, args []ir.Node, dots bool) ir.Node {
n := ir.NewCallExpr(pos, ir.OCALL, fun, args)
n.IsDDD = dots
- // n.Use will be changed to ir.CallUseStmt in g.stmt() if this call is
- // just a statement (any return values are ignored).
- n.Use = ir.CallUseExpr
if fun.Op() == ir.OTYPE {
// Actually a type conversion, not a function call.
- if fun.Type().HasTParam() || args[0].Type().HasTParam() {
- // For type params, don't typecheck until we actually know
- // the type.
+ if !fun.Type().IsInterface() &&
+ (fun.Type().HasTParam() || args[0].Type().HasTParam()) {
+ // For type params, we can transform if fun.Type() is known
+ // to be an interface (in which case a CONVIFACE node will be
+ // inserted). Otherwise, don't typecheck until we actually
+ // know the type.
return typed(typ, n)
}
typed(typ, n)
@@ -103,22 +129,17 @@ func Call(pos src.XPos, typ *types.Type, fun ir.Node, args []ir.Node, dots bool)
}
if fun, ok := fun.(*ir.Name); ok && fun.BuiltinOp != 0 {
- // For Builtin ops, we currently stay with using the old
- // typechecker to transform the call to a more specific expression
- // and possibly use more specific ops. However, for a bunch of the
- // ops, we delay doing the old typechecker if any of the args have
- // type params, for a variety of reasons:
+ // For most Builtin ops, we delay doing transformBuiltin if any of the
+ // args have type params, for a variety of reasons:
//
- // OMAKE: hard to choose specific ops OMAKESLICE, etc. until arg type is known
- // OREAL/OIMAG: can't determine type float32/float64 until arg type know
- // OLEN/OCAP: old typechecker will complain if arg is not obviously a slice/array.
- // OAPPEND: old typechecker will complain if arg is not obviously slice, etc.
- //
- // We will eventually break out the transforming functionality
- // needed for builtin's, and call it here or during stenciling, as
- // appropriate.
+ // OMAKE: transformMake can't choose specific ops OMAKESLICE, etc.
+ // until arg type is known
+ // OREAL/OIMAG: transformRealImag can't determine type float32/float64
+ // until arg type known
+ // OAPPEND: transformAppend requires that the arg is a slice
+ // ODELETE: transformDelete requires that the arg is a map
switch fun.BuiltinOp {
- case ir.OMAKE, ir.OREAL, ir.OIMAG, ir.OLEN, ir.OCAP, ir.OAPPEND:
+ case ir.OMAKE, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.ODELETE:
hasTParam := false
for _, arg := range args {
if arg.Type().HasTParam() {
@@ -137,10 +158,8 @@ func Call(pos src.XPos, typ *types.Type, fun ir.Node, args []ir.Node, dots bool)
// Add information, now that we know that fun is actually being called.
switch fun := fun.(type) {
- case *ir.ClosureExpr:
- fun.Func.SetClosureCalled(true)
case *ir.SelectorExpr:
- if fun.Op() == ir.OCALLPART {
+ if fun.Op() == ir.OMETHVALUE {
op := ir.ODOTMETH
if fun.X.Type().IsInterface() {
op = ir.ODOTINTER
@@ -152,46 +171,52 @@ func Call(pos src.XPos, typ *types.Type, fun ir.Node, args []ir.Node, dots bool)
}
}
- if fun.Type().HasTParam() {
- // If the fun arg is or has a type param, don't do any extra
+ if fun.Type().HasTParam() || fun.Op() == ir.OXDOT || fun.Op() == ir.OFUNCINST {
+ // If the fun arg is or has a type param, we can't do all the
// transformations, since we may not have needed properties yet
- // (e.g. number of return values, etc). The type param is probably
- // described by a structural constraint that requires it to be a
- // certain function type, etc., but we don't want to analyze that.
- return typed(typ, n)
- }
-
- if fun.Op() == ir.OXDOT {
- if !fun.(*ir.SelectorExpr).X.Type().HasTParam() {
- base.FatalfAt(pos, "Expecting type param receiver in %v", fun)
+ // (e.g. number of return values, etc). The same applies if a fun
+ // which is an XDOT could not be transformed yet because of a generic
+ // type in the X of the selector expression.
+ //
+ // A function instantiation (even if fully concrete) shouldn't be
+ // transformed yet, because we need to add the dictionary during the
+ // transformation.
+ //
+ // However, if we have a function type (even though it is
+ // parameterized), then we can add in any needed CONVIFACE nodes via
+ // typecheckaste(). We need to call transformArgs() to deal first
+ // with the f(g()) case where g returns multiple return values. We
+ // can't do anything if fun is a type param (which is probably
+ // described by a structural constraint).
+ if fun.Type().Kind() == types.TFUNC {
+ transformArgs(n)
+ typecheckaste(ir.OCALL, fun, n.IsDDD, fun.Type().Params(), n.Args, true)
}
- // For methods called in a generic function, don't do any extra
- // transformations. We will do those later when we create the
- // instantiated function and have the correct receiver type.
- typed(typ, n)
- return n
- }
- if fun.Op() != ir.OFUNCINST {
- // If no type params, do the normal call transformations. This
- // will convert OCALL to OCALLFUNC.
- typed(typ, n)
- transformCall(n)
- return n
+ return typed(typ, n)
}
- // Leave the op as OCALL, which indicates the call still needs typechecking.
+ // If no type params, do the normal call transformations. This
+ // will convert OCALL to OCALLFUNC.
typed(typ, n)
+ transformCall(n)
return n
}
func Compare(pos src.XPos, typ *types.Type, op ir.Op, x, y ir.Node) ir.Node {
n := ir.NewBinaryExpr(pos, op, x, y)
if x.Type().HasTParam() || y.Type().HasTParam() {
- // Delay transformCompare() if either arg has a type param, since
- // it needs to know the exact types to decide on any needed conversions.
- n.SetType(typ)
- n.SetTypecheck(3)
- return n
+ xIsInt := x.Type().IsInterface()
+ yIsInt := y.Type().IsInterface()
+ if !(xIsInt && !yIsInt || !xIsInt && yIsInt) {
+ // If either arg is a type param, then we can still do the
+ // transformCompare() if we know that one arg is an interface
+ // and the other is not. Otherwise, we delay
+ // transformCompare(), since it needs to know the exact types
+ // to decide on any needed conversions.
+ n.SetType(typ)
+ n.SetTypecheck(3)
+ return n
+ }
}
typed(typ, n)
transformCompare(n)
@@ -225,7 +250,7 @@ func DotMethod(pos src.XPos, x ir.Node, index int) *ir.SelectorExpr {
// Method value.
typ := typecheck.NewMethodType(method.Type, nil)
- return dot(pos, typ, ir.OCALLPART, x, method)
+ return dot(pos, typ, ir.OMETHVALUE, x, method)
}
// MethodExpr returns a OMETHEXPR node with the indicated index into the methods
@@ -321,5 +346,15 @@ var one = constant.MakeInt64(1)
func IncDec(pos src.XPos, op ir.Op, x ir.Node) *ir.AssignOpStmt {
assert(x.Type() != nil)
- return ir.NewAssignOpStmt(pos, op, x, typecheck.DefaultLit(ir.NewBasicLit(pos, one), x.Type()))
+ bl := ir.NewBasicLit(pos, one)
+ if x.Type().HasTParam() {
+ // If the operand is generic, then types2 will have proved it must be
+ // a type that fits with increment/decrement, so just set the type of
+ // "one" to n.Type(). This works even for types that are eventually
+ // float or complex.
+ typed(x.Type(), bl)
+ } else {
+ bl = typecheck.DefaultLit(bl, x.Type())
+ }
+ return ir.NewAssignOpStmt(pos, op, x, bl)
}
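
A brief aside: FixValue above leans on go/constant's conversion helpers before truncating via the typechecker; the sketch below shows that first step in isolation (coerce is an illustrative stand-in, not compiler API).

package main

import (
	"fmt"
	"go/constant"
	"go/token"
)

// coerce mirrors FixValue's switch: move the constant into the kind
// family the target type expects. Truncation to a sized type (e.g.
// int8) is a separate step, done via the typechecker's DefaultLit.
func coerce(val constant.Value, kind string) constant.Value {
	switch kind {
	case "integer":
		return constant.ToInt(val)
	case "float":
		return constant.ToFloat(val)
	case "complex":
		return constant.ToComplex(val)
	}
	return val
}

func main() {
	f := constant.MakeFromLiteral("3.0", token.FLOAT, 0)
	fmt.Println(coerce(f, "integer")) // prints 3
	fmt.Println(coerce(f, "complex"))
}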
diff --git a/src/cmd/compile/internal/noder/import.go b/src/cmd/compile/internal/noder/import.go
index 701e9001c8..48f0e48028 100644
--- a/src/cmd/compile/internal/noder/import.go
+++ b/src/cmd/compile/internal/noder/import.go
@@ -8,7 +8,6 @@ import (
"errors"
"fmt"
"internal/buildcfg"
- "io"
"os"
pathpkg "path"
"runtime"
@@ -32,8 +31,24 @@ import (
"cmd/internal/src"
)
-// Temporary import helper to get type2-based type-checking going.
+// haveLegacyImports records whether we've imported any packages
+// without a new export data section. This is useful for experimenting
+// with new export data format designs, when you need to support
+// existing tests that manually compile files with inconsistent
+// compiler flags.
+var haveLegacyImports = false
+
+// newReadImportFunc is an extension hook for experimenting with new
+// export data formats. If a new export data payload was written out
+// for an imported package by overloading writeNewExportFunc, then
+// that payload will be mapped into memory and passed to
+// newReadImportFunc.
+var newReadImportFunc = func(data string, pkg1 *types.Pkg, check *types2.Checker, packages map[string]*types2.Package) (pkg2 *types2.Package, err error) {
+ panic("unexpected new export data payload")
+}
+
type gcimports struct {
+ check *types2.Checker
packages map[string]*types2.Package
}
@@ -46,13 +61,8 @@ func (m *gcimports) ImportFrom(path, srcDir string, mode types2.ImportMode) (*ty
panic("mode must be 0")
}
- path, err := resolveImportPath(path)
- if err != nil {
- return nil, err
- }
-
- lookup := func(path string) (io.ReadCloser, error) { return openPackage(path) }
- return importer.Import(m.packages, path, srcDir, lookup)
+ _, pkg, err := readImportFile(path, typecheck.Target, m.check, m.packages)
+ return pkg, err
}
func isDriveLetter(b byte) bool {
@@ -175,160 +185,242 @@ func resolveImportPath(path string) (string, error) {
return path, nil
}
-// TODO(mdempsky): Return an error instead.
func importfile(decl *syntax.ImportDecl) *types.Pkg {
- if decl.Path.Kind != syntax.StringLit {
- base.Errorf("import path must be a string")
+ path, err := parseImportPath(decl.Path)
+ if err != nil {
+ base.Errorf("%s", err)
return nil
}
- path, err := strconv.Unquote(decl.Path.Value)
+ pkg, _, err := readImportFile(path, typecheck.Target, nil, nil)
if err != nil {
- base.Errorf("import path must be a string")
+ base.Errorf("%s", err)
return nil
}
+ if pkg != ir.Pkgs.Unsafe && pkg.Height >= myheight {
+ myheight = pkg.Height + 1
+ }
+ return pkg
+}
+
+func parseImportPath(pathLit *syntax.BasicLit) (string, error) {
+ if pathLit.Kind != syntax.StringLit {
+ return "", errors.New("import path must be a string")
+ }
+
+ path, err := strconv.Unquote(pathLit.Value)
+ if err != nil {
+ return "", errors.New("import path must be a string")
+ }
+
if err := checkImportPath(path, false); err != nil {
- base.Errorf("%s", err.Error())
- return nil
+ return "", err
}
+ return path, err
+}
+
+// readImportFile reads the import file for the given package path and
+// returns its types.Pkg representation. If packages is non-nil, the
+// types2.Package representation is also returned.
+func readImportFile(path string, target *ir.Package, check *types2.Checker, packages map[string]*types2.Package) (pkg1 *types.Pkg, pkg2 *types2.Package, err error) {
path, err = resolveImportPath(path)
if err != nil {
- base.Errorf("%s", err)
- return nil
+ return
+ }
+
+ if path == "unsafe" {
+ pkg1, pkg2 = ir.Pkgs.Unsafe, types2.Unsafe
+
+ // TODO(mdempsky): Investigate if this actually matters. Why would
+ // the linker or runtime care whether a package imported unsafe?
+ if !pkg1.Direct {
+ pkg1.Direct = true
+ target.Imports = append(target.Imports, pkg1)
+ }
+
+ return
}
- importpkg := types.NewPkg(path, "")
- if importpkg.Direct {
- return importpkg // already fully loaded
+ pkg1 = types.NewPkg(path, "")
+ if packages != nil {
+ pkg2 = packages[path]
+ assert(pkg1.Direct == (pkg2 != nil && pkg2.Complete()))
}
- importpkg.Direct = true
- typecheck.Target.Imports = append(typecheck.Target.Imports, importpkg)
- if path == "unsafe" {
- return importpkg // initialized with universe
+ if pkg1.Direct {
+ return
}
+ pkg1.Direct = true
+ target.Imports = append(target.Imports, pkg1)
f, err := openPackage(path)
if err != nil {
- base.Errorf("could not import %q: %v", path, err)
- base.ErrorExit()
+ return
}
- imp := bio.NewReader(f)
- defer imp.Close()
- file := f.Name()
+ defer f.Close()
- // check object header
- p, err := imp.ReadString('\n')
+ r, end, newsize, err := findExportData(f)
if err != nil {
- base.Errorf("import %s: reading input: %v", file, err)
- base.ErrorExit()
+ return
}
- if p == "!<arch>\n" { // package archive
- // package export block should be first
- sz := archive.ReadHeader(imp.Reader, "__.PKGDEF")
- if sz <= 0 {
- base.Errorf("import %s: not a package file", file)
- base.ErrorExit()
- }
- p, err = imp.ReadString('\n')
+ if base.Debug.Export != 0 {
+ fmt.Printf("importing %s (%s)\n", path, f.Name())
+ }
+
+ if newsize != 0 {
+ // We have unified IR data. Map it, and feed to the importers.
+ end -= newsize
+ var data string
+ data, err = base.MapFile(r.File(), end, newsize)
if err != nil {
- base.Errorf("import %s: reading input: %v", file, err)
- base.ErrorExit()
+ return
}
- }
- if !strings.HasPrefix(p, "go object ") {
- base.Errorf("import %s: not a go object file: %s", file, p)
- base.ErrorExit()
- }
- q := objabi.HeaderString()
- if p != q {
- base.Errorf("import %s: object is [%s] expected [%s]", file, p, q)
- base.ErrorExit()
- }
+ pkg2, err = newReadImportFunc(data, pkg1, check, packages)
+ } else {
+ // We only have old data. Oh well, fall back to the legacy importers.
+ haveLegacyImports = true
- // process header lines
- for {
- p, err = imp.ReadString('\n')
+ var c byte
+ switch c, err = r.ReadByte(); {
+ case err != nil:
+ return
+
+ case c != 'i':
+ // Indexed format is distinguished by an 'i' byte,
+ // whereas previous export formats started with 'c', 'd', or 'v'.
+ err = fmt.Errorf("unexpected package format byte: %v", c)
+ return
+ }
+
+ pos := r.Offset()
+
+ // Map string (and data) section into memory as a single large
+ // string. This reduces heap fragmentation and allows
+ // returning individual substrings very efficiently.
+ var data string
+ data, err = base.MapFile(r.File(), pos, end-pos)
if err != nil {
- base.Errorf("import %s: reading input: %v", file, err)
- base.ErrorExit()
+ return
}
- if p == "\n" {
- break // header ends with blank line
+
+ typecheck.ReadImports(pkg1, data)
+
+ if packages != nil {
+ pkg2, err = importer.ImportData(packages, data, path)
+ if err != nil {
+ return
+ }
}
}
- // Expect $$B\n to signal binary import format.
+ err = addFingerprint(path, f, end)
+ return
+}
+
+// findExportData returns a *bio.Reader positioned at the start of the
+// binary export data section, a file offset for where to stop
+// reading, and the size (possibly zero) of the appended unified IR
+// export data section.
+func findExportData(f *os.File) (r *bio.Reader, end, newsize int64, err error) {
+ r = bio.NewReader(f)
+
+ // check object header
+ line, err := r.ReadString('\n')
+ if err != nil {
+ return
+ }
- // look for $$
- var c byte
- for {
- c, err = imp.ReadByte()
+ if line == "!<arch>\n" { // package archive
+ // package export block should be first
+ sz := int64(archive.ReadHeader(r.Reader, "__.PKGDEF"))
+ if sz <= 0 {
+ err = errors.New("not a package file")
+ return
+ }
+ end = r.Offset() + sz
+ line, err = r.ReadString('\n')
if err != nil {
- break
+ return
}
- if c == '$' {
- c, err = imp.ReadByte()
- if c == '$' || err != nil {
- break
- }
+ } else {
+ // Not an archive; provide end of file instead.
+ // TODO(mdempsky): I don't think this happens anymore.
+ var fi os.FileInfo
+ fi, err = f.Stat()
+ if err != nil {
+ return
}
+ end = fi.Size()
}
- // get character after $$
- if err == nil {
- c, _ = imp.ReadByte()
+ if !strings.HasPrefix(line, "go object ") {
+ err = fmt.Errorf("not a go object file: %s", line)
+ return
+ }
+ if expect := objabi.HeaderString(); line != expect {
+ err = fmt.Errorf("object is [%s] expected [%s]", line, expect)
+ return
}
- var fingerprint goobj.FingerprintType
- switch c {
- case '\n':
- base.Errorf("cannot import %s: old export format no longer supported (recompile library)", path)
- return nil
-
- case 'B':
- if base.Debug.Export != 0 {
- fmt.Printf("importing %s (%s)\n", path, file)
+ // process header lines
+ for !strings.HasPrefix(line, "$$") {
+ if strings.HasPrefix(line, "newexportsize ") {
+ fields := strings.Fields(line)
+ newsize, err = strconv.ParseInt(fields[1], 10, 64)
+ if err != nil {
+ return
+ }
}
- imp.ReadByte() // skip \n after $$B
- c, err = imp.ReadByte()
+ line, err = r.ReadString('\n')
if err != nil {
- base.Errorf("import %s: reading input: %v", file, err)
- base.ErrorExit()
+ return
}
+ }
- // Indexed format is distinguished by an 'i' byte,
- // whereas previous export formats started with 'c', 'd', or 'v'.
- if c != 'i' {
- base.Errorf("import %s: unexpected package format byte: %v", file, c)
- base.ErrorExit()
- }
- fingerprint = typecheck.ReadImports(importpkg, imp)
+ // Expect $$B\n to signal binary import format.
+ if line != "$$B\n" {
+ err = errors.New("old export format no longer supported (recompile library)")
+ return
+ }
+
+ return
+}
+
+// addFingerprint reads the linker fingerprint included at the end of
+// the exportdata.
+func addFingerprint(path string, f *os.File, end int64) error {
+ const eom = "\n$$\n"
+ var fingerprint goobj.FingerprintType
+
+ var buf [len(fingerprint) + len(eom)]byte
+ if _, err := f.ReadAt(buf[:], end-int64(len(buf))); err != nil {
+ return err
+ }
- default:
- base.Errorf("no import in %q", path)
- base.ErrorExit()
+ // Caller should have given us the end position of the export data,
+ // which should end with the "\n$$\n" marker. As a consistency check
+ // to make sure we're reading at the right offset, make sure we
+ // found the marker.
+ if s := string(buf[len(fingerprint):]); s != eom {
+ return fmt.Errorf("expected $$ marker, but found %q", s)
}
+ copy(fingerprint[:], buf[:])
+
// assume files move (get installed) so don't record the full path
if base.Flag.Cfg.PackageFile != nil {
// If using a packageFile map, assume path_ can be recorded directly.
base.Ctxt.AddImport(path, fingerprint)
} else {
// For file "/Users/foo/go/pkg/darwin_amd64/math.a" record "math.a".
+ file := f.Name()
base.Ctxt.AddImport(file[len(file)-len(path)-len(".a"):], fingerprint)
}
-
- if importpkg.Height >= myheight {
- myheight = importpkg.Height + 1
- }
-
- return importpkg
+ return nil
}
// The linker uses the magic symbol prefixes "go." and "type."
@@ -431,7 +523,7 @@ func clearImports() {
s.Def = nil
continue
}
- if types.IsDotAlias(s) {
+ if s.Def != nil && s.Def.Sym() != s {
// throw away top-level name left over
// from previous import . "x"
// We'll report errors after type checking in CheckDotImports.
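
The header-scanning loop in findExportData can be reduced to a few lines; here is a simplified standalone sketch (it returns at the first newexportsize line rather than continuing to the $$ marker, and the sample header string is made up):

package main

import (
	"bufio"
	"fmt"
	"strconv"
	"strings"
)

// parseNewExportSize scans object-file header lines, as findExportData
// does, and returns the advertised size of the appended unified IR
// section, or 0 if no such header appears before the "$$" marker.
func parseNewExportSize(r *bufio.Reader) (int64, error) {
	for {
		line, err := r.ReadString('\n')
		if err != nil {
			return 0, err
		}
		if strings.HasPrefix(line, "$$") {
			return 0, nil
		}
		if strings.HasPrefix(line, "newexportsize ") {
			return strconv.ParseInt(strings.Fields(line)[1], 10, 64)
		}
	}
}

func main() {
	hdr := "go object linux amd64 devel\nnewexportsize 1234\n\n$$B\n"
	n, err := parseNewExportSize(bufio.NewReader(strings.NewReader(hdr)))
	fmt.Println(n, err) // 1234 <nil>
}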
diff --git a/src/cmd/compile/internal/noder/irgen.go b/src/cmd/compile/internal/noder/irgen.go
index 3e0d3285ab..7bc8a6bcc3 100644
--- a/src/cmd/compile/internal/noder/irgen.go
+++ b/src/cmd/compile/internal/noder/irgen.go
@@ -18,9 +18,9 @@ import (
"cmd/internal/src"
)
-// check2 type checks a Go package using types2, and then generates IR
-// using the results.
-func check2(noders []*noder) {
+// checkFiles configures and runs the types2 checker on the given
+// parsed source files and then returns the result.
+func checkFiles(noders []*noder) (posMap, *types2.Package, *types2.Info) {
if base.SyntaxErrors() != 0 {
base.ErrorExit()
}
@@ -34,20 +34,22 @@ func check2(noders []*noder) {
}
// typechecking
+ importer := gcimports{
+ packages: make(map[string]*types2.Package),
+ }
conf := types2.Config{
GoVersion: base.Flag.Lang,
IgnoreLabels: true, // parser already checked via syntax.CheckBranches mode
CompilerErrorMessages: true, // use error strings matching existing compiler errors
+ AllowTypeLists: true, // remove this line once all tests use type set syntax
Error: func(err error) {
terr := err.(types2.Error)
base.ErrorfAt(m.makeXPos(terr.Pos), "%s", terr.Msg)
},
- Importer: &gcimports{
- packages: make(map[string]*types2.Package),
- },
- Sizes: &gcSizes{},
+ Importer: &importer,
+ Sizes: &gcSizes{},
}
- info := types2.Info{
+ info := &types2.Info{
Types: make(map[syntax.Expr]types2.TypeAndValue),
Defs: make(map[*syntax.Name]types2.Object),
Uses: make(map[*syntax.Name]types2.Object),
@@ -57,12 +59,24 @@ func check2(noders []*noder) {
Inferred: make(map[syntax.Expr]types2.Inferred),
// expand as needed
}
- pkg, err := conf.Check(base.Ctxt.Pkgpath, files, &info)
- files = nil
+
+ pkg := types2.NewPackage(base.Ctxt.Pkgpath, "")
+ importer.check = types2.NewChecker(&conf, pkg, info)
+ err := importer.check.Files(files)
+
base.ExitIfErrors()
if err != nil {
base.FatalfAt(src.NoXPos, "conf.Check error: %v", err)
}
+
+ return m, pkg, info
+}
+
+// check2 type checks a Go package using types2, and then generates IR
+// using the results.
+func check2(noders []*noder) {
+ m, pkg, info := checkFiles(noders)
+
if base.Flag.G < 2 {
os.Exit(0)
}
@@ -70,7 +84,7 @@ func check2(noders []*noder) {
g := irgen{
target: typecheck.Target,
self: pkg,
- info: &info,
+ info: info,
posMap: m,
objs: make(map[types2.Object]*ir.Name),
typs: make(map[types2.Type]*types.Type),
@@ -82,6 +96,41 @@ func check2(noders []*noder) {
}
}
+// gfInfo is information gathered on a generic function.
+type gfInfo struct {
+ tparams []*types.Type
+ derivedTypes []*types.Type
+ // Nodes in a generic function that require a subdictionary. Includes
+ // method and function calls (OCALL), function values (OFUNCINST), method
+ // values/expressions (OXDOT).
+ subDictCalls []ir.Node
+ // Nodes in generic functions that are conversions from a typeparam/derived
+ // type to a specific interface.
+ itabConvs []ir.Node
+ // For type switches on nonempty interfaces, a map from OTYPE entries of
+ // HasTParam type, to the interface type we're switching from.
+ // TODO: what if the type we're switching from is a shape type?
+ type2switchType map[ir.Node]*types.Type
+}
+
+// instInfo is information gathered on a gcshape (or fully concrete)
+// instantiation of a function.
+type instInfo struct {
+ fun *ir.Func // The instantiated function (with body)
+ dictParam *ir.Name // The node inside fun that refers to the dictionary param
+
+ gf *ir.Name // The associated generic function
+ gfInfo *gfInfo
+
+ startSubDict int // Start of dict entries for subdictionaries
+ startItabConv int // Start of dict entries for itab conversions
+ dictLen int // Total number of entries in dictionary
+
+ // Map from nodes in instantiated fun (OCALL, OCALLMETHOD, OFUNCINST, and
+ // OMETHEXPR) to the associated dictionary entry for a sub-dictionary
+ dictEntryMap map[ir.Node]int
+}
+
type irgen struct {
target *ir.Package
self *types2.Package
@@ -94,10 +143,38 @@ type irgen struct {
// Fully-instantiated generic types whose methods should be instantiated
instTypeList []*types.Type
+
+ dnum int // for generating unique dictionary variables
+
+ // Map from generic function to information about its type params, derived
+ // types, and subdictionaries.
+ gfInfoMap map[*types.Sym]*gfInfo
+
+ // Map from the name of a function that has been instantiated to information about
+ // its instantiated function, associated generic function/method, and the
+ // mapping from IR nodes to dictionary entries.
+ instInfoMap map[*types.Sym]*instInfo
+
+ // dictionary syms which we need to finish, by writing out any itabconv
+ // entries.
+ dictSymsToFinalize []*delayInfo
+
+ // True when we are compiling a top-level generic function or method. Used to
+ // avoid adding closures of generic functions/methods to the target.Decls
+ // list.
+ topFuncIsGeneric bool
+}
+
+type delayInfo struct {
+ gf *ir.Name
+ targs []*types.Type
+ sym *types.Sym
+ off int
}
func (g *irgen) generate(noders []*noder) {
types.LocalPkg.Name = g.self.Name()
+ types.LocalPkg.Height = g.self.Height()
typecheck.TypecheckAllowed = true
// Prevent size calculations until we set the underlying type
@@ -132,7 +209,6 @@ Outer:
}
}
}
- types.LocalPkg.Height = myheight
// 2. Process all package-block type declarations. As with imports,
// we need to make sure all types are properly instantiated before
@@ -167,6 +243,10 @@ Outer:
}
}
+ // Check for unusual case where noder2 encounters a type error that types2
+ // doesn't check for (e.g. notinheap incompatibility).
+ base.ExitIfErrors()
+
typecheck.DeclareUniverse()
for _, p := range noders {
@@ -175,7 +255,7 @@ Outer:
// Double check for any type-checking inconsistencies. This can be
// removed once we're confident in IR generation results.
- syntax.Walk(p.file, func(n syntax.Node) bool {
+ syntax.Crawl(p.file, func(n syntax.Node) bool {
g.validate(n)
return false
})
@@ -184,9 +264,9 @@ Outer:
// Create any needed stencils of generic functions
g.stencil()
- // For now, remove all generic functions from g.target.Decl, since they
- // have been used for stenciling, but don't compile. TODO: We will
- // eventually export any exportable generic functions.
+ // Remove all generic functions from g.target.Decl, since they have been
+ // used for stenciling, but don't compile. Generic functions will already
+ // have been marked for export as appropriate.
j := 0
for i, decl := range g.target.Decls {
if decl.Op() != ir.ODCLFUNC || !decl.Type().HasTParam() {
diff --git a/src/cmd/compile/internal/noder/linker.go b/src/cmd/compile/internal/noder/linker.go
new file mode 100644
index 0000000000..2bc7f7c608
--- /dev/null
+++ b/src/cmd/compile/internal/noder/linker.go
@@ -0,0 +1,296 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "io"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/types"
+ "cmd/internal/goobj"
+ "cmd/internal/obj"
+)
+
+// This file implements the unified IR linker, which combines the
+// local package's stub data with imported package data to produce a
+// complete export data file. It also rewrites the compiler's
+// extension data sections based on the results of compilation (e.g.,
+// the function inlining cost and linker symbol index assignments).
+//
+// TODO(mdempsky): Using the name "linker" here is confusing, because
+// readers are likely to mistake references to it for cmd/link. But
+// there's a shortage of good names for "something that combines
+// multiple parts into a cohesive whole"... e.g., "assembler" and
+// "compiler" are also already taken.
+
+type linker struct {
+ pw pkgEncoder
+
+ pkgs map[string]int
+ decls map[*types.Sym]int
+}
+
+func (l *linker) relocAll(pr *pkgReader, relocs []relocEnt) []relocEnt {
+ res := make([]relocEnt, len(relocs))
+ for i, rent := range relocs {
+ rent.idx = l.relocIdx(pr, rent.kind, rent.idx)
+ res[i] = rent
+ }
+ return res
+}
+
+func (l *linker) relocIdx(pr *pkgReader, k reloc, idx int) int {
+ assert(pr != nil)
+
+ absIdx := pr.absIdx(k, idx)
+
+ if newidx := pr.newindex[absIdx]; newidx != 0 {
+ return ^newidx
+ }
+
+ var newidx int
+ switch k {
+ case relocString:
+ newidx = l.relocString(pr, idx)
+ case relocPkg:
+ newidx = l.relocPkg(pr, idx)
+ case relocObj:
+ newidx = l.relocObj(pr, idx)
+
+ default:
+ // Generic relocations.
+ //
+ // TODO(mdempsky): Deduplicate more sections? In fact, I think
+ // every section could be deduplicated. This would also be easier
+ // if we do external relocations.
+
+ w := l.pw.newEncoderRaw(k)
+ l.relocCommon(pr, &w, k, idx)
+ newidx = w.idx
+ }
+
+ pr.newindex[absIdx] = ^newidx
+
+ return newidx
+}
+
+func (l *linker) relocString(pr *pkgReader, idx int) int {
+ return l.pw.stringIdx(pr.stringIdx(idx))
+}
+
+func (l *linker) relocPkg(pr *pkgReader, idx int) int {
+ path := pr.peekPkgPath(idx)
+
+ if newidx, ok := l.pkgs[path]; ok {
+ return newidx
+ }
+
+ r := pr.newDecoder(relocPkg, idx, syncPkgDef)
+ w := l.pw.newEncoder(relocPkg, syncPkgDef)
+ l.pkgs[path] = w.idx
+
+ // TODO(mdempsky): We end up leaving an empty string reference here
+ // from when the package was originally written as "". Probably not
+ // a big deal, but a little annoying. Maybe relocating
+ // cross-references in place is the way to go after all.
+ w.relocs = l.relocAll(pr, r.relocs)
+
+ _ = r.string() // original path
+ w.string(path)
+
+ io.Copy(&w.data, &r.data)
+
+ return w.flush()
+}
+
+func (l *linker) relocObj(pr *pkgReader, idx int) int {
+ path, name, tag := pr.peekObj(idx)
+ sym := types.NewPkg(path, "").Lookup(name)
+
+ if newidx, ok := l.decls[sym]; ok {
+ return newidx
+ }
+
+ if tag == objStub && path != "builtin" && path != "unsafe" {
+ pri, ok := objReader[sym]
+ if !ok {
+ base.Fatalf("missing reader for %q.%v", path, name)
+ }
+ assert(ok)
+
+ pr = pri.pr
+ idx = pri.idx
+
+ path2, name2, tag2 := pr.peekObj(idx)
+ sym2 := types.NewPkg(path2, "").Lookup(name2)
+ assert(sym == sym2)
+ assert(tag2 != objStub)
+ }
+
+ w := l.pw.newEncoderRaw(relocObj)
+ wext := l.pw.newEncoderRaw(relocObjExt)
+ wname := l.pw.newEncoderRaw(relocName)
+ wdict := l.pw.newEncoderRaw(relocObjDict)
+
+ l.decls[sym] = w.idx
+ assert(wext.idx == w.idx)
+ assert(wname.idx == w.idx)
+ assert(wdict.idx == w.idx)
+
+ l.relocCommon(pr, &w, relocObj, idx)
+ l.relocCommon(pr, &wname, relocName, idx)
+ l.relocCommon(pr, &wdict, relocObjDict, idx)
+
+ var obj *ir.Name
+ if path == "" {
+ var ok bool
+ obj, ok = sym.Def.(*ir.Name)
+
+ // Generic types and functions and declared constraint types won't
+ // have definitions.
+ // For now, just generically copy their extension data.
+ // TODO(mdempsky): Restore assertion.
+ if !ok && false {
+ base.Fatalf("missing definition for %v", sym)
+ }
+ }
+
+ if obj != nil {
+ wext.sync(syncObject1)
+ switch tag {
+ case objFunc:
+ l.relocFuncExt(&wext, obj)
+ case objType:
+ l.relocTypeExt(&wext, obj)
+ case objVar:
+ l.relocVarExt(&wext, obj)
+ }
+ wext.flush()
+ } else {
+ l.relocCommon(pr, &wext, relocObjExt, idx)
+ }
+
+ return w.idx
+}
+
+func (l *linker) relocCommon(pr *pkgReader, w *encoder, k reloc, idx int) {
+ r := pr.newDecoderRaw(k, idx)
+ w.relocs = l.relocAll(pr, r.relocs)
+ io.Copy(&w.data, &r.data)
+ w.flush()
+}
+
+func (l *linker) pragmaFlag(w *encoder, pragma ir.PragmaFlag) {
+ w.sync(syncPragma)
+ w.int(int(pragma))
+}
+
+func (l *linker) relocFuncExt(w *encoder, name *ir.Name) {
+ w.sync(syncFuncExt)
+
+ l.pragmaFlag(w, name.Func.Pragma)
+ l.linkname(w, name)
+
+ // Relocated extension data.
+ w.bool(true)
+
+ // Record definition ABI so cross-ABI calls can be direct.
+ // This is important for the performance of calling some
+ // common functions implemented in assembly (e.g., bytealg).
+ w.uint64(uint64(name.Func.ABI))
+
+ // Escape analysis.
+ for _, fs := range &types.RecvsParams {
+ for _, f := range fs(name.Type()).FieldSlice() {
+ w.string(f.Note)
+ }
+ }
+
+ if inl := name.Func.Inl; w.bool(inl != nil) {
+ w.len(int(inl.Cost))
+ w.bool(inl.CanDelayResults)
+
+ pri, ok := bodyReader[name.Func]
+ assert(ok)
+ w.reloc(relocBody, l.relocIdx(pri.pr, relocBody, pri.idx))
+ }
+
+ w.sync(syncEOF)
+}
+
+func (l *linker) relocTypeExt(w *encoder, name *ir.Name) {
+ w.sync(syncTypeExt)
+
+ typ := name.Type()
+
+ l.pragmaFlag(w, name.Pragma())
+
+ // For type T, export the index of type descriptor symbols of T and *T.
+ l.lsymIdx(w, "", reflectdata.TypeLinksym(typ))
+ l.lsymIdx(w, "", reflectdata.TypeLinksym(typ.PtrTo()))
+
+ if typ.Kind() != types.TINTER {
+ for _, method := range typ.Methods().Slice() {
+ l.relocFuncExt(w, method.Nname.(*ir.Name))
+ }
+ }
+}
+
+func (l *linker) relocVarExt(w *encoder, name *ir.Name) {
+ w.sync(syncVarExt)
+ l.linkname(w, name)
+}
+
+func (l *linker) linkname(w *encoder, name *ir.Name) {
+ w.sync(syncLinkname)
+
+ linkname := name.Sym().Linkname
+ if !l.lsymIdx(w, linkname, name.Linksym()) {
+ w.string(linkname)
+ }
+}
+
+func (l *linker) lsymIdx(w *encoder, linkname string, lsym *obj.LSym) bool {
+ if lsym.PkgIdx > goobj.PkgIdxSelf || (lsym.PkgIdx == goobj.PkgIdxInvalid && !lsym.Indexed()) || linkname != "" {
+ w.int64(-1)
+ return false
+ }
+
+ // For a defined symbol, export its index.
+ // For re-exporting an imported symbol, pass its index through.
+ w.int64(int64(lsym.SymIdx))
+ return true
+}
+
+// @@@ Helpers
+
+// TODO(mdempsky): These should probably be removed. I think they're a
+// smell that the export data format is not yet quite right.
+
+func (pr *pkgDecoder) peekPkgPath(idx int) string {
+ r := pr.newDecoder(relocPkg, idx, syncPkgDef)
+ path := r.string()
+ if path == "" {
+ path = pr.pkgPath
+ }
+ return path
+}
+
+func (pr *pkgDecoder) peekObj(idx int) (string, string, codeObj) {
+ r := pr.newDecoder(relocName, idx, syncObject1)
+ r.sync(syncSym)
+ r.sync(syncPkg)
+ path := pr.peekPkgPath(r.reloc(relocPkg))
+ name := r.string()
+ assert(name != "")
+
+ tag := codeObj(r.code(syncCodeObj))
+
+ return path, name, tag
+}
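
One detail worth spelling out: relocIdx caches its assignments in pr.newindex by storing the bitwise complement of the new index, so the slice's zero value can still mean "not seen" while 0 remains a valid index. A tiny standalone sketch of that bookkeeping (remap is an illustrative name, not part of the patch):

package main

import "fmt"

// remap mirrors the linker's newindex trick: the table stores the
// complement of the assigned index, so an entry of 0 means
// "not relocated yet" even though 0 is itself a valid new index.
type remap struct {
	table []int
	next  int
}

func (m *remap) idx(absIdx int) int {
	if stored := m.table[absIdx]; stored != 0 {
		return ^stored // already assigned
	}
	newidx := m.next
	m.next++
	m.table[absIdx] = ^newidx
	return newidx
}

func main() {
	m := &remap{table: make([]int, 4)}
	fmt.Println(m.idx(2), m.idx(0), m.idx(2), m.idx(0)) // 0 1 0 1
}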
diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go
index 5fcad096c2..6a2aacd3fe 100644
--- a/src/cmd/compile/internal/noder/noder.go
+++ b/src/cmd/compile/internal/noder/noder.go
@@ -5,9 +5,11 @@
package noder
import (
+ "errors"
"fmt"
"go/constant"
"go/token"
+ "internal/buildcfg"
"os"
"path/filepath"
"runtime"
@@ -29,8 +31,11 @@ import (
func LoadPackage(filenames []string) {
base.Timer.Start("fe", "parse")
+ // -G=3 and unified expect generics syntax, but -G=0 does not.
+ supportsGenerics := base.Flag.G != 0 || buildcfg.Experiment.Unified
+
mode := syntax.CheckBranches
- if base.Flag.G != 0 {
+ if supportsGenerics && types.AllowsGoVersion(types.LocalPkg, 1, 18) {
mode |= syntax.AllowGenerics
}
@@ -75,6 +80,11 @@ func LoadPackage(filenames []string) {
}
base.Timer.AddEvent(int64(lines), "lines")
+ if base.Debug.Unified != 0 {
+ unified(noders)
+ return
+ }
+
if base.Flag.G != 0 {
// Use types2 to type-check and possibly generate IR.
check2(noders)
@@ -109,25 +119,35 @@ func LoadPackage(filenames []string) {
// We also defer type alias declarations until phase 2
// to avoid cycles like #18640.
// TODO(gri) Remove this again once we have a fix for #25838.
-
- // Don't use range--typecheck can add closures to Target.Decls.
- base.Timer.Start("fe", "typecheck", "top1")
- for i := 0; i < len(typecheck.Target.Decls); i++ {
- n := typecheck.Target.Decls[i]
- if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.(*ir.Decl).X.Alias()) {
- typecheck.Target.Decls[i] = typecheck.Stmt(n)
- }
- }
-
+ //
// Phase 2: Variable assignments.
// To check interface assignments, depends on phase 1.
// Don't use range--typecheck can add closures to Target.Decls.
- base.Timer.Start("fe", "typecheck", "top2")
- for i := 0; i < len(typecheck.Target.Decls); i++ {
- n := typecheck.Target.Decls[i]
- if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).X.Alias() {
- typecheck.Target.Decls[i] = typecheck.Stmt(n)
+ for phase, name := range []string{"top1", "top2"} {
+ base.Timer.Start("fe", "typecheck", name)
+ for i := 0; i < len(typecheck.Target.Decls); i++ {
+ n := typecheck.Target.Decls[i]
+ op := n.Op()
+
+ // Closure function declarations are typechecked as part of the
+ // closure expression.
+ if fn, ok := n.(*ir.Func); ok && fn.OClosure != nil {
+ continue
+ }
+
+ // We don't actually add ir.ODCL nodes to Target.Decls. Make sure of that.
+ if op == ir.ODCL {
+ base.FatalfAt(n.Pos(), "unexpected top declaration: %v", op)
+ }
+
+ // Identify declarations that should be deferred to the second
+ // iteration.
+ late := op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).X.Alias()
+
+ if late == (phase == 1) {
+ typecheck.Target.Decls[i] = typecheck.Stmt(n)
+ }
}
}
@@ -136,16 +156,15 @@ func LoadPackage(filenames []string) {
base.Timer.Start("fe", "typecheck", "func")
var fcount int64
for i := 0; i < len(typecheck.Target.Decls); i++ {
- n := typecheck.Target.Decls[i]
- if n.Op() == ir.ODCLFUNC {
+ if fn, ok := typecheck.Target.Decls[i].(*ir.Func); ok {
if base.Flag.W > 1 {
- s := fmt.Sprintf("\nbefore typecheck %v", n)
- ir.Dump(s, n)
+ s := fmt.Sprintf("\nbefore typecheck %v", fn)
+ ir.Dump(s, fn)
}
- typecheck.FuncBody(n.(*ir.Func))
+ typecheck.FuncBody(fn)
if base.Flag.W > 1 {
- s := fmt.Sprintf("\nafter typecheck %v", n)
- ir.Dump(s, n)
+ s := fmt.Sprintf("\nafter typecheck %v", fn)
+ ir.Dump(s, fn)
}
fcount++
}
@@ -449,7 +468,7 @@ func (p *noder) varDecl(decl *syntax.VarDecl) []ir.Node {
type constState struct {
group *syntax.Group
typ ir.Ntype
- values []ir.Node
+ values syntax.Expr
iota int64
}
@@ -467,16 +486,15 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node {
names := p.declNames(ir.OLITERAL, decl.NameList)
typ := p.typeExprOrNil(decl.Type)
- var values []ir.Node
if decl.Values != nil {
- values = p.exprList(decl.Values)
- cs.typ, cs.values = typ, values
+ cs.typ, cs.values = typ, decl.Values
} else {
if typ != nil {
base.Errorf("const declaration cannot have type without expression")
}
- typ, values = cs.typ, cs.values
+ typ = cs.typ
}
+ values := p.exprList(cs.values)
nn := make([]ir.Node, 0, len(names))
for i, n := range names {
@@ -484,10 +502,16 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node {
base.Errorf("missing value in const declaration")
break
}
+
v := values[i]
if decl.Values == nil {
- v = ir.DeepCopy(n.Pos(), v)
+ ir.Visit(v, func(v ir.Node) {
+ if ir.HasUniquePos(v) {
+ v.SetPos(n.Pos())
+ }
+ })
}
+
typecheck.Declare(n, typecheck.DeclContext)
n.Ntype = typ
@@ -625,6 +649,9 @@ func (p *noder) params(params []*syntax.Field, dddOk bool) []*ir.Field {
for i, param := range params {
p.setlineno(param)
nodes = append(nodes, p.param(param, dddOk, i+1 == len(params)))
+ if i > 0 && params[i].Type == params[i-1].Type {
+ nodes[i].Ntype = nodes[i-1].Ntype
+ }
}
return nodes
}
@@ -914,6 +941,9 @@ func (p *noder) structType(expr *syntax.StructType) ir.Node {
} else {
n = ir.NewField(p.pos(field), p.name(field.Name), p.typeExpr(field.Type), nil)
}
+ if i > 0 && expr.FieldList[i].Type == expr.FieldList[i-1].Type {
+ n.Ntype = l[i-1].Ntype
+ }
if i < len(expr.TagList) && expr.TagList[i] != nil {
n.Note = constant.StringVal(p.basicLit(expr.TagList[i]))
}
@@ -977,6 +1007,8 @@ func (p *noder) packname(expr syntax.Expr) *types.Sym {
}
func (p *noder) embedded(typ syntax.Expr) *ir.Field {
+ pos := p.pos(syntax.StartPos(typ))
+
op, isStar := typ.(*syntax.Operation)
if isStar {
if op.Op != syntax.Mul || op.Y != nil {
@@ -986,11 +1018,11 @@ func (p *noder) embedded(typ syntax.Expr) *ir.Field {
}
sym := p.packname(typ)
- n := ir.NewField(p.pos(typ), typecheck.Lookup(sym.Name), importName(sym).(ir.Ntype), nil)
+ n := ir.NewField(pos, typecheck.Lookup(sym.Name), importName(sym).(ir.Ntype), nil)
n.Embedded = true
if isStar {
- n.Ntype = ir.NewStarExpr(p.pos(op), n.Ntype)
+ n.Ntype = ir.NewStarExpr(pos, n.Ntype)
}
return n
}
@@ -1780,24 +1812,14 @@ func fakeRecv() *ir.Field {
}
func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node {
- xtype := p.typeExpr(expr.Type)
-
- fn := ir.NewFunc(p.pos(expr))
- fn.SetIsHiddenClosure(ir.CurFunc != nil)
-
- fn.Nname = ir.NewNameAt(p.pos(expr), ir.BlankNode.Sym()) // filled in by tcClosure
- fn.Nname.Func = fn
- fn.Nname.Ntype = xtype
- fn.Nname.Defn = fn
-
- clo := ir.NewClosureExpr(p.pos(expr), fn)
- fn.OClosure = clo
+ fn := ir.NewClosureFunc(p.pos(expr), ir.CurFunc != nil)
+ fn.Nname.Ntype = p.typeExpr(expr.Type)
p.funcBody(fn, expr.Body)
ir.FinishCaptureNames(base.Pos, ir.CurFunc, fn)
- return clo
+ return fn.OClosure
}
// A function named init is a special case.
@@ -1841,33 +1863,14 @@ func oldname(s *types.Sym) ir.Node {
}
func varEmbed(makeXPos func(syntax.Pos) src.XPos, name *ir.Name, decl *syntax.VarDecl, pragma *pragmas, haveEmbed bool) {
- if pragma.Embeds == nil {
- return
- }
-
pragmaEmbeds := pragma.Embeds
pragma.Embeds = nil
- pos := makeXPos(pragmaEmbeds[0].Pos)
-
- if !haveEmbed {
- base.ErrorfAt(pos, "go:embed only allowed in Go files that import \"embed\"")
- return
- }
- if len(decl.NameList) > 1 {
- base.ErrorfAt(pos, "go:embed cannot apply to multiple vars")
- return
- }
- if decl.Values != nil {
- base.ErrorfAt(pos, "go:embed cannot apply to var with initializer")
+ if len(pragmaEmbeds) == 0 {
return
}
- if decl.Type == nil {
- // Should not happen, since Values == nil now.
- base.ErrorfAt(pos, "go:embed cannot apply to var without type")
- return
- }
- if typecheck.DeclContext != ir.PEXTERN {
- base.ErrorfAt(pos, "go:embed cannot apply to var inside func")
+
+ if err := checkEmbed(decl, haveEmbed, typecheck.DeclContext != ir.PEXTERN); err != nil {
+ base.ErrorfAt(makeXPos(pragmaEmbeds[0].Pos), "%s", err)
return
}
@@ -1878,3 +1881,24 @@ func varEmbed(makeXPos func(syntax.Pos) src.XPos, name *ir.Name, decl *syntax.Va
typecheck.Target.Embeds = append(typecheck.Target.Embeds, name)
name.Embed = &embeds
}
+
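+// checkEmbed reports why a go:embed directive cannot be applied to the
+// given variable declaration, or nil if the directive is allowed.
+// haveEmbed indicates whether the file imports "embed"; withinFunc
+// indicates whether the declaration appears inside a function.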
+func checkEmbed(decl *syntax.VarDecl, haveEmbed, withinFunc bool) error {
+ switch {
+ case !haveEmbed:
+ return errors.New("go:embed only allowed in Go files that import \"embed\"")
+ case len(decl.NameList) > 1:
+ return errors.New("go:embed cannot apply to multiple vars")
+ case decl.Values != nil:
+ return errors.New("go:embed cannot apply to var with initializer")
+ case decl.Type == nil:
+ // Should not happen, since Values == nil now.
+ return errors.New("go:embed cannot apply to var without type")
+ case withinFunc:
+ return errors.New("go:embed cannot apply to var inside func")
+ case !types.AllowsGoVersion(types.LocalPkg, 1, 16):
+ return fmt.Errorf("go:embed requires go1.16 or later (-lang was set to %s; check go.mod)", base.Flag.Lang)
+
+ default:
+ return nil
+ }
+}
diff --git a/src/cmd/compile/internal/noder/object.go b/src/cmd/compile/internal/noder/object.go
index 82cce1ace0..40c0b9cf42 100644
--- a/src/cmd/compile/internal/noder/object.go
+++ b/src/cmd/compile/internal/noder/object.go
@@ -29,7 +29,7 @@ func (g *irgen) use(name *syntax.Name) *ir.Name {
if !ok {
base.FatalfAt(g.pos(name), "unknown name %v", name)
}
- obj := ir.CaptureName(g.pos(obj2), ir.CurFunc, g.obj(obj2))
+ obj := ir.CaptureName(g.pos(name), ir.CurFunc, g.obj(obj2))
if obj.Defn != nil && obj.Defn.Op() == ir.ONAME {
// If CaptureName created a closure variable, then transfer the
// type of the captured name to the new closure variable.
@@ -49,6 +49,11 @@ func (g *irgen) obj(obj types2.Object) *ir.Name {
// For imported objects, we use iimport directly instead of mapping
// the types2 representation.
if obj.Pkg() != g.self {
+ if sig, ok := obj.Type().(*types2.Signature); ok && sig.Recv() != nil {
+ // We can't import a method by name - must import the type
+ // and access the method from it.
+ base.FatalfAt(g.pos(obj), "tried to import a method directly")
+ }
sym := g.sym(obj)
if sym.Def != nil {
return sym.Def.(*ir.Name)
@@ -101,25 +106,28 @@ func (g *irgen) obj(obj types2.Object) *ir.Name {
case *types2.TypeName:
if obj.IsAlias() {
name = g.objCommon(pos, ir.OTYPE, g.sym(obj), class, g.typ(obj.Type()))
+ name.SetAlias(true)
} else {
name = ir.NewDeclNameAt(pos, ir.OTYPE, g.sym(obj))
g.objFinish(name, class, types.NewNamed(name))
}
case *types2.Var:
- var sym *types.Sym
- if class == ir.PPARAMOUT {
+ sym := g.sym(obj)
+ if class == ir.PPARAMOUT && (sym == nil || sym.IsBlank()) {
// Backend needs names for result parameters,
// even if they're anonymous or blank.
- switch obj.Name() {
- case "":
- sym = typecheck.LookupNum("~r", len(ir.CurFunc.Dcl)) // 'r' for "result"
- case "_":
- sym = typecheck.LookupNum("~b", len(ir.CurFunc.Dcl)) // 'b' for "blank"
+ nresults := 0
+ for _, n := range ir.CurFunc.Dcl {
+ if n.Class == ir.PPARAMOUT {
+ nresults++
+ }
+ }
+ if sym == nil {
+ sym = typecheck.LookupNum("~r", nresults) // 'r' for "result"
+ } else {
+ sym = typecheck.LookupNum("~b", nresults) // 'b' for "blank"
}
- }
- if sym == nil {
- sym = g.sym(obj)
}
name = g.objCommon(pos, ir.ONAME, sym, class, g.typ(obj.Type()))
@@ -164,9 +172,8 @@ func (g *irgen) objFinish(name *ir.Name, class ir.Class, typ *types.Type) {
break // methods are exported with their receiver type
}
if types.IsExported(sym.Name) {
- if name.Class == ir.PFUNC && name.Type().NumTParams() > 0 {
- base.FatalfAt(name.Pos(), "Cannot export a generic function (yet): %v", name)
- }
+ // Generic functions can be marked for export here, even
+ // though they will not be compiled until instantiated.
typecheck.Export(name)
}
if base.Flag.AsmHdr != "" && !name.Sym().Asm() {
diff --git a/src/cmd/compile/internal/noder/quirks.go b/src/cmd/compile/internal/noder/quirks.go
new file mode 100644
index 0000000000..914c5d2bd7
--- /dev/null
+++ b/src/cmd/compile/internal/noder/quirks.go
@@ -0,0 +1,450 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "fmt"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types2"
+ "cmd/internal/src"
+)
+
+// This file defines helper functions useful for satisfying toolstash
+// -cmp when comparing against the legacy frontend's behavior; they can
+// be removed once that's no longer a concern.
+
+// quirksMode controls whether behavior specific to satisfying
+// toolstash -cmp is used.
+func quirksMode() bool {
+ return base.Debug.UnifiedQuirks != 0
+}
+
+// posBasesOf returns all of the position bases in the source files,
+// as seen in a straightforward traversal.
+//
+// This is necessary to ensure position bases (and thus file names)
+// get registered in the same order as noder would visit them.
+func posBasesOf(noders []*noder) []*syntax.PosBase {
+ seen := make(map[*syntax.PosBase]bool)
+ var bases []*syntax.PosBase
+
+ for _, p := range noders {
+ syntax.Crawl(p.file, func(n syntax.Node) bool {
+ if b := n.Pos().Base(); !seen[b] {
+ bases = append(bases, b)
+ seen[b] = true
+ }
+ return false
+ })
+ }
+
+ return bases
+}
+
+// importedObjsOf returns the imported objects (i.e., referenced
+// objects not declared by curpkg) from the parsed source files, in
+// the order that typecheck used to load their definitions.
+//
+// This is needed because loading the definitions for imported objects
+// can also add file names.
+func importedObjsOf(curpkg *types2.Package, info *types2.Info, noders []*noder) []types2.Object {
+ // This code is complex because it matches the precise order that
+ // typecheck recursively and repeatedly traverses the IR. It's meant
+ // to be thrown away eventually anyway.
+
+ seen := make(map[types2.Object]bool)
+ var objs []types2.Object
+
+ var phase int
+
+ decls := make(map[types2.Object]syntax.Decl)
+ assoc := func(decl syntax.Decl, names ...*syntax.Name) {
+ for _, name := range names {
+ obj, ok := info.Defs[name]
+ assert(ok)
+ decls[obj] = decl
+ }
+ }
+
+ for _, p := range noders {
+ syntax.Crawl(p.file, func(n syntax.Node) bool {
+ switch n := n.(type) {
+ case *syntax.ConstDecl:
+ assoc(n, n.NameList...)
+ case *syntax.FuncDecl:
+ assoc(n, n.Name)
+ case *syntax.TypeDecl:
+ assoc(n, n.Name)
+ case *syntax.VarDecl:
+ assoc(n, n.NameList...)
+ case *syntax.BlockStmt:
+ return true
+ }
+ return false
+ })
+ }
+
+ var visited map[syntax.Decl]bool
+
+ var resolveDecl func(n syntax.Decl)
+ var resolveNode func(n syntax.Node, top bool)
+
+ resolveDecl = func(n syntax.Decl) {
+ if visited[n] {
+ return
+ }
+ visited[n] = true
+
+ switch n := n.(type) {
+ case *syntax.ConstDecl:
+ resolveNode(n.Type, true)
+ resolveNode(n.Values, true)
+
+ case *syntax.FuncDecl:
+ if n.Recv != nil {
+ resolveNode(n.Recv, true)
+ }
+ resolveNode(n.Type, true)
+
+ case *syntax.TypeDecl:
+ resolveNode(n.Type, true)
+
+ case *syntax.VarDecl:
+ if n.Type != nil {
+ resolveNode(n.Type, true)
+ } else {
+ resolveNode(n.Values, true)
+ }
+ }
+ }
+
+ resolveObj := func(pos syntax.Pos, obj types2.Object) {
+ switch obj.Pkg() {
+ case nil:
+ // builtin; nothing to do
+
+ case curpkg:
+ if decl, ok := decls[obj]; ok {
+ resolveDecl(decl)
+ }
+
+ default:
+ if obj.Parent() == obj.Pkg().Scope() && !seen[obj] {
+ seen[obj] = true
+ objs = append(objs, obj)
+ }
+ }
+ }
+
+ checkdefat := func(pos syntax.Pos, n *syntax.Name) {
+ if n.Value == "_" {
+ return
+ }
+ obj, ok := info.Uses[n]
+ if !ok {
+ obj, ok = info.Defs[n]
+ if !ok {
+ return
+ }
+ }
+ if obj == nil {
+ return
+ }
+ resolveObj(pos, obj)
+ }
+ checkdef := func(n *syntax.Name) { checkdefat(n.Pos(), n) }
+
+ var later []syntax.Node
+
+ resolveNode = func(n syntax.Node, top bool) {
+ if n == nil {
+ return
+ }
+ syntax.Crawl(n, func(n syntax.Node) bool {
+ switch n := n.(type) {
+ case *syntax.Name:
+ checkdef(n)
+
+ case *syntax.SelectorExpr:
+ if name, ok := n.X.(*syntax.Name); ok {
+ if _, isPkg := info.Uses[name].(*types2.PkgName); isPkg {
+ checkdefat(n.X.Pos(), n.Sel)
+ return true
+ }
+ }
+
+ case *syntax.AssignStmt:
+ resolveNode(n.Rhs, top)
+ resolveNode(n.Lhs, top)
+ return true
+
+ case *syntax.VarDecl:
+ resolveNode(n.Values, top)
+
+ case *syntax.FuncLit:
+ if top {
+ resolveNode(n.Type, top)
+ later = append(later, n.Body)
+ return true
+ }
+
+ case *syntax.BlockStmt:
+ if phase >= 3 {
+ for _, stmt := range n.List {
+ resolveNode(stmt, false)
+ }
+ }
+ return true
+ }
+
+ return false
+ })
+ }
+
+ for phase = 1; phase <= 5; phase++ {
+ visited = map[syntax.Decl]bool{}
+
+ for _, p := range noders {
+ for _, decl := range p.file.DeclList {
+ switch decl := decl.(type) {
+ case *syntax.ConstDecl:
+ resolveDecl(decl)
+
+ case *syntax.FuncDecl:
+ resolveDecl(decl)
+ if phase >= 3 && decl.Body != nil {
+ resolveNode(decl.Body, true)
+ }
+
+ case *syntax.TypeDecl:
+ if !decl.Alias || phase >= 2 {
+ resolveDecl(decl)
+ }
+
+ case *syntax.VarDecl:
+ if phase >= 2 {
+ resolveNode(decl.Values, true)
+ resolveDecl(decl)
+ }
+ }
+ }
+
+ if phase >= 5 {
+ syntax.Crawl(p.file, func(n syntax.Node) bool {
+ if name, ok := n.(*syntax.Name); ok {
+ if obj, ok := info.Uses[name]; ok {
+ resolveObj(name.Pos(), obj)
+ }
+ }
+ return false
+ })
+ }
+ }
+
+ for i := 0; i < len(later); i++ {
+ resolveNode(later[i], true)
+ }
+ later = nil
+ }
+
+ return objs
+}
+
+// typeExprEndPos returns the position that noder would leave base.Pos
+// after parsing the given type expression.
+func typeExprEndPos(expr0 syntax.Expr) syntax.Pos {
+ for {
+ switch expr := expr0.(type) {
+ case *syntax.Name:
+ return expr.Pos()
+ case *syntax.SelectorExpr:
+ return expr.X.Pos()
+
+ case *syntax.ParenExpr:
+ expr0 = expr.X
+
+ case *syntax.Operation:
+ assert(expr.Op == syntax.Mul)
+ assert(expr.Y == nil)
+ expr0 = expr.X
+
+ case *syntax.ArrayType:
+ expr0 = expr.Elem
+ case *syntax.ChanType:
+ expr0 = expr.Elem
+ case *syntax.DotsType:
+ expr0 = expr.Elem
+ case *syntax.MapType:
+ expr0 = expr.Value
+ case *syntax.SliceType:
+ expr0 = expr.Elem
+
+ case *syntax.StructType:
+ return expr.Pos()
+
+ case *syntax.InterfaceType:
+ expr0 = lastFieldType(expr.MethodList)
+ if expr0 == nil {
+ return expr.Pos()
+ }
+
+ case *syntax.FuncType:
+ expr0 = lastFieldType(expr.ResultList)
+ if expr0 == nil {
+ expr0 = lastFieldType(expr.ParamList)
+ if expr0 == nil {
+ return expr.Pos()
+ }
+ }
+
+ case *syntax.IndexExpr: // explicit type instantiation
+ targs := unpackListExpr(expr.Index)
+ expr0 = targs[len(targs)-1]
+
+ default:
+ panic(fmt.Sprintf("%s: unexpected type expression %v", expr.Pos(), syntax.String(expr)))
+ }
+ }
+}
+
+func lastFieldType(fields []*syntax.Field) syntax.Expr {
+ if len(fields) == 0 {
+ return nil
+ }
+ return fields[len(fields)-1].Type
+}
+
+// sumPos returns the position that noder.sum would produce for
+// constant expression x.
+func sumPos(x syntax.Expr) syntax.Pos {
+ orig := x
+ for {
+ switch x1 := x.(type) {
+ case *syntax.BasicLit:
+ assert(x1.Kind == syntax.StringLit)
+ return x1.Pos()
+ case *syntax.Operation:
+ assert(x1.Op == syntax.Add && x1.Y != nil)
+ if r, ok := x1.Y.(*syntax.BasicLit); ok {
+ assert(r.Kind == syntax.StringLit)
+ x = x1.X
+ continue
+ }
+ }
+ return orig.Pos()
+ }
+}
+
+// funcParamsEndPos returns the value of base.Pos left by noder after
+// processing a function signature.
+func funcParamsEndPos(fn *ir.Func) src.XPos {
+ sig := fn.Nname.Type()
+
+ fields := sig.Results().FieldSlice()
+ if len(fields) == 0 {
+ fields = sig.Params().FieldSlice()
+ if len(fields) == 0 {
+ fields = sig.Recvs().FieldSlice()
+ if len(fields) == 0 {
+ if fn.OClosure != nil {
+ return fn.Nname.Ntype.Pos()
+ }
+ return fn.Pos()
+ }
+ }
+ }
+
+ return fields[len(fields)-1].Pos
+}
+
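+// dupTypes maps duplicated types2.Type instances back to the original
+// instance they were derived from, recursing into composite types so
+// that their components are mapped as well.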
+type dupTypes struct {
+ origs map[types2.Type]types2.Type
+}
+
+func (d *dupTypes) orig(t types2.Type) types2.Type {
+ if orig, ok := d.origs[t]; ok {
+ return orig
+ }
+ return t
+}
+
+func (d *dupTypes) add(t, orig types2.Type) {
+ if t == orig {
+ return
+ }
+
+ if d.origs == nil {
+ d.origs = make(map[types2.Type]types2.Type)
+ }
+ assert(d.origs[t] == nil)
+ d.origs[t] = orig
+
+ switch t := t.(type) {
+ case *types2.Pointer:
+ orig := orig.(*types2.Pointer)
+ d.add(t.Elem(), orig.Elem())
+
+ case *types2.Slice:
+ orig := orig.(*types2.Slice)
+ d.add(t.Elem(), orig.Elem())
+
+ case *types2.Map:
+ orig := orig.(*types2.Map)
+ d.add(t.Key(), orig.Key())
+ d.add(t.Elem(), orig.Elem())
+
+ case *types2.Array:
+ orig := orig.(*types2.Array)
+ assert(t.Len() == orig.Len())
+ d.add(t.Elem(), orig.Elem())
+
+ case *types2.Chan:
+ orig := orig.(*types2.Chan)
+ assert(t.Dir() == orig.Dir())
+ d.add(t.Elem(), orig.Elem())
+
+ case *types2.Struct:
+ orig := orig.(*types2.Struct)
+ assert(t.NumFields() == orig.NumFields())
+ for i := 0; i < t.NumFields(); i++ {
+ d.add(t.Field(i).Type(), orig.Field(i).Type())
+ }
+
+ case *types2.Interface:
+ orig := orig.(*types2.Interface)
+ assert(t.NumExplicitMethods() == orig.NumExplicitMethods())
+ assert(t.NumEmbeddeds() == orig.NumEmbeddeds())
+ for i := 0; i < t.NumExplicitMethods(); i++ {
+ d.add(t.ExplicitMethod(i).Type(), orig.ExplicitMethod(i).Type())
+ }
+ for i := 0; i < t.NumEmbeddeds(); i++ {
+ d.add(t.EmbeddedType(i), orig.EmbeddedType(i))
+ }
+
+ case *types2.Signature:
+ orig := orig.(*types2.Signature)
+ assert((t.Recv() == nil) == (orig.Recv() == nil))
+ if t.Recv() != nil {
+ d.add(t.Recv().Type(), orig.Recv().Type())
+ }
+ d.add(t.Params(), orig.Params())
+ d.add(t.Results(), orig.Results())
+
+ case *types2.Tuple:
+ orig := orig.(*types2.Tuple)
+ assert(t.Len() == orig.Len())
+ for i := 0; i < t.Len(); i++ {
+ d.add(t.At(i).Type(), orig.At(i).Type())
+ }
+
+ default:
+ assert(types2.Identical(t, orig))
+ }
+}
diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go
new file mode 100644
index 0000000000..5481812b18
--- /dev/null
+++ b/src/cmd/compile/internal/noder/reader.go
@@ -0,0 +1,2390 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "bytes"
+ "fmt"
+ "go/constant"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/deadcode"
+ "cmd/compile/internal/dwarfgen"
+ "cmd/compile/internal/inline"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+)
+
+// TODO(mdempsky): Suppress duplicate type/const errors that can arise
+// during typecheck due to naive type substitution (e.g., see #42758).
+// I anticipate these will be handled as a consequence of adding
+// dictionaries support, so it's probably not important to focus on
+// this until after that's done.
+
+type pkgReader struct {
+ pkgDecoder
+
+ posBases []*src.PosBase
+ pkgs []*types.Pkg
+ typs []*types.Type
+
+ // offset for rewriting the given index into the output,
+ // but bitwise inverted so we can detect whether the entry is missing.
+ newindex []int
+}
+
+func newPkgReader(pr pkgDecoder) *pkgReader {
+ return &pkgReader{
+ pkgDecoder: pr,
+
+ posBases: make([]*src.PosBase, pr.numElems(relocPosBase)),
+ pkgs: make([]*types.Pkg, pr.numElems(relocPkg)),
+ typs: make([]*types.Type, pr.numElems(relocType)),
+
+ newindex: make([]int, pr.totalElems()),
+ }
+}
+
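+// pkgReaderIndex identifies an element within a particular pkgReader,
+// along with the dictionary (if any) needed when reading it.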
+type pkgReaderIndex struct {
+ pr *pkgReader
+ idx int
+ dict *readerDict
+}
+
+func (pri pkgReaderIndex) asReader(k reloc, marker syncMarker) *reader {
+ r := pri.pr.newReader(k, pri.idx, marker)
+ r.dict = pri.dict
+ return r
+}
+
+func (pr *pkgReader) newReader(k reloc, idx int, marker syncMarker) *reader {
+ return &reader{
+ decoder: pr.newDecoder(k, idx, marker),
+ p: pr,
+ }
+}
+
+type reader struct {
+ decoder
+
+ p *pkgReader
+
+ ext *reader
+
+ dict *readerDict
+
+ // TODO(mdempsky): The state below is all specific to reading
+ // function bodies. It probably makes sense to split it out
+ // separately so that it doesn't take up space in every reader
+ // instance.
+
+ curfn *ir.Func
+ locals []*ir.Name
+ closureVars []*ir.Name
+
+ funarghack bool
+
+ // scopeVars is a stack tracking the number of variables declared in
+ // the current function at the moment each open scope was opened.
+ scopeVars []int
+ marker dwarfgen.ScopeMarker
+ lastCloseScopePos src.XPos
+
+ // === details for handling inline body expansion ===
+
+ // If we're reading in a function body because of inlining, this is
+ // the call that we're inlining for.
+ inlCaller *ir.Func
+ inlCall *ir.CallExpr
+ inlFunc *ir.Func
+ inlTreeIndex int
+ inlPosBases map[*src.PosBase]*src.PosBase
+
+ delayResults bool
+
+ // Label to return to.
+ retlabel *types.Sym
+
+ inlvars, retvars ir.Nodes
+}
+
+type readerDict struct {
+ // targs holds the implicit and explicit type arguments in use for
+ // reading the current object. For example:
+ //
+ // func F[T any]() {
+ // type X[U any] struct { t T; u U }
+ // var _ X[string]
+ // }
+ //
+ // var _ = F[int]
+ //
+ // While instantiating F[int], we need to in turn instantiate
+ // X[string]. [int] and [string] are explicit type arguments for F
+ // and X, respectively; but [int] also serves as the implicit type
+ // arguments for X.
+ //
+ // (As an analogy to function literals, explicits are the function
+ // literal's formal parameters, while implicits are variables
+ // captured by the function literal.)
+ targs []*types.Type
+
+ // implicits counts how many of types within targs are implicit type
+ // arguments; the rest are explicit.
+ implicits int
+
+ derived []derivedInfo // reloc index of the derived type's descriptor
+ derivedTypes []*types.Type // slice of previously computed derived types
+
+ funcs []objInfo
+ funcsObj []ir.Node
+}
+
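+// setType records typ as n's type and marks n as typechecked. For
+// *ir.Name nodes it also sets Walkdef and synthesizes an Ntype node.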
+func (r *reader) setType(n ir.Node, typ *types.Type) {
+ n.SetType(typ)
+ n.SetTypecheck(1)
+
+ if name, ok := n.(*ir.Name); ok {
+ name.SetWalkdef(1)
+ name.Ntype = ir.TypeNode(name.Type())
+ }
+}
+
+func (r *reader) setValue(name *ir.Name, val constant.Value) {
+ name.SetVal(val)
+ name.Defn = nil
+}
+
+// @@@ Positions
+
+func (r *reader) pos() src.XPos {
+ return base.Ctxt.PosTable.XPos(r.pos0())
+}
+
+func (r *reader) pos0() src.Pos {
+ r.sync(syncPos)
+ if !r.bool() {
+ return src.NoPos
+ }
+
+ posBase := r.posBase()
+ line := r.uint()
+ col := r.uint()
+ return src.MakePos(posBase, line, col)
+}
+
+func (r *reader) posBase() *src.PosBase {
+ return r.inlPosBase(r.p.posBaseIdx(r.reloc(relocPosBase)))
+}
+
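+// posBaseIdx returns the position base for the given index, reading
+// and caching it on first use.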
+func (pr *pkgReader) posBaseIdx(idx int) *src.PosBase {
+ if b := pr.posBases[idx]; b != nil {
+ return b
+ }
+
+ r := pr.newReader(relocPosBase, idx, syncPosBase)
+ var b *src.PosBase
+
+ fn := r.string()
+ absfn := r.string()
+
+ if r.bool() {
+ b = src.NewFileBase(fn, absfn)
+ } else {
+ pos := r.pos0()
+ line := r.uint()
+ col := r.uint()
+ b = src.NewLinePragmaBase(pos, fn, absfn, line, col)
+ }
+
+ pr.posBases[idx] = b
+ return b
+}
+
+func (r *reader) inlPosBase(oldBase *src.PosBase) *src.PosBase {
+ if r.inlCall == nil {
+ return oldBase
+ }
+
+ if newBase, ok := r.inlPosBases[oldBase]; ok {
+ return newBase
+ }
+
+ newBase := src.NewInliningBase(oldBase, r.inlTreeIndex)
+ r.inlPosBases[oldBase] = newBase
+ return newBase
+}
+
+func (r *reader) updatePos(xpos src.XPos) src.XPos {
+ pos := base.Ctxt.PosTable.Pos(xpos)
+ pos.SetBase(r.inlPosBase(pos.Base()))
+ return base.Ctxt.PosTable.XPos(pos)
+}
+
+func (r *reader) origPos(xpos src.XPos) src.XPos {
+ if r.inlCall == nil {
+ return xpos
+ }
+
+ pos := base.Ctxt.PosTable.Pos(xpos)
+ for old, new := range r.inlPosBases {
+ if pos.Base() == new {
+ pos.SetBase(old)
+ return base.Ctxt.PosTable.XPos(pos)
+ }
+ }
+
+ base.FatalfAt(xpos, "pos base missing from inlPosBases")
+ panic("unreachable")
+}
+
+// @@@ Packages
+
+func (r *reader) pkg() *types.Pkg {
+ r.sync(syncPkg)
+ return r.p.pkgIdx(r.reloc(relocPkg))
+}
+
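+// pkgIdx returns the package for the given index, reading and caching
+// it on first use.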
+func (pr *pkgReader) pkgIdx(idx int) *types.Pkg {
+ if pkg := pr.pkgs[idx]; pkg != nil {
+ return pkg
+ }
+
+ pkg := pr.newReader(relocPkg, idx, syncPkgDef).doPkg()
+ pr.pkgs[idx] = pkg
+ return pkg
+}
+
+func (r *reader) doPkg() *types.Pkg {
+ path := r.string()
+ if path == "builtin" {
+ return types.BuiltinPkg
+ }
+ if path == "" {
+ path = r.p.pkgPath
+ }
+
+ name := r.string()
+ height := r.len()
+
+ pkg := types.NewPkg(path, "")
+
+ if pkg.Name == "" {
+ pkg.Name = name
+ } else {
+ assert(pkg.Name == name)
+ }
+
+ if pkg.Height == 0 {
+ pkg.Height = height
+ } else {
+ assert(pkg.Height == height)
+ }
+
+ return pkg
+}
+
+// @@@ Types
+
+func (r *reader) typ() *types.Type {
+ return r.p.typIdx(r.typInfo(), r.dict)
+}
+
+func (r *reader) typInfo() typeInfo {
+ r.sync(syncType)
+ if r.bool() {
+ return typeInfo{idx: r.len(), derived: true}
+ }
+ return typeInfo{idx: r.reloc(relocType), derived: false}
+}
+
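+// typIdx returns the type for the given type info, reading it if
+// necessary. Derived types are resolved and cached through dict;
+// non-derived types are cached on the pkgReader itself.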
+func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) *types.Type {
+ idx := info.idx
+ var where **types.Type
+ if info.derived {
+ where = &dict.derivedTypes[idx]
+ idx = dict.derived[idx].idx
+ } else {
+ where = &pr.typs[idx]
+ }
+
+ if typ := *where; typ != nil {
+ return typ
+ }
+
+ r := pr.newReader(relocType, idx, syncTypeIdx)
+ r.dict = dict
+
+ typ := r.doTyp()
+ assert(typ != nil)
+
+ // For recursive type declarations involving interfaces and aliases,
+ // the above r.doTyp() call may have already set pr.typs[idx], so just
+ // double check and return the type.
+ //
+ // Example:
+ //
+ // type F = func(I)
+ //
+ // type I interface {
+ // m(F)
+ // }
+ //
+ // The writer writes data types in the following index order:
+ //
+ // 0: func(I)
+ // 1: I
+ // 2: interface{m(func(I))}
+ //
+ // The reader resolves them in the following index order:
+ //
+ // 0 -> 1 -> 2 -> 0 -> 1
+ //
+ // which divides logically into 2 steps:
+ //
+ // - 0 -> 1 : the first time the reader reaches type I,
+ // it creates a new named type with symbol I.
+ //
+ // - 2 -> 0 -> 1: the reader ends up reaching symbol I again;
+ // since symbol I was set up in the step above,
+ // the reader just returns the named type.
+ //
+ // Now, as the function calls return, pr.typs looks like this:
+ //
+ // - 0 -> 1 -> 2 -> 0 : [<T> I <T>]
+ // - 0 -> 1 -> 2 : [func(I) I <T>]
+ // - 0 -> 1 : [func(I) I interface { "".m(func("".I)) }]
+ //
+ // Index 1, corresponding to type I, was resolved successfully
+ // after the r.doTyp() call.
+
+ if prev := *where; prev != nil {
+ return prev
+ }
+
+ *where = typ
+
+ if !typ.IsUntyped() {
+ types.CheckSize(typ)
+ }
+
+ return typ
+}
+
+func (r *reader) doTyp() *types.Type {
+ switch tag := codeType(r.code(syncType)); tag {
+ default:
+ panic(fmt.Sprintf("unexpected type: %v", tag))
+
+ case typeBasic:
+ return *basics[r.len()]
+
+ case typeNamed:
+ obj := r.obj()
+ assert(obj.Op() == ir.OTYPE)
+ return obj.Type()
+
+ case typeTypeParam:
+ return r.dict.targs[r.len()]
+
+ case typeArray:
+ len := int64(r.uint64())
+ return types.NewArray(r.typ(), len)
+ case typeChan:
+ dir := dirs[r.len()]
+ return types.NewChan(r.typ(), dir)
+ case typeMap:
+ return types.NewMap(r.typ(), r.typ())
+ case typePointer:
+ return types.NewPtr(r.typ())
+ case typeSignature:
+ return r.signature(types.LocalPkg, nil)
+ case typeSlice:
+ return types.NewSlice(r.typ())
+ case typeStruct:
+ return r.structType()
+ case typeInterface:
+ return r.interfaceType()
+ }
+}
+
+func (r *reader) interfaceType() *types.Type {
+ tpkg := types.LocalPkg // TODO(mdempsky): Remove after iexport is gone.
+
+ nmethods, nembeddeds := r.len(), r.len()
+
+ fields := make([]*types.Field, nmethods+nembeddeds)
+ methods, embeddeds := fields[:nmethods], fields[nmethods:]
+
+ for i := range methods {
+ pos := r.pos()
+ pkg, sym := r.selector()
+ tpkg = pkg
+ mtyp := r.signature(pkg, typecheck.FakeRecv())
+ methods[i] = types.NewField(pos, sym, mtyp)
+ }
+ for i := range embeddeds {
+ embeddeds[i] = types.NewField(src.NoXPos, nil, r.typ())
+ }
+
+ if len(fields) == 0 {
+ return types.Types[types.TINTER] // empty interface
+ }
+ return r.needWrapper(types.NewInterface(tpkg, fields))
+}
+
+func (r *reader) structType() *types.Type {
+ tpkg := types.LocalPkg // TODO(mdempsky): Remove after iexport is gone.
+ fields := make([]*types.Field, r.len())
+ for i := range fields {
+ pos := r.pos()
+ pkg, sym := r.selector()
+ tpkg = pkg
+ ftyp := r.typ()
+ tag := r.string()
+ embedded := r.bool()
+
+ f := types.NewField(pos, sym, ftyp)
+ f.Note = tag
+ if embedded {
+ f.Embedded = 1
+ }
+ fields[i] = f
+ }
+ return r.needWrapper(types.NewStruct(tpkg, fields))
+}
+
+func (r *reader) signature(tpkg *types.Pkg, recv *types.Field) *types.Type {
+ r.sync(syncSignature)
+
+ params := r.params(&tpkg)
+ results := r.params(&tpkg)
+ if r.bool() { // variadic
+ params[len(params)-1].SetIsDDD(true)
+ }
+
+ return types.NewSignature(tpkg, recv, nil, params, results)
+}
+
+func (r *reader) params(tpkg **types.Pkg) []*types.Field {
+ r.sync(syncParams)
+ fields := make([]*types.Field, r.len())
+ for i := range fields {
+ *tpkg, fields[i] = r.param()
+ }
+ return fields
+}
+
+func (r *reader) param() (*types.Pkg, *types.Field) {
+ r.sync(syncParam)
+
+ pos := r.pos()
+ pkg, sym := r.localIdent()
+ typ := r.typ()
+
+ return pkg, types.NewField(pos, sym, typ)
+}
+
+// @@@ Objects
+
+var objReader = map[*types.Sym]pkgReaderIndex{}
+
+func (r *reader) obj() ir.Node {
+ r.sync(syncObject)
+
+ if r.bool() {
+ idx := r.len()
+ obj := r.dict.funcsObj[idx]
+ if obj == nil {
+ fn := r.dict.funcs[idx]
+ targs := make([]*types.Type, len(fn.explicits))
+ for i, targ := range fn.explicits {
+ targs[i] = r.p.typIdx(targ, r.dict)
+ }
+
+ obj = r.p.objIdx(fn.idx, nil, targs)
+ assert(r.dict.funcsObj[idx] == nil)
+ r.dict.funcsObj[idx] = obj
+ }
+ return obj
+ }
+
+ idx := r.reloc(relocObj)
+
+ explicits := make([]*types.Type, r.len())
+ for i := range explicits {
+ explicits[i] = r.typ()
+ }
+
+ var implicits []*types.Type
+ if r.dict != nil {
+ implicits = r.dict.targs
+ }
+
+ return r.p.objIdx(idx, implicits, explicits)
+}
+
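+// objIdx returns the ir.Node for the object at the given index,
+// instantiated with the given implicit and explicit type arguments.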
+func (pr *pkgReader) objIdx(idx int, implicits, explicits []*types.Type) ir.Node {
+ rname := pr.newReader(relocName, idx, syncObject1)
+ _, sym := rname.qualifiedIdent()
+ tag := codeObj(rname.code(syncCodeObj))
+
+ if tag == objStub {
+ assert(!sym.IsBlank())
+ switch sym.Pkg {
+ case types.BuiltinPkg, ir.Pkgs.Unsafe:
+ return sym.Def.(ir.Node)
+ }
+ if pri, ok := objReader[sym]; ok {
+ return pri.pr.objIdx(pri.idx, nil, explicits)
+ }
+ if haveLegacyImports {
+ assert(len(explicits) == 0)
+ return typecheck.Resolve(ir.NewIdent(src.NoXPos, sym))
+ }
+ base.Fatalf("unresolved stub: %v", sym)
+ }
+
+ dict := pr.objDictIdx(sym, idx, implicits, explicits)
+
+ r := pr.newReader(relocObj, idx, syncObject1)
+ r.ext = pr.newReader(relocObjExt, idx, syncObject1)
+
+ r.dict = dict
+ r.ext.dict = dict
+
+ sym = r.mangle(sym)
+ if !sym.IsBlank() && sym.Def != nil {
+ return sym.Def.(*ir.Name)
+ }
+
+ do := func(op ir.Op, hasTParams bool) *ir.Name {
+ pos := r.pos()
+ if hasTParams {
+ r.typeParamNames()
+ }
+
+ name := ir.NewDeclNameAt(pos, op, sym)
+ name.Class = ir.PEXTERN // may be overridden later
+ if !sym.IsBlank() {
+ if sym.Def != nil {
+ base.FatalfAt(name.Pos(), "already have a definition for %v", name)
+ }
+ assert(sym.Def == nil)
+ sym.Def = name
+ }
+ return name
+ }
+
+ switch tag {
+ default:
+ panic("unexpected object")
+
+ case objAlias:
+ name := do(ir.OTYPE, false)
+ r.setType(name, r.typ())
+ name.SetAlias(true)
+ return name
+
+ case objConst:
+ name := do(ir.OLITERAL, false)
+ typ, val := r.value()
+ r.setType(name, typ)
+ r.setValue(name, val)
+ return name
+
+ case objFunc:
+ if sym.Name == "init" {
+ sym = renameinit()
+ }
+ name := do(ir.ONAME, true)
+ r.setType(name, r.signature(sym.Pkg, nil))
+
+ name.Func = ir.NewFunc(r.pos())
+ name.Func.Nname = name
+
+ r.ext.funcExt(name)
+ return name
+
+ case objType:
+ name := do(ir.OTYPE, true)
+ typ := types.NewNamed(name)
+ r.setType(name, typ)
+
+ // Important: We need to do this before SetUnderlying.
+ r.ext.typeExt(name)
+
+ // We need to defer CheckSize until we've called SetUnderlying to
+ // handle recursive types.
+ types.DeferCheckSize()
+ typ.SetUnderlying(r.typ())
+ types.ResumeCheckSize()
+
+ methods := make([]*types.Field, r.len())
+ for i := range methods {
+ methods[i] = r.method()
+ }
+ if len(methods) != 0 {
+ typ.Methods().Set(methods)
+ }
+
+ if !typ.IsPtr() {
+ r.needWrapper(typ)
+ }
+
+ return name
+
+ case objVar:
+ name := do(ir.ONAME, false)
+ r.setType(name, r.typ())
+ r.ext.varExt(name)
+ return name
+ }
+}
+
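+// mangle returns the symbol to use for an instantiated object: the
+// type arguments are appended to sym's name in brackets, with implicit
+// arguments separated from explicit ones by ';' and arguments within a
+// group by ','. For example (illustrative), instantiating F with the
+// implicit argument int and the explicit argument string yields
+// "F[int;string]", while two explicit arguments yield "F[int,string]".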
+func (r *reader) mangle(sym *types.Sym) *types.Sym {
+ if !r.hasTypeParams() {
+ return sym
+ }
+
+ var buf bytes.Buffer
+ buf.WriteString(sym.Name)
+ buf.WriteByte('[')
+ for i, targ := range r.dict.targs {
+ if i > 0 {
+ if i == r.dict.implicits {
+ buf.WriteByte(';')
+ } else {
+ buf.WriteByte(',')
+ }
+ }
+ buf.WriteString(targ.LinkString())
+ }
+ buf.WriteByte(']')
+ return sym.Pkg.Lookup(buf.String())
+}
+
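+// objDictIdx reads the dictionary for the object at the given index:
+// its type arguments (implicit followed by explicit), its derived
+// types, and the instantiated functions it references.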
+func (pr *pkgReader) objDictIdx(sym *types.Sym, idx int, implicits, explicits []*types.Type) *readerDict {
+ r := pr.newReader(relocObjDict, idx, syncObject1)
+
+ var dict readerDict
+
+ nimplicits := r.len()
+ nexplicits := r.len()
+
+ if nimplicits > len(implicits) || nexplicits != len(explicits) {
+ base.Fatalf("%v has %v+%v params, but instantiated with %v+%v args", sym, nimplicits, nexplicits, len(implicits), len(explicits))
+ }
+
+ dict.targs = append(implicits[:nimplicits:nimplicits], explicits...)
+ dict.implicits = nimplicits
+
+ // For stenciling, we can just skip over the type parameters.
+ for range dict.targs[dict.implicits:] {
+ // Skip past bounds without actually evaluating them.
+ r.sync(syncType)
+ if r.bool() {
+ r.len()
+ } else {
+ r.reloc(relocType)
+ }
+ }
+
+ dict.derived = make([]derivedInfo, r.len())
+ dict.derivedTypes = make([]*types.Type, len(dict.derived))
+ for i := range dict.derived {
+ dict.derived[i] = derivedInfo{r.reloc(relocType), r.bool()}
+ }
+
+ dict.funcs = make([]objInfo, r.len())
+ dict.funcsObj = make([]ir.Node, len(dict.funcs))
+ for i := range dict.funcs {
+ objIdx := r.reloc(relocObj)
+ targs := make([]typeInfo, r.len())
+ for j := range targs {
+ targs[j] = r.typInfo()
+ }
+ dict.funcs[i] = objInfo{idx: objIdx, explicits: targs}
+ }
+
+ return &dict
+}
+
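+// typeParamNames reads past the type parameter names; for stenciling
+// only the type arguments recorded in the dictionary are needed.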
+func (r *reader) typeParamNames() {
+ r.sync(syncTypeParamNames)
+
+ for range r.dict.targs[r.dict.implicits:] {
+ r.pos()
+ r.localIdent()
+ }
+}
+
+func (r *reader) value() (*types.Type, constant.Value) {
+ r.sync(syncValue)
+ typ := r.typ()
+ return typ, FixValue(typ, r.rawValue())
+}
+
+func (r *reader) method() *types.Field {
+ r.sync(syncMethod)
+ pos := r.pos()
+ pkg, sym := r.selector()
+ r.typeParamNames()
+ _, recv := r.param()
+ typ := r.signature(pkg, recv)
+
+ fnsym := ir.MethodSym(recv.Type, sym)
+ name := ir.NewNameAt(pos, fnsym)
+ r.setType(name, typ)
+
+ name.Func = ir.NewFunc(r.pos())
+ name.Func.Nname = name
+
+ r.ext.funcExt(name)
+
+ meth := types.NewField(name.Func.Pos(), sym, typ)
+ meth.Nname = name
+ meth.SetNointerface(name.Func.Pragma&ir.Nointerface != 0)
+
+ return meth
+}
+
+func (r *reader) qualifiedIdent() (pkg *types.Pkg, sym *types.Sym) {
+ r.sync(syncSym)
+ pkg = r.pkg()
+ if name := r.string(); name != "" {
+ sym = pkg.Lookup(name)
+ }
+ return
+}
+
+func (r *reader) localIdent() (pkg *types.Pkg, sym *types.Sym) {
+ r.sync(syncLocalIdent)
+ pkg = r.pkg()
+ if name := r.string(); name != "" {
+ sym = pkg.Lookup(name)
+ }
+ return
+}
+
+func (r *reader) selector() (origPkg *types.Pkg, sym *types.Sym) {
+ r.sync(syncSelector)
+ origPkg = r.pkg()
+ name := r.string()
+ pkg := origPkg
+ if types.IsExported(name) {
+ pkg = types.LocalPkg
+ }
+ sym = pkg.Lookup(name)
+ return
+}
+
+func (r *reader) hasTypeParams() bool {
+ return r.dict.hasTypeParams()
+}
+
+func (dict *readerDict) hasTypeParams() bool {
+ return dict != nil && len(dict.targs) != 0
+}
+
+// @@@ Compiler extensions
+
+func (r *reader) funcExt(name *ir.Name) {
+ r.sync(syncFuncExt)
+
+ name.Class = 0 // so MarkFunc doesn't complain
+ ir.MarkFunc(name)
+
+ fn := name.Func
+
+ // XXX: Workaround because linker doesn't know how to copy Pos.
+ if !fn.Pos().IsKnown() {
+ fn.SetPos(name.Pos())
+ }
+
+ // Normally, we only compile local functions, which avoids redundant compilation work.
+ // n.Defn is not nil for local functions, and is nil for imported functions. But for
+ // generic functions, we might have an instantiation that no other package has seen before.
+ // So we need to be conservative and compile it again.
+ //
+ // That's why name.Defn is set here, so ir.VisitFuncsBottomUp can analyze the function.
+ // TODO(mdempsky,cuonglm): find a cleaner way to handle this.
+ if name.Sym().Pkg == types.LocalPkg || r.hasTypeParams() {
+ name.Defn = fn
+ }
+
+ fn.Pragma = r.pragmaFlag()
+ r.linkname(name)
+
+ typecheck.Func(fn)
+
+ if r.bool() {
+ fn.ABI = obj.ABI(r.uint64())
+
+ // Escape analysis.
+ for _, fs := range &types.RecvsParams {
+ for _, f := range fs(name.Type()).FieldSlice() {
+ f.Note = r.string()
+ }
+ }
+
+ if r.bool() {
+ fn.Inl = &ir.Inline{
+ Cost: int32(r.len()),
+ CanDelayResults: r.bool(),
+ }
+ r.addBody(name.Func)
+ }
+ } else {
+ r.addBody(name.Func)
+ }
+ r.sync(syncEOF)
+}
+
+func (r *reader) typeExt(name *ir.Name) {
+ r.sync(syncTypeExt)
+
+ typ := name.Type()
+
+ if r.hasTypeParams() {
+ // Set "RParams" (really type arguments here, not parameters) so
+ // this type is treated as "fully instantiated". This ensures the
+ // type descriptor is written out as DUPOK and method wrappers are
+ // generated even for imported types.
+ var targs []*types.Type
+ targs = append(targs, r.dict.targs...)
+ typ.SetRParams(targs)
+ }
+
+ name.SetPragma(r.pragmaFlag())
+ if name.Pragma()&ir.NotInHeap != 0 {
+ typ.SetNotInHeap(true)
+ }
+
+ typecheck.SetBaseTypeIndex(typ, r.int64(), r.int64())
+}
+
+func (r *reader) varExt(name *ir.Name) {
+ r.sync(syncVarExt)
+ r.linkname(name)
+}
+
+func (r *reader) linkname(name *ir.Name) {
+ assert(name.Op() == ir.ONAME)
+ r.sync(syncLinkname)
+
+ if idx := r.int64(); idx >= 0 {
+ lsym := name.Linksym()
+ lsym.SymIdx = int32(idx)
+ lsym.Set(obj.AttrIndexed, true)
+ } else {
+ name.Sym().Linkname = r.string()
+ }
+}
+
+func (r *reader) pragmaFlag() ir.PragmaFlag {
+ r.sync(syncPragma)
+ return ir.PragmaFlag(r.int())
+}
+
+// @@@ Function bodies
+
+// bodyReader tracks where the serialized IR for a function's body can
+// be found.
+var bodyReader = map[*ir.Func]pkgReaderIndex{}
+
+// todoBodies holds the list of function bodies that still need to be
+// constructed.
+var todoBodies []*ir.Func
+
+func (r *reader) addBody(fn *ir.Func) {
+ pri := pkgReaderIndex{r.p, r.reloc(relocBody), r.dict}
+ bodyReader[fn] = pri
+
+ if r.curfn == nil {
+ todoBodies = append(todoBodies, fn)
+ return
+ }
+
+ pri.funcBody(fn)
+}
+
+func (pri pkgReaderIndex) funcBody(fn *ir.Func) {
+ r := pri.asReader(relocBody, syncFuncBody)
+ r.funcBody(fn)
+}
+
+func (r *reader) funcBody(fn *ir.Func) {
+ r.curfn = fn
+ r.closureVars = fn.ClosureVars
+
+ ir.WithFunc(fn, func() {
+ r.funcargs(fn)
+
+ if !r.bool() {
+ return
+ }
+
+ body := r.stmts()
+ if body == nil {
+ pos := src.NoXPos
+ if quirksMode() {
+ pos = funcParamsEndPos(fn)
+ }
+ body = []ir.Node{typecheck.Stmt(ir.NewBlockStmt(pos, nil))}
+ }
+ fn.Body = body
+ fn.Endlineno = r.pos()
+ })
+
+ r.marker.WriteTo(fn)
+}
+
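+// funcargs declares fn's receiver, parameters, and result parameters,
+// synthesizing names (e.g., ~r0) for anonymous or blank results.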
+func (r *reader) funcargs(fn *ir.Func) {
+ sig := fn.Nname.Type()
+
+ if recv := sig.Recv(); recv != nil {
+ r.funcarg(recv, recv.Sym, ir.PPARAM)
+ }
+ for _, param := range sig.Params().FieldSlice() {
+ r.funcarg(param, param.Sym, ir.PPARAM)
+ }
+
+ for i, param := range sig.Results().FieldSlice() {
+ sym := types.OrigSym(param.Sym)
+
+ if sym == nil || sym.IsBlank() {
+ prefix := "~r"
+ if r.inlCall != nil {
+ prefix = "~R"
+ } else if sym != nil {
+ prefix = "~b"
+ }
+ sym = typecheck.LookupNum(prefix, i)
+ }
+
+ r.funcarg(param, sym, ir.PPARAMOUT)
+ }
+}
+
+func (r *reader) funcarg(param *types.Field, sym *types.Sym, ctxt ir.Class) {
+ if sym == nil {
+ assert(ctxt == ir.PPARAM)
+ if r.inlCall != nil {
+ r.inlvars.Append(ir.BlankNode)
+ }
+ return
+ }
+
+ name := ir.NewNameAt(r.updatePos(param.Pos), sym)
+ r.setType(name, param.Type)
+ r.addLocal(name, ctxt)
+
+ if r.inlCall == nil {
+ if !r.funarghack {
+ param.Sym = sym
+ param.Nname = name
+ }
+ } else {
+ if ctxt == ir.PPARAMOUT {
+ r.retvars.Append(name)
+ } else {
+ r.inlvars.Append(name)
+ }
+ }
+}
+
+func (r *reader) addLocal(name *ir.Name, ctxt ir.Class) {
+ assert(ctxt == ir.PAUTO || ctxt == ir.PPARAM || ctxt == ir.PPARAMOUT)
+
+ r.sync(syncAddLocal)
+ if enableSync {
+ want := r.int()
+ if have := len(r.locals); have != want {
+ base.FatalfAt(name.Pos(), "locals table has desynced")
+ }
+ }
+
+ name.SetUsed(true)
+ r.locals = append(r.locals, name)
+
+ // TODO(mdempsky): Move earlier.
+ if ir.IsBlank(name) {
+ return
+ }
+
+ if r.inlCall != nil {
+ if ctxt == ir.PAUTO {
+ name.SetInlLocal(true)
+ } else {
+ name.SetInlFormal(true)
+ ctxt = ir.PAUTO
+ }
+
+ // TODO(mdempsky): Rethink this hack.
+ if strings.HasPrefix(name.Sym().Name, "~") || base.Flag.GenDwarfInl == 0 {
+ name.SetPos(r.inlCall.Pos())
+ name.SetInlFormal(false)
+ name.SetInlLocal(false)
+ }
+ }
+
+ name.Class = ctxt
+ name.Curfn = r.curfn
+
+ r.curfn.Dcl = append(r.curfn.Dcl, name)
+
+ if ctxt == ir.PAUTO {
+ name.SetFrameOffset(0)
+ }
+}
+
+func (r *reader) useLocal() *ir.Name {
+ r.sync(syncUseObjLocal)
+ if r.bool() {
+ return r.locals[r.len()]
+ }
+ return r.closureVars[r.len()]
+}
+
+func (r *reader) openScope() {
+ r.sync(syncOpenScope)
+ pos := r.pos()
+
+ if base.Flag.Dwarf {
+ r.scopeVars = append(r.scopeVars, len(r.curfn.Dcl))
+ r.marker.Push(pos)
+ }
+}
+
+func (r *reader) closeScope() {
+ r.sync(syncCloseScope)
+ r.lastCloseScopePos = r.pos()
+
+ r.closeAnotherScope()
+}
+
+// closeAnotherScope is like closeScope, but it reuses the same mark
+// position as the last closeScope call. This is useful for "for" and
+// "if" statements, as their implicit blocks always end at the same
+// position as an explicit block.
+func (r *reader) closeAnotherScope() {
+ r.sync(syncCloseAnotherScope)
+
+ if base.Flag.Dwarf {
+ scopeVars := r.scopeVars[len(r.scopeVars)-1]
+ r.scopeVars = r.scopeVars[:len(r.scopeVars)-1]
+
+ // Quirkish: noder decides which scopes to keep before
+ // typechecking, whereas incremental typechecking during IR
+ // construction can result in new autotemps being allocated. To
+ // produce identical output, we ignore autotemps here for the
+ // purpose of deciding whether to retract the scope.
+ //
+ // This is important for net/http/fcgi, because it contains:
+ //
+ // var body io.ReadCloser
+ // if len(content) > 0 {
+ // body, req.pw = io.Pipe()
+ // } else { … }
+ //
+ // Notably, io.Pipe is inlinable, and inlining it introduces a ~R0
+ // variable at the call site.
+ //
+ // Noder does not preserve the scope where the io.Pipe() call
+ // resides, because it doesn't contain any declared variables in
+ // source. So the ~R0 variable ends up being assigned to the
+ // enclosing scope instead.
+ //
+ // However, typechecking this assignment also introduces
+ // autotemps, because io.Pipe's results need conversion before
+ // they can be assigned to their respective destination variables.
+ //
+ // TODO(mdempsky): We should probably just keep all scopes, and
+ // let dwarfgen take care of pruning them instead.
+ retract := true
+ for _, n := range r.curfn.Dcl[scopeVars:] {
+ if !n.AutoTemp() {
+ retract = false
+ break
+ }
+ }
+
+ if retract {
+ // no variables were declared in this scope, so we can retract it.
+ r.marker.Unpush()
+ } else {
+ r.marker.Pop(r.lastCloseScopePos)
+ }
+ }
+}
+
+// @@@ Statements
+
+func (r *reader) stmt() ir.Node {
+ switch stmts := r.stmts(); len(stmts) {
+ case 0:
+ return nil
+ case 1:
+ return stmts[0]
+ default:
+ return ir.NewBlockStmt(stmts[0].Pos(), stmts)
+ }
+}
+
+func (r *reader) stmts() []ir.Node {
+ assert(ir.CurFunc == r.curfn)
+ var res ir.Nodes
+
+ r.sync(syncStmts)
+ for {
+ tag := codeStmt(r.code(syncStmt1))
+ if tag == stmtEnd {
+ r.sync(syncStmtsEnd)
+ return res
+ }
+
+ if n := r.stmt1(tag, &res); n != nil {
+ res.Append(typecheck.Stmt(n))
+ }
+ }
+}
+
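+// stmt1 reads a single statement. Statements that expand to multiple
+// nodes are appended directly to out and nil is returned; otherwise
+// the statement is returned for the caller to typecheck.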
+func (r *reader) stmt1(tag codeStmt, out *ir.Nodes) ir.Node {
+ var label *types.Sym
+ if n := len(*out); n > 0 {
+ if ls, ok := (*out)[n-1].(*ir.LabelStmt); ok {
+ label = ls.Label
+ }
+ }
+
+ switch tag {
+ default:
+ panic("unexpected statement")
+
+ case stmtAssign:
+ pos := r.pos()
+
+ // TODO(mdempsky): After quirks mode is gone, swap these
+ // statements so we visit LHS before RHS again.
+ rhs := r.exprList()
+ names, lhs := r.assignList()
+
+ if len(rhs) == 0 {
+ for _, name := range names {
+ as := ir.NewAssignStmt(pos, name, nil)
+ as.PtrInit().Append(ir.NewDecl(pos, ir.ODCL, name))
+ out.Append(typecheck.Stmt(as))
+ }
+ return nil
+ }
+
+ if len(lhs) == 1 && len(rhs) == 1 {
+ n := ir.NewAssignStmt(pos, lhs[0], rhs[0])
+ n.Def = r.initDefn(n, names)
+ return n
+ }
+
+ n := ir.NewAssignListStmt(pos, ir.OAS2, lhs, rhs)
+ n.Def = r.initDefn(n, names)
+ return n
+
+ case stmtAssignOp:
+ op := r.op()
+ lhs := r.expr()
+ pos := r.pos()
+ rhs := r.expr()
+ return ir.NewAssignOpStmt(pos, op, lhs, rhs)
+
+ case stmtIncDec:
+ op := r.op()
+ lhs := r.expr()
+ pos := r.pos()
+ n := ir.NewAssignOpStmt(pos, op, lhs, ir.NewBasicLit(pos, one))
+ n.IncDec = true
+ return n
+
+ case stmtBlock:
+ out.Append(r.blockStmt()...)
+ return nil
+
+ case stmtBranch:
+ pos := r.pos()
+ op := r.op()
+ sym := r.optLabel()
+ return ir.NewBranchStmt(pos, op, sym)
+
+ case stmtCall:
+ pos := r.pos()
+ op := r.op()
+ call := r.expr()
+ return ir.NewGoDeferStmt(pos, op, call)
+
+ case stmtExpr:
+ return r.expr()
+
+ case stmtFor:
+ return r.forStmt(label)
+
+ case stmtIf:
+ return r.ifStmt()
+
+ case stmtLabel:
+ pos := r.pos()
+ sym := r.label()
+ return ir.NewLabelStmt(pos, sym)
+
+ case stmtReturn:
+ pos := r.pos()
+ results := r.exprList()
+ return ir.NewReturnStmt(pos, results)
+
+ case stmtSelect:
+ return r.selectStmt(label)
+
+ case stmtSend:
+ pos := r.pos()
+ ch := r.expr()
+ value := r.expr()
+ return ir.NewSendStmt(pos, ch, value)
+
+ case stmtSwitch:
+ return r.switchStmt(label)
+
+ case stmtTypeDeclHack:
+ // fake "type _ = int" declaration to prevent inlining in quirks mode.
+ assert(quirksMode())
+
+ name := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.BlankNode.Sym())
+ name.SetAlias(true)
+ r.setType(name, types.Types[types.TINT])
+
+ n := ir.NewDecl(src.NoXPos, ir.ODCLTYPE, name)
+ n.SetTypecheck(1)
+ return n
+ }
+}
+
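+// assignList reads the assigned-to operands of an assignment. Operands
+// that declare new variables are created as locals and also returned
+// in names; the rest are read as ordinary expressions.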
+func (r *reader) assignList() ([]*ir.Name, []ir.Node) {
+ lhs := make([]ir.Node, r.len())
+ var names []*ir.Name
+
+ for i := range lhs {
+ if r.bool() {
+ pos := r.pos()
+ _, sym := r.localIdent()
+ typ := r.typ()
+
+ name := ir.NewNameAt(pos, sym)
+ lhs[i] = name
+ names = append(names, name)
+ r.setType(name, typ)
+ r.addLocal(name, ir.PAUTO)
+ continue
+ }
+
+ lhs[i] = r.expr()
+ }
+
+ return names, lhs
+}
+
+func (r *reader) blockStmt() []ir.Node {
+ r.sync(syncBlockStmt)
+ r.openScope()
+ stmts := r.stmts()
+ r.closeScope()
+ return stmts
+}
+
+func (r *reader) forStmt(label *types.Sym) ir.Node {
+ r.sync(syncForStmt)
+
+ r.openScope()
+
+ if r.bool() {
+ pos := r.pos()
+
+ // TODO(mdempsky): After quirks mode is gone, swap these
+ // statements so we read LHS before X again.
+ x := r.expr()
+ names, lhs := r.assignList()
+
+ body := r.blockStmt()
+ r.closeAnotherScope()
+
+ rang := ir.NewRangeStmt(pos, nil, nil, x, body)
+ if len(lhs) >= 1 {
+ rang.Key = lhs[0]
+ if len(lhs) >= 2 {
+ rang.Value = lhs[1]
+ }
+ }
+ rang.Def = r.initDefn(rang, names)
+ rang.Label = label
+ return rang
+ }
+
+ pos := r.pos()
+ init := r.stmt()
+ cond := r.expr()
+ post := r.stmt()
+ body := r.blockStmt()
+ r.closeAnotherScope()
+
+ stmt := ir.NewForStmt(pos, init, cond, post, body)
+ stmt.Label = label
+ return stmt
+}
+
+func (r *reader) ifStmt() ir.Node {
+ r.sync(syncIfStmt)
+ r.openScope()
+ pos := r.pos()
+ init := r.stmts()
+ cond := r.expr()
+ then := r.blockStmt()
+ els := r.stmts()
+ n := ir.NewIfStmt(pos, cond, then, els)
+ n.SetInit(init)
+ r.closeAnotherScope()
+ return n
+}
+
+func (r *reader) selectStmt(label *types.Sym) ir.Node {
+ r.sync(syncSelectStmt)
+
+ pos := r.pos()
+ clauses := make([]*ir.CommClause, r.len())
+ for i := range clauses {
+ if i > 0 {
+ r.closeScope()
+ }
+ r.openScope()
+
+ pos := r.pos()
+ comm := r.stmt()
+ body := r.stmts()
+
+ clauses[i] = ir.NewCommStmt(pos, comm, body)
+ }
+ if len(clauses) > 0 {
+ r.closeScope()
+ }
+ n := ir.NewSelectStmt(pos, clauses)
+ n.Label = label
+ return n
+}
+
+func (r *reader) switchStmt(label *types.Sym) ir.Node {
+ r.sync(syncSwitchStmt)
+
+ r.openScope()
+ pos := r.pos()
+ init := r.stmt()
+
+ var tag ir.Node
+ if r.bool() {
+ pos := r.pos()
+ var ident *ir.Ident
+ if r.bool() {
+ pos := r.pos()
+ sym := typecheck.Lookup(r.string())
+ ident = ir.NewIdent(pos, sym)
+ }
+ x := r.expr()
+ tag = ir.NewTypeSwitchGuard(pos, ident, x)
+ } else {
+ tag = r.expr()
+ }
+
+ tswitch, ok := tag.(*ir.TypeSwitchGuard)
+ if ok && tswitch.Tag == nil {
+ tswitch = nil
+ }
+
+ clauses := make([]*ir.CaseClause, r.len())
+ for i := range clauses {
+ if i > 0 {
+ r.closeScope()
+ }
+ r.openScope()
+
+ pos := r.pos()
+ cases := r.exprList()
+
+ clause := ir.NewCaseStmt(pos, cases, nil)
+ if tswitch != nil {
+ pos := r.pos()
+ typ := r.typ()
+
+ name := ir.NewNameAt(pos, tswitch.Tag.Sym())
+ r.setType(name, typ)
+ r.addLocal(name, ir.PAUTO)
+ clause.Var = name
+ name.Defn = tswitch
+ }
+
+ clause.Body = r.stmts()
+ clauses[i] = clause
+ }
+ if len(clauses) > 0 {
+ r.closeScope()
+ }
+ r.closeScope()
+
+ n := ir.NewSwitchStmt(pos, tag, clauses)
+ n.Label = label
+ if init != nil {
+ n.SetInit([]ir.Node{init})
+ }
+ return n
+}
+
+func (r *reader) label() *types.Sym {
+ r.sync(syncLabel)
+ name := r.string()
+ if r.inlCall != nil {
+ name = fmt.Sprintf("~%s·%d", name, inlgen)
+ }
+ return typecheck.Lookup(name)
+}
+
+func (r *reader) optLabel() *types.Sym {
+ r.sync(syncOptLabel)
+ if r.bool() {
+ return r.label()
+ }
+ return nil
+}
+
+// initDefn marks the given names as declared by defn and populates
+// its Init field with ODCL nodes. It then reports whether any names
+// were so declared, which can be used to initialize defn.Def.
+func (r *reader) initDefn(defn ir.InitNode, names []*ir.Name) bool {
+ if len(names) == 0 {
+ return false
+ }
+
+ init := make([]ir.Node, len(names))
+ for i, name := range names {
+ name.Defn = defn
+ init[i] = ir.NewDecl(name.Pos(), ir.ODCL, name)
+ }
+ defn.SetInit(init)
+ return true
+}
+
+// @@@ Expressions
+
+// expr reads and returns a typechecked expression.
+func (r *reader) expr() (res ir.Node) {
+ defer func() {
+ if res != nil && res.Typecheck() == 0 {
+ base.FatalfAt(res.Pos(), "%v missed typecheck", res)
+ }
+ }()
+
+ switch tag := codeExpr(r.code(syncExpr)); tag {
+ default:
+ panic("unhandled expression")
+
+ case exprNone:
+ return nil
+
+ case exprBlank:
+ // blank only allowed in LHS of assignments
+ // TODO(mdempsky): Handle directly in assignList instead?
+ return typecheck.AssignExpr(ir.BlankNode)
+
+ case exprLocal:
+ return typecheck.Expr(r.useLocal())
+
+ case exprName:
+ // Callee instead of Expr allows builtins
+ // TODO(mdempsky): Handle builtins directly in exprCall, like method calls?
+ return typecheck.Callee(r.obj())
+
+ case exprType:
+ // TODO(mdempsky): ir.TypeNode should probably return a typecheck'd node.
+ n := ir.TypeNode(r.typ())
+ n.SetTypecheck(1)
+ return n
+
+ case exprConst:
+ pos := r.pos()
+ typ, val := r.value()
+ op := r.op()
+ orig := r.string()
+ return typecheck.Expr(OrigConst(pos, typ, val, op, orig))
+
+ case exprCompLit:
+ return r.compLit()
+
+ case exprFuncLit:
+ return r.funcLit()
+
+ case exprSelector:
+ x := r.expr()
+ pos := r.pos()
+ _, sym := r.selector()
+ return typecheck.Expr(ir.NewSelectorExpr(pos, ir.OXDOT, x, sym))
+
+ case exprIndex:
+ x := r.expr()
+ pos := r.pos()
+ index := r.expr()
+ return typecheck.Expr(ir.NewIndexExpr(pos, x, index))
+
+ case exprSlice:
+ x := r.expr()
+ pos := r.pos()
+ var index [3]ir.Node
+ for i := range index {
+ index[i] = r.expr()
+ }
+ op := ir.OSLICE
+ if index[2] != nil {
+ op = ir.OSLICE3
+ }
+ return typecheck.Expr(ir.NewSliceExpr(pos, op, x, index[0], index[1], index[2]))
+
+ case exprAssert:
+ x := r.expr()
+ pos := r.pos()
+ typ := r.expr().(ir.Ntype)
+ return typecheck.Expr(ir.NewTypeAssertExpr(pos, x, typ))
+
+ case exprUnaryOp:
+ op := r.op()
+ pos := r.pos()
+ x := r.expr()
+
+ switch op {
+ case ir.OADDR:
+ return typecheck.Expr(typecheck.NodAddrAt(pos, x))
+ case ir.ODEREF:
+ return typecheck.Expr(ir.NewStarExpr(pos, x))
+ }
+ return typecheck.Expr(ir.NewUnaryExpr(pos, op, x))
+
+ case exprBinaryOp:
+ op := r.op()
+ x := r.expr()
+ pos := r.pos()
+ y := r.expr()
+
+ switch op {
+ case ir.OANDAND, ir.OOROR:
+ return typecheck.Expr(ir.NewLogicalExpr(pos, op, x, y))
+ }
+ return typecheck.Expr(ir.NewBinaryExpr(pos, op, x, y))
+
+ case exprCall:
+ fun := r.expr()
+ if r.bool() { // method call
+ pos := r.pos()
+ _, sym := r.selector()
+ fun = typecheck.Callee(ir.NewSelectorExpr(pos, ir.OXDOT, fun, sym))
+ }
+ pos := r.pos()
+ args := r.exprs()
+ dots := r.bool()
+ return typecheck.Call(pos, fun, args, dots)
+
+ case exprConvert:
+ typ := r.typ()
+ pos := r.pos()
+ x := r.expr()
+ return typecheck.Expr(ir.NewConvExpr(pos, ir.OCONV, typ, x))
+ }
+}
+
+func (r *reader) compLit() ir.Node {
+ r.sync(syncCompLit)
+ pos := r.pos()
+ typ0 := r.typ()
+
+ typ := typ0
+ if typ.IsPtr() {
+ typ = typ.Elem()
+ }
+ if typ.Kind() == types.TFORW {
+ base.FatalfAt(pos, "unresolved composite literal type: %v", typ)
+ }
+ isStruct := typ.Kind() == types.TSTRUCT
+
+ elems := make([]ir.Node, r.len())
+ for i := range elems {
+ elemp := &elems[i]
+
+ if isStruct {
+ sk := ir.NewStructKeyExpr(r.pos(), typ.Field(r.len()), nil)
+ *elemp, elemp = sk, &sk.Value
+ } else if r.bool() {
+ kv := ir.NewKeyExpr(r.pos(), r.expr(), nil)
+ *elemp, elemp = kv, &kv.Value
+ }
+
+ *elemp = wrapName(r.pos(), r.expr())
+ }
+
+ lit := typecheck.Expr(ir.NewCompLitExpr(pos, ir.OCOMPLIT, ir.TypeNode(typ), elems))
+ if typ0.IsPtr() {
+ lit = typecheck.Expr(typecheck.NodAddrAt(pos, lit))
+ lit.SetType(typ0)
+ }
+ return lit
+}
+
+func wrapName(pos src.XPos, x ir.Node) ir.Node {
+ // These nodes do not carry line numbers.
+ // Introduce a wrapper node to give them the correct line.
+ switch ir.Orig(x).Op() {
+ case ir.OTYPE, ir.OLITERAL:
+ if x.Sym() == nil {
+ break
+ }
+ fallthrough
+ case ir.ONAME, ir.ONONAME, ir.OPACK, ir.ONIL:
+ p := ir.NewParenExpr(pos, x)
+ p.SetImplicit(true)
+ return p
+ }
+ return x
+}
+
+func (r *reader) funcLit() ir.Node {
+ r.sync(syncFuncLit)
+
+ pos := r.pos()
+ typPos := r.pos()
+ xtype2 := r.signature(types.LocalPkg, nil)
+
+ opos := pos
+ if quirksMode() {
+ opos = r.origPos(pos)
+ }
+
+ fn := ir.NewClosureFunc(opos, r.curfn != nil)
+ clo := fn.OClosure
+ ir.NameClosure(clo, r.curfn)
+
+ r.setType(fn.Nname, xtype2)
+ if quirksMode() {
+ fn.Nname.Ntype = ir.TypeNodeAt(typPos, xtype2)
+ }
+ typecheck.Func(fn)
+ r.setType(clo, fn.Type())
+
+ fn.ClosureVars = make([]*ir.Name, 0, r.len())
+ for len(fn.ClosureVars) < cap(fn.ClosureVars) {
+ ir.NewClosureVar(r.pos(), fn, r.useLocal())
+ }
+
+ r.addBody(fn)
+
+ // TODO(mdempsky): Remove hard-coding of typecheck.Target.
+ return ir.UseClosure(clo, typecheck.Target)
+}
+
+func (r *reader) exprList() []ir.Node {
+ r.sync(syncExprList)
+ return r.exprs()
+}
+
+func (r *reader) exprs() []ir.Node {
+ r.sync(syncExprs)
+ nodes := make([]ir.Node, r.len())
+ if len(nodes) == 0 {
+ return nil // TODO(mdempsky): Unclear if this matters.
+ }
+ for i := range nodes {
+ nodes[i] = r.expr()
+ }
+ return nodes
+}
+
+func (r *reader) op() ir.Op {
+ r.sync(syncOp)
+ return ir.Op(r.len())
+}
+
+// @@@ Package initialization
+
+func (r *reader) pkgInit(self *types.Pkg, target *ir.Package) {
+ if quirksMode() {
+ for i, n := 0, r.len(); i < n; i++ {
+ // Eagerly register position bases, so their filenames are
+ // assigned stable indices.
+ posBase := r.posBase()
+ _ = base.Ctxt.PosTable.XPos(src.MakePos(posBase, 0, 0))
+ }
+
+ for i, n := 0, r.len(); i < n; i++ {
+ // Eagerly resolve imported objects, so any filenames registered
+ // in the process are assigned stable indices too.
+ _, sym := r.qualifiedIdent()
+ typecheck.Resolve(ir.NewIdent(src.NoXPos, sym))
+ assert(sym.Def != nil)
+ }
+ }
+
+ cgoPragmas := make([][]string, r.len())
+ for i := range cgoPragmas {
+ cgoPragmas[i] = r.strings()
+ }
+ target.CgoPragmas = cgoPragmas
+
+ r.pkgDecls(target)
+
+ r.sync(syncEOF)
+}
+
+func (r *reader) pkgDecls(target *ir.Package) {
+ r.sync(syncDecls)
+ for {
+ switch code := codeDecl(r.code(syncDecl)); code {
+ default:
+ panic(fmt.Sprintf("unhandled decl: %v", code))
+
+ case declEnd:
+ return
+
+ case declFunc:
+ names := r.pkgObjs(target)
+ assert(len(names) == 1)
+ target.Decls = append(target.Decls, names[0].Func)
+
+ case declMethod:
+ typ := r.typ()
+ _, sym := r.selector()
+
+ method := typecheck.Lookdot1(nil, sym, typ, typ.Methods(), 0)
+ target.Decls = append(target.Decls, method.Nname.(*ir.Name).Func)
+
+ case declVar:
+ pos := r.pos()
+ names := r.pkgObjs(target)
+ values := r.exprList()
+
+ if len(names) > 1 && len(values) == 1 {
+ as := ir.NewAssignListStmt(pos, ir.OAS2, nil, values)
+ for _, name := range names {
+ as.Lhs.Append(name)
+ name.Defn = as
+ }
+ target.Decls = append(target.Decls, as)
+ } else {
+ for i, name := range names {
+ as := ir.NewAssignStmt(pos, name, nil)
+ if i < len(values) {
+ as.Y = values[i]
+ }
+ name.Defn = as
+ target.Decls = append(target.Decls, as)
+ }
+ }
+
+ if n := r.len(); n > 0 {
+ assert(len(names) == 1)
+ embeds := make([]ir.Embed, n)
+ for i := range embeds {
+ embeds[i] = ir.Embed{Pos: r.pos(), Patterns: r.strings()}
+ }
+ names[0].Embed = &embeds
+ target.Embeds = append(target.Embeds, names[0])
+ }
+
+ case declOther:
+ r.pkgObjs(target)
+ }
+ }
+}
+
+func (r *reader) pkgObjs(target *ir.Package) []*ir.Name {
+ r.sync(syncDeclNames)
+ nodes := make([]*ir.Name, r.len())
+ for i := range nodes {
+ r.sync(syncDeclName)
+
+ name := r.obj().(*ir.Name)
+ nodes[i] = name
+
+ sym := name.Sym()
+ if sym.IsBlank() {
+ continue
+ }
+
+ switch name.Class {
+ default:
+ base.FatalfAt(name.Pos(), "unexpected class: %v", name.Class)
+
+ case ir.PEXTERN:
+ target.Externs = append(target.Externs, name)
+
+ case ir.PFUNC:
+ assert(name.Type().Recv() == nil)
+
+ // TODO(mdempsky): Cleaner way to recognize init?
+ if strings.HasPrefix(sym.Name, "init.") {
+ target.Inits = append(target.Inits, name.Func)
+ }
+ }
+
+ if types.IsExported(sym.Name) {
+ assert(!sym.OnExportList())
+ target.Exports = append(target.Exports, name)
+ sym.SetOnExportList(true)
+ }
+
+ if base.Flag.AsmHdr != "" {
+ assert(!sym.Asm())
+ target.Asms = append(target.Asms, name)
+ sym.SetAsm(true)
+ }
+ }
+
+ return nodes
+}
+
+// @@@ Inlining
+
+var inlgen = 0
+
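+// InlineCall returns an inlined-call expression for a call to fn, built by
+// re-reading fn's body from its export data and splicing it into the caller
+// at call. It returns nil if fn's body is unavailable (legacy imports).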
+func InlineCall(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr {
+ // TODO(mdempsky): Turn callerfn into an explicit parameter.
+ callerfn := ir.CurFunc
+
+ pri, ok := bodyReader[fn]
+ if !ok {
+ // Assume it's an imported function or something that we don't
+ // have access to in quirks mode.
+ if haveLegacyImports {
+ return nil
+ }
+
+ base.FatalfAt(call.Pos(), "missing function body for call to %v", fn)
+ }
+
+ if fn.Inl.Body == nil {
+ expandInline(fn, pri)
+ }
+
+ r := pri.asReader(relocBody, syncFuncBody)
+
+ // TODO(mdempsky): This still feels clumsy. Can we do better?
+ tmpfn := ir.NewFunc(fn.Pos())
+ tmpfn.Nname = ir.NewNameAt(fn.Nname.Pos(), callerfn.Sym())
+ tmpfn.Closgen = callerfn.Closgen
+ defer func() { callerfn.Closgen = tmpfn.Closgen }()
+
+ r.setType(tmpfn.Nname, fn.Type())
+ r.curfn = tmpfn
+
+ r.inlCaller = callerfn
+ r.inlCall = call
+ r.inlFunc = fn
+ r.inlTreeIndex = inlIndex
+ r.inlPosBases = make(map[*src.PosBase]*src.PosBase)
+
+ r.closureVars = make([]*ir.Name, len(r.inlFunc.ClosureVars))
+ for i, cv := range r.inlFunc.ClosureVars {
+ r.closureVars[i] = cv.Outer
+ }
+
+ r.funcargs(fn)
+
+ assert(r.bool()) // have body
+ r.delayResults = fn.Inl.CanDelayResults
+
+ r.retlabel = typecheck.AutoLabel(".i")
+ inlgen++
+
+ init := ir.TakeInit(call)
+
+ // For normal function calls, the function callee expression
+ // may contain side effects. Make sure to preserve these,
+ // if necessary (#42703).
+ if call.Op() == ir.OCALLFUNC {
+ inline.CalleeEffects(&init, call.X)
+ }
+
+ var args ir.Nodes
+ if call.Op() == ir.OCALLMETH {
+ base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
+ }
+ args.Append(call.Args...)
+
+ // Create assignment to declare and initialize inlvars.
+ as2 := ir.NewAssignListStmt(call.Pos(), ir.OAS2, r.inlvars, args)
+ as2.Def = true
+ var as2init ir.Nodes
+ for _, name := range r.inlvars {
+ if ir.IsBlank(name) {
+ continue
+ }
+ // TODO(mdempsky): Use inlined position of name.Pos() instead?
+ name := name.(*ir.Name)
+ as2init.Append(ir.NewDecl(call.Pos(), ir.ODCL, name))
+ name.Defn = as2
+ }
+ as2.SetInit(as2init)
+ init.Append(typecheck.Stmt(as2))
+
+ if !r.delayResults {
+ // If not delaying retvars, declare and zero initialize the
+ // result variables now.
+ for _, name := range r.retvars {
+ // TODO(mdempsky): Use inlined position of name.Pos() instead?
+ name := name.(*ir.Name)
+ init.Append(ir.NewDecl(call.Pos(), ir.ODCL, name))
+ ras := ir.NewAssignStmt(call.Pos(), name, nil)
+ init.Append(typecheck.Stmt(ras))
+ }
+ }
+
+ // Add an inline mark just before the inlined body.
+ // This mark is inline in the code so that it's a reasonable spot
+ // to put a breakpoint. Not sure if that's really necessary or not
+ // (in which case it could go at the end of the function instead).
+ // Note issue 28603.
+ init.Append(ir.NewInlineMarkStmt(call.Pos().WithIsStmt(), int64(r.inlTreeIndex)))
+
+ nparams := len(r.curfn.Dcl)
+
+ ir.WithFunc(r.curfn, func() {
+ r.curfn.Body = r.stmts()
+ r.curfn.Endlineno = r.pos()
+
+ deadcode.Func(r.curfn)
+
+ // Replace any "return" statements within the function body.
+ var edit func(ir.Node) ir.Node
+ edit = func(n ir.Node) ir.Node {
+ if ret, ok := n.(*ir.ReturnStmt); ok {
+ n = typecheck.Stmt(r.inlReturn(ret))
+ }
+ ir.EditChildren(n, edit)
+ return n
+ }
+ edit(r.curfn)
+ })
+
+ body := ir.Nodes(r.curfn.Body)
+
+ // Quirk: If deadcode elimination turned a non-empty function into
+ // an empty one, we need to set the position for the empty block
+ // left behind to the inlined position for src.NoXPos, so that
+ // an empty string gets added into the DWARF file name listing at
+ // the appropriate index.
+ if quirksMode() && len(body) == 1 {
+ if block, ok := body[0].(*ir.BlockStmt); ok && len(block.List) == 0 {
+ block.SetPos(r.updatePos(src.NoXPos))
+ }
+ }
+
+ // Quirkish: We need to eagerly prune variables added during
+ // inlining, but removed by deadcode.Func above. Unused
+ // variables will get removed during stack frame layout anyway, but
+ // len(fn.Dcl) ends up influencing things like autotmp naming.
+
+ used := usedLocals(body)
+
+ for i, name := range r.curfn.Dcl {
+ if i < nparams || used.Has(name) {
+ name.Curfn = callerfn
+ callerfn.Dcl = append(callerfn.Dcl, name)
+
+ // Quirkish. TODO(mdempsky): Document why.
+ if name.AutoTemp() {
+ name.SetEsc(ir.EscUnknown)
+
+ if base.Flag.GenDwarfInl != 0 {
+ name.SetInlLocal(true)
+ } else {
+ name.SetPos(r.inlCall.Pos())
+ }
+ }
+ }
+ }
+
+ body.Append(ir.NewLabelStmt(call.Pos(), r.retlabel))
+
+ res := ir.NewInlinedCallExpr(call.Pos(), body, append([]ir.Node(nil), r.retvars...))
+ res.SetInit(init)
+ res.SetType(call.Type())
+ res.SetTypecheck(1)
+
+ // Inlining shouldn't add any functions to todoBodies.
+ assert(len(todoBodies) == 0)
+
+ return res
+}
+
+// inlReturn returns a statement that can substitute for the given
+// return statement when inlining.
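+// For example (sketch), "return a, b" becomes a block that assigns a and b
+// to r.retvars (declaring them first when results are delayed) and then
+// jumps to r.retlabel.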
+func (r *reader) inlReturn(ret *ir.ReturnStmt) *ir.BlockStmt {
+ pos := r.inlCall.Pos()
+
+ block := ir.TakeInit(ret)
+
+ if results := ret.Results; len(results) != 0 {
+ assert(len(r.retvars) == len(results))
+
+ as2 := ir.NewAssignListStmt(pos, ir.OAS2, append([]ir.Node(nil), r.retvars...), ret.Results)
+
+ if r.delayResults {
+ for _, name := range r.retvars {
+ // TODO(mdempsky): Use inlined position of name.Pos() instead?
+ name := name.(*ir.Name)
+ block.Append(ir.NewDecl(pos, ir.ODCL, name))
+ name.Defn = as2
+ }
+ }
+
+ block.Append(as2)
+ }
+
+ block.Append(ir.NewBranchStmt(pos, ir.OGOTO, r.retlabel))
+ return ir.NewBlockStmt(pos, block)
+}
+
+// expandInline reads in an extra copy of IR to populate
+// fn.Inl.{Dcl,Body}.
+func expandInline(fn *ir.Func, pri pkgReaderIndex) {
+ // TODO(mdempsky): Remove this function. It's currently needed by
+ // dwarfgen/dwarf.go:preInliningDcls, which requires fn.Inl.Dcl to
+ // create abstract function DIEs. But we should be able to provide it
+ // with the same information some other way.
+
+ fndcls := len(fn.Dcl)
+ topdcls := len(typecheck.Target.Decls)
+
+ tmpfn := ir.NewFunc(fn.Pos())
+ tmpfn.Nname = ir.NewNameAt(fn.Nname.Pos(), fn.Sym())
+ tmpfn.ClosureVars = fn.ClosureVars
+
+ {
+ r := pri.asReader(relocBody, syncFuncBody)
+ r.setType(tmpfn.Nname, fn.Type())
+
+ // Don't change the parameters' Sym/Nname fields.
+ r.funarghack = true
+
+ r.funcBody(tmpfn)
+
+ ir.WithFunc(tmpfn, func() {
+ deadcode.Func(tmpfn)
+ })
+ }
+
+ used := usedLocals(tmpfn.Body)
+
+ for _, name := range tmpfn.Dcl {
+ if name.Class != ir.PAUTO || used.Has(name) {
+ name.Curfn = fn
+ fn.Inl.Dcl = append(fn.Inl.Dcl, name)
+ }
+ }
+ fn.Inl.Body = tmpfn.Body
+
+ // Double check that we didn't change fn.Dcl by accident.
+ assert(fndcls == len(fn.Dcl))
+
+ // typecheck.Stmts may have added function literals to
+ // typecheck.Target.Decls. Remove them again so we don't risk trying
+ // to compile them multiple times.
+ typecheck.Target.Decls = typecheck.Target.Decls[:topdcls]
+}
+
+// usedLocals returns a set of local variables that are used within body.
+func usedLocals(body []ir.Node) ir.NameSet {
+ var used ir.NameSet
+ ir.VisitList(body, func(n ir.Node) {
+ if n, ok := n.(*ir.Name); ok && n.Op() == ir.ONAME && n.Class == ir.PAUTO {
+ used.Add(n)
+ }
+ })
+ return used
+}
+
+// @@@ Method wrappers
+
+// needWrapperTypes lists types for which we may need to generate
+// method wrappers.
+var needWrapperTypes []*types.Type
+
+// haveWrapperTypes lists types for which we know we already have
+// method wrappers, because we found the type in an imported package.
+var haveWrapperTypes []*types.Type
+
+func (r *reader) needWrapper(typ *types.Type) *types.Type {
+ if typ.IsPtr() {
+ base.Fatalf("bad pointer type: %v", typ)
+ }
+
+ // If a type was found in an imported package, then we can assume
+ // that package (or one of its transitive dependencies) already
+ // generated method wrappers for it.
+ //
+ // Exception: If we're instantiating an imported generic type or
+ // function, we might be instantiating it with type arguments not
+ // previously seen before.
+ //
+ // TODO(mdempsky): Distinguish when a generic function or type was
+ // instantiated in an imported package so that we can add types to
+ // haveWrapperTypes instead.
+ if r.p != localPkgReader && !r.hasTypeParams() {
+ haveWrapperTypes = append(haveWrapperTypes, typ)
+ } else {
+ needWrapperTypes = append(needWrapperTypes, typ)
+ }
+
+ return typ
+}
+
+func (r *reader) wrapTypes(target *ir.Package) {
+ // always generate a wrapper for error.Error (#29304)
+ r.needWrapper(types.ErrorType)
+
+ seen := make(map[string]*types.Type)
+ addType := func(typ *types.Type) bool {
+ if typ.Sym() != nil {
+ return true
+ }
+
+ key := typ.LinkString()
+ if prev := seen[key]; prev != nil {
+ if !types.Identical(typ, prev) {
+ base.Fatalf("collision: types %v and %v have short string %q", typ, prev, key)
+ }
+ return false
+ }
+
+ seen[key] = typ
+ return true
+ }
+
+ for _, typ := range haveWrapperTypes {
+ addType(typ)
+ }
+ haveWrapperTypes = nil
+
+ for _, typ := range needWrapperTypes {
+ if addType(typ) {
+ r.wrapType(typ, target)
+ }
+ }
+ needWrapperTypes = nil
+}
+
+func (r *reader) wrapType(typ *types.Type, target *ir.Package) {
+ if !typ.IsInterface() {
+ typecheck.CalcMethods(typ)
+ }
+ for _, meth := range typ.AllMethods().Slice() {
+ if meth.Sym.IsBlank() || !meth.IsMethod() {
+ base.FatalfAt(meth.Pos, "invalid method: %v", meth)
+ }
+
+ r.methodValueWrapper(typ, meth, target)
+
+ r.methodWrapper(0, typ, meth, target)
+
+ // For non-interface types, we also want *T wrappers.
+ if !typ.IsInterface() {
+ r.methodWrapper(1, typ, meth, target)
+
+ // For not-in-heap types, *T is a scalar, not pointer shaped,
+ // so the interface wrappers use **T.
+ if typ.NotInHeap() {
+ r.methodWrapper(2, typ, meth, target)
+ }
+ }
+ }
+}
+
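+// methodWrapper generates a wrapper whose receiver type is tbase wrapped in
+// derefs levels of pointer: 0 for T, 1 for *T, and 2 for **T (the last only
+// for not-in-heap types; see wrapType).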
+func (r *reader) methodWrapper(derefs int, tbase *types.Type, method *types.Field, target *ir.Package) {
+ wrapper := tbase
+ for i := 0; i < derefs; i++ {
+ wrapper = types.NewPtr(wrapper)
+ }
+
+ sym := ir.MethodSym(wrapper, method.Sym)
+ assert(!sym.Siggen())
+ sym.SetSiggen(true)
+
+ wrappee := method.Type.Recv().Type
+ if types.Identical(wrapper, wrappee) ||
+ !types.IsMethodApplicable(wrapper, method) ||
+ !reflectdata.NeedEmit(tbase) {
+ return
+ }
+
+ // TODO(mdempsky): Use method.Pos instead?
+ pos := base.AutogeneratedPos
+
+ fn := r.newWrapperFunc(pos, sym, wrapper, method)
+
+ var recv ir.Node = fn.Nname.Type().Recv().Nname.(*ir.Name)
+
+ // For simple *T wrappers around T methods, panicwrap produces a
+ // nicer panic message.
+ if wrapper.IsPtr() && types.Identical(wrapper.Elem(), wrappee) {
+ cond := ir.NewBinaryExpr(pos, ir.OEQ, recv, types.BuiltinPkg.Lookup("nil").Def.(ir.Node))
+ then := []ir.Node{ir.NewCallExpr(pos, ir.OCALL, typecheck.LookupRuntime("panicwrap"), nil)}
+ fn.Body.Append(ir.NewIfStmt(pos, cond, then, nil))
+ }
+
+ // typecheck will add one implicit deref, if necessary,
+ // but not-in-heap types require more for their **T wrappers.
+ for i := 1; i < derefs; i++ {
+ recv = Implicit(ir.NewStarExpr(pos, recv))
+ }
+
+ addTailCall(pos, fn, recv, method)
+
+ r.finishWrapperFunc(fn, target)
+}
+
+func (r *reader) methodValueWrapper(tbase *types.Type, method *types.Field, target *ir.Package) {
+ recvType := tbase
+ if !tbase.IsInterface() {
+ recvType = method.Type.Recv().Type
+ if !types.Identical(tbase, types.ReceiverBaseType(recvType)) {
+ return
+ }
+ }
+
+ sym := ir.MethodSymSuffix(recvType, method.Sym, "-fm")
+ assert(!sym.Uniq())
+ sym.SetUniq(true)
+
+ // TODO(mdempsky): Use method.Pos instead?
+ pos := base.AutogeneratedPos
+
+ fn := r.newWrapperFunc(pos, sym, nil, method)
+ sym.Def = fn.Nname
+
+ // Declare and initialize variable holding receiver.
+ recv := ir.NewHiddenParam(pos, fn, typecheck.Lookup(".this"), recvType)
+
+ if !reflectdata.NeedEmit(tbase) {
+ typecheck.Func(fn)
+ return
+ }
+
+ addTailCall(pos, fn, recv, method)
+
+ r.finishWrapperFunc(fn, target)
+}
+
+func (r *reader) newWrapperFunc(pos src.XPos, sym *types.Sym, wrapper *types.Type, method *types.Field) *ir.Func {
+ fn := ir.NewFunc(pos)
+ fn.SetDupok(true) // TODO(mdempsky): Leave unset for local, non-generic wrappers?
+
+ name := ir.NewNameAt(pos, sym)
+ ir.MarkFunc(name)
+ name.Func = fn
+ name.Defn = fn
+ fn.Nname = name
+
+ sig := newWrapperType(wrapper, method)
+ r.setType(name, sig)
+
+ // TODO(mdempsky): De-duplicate with similar logic in funcargs.
+ defParams := func(class ir.Class, params *types.Type) {
+ for _, param := range params.FieldSlice() {
+ name := ir.NewNameAt(param.Pos, param.Sym)
+ name.Class = class
+ r.setType(name, param.Type)
+
+ name.Curfn = fn
+ fn.Dcl = append(fn.Dcl, name)
+
+ param.Nname = name
+ }
+ }
+
+ defParams(ir.PPARAM, sig.Recvs())
+ defParams(ir.PPARAM, sig.Params())
+ defParams(ir.PPARAMOUT, sig.Results())
+
+ return fn
+}
+
+func (r *reader) finishWrapperFunc(fn *ir.Func, target *ir.Package) {
+ typecheck.Func(fn)
+
+ ir.WithFunc(fn, func() {
+ typecheck.Stmts(fn.Body)
+ })
+
+ target.Decls = append(target.Decls, fn)
+}
+
+// newWrapperType returns a copy of the given signature type, but with
+// the receiver parameter type substituted with recvType.
+// If recvType is nil, newWrapperType returns a signature
+// without a receiver parameter.
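+//
+// For example (sketch), wrapping a method of type func(x int) string with
+// recvType *T yields a signature with receiver ".this *T" and the same
+// parameters and results, where anonymous or blank parameters are renamed
+// to .anonN.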
+func newWrapperType(recvType *types.Type, method *types.Field) *types.Type {
+ clone := func(params []*types.Field) []*types.Field {
+ res := make([]*types.Field, len(params))
+ for i, param := range params {
+ sym := param.Sym
+ if sym == nil || sym.Name == "_" {
+ sym = typecheck.LookupNum(".anon", i)
+ }
+ res[i] = types.NewField(param.Pos, sym, param.Type)
+ res[i].SetIsDDD(param.IsDDD())
+ }
+ return res
+ }
+
+ sig := method.Type
+
+ var recv *types.Field
+ if recvType != nil {
+ recv = types.NewField(sig.Recv().Pos, typecheck.Lookup(".this"), recvType)
+ }
+ params := clone(sig.Params().FieldSlice())
+ results := clone(sig.Results().FieldSlice())
+
+ return types.NewSignature(types.NoPkg, recv, nil, params, results)
+}
+
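+// addTailCall appends to fn's body a call of method on recv that forwards
+// fn's parameters and returns the call's results, if any. Despite the name,
+// it currently emits an ordinary call rather than an OTAILCALL (see the
+// TODO below).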
+func addTailCall(pos src.XPos, fn *ir.Func, recv ir.Node, method *types.Field) {
+ sig := fn.Nname.Type()
+ args := make([]ir.Node, sig.NumParams())
+ for i, param := range sig.Params().FieldSlice() {
+ args[i] = param.Nname.(*ir.Name)
+ }
+
+ // TODO(mdempsky): Support creating OTAILCALL, when possible. See reflectdata.methodWrapper.
+ // Not urgent though, because tail calls are currently incompatible with regabi anyway.
+
+ fn.SetWrapper(true) // TODO(mdempsky): Leave unset for tail calls?
+
+ dot := ir.NewSelectorExpr(pos, ir.OXDOT, recv, method.Sym)
+ call := typecheck.Call(pos, dot, args, method.Type.IsVariadic()).(*ir.CallExpr)
+
+ if method.Type.NumResults() == 0 {
+ fn.Body.Append(call)
+ return
+ }
+
+ ret := ir.NewReturnStmt(pos, nil)
+ ret.Results = []ir.Node{call}
+ fn.Body.Append(ret)
+}
diff --git a/src/cmd/compile/internal/noder/reader2.go b/src/cmd/compile/internal/noder/reader2.go
new file mode 100644
index 0000000000..5637196dc0
--- /dev/null
+++ b/src/cmd/compile/internal/noder/reader2.go
@@ -0,0 +1,510 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types2"
+ "cmd/internal/src"
+)
+
+type pkgReader2 struct {
+ pkgDecoder
+
+ check *types2.Checker
+ imports map[string]*types2.Package
+
+ posBases []*syntax.PosBase
+ pkgs []*types2.Package
+ typs []types2.Type
+}
+
+func readPackage2(check *types2.Checker, imports map[string]*types2.Package, input pkgDecoder) *types2.Package {
+ pr := pkgReader2{
+ pkgDecoder: input,
+
+ check: check,
+ imports: imports,
+
+ posBases: make([]*syntax.PosBase, input.numElems(relocPosBase)),
+ pkgs: make([]*types2.Package, input.numElems(relocPkg)),
+ typs: make([]types2.Type, input.numElems(relocType)),
+ }
+
+ r := pr.newReader(relocMeta, publicRootIdx, syncPublic)
+ pkg := r.pkg()
+ r.bool() // has init
+
+ for i, n := 0, r.len(); i < n; i++ {
+ r.obj()
+ }
+
+ r.sync(syncEOF)
+
+ pkg.MarkComplete()
+ return pkg
+}
+
+type reader2 struct {
+ decoder
+
+ p *pkgReader2
+
+ dict *reader2Dict
+}
+
+type reader2Dict struct {
+ bounds []typeInfo
+
+ tparams []*types2.TypeParam
+
+ derived []derivedInfo
+ derivedTypes []types2.Type
+}
+
+type reader2TypeBound struct {
+ derived bool
+ boundIdx int
+}
+
+func (pr *pkgReader2) newReader(k reloc, idx int, marker syncMarker) *reader2 {
+ return &reader2{
+ decoder: pr.newDecoder(k, idx, marker),
+ p: pr,
+ }
+}
+
+// @@@ Positions
+
+func (r *reader2) pos() syntax.Pos {
+ r.sync(syncPos)
+ if !r.bool() {
+ return syntax.Pos{}
+ }
+
+ // TODO(mdempsky): Delta encoding.
+ posBase := r.posBase()
+ line := r.uint()
+ col := r.uint()
+ return syntax.MakePos(posBase, line, col)
+}
+
+func (r *reader2) posBase() *syntax.PosBase {
+ return r.p.posBaseIdx(r.reloc(relocPosBase))
+}
+
+func (pr *pkgReader2) posBaseIdx(idx int) *syntax.PosBase {
+ if b := pr.posBases[idx]; b != nil {
+ return b
+ }
+
+ r := pr.newReader(relocPosBase, idx, syncPosBase)
+ var b *syntax.PosBase
+
+ filename := r.string()
+ _ = r.string() // absolute file name
+
+ if r.bool() {
+ b = syntax.NewFileBase(filename)
+ } else {
+ pos := r.pos()
+ line := r.uint()
+ col := r.uint()
+ b = syntax.NewLineBase(pos, filename, line, col)
+ }
+
+ pr.posBases[idx] = b
+ return b
+}
+
+// @@@ Packages
+
+func (r *reader2) pkg() *types2.Package {
+ r.sync(syncPkg)
+ return r.p.pkgIdx(r.reloc(relocPkg))
+}
+
+func (pr *pkgReader2) pkgIdx(idx int) *types2.Package {
+ // TODO(mdempsky): Consider using some non-nil pointer to indicate
+ // the universe scope, so we don't need to keep re-reading it.
+ if pkg := pr.pkgs[idx]; pkg != nil {
+ return pkg
+ }
+
+ pkg := pr.newReader(relocPkg, idx, syncPkgDef).doPkg()
+ pr.pkgs[idx] = pkg
+ return pkg
+}
+
+func (r *reader2) doPkg() *types2.Package {
+ path := r.string()
+ if path == "builtin" {
+ return nil // universe
+ }
+ if path == "" {
+ path = r.p.pkgPath
+ }
+
+ if pkg := r.p.imports[path]; pkg != nil {
+ return pkg
+ }
+
+ name := r.string()
+ height := r.len()
+
+ pkg := types2.NewPackageHeight(path, name, height)
+ r.p.imports[path] = pkg
+
+ // TODO(mdempsky): The list of imported packages is important for
+ // go/types, but we could probably skip populating it for types2.
+ imports := make([]*types2.Package, r.len())
+ for i := range imports {
+ imports[i] = r.pkg()
+ }
+ pkg.SetImports(imports)
+
+ return pkg
+}
+
+// @@@ Types
+
+func (r *reader2) typ() types2.Type {
+ return r.p.typIdx(r.typInfo(), r.dict)
+}
+
+func (r *reader2) typInfo() typeInfo {
+ r.sync(syncType)
+ if r.bool() {
+ return typeInfo{idx: r.len(), derived: true}
+ }
+ return typeInfo{idx: r.reloc(relocType), derived: false}
+}
+
+func (pr *pkgReader2) typIdx(info typeInfo, dict *reader2Dict) types2.Type {
+ idx := info.idx
+ var where *types2.Type
+ if info.derived {
+ where = &dict.derivedTypes[idx]
+ idx = dict.derived[idx].idx
+ } else {
+ where = &pr.typs[idx]
+ }
+
+ if typ := *where; typ != nil {
+ return typ
+ }
+
+ r := pr.newReader(relocType, idx, syncTypeIdx)
+ r.dict = dict
+
+ typ := r.doTyp()
+ assert(typ != nil)
+
+ // See comment in pkgReader.typIdx explaining how this happens.
+ if prev := *where; prev != nil {
+ return prev
+ }
+
+ *where = typ
+ return typ
+}
+
+func (r *reader2) doTyp() (res types2.Type) {
+ switch tag := codeType(r.code(syncType)); tag {
+ default:
+ base.FatalfAt(src.NoXPos, "unhandled type tag: %v", tag)
+ panic("unreachable")
+
+ case typeBasic:
+ return types2.Typ[r.len()]
+
+ case typeNamed:
+ obj, targs := r.obj()
+ name := obj.(*types2.TypeName)
+ if len(targs) != 0 {
+ return r.p.check.InstantiateLazy(syntax.Pos{}, name.Type(), targs, nil, false)
+ }
+ return name.Type()
+
+ case typeTypeParam:
+ return r.dict.tparams[r.len()]
+
+ case typeArray:
+ len := int64(r.uint64())
+ return types2.NewArray(r.typ(), len)
+ case typeChan:
+ dir := types2.ChanDir(r.len())
+ return types2.NewChan(dir, r.typ())
+ case typeMap:
+ return types2.NewMap(r.typ(), r.typ())
+ case typePointer:
+ return types2.NewPointer(r.typ())
+ case typeSignature:
+ return r.signature(nil)
+ case typeSlice:
+ return types2.NewSlice(r.typ())
+ case typeStruct:
+ return r.structType()
+ case typeInterface:
+ return r.interfaceType()
+ case typeUnion:
+ return r.unionType()
+ }
+}
+
+func (r *reader2) structType() *types2.Struct {
+ fields := make([]*types2.Var, r.len())
+ var tags []string
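+ // tags is populated lazily: it stays nil until the first non-empty tag,
+ // then is padded with empty strings so its indices line up with fields.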
+ for i := range fields {
+ pos := r.pos()
+ pkg, name := r.selector()
+ ftyp := r.typ()
+ tag := r.string()
+ embedded := r.bool()
+
+ fields[i] = types2.NewField(pos, pkg, name, ftyp, embedded)
+ if tag != "" {
+ for len(tags) < i {
+ tags = append(tags, "")
+ }
+ tags = append(tags, tag)
+ }
+ }
+ return types2.NewStruct(fields, tags)
+}
+
+func (r *reader2) unionType() *types2.Union {
+ terms := make([]*types2.Term, r.len())
+ for i := range terms {
+ terms[i] = types2.NewTerm(r.bool(), r.typ())
+ }
+ return types2.NewUnion(terms)
+}
+
+func (r *reader2) interfaceType() *types2.Interface {
+ methods := make([]*types2.Func, r.len())
+ embeddeds := make([]types2.Type, r.len())
+
+ for i := range methods {
+ pos := r.pos()
+ pkg, name := r.selector()
+ mtyp := r.signature(nil)
+ methods[i] = types2.NewFunc(pos, pkg, name, mtyp)
+ }
+
+ for i := range embeddeds {
+ embeddeds[i] = r.typ()
+ }
+
+ return types2.NewInterfaceType(methods, embeddeds)
+}
+
+func (r *reader2) signature(recv *types2.Var) *types2.Signature {
+ r.sync(syncSignature)
+
+ params := r.params()
+ results := r.params()
+ variadic := r.bool()
+
+ return types2.NewSignature(recv, params, results, variadic)
+}
+
+func (r *reader2) params() *types2.Tuple {
+ r.sync(syncParams)
+ params := make([]*types2.Var, r.len())
+ for i := range params {
+ params[i] = r.param()
+ }
+ return types2.NewTuple(params...)
+}
+
+func (r *reader2) param() *types2.Var {
+ r.sync(syncParam)
+
+ pos := r.pos()
+ pkg, name := r.localIdent()
+ typ := r.typ()
+
+ return types2.NewParam(pos, pkg, name, typ)
+}
+
+// @@@ Objects
+
+func (r *reader2) obj() (types2.Object, []types2.Type) {
+ r.sync(syncObject)
+
+ assert(!r.bool())
+
+ pkg, name := r.p.objIdx(r.reloc(relocObj))
+ obj := pkg.Scope().Lookup(name)
+
+ targs := make([]types2.Type, r.len())
+ for i := range targs {
+ targs[i] = r.typ()
+ }
+
+ return obj, targs
+}
+
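+// objIdx resolves the object at the given index: it reads the object's
+// qualified name and, unless the object is a stub, lazily installs its
+// definition into the owning package's scope. It returns the package and
+// object name.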
+func (pr *pkgReader2) objIdx(idx int) (*types2.Package, string) {
+ rname := pr.newReader(relocName, idx, syncObject1)
+
+ objPkg, objName := rname.qualifiedIdent()
+ assert(objName != "")
+
+ tag := codeObj(rname.code(syncCodeObj))
+
+ if tag == objStub {
+ assert(objPkg == nil)
+ return objPkg, objName
+ }
+
+ dict := pr.objDictIdx(idx)
+
+ r := pr.newReader(relocObj, idx, syncObject1)
+ r.dict = dict
+
+ objPkg.Scope().InsertLazy(objName, func() types2.Object {
+ switch tag {
+ default:
+ panic("weird")
+
+ case objAlias:
+ pos := r.pos()
+ typ := r.typ()
+ return types2.NewTypeName(pos, objPkg, objName, typ)
+
+ case objConst:
+ pos := r.pos()
+ typ, val := r.value()
+ return types2.NewConst(pos, objPkg, objName, typ, val)
+
+ case objFunc:
+ pos := r.pos()
+ tparams := r.typeParamNames()
+ sig := r.signature(nil)
+ sig.SetTParams(tparams)
+ return types2.NewFunc(pos, objPkg, objName, sig)
+
+ case objType:
+ pos := r.pos()
+
+ return types2.NewTypeNameLazy(pos, objPkg, objName, func(named *types2.Named) (tparams []*types2.TypeName, underlying types2.Type, methods []*types2.Func) {
+ tparams = r.typeParamNames()
+
+ // TODO(mdempsky): Rewrite receiver types when the underlying type is
+ // an Interface? The go/types importer does this (I think because unit
+ // tests expected it), but cmd/compile doesn't care about it, so maybe
+ // we can avoid worrying about that here.
+ underlying = r.typ().Underlying()
+
+ methods = make([]*types2.Func, r.len())
+ for i := range methods {
+ methods[i] = r.method()
+ }
+
+ return
+ })
+
+ case objVar:
+ pos := r.pos()
+ typ := r.typ()
+ return types2.NewVar(pos, objPkg, objName, typ)
+ }
+ })
+
+ return objPkg, objName
+}
+
+func (r *reader2) value() (types2.Type, constant.Value) {
+ r.sync(syncValue)
+ return r.typ(), r.rawValue()
+}
+
+func (pr *pkgReader2) objDictIdx(idx int) *reader2Dict {
+ r := pr.newReader(relocObjDict, idx, syncObject1)
+
+ var dict reader2Dict
+
+ if implicits := r.len(); implicits != 0 {
+ base.Fatalf("unexpected object with %v implicit type parameter(s)", implicits)
+ }
+
+ dict.bounds = make([]typeInfo, r.len())
+ for i := range dict.bounds {
+ dict.bounds[i] = r.typInfo()
+ }
+
+ dict.derived = make([]derivedInfo, r.len())
+ dict.derivedTypes = make([]types2.Type, len(dict.derived))
+ for i := range dict.derived {
+ dict.derived[i] = derivedInfo{r.reloc(relocType), r.bool()}
+ }
+
+ // function references follow, but reader2 doesn't need those
+
+ return &dict
+}
+
+func (r *reader2) typeParamNames() []*types2.TypeName {
+ r.sync(syncTypeParamNames)
+
+ // Note: This code assumes it only processes objects without
+ // implicit type parameters. This is currently fine, because
+ // reader2 is only used to read in exported declarations, which are
+ // always package scoped.
+
+ if len(r.dict.bounds) == 0 {
+ return nil
+ }
+
+ // Careful: Type parameter lists may have cycles. To allow for this,
+ // we construct the type parameter list in two passes: first we
+ // create all the TypeNames and TypeParams, then we construct and
+ // set the bound type.
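+ //
+ // For example (illustrative), in
+ //
+ //	func F[T interface{ Cmp(T) bool }]()
+ //
+ // the bound mentions T itself, so the TypeParam must exist before its
+ // constraint can be read.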
+
+ names := make([]*types2.TypeName, len(r.dict.bounds))
+ r.dict.tparams = make([]*types2.TypeParam, len(r.dict.bounds))
+ for i := range r.dict.bounds {
+ pos := r.pos()
+ pkg, name := r.localIdent()
+
+ names[i] = types2.NewTypeName(pos, pkg, name, nil)
+ r.dict.tparams[i] = r.p.check.NewTypeParam(names[i], nil)
+ }
+
+ for i, bound := range r.dict.bounds {
+ r.dict.tparams[i].SetConstraint(r.p.typIdx(bound, r.dict))
+ }
+
+ return names
+}
+
+func (r *reader2) method() *types2.Func {
+ r.sync(syncMethod)
+ pos := r.pos()
+ pkg, name := r.selector()
+
+ rparams := r.typeParamNames()
+ sig := r.signature(r.param())
+ sig.SetRParams(rparams)
+
+ _ = r.pos() // TODO(mdempsky): Remove; this is a hack for linker.go.
+ return types2.NewFunc(pos, pkg, name, sig)
+}
+
+func (r *reader2) qualifiedIdent() (*types2.Package, string) { return r.ident(syncSym) }
+func (r *reader2) localIdent() (*types2.Package, string) { return r.ident(syncLocalIdent) }
+func (r *reader2) selector() (*types2.Package, string) { return r.ident(syncSelector) }
+
+func (r *reader2) ident(marker syncMarker) (*types2.Package, string) {
+ r.sync(marker)
+ return r.pkg(), r.string()
+}
diff --git a/src/cmd/compile/internal/noder/reloc.go b/src/cmd/compile/internal/noder/reloc.go
new file mode 100644
index 0000000000..669a6182e6
--- /dev/null
+++ b/src/cmd/compile/internal/noder/reloc.go
@@ -0,0 +1,42 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+// A reloc indicates a particular section within a unified IR export.
+//
+// TODO(mdempsky): Rename to "section" or something similar?
+type reloc int
+
+// A relocEnt (relocation entry) is an entry in an atom's local
+// reference table.
+//
+// TODO(mdempsky): Rename this too.
+type relocEnt struct {
+ kind reloc
+ idx int
+}
+
+// Reserved indices within the meta relocation section.
+const (
+ publicRootIdx = 0
+ privateRootIdx = 1
+)
+
+const (
+ relocString reloc = iota
+ relocMeta
+ relocPosBase
+ relocPkg
+ relocName
+ relocType
+ relocObj
+ relocObjExt
+ relocObjDict
+ relocBody
+
+ numRelocs = iota
+)
diff --git a/src/cmd/compile/internal/noder/stencil.go b/src/cmd/compile/internal/noder/stencil.go
index 3ebc8dff6d..6736f128e3 100644
--- a/src/cmd/compile/internal/noder/stencil.go
+++ b/src/cmd/compile/internal/noder/stencil.go
@@ -8,21 +8,32 @@
package noder
import (
- "bytes"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/reflectdata"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
+ "cmd/internal/obj"
"cmd/internal/src"
"fmt"
- "strings"
+ "go/constant"
)
-// For catching problems as we add more features
-// TODO(danscales): remove assertions or replace with base.FatalfAt()
+// Enable extra consistency checks.
+const doubleCheck = true
+
func assert(p bool) {
- if !p {
- panic("assertion failed")
+ base.Assert(p)
+}
+
+// Temporary - for outputting information on derived types, dictionaries, sub-dictionaries.
+// Turn off when running tests.
+var infoPrintMode = false
+
+func infoPrint(format string, a ...interface{}) {
+ if infoPrintMode {
+ fmt.Printf(format, a...)
}
}
@@ -32,7 +43,8 @@ func assert(p bool) {
// encountered already or new ones that are encountered during the stenciling
// process.
func (g *irgen) stencil() {
- g.target.Stencils = make(map[*types.Sym]*ir.Func)
+ g.instInfoMap = make(map[*types.Sym]*instInfo)
+ g.gfInfoMap = make(map[*types.Sym]*gfInfo)
// Instantiate the methods of instantiated generic types that we have seen so far.
g.instantiateMethods()
@@ -72,56 +84,138 @@ func (g *irgen) stencil() {
// instantiated function if it hasn't been created yet, and change
// to calling that function directly.
modified := false
- foundFuncInst := false
+ closureRequired := false
+ // declInfo will be non-nil exactly if we are scanning an instantiated function
+ declInfo := g.instInfoMap[decl.Sym()]
+
ir.Visit(decl, func(n ir.Node) {
if n.Op() == ir.OFUNCINST {
- // We found a function instantiation that is not
- // immediately called.
- foundFuncInst = true
+ // generic F, not immediately called
+ closureRequired = true
}
- if n.Op() != ir.OCALL || n.(*ir.CallExpr).X.Op() != ir.OFUNCINST {
- return
+ if (n.Op() == ir.OMETHEXPR || n.Op() == ir.OMETHVALUE) && len(deref(n.(*ir.SelectorExpr).X.Type()).RParams()) > 0 && !types.IsInterfaceMethod(n.(*ir.SelectorExpr).Selection.Type) {
+ // T.M or x.M, where T or x is generic, but not immediately
+ // called. Not necessary if the method selected is
+ // actually for an embedded interface field.
+ closureRequired = true
+ }
+ if n.Op() == ir.OCALL && n.(*ir.CallExpr).X.Op() == ir.OFUNCINST {
+ // We have found a function call using a generic function
+ // instantiation.
+ call := n.(*ir.CallExpr)
+ inst := call.X.(*ir.InstExpr)
+ nameNode, isMeth := g.getInstNameNode(inst)
+ targs := typecheck.TypesOf(inst.Targs)
+ st := g.getInstantiation(nameNode, targs, isMeth)
+ dictValue, usingSubdict := g.getDictOrSubdict(declInfo, n, nameNode, targs, isMeth)
+ if infoPrintMode {
+ dictkind := "Main dictionary"
+ if usingSubdict {
+ dictkind = "Sub-dictionary"
+ }
+ if inst.X.Op() == ir.OMETHVALUE {
+ fmt.Printf("%s in %v at generic method call: %v - %v\n", dictkind, decl, inst.X, call)
+ } else {
+ fmt.Printf("%s in %v at generic function call: %v - %v\n", dictkind, decl, inst.X, call)
+ }
+ }
+
+ // Transform the Call now, which changes OCALL to
+ // OCALLFUNC and does typecheckaste/assignconvfn. Do
+ // it before installing the instantiation, so we are
+ // checking against non-shape param types in
+ // typecheckaste.
+ transformCall(call)
+
+ // Replace the OFUNCINST with a direct reference to the
+ // new stenciled function
+ call.X = st.Nname
+ if inst.X.Op() == ir.OMETHVALUE {
+ // When we create an instantiation of a method
+ // call, we make it a function. So, move the
+ // receiver to be the first arg of the function
+ // call.
+ call.Args.Prepend(inst.X.(*ir.SelectorExpr).X)
+ }
+
+ // Add dictionary to argument list.
+ call.Args.Prepend(dictValue)
+ modified = true
+ }
+ if n.Op() == ir.OCALLMETH && n.(*ir.CallExpr).X.Op() == ir.ODOTMETH && len(deref(n.(*ir.CallExpr).X.Type().Recv().Type).RParams()) > 0 {
+ // Method call on a generic type, which was instantiated by stenciling.
+ // Method calls on explicitly instantiated types will have an OFUNCINST
+ // and are handled above.
+ call := n.(*ir.CallExpr)
+ meth := call.X.(*ir.SelectorExpr)
+ targs := deref(meth.Type().Recv().Type).RParams()
+
+ t := meth.X.Type()
+ baseSym := deref(t).OrigSym
+ baseType := baseSym.Def.(*ir.Name).Type()
+ var gf *ir.Name
+ for _, m := range baseType.Methods().Slice() {
+ if meth.Sel == m.Sym {
+ gf = m.Nname.(*ir.Name)
+ break
+ }
+ }
+
+ // Transform the Call now, which changes OCALL
+ // to OCALLFUNC and does typecheckaste/assignconvfn.
+ transformCall(call)
+
+ st := g.getInstantiation(gf, targs, true)
+ dictValue, usingSubdict := g.getDictOrSubdict(declInfo, n, gf, targs, true)
+ // We have to be using a subdictionary, since this is
+ // a generic method call.
+ assert(usingSubdict)
+
+ // Transform to a function call, by appending the
+ // dictionary and the receiver to the args.
+ call.SetOp(ir.OCALLFUNC)
+ call.X = st.Nname
+ call.Args.Prepend(dictValue, meth.X)
+ modified = true
}
- // We have found a function call using a generic function
- // instantiation.
- call := n.(*ir.CallExpr)
- inst := call.X.(*ir.InstExpr)
- st := g.getInstantiationForNode(inst)
- // Replace the OFUNCINST with a direct reference to the
- // new stenciled function
- call.X = st.Nname
- if inst.X.Op() == ir.OCALLPART {
- // When we create an instantiation of a method
- // call, we make it a function. So, move the
- // receiver to be the first arg of the function
- // call.
- withRecv := make([]ir.Node, len(call.Args)+1)
- dot := inst.X.(*ir.SelectorExpr)
- withRecv[0] = dot.X
- copy(withRecv[1:], call.Args)
- call.Args = withRecv
- }
- // Transform the Call now, which changes OCALL
- // to OCALLFUNC and does typecheckaste/assignconvfn.
- transformCall(call)
- modified = true
})
- // If we found an OFUNCINST without a corresponding call in the
- // above decl, then traverse the nodes of decl again (with
+ // If we found a reference to a generic instantiation that wasn't an
+ // immediate call, then traverse the nodes of decl again (with
// EditChildren rather than Visit), where we actually change the
- // OFUNCINST node to an ONAME for the instantiated function.
+ // reference to the instantiation to a closure that captures the
+ // dictionary, then does a direct call.
// EditChildren is more expensive than Visit, so we only do this
- // in the infrequent case of an OFUNCINSt without a corresponding
+ // in the infrequent case of an OFUNCINST without a corresponding
// call.
- if foundFuncInst {
+ if closureRequired {
+ modified = true
var edit func(ir.Node) ir.Node
+ var outer *ir.Func
+ if f, ok := decl.(*ir.Func); ok {
+ outer = f
+ }
edit = func(x ir.Node) ir.Node {
if x.Op() == ir.OFUNCINST {
- st := g.getInstantiationForNode(x.(*ir.InstExpr))
- return st.Nname
+ child := x.(*ir.InstExpr).X
+ if child.Op() == ir.OMETHEXPR || child.Op() == ir.OMETHVALUE {
+ // Call EditChildren on child (x.X),
+ // not x, so that we don't do
+ // buildClosure() on the
+ // METHEXPR/METHVALUE nodes as well.
+ ir.EditChildren(child, edit)
+ return g.buildClosure(outer, x)
+ }
}
ir.EditChildren(x, edit)
+ switch {
+ case x.Op() == ir.OFUNCINST:
+ return g.buildClosure(outer, x)
+ case (x.Op() == ir.OMETHEXPR || x.Op() == ir.OMETHVALUE) &&
+ len(deref(x.(*ir.SelectorExpr).X.Type()).RParams()) > 0 &&
+ !types.IsInterfaceMethod(x.(*ir.SelectorExpr).Selection.Type):
+ return g.buildClosure(outer, x)
+ }
return x
}
edit(decl)
@@ -137,104 +231,384 @@ func (g *irgen) stencil() {
g.instantiateMethods()
}
+ g.finalizeSyms()
+}
+
+// buildClosure makes a closure to implement x, a OFUNCINST or OMETHEXPR
+// of generic type. outer is the containing function (or nil if closure is
+// in a global assignment instead of a function).
+func (g *irgen) buildClosure(outer *ir.Func, x ir.Node) ir.Node {
+ pos := x.Pos()
+ var target *ir.Func // target instantiated function/method
+ var dictValue ir.Node // dictionary to use
+ var rcvrValue ir.Node // receiver, if a method value
+ typ := x.Type() // type of the closure
+ var outerInfo *instInfo
+ if outer != nil {
+ outerInfo = g.instInfoMap[outer.Sym()]
+ }
+ usingSubdict := false
+ valueMethod := false
+ if x.Op() == ir.OFUNCINST {
+ inst := x.(*ir.InstExpr)
+
+ // Type arguments we're instantiating with.
+ targs := typecheck.TypesOf(inst.Targs)
+
+ // Find the generic function/method.
+ var gf *ir.Name
+ if inst.X.Op() == ir.ONAME {
+ // Instantiating a generic function call.
+ gf = inst.X.(*ir.Name)
+ } else if inst.X.Op() == ir.OMETHVALUE {
+ // Instantiating a method value x.M.
+ se := inst.X.(*ir.SelectorExpr)
+ rcvrValue = se.X
+ gf = se.Selection.Nname.(*ir.Name)
+ } else {
+ panic("unhandled")
+ }
+
+ // target is the instantiated function we're trying to call.
+ // For functions, the target expects a dictionary as its first argument.
+ // For method values, the target expects a dictionary and the receiver
+ // as its first two arguments.
+ // dictValue is the value to use for the dictionary argument.
+ target = g.getInstantiation(gf, targs, rcvrValue != nil)
+ dictValue, usingSubdict = g.getDictOrSubdict(outerInfo, x, gf, targs, rcvrValue != nil)
+ if infoPrintMode {
+ dictkind := "Main dictionary"
+ if usingSubdict {
+ dictkind = "Sub-dictionary"
+ }
+ if rcvrValue == nil {
+ fmt.Printf("%s in %v for generic function value %v\n", dictkind, outer, inst.X)
+ } else {
+ fmt.Printf("%s in %v for generic method value %v\n", dictkind, outer, inst.X)
+ }
+ }
+ } else { // ir.OMETHEXPR or ir.OMETHVALUE
+ // Method expression T.M where T is a generic type.
+ se := x.(*ir.SelectorExpr)
+ targs := deref(se.X.Type()).RParams()
+ if len(targs) == 0 {
+ panic("bad")
+ }
+ if x.Op() == ir.OMETHVALUE {
+ rcvrValue = se.X
+ }
+
+ // se.X.Type() is the top-level type of the method expression. To
+ // correctly handle method expressions involving embedded fields,
+ // look up the generic method below using the type of the receiver
+ // of se.Selection, since that will be the type that actually has
+ // the method.
+ recv := deref(se.Selection.Type.Recv().Type)
+ baseType := recv.OrigSym.Def.Type()
+ var gf *ir.Name
+ for _, m := range baseType.Methods().Slice() {
+ if se.Sel == m.Sym {
+ gf = m.Nname.(*ir.Name)
+ break
+ }
+ }
+ if !gf.Type().Recv().Type.IsPtr() {
+ // Remember if value method, so we can detect (*T).M case.
+ valueMethod = true
+ }
+ target = g.getInstantiation(gf, targs, true)
+ dictValue, usingSubdict = g.getDictOrSubdict(outerInfo, x, gf, targs, true)
+ if infoPrintMode {
+ dictkind := "Main dictionary"
+ if usingSubdict {
+ dictkind = "Sub-dictionary"
+ }
+ fmt.Printf("%s in %v for method expression %v\n", dictkind, outer, x)
+ }
+ }
+
+ // Build a closure to implement a function instantiation.
+ //
+ // func f[T any] (int, int) (int, int) { ...whatever... }
+ //
+ // Then any reference to f[int] not directly called gets rewritten to
+ //
+ // .dictN := ... dictionary to use ...
+ // func(a0, a1 int) (r0, r1 int) {
+ // return .inst.f[int](.dictN, a0, a1)
+ // }
+ //
+ // Similarly for method expressions,
+ //
+ // type g[T any] ....
+ // func (rcvr g[T]) f(a0, a1 int) (r0, r1 int) { ... }
+ //
+ // Any reference to g[int].f not directly called gets rewritten to
+ //
+ // .dictN := ... dictionary to use ...
+ // func(rcvr g[int], a0, a1 int) (r0, r1 int) {
+ // return .inst.g[int].f(.dictN, rcvr, a0, a1)
+ // }
+ //
+ // Also method values
+ //
+ // var x g[int]
+ //
+ // Any reference to x.f not directly called gets rewritten to
+ //
+ // .dictN := ... dictionary to use ...
+ // x2 := x
+ // func(a0, a1 int) (r0, r1 int) {
+ // return .inst.g[int].f(.dictN, x2, a0, a1)
+ // }
+
+ // Make a new internal function.
+ fn, formalParams, formalResults := startClosure(pos, outer, typ)
+
+ // This is the dictionary we want to use.
+ // It may be a constant, or it may be a dictionary acquired from the outer function's dictionary.
+ // For the latter, dictVar is a variable in the outer function's scope, set to the subdictionary
+ // read from the outer function's dictionary.
+ var dictVar *ir.Name
+ var dictAssign *ir.AssignStmt
+ if outer != nil {
+ // Note: for now this is a compile-time constant, so we don't really need a closure
+ // to capture it (a wrapper function would work just as well). But eventually it
+ // will be a read of a subdictionary from the parent dictionary.
+ dictVar = ir.NewNameAt(pos, typecheck.LookupNum(".dict", g.dnum))
+ g.dnum++
+ dictVar.Class = ir.PAUTO
+ typed(types.Types[types.TUINTPTR], dictVar)
+ dictVar.Curfn = outer
+ dictAssign = ir.NewAssignStmt(pos, dictVar, dictValue)
+ dictAssign.SetTypecheck(1)
+ dictVar.Defn = dictAssign
+ outer.Dcl = append(outer.Dcl, dictVar)
+ }
+ // Assign the receiver to a temporary.
+ var rcvrVar *ir.Name
+ var rcvrAssign ir.Node
+ if rcvrValue != nil {
+ rcvrVar = ir.NewNameAt(pos, typecheck.LookupNum(".rcvr", g.dnum))
+ g.dnum++
+ rcvrVar.Class = ir.PAUTO
+ typed(rcvrValue.Type(), rcvrVar)
+ rcvrVar.Curfn = outer
+ rcvrAssign = ir.NewAssignStmt(pos, rcvrVar, rcvrValue)
+ rcvrAssign.SetTypecheck(1)
+ rcvrVar.Defn = rcvrAssign
+ outer.Dcl = append(outer.Dcl, rcvrVar)
+ }
+
+ // Build body of closure. This involves just calling the wrapped function directly
+ // with the additional dictionary argument.
+
+ // First, figure out the dictionary argument.
+ var dict2Var ir.Node
+ if usingSubdict {
+ // Capture sub-dictionary calculated in the outer function
+ dict2Var = ir.CaptureName(pos, fn, dictVar)
+ typed(types.Types[types.TUINTPTR], dict2Var)
+ } else {
+ // Static dictionary, so can be used directly in the closure
+ dict2Var = dictValue
+ }
+ // Also capture the receiver variable.
+ var rcvr2Var *ir.Name
+ if rcvrValue != nil {
+ rcvr2Var = ir.CaptureName(pos, fn, rcvrVar)
+ }
+
+ // Build arguments to call inside the closure.
+ var args []ir.Node
+
+ // First the dictionary argument.
+ args = append(args, dict2Var)
+ // Then the receiver.
+ if rcvrValue != nil {
+ args = append(args, rcvr2Var)
+ }
+ // Then all the other arguments (including receiver for method expressions).
+ for i := 0; i < typ.NumParams(); i++ {
+ if x.Op() == ir.OMETHEXPR && i == 0 {
+ // If we are doing a method expression, we need to
+ // explicitly traverse any embedded fields in the receiver
+ // argument in order to call the method instantiation.
+ arg0 := formalParams[0].Nname.(ir.Node)
+ arg0 = typecheck.AddImplicitDots(ir.NewSelectorExpr(base.Pos, ir.OXDOT, arg0, x.(*ir.SelectorExpr).Sel)).X
+ if valueMethod && arg0.Type().IsPtr() {
+ // For handling the (*T).M case: if we have a pointer
+ // receiver after following all the embedded fields,
+ // but it's a value method, add a star operator.
+ arg0 = ir.NewStarExpr(arg0.Pos(), arg0)
+ }
+ args = append(args, arg0)
+ } else {
+ args = append(args, formalParams[i].Nname.(*ir.Name))
+ }
+ }
+
+ // Build call itself.
+ var innerCall ir.Node = ir.NewCallExpr(pos, ir.OCALL, target.Nname, args)
+ if len(formalResults) > 0 {
+ innerCall = ir.NewReturnStmt(pos, []ir.Node{innerCall})
+ }
+ // Finish building body of closure.
+ ir.CurFunc = fn
+ // TODO: set types directly here instead of using typecheck.Stmt
+ typecheck.Stmt(innerCall)
+ ir.CurFunc = nil
+ fn.Body = []ir.Node{innerCall}
+
+ // We're all done with the captured dictionary (and receiver, for method values).
+ ir.FinishCaptureNames(pos, outer, fn)
+
+ // Make a closure referencing our new internal function.
+ c := ir.UseClosure(fn.OClosure, g.target)
+ var init []ir.Node
+ if outer != nil {
+ init = append(init, dictAssign)
+ }
+ if rcvrValue != nil {
+ init = append(init, rcvrAssign)
+ }
+ return ir.InitExpr(init, c)
}
-// instantiateMethods instantiates all the methods of all fully-instantiated
-// generic types that have been added to g.instTypeList.
+// instantiateMethods instantiates all the methods (and associated dictionaries) of
+// all fully-instantiated generic types that have been added to g.instTypeList.
func (g *irgen) instantiateMethods() {
for i := 0; i < len(g.instTypeList); i++ {
typ := g.instTypeList[i]
- // Get the base generic type by looking up the symbol of the
- // generic (uninstantiated) name.
- baseSym := typ.Sym().Pkg.Lookup(genericTypeName(typ.Sym()))
+ assert(!typ.HasShape())
+ // Mark runtime type as needed, since this ensures that the
+ // compiler puts out the needed DWARF symbols, when this
+ // instantiated type has a different package from the local
+ // package.
+ typecheck.NeedRuntimeType(typ)
+ // Lookup the method on the base generic type, since methods may
+ // not be set on imported instantiated types.
+ baseSym := typ.OrigSym
baseType := baseSym.Def.(*ir.Name).Type()
- for j, m := range typ.Methods().Slice() {
- name := m.Nname.(*ir.Name)
- targs := make([]ir.Node, len(typ.RParams()))
- for k, targ := range typ.RParams() {
- targs[k] = ir.TypeNode(targ)
- }
+ for j := range typ.Methods().Slice() {
baseNname := baseType.Methods().Slice()[j].Nname.(*ir.Name)
- name.Func = g.getInstantiation(baseNname, targs, true)
+ // Eagerly generate the instantiations and dictionaries that implement these methods.
+ // We don't use the instantiations here, just generate them (and any
+ // further instantiations those generate, etc.).
+ // Note that we don't set the Func for any methods on instantiated
+ // types. Their signatures don't match so that would be confusing.
+ // Direct method calls go directly to the instantiations, implemented above.
+ // Indirect method calls use wrappers generated in reflectcall. Those wrappers
+ // will use these instantiations if they are needed (for interface tables or reflection).
+ _ = g.getInstantiation(baseNname, typ.RParams(), true)
+ _ = g.getDictionarySym(baseNname, typ.RParams(), true)
}
}
g.instTypeList = nil
}
-// genericSym returns the name of the base generic type for the type named by
-// sym. It simply returns the name obtained by removing everything after the
-// first bracket ("[").
-func genericTypeName(sym *types.Sym) string {
- return sym.Name[0:strings.Index(sym.Name, "[")]
-}
-
-// getInstantiationForNode returns the function/method instantiation for a
-// InstExpr node inst.
-func (g *irgen) getInstantiationForNode(inst *ir.InstExpr) *ir.Func {
+// getInstNameNode returns the name node for the method or function being instantiated, and a bool which is true if a method is being instantiated.
+func (g *irgen) getInstNameNode(inst *ir.InstExpr) (*ir.Name, bool) {
if meth, ok := inst.X.(*ir.SelectorExpr); ok {
- return g.getInstantiation(meth.Selection.Nname.(*ir.Name), inst.Targs, true)
+ return meth.Selection.Nname.(*ir.Name), true
} else {
- return g.getInstantiation(inst.X.(*ir.Name), inst.Targs, false)
+ return inst.X.(*ir.Name), false
}
}
-// getInstantiation gets the instantiantion of the function or method nameNode
-// with the type arguments targs. If the instantiated function is not already
-// cached, then it calls genericSubst to create the new instantiation.
-func (g *irgen) getInstantiation(nameNode *ir.Name, targs []ir.Node, isMeth bool) *ir.Func {
- sym := makeInstName(nameNode.Sym(), targs, isMeth)
- st := g.target.Stencils[sym]
- if st == nil {
- // If instantiation doesn't exist yet, create it and add
- // to the list of decls.
- st = g.genericSubst(sym, nameNode, targs, isMeth)
- g.target.Stencils[sym] = st
- g.target.Decls = append(g.target.Decls, st)
- if base.Flag.W > 1 {
- ir.Dump(fmt.Sprintf("\nstenciled %v", st), st)
+// getDictOrSubdict returns, for a method/function call or reference (node n) in an
+// instantiation (described by instInfo), a node which is accessing a sub-dictionary
+// or main/static dictionary, as needed, and also returns a boolean indicating if a
+// sub-dictionary was accessed. nameNode is the particular function or method being
+// called/referenced, and targs are the type arguments.
+func (g *irgen) getDictOrSubdict(declInfo *instInfo, n ir.Node, nameNode *ir.Name, targs []*types.Type, isMeth bool) (ir.Node, bool) {
+ var dict ir.Node
+ usingSubdict := false
+ if declInfo != nil {
+ // Get the dictionary arg via sub-dictionary reference
+ entry, ok := declInfo.dictEntryMap[n]
+ // If the entry is not found, it may be that this node did not have
+ // any type args that depend on type params, so we need a main
+ // dictionary, not a sub-dictionary.
+ if ok {
+ dict = getDictionaryEntry(n.Pos(), declInfo.dictParam, entry, declInfo.dictLen)
+ usingSubdict = true
}
}
- return st
+ if !usingSubdict {
+ dict = g.getDictionaryValue(nameNode, targs, isMeth)
+ }
+ return dict, usingSubdict
}
-// makeInstName makes the unique name for a stenciled generic function or method,
-// based on the name of the function fy=nsym and the targs. It replaces any
-// existing bracket type list in the name. makeInstName asserts that fnsym has
-// brackets in its name if and only if hasBrackets is true.
-// TODO(danscales): remove the assertions and the hasBrackets argument later.
-//
-// Names of declared generic functions have no brackets originally, so hasBrackets
-// should be false. Names of generic methods already have brackets, since the new
-// type parameter is specified in the generic type of the receiver (e.g. func
-// (func (v *value[T]).set(...) { ... } has the original name (*value[T]).set.
-//
-// The standard naming is something like: 'genFn[int,bool]' for functions and
-// '(*genType[int,bool]).methodName' for methods
-func makeInstName(fnsym *types.Sym, targs []ir.Node, hasBrackets bool) *types.Sym {
- b := bytes.NewBufferString("")
- name := fnsym.Name
- i := strings.Index(name, "[")
- assert(hasBrackets == (i >= 0))
- if i >= 0 {
- b.WriteString(name[0:i])
- } else {
- b.WriteString(name)
+// checkFetchBody checks if a generic body can be fetched, but hasn't been loaded
+// yet. If so, it imports the body.
+func checkFetchBody(nameNode *ir.Name) {
+ if nameNode.Func.Body == nil && nameNode.Func.Inl != nil {
+ // If there is no body yet but Func.Inl exists, then we can
+ // import the whole generic body.
+ assert(nameNode.Func.Inl.Cost == 1 && nameNode.Sym().Pkg != types.LocalPkg)
+ typecheck.ImportBody(nameNode.Func)
+ assert(nameNode.Func.Inl.Body != nil)
+ nameNode.Func.Body = nameNode.Func.Inl.Body
+ nameNode.Func.Dcl = nameNode.Func.Inl.Dcl
}
- b.WriteString("[")
- for i, targ := range targs {
- if i > 0 {
- b.WriteString(",")
+}
+
+// getInstantiation gets the instantiation and dictionary of the function or method nameNode
+// with the type arguments shapes. If the instantiated function is not already
+// cached, then it calls genericSubst to create the new instantiation.
+func (g *irgen) getInstantiation(nameNode *ir.Name, shapes []*types.Type, isMeth bool) *ir.Func {
+ checkFetchBody(nameNode)
+
+ // Convert any non-shape type arguments to their shape, so we can reduce the
+ // number of instantiations we have to generate. You can actually have a mix
+ // of shape and non-shape arguments, because of inferred or explicitly
+ // specified concrete type args.
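+ // s1 is allocated copy-on-write: it stays nil (and shapes is used
+ // unchanged) unless at least one argument actually needs shapifying.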
+ var s1 []*types.Type
+ for i, t := range shapes {
+ if !t.HasShape() {
+ if s1 == nil {
+ s1 = make([]*types.Type, len(shapes))
+ copy(s1[0:i], shapes[0:i])
+ }
+ s1[i] = typecheck.Shapify(t)
+ } else if s1 != nil {
+ s1[i] = shapes[i]
}
- b.WriteString(targ.Type().String())
}
- b.WriteString("]")
- if i >= 0 {
- i2 := strings.Index(name[i:], "]")
- assert(i2 >= 0)
- b.WriteString(name[i+i2+1:])
+ if s1 != nil {
+ shapes = s1
}
- return typecheck.Lookup(b.String())
+
+ sym := typecheck.MakeInstName(nameNode.Sym(), shapes, isMeth)
+ info := g.instInfoMap[sym]
+ if info == nil {
+ // If instantiation doesn't exist yet, create it and add
+ // to the list of decls.
+ gfInfo := g.getGfInfo(nameNode)
+ info = &instInfo{
+ gf: nameNode,
+ gfInfo: gfInfo,
+ startSubDict: len(shapes) + len(gfInfo.derivedTypes),
+ startItabConv: len(shapes) + len(gfInfo.derivedTypes) + len(gfInfo.subDictCalls),
+ dictLen: len(shapes) + len(gfInfo.derivedTypes) + len(gfInfo.subDictCalls) + len(gfInfo.itabConvs),
+ dictEntryMap: make(map[ir.Node]int),
+ }
+ // genericSubst fills in info.dictParam and info.dictEntryMap.
+ st := g.genericSubst(sym, nameNode, shapes, isMeth, info)
+ info.fun = st
+ g.instInfoMap[sym] = info
+ // This ensures that the linker drops duplicates of this instantiation.
+ // All just works!
+ st.SetDupok(true)
+ g.target.Decls = append(g.target.Decls, st)
+ if base.Flag.W > 1 {
+ ir.Dump(fmt.Sprintf("\nstenciled %v", st), st)
+ }
+ }
+ return info.fun
}
// Struct containing info needed for doing the substitution as we create the
@@ -243,32 +617,30 @@ type subster struct {
g *irgen
isMethod bool // If a method is being instantiated
newf *ir.Func // Func node for the new stenciled function
- tparams []*types.Field
- targs []ir.Node
- // The substitution map from name nodes in the generic function to the
- // name nodes in the new stenciled function.
- vars map[*ir.Name]*ir.Name
+ ts typecheck.Tsubster
+ info *instInfo // Place to put extra info in the instantiation
}
// genericSubst returns a new function with name newsym. The function is an
// instantiation of a generic function or method specified by namedNode with type
-// args targs. For a method with a generic receiver, it returns an instantiated
-// function type where the receiver becomes the first parameter. Otherwise the
-// instantiated method would still need to be transformed by later compiler
-// phases.
-func (g *irgen) genericSubst(newsym *types.Sym, nameNode *ir.Name, targs []ir.Node, isMethod bool) *ir.Func {
- var tparams []*types.Field
+// args shapes. For a method with a generic receiver, it returns an instantiated
+// function type where the receiver becomes the first parameter. For either a generic
+// method or function, a dictionary parameter is added as the very first
+// parameter. genericSubst fills in info.dictParam and info.dictEntryMap.
+func (g *irgen) genericSubst(newsym *types.Sym, nameNode *ir.Name, shapes []*types.Type, isMethod bool, info *instInfo) *ir.Func {
+ var tparams []*types.Type
if isMethod {
// Get the type params from the method receiver (after skipping
// over any pointer)
recvType := nameNode.Type().Recv().Type
recvType = deref(recvType)
- tparams = make([]*types.Field, len(recvType.RParams()))
- for i, rparam := range recvType.RParams() {
- tparams[i] = types.NewField(src.NoXPos, nil, rparam)
- }
+ tparams = recvType.RParams()
} else {
- tparams = nameNode.Type().TParams().Fields().Slice()
+ fields := nameNode.Type().TParams().Fields().Slice()
+ tparams = make([]*types.Type, len(fields))
+ for i, f := range fields {
+ tparams[i] = f.Type
+ }
}
gf := nameNode.Func
// Pos of the instantiated function is same as the generic function
@@ -283,78 +655,214 @@ func (g *irgen) genericSubst(newsym *types.Sym, nameNode *ir.Name, targs []ir.No
// depend on ir.CurFunc being set.
ir.CurFunc = newf
- assert(len(tparams) == len(targs))
+ assert(len(tparams) == len(shapes))
subst := &subster{
g: g,
isMethod: isMethod,
newf: newf,
- tparams: tparams,
- targs: targs,
- vars: make(map[*ir.Name]*ir.Name),
+ info: info,
+ ts: typecheck.Tsubster{
+ Tparams: tparams,
+ Targs: shapes,
+ Vars: make(map[*ir.Name]*ir.Name),
+ },
}
- newf.Dcl = make([]*ir.Name, len(gf.Dcl))
- for i, n := range gf.Dcl {
- newf.Dcl[i] = subst.node(n).(*ir.Name)
+ newf.Dcl = make([]*ir.Name, 0, len(gf.Dcl)+1)
+
+ // Create the needed dictionary param
+ dictionarySym := newsym.Pkg.Lookup(".dict")
+ dictionaryType := types.Types[types.TUINTPTR]
+ dictionaryName := ir.NewNameAt(gf.Pos(), dictionarySym)
+ typed(dictionaryType, dictionaryName)
+ dictionaryName.Class = ir.PPARAM
+ dictionaryName.Curfn = newf
+ newf.Dcl = append(newf.Dcl, dictionaryName)
+ for _, n := range gf.Dcl {
+ if n.Sym().Name == ".dict" {
+ panic("already has dictionary")
+ }
+ newf.Dcl = append(newf.Dcl, subst.localvar(n))
}
+ dictionaryArg := types.NewField(gf.Pos(), dictionarySym, dictionaryType)
+ dictionaryArg.Nname = dictionaryName
+ info.dictParam = dictionaryName
+
+ // We add the dictionary as the first parameter in the function signature.
+ // We also transform a method type to the corresponding function type
+ // (make the receiver be the next parameter after the dictionary).
+ oldt := nameNode.Type()
+ var args []*types.Field
+ args = append(args, dictionaryArg)
+ args = append(args, oldt.Recvs().FieldSlice()...)
+ args = append(args, oldt.Params().FieldSlice()...)
- // Ugly: we have to insert the Name nodes of the parameters/results into
+ // Replace the types in the function signature via subst.fields.
+ // Ugly: also, we have to insert the Name nodes of the parameters/results into
// the function type. The current function type has no Nname fields set,
// because it came via conversion from the types2 type.
- oldt := nameNode.Type()
- // We also transform a generic method type to the corresponding
- // instantiated function type where the receiver is the first parameter.
newt := types.NewSignature(oldt.Pkg(), nil, nil,
- subst.fields(ir.PPARAM, append(oldt.Recvs().FieldSlice(), oldt.Params().FieldSlice()...), newf.Dcl),
+ subst.fields(ir.PPARAM, args, newf.Dcl),
subst.fields(ir.PPARAMOUT, oldt.Results().FieldSlice(), newf.Dcl))
- newf.Nname.SetType(newt)
+ typed(newt, newf.Nname)
ir.MarkFunc(newf.Nname)
newf.SetTypecheck(1)
- newf.Nname.SetTypecheck(1)
// Make sure name/type of newf is set before substituting the body.
newf.Body = subst.list(gf.Body)
+
+ // Add code to check that the dictionary is correct.
+ // TODO: must be adjusted to deal with shapes, but will go away soon when we move
+ // to many->1 shape to concrete mapping.
+ // newf.Body.Prepend(subst.checkDictionary(dictionaryName, shapes)...)
+
ir.CurFunc = savef
+ // Add any new, fully instantiated types seen during the substitution to
+ // g.instTypeList.
+ g.instTypeList = append(g.instTypeList, subst.ts.InstTypeList...)
+
+ if doubleCheck {
+ okConvs := map[ir.Node]bool{}
+ ir.Visit(newf, func(n ir.Node) {
+ if n.Op() == ir.OIDATA {
+ // IDATA(OCONVIFACE(x)) is ok, as we don't use the type of x.
+ // TODO: use some other op besides OCONVIFACE. ONEW might work
+ // (with appropriate direct vs. indirect interface cases).
+ okConvs[n.(*ir.UnaryExpr).X] = true
+ }
+ if n.Op() == ir.OCONVIFACE && !okConvs[n] {
+ c := n.(*ir.ConvExpr)
+ if c.X.Type().HasShape() {
+ ir.Dump("BAD FUNCTION", newf)
+ ir.Dump("BAD CONVERSION", c)
+ base.Fatalf("converting shape type to interface")
+ }
+ }
+ })
+ }
return newf
}
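The net effect is easier to see in a hand-written analogue. A minimal sketch (hypothetical names such as dict and typeNameShaped; not what the compiler actually emits): the shared instantiation takes an extra leading dictionary argument carrying the type information that the shape type erases.

package p

import "reflect"

// dict is a stand-in for the compiler-generated dictionary: a table of type
// information (modeled here with reflect.Type) plus sub-dictionary pointers.
type dict struct {
	targ reflect.Type
}

// typeNameShaped plays the role of the shared, stenciled body: one copy serves
// every type argument with the same shape, and it consults the dictionary d
// rather than T itself for type identity.
func typeNameShaped[T any](d dict, x T) string {
	_ = x
	return d.targ.String()
}

// typeName is the user-visible generic function; each instantiation just binds
// the appropriate dictionary and forwards to the shared body.
func typeName[T any](x T) string {
	return typeNameShaped(dict{targ: reflect.TypeOf(x)}, x)
}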
-// node is like DeepCopy(), but creates distinct ONAME nodes, and also descends
-// into closures. It substitutes type arguments for type parameters in all the new
-// nodes.
+// localvar creates a new name node for the specified local variable and enters it
+// in subst.ts.Vars. It substitutes type arguments for type parameters in the type of
+// name as needed.
+func (subst *subster) localvar(name *ir.Name) *ir.Name {
+ m := ir.NewNameAt(name.Pos(), name.Sym())
+ if name.IsClosureVar() {
+ m.SetIsClosureVar(true)
+ }
+ m.SetType(subst.ts.Typ(name.Type()))
+ m.BuiltinOp = name.BuiltinOp
+ m.Curfn = subst.newf
+ m.Class = name.Class
+ assert(name.Class != ir.PEXTERN && name.Class != ir.PFUNC)
+ m.Func = name.Func
+ subst.ts.Vars[name] = m
+ m.SetTypecheck(1)
+ return m
+}
+
+// checkDictionary returns code that does runtime consistency checks
+// between the dictionary and the types it should contain.
+func (subst *subster) checkDictionary(name *ir.Name, targs []*types.Type) (code []ir.Node) {
+ if false {
+ return // checking turned off
+ }
+ // TODO: when moving to GCshape, this test will become harder. Call into
+ // runtime to check the expected shape is correct?
+ pos := name.Pos()
+ // Convert dictionary to *[N]uintptr
+ d := ir.NewConvExpr(pos, ir.OCONVNOP, types.Types[types.TUNSAFEPTR], name)
+ d.SetTypecheck(1)
+ d = ir.NewConvExpr(pos, ir.OCONVNOP, types.NewArray(types.Types[types.TUINTPTR], int64(len(targs))).PtrTo(), d)
+ d.SetTypecheck(1)
+ types.CheckSize(d.Type().Elem())
+
+ // Check that each type entry in the dictionary is correct.
+ for i, t := range targs {
+ if t.HasShape() {
+ // Check the concrete type, not the shape type.
+ base.Fatalf("shape type in dictionary %s %+v\n", name.Sym().Name, t)
+ }
+ want := reflectdata.TypePtr(t)
+ typed(types.Types[types.TUINTPTR], want)
+ deref := ir.NewStarExpr(pos, d)
+ typed(d.Type().Elem(), deref)
+ idx := ir.NewConstExpr(constant.MakeUint64(uint64(i)), name) // TODO: what to set orig to?
+ typed(types.Types[types.TUINTPTR], idx)
+ got := ir.NewIndexExpr(pos, deref, idx)
+ typed(types.Types[types.TUINTPTR], got)
+ cond := ir.NewBinaryExpr(pos, ir.ONE, want, got)
+ typed(types.Types[types.TBOOL], cond)
+ panicArg := ir.NewNilExpr(pos)
+ typed(types.NewInterface(types.LocalPkg, nil), panicArg)
+ then := ir.NewUnaryExpr(pos, ir.OPANIC, panicArg)
+ then.SetTypecheck(1)
+ x := ir.NewIfStmt(pos, cond, []ir.Node{then}, nil)
+ x.SetTypecheck(1)
+ code = append(code, x)
+ }
+ return
+}
+
+// getDictionaryEntry gets the i'th entry in the dictionary dict.
+func getDictionaryEntry(pos src.XPos, dict *ir.Name, i int, size int) ir.Node {
+ // Convert dictionary to *[N]uintptr
+ // All entries in the dictionary are pointers. They all point to static data, though, so we
+ // treat them as uintptrs so the GC doesn't need to keep track of them.
+ d := ir.NewConvExpr(pos, ir.OCONVNOP, types.Types[types.TUNSAFEPTR], dict)
+ d.SetTypecheck(1)
+ d = ir.NewConvExpr(pos, ir.OCONVNOP, types.NewArray(types.Types[types.TUINTPTR], int64(size)).PtrTo(), d)
+ d.SetTypecheck(1)
+ types.CheckSize(d.Type().Elem())
+
+ // Load entry i out of the dictionary.
+ deref := ir.NewStarExpr(pos, d)
+ typed(d.Type().Elem(), deref)
+ idx := ir.NewConstExpr(constant.MakeUint64(uint64(i)), dict) // TODO: what to set orig to?
+ typed(types.Types[types.TUINTPTR], idx)
+ r := ir.NewIndexExpr(pos, deref, idx)
+ typed(types.Types[types.TUINTPTR], r)
+ return r
+}
+
+// getDictionaryType returns a *runtime._type from the dictionary entry i (which
+// refers to a type param or a derived type that uses type params). It uses the
+// specified dictionary dictParam, rather than the one in info.dictParam.
+func getDictionaryType(info *instInfo, dictParam *ir.Name, pos src.XPos, i int) ir.Node {
+ if i < 0 || i >= info.startSubDict {
+ base.Fatalf("bad dict index %d", i)
+ }
+
+ r := getDictionaryEntry(pos, dictParam, i, info.startSubDict)
+ // change type of retrieved dictionary entry to *byte, which is the
+ // standard typing of a *runtime._type in the compiler
+ typed(types.Types[types.TUINT8].PtrTo(), r)
+ return r
+}
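Both helpers assume the dictionary layout built by getDictionarySym: a read-only array of uintptr slots holding, in order, the type-param entries, the derived-type entries, then sub-dictionary and itab pointers. A minimal standalone sketch of the same indexing (hypothetical helper, plain Go with unsafe rather than compiler IR):

package p

import "unsafe"

// dictEntry reads slot i of a dictionary with size total slots. Every slot is a
// uintptr pointing at static data (a *runtime._type, a sub-dictionary, or an
// itab), mirroring the OCONVNOP-to-*[N]uintptr plus index expression built above.
func dictEntry(dict unsafe.Pointer, i, size int) uintptr {
	if i < 0 || i >= size {
		panic("bad dictionary index")
	}
	return unsafe.Slice((*uintptr)(dict), size)[i]
}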
+
+// node is like DeepCopy(), but substitutes ONAME nodes based on subst.ts.vars, and
+// also descends into closures. It substitutes type arguments for type parameters
+// in all the new nodes.
func (subst *subster) node(n ir.Node) ir.Node {
// Use closure to capture all state needed by the ir.EditChildren argument.
var edit func(ir.Node) ir.Node
edit = func(x ir.Node) ir.Node {
switch x.Op() {
case ir.OTYPE:
- return ir.TypeNode(subst.typ(x.Type()))
+ return ir.TypeNode(subst.ts.Typ(x.Type()))
case ir.ONAME:
- name := x.(*ir.Name)
- if v := subst.vars[name]; v != nil {
+ if v := subst.ts.Vars[x.(*ir.Name)]; v != nil {
return v
}
- m := ir.NewNameAt(name.Pos(), name.Sym())
- if name.IsClosureVar() {
- m.SetIsClosureVar(true)
- }
- t := x.Type()
- if t == nil {
- assert(name.BuiltinOp != 0)
- } else {
- newt := subst.typ(t)
- m.SetType(newt)
- }
- m.BuiltinOp = name.BuiltinOp
- m.Curfn = subst.newf
- m.Class = name.Class
- m.Func = name.Func
- subst.vars[name] = m
- m.SetTypecheck(1)
- return m
+ return x
+ case ir.ONONAME:
+ // This handles the identifier in a type switch guard
+ fallthrough
case ir.OLITERAL, ir.ONIL:
if x.Sym() != nil {
return x
@@ -369,55 +877,66 @@ func (subst *subster) node(n ir.Node) ir.Node {
// an error.
_, isCallExpr := m.(*ir.CallExpr)
_, isStructKeyExpr := m.(*ir.StructKeyExpr)
- if !isCallExpr && !isStructKeyExpr && x.Op() != ir.OPANIC &&
+ _, isKeyExpr := m.(*ir.KeyExpr)
+ if !isCallExpr && !isStructKeyExpr && !isKeyExpr && x.Op() != ir.OPANIC &&
x.Op() != ir.OCLOSE {
base.Fatalf(fmt.Sprintf("Nil type for %v", x))
}
} else if x.Op() != ir.OCLOSURE {
- m.SetType(subst.typ(x.Type()))
+ m.SetType(subst.ts.Typ(x.Type()))
}
}
- ir.EditChildren(m, edit)
-
- if x.Typecheck() == 3 {
- // These are nodes whose transforms were delayed until
- // their instantiated type was known.
- m.SetTypecheck(1)
- if typecheck.IsCmp(x.Op()) {
- transformCompare(m.(*ir.BinaryExpr))
- } else {
- switch x.Op() {
- case ir.OSLICE, ir.OSLICE3:
- transformSlice(m.(*ir.SliceExpr))
-
- case ir.OADD:
- m = transformAdd(m.(*ir.BinaryExpr))
- case ir.OINDEX:
- transformIndex(m.(*ir.IndexExpr))
+ for i, de := range subst.info.gfInfo.subDictCalls {
+ if de == x {
+ // Remember the dictionary entry associated with this
+ // node in the instantiated function
+ // TODO: make sure this remains correct with respect to the
+ // transformations below.
+ subst.info.dictEntryMap[m] = subst.info.startSubDict + i
+ break
+ }
+ }
- case ir.OAS2:
- as2 := m.(*ir.AssignListStmt)
- transformAssign(as2, as2.Lhs, as2.Rhs)
+ ir.EditChildren(m, edit)
- case ir.OAS:
- as := m.(*ir.AssignStmt)
+ m.SetTypecheck(1)
+ if typecheck.IsCmp(x.Op()) {
+ transformCompare(m.(*ir.BinaryExpr))
+ } else {
+ switch x.Op() {
+ case ir.OSLICE, ir.OSLICE3:
+ transformSlice(m.(*ir.SliceExpr))
+
+ case ir.OADD:
+ m = transformAdd(m.(*ir.BinaryExpr))
+
+ case ir.OINDEX:
+ transformIndex(m.(*ir.IndexExpr))
+
+ case ir.OAS2:
+ as2 := m.(*ir.AssignListStmt)
+ transformAssign(as2, as2.Lhs, as2.Rhs)
+
+ case ir.OAS:
+ as := m.(*ir.AssignStmt)
+ if as.Y != nil {
+ // transformAssign doesn't handle the case
+ // of zeroing assignment of a dcl (rhs[0] is nil).
lhs, rhs := []ir.Node{as.X}, []ir.Node{as.Y}
transformAssign(as, lhs, rhs)
+ }
- case ir.OASOP:
- as := m.(*ir.AssignOpStmt)
- transformCheckAssign(as, as.X)
+ case ir.OASOP:
+ as := m.(*ir.AssignOpStmt)
+ transformCheckAssign(as, as.X)
- case ir.ORETURN:
- transformReturn(m.(*ir.ReturnStmt))
+ case ir.ORETURN:
+ transformReturn(m.(*ir.ReturnStmt))
- case ir.OSEND:
- transformSend(m.(*ir.SendStmt))
+ case ir.OSEND:
+ transformSend(m.(*ir.SendStmt))
- default:
- base.Fatalf("Unexpected node with Typecheck() == 3")
- }
}
}
@@ -445,11 +964,40 @@ func (subst *subster) node(n ir.Node) ir.Node {
// instantiated receiver type. We need to do this now,
// since the access/selection to the method for the real
// type is very different from the selection for the type
- // param. m will be transformed to an OCALLPART node. It
+ // param. m will be transformed to an OMETHVALUE node. It
// will be transformed to an ODOTMETH or ODOTINTER node if
// we find in the OCALL case below that the method value
// is actually called.
- transformDot(m.(*ir.SelectorExpr), false)
+ mse := m.(*ir.SelectorExpr)
+ if src := mse.X.Type(); src.IsShape() {
+ // The only dot operations on a shape type value are methods.
+ if mse.X.Op() == ir.OTYPE {
+ // Method expression T.M
+ m = subst.g.buildClosure2(subst, m, x)
+ // No need for transformDot - buildClosure2 has already
+ // transformed to OCALLINTER/ODOTINTER.
+ } else {
+ // Implement x.M as a conversion-to-bound-interface
+ // 1) convert x to the bound interface
+ // 2) call M on that interface
+ gsrc := x.(*ir.SelectorExpr).X.Type()
+ bound := gsrc.Bound()
+ dst := bound
+ if dst.HasTParam() {
+ dst = subst.ts.Typ(dst)
+ }
+ if src.IsInterface() {
+ // If type arg is an interface (unusual case),
+ // we do a type assert to the type bound.
+ mse.X = assertToBound(subst.info, subst.info.dictParam, m.Pos(), mse.X, bound, dst)
+ } else {
+ mse.X = convertUsingDictionary(subst.info, subst.info.dictParam, m.Pos(), mse.X, x, dst, gsrc)
+ }
+ transformDot(mse, false)
+ }
+ } else {
+ transformDot(mse, false)
+ }
m.SetTypecheck(1)
case ir.OCALL:
@@ -458,9 +1006,11 @@ func (subst *subster) node(n ir.Node) ir.Node {
case ir.OTYPE:
// Transform the conversion, now that we know the
// type argument.
- m = transformConvCall(m.(*ir.CallExpr))
+ m = transformConvCall(call)
+ // CONVIFACE transformation was already done in node2
+ assert(m.Op() != ir.OCONVIFACE)
- case ir.OCALLPART:
+ case ir.OMETHVALUE, ir.OMETHEXPR:
// Redo the transformation of OXDOT, now that we
// know the method value is being called. Then
// transform the call.
@@ -479,7 +1029,7 @@ func (subst *subster) node(n ir.Node) ir.Node {
name := call.X.Name()
if name.BuiltinOp != ir.OXXX {
switch name.BuiltinOp {
- case ir.OMAKE, ir.OREAL, ir.OIMAG, ir.OLEN, ir.OCAP, ir.OAPPEND:
+ case ir.OMAKE, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.ODELETE:
// Transform these builtins now that we
// know the type of the args.
m = transformBuiltin(call)
@@ -506,41 +1056,127 @@ func (subst *subster) node(n ir.Node) ir.Node {
}
case ir.OCLOSURE:
+ // We're going to create a new closure from scratch, so clear m
+ // to avoid accidentally using the result of ir.Copy until we reassign it.
+ m = nil
+
x := x.(*ir.ClosureExpr)
// Need to duplicate x.Func.Nname, x.Func.Dcl, x.Func.ClosureVars, and
// x.Func.Body.
oldfn := x.Func
- newfn := ir.NewFunc(oldfn.Pos())
- if oldfn.ClosureCalled() {
- newfn.SetClosureCalled(true)
- }
- newfn.SetIsHiddenClosure(true)
- m.(*ir.ClosureExpr).Func = newfn
- // Closure name can already have brackets, if it derives
- // from a generic method
- newsym := makeInstName(oldfn.Nname.Sym(), subst.targs, subst.isMethod)
- newfn.Nname = ir.NewNameAt(oldfn.Nname.Pos(), newsym)
- newfn.Nname.Func = newfn
- newfn.Nname.Defn = newfn
- ir.MarkFunc(newfn.Nname)
- newfn.OClosure = m.(*ir.ClosureExpr)
+ newfn := ir.NewClosureFunc(oldfn.Pos(), subst.newf != nil)
+ ir.NameClosure(newfn.OClosure, subst.newf)
saveNewf := subst.newf
ir.CurFunc = newfn
subst.newf = newfn
newfn.Dcl = subst.namelist(oldfn.Dcl)
- newfn.ClosureVars = subst.namelist(oldfn.ClosureVars)
- typed(subst.typ(oldfn.Nname.Type()), newfn.Nname)
- typed(newfn.Nname.Type(), m)
+ // Make a closure variable for the dictionary of the
+ // containing function.
+ cdict := ir.CaptureName(oldfn.Pos(), newfn, subst.info.dictParam)
+ typed(types.Types[types.TUINTPTR], cdict)
+ ir.FinishCaptureNames(oldfn.Pos(), saveNewf, newfn)
+ newfn.ClosureVars = append(newfn.ClosureVars, subst.namelist(oldfn.ClosureVars)...)
+
+ // Create inst info for the instantiated closure. The dict
+ // param is the closure variable for the dictionary of the
+ // outer function. Since the dictionary is shared, use the
+ // same entries for startSubDict, dictLen, dictEntryMap.
+ cinfo := &instInfo{
+ fun: newfn,
+ dictParam: cdict,
+ gf: subst.info.gf,
+ gfInfo: subst.info.gfInfo,
+ startSubDict: subst.info.startSubDict,
+ startItabConv: subst.info.startItabConv,
+ dictLen: subst.info.dictLen,
+ dictEntryMap: subst.info.dictEntryMap,
+ }
+ subst.g.instInfoMap[newfn.Nname.Sym()] = cinfo
+
+ typed(subst.ts.Typ(oldfn.Nname.Type()), newfn.Nname)
+ typed(newfn.Nname.Type(), newfn.OClosure)
newfn.SetTypecheck(1)
+ outerinfo := subst.info
+ subst.info = cinfo
// Make sure type of closure function is set before doing body.
newfn.Body = subst.list(oldfn.Body)
+ subst.info = outerinfo
subst.newf = saveNewf
ir.CurFunc = saveNewf
- subst.g.target.Decls = append(subst.g.target.Decls, newfn)
+ m = ir.UseClosure(newfn.OClosure, subst.g.target)
+ m.(*ir.ClosureExpr).SetInit(subst.list(x.Init()))
+
+ case ir.OCONVIFACE:
+ x := x.(*ir.ConvExpr)
+ // Note: x's argument is still typed as a type parameter.
+ // m's argument now has an instantiated type.
+ if x.X.Type().HasTParam() {
+ m = convertUsingDictionary(subst.info, subst.info.dictParam, m.Pos(), m.(*ir.ConvExpr).X, x, m.Type(), x.X.Type())
+ }
+ case ir.ODOTTYPE, ir.ODOTTYPE2:
+ dt := m.(*ir.TypeAssertExpr)
+ var rt ir.Node
+ if dt.Type().IsInterface() || dt.X.Type().IsEmptyInterface() {
+ ix := findDictType(subst.info, x.Type())
+ assert(ix >= 0)
+ rt = getDictionaryType(subst.info, subst.info.dictParam, dt.Pos(), ix)
+ } else {
+ // nonempty interface to noninterface. Need an itab.
+ ix := -1
+ for i, ic := range subst.info.gfInfo.itabConvs {
+ if ic == x {
+ ix = subst.info.startItabConv + i
+ break
+ }
+ }
+ assert(ix >= 0)
+ rt = getDictionaryEntry(dt.Pos(), subst.info.dictParam, ix, subst.info.dictLen)
+ }
+ op := ir.ODYNAMICDOTTYPE
+ if x.Op() == ir.ODOTTYPE2 {
+ op = ir.ODYNAMICDOTTYPE2
+ }
+ m = ir.NewDynamicTypeAssertExpr(dt.Pos(), op, dt.X, rt)
+ m.SetType(dt.Type())
+ m.SetTypecheck(1)
+ case ir.OCASE:
+ if _, ok := x.(*ir.CommClause); ok {
+ // This is not a type switch. TODO: Should we use an OSWITCH case here instead of OCASE?
+ break
+ }
+ x := x.(*ir.CaseClause)
+ m := m.(*ir.CaseClause)
+ for i, c := range x.List {
+ if c.Op() == ir.OTYPE && c.Type().HasTParam() {
+ // Use a *runtime._type for the dynamic type.
+ ix := findDictType(subst.info, c.Type())
+ assert(ix >= 0)
+ dt := ir.NewDynamicType(c.Pos(), getDictionaryEntry(c.Pos(), subst.info.dictParam, ix, subst.info.dictLen))
+
+ // For type switch from nonempty interfaces to non-interfaces, we need an itab as well.
+ if !m.List[i].Type().IsInterface() {
+ if _, ok := subst.info.gfInfo.type2switchType[c]; ok {
+ // Type switch from nonempty interface. We need a *runtime.itab
+ // for the dynamic type.
+ ix := -1
+ for i, ic := range subst.info.gfInfo.itabConvs {
+ if ic == c {
+ ix = subst.info.startItabConv + i
+ break
+ }
+ }
+ assert(ix >= 0)
+ dt.ITab = getDictionaryEntry(c.Pos(), subst.info.dictParam, ix, subst.info.dictLen)
+ }
+ }
+ typed(m.List[i].Type(), dt)
+ m.List[i] = dt
+ }
+ }
}
return m
}
@@ -548,10 +1184,73 @@ func (subst *subster) node(n ir.Node) ir.Node {
return edit(n)
}
+// findDictType looks for type t among the typeparams and derived types of the
+// generic function info.gfInfo and returns the index of the dictionary entry
+// with the correct concrete type, or -1 if t is not found.
+func findDictType(info *instInfo, t *types.Type) int {
+ for i, dt := range info.gfInfo.tparams {
+ if dt == t {
+ return i
+ }
+ }
+ for i, dt := range info.gfInfo.derivedTypes {
+ if types.Identical(dt, t) {
+ return i + len(info.gfInfo.tparams)
+ }
+ }
+ return -1
+}
+
+// convertUsingDictionary converts value v from instantiated type src to an interface
+// type dst, by returning a new set of nodes that make use of a dictionary entry. src
+// is the generic (not shape) type, and gn is the original generic node of the
+// CONVIFACE node or XDOT node (for a bound method call) that is causing the
+// conversion.
+func convertUsingDictionary(info *instInfo, dictParam *ir.Name, pos src.XPos, v ir.Node, gn ir.Node, dst, src *types.Type) ir.Node {
+ assert(src.HasTParam())
+ assert(dst.IsInterface())
+
+ var rt ir.Node
+ if !dst.IsEmptyInterface() {
+ // We should have an itab entry in the dictionary. Using this itab
+ // will be more efficient than converting to an empty interface first
+ // and then type asserting to dst.
+ ix := -1
+ for i, ic := range info.gfInfo.itabConvs {
+ if ic == gn {
+ ix = info.startItabConv + i
+ break
+ }
+ }
+ assert(ix >= 0)
+ rt = getDictionaryEntry(pos, dictParam, ix, info.dictLen)
+ } else {
+ ix := findDictType(info, src)
+ assert(ix >= 0)
+ // Load the actual runtime._type of the type parameter from the dictionary.
+ rt = getDictionaryType(info, dictParam, pos, ix)
+ }
+
+ // Figure out what the data field of the interface will be.
+ var data ir.Node
+ if v.Type().IsInterface() {
+ data = ir.NewUnaryExpr(pos, ir.OIDATA, v)
+ } else {
+ data = ir.NewConvExpr(pos, ir.OCONVIDATA, nil, v)
+ }
+ typed(types.Types[types.TUNSAFEPTR], data)
+
+ // Build an interface from the type and data parts.
+ var i ir.Node = ir.NewBinaryExpr(pos, ir.OEFACE, rt, data)
+ typed(dst, i)
+ return i
+}
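The OEFACE node built here is just the two-word interface header assembled by hand: a type (or itab) word taken from the dictionary and a data word from OIDATA/OCONVIDATA. An illustrative peek at that representation (depends on the gc runtime's internal layout; for illustration only, not a stable API):

package main

import (
	"fmt"
	"unsafe"
)

// eface mirrors the gc runtime's empty-interface header: OEFACE supplies the
// type word (rt) and OIDATA/OCONVIDATA supply the data word.
type eface struct {
	typ  unsafe.Pointer
	data unsafe.Pointer
}

func main() {
	var x interface{} = 42
	e := *(*eface)(unsafe.Pointer(&x))
	fmt.Printf("type word %p, data word %p\n", e.typ, e.data)
}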
+
func (subst *subster) namelist(l []*ir.Name) []*ir.Name {
s := make([]*ir.Name, len(l))
for i, n := range l {
- s[i] = subst.node(n).(*ir.Name)
+ s[i] = subst.localvar(n)
if n.Defn != nil {
s[i].Defn = subst.node(n.Defn)
}
@@ -570,348 +1269,701 @@ func (subst *subster) list(l []ir.Node) []ir.Node {
return s
}
-// tstruct substitutes type params in types of the fields of a structure type. For
-// each field, if Nname is set, tstruct also translates the Nname using
-// subst.vars, if Nname is in subst.vars. To always force the creation of a new
-// (top-level) struct, regardless of whether anything changed with the types or
-// names of the struct's fields, set force to true.
-func (subst *subster) tstruct(t *types.Type, force bool) *types.Type {
- if t.NumFields() == 0 {
- if t.HasTParam() {
- // For an empty struct, we need to return a new type,
- // since it may now be fully instantiated (HasTParam
- // becomes false).
- return types.NewStruct(t.Pkg(), nil)
- }
- return t
- }
- var newfields []*types.Field
- if force {
- newfields = make([]*types.Field, t.NumFields())
- }
- for i, f := range t.Fields().Slice() {
- t2 := subst.typ(f.Type)
- if (t2 != f.Type || f.Nname != nil) && newfields == nil {
- newfields = make([]*types.Field, t.NumFields())
- for j := 0; j < i; j++ {
- newfields[j] = t.Field(j)
- }
- }
- if newfields != nil {
- // TODO(danscales): make sure this works for the field
- // names of embedded types (which should keep the name of
- // the type param, not the instantiated type).
- newfields[i] = types.NewField(f.Pos, f.Sym, t2)
- if f.Nname != nil {
- // f.Nname may not be in subst.vars[] if this is
- // a function name or a function instantiation type
- // that we are translating
- v := subst.vars[f.Nname.(*ir.Name)]
- // Be careful not to put a nil var into Nname,
- // since Nname is an interface, so it would be a
- // non-nil interface.
- if v != nil {
- newfields[i].Nname = v
- }
- }
+// fields sets the Nname field for the Field nodes inside a type signature, based
+// on the corresponding in/out parameters in dcl. It depends on the in and out
+// parameters being in order in dcl.
+func (subst *subster) fields(class ir.Class, oldfields []*types.Field, dcl []*ir.Name) []*types.Field {
+ // Find the starting index in dcl of declarations of the class (either
+ // PPARAM or PPARAMOUT).
+ var i int
+ for i = range dcl {
+ if dcl[i].Class == class {
+ break
}
}
- if newfields != nil {
- return types.NewStruct(t.Pkg(), newfields)
+
+ // Create newfields nodes that are copies of the oldfields nodes, but
+ // with substitution for any type params, and with Nname set to be the node in
+ // Dcl for the corresponding PPARAM or PPARAMOUT.
+ newfields := make([]*types.Field, len(oldfields))
+ for j := range oldfields {
+ newfields[j] = oldfields[j].Copy()
+ newfields[j].Type = subst.ts.Typ(oldfields[j].Type)
+ // A PPARAM field will be missing from dcl if its name is
+ // unspecified or specified as "_". So, we compare the dcl sym
+ // with the field sym (or sym of the field's Nname node). (Unnamed
+ // results still have a name like ~r2 in their Nname node.) If
+ // they don't match, this dcl (if there is one left) must apply to
+ // a later field.
+ if i < len(dcl) && (dcl[i].Sym() == oldfields[j].Sym ||
+ (oldfields[j].Nname != nil && dcl[i].Sym() == oldfields[j].Nname.Sym())) {
+ newfields[j].Nname = dcl[i]
+ i++
+ }
+ }
+ return newfields
+}
+
+// deref does a single deref of type t, if it is a pointer type.
+func deref(t *types.Type) *types.Type {
+ if t.IsPtr() {
+ return t.Elem()
}
return t
+}
+// markTypeUsed marks type t as used in order to help avoid dead-code elimination of
+// needed methods.
+func markTypeUsed(t *types.Type, lsym *obj.LSym) {
+ if t.IsInterface() {
+ // Mark all the methods of the interface as used.
+ // TODO: we should really only mark the interface methods
+ // that are actually called in the application.
+ for i := range t.AllMethods().Slice() {
+ reflectdata.MarkUsedIfaceMethodIndex(lsym, t, i)
+ }
+ } else {
+ // TODO: This is somewhat overkill, we really only need it
+ // for types that are put into interfaces.
+ reflectdata.MarkTypeUsedInInterface(t, lsym)
+ }
}
-// tinter substitutes type params in types of the methods of an interface type.
-func (subst *subster) tinter(t *types.Type) *types.Type {
- if t.Methods().Len() == 0 {
- return t
+// getDictionarySym returns the symbol of the dictionary for the named generic
+// function or method gf, instantiated with the type arguments targs.
+func (g *irgen) getDictionarySym(gf *ir.Name, targs []*types.Type, isMeth bool) *types.Sym {
+ if len(targs) == 0 {
+ base.Fatalf("%s should have type arguments", gf.Sym().Name)
+ }
+
+ // Enforce that only concrete types can make it to here.
+ for _, t := range targs {
+ if t.HasShape() {
+ panic(fmt.Sprintf("shape %+v in dictionary for %s", t, gf.Sym().Name))
+ }
+ }
+
+ // Get a symbol representing the dictionary.
+ sym := typecheck.MakeDictName(gf.Sym(), targs, isMeth)
+
+ // Initialize the dictionary, if we haven't yet already.
+ lsym := sym.Linksym()
+ if len(lsym.P) > 0 {
+ // We already started creating this dictionary and its lsym.
+ return sym
+ }
+
+ info := g.getGfInfo(gf)
+
+ infoPrint("=== Creating dictionary %v\n", sym.Name)
+ off := 0
+ // Emit an entry for each targ (concrete type or gcshape).
+ for _, t := range targs {
+ infoPrint(" * %v\n", t)
+ s := reflectdata.TypeLinksym(t)
+ off = objw.SymPtr(lsym, off, s, 0)
+ markTypeUsed(t, lsym)
+ }
+ subst := typecheck.Tsubster{
+ Tparams: info.tparams,
+ Targs: targs,
}
- var newfields []*types.Field
- for i, f := range t.Methods().Slice() {
- t2 := subst.typ(f.Type)
- if (t2 != f.Type || f.Nname != nil) && newfields == nil {
- newfields = make([]*types.Field, t.Methods().Len())
- for j := 0; j < i; j++ {
- newfields[j] = t.Methods().Index(j)
+ // Emit an entry for each derived type (after substituting targs)
+ for _, t := range info.derivedTypes {
+ ts := subst.Typ(t)
+ infoPrint(" - %v\n", ts)
+ s := reflectdata.TypeLinksym(ts)
+ off = objw.SymPtr(lsym, off, s, 0)
+ markTypeUsed(ts, lsym)
+ }
+ // Emit an entry for each subdictionary (after substituting targs)
+ for _, n := range info.subDictCalls {
+ var sym *types.Sym
+ switch n.Op() {
+ case ir.OCALL:
+ call := n.(*ir.CallExpr)
+ if call.X.Op() == ir.OXDOT {
+ var nameNode *ir.Name
+ se := call.X.(*ir.SelectorExpr)
+ if types.IsInterfaceMethod(se.Selection.Type) {
+ // This is a method call enabled by a type bound.
+ tmpse := ir.NewSelectorExpr(base.Pos, ir.OXDOT, se.X, se.Sel)
+ tmpse = typecheck.AddImplicitDots(tmpse)
+ tparam := tmpse.X.Type()
+ assert(tparam.IsTypeParam())
+ recvType := targs[tparam.Index()]
+ if recvType.IsInterface() || len(recvType.RParams()) == 0 {
+ // No sub-dictionary entry is
+ // actually needed, since the
+ // type arg is not an
+ // instantiated type that
+ // will have generic methods.
+ break
+ }
+ // This is a method call for an
+ // instantiated type, so we need a
+ // sub-dictionary.
+ targs := recvType.RParams()
+ genRecvType := recvType.OrigSym.Def.Type()
+ nameNode = typecheck.Lookdot1(call.X, se.Sel, genRecvType, genRecvType.Methods(), 1).Nname.(*ir.Name)
+ sym = g.getDictionarySym(nameNode, targs, true)
+ } else {
+ // This is the case of a normal
+ // method call on a generic type.
+ nameNode = call.X.(*ir.SelectorExpr).Selection.Nname.(*ir.Name)
+ subtargs := deref(call.X.(*ir.SelectorExpr).X.Type()).RParams()
+ s2targs := make([]*types.Type, len(subtargs))
+ for i, t := range subtargs {
+ s2targs[i] = subst.Typ(t)
+ }
+ sym = g.getDictionarySym(nameNode, s2targs, true)
+ }
+ } else {
+ inst := call.X.(*ir.InstExpr)
+ var nameNode *ir.Name
+ var meth *ir.SelectorExpr
+ var isMeth bool
+ if meth, isMeth = inst.X.(*ir.SelectorExpr); isMeth {
+ nameNode = meth.Selection.Nname.(*ir.Name)
+ } else {
+ nameNode = inst.X.(*ir.Name)
+ }
+ subtargs := typecheck.TypesOf(inst.Targs)
+ for i, t := range subtargs {
+ subtargs[i] = subst.Typ(t)
+ }
+ sym = g.getDictionarySym(nameNode, subtargs, isMeth)
}
+
+ case ir.OFUNCINST:
+ inst := n.(*ir.InstExpr)
+ nameNode := inst.X.(*ir.Name)
+ subtargs := typecheck.TypesOf(inst.Targs)
+ for i, t := range subtargs {
+ subtargs[i] = subst.Typ(t)
+ }
+ sym = g.getDictionarySym(nameNode, subtargs, false)
+
+ case ir.OXDOT:
+ selExpr := n.(*ir.SelectorExpr)
+ subtargs := deref(selExpr.X.Type()).RParams()
+ s2targs := make([]*types.Type, len(subtargs))
+ for i, t := range subtargs {
+ s2targs[i] = subst.Typ(t)
+ }
+ nameNode := selExpr.Selection.Nname.(*ir.Name)
+ sym = g.getDictionarySym(nameNode, s2targs, true)
+
+ default:
+ assert(false)
}
- if newfields != nil {
- newfields[i] = types.NewField(f.Pos, f.Sym, t2)
+
+ if sym == nil {
+ // Unused sub-dictionary entry, just emit 0.
+ off = objw.Uintptr(lsym, off, 0)
+ infoPrint(" - Unused subdict entry\n")
+ } else {
+ off = objw.SymPtr(lsym, off, sym.Linksym(), 0)
+ infoPrint(" - Subdict %v\n", sym.Name)
}
}
- if newfields != nil {
- return types.NewInterface(t.Pkg(), newfields)
+
+ delay := &delayInfo{
+ gf: gf,
+ targs: targs,
+ sym: sym,
+ off: off,
}
- return t
+ g.dictSymsToFinalize = append(g.dictSymsToFinalize, delay)
+ g.instTypeList = append(g.instTypeList, subst.InstTypeList...)
+ return sym
}
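A sub-dictionary entry is needed whenever the generic function calls another generic function or method whose type arguments are themselves derived from the caller's type parameters. A hypothetical example of code that produces one such entry:

package p

func g[U any](u U) {}

// For a concrete instantiation such as f[int], f's dictionary holds a pointer
// to g's dictionary for g[[]int], since the type argument []T is only known
// once T is.
func f[T any](t T) {
	g([]T{t}) // OCALL of an OFUNCINST whose type argument ([]T) mentions T
}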
-// instTypeName creates a name for an instantiated type, based on the name of the
-// generic type and the type args
-func instTypeName(name string, targs []*types.Type) string {
- b := bytes.NewBufferString(name)
- b.WriteByte('[')
- for i, targ := range targs {
- if i > 0 {
- b.WriteByte(',')
- }
- b.WriteString(targ.String())
- }
- b.WriteByte(']')
- return b.String()
-}
-
-// typ computes the type obtained by substituting any type parameter in t with the
-// corresponding type argument in subst. If t contains no type parameters, the
-// result is t; otherwise the result is a new type. It deals with recursive types
-// by using TFORW types and finding partially or fully created types via sym.Def.
-func (subst *subster) typ(t *types.Type) *types.Type {
- if !t.HasTParam() && t.Kind() != types.TFUNC {
- // Note: function types need to be copied regardless, as the
- // types of closures may contain declarations that need
- // to be copied. See #45738.
- return t
- }
-
- if t.Kind() == types.TTYPEPARAM {
- for i, tp := range subst.tparams {
- if tp.Type == t {
- return subst.targs[i].Type()
- }
- }
- // If t is a simple typeparam T, then t has the name/symbol 'T'
- // and t.Underlying() == t.
- //
- // However, consider the type definition: 'type P[T any] T'. We
- // might use this definition so we can have a variant of type T
- // that we can add new methods to. Suppose t is a reference to
- // P[T]. t has the name 'P[T]', but its kind is TTYPEPARAM,
- // because P[T] is defined as T. If we look at t.Underlying(), it
- // is different, because the name of t.Underlying() is 'T' rather
- // than 'P[T]'. But the kind of t.Underlying() is also TTYPEPARAM.
- // In this case, we do the needed recursive substitution in the
- // case statement below.
- if t.Underlying() == t {
- // t is a simple typeparam that didn't match anything in tparam
- return t
- }
- // t is a more complex typeparam (e.g. P[T], as above, whose
- // definition is just T).
- assert(t.Sym() != nil)
- }
-
- var newsym *types.Sym
- var neededTargs []*types.Type
- var forw *types.Type
-
- if t.Sym() != nil {
- // Translate the type params for this type according to
- // the tparam/targs mapping from subst.
- neededTargs = make([]*types.Type, len(t.RParams()))
- for i, rparam := range t.RParams() {
- neededTargs[i] = subst.typ(rparam)
- }
- // For a named (defined) type, we have to change the name of the
- // type as well. We do this first, so we can look up if we've
- // already seen this type during this substitution or other
- // definitions/substitutions.
- genName := genericTypeName(t.Sym())
- newsym = t.Sym().Pkg.Lookup(instTypeName(genName, neededTargs))
- if newsym.Def != nil {
- // We've already created this instantiated defined type.
- return newsym.Def.Type()
- }
-
- // In order to deal with recursive generic types, create a TFORW
- // type initially and set the Def field of its sym, so it can be
- // found if this type appears recursively within the type.
- forw = newIncompleteNamedType(t.Pos(), newsym)
- //println("Creating new type by sub", newsym.Name, forw.HasTParam())
- forw.SetRParams(neededTargs)
- }
-
- var newt *types.Type
+// finalizeSyms finishes up all dictionaries on g.dictSymsToFinalize, by writing out
+// any needed LSyms for itabs. The itab lsyms create wrappers which need various
+// dictionaries and method instantiations to be complete, so, to avoid recursive
+// dependencies, we finalize the itab lsyms only after all dictionary syms and
+// instantiations have been created.
+func (g *irgen) finalizeSyms() {
+ for _, d := range g.dictSymsToFinalize {
+ infoPrint("=== Finalizing dictionary %s\n", d.sym.Name)
+
+ lsym := d.sym.Linksym()
+ info := g.getGfInfo(d.gf)
+
+ subst := typecheck.Tsubster{
+ Tparams: info.tparams,
+ Targs: d.targs,
+ }
- switch t.Kind() {
- case types.TTYPEPARAM:
- if t.Sym() == newsym {
- // The substitution did not change the type.
- return t
+ // Emit an entry for each itab
+ for _, n := range info.itabConvs {
+ var srctype, dsttype *types.Type
+ switch n.Op() {
+ case ir.OXDOT:
+ se := n.(*ir.SelectorExpr)
+ srctype = subst.Typ(se.X.Type())
+ dsttype = subst.Typ(se.X.Type().Bound())
+ found := false
+ for i, m := range dsttype.AllMethods().Slice() {
+ if se.Sel == m.Sym {
+ // Mark that this method se.Sel is
+ // used for the dsttype interface, so
+ // it won't get deadcoded.
+ reflectdata.MarkUsedIfaceMethodIndex(lsym, dsttype, i)
+ found = true
+ break
+ }
+ }
+ assert(found)
+ case ir.ODOTTYPE, ir.ODOTTYPE2:
+ srctype = subst.Typ(n.(*ir.TypeAssertExpr).Type())
+ dsttype = subst.Typ(n.(*ir.TypeAssertExpr).X.Type())
+ case ir.OCONVIFACE:
+ srctype = subst.Typ(n.(*ir.ConvExpr).X.Type())
+ dsttype = subst.Typ(n.Type())
+ case ir.OTYPE:
+ srctype = subst.Typ(n.Type())
+ dsttype = subst.Typ(info.type2switchType[n])
+ default:
+ base.Fatalf("itab entry with unknown op %s", n.Op())
+ }
+ if srctype.IsInterface() {
+ // No itab is wanted if src type is an interface. We
+ // will use a type assert instead.
+ d.off = objw.Uintptr(lsym, d.off, 0)
+ infoPrint(" + Unused itab entry for %v\n", srctype)
+ } else {
+ itabLsym := reflectdata.ITabLsym(srctype, dsttype)
+ d.off = objw.SymPtr(lsym, d.off, itabLsym, 0)
+ infoPrint(" + Itab for (%v,%v)\n", srctype, dsttype)
+ }
}
- // Substitute the underlying typeparam (e.g. T in P[T], see
- // the example describing type P[T] above).
- newt = subst.typ(t.Underlying())
- assert(newt != t)
- case types.TARRAY:
- elem := t.Elem()
- newelem := subst.typ(elem)
- if newelem != elem {
- newt = types.NewArray(newelem, t.NumElem())
+ objw.Global(lsym, int32(d.off), obj.DUPOK|obj.RODATA)
+ infoPrint("=== Finalized dictionary %s\n", d.sym.Name)
+
+ g.instTypeList = append(g.instTypeList, subst.InstTypeList...)
+ }
+ g.dictSymsToFinalize = nil
+}
+
+func (g *irgen) getDictionaryValue(gf *ir.Name, targs []*types.Type, isMeth bool) ir.Node {
+ sym := g.getDictionarySym(gf, targs, isMeth)
+
+ // Make a node referencing the dictionary symbol.
+ n := typecheck.NewName(sym)
+ n.SetType(types.Types[types.TUINTPTR]) // should probably be [...]uintptr, but doesn't really matter
+ n.SetTypecheck(1)
+ n.Class = ir.PEXTERN
+ sym.Def = n
+
+ // Return the address of the dictionary.
+ np := typecheck.NodAddr(n)
+ // Note: treat dictionary pointers as uintptrs, so they aren't pointers
+ // with respect to GC. That saves on stack scanning work, write barriers, etc.
+ // We can get away with it because dictionaries are global variables.
+ // TODO: use a cast, or is typing directly ok?
+ np.SetType(types.Types[types.TUINTPTR])
+ np.SetTypecheck(1)
+ return np
+}
+
+// hasTParamNodes returns true if the type of any node in targs has a typeparam.
+func hasTParamNodes(targs []ir.Node) bool {
+ for _, n := range targs {
+ if n.Type().HasTParam() {
+ return true
}
+ }
+ return false
+}
+
+// hasTParamTypes returns true if any type in targs has a typeparam.
+func hasTParamTypes(targs []*types.Type) bool {
+ for _, t := range targs {
+ if t.HasTParam() {
+ return true
+ }
+ }
+ return false
+}
+
+// getGfInfo gets information for a generic function - type params, derived generic
+// types, and subdictionaries.
+func (g *irgen) getGfInfo(gn *ir.Name) *gfInfo {
+ infop := g.gfInfoMap[gn.Sym()]
+ if infop != nil {
+ return infop
+ }
- case types.TPTR:
- elem := t.Elem()
- newelem := subst.typ(elem)
- if newelem != elem {
- newt = types.NewPtr(newelem)
+ checkFetchBody(gn)
+ var info gfInfo
+ gf := gn.Func
+ recv := gf.Type().Recv()
+ if recv != nil {
+ info.tparams = deref(recv.Type).RParams()
+ } else {
+ tparams := gn.Type().TParams().FieldSlice()
+ info.tparams = make([]*types.Type, len(tparams))
+ for i, f := range tparams {
+ info.tparams[i] = f.Type
}
+ }
- case types.TSLICE:
- elem := t.Elem()
- newelem := subst.typ(elem)
- if newelem != elem {
- newt = types.NewSlice(newelem)
+ for _, t := range info.tparams {
+ b := t.Bound()
+ if b.HasTParam() {
+ // If a type bound is parameterized (unusual case), then we
+ // may need its derived type to do a type assert when doing a
+ // bound call for a type arg that is an interface.
+ addType(&info, nil, b)
}
+ }
- case types.TSTRUCT:
- newt = subst.tstruct(t, false)
- if newt == t {
- newt = nil
+ for _, n := range gf.Dcl {
+ addType(&info, n, n.Type())
+ }
+
+ if infoPrintMode {
+ fmt.Printf(">>> GfInfo for %v\n", gn)
+ for _, t := range info.tparams {
+ fmt.Printf(" Typeparam %v\n", t)
}
+ }
- case types.TFUNC:
- newrecvs := subst.tstruct(t.Recvs(), false)
- newparams := subst.tstruct(t.Params(), false)
- newresults := subst.tstruct(t.Results(), false)
- if newrecvs != t.Recvs() || newparams != t.Params() || newresults != t.Results() {
- // If any types have changed, then the all the fields of
- // of recv, params, and results must be copied, because they have
- // offset fields that are dependent, and so must have an
- // independent copy for each new signature.
- var newrecv *types.Field
- if newrecvs.NumFields() > 0 {
- if newrecvs == t.Recvs() {
- newrecvs = subst.tstruct(t.Recvs(), true)
+ var visitFunc func(ir.Node)
+ visitFunc = func(n ir.Node) {
+ if n.Op() == ir.OFUNCINST && !n.(*ir.InstExpr).Implicit() {
+ if hasTParamNodes(n.(*ir.InstExpr).Targs) {
+ infoPrint(" Closure&subdictionary required at generic function value %v\n", n.(*ir.InstExpr).X)
+ info.subDictCalls = append(info.subDictCalls, n)
+ }
+ } else if n.Op() == ir.OXDOT && !n.(*ir.SelectorExpr).Implicit() &&
+ n.(*ir.SelectorExpr).Selection != nil &&
+ len(deref(n.(*ir.SelectorExpr).X.Type()).RParams()) > 0 {
+ if hasTParamTypes(deref(n.(*ir.SelectorExpr).X.Type()).RParams()) {
+ if n.(*ir.SelectorExpr).X.Op() == ir.OTYPE {
+ infoPrint(" Closure&subdictionary required at generic meth expr %v\n", n)
+ } else {
+ infoPrint(" Closure&subdictionary required at generic meth value %v\n", n)
}
- newrecv = newrecvs.Field(0)
+ info.subDictCalls = append(info.subDictCalls, n)
}
- if newparams == t.Params() {
- newparams = subst.tstruct(t.Params(), true)
+ }
+ if n.Op() == ir.OCALL && n.(*ir.CallExpr).X.Op() == ir.OFUNCINST {
+ n.(*ir.CallExpr).X.(*ir.InstExpr).SetImplicit(true)
+ if hasTParamNodes(n.(*ir.CallExpr).X.(*ir.InstExpr).Targs) {
+ infoPrint(" Subdictionary at generic function/method call: %v - %v\n", n.(*ir.CallExpr).X.(*ir.InstExpr).X, n)
+ info.subDictCalls = append(info.subDictCalls, n)
}
- if newresults == t.Results() {
- newresults = subst.tstruct(t.Results(), true)
+ }
+ if n.Op() == ir.OCALL && n.(*ir.CallExpr).X.Op() == ir.OXDOT &&
+ n.(*ir.CallExpr).X.(*ir.SelectorExpr).Selection != nil &&
+ len(deref(n.(*ir.CallExpr).X.(*ir.SelectorExpr).X.Type()).RParams()) > 0 {
+ n.(*ir.CallExpr).X.(*ir.SelectorExpr).SetImplicit(true)
+ if hasTParamTypes(deref(n.(*ir.CallExpr).X.(*ir.SelectorExpr).X.Type()).RParams()) {
+ infoPrint(" Subdictionary at generic method call: %v\n", n)
+ info.subDictCalls = append(info.subDictCalls, n)
}
- newt = types.NewSignature(t.Pkg(), newrecv, t.TParams().FieldSlice(), newparams.FieldSlice(), newresults.FieldSlice())
}
-
- case types.TINTER:
- newt = subst.tinter(t)
- if newt == t {
- newt = nil
+ if n.Op() == ir.OCALL && n.(*ir.CallExpr).X.Op() == ir.OXDOT &&
+ n.(*ir.CallExpr).X.(*ir.SelectorExpr).Selection != nil &&
+ deref(n.(*ir.CallExpr).X.(*ir.SelectorExpr).X.Type()).IsTypeParam() {
+ n.(*ir.CallExpr).X.(*ir.SelectorExpr).SetImplicit(true)
+ infoPrint(" Optional subdictionary at generic bound call: %v\n", n)
+ info.subDictCalls = append(info.subDictCalls, n)
}
-
- case types.TMAP:
- newkey := subst.typ(t.Key())
- newval := subst.typ(t.Elem())
- if newkey != t.Key() || newval != t.Elem() {
- newt = types.NewMap(newkey, newval)
- }
-
- case types.TCHAN:
- elem := t.Elem()
- newelem := subst.typ(elem)
- if newelem != elem {
- newt = types.NewChan(newelem, t.ChanDir())
- if !newt.HasTParam() {
- // TODO(danscales): not sure why I have to do this
- // only for channels.....
- types.CheckSize(newt)
- }
- }
- }
- if newt == nil {
- // Even though there were typeparams in the type, there may be no
- // change if this is a function type for a function call (which will
- // have its own tparams/targs in the function instantiation).
- return t
- }
-
- if t.Sym() == nil {
- // Not a named type, so there was no forwarding type and there are
- // no methods to substitute.
- assert(t.Methods().Len() == 0)
- return newt
- }
-
- forw.SetUnderlying(newt)
- newt = forw
-
- if t.Kind() != types.TINTER && t.Methods().Len() > 0 {
- // Fill in the method info for the new type.
- var newfields []*types.Field
- newfields = make([]*types.Field, t.Methods().Len())
- for i, f := range t.Methods().Slice() {
- t2 := subst.typ(f.Type)
- oldsym := f.Nname.Sym()
- newsym := makeInstName(oldsym, subst.targs, true)
- var nname *ir.Name
- if newsym.Def != nil {
- nname = newsym.Def.(*ir.Name)
- } else {
- nname = ir.NewNameAt(f.Pos, newsym)
- nname.SetType(t2)
- newsym.Def = nname
+ if n.Op() == ir.OCONVIFACE && n.Type().IsInterface() &&
+ !n.Type().IsEmptyInterface() &&
+ n.(*ir.ConvExpr).X.Type().HasTParam() {
+ infoPrint(" Itab for interface conv: %v\n", n)
+ info.itabConvs = append(info.itabConvs, n)
+ }
+ if n.Op() == ir.OXDOT && n.(*ir.SelectorExpr).X.Type().IsTypeParam() {
+ infoPrint(" Itab for bound call: %v\n", n)
+ info.itabConvs = append(info.itabConvs, n)
+ }
+ if (n.Op() == ir.ODOTTYPE || n.Op() == ir.ODOTTYPE2) && !n.(*ir.TypeAssertExpr).Type().IsInterface() && !n.(*ir.TypeAssertExpr).X.Type().IsEmptyInterface() {
+ infoPrint(" Itab for dot type: %v\n", n)
+ info.itabConvs = append(info.itabConvs, n)
+ }
+ if n.Op() == ir.OCLOSURE {
+ // Visit the closure body and add all relevant entries to the
+ // dictionary of the outer function (closure will just use
+ // the dictionary of the outer function).
+ for _, n1 := range n.(*ir.ClosureExpr).Func.Body {
+ ir.Visit(n1, visitFunc)
}
- newfields[i] = types.NewField(f.Pos, f.Sym, t2)
- newfields[i].Nname = nname
}
- newt.Methods().Set(newfields)
- if !newt.HasTParam() {
- // Generate all the methods for a new fully-instantiated type.
- subst.g.instTypeList = append(subst.g.instTypeList, newt)
+ if n.Op() == ir.OSWITCH && n.(*ir.SwitchStmt).Tag != nil && n.(*ir.SwitchStmt).Tag.Op() == ir.OTYPESW && !n.(*ir.SwitchStmt).Tag.(*ir.TypeSwitchGuard).X.Type().IsEmptyInterface() {
+ for _, cc := range n.(*ir.SwitchStmt).Cases {
+ for _, c := range cc.List {
+ if c.Op() == ir.OTYPE && c.Type().HasTParam() {
+ // Type switch from a non-empty interface - might need an itab.
+ infoPrint(" Itab for type switch: %v\n", c)
+ info.itabConvs = append(info.itabConvs, c)
+ if info.type2switchType == nil {
+ info.type2switchType = map[ir.Node]*types.Type{}
+ }
+ info.type2switchType[c] = n.(*ir.SwitchStmt).Tag.(*ir.TypeSwitchGuard).X.Type()
+ }
+ }
+ }
+ }
+ addType(&info, n, n.Type())
+ }
+
+ for _, stmt := range gf.Body {
+ ir.Visit(stmt, visitFunc)
+ }
+ if infoPrintMode {
+ for _, t := range info.derivedTypes {
+ fmt.Printf(" Derived type %v\n", t)
}
+ fmt.Printf(">>> Done GfInfo\n")
}
- return newt
+ g.gfInfoMap[gn.Sym()] = &info
+ return &info
}
-// fields sets the Nname field for the Field nodes inside a type signature, based
-// on the corresponding in/out parameters in dcl. It depends on the in and out
-// parameters being in order in dcl.
-func (subst *subster) fields(class ir.Class, oldfields []*types.Field, dcl []*ir.Name) []*types.Field {
- // Find the starting index in dcl of declarations of the class (either
- // PPARAM or PPARAMOUT).
- var i int
- for i = range dcl {
- if dcl[i].Class == class {
- break
+// addType adds t to info.derivedTypes if it is a parameterized type (which is not
+// just a simple type param) and is different from any existing type on
+// info.derivedTypes.
+func addType(info *gfInfo, n ir.Node, t *types.Type) {
+ if t == nil || !t.HasTParam() {
+ return
+ }
+ if t.IsTypeParam() && t.Underlying() == t {
+ return
+ }
+ if t.Kind() == types.TFUNC && n != nil &&
+ (t.Recv() != nil ||
+ n.Op() == ir.ONAME && n.Name().Class == ir.PFUNC) {
+ // Don't use the type of a named generic function or method,
+ // since that is parameterized by other typeparams.
+ // (They all come from arguments of a FUNCINST node.)
+ return
+ }
+ if doubleCheck && !parameterizedBy(t, info.tparams) {
+ base.Fatalf("adding type with invalid parameters %+v", t)
+ }
+ if t.Kind() == types.TSTRUCT && t.IsFuncArgStruct() {
+ // Multiple return values are not a relevant new type (?).
+ return
+ }
+ // Ignore a derived type we've already added.
+ for _, et := range info.derivedTypes {
+ if types.Identical(t, et) {
+ return
}
}
+ info.derivedTypes = append(info.derivedTypes, t)
+}
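For example (hypothetical code, not from the compiler): in a function like the one below, []T and map[string]T are derived types in the sense used here, while T itself and plain int are not.

package p

func f[T any](x T) {
	s := []T{x}         // derived type []T: gets its own dictionary entry
	m := map[string]T{} // derived type map[string]T: likewise
	n := 42             // int mentions no type param, so it is not recorded
	_, _, _ = s, m, n
}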
- // Create newfields nodes that are copies of the oldfields nodes, but
- // with substitution for any type params, and with Nname set to be the node in
- // Dcl for the corresponding PPARAM or PPARAMOUT.
- newfields := make([]*types.Field, len(oldfields))
- for j := range oldfields {
- newfields[j] = oldfields[j].Copy()
- newfields[j].Type = subst.typ(oldfields[j].Type)
- // A param field will be missing from dcl if its name is
- // unspecified or specified as "_". So, we compare the dcl sym
- // with the field sym. If they don't match, this dcl (if there is
- // one left) must apply to a later field.
- if i < len(dcl) && dcl[i].Sym() == oldfields[j].Sym {
- newfields[j].Nname = dcl[i]
- i++
+// parameterizedBy returns true if t is parameterized by (at most) params.
+func parameterizedBy(t *types.Type, params []*types.Type) bool {
+ return parameterizedBy1(t, params, map[*types.Type]bool{})
+}
+func parameterizedBy1(t *types.Type, params []*types.Type, visited map[*types.Type]bool) bool {
+ if visited[t] {
+ return true
+ }
+ visited[t] = true
+
+ if t.Sym() != nil && len(t.RParams()) > 0 {
+ // This defined type is instantiated. Check the instantiating types.
+ for _, r := range t.RParams() {
+ if !parameterizedBy1(r, params, visited) {
+ return false
+ }
}
+ return true
+ }
+ switch t.Kind() {
+ case types.TTYPEPARAM:
+ // Check if t is one of the allowed parameters in scope.
+ for _, p := range params {
+ if p == t {
+ return true
+ }
+ }
+ // Couldn't find t in the list of allowed parameters.
+ return false
+
+ case types.TARRAY, types.TPTR, types.TSLICE, types.TCHAN:
+ return parameterizedBy1(t.Elem(), params, visited)
+
+ case types.TMAP:
+ return parameterizedBy1(t.Key(), params, visited) && parameterizedBy1(t.Elem(), params, visited)
+
+ case types.TFUNC:
+ return parameterizedBy1(t.TParams(), params, visited) && parameterizedBy1(t.Recvs(), params, visited) && parameterizedBy1(t.Params(), params, visited) && parameterizedBy1(t.Results(), params, visited)
+
+ case types.TSTRUCT:
+ for _, f := range t.Fields().Slice() {
+ if !parameterizedBy1(f.Type, params, visited) {
+ return false
+ }
+ }
+ return true
+
+ case types.TINTER:
+ for _, f := range t.Methods().Slice() {
+ if !parameterizedBy1(f.Type, params, visited) {
+ return false
+ }
+ }
+ return true
+
+ case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64,
+ types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64,
+ types.TUINTPTR, types.TBOOL, types.TSTRING, types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128:
+ return true
+
+ case types.TUNION:
+ for i := 0; i < t.NumTerms(); i++ {
+ tt, _ := t.Term(i)
+ if !parameterizedBy1(tt, params, visited) {
+ return false
+ }
+ }
+ return true
+
+ default:
+ base.Fatalf("bad type kind %+v", t)
+ return true
}
- return newfields
}
-// defer does a single defer of type t, if it is a pointer type.
-func deref(t *types.Type) *types.Type {
- if t.IsPtr() {
- return t.Elem()
+// startClosure starts creation of a closure that has the function type typ. It
+// creates all the formal params and results according to the type typ. On return,
+// the body and closure variables of the closure must still be filled in, and
+// ir.UseClosure() called.
+func startClosure(pos src.XPos, outer *ir.Func, typ *types.Type) (*ir.Func, []*types.Field, []*types.Field) {
+ // Make a new internal function.
+ fn := ir.NewClosureFunc(pos, outer != nil)
+ ir.NameClosure(fn.OClosure, outer)
+
+ // Build formal argument and return lists.
+ var formalParams []*types.Field // arguments of closure
+ var formalResults []*types.Field // returns of closure
+ for i := 0; i < typ.NumParams(); i++ {
+ t := typ.Params().Field(i).Type
+ arg := ir.NewNameAt(pos, typecheck.LookupNum("a", i))
+ arg.Class = ir.PPARAM
+ typed(t, arg)
+ arg.Curfn = fn
+ fn.Dcl = append(fn.Dcl, arg)
+ f := types.NewField(pos, arg.Sym(), t)
+ f.Nname = arg
+ formalParams = append(formalParams, f)
}
- return t
+ for i := 0; i < typ.NumResults(); i++ {
+ t := typ.Results().Field(i).Type
+ result := ir.NewNameAt(pos, typecheck.LookupNum("r", i)) // TODO: names not needed?
+ result.Class = ir.PPARAMOUT
+ typed(t, result)
+ result.Curfn = fn
+ fn.Dcl = append(fn.Dcl, result)
+ f := types.NewField(pos, result.Sym(), t)
+ f.Nname = result
+ formalResults = append(formalResults, f)
+ }
+
+ // Build an internal function with the right signature.
+ closureType := types.NewSignature(typ.Pkg(), nil, nil, formalParams, formalResults)
+ typed(closureType, fn.Nname)
+ typed(typ, fn.OClosure)
+ fn.SetTypecheck(1)
+ return fn, formalParams, formalResults
+}
+
+// assertToBound returns a new node that converts a node rcvr with interface type to
+// the 'dst' interface type. bound is the unsubstituted form of dst.
+func assertToBound(info *instInfo, dictVar *ir.Name, pos src.XPos, rcvr ir.Node, bound, dst *types.Type) ir.Node {
+ if bound.HasTParam() {
+ ix := findDictType(info, bound)
+ assert(ix >= 0)
+ rt := getDictionaryType(info, dictVar, pos, ix)
+ rcvr = ir.NewDynamicTypeAssertExpr(pos, ir.ODYNAMICDOTTYPE, rcvr, rt)
+ typed(dst, rcvr)
+ } else {
+ rcvr = ir.NewTypeAssertExpr(pos, rcvr, nil)
+ typed(bound, rcvr)
+ }
+ return rcvr
}
-// newIncompleteNamedType returns a TFORW type t with name specified by sym, such
-// that t.nod and sym.Def are set correctly.
-func newIncompleteNamedType(pos src.XPos, sym *types.Sym) *types.Type {
- name := ir.NewDeclNameAt(pos, ir.OTYPE, sym)
- forw := types.NewNamed(name)
- name.SetType(forw)
- sym.Def = name
- return forw
+// buildClosure2 makes a closure to implement a method expression m (generic form x)
+// which has a shape type as receiver. If the receiver is exactly a shape (i.e. from
+// a typeparam), then the body of the closure converts m.X (the receiver) to the
+// interface bound type, and makes an interface call with the remaining arguments.
+//
+// The returned closure is fully substituted and has already had any needed
+// transformations done.
+func (g *irgen) buildClosure2(subst *subster, m, x ir.Node) ir.Node {
+ outer := subst.newf
+ info := subst.info
+ pos := m.Pos()
+ typ := m.Type() // type of the closure
+
+ fn, formalParams, formalResults := startClosure(pos, outer, typ)
+
+ // Capture dictionary calculated in the outer function
+ dictVar := ir.CaptureName(pos, fn, info.dictParam)
+ typed(types.Types[types.TUINTPTR], dictVar)
+
+ // Build arguments to call inside the closure.
+ var args []ir.Node
+ for i := 0; i < typ.NumParams(); i++ {
+ args = append(args, formalParams[i].Nname.(*ir.Name))
+ }
+
+ // Build call itself. This involves converting the first argument to the
+ // bound type (an interface) using the dictionary, and then making an
+ // interface call with the remaining arguments.
+ var innerCall ir.Node
+ rcvr := args[0]
+ args = args[1:]
+ assert(m.(*ir.SelectorExpr).X.Type().IsShape())
+ gsrc := x.(*ir.SelectorExpr).X.Type()
+ bound := gsrc.Bound()
+ dst := bound
+ if dst.HasTParam() {
+ dst = subst.ts.Typ(bound)
+ }
+ if m.(*ir.SelectorExpr).X.Type().IsInterface() {
+ // If type arg is an interface (unusual case), we do a type assert to
+ // the type bound.
+ rcvr = assertToBound(info, dictVar, pos, rcvr, bound, dst)
+ } else {
+ rcvr = convertUsingDictionary(info, dictVar, pos, rcvr, x, dst, gsrc)
+ }
+ dot := ir.NewSelectorExpr(pos, ir.ODOTINTER, rcvr, x.(*ir.SelectorExpr).Sel)
+ dot.Selection = typecheck.Lookdot1(dot, dot.Sel, dot.X.Type(), dot.X.Type().AllMethods(), 1)
+
+ // Do a type substitution on the generic bound, in case it is parameterized.
+ typed(subst.ts.Typ(x.(*ir.SelectorExpr).Selection.Type), dot)
+ innerCall = ir.NewCallExpr(pos, ir.OCALLINTER, dot, args)
+ t := m.Type()
+ if t.NumResults() == 0 {
+ innerCall.SetTypecheck(1)
+ } else if t.NumResults() == 1 {
+ typed(t.Results().Field(0).Type, innerCall)
+ } else {
+ typed(t.Results(), innerCall)
+ }
+ if len(formalResults) > 0 {
+ innerCall = ir.NewReturnStmt(pos, []ir.Node{innerCall})
+ innerCall.SetTypecheck(1)
+ }
+ fn.Body = []ir.Node{innerCall}
+
+ // We're all done with the captured dictionary
+ ir.FinishCaptureNames(pos, outer, fn)
+
+ // Do final checks on closure and return it.
+ return ir.UseClosure(fn.OClosure, g.target)
}
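A hand-written analogue of the closure built here (hypothetical source-level Go, not compiler output): the receiver argument is converted to its bound interface, and the remaining work is an ordinary interface call.

package p

import "fmt"

// methodExpr is roughly what buildClosure2 produces for the method expression
// T.String when T is a shape type bounded by fmt.Stringer: the closure takes
// the receiver as its first argument, converts it to the bound interface (via
// the dictionary in the real implementation), and makes an interface call.
func methodExpr[T fmt.Stringer]() func(T) string {
	return func(rcvr T) string {
		var b fmt.Stringer = rcvr // conversion to the bound interface
		return b.String()         // ODOTINTER receiver, OCALLINTER call
	}
}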
diff --git a/src/cmd/compile/internal/noder/stmt.go b/src/cmd/compile/internal/noder/stmt.go
index 32a1483b4a..1949f56095 100644
--- a/src/cmd/compile/internal/noder/stmt.go
+++ b/src/cmd/compile/internal/noder/stmt.go
@@ -35,11 +35,7 @@ func (g *irgen) stmt(stmt syntax.Stmt) ir.Node {
case *syntax.BlockStmt:
return ir.NewBlockStmt(g.pos(stmt), g.blockStmt(stmt))
case *syntax.ExprStmt:
- x := g.expr(stmt.X)
- if call, ok := x.(*ir.CallExpr); ok {
- call.Use = ir.CallUseStmt
- }
- return x
+ return g.expr(stmt.X)
case *syntax.SendStmt:
n := ir.NewSendStmt(g.pos(stmt), g.expr(stmt.Chan), g.expr(stmt.Value))
if n.Chan.Type().HasTParam() || n.Value.Type().HasTParam() {
@@ -61,7 +57,10 @@ func (g *irgen) stmt(stmt syntax.Stmt) ir.Node {
if stmt.Rhs == nil {
n = IncDec(g.pos(stmt), op, g.expr(stmt.Lhs))
} else {
- n = ir.NewAssignOpStmt(g.pos(stmt), op, g.expr(stmt.Lhs), g.expr(stmt.Rhs))
+ // Eval rhs before lhs, for compatibility with noder1
+ rhs := g.expr(stmt.Rhs)
+ lhs := g.expr(stmt.Lhs)
+ n = ir.NewAssignOpStmt(g.pos(stmt), op, lhs, rhs)
}
if n.X.Typecheck() == 3 {
n.SetTypecheck(3)
@@ -72,8 +71,9 @@ func (g *irgen) stmt(stmt syntax.Stmt) ir.Node {
return n
}
- names, lhs := g.assignList(stmt.Lhs, stmt.Op == syntax.Def)
+ // Eval rhs before lhs, for compatibility with noder1
rhs := g.exprList(stmt.Rhs)
+ names, lhs := g.assignList(stmt.Lhs, stmt.Op == syntax.Def)
// We must delay transforming the assign statement if any of the
// lhs or rhs nodes are also delayed, since transformAssign needs
@@ -128,6 +128,12 @@ func (g *irgen) stmt(stmt syntax.Stmt) ir.Node {
if e.Type().HasTParam() {
// Delay transforming the return statement if any of the
// return values have a type param.
+ if !ir.HasNamedResults(ir.CurFunc) {
+ transformArgs(n)
+ // But add CONVIFACE nodes where needed if
+ // any of the return values have interface type.
+ typecheckaste(ir.ORETURN, nil, false, ir.CurFunc.Type().Results(), n.Results, true)
+ }
n.SetTypecheck(3)
return n
}
@@ -266,6 +272,12 @@ func (g *irgen) forStmt(stmt *syntax.ForStmt) ir.Node {
key, value := unpackTwo(lhs)
n := ir.NewRangeStmt(g.pos(r), key, value, g.expr(r.X), g.blockStmt(stmt.Body))
n.Def = initDefn(n, names)
+ if key != nil {
+ transformCheckAssign(n, key)
+ }
+ if value != nil {
+ transformCheckAssign(n, value)
+ }
return n
}
diff --git a/src/cmd/compile/internal/noder/sync.go b/src/cmd/compile/internal/noder/sync.go
new file mode 100644
index 0000000000..7af558f8b2
--- /dev/null
+++ b/src/cmd/compile/internal/noder/sync.go
@@ -0,0 +1,187 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "fmt"
+ "strings"
+)
+
+// enableSync controls whether sync markers are written into unified
+// IR's export data format and also whether they're expected when
+// reading them back in. They're inessential to the correct
+// functioning of unified IR, but are helpful during development to
+// detect mistakes.
+//
+// When sync is enabled, writer stack frames will also be included in
+// the export data. Currently, a fixed number of frames are included,
+// controlled by -d=syncframes (default 0).
+const enableSync = true
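The idea is easiest to see in a toy encoder/decoder (assumed, simplified encoding; the real unified IR format differs): the writer emits a marker before each item and the reader verifies it, so any drift between the two shows up at the first mismatched marker rather than as silently corrupted data.

package main

import (
	"bytes"
	"fmt"
)

type syncMark byte

const (
	markBool syncMark = iota + 1
	markString
)

func writeBool(buf *bytes.Buffer, v bool) {
	buf.WriteByte(byte(markBool)) // marker first, then the payload
	if v {
		buf.WriteByte(1)
	} else {
		buf.WriteByte(0)
	}
}

func readBool(buf *bytes.Buffer) bool {
	if m, _ := buf.ReadByte(); syncMark(m) != markBool {
		panic(fmt.Sprintf("desync: want bool marker, got %d", m))
	}
	b, _ := buf.ReadByte()
	return b != 0
}

func main() {
	var buf bytes.Buffer
	writeBool(&buf, true)
	fmt.Println(readBool(&buf)) // true
}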
+
+// fmtFrames formats a backtrace for reporting reader/writer desyncs.
+func fmtFrames(pcs ...uintptr) []string {
+ res := make([]string, 0, len(pcs))
+ walkFrames(pcs, func(file string, line int, name string, offset uintptr) {
+ // Trim package from function name. It's just redundant noise.
+ name = strings.TrimPrefix(name, "cmd/compile/internal/noder.")
+
+ res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset))
+ })
+ return res
+}
+
+type frameVisitor func(file string, line int, name string, offset uintptr)
+
+// syncMarker is an enum type that represents markers that may be
+// written to export data to ensure the reader and writer stay
+// synchronized.
+type syncMarker int
+
+//go:generate stringer -type=syncMarker -trimprefix=sync
+
+// TODO(mdempsky): Cleanup unneeded sync markers.
+
+// TODO(mdempsky): Split these markers into public/stable markers, and
+// private ones. Also, trim unused ones.
+const (
+ _ syncMarker = iota
+ syncNode
+ syncBool
+ syncInt64
+ syncUint64
+ syncString
+ syncPos
+ syncPkg
+ syncSym
+ syncSelector
+ syncKind
+ syncType
+ syncTypePkg
+ syncSignature
+ syncParam
+ syncOp
+ syncObject
+ syncExpr
+ syncStmt
+ syncDecl
+ syncConstDecl
+ syncFuncDecl
+ syncTypeDecl
+ syncVarDecl
+ syncPragma
+ syncValue
+ syncEOF
+ syncMethod
+ syncFuncBody
+ syncUse
+ syncUseObj
+ syncObjectIdx
+ syncTypeIdx
+ syncBOF
+ syncEntry
+ syncOpenScope
+ syncCloseScope
+ syncGlobal
+ syncLocal
+ syncDefine
+ syncDefLocal
+ syncUseLocal
+ syncDefGlobal
+ syncUseGlobal
+ syncTypeParams
+ syncUseLabel
+ syncDefLabel
+ syncFuncLit
+ syncCommonFunc
+ syncBodyRef
+ syncLinksymExt
+ syncHack
+ syncSetlineno
+ syncName
+ syncImportDecl
+ syncDeclNames
+ syncDeclName
+ syncExprList
+ syncExprs
+ syncWrapname
+ syncTypeExpr
+ syncTypeExprOrNil
+ syncChanDir
+ syncParams
+ syncCloseAnotherScope
+ syncSum
+ syncUnOp
+ syncBinOp
+ syncStructType
+ syncInterfaceType
+ syncPackname
+ syncEmbedded
+ syncStmts
+ syncStmtsFall
+ syncStmtFall
+ syncBlockStmt
+ syncIfStmt
+ syncForStmt
+ syncSwitchStmt
+ syncRangeStmt
+ syncCaseClause
+ syncCommClause
+ syncSelectStmt
+ syncDecls
+ syncLabeledStmt
+ syncCompLit
+
+ sync1
+ sync2
+ sync3
+ sync4
+
+ syncN
+ syncDefImplicit
+ syncUseName
+ syncUseObjLocal
+ syncAddLocal
+ syncBothSignature
+ syncSetUnderlying
+ syncLinkname
+ syncStmt1
+ syncStmtsEnd
+ syncDeclare
+ syncTopDecls
+ syncTopConstDecl
+ syncTopFuncDecl
+ syncTopTypeDecl
+ syncTopVarDecl
+ syncObject1
+ syncAddBody
+ syncLabel
+ syncFuncExt
+ syncMethExt
+ syncOptLabel
+ syncScalar
+ syncStmtDecls
+ syncDeclLocal
+ syncObjLocal
+ syncObjLocal1
+ syncDeclareLocal
+ syncPublic
+ syncPrivate
+ syncRelocs
+ syncReloc
+ syncUseReloc
+ syncVarExt
+ syncPkgDef
+ syncTypeExt
+ syncVal
+ syncCodeObj
+ syncPosBase
+ syncLocalIdent
+ syncTypeParamNames
+ syncTypeParamBounds
+ syncImplicitTypes
+ syncObjectName
+)
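
The markers above carry no data of their own; the writer emits one before each datum and the reader checks that it sees the same one, so a desync is caught at the point of divergence rather than surfacing later as mysteriously bad data. A minimal, self-contained sketch of the idea using toy types (not the real pkgEncoder/pkgDecoder):

    // A toy illustration of the sync-marker idea (not the real encoder or
    // decoder): the writer emits a marker before each datum, and the reader
    // checks that it sees the same marker before decoding.
    package main

    import (
    	"bytes"
    	"fmt"
    )

    type marker byte

    const (
    	markBool marker = iota + 1
    	markString
    )

    type toyWriter struct{ buf bytes.Buffer }

    func (w *toyWriter) sync(m marker) { w.buf.WriteByte(byte(m)) }

    func (w *toyWriter) bool(b bool) {
    	w.sync(markBool)
    	if b {
    		w.buf.WriteByte(1)
    	} else {
    		w.buf.WriteByte(0)
    	}
    }

    func (w *toyWriter) string(s string) {
    	w.sync(markString)
    	w.buf.WriteByte(byte(len(s))) // toy length prefix; assumes len(s) < 256
    	w.buf.WriteString(s)
    }

    type toyReader struct{ buf *bytes.Reader }

    func (r *toyReader) sync(want marker) {
    	got, _ := r.buf.ReadByte()
    	if marker(got) != want {
    		panic(fmt.Sprintf("desync: want marker %d, got %d", want, got))
    	}
    }

    func (r *toyReader) bool() bool {
    	r.sync(markBool)
    	b, _ := r.buf.ReadByte()
    	return b != 0
    }

    func main() {
    	var w toyWriter
    	w.bool(true)
    	w.string("hi")

    	r := toyReader{buf: bytes.NewReader(w.buf.Bytes())}
    	fmt.Println(r.bool()) // true

    	defer func() { fmt.Println("recovered:", recover()) }()
    	r.bool() // the next datum is a string, so the marker check panics
    }

With enableSync on, the real writer additionally records stack frames (see fmtFrames above) so a desync report can show where each side was.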
diff --git a/src/cmd/compile/internal/noder/syncmarker_string.go b/src/cmd/compile/internal/noder/syncmarker_string.go
new file mode 100644
index 0000000000..14747b7c10
--- /dev/null
+++ b/src/cmd/compile/internal/noder/syncmarker_string.go
@@ -0,0 +1,155 @@
+// Code generated by "stringer -type=syncMarker -trimprefix=sync"; DO NOT EDIT.
+
+package noder
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[syncNode-1]
+ _ = x[syncBool-2]
+ _ = x[syncInt64-3]
+ _ = x[syncUint64-4]
+ _ = x[syncString-5]
+ _ = x[syncPos-6]
+ _ = x[syncPkg-7]
+ _ = x[syncSym-8]
+ _ = x[syncSelector-9]
+ _ = x[syncKind-10]
+ _ = x[syncType-11]
+ _ = x[syncTypePkg-12]
+ _ = x[syncSignature-13]
+ _ = x[syncParam-14]
+ _ = x[syncOp-15]
+ _ = x[syncObject-16]
+ _ = x[syncExpr-17]
+ _ = x[syncStmt-18]
+ _ = x[syncDecl-19]
+ _ = x[syncConstDecl-20]
+ _ = x[syncFuncDecl-21]
+ _ = x[syncTypeDecl-22]
+ _ = x[syncVarDecl-23]
+ _ = x[syncPragma-24]
+ _ = x[syncValue-25]
+ _ = x[syncEOF-26]
+ _ = x[syncMethod-27]
+ _ = x[syncFuncBody-28]
+ _ = x[syncUse-29]
+ _ = x[syncUseObj-30]
+ _ = x[syncObjectIdx-31]
+ _ = x[syncTypeIdx-32]
+ _ = x[syncBOF-33]
+ _ = x[syncEntry-34]
+ _ = x[syncOpenScope-35]
+ _ = x[syncCloseScope-36]
+ _ = x[syncGlobal-37]
+ _ = x[syncLocal-38]
+ _ = x[syncDefine-39]
+ _ = x[syncDefLocal-40]
+ _ = x[syncUseLocal-41]
+ _ = x[syncDefGlobal-42]
+ _ = x[syncUseGlobal-43]
+ _ = x[syncTypeParams-44]
+ _ = x[syncUseLabel-45]
+ _ = x[syncDefLabel-46]
+ _ = x[syncFuncLit-47]
+ _ = x[syncCommonFunc-48]
+ _ = x[syncBodyRef-49]
+ _ = x[syncLinksymExt-50]
+ _ = x[syncHack-51]
+ _ = x[syncSetlineno-52]
+ _ = x[syncName-53]
+ _ = x[syncImportDecl-54]
+ _ = x[syncDeclNames-55]
+ _ = x[syncDeclName-56]
+ _ = x[syncExprList-57]
+ _ = x[syncExprs-58]
+ _ = x[syncWrapname-59]
+ _ = x[syncTypeExpr-60]
+ _ = x[syncTypeExprOrNil-61]
+ _ = x[syncChanDir-62]
+ _ = x[syncParams-63]
+ _ = x[syncCloseAnotherScope-64]
+ _ = x[syncSum-65]
+ _ = x[syncUnOp-66]
+ _ = x[syncBinOp-67]
+ _ = x[syncStructType-68]
+ _ = x[syncInterfaceType-69]
+ _ = x[syncPackname-70]
+ _ = x[syncEmbedded-71]
+ _ = x[syncStmts-72]
+ _ = x[syncStmtsFall-73]
+ _ = x[syncStmtFall-74]
+ _ = x[syncBlockStmt-75]
+ _ = x[syncIfStmt-76]
+ _ = x[syncForStmt-77]
+ _ = x[syncSwitchStmt-78]
+ _ = x[syncRangeStmt-79]
+ _ = x[syncCaseClause-80]
+ _ = x[syncCommClause-81]
+ _ = x[syncSelectStmt-82]
+ _ = x[syncDecls-83]
+ _ = x[syncLabeledStmt-84]
+ _ = x[syncCompLit-85]
+ _ = x[sync1-86]
+ _ = x[sync2-87]
+ _ = x[sync3-88]
+ _ = x[sync4-89]
+ _ = x[syncN-90]
+ _ = x[syncDefImplicit-91]
+ _ = x[syncUseName-92]
+ _ = x[syncUseObjLocal-93]
+ _ = x[syncAddLocal-94]
+ _ = x[syncBothSignature-95]
+ _ = x[syncSetUnderlying-96]
+ _ = x[syncLinkname-97]
+ _ = x[syncStmt1-98]
+ _ = x[syncStmtsEnd-99]
+ _ = x[syncDeclare-100]
+ _ = x[syncTopDecls-101]
+ _ = x[syncTopConstDecl-102]
+ _ = x[syncTopFuncDecl-103]
+ _ = x[syncTopTypeDecl-104]
+ _ = x[syncTopVarDecl-105]
+ _ = x[syncObject1-106]
+ _ = x[syncAddBody-107]
+ _ = x[syncLabel-108]
+ _ = x[syncFuncExt-109]
+ _ = x[syncMethExt-110]
+ _ = x[syncOptLabel-111]
+ _ = x[syncScalar-112]
+ _ = x[syncStmtDecls-113]
+ _ = x[syncDeclLocal-114]
+ _ = x[syncObjLocal-115]
+ _ = x[syncObjLocal1-116]
+ _ = x[syncDeclareLocal-117]
+ _ = x[syncPublic-118]
+ _ = x[syncPrivate-119]
+ _ = x[syncRelocs-120]
+ _ = x[syncReloc-121]
+ _ = x[syncUseReloc-122]
+ _ = x[syncVarExt-123]
+ _ = x[syncPkgDef-124]
+ _ = x[syncTypeExt-125]
+ _ = x[syncVal-126]
+ _ = x[syncCodeObj-127]
+ _ = x[syncPosBase-128]
+ _ = x[syncLocalIdent-129]
+ _ = x[syncTypeParamNames-130]
+ _ = x[syncTypeParamBounds-131]
+ _ = x[syncImplicitTypes-132]
+}
+
+const _syncMarker_name = "NodeBoolInt64Uint64StringPosPkgSymSelectorKindTypeTypePkgSignatureParamOpObjectExprStmtDeclConstDeclFuncDeclTypeDeclVarDeclPragmaValueEOFMethodFuncBodyUseUseObjObjectIdxTypeIdxBOFEntryOpenScopeCloseScopeGlobalLocalDefineDefLocalUseLocalDefGlobalUseGlobalTypeParamsUseLabelDefLabelFuncLitCommonFuncBodyRefLinksymExtHackSetlinenoNameImportDeclDeclNamesDeclNameExprListExprsWrapnameTypeExprTypeExprOrNilChanDirParamsCloseAnotherScopeSumUnOpBinOpStructTypeInterfaceTypePacknameEmbeddedStmtsStmtsFallStmtFallBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtCompLit1234NDefImplicitUseNameUseObjLocalAddLocalBothSignatureSetUnderlyingLinknameStmt1StmtsEndDeclareTopDeclsTopConstDeclTopFuncDeclTopTypeDeclTopVarDeclObject1AddBodyLabelFuncExtMethExtOptLabelScalarStmtDeclsDeclLocalObjLocalObjLocal1DeclareLocalPublicPrivateRelocsRelocUseRelocVarExtPkgDefTypeExtValCodeObjPosBaseLocalIdentTypeParamNamesTypeParamBoundsImplicitTypes"
+
+var _syncMarker_index = [...]uint16{0, 4, 8, 13, 19, 25, 28, 31, 34, 42, 46, 50, 57, 66, 71, 73, 79, 83, 87, 91, 100, 108, 116, 123, 129, 134, 137, 143, 151, 154, 160, 169, 176, 179, 184, 193, 203, 209, 214, 220, 228, 236, 245, 254, 264, 272, 280, 287, 297, 304, 314, 318, 327, 331, 341, 350, 358, 366, 371, 379, 387, 400, 407, 413, 430, 433, 437, 442, 452, 465, 473, 481, 486, 495, 503, 512, 518, 525, 535, 544, 554, 564, 574, 579, 590, 597, 598, 599, 600, 601, 602, 613, 620, 631, 639, 652, 665, 673, 678, 686, 693, 701, 713, 724, 735, 745, 752, 759, 764, 771, 778, 786, 792, 801, 810, 818, 827, 839, 845, 852, 858, 863, 871, 877, 883, 890, 893, 900, 907, 917, 931, 946, 959}
+
+func (i syncMarker) String() string {
+ i -= 1
+ if i < 0 || i >= syncMarker(len(_syncMarker_index)-1) {
+ return "syncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")"
+ }
+ return _syncMarker_name[_syncMarker_index[i]:_syncMarker_index[i+1]]
+}
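
Because the stringer invocation uses -trimprefix=sync, printing a marker yields the bare name that appears in desync reports. A small sketch, assuming it sits inside package noder alongside the generated code above:

    // Sketch: a file in package noder could print marker names like this.
    package noder

    import "fmt"

    func printMarkerNames() {
    	fmt.Println(syncBool)        // prints "Bool" (the "sync" prefix is trimmed)
    	fmt.Println(syncMarker(999)) // prints "syncMarker(999)" for out-of-range values
    }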
diff --git a/src/cmd/compile/internal/noder/transform.go b/src/cmd/compile/internal/noder/transform.go
index 2859089e69..e1eeb8e739 100644
--- a/src/cmd/compile/internal/noder/transform.go
+++ b/src/cmd/compile/internal/noder/transform.go
@@ -85,7 +85,15 @@ func stringtoruneslit(n *ir.ConvExpr) ir.Node {
// etc. Corresponds to typecheck.tcConv.
func transformConv(n *ir.ConvExpr) ir.Node {
t := n.X.Type()
- op, _ := typecheck.Convertop(n.X.Op() == ir.OLITERAL, t, n.Type())
+ op, why := typecheck.Convertop(n.X.Op() == ir.OLITERAL, t, n.Type())
+ if op == ir.OXXX {
+ // types2 currently ignores pragmas, so a 'notinheap' mismatch is the
+ // one type-related error that it does not catch. This error will be
+ // caught here by Convertop (see two checks near beginning of
+ // Convertop) and reported at the end of noding.
+ base.ErrorfAt(n.Pos(), "cannot convert %L to type %v%s", n.X, n.Type(), why)
+ return n
+ }
n.SetOp(op)
switch n.Op() {
case ir.OCONVNOP:
@@ -122,7 +130,8 @@ func transformConvCall(n *ir.CallExpr) ir.Node {
}
// transformCall transforms a normal function/method call. Corresponds to last half
-// (non-conversion, non-builtin part) of typecheck.tcCall.
+// (non-conversion, non-builtin part) of typecheck.tcCall. This code should work even
+// in the case of OCALL/OFUNCINST.
func transformCall(n *ir.CallExpr) {
// n.Type() can be nil for calls with no return value
assert(n.Typecheck() == 1)
@@ -148,10 +157,11 @@ func transformCall(n *ir.CallExpr) {
n.SetOp(ir.OCALLFUNC)
}
- typecheckaste(ir.OCALL, n.X, n.IsDDD, t.Params(), n.Args)
+ typecheckaste(ir.OCALL, n.X, n.IsDDD, t.Params(), n.Args, false)
+ if l.Op() == ir.ODOTMETH && len(deref(n.X.Type().Recv().Type).RParams()) == 0 {
+ typecheck.FixMethodCall(n)
+ }
if t.NumResults() == 1 {
- n.SetType(l.Type().Results().Field(0).Type)
-
if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME {
if sym := n.X.(*ir.Name).Sym(); types.IsRuntimePkg(sym.Pkg) && sym.Name == "getg" {
// Emit code for runtime.getg() directly instead of calling function.
@@ -185,7 +195,7 @@ func transformCompare(n *ir.BinaryExpr) {
aop, _ := typecheck.Assignop(lt, rt)
if aop != ir.OXXX {
types.CalcSize(lt)
- if rt.IsInterface() == lt.IsInterface() || lt.Width >= 1<<16 {
+ if lt.HasTParam() || rt.IsInterface() == lt.IsInterface() || lt.Width >= 1<<16 {
l = ir.NewConvExpr(base.Pos, aop, rt, l)
l.SetTypecheck(1)
}
@@ -198,7 +208,7 @@ func transformCompare(n *ir.BinaryExpr) {
aop, _ := typecheck.Assignop(rt, lt)
if aop != ir.OXXX {
types.CalcSize(rt)
- if rt.IsInterface() == lt.IsInterface() || rt.Width >= 1<<16 {
+ if rt.HasTParam() || rt.IsInterface() == lt.IsInterface() || rt.Width >= 1<<16 {
r = ir.NewConvExpr(base.Pos, aop, lt, r)
r.SetTypecheck(1)
}
@@ -303,6 +313,10 @@ assignOK:
r := r.(*ir.TypeAssertExpr)
stmt.SetOp(ir.OAS2DOTTYPE)
r.SetOp(ir.ODOTTYPE2)
+ case ir.ODYNAMICDOTTYPE:
+ r := r.(*ir.DynamicTypeAssertExpr)
+ stmt.SetOp(ir.OAS2DOTTYPE)
+ r.SetOp(ir.ODYNAMICDOTTYPE2)
default:
break assignOK
}
@@ -323,11 +337,22 @@ assignOK:
stmt := stmt.(*ir.AssignListStmt)
stmt.SetOp(ir.OAS2FUNC)
r := rhs[0].(*ir.CallExpr)
- r.Use = ir.CallUseList
rtyp := r.Type()
+ mismatched := false
+ failed := false
for i := range lhs {
- checkLHS(i, rtyp.Field(i).Type)
+ result := rtyp.Field(i).Type
+ checkLHS(i, result)
+
+ if lhs[i].Type() == nil || result == nil {
+ failed = true
+ } else if lhs[i] != ir.BlankNode && !types.Identical(lhs[i].Type(), result) {
+ mismatched = true
+ }
+ }
+ if mismatched && !failed {
+ typecheck.RewriteMultiValueCall(stmt, r)
}
return
}
@@ -340,12 +365,12 @@ assignOK:
}
}
-// Corresponds to typecheck.typecheckargs.
+// Corresponds to typecheck.typecheckargs. Really just deals with multi-value calls.
func transformArgs(n ir.InitNode) {
var list []ir.Node
switch n := n.(type) {
default:
- base.Fatalf("typecheckargs %+v", n.Op())
+ base.Fatalf("transformArgs %+v", n.Op())
case *ir.CallExpr:
list = n.Args
if n.IsDDD {
@@ -363,46 +388,13 @@ func transformArgs(n ir.InitNode) {
return
}
- // Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...).
-
// Save n as n.Orig for fmt.go.
if ir.Orig(n) == n {
n.(ir.OrigNode).SetOrig(ir.SepCopy(n))
}
- as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
- as.Rhs.Append(list...)
-
- // If we're outside of function context, then this call will
- // be executed during the generated init function. However,
- // init.go hasn't yet created it. Instead, associate the
- // temporary variables with InitTodoFunc for now, and init.go
- // will reassociate them later when it's appropriate.
- static := ir.CurFunc == nil
- if static {
- ir.CurFunc = typecheck.InitTodoFunc
- }
- list = nil
- for _, f := range t.FieldSlice() {
- t := typecheck.Temp(f.Type)
- as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, t))
- as.Lhs.Append(t)
- list = append(list, t)
- }
- if static {
- ir.CurFunc = nil
- }
-
- switch n := n.(type) {
- case *ir.CallExpr:
- n.Args = list
- case *ir.ReturnStmt:
- n.Results = list
- }
-
- transformAssign(as, as.Lhs, as.Rhs)
- as.SetTypecheck(1)
- n.PtrInit().Append(as)
+ // Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...).
+ typecheck.RewriteMultiValueCall(n, list[0])
}
// assignconvfn converts node n for assignment to type t. Corresponds to
@@ -416,7 +408,10 @@ func assignconvfn(n ir.Node, t *types.Type) ir.Node {
return n
}
- op, _ := typecheck.Assignop(n.Type(), t)
+ op, why := typecheck.Assignop(n.Type(), t)
+ if op == ir.OXXX {
+ base.Fatalf("found illegal assignment %+v -> %+v; %s", n.Type(), t, why)
+ }
r := ir.NewConvExpr(base.Pos, op, t, n)
r.SetTypecheck(1)
@@ -424,8 +419,11 @@ func assignconvfn(n ir.Node, t *types.Type) ir.Node {
return r
}
-// Corresponds to typecheck.typecheckaste.
-func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl ir.Nodes) {
+// Corresponds to typecheck.typecheckaste, but we add an extra flag convifaceOnly.
+// If convifaceOnly is true, we only do interface conversion. We use this to do
+// early insertion of CONVIFACE nodes during noder2, when the function or args may
+// have typeparams.
+func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl ir.Nodes, convifaceOnly bool) {
var t *types.Type
var i int
@@ -444,7 +442,7 @@ func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl i
if isddd {
n = nl[i]
ir.SetPos(n)
- if n.Type() != nil {
+ if n.Type() != nil && (!convifaceOnly || t.IsInterface()) {
nl[i] = assignconvfn(n, t)
}
return
@@ -454,7 +452,7 @@ func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl i
for ; i < len(nl); i++ {
n = nl[i]
ir.SetPos(n)
- if n.Type() != nil {
+ if n.Type() != nil && (!convifaceOnly || t.IsInterface()) {
nl[i] = assignconvfn(n, t.Elem())
}
}
@@ -463,7 +461,7 @@ func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl i
n = nl[i]
ir.SetPos(n)
- if n.Type() != nil {
+ if n.Type() != nil && (!convifaceOnly || t.IsInterface()) {
nl[i] = assignconvfn(n, t)
}
i++
@@ -485,7 +483,7 @@ func transformReturn(rs *ir.ReturnStmt) {
return
}
- typecheckaste(ir.ORETURN, nil, false, ir.CurFunc.Type().Results(), nl)
+ typecheckaste(ir.ORETURN, nil, false, ir.CurFunc.Type().Results(), nl, false)
}
// transformSelect transforms a select node, creating an assignment list as needed
@@ -537,13 +535,31 @@ func transformAsOp(n *ir.AssignOpStmt) {
}
// transformDot transforms an OXDOT (or ODOT) into an ODOT, ODOTPTR, ODOTMETH,
-// ODOTINTER, or OCALLPART, as appropriate. It adds in extra nodes as needed to
+// ODOTINTER, or OMETHVALUE, as appropriate. It adds in extra nodes as needed to
// access embedded fields. Corresponds to typecheck.tcDot.
func transformDot(n *ir.SelectorExpr, isCall bool) ir.Node {
assert(n.Type() != nil && n.Typecheck() == 1)
if n.Op() == ir.OXDOT {
n = typecheck.AddImplicitDots(n)
n.SetOp(ir.ODOT)
+
+ // Set the Selection field and typecheck flag for any new ODOT nodes
+ // added by AddImplicitDots(), and also transform to ODOTPTR if
+ // needed. Equivalent to 'n.X = typecheck(n.X, ctxExpr|ctxType)' in
+ // tcDot.
+ for n1 := n; n1.X.Op() == ir.ODOT; {
+ n1 = n1.X.(*ir.SelectorExpr)
+ if !n1.Implicit() {
+ break
+ }
+ t1 := n1.X.Type()
+ if t1.IsPtr() && !t1.Elem().IsInterface() {
+ t1 = t1.Elem()
+ n1.SetOp(ir.ODOTPTR)
+ }
+ typecheck.Lookdot(n1, t1, 0)
+ n1.SetTypecheck(1)
+ }
}
t := n.X.Type()
@@ -561,8 +577,13 @@ func transformDot(n *ir.SelectorExpr, isCall bool) ir.Node {
assert(f != nil)
if (n.Op() == ir.ODOTINTER || n.Op() == ir.ODOTMETH) && !isCall {
- n.SetOp(ir.OCALLPART)
- n.SetType(typecheck.MethodValueWrapper(n).Type())
+ n.SetOp(ir.OMETHVALUE)
+ if len(n.X.Type().RParams()) > 0 || n.X.Type().IsPtr() && len(n.X.Type().Elem().RParams()) > 0 {
+ // TODO: MethodValueWrapper needed for generics?
+ // Or did we successfully desugar all that at stencil time?
+ return n
+ }
+ n.SetType(typecheck.NewMethodType(n.Type(), nil))
}
return n
}
@@ -594,7 +615,11 @@ func transformMethodExpr(n *ir.SelectorExpr) (res ir.Node) {
s := n.Sel
m := typecheck.Lookdot1(n, s, t, ms, 0)
- assert(m != nil)
+ if !t.HasShape() {
+ // It's OK to not find the method if t is instantiated by shape types,
+ // because we will use the methods on the generic type anyway.
+ assert(m != nil)
+ }
n.SetOp(ir.OMETHEXPR)
n.Selection = m
@@ -911,9 +936,7 @@ func transformCompLit(n *ir.CompLitExpr) (res ir.Node) {
f := t.Field(i)
n1 = assignconvfn(n1, f.Type)
- sk := ir.NewStructKeyExpr(base.Pos, f.Sym, n1)
- sk.Offset = f.Offset
- ls[i] = sk
+ ls[i] = ir.NewStructKeyExpr(base.Pos, f, n1)
}
assert(len(ls) >= t.NumFields())
} else {
@@ -922,33 +945,26 @@ func transformCompLit(n *ir.CompLitExpr) (res ir.Node) {
for i, l := range ls {
ir.SetPos(l)
- if l.Op() == ir.OKEY {
- kv := l.(*ir.KeyExpr)
- key := kv.Key
-
- // Sym might have resolved to name in other top-level
- // package, because of import dot. Redirect to correct sym
- // before we do the lookup.
- s := key.Sym()
- if id, ok := key.(*ir.Ident); ok && typecheck.DotImportRefs[id] != nil {
- s = typecheck.Lookup(s.Name)
- }
-
- // An OXDOT uses the Sym field to hold
- // the field to the right of the dot,
- // so s will be non-nil, but an OXDOT
- // is never a valid struct literal key.
- assert(!(s == nil || s.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || s.IsBlank()))
+ kv := l.(*ir.KeyExpr)
+ key := kv.Key
- l = ir.NewStructKeyExpr(l.Pos(), s, kv.Value)
- ls[i] = l
+ // Sym might have resolved to name in other top-level
+ // package, because of import dot. Redirect to correct sym
+ // before we do the lookup.
+ s := key.Sym()
+ if id, ok := key.(*ir.Ident); ok && typecheck.DotImportRefs[id] != nil {
+ s = typecheck.Lookup(s.Name)
}
- assert(l.Op() == ir.OSTRUCTKEY)
- l := l.(*ir.StructKeyExpr)
+ // An OXDOT uses the Sym field to hold
+ // the field to the right of the dot,
+ // so s will be non-nil, but an OXDOT
+ // is never a valid struct literal key.
+ assert(!(s == nil || s.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || s.IsBlank()))
- f := typecheck.Lookdot1(nil, l.Field, t, t.Fields(), 0)
- l.Offset = f.Offset
+ f := typecheck.Lookdot1(nil, s, t, t.Fields(), 0)
+ l := ir.NewStructKeyExpr(l.Pos(), f, kv.Value)
+ ls[i] = l
l.Value = assignconvfn(l.Value, f.Type)
}
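
One piece of context for the transformArgs change above: typecheck.RewriteMultiValueCall takes over the old inline rewrite of f(g()) into temporaries. A hypothetical source-level analogue (the compiler performs the rewrite on IR, not on source text):

    // Hypothetical source-level analogue of the multi-value call rewrite:
    // a call whose arguments come from one multi-valued call is split into
    // temporaries plus a plain call.
    package main

    import "fmt"

    func g() (int, string) { return 1, "one" }

    func f(n int, s string) { fmt.Println(n, s) }

    func main() {
    	f(g()) // effectively becomes the two lines below

    	t1, t2 := g()
    	f(t1, t2)
    }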
diff --git a/src/cmd/compile/internal/noder/types.go b/src/cmd/compile/internal/noder/types.go
index 8680559a41..8d596e599e 100644
--- a/src/cmd/compile/internal/noder/types.go
+++ b/src/cmd/compile/internal/noder/types.go
@@ -39,6 +39,11 @@ func (g *irgen) typ(typ types2.Type) *types.Type {
// recursive types have been fully constructed before we call CheckSize.
if res != nil && !res.IsUntyped() && !res.IsFuncArgStruct() && !res.HasTParam() {
types.CheckSize(res)
+ if res.IsPtr() {
+ // Pointers always have their size set, even though their element
+ // may not have its size set.
+ types.CheckSize(res.Elem())
+ }
}
return res
}
@@ -68,8 +73,10 @@ func instTypeName2(name string, targs []types2.Type) string {
if i > 0 {
b.WriteByte(',')
}
+ // Include package names for all types, including typeparams, to
+ // make sure type arguments are uniquely specified.
tname := types2.TypeString(targ,
- func(*types2.Package) string { return "" })
+ func(pkg *types2.Package) string { return pkg.Name() })
if strings.Index(tname, ", ") >= 0 {
// types2.TypeString puts spaces after a comma in a type
// list, but we don't want spaces in our actual type names
@@ -89,50 +96,49 @@ func (g *irgen) typ0(typ types2.Type) *types.Type {
case *types2.Basic:
return g.basic(typ)
case *types2.Named:
- if typ.TParams() != nil {
+ // If tparams is set, but targs is not, typ is a base generic
+ // type. typ is appearing as part of the source type of an alias,
+ // since that is the only use of a generic type that doesn't
+ // involve instantiation. We just translate the named type in the
+ // normal way below using g.obj().
+ if typ.TParams() != nil && typ.TArgs() != nil {
// typ is an instantiation of a defined (named) generic type.
// This instantiation should also be a defined (named) type.
// types2 gives us the substituted type in t.Underlying()
// The substituted type may or may not still have type
// params. We might, for example, be substituting one type
// param for another type param.
-
- if typ.TArgs() == nil {
- base.Fatalf("In typ0, Targs should be set if TParams is set")
- }
-
- // When converted to types.Type, typ must have a name,
- // based on the names of the type arguments. We need a
- // name to deal with recursive generic types (and it also
- // looks better when printing types).
+ //
+ // When converted to types.Type, typ has a unique name,
+ // based on the names of the type arguments.
instName := instTypeName2(typ.Obj().Name(), typ.TArgs())
s := g.pkg(typ.Obj().Pkg()).Lookup(instName)
if s.Def != nil {
- // We have already encountered this instantiation,
- // so use the type we previously created, since there
+ // We have already encountered this instantiation.
+ // Use the type we previously created, since there
// must be exactly one instance of a defined type.
return s.Def.Type()
}
// Create a forwarding type first and put it in the g.typs
- // map, in order to deal with recursive generic types.
- // Fully set up the extra ntyp information (Def, RParams,
- // which may set HasTParam) before translating the
- // underlying type itself, so we handle recursion
- // correctly, including via method signatures.
- ntyp := newIncompleteNamedType(g.pos(typ.Obj().Pos()), s)
+ // map, in order to deal with recursive generic types
+ // (including via method signatures). Set up the extra
+ // ntyp information (Def, RParams, which may set
+ // HasTParam) before translating the underlying type
+ // itself, so we handle recursion correctly.
+ ntyp := typecheck.NewIncompleteNamedType(g.pos(typ.Obj().Pos()), s)
g.typs[typ] = ntyp
// If ntyp still has type params, then we must be
// referencing something like 'value[T2]', as when
- // specifying the generic receiver of a method,
- // where value was defined as "type value[T any]
- // ...". Save the type args, which will now be the
- // new type of the current type.
+ // specifying the generic receiver of a method, where
+ // value was defined as "type value[T any] ...". Save the
+ // type args, which will now be the new typeparams of the
+ // current type.
//
// If ntyp does not have type params, we are saving the
- // concrete types used to instantiate this type. We'll use
- // these when instantiating the methods of the
+ // non-generic types used to instantiate this type. We'll
+ // use these when instantiating the methods of the
// instantiated type.
rparams := make([]*types.Type, len(typ.TArgs()))
for i, targ := range typ.TArgs() {
@@ -143,6 +149,8 @@ func (g *irgen) typ0(typ types2.Type) *types.Type {
ntyp.SetUnderlying(g.typ1(typ.Underlying()))
g.fillinMethods(typ, ntyp)
+ // Save the symbol for the base generic type.
+ ntyp.OrigSym = g.pkg(typ.Obj().Pkg()).Lookup(typ.Obj().Name())
return ntyp
}
obj := g.obj(typ.Obj())
@@ -183,12 +191,9 @@ func (g *irgen) typ0(typ types2.Type) *types.Type {
for i := range embeddeds {
// TODO(mdempsky): Get embedding position.
e := typ.EmbeddedType(i)
- if t := types2.AsInterface(e); t != nil && t.IsComparable() {
- // Ignore predefined type 'comparable', since it
- // doesn't resolve and it doesn't have any
- // relevant methods.
- continue
- }
+
+ // With Go 1.18, an embedded element can be any type, not
+ // just an interface.
embeddeds[j] = types.NewField(src.NoXPos, nil, g.typ1(e))
j++
}
@@ -204,20 +209,39 @@ func (g *irgen) typ0(typ types2.Type) *types.Type {
return types.NewInterface(g.tpkg(typ), append(embeddeds, methods...))
case *types2.TypeParam:
- tp := types.NewTypeParam(g.tpkg(typ))
// Save the name of the type parameter in the sym of the type.
// Include the types2 subscript in the sym name
- sym := g.pkg(typ.Obj().Pkg()).Lookup(types2.TypeString(typ, func(*types2.Package) string { return "" }))
- tp.SetSym(sym)
+ pkg := g.tpkg(typ)
+ sym := pkg.Lookup(types2.TypeString(typ, func(*types2.Package) string { return "" }))
+ if sym.Def != nil {
+ // Make sure we use the same type param type for the same
+ // name, whether it is created during types1-import or
+ // this types2-to-types1 translation.
+ return sym.Def.Type()
+ }
+ tp := types.NewTypeParam(sym, typ.Index())
+ nname := ir.NewDeclNameAt(g.pos(typ.Obj().Pos()), ir.OTYPE, sym)
+ sym.Def = nname
+ nname.SetType(tp)
+ tp.SetNod(nname)
// Set g.typs[typ] in case the bound methods reference typ.
g.typs[typ] = tp
- // TODO(danscales): we don't currently need to use the bounds
- // anywhere, so eventually we can probably remove.
- bound := g.typ1(typ.Bound())
- *tp.Methods() = *bound.Methods()
+ bound := g.typ1(typ.Constraint())
+ tp.SetBound(bound)
return tp
+ case *types2.Union:
+ nt := typ.Len()
+ tlist := make([]*types.Type, nt)
+ tildes := make([]bool, nt)
+ for i := range tlist {
+ t := typ.Term(i)
+ tlist[i] = g.typ1(t.Type())
+ tildes[i] = t.Tilde()
+ }
+ return types.NewUnion(tlist, tildes)
+
case *types2.Tuple:
// Tuples are used for the type of a function call (i.e. the
// return value of the function).
@@ -243,19 +267,23 @@ func (g *irgen) typ0(typ types2.Type) *types.Type {
// and for actually generating the methods for instantiated types.
func (g *irgen) fillinMethods(typ *types2.Named, ntyp *types.Type) {
if typ.NumMethods() != 0 {
- targs := make([]ir.Node, len(typ.TArgs()))
+ targs := make([]*types.Type, len(typ.TArgs()))
for i, targ := range typ.TArgs() {
- targs[i] = ir.TypeNode(g.typ1(targ))
+ targs[i] = g.typ1(targ)
}
methods := make([]*types.Field, typ.NumMethods())
for i := range methods {
m := typ.Method(i)
- meth := g.obj(m)
- recvType := types2.AsSignature(m.Type()).Recv().Type()
- ptr := types2.AsPointer(recvType)
- if ptr != nil {
- recvType = ptr.Elem()
+ recvType := deref2(types2.AsSignature(m.Type()).Recv().Type())
+ var meth *ir.Name
+ if m.Pkg() != g.self {
+ // Imported methods cannot be loaded by name (what
+ // g.obj() does) - they must be loaded via their
+ // type.
+ meth = g.obj(recvType.(*types2.Named).Obj()).Type().Methods().Index(i).Nname.(*ir.Name)
+ } else {
+ meth = g.obj(m)
}
if recvType != types2.Type(typ) {
// Unfortunately, meth is the type of the method of the
@@ -276,18 +304,21 @@ func (g *irgen) fillinMethods(typ *types2.Named, ntyp *types.Type) {
} else {
meth2 = ir.NewNameAt(meth.Pos(), newsym)
rparams := types2.AsSignature(m.Type()).RParams()
- tparams := make([]*types.Field, len(rparams))
- for i, rparam := range rparams {
- tparams[i] = types.NewField(src.NoXPos, nil, g.typ1(rparam.Type()))
+ tparams := make([]*types.Type, rparams.Len())
+ for i := range tparams {
+ tparams[i] = g.typ1(rparams.At(i).Type())
}
assert(len(tparams) == len(targs))
- subst := &subster{
- g: g,
- tparams: tparams,
- targs: targs,
+ ts := typecheck.Tsubster{
+ Tparams: tparams,
+ Targs: targs,
}
// Do the substitution of the type
- meth2.SetType(subst.typ(meth.Type()))
+ meth2.SetType(ts.Typ(meth.Type()))
+ // Add any new fully instantiated types
+ // seen during the substitution to
+ // g.instTypeList.
+ g.instTypeList = append(g.instTypeList, ts.InstTypeList...)
newsym.Def = meth2
}
meth = meth2
@@ -296,7 +327,7 @@ func (g *irgen) fillinMethods(typ *types2.Named, ntyp *types.Type) {
methods[i].Nname = meth
}
ntyp.Methods().Set(methods)
- if !ntyp.HasTParam() {
+ if !ntyp.HasTParam() && !ntyp.HasShape() {
// Generate all the methods for a new fully-instantiated type.
g.instTypeList = append(g.instTypeList, ntyp)
}
@@ -305,9 +336,9 @@ func (g *irgen) fillinMethods(typ *types2.Named, ntyp *types.Type) {
func (g *irgen) signature(recv *types.Field, sig *types2.Signature) *types.Type {
tparams2 := sig.TParams()
- tparams := make([]*types.Field, len(tparams2))
+ tparams := make([]*types.Field, tparams2.Len())
for i := range tparams {
- tp := tparams2[i]
+ tp := tparams2.At(i)
tparams[i] = types.NewField(g.pos(tp), g.sym(tp), g.typ1(tp.Type()))
}
@@ -346,7 +377,7 @@ func (g *irgen) selector(obj types2.Object) *types.Sym {
return pkg.Lookup(name)
}
-// tpkg returns the package that a function, interface, or struct type
+// tpkg returns the package that a function, interface, struct, or typeparam type
// expression appeared in.
//
// Caveat: For the degenerate types "func()", "interface{}", and
@@ -356,36 +387,39 @@ func (g *irgen) selector(obj types2.Object) *types.Sym {
// particular types is because go/types does *not* report it for
// them. So in practice this limitation is probably moot.
func (g *irgen) tpkg(typ types2.Type) *types.Pkg {
- anyObj := func() types2.Object {
- switch typ := typ.(type) {
- case *types2.Signature:
- if recv := typ.Recv(); recv != nil {
- return recv
- }
- if params := typ.Params(); params.Len() > 0 {
- return params.At(0)
- }
- if results := typ.Results(); results.Len() > 0 {
- return results.At(0)
- }
- case *types2.Struct:
- if typ.NumFields() > 0 {
- return typ.Field(0)
- }
- case *types2.Interface:
- if typ.NumExplicitMethods() > 0 {
- return typ.ExplicitMethod(0)
- }
- }
- return nil
- }
-
- if obj := anyObj(); obj != nil {
+ if obj := anyObj(typ); obj != nil {
return g.pkg(obj.Pkg())
}
return types.LocalPkg
}
+// anyObj returns some object accessible from typ, if any.
+func anyObj(typ types2.Type) types2.Object {
+ switch typ := typ.(type) {
+ case *types2.Signature:
+ if recv := typ.Recv(); recv != nil {
+ return recv
+ }
+ if params := typ.Params(); params.Len() > 0 {
+ return params.At(0)
+ }
+ if results := typ.Results(); results.Len() > 0 {
+ return results.At(0)
+ }
+ case *types2.Struct:
+ if typ.NumFields() > 0 {
+ return typ.Field(0)
+ }
+ case *types2.Interface:
+ if typ.NumExplicitMethods() > 0 {
+ return typ.ExplicitMethod(0)
+ }
+ case *types2.TypeParam:
+ return typ.Obj()
+ }
+ return nil
+}
+
func (g *irgen) basic(typ *types2.Basic) *types.Type {
switch typ.Name() {
case "byte":
@@ -430,3 +464,11 @@ var dirs = [...]types.ChanDir{
types2.SendOnly: types.Csend,
types2.RecvOnly: types.Crecv,
}
+
+// deref2 does a single deref of types2 type t, if it is a pointer type.
+func deref2(t types2.Type) types2.Type {
+ if ptr := types2.AsPointer(t); ptr != nil {
+ t = ptr.Elem()
+ }
+ return t
+}
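
The instTypeName2 change above qualifies every type argument with its package name so that distinct instantiations cannot collide. A rough sketch of the naming idea with a hypothetical helper (the real code uses types2.TypeString with a package qualifier):

    // Rough sketch of the instantiation-naming scheme: base name plus a
    // bracketed, comma-separated list of package-qualified type-argument
    // names, so argument types from different packages stay distinct.
    // (Hypothetical helper, not the compiler's actual formatting code.)
    package main

    import (
    	"fmt"
    	"strings"
    )

    func instName(base string, qualifiedTArgs ...string) string {
    	return base + "[" + strings.Join(qualifiedTArgs, ",") + "]"
    }

    func main() {
    	// "a.myInt" and "b.myInt" are different types; qualifying the
    	// arguments keeps the two instantiations distinct.
    	fmt.Println(instName("Pair", "a.myInt", "string")) // Pair[a.myInt,string]
    	fmt.Println(instName("Pair", "b.myInt", "string")) // Pair[b.myInt,string]
    }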
diff --git a/src/cmd/compile/internal/noder/unified.go b/src/cmd/compile/internal/noder/unified.go
new file mode 100644
index 0000000000..9f80ca000d
--- /dev/null
+++ b/src/cmd/compile/internal/noder/unified.go
@@ -0,0 +1,340 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "bytes"
+ "fmt"
+ "internal/goversion"
+ "io"
+ "runtime"
+ "sort"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/inline"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/compile/internal/types2"
+ "cmd/internal/src"
+)
+
+// localPkgReader holds the package reader used for reading the local
+// package. It exists so the unified IR linker can refer back to it
+// later.
+var localPkgReader *pkgReader
+
+// unified constructs the local package's IR from syntax's AST.
+//
+// The pipeline contains 2 steps:
+//
+// (1) Generate package export data "stub".
+//
+// (2) Generate package IR from package export data.
+//
+// The package data "stub" at step (1) contains everything from the local package,
+// but nothing that has been imported. When we're actually writing out export data
+// to the output files (see the writeNewExport function), we run the "linker", which does
+// a few things:
+//
+// + Updates compiler extensions data (e.g., inlining cost, escape analysis results).
+//
+// + Handles re-exporting any transitive dependencies.
+//
+// + Prunes out any unnecessary details (e.g., non-inlineable functions, because any
+// downstream importers only care about inlinable functions).
+//
+// The source files are typechecked twice: once before writing export data
+// using the types2 checker, and once after reading the export data back in
+// using gc/typecheck. This duplication of work will go away once we always
+// use the types2 checker and can remove the gc/typecheck pass. The reasons
+// it is still here:
+//
+// + It reduces engineering costs in maintaining a fork of typecheck
+// (e.g., no need to backport fixes like CL 327651).
+//
+// + It makes it easier to pass toolstash -cmp.
+//
+// + Historically, we would always re-run the typechecker after import, even though
+// we know the imported data is valid. It's not ideal, but it's not causing
+// any problems either.
+//
+// + There are still transformations being done during gc/typecheck, like rewriting
+// multi-valued function calls, or transforming ir.OINDEX -> ir.OINDEXMAP.
+//
+// Using the syntax+types2 tree, which already has a complete representation of generics,
+// the unified IR has the full typed AST for doing introspection during step (1).
+// In other words, we have all necessary information to build the generic IR form
+// (see writer.captureVars for an example).
+func unified(noders []*noder) {
+ inline.NewInline = InlineCall
+
+ if !quirksMode() {
+ writeNewExportFunc = writeNewExport
+ } else if base.Flag.G != 0 {
+ base.Errorf("cannot use -G and -d=quirksmode together")
+ }
+
+ newReadImportFunc = func(data string, pkg1 *types.Pkg, check *types2.Checker, packages map[string]*types2.Package) (pkg2 *types2.Package, err error) {
+ pr := newPkgDecoder(pkg1.Path, data)
+
+ // Read package descriptors for both types2 and compiler backend.
+ readPackage(newPkgReader(pr), pkg1)
+ pkg2 = readPackage2(check, packages, pr)
+ return
+ }
+
+ data := writePkgStub(noders)
+
+ // We already passed base.Flag.Lang to types2 to handle validating
+ // the user's source code. Bump it up now to the current version and
+ // re-parse, so typecheck doesn't complain if we construct IR that
+ // utilizes newer Go features.
+ base.Flag.Lang = fmt.Sprintf("go1.%d", goversion.Version)
+ types.ParseLangFlag()
+
+ assert(types.LocalPkg.Path == "")
+ types.LocalPkg.Height = 0 // reset so pkgReader.pkgIdx doesn't complain
+ target := typecheck.Target
+
+ typecheck.TypecheckAllowed = true
+
+ localPkgReader = newPkgReader(newPkgDecoder(types.LocalPkg.Path, data))
+ readPackage(localPkgReader, types.LocalPkg)
+
+ r := localPkgReader.newReader(relocMeta, privateRootIdx, syncPrivate)
+ r.ext = r
+ r.pkgInit(types.LocalPkg, target)
+
+ // Type-check any top-level assignments. We ignore non-assignments
+ // here because other declarations are typechecked as they're
+ // constructed.
+ for i, ndecls := 0, len(target.Decls); i < ndecls; i++ {
+ switch n := target.Decls[i]; n.Op() {
+ case ir.OAS, ir.OAS2:
+ target.Decls[i] = typecheck.Stmt(n)
+ }
+ }
+
+ // Don't use range--bodyIdx can add closures to todoBodies.
+ for len(todoBodies) > 0 {
+ // The order we expand bodies doesn't matter, so pop from the end
+ // to reduce todoBodies reallocations if it grows further.
+ fn := todoBodies[len(todoBodies)-1]
+ todoBodies = todoBodies[:len(todoBodies)-1]
+
+ pri, ok := bodyReader[fn]
+ assert(ok)
+ pri.funcBody(fn)
+
+ // Instantiated generic function: add to Decls for typechecking
+ // and compilation.
+ if fn.OClosure == nil && len(pri.dict.targs) != 0 {
+ target.Decls = append(target.Decls, fn)
+ }
+ }
+ todoBodies = nil
+
+ if !quirksMode() {
+ // TODO(mdempsky): Investigate generating wrappers in quirks mode too.
+ r.wrapTypes(target)
+ }
+
+ // Check that nothing snuck past typechecking.
+ for _, n := range target.Decls {
+ if n.Typecheck() == 0 {
+ base.FatalfAt(n.Pos(), "missed typecheck: %v", n)
+ }
+
+ // For functions, check that at least their first statement (if
+ // any) was typechecked too.
+ if fn, ok := n.(*ir.Func); ok && len(fn.Body) != 0 {
+ if stmt := fn.Body[0]; stmt.Typecheck() == 0 {
+ base.FatalfAt(stmt.Pos(), "missed typecheck: %v", stmt)
+ }
+ }
+ }
+
+ base.ExitIfErrors() // just in case
+}
+
+// writePkgStub type checks the given parsed source files,
+// writes an export data package stub representing them,
+// and returns the result.
+func writePkgStub(noders []*noder) string {
+ m, pkg, info := checkFiles(noders)
+
+ pw := newPkgWriter(m, pkg, info)
+
+ pw.collectDecls(noders)
+
+ publicRootWriter := pw.newWriter(relocMeta, syncPublic)
+ privateRootWriter := pw.newWriter(relocMeta, syncPrivate)
+
+ assert(publicRootWriter.idx == publicRootIdx)
+ assert(privateRootWriter.idx == privateRootIdx)
+
+ {
+ w := publicRootWriter
+ w.pkg(pkg)
+ w.bool(false) // has init; XXX
+
+ scope := pkg.Scope()
+ names := scope.Names()
+ w.len(len(names))
+ for _, name := range scope.Names() {
+ w.obj(scope.Lookup(name), nil)
+ }
+
+ w.sync(syncEOF)
+ w.flush()
+ }
+
+ {
+ w := privateRootWriter
+ w.ext = w
+ w.pkgInit(noders)
+ w.flush()
+ }
+
+ var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
+ pw.dump(&sb)
+
+ // At this point, we're done with types2. Make sure the package is
+ // garbage collected.
+ freePackage(pkg)
+
+ return sb.String()
+}
+
+// freePackage ensures the given package is garbage collected.
+func freePackage(pkg *types2.Package) {
+ // The GC test below relies on a precise GC that runs finalizers as
+ // soon as objects are unreachable. Our implementation provides
+ // this, but other/older implementations may not (e.g., Go 1.4 does
+ // not because of #22350). To avoid imposing unnecessary
+ // restrictions on the GOROOT_BOOTSTRAP toolchain, we skip the test
+ // during bootstrapping.
+ if base.CompilerBootstrap {
+ return
+ }
+
+ // Set a finalizer on pkg so we can detect if/when it's collected.
+ done := make(chan struct{})
+ runtime.SetFinalizer(pkg, func(*types2.Package) { close(done) })
+
+ // Important: objects involved in cycles are not finalized, so zero
+ // out pkg to break its cycles and allow the finalizer to run.
+ *pkg = types2.Package{}
+
+ // It typically takes just 1 or 2 cycles to release pkg, but it
+ // doesn't hurt to try a few more times.
+ for i := 0; i < 10; i++ {
+ select {
+ case <-done:
+ return
+ default:
+ runtime.GC()
+ }
+ }
+
+ base.Fatalf("package never finalized")
+}
+
+func readPackage(pr *pkgReader, importpkg *types.Pkg) {
+ r := pr.newReader(relocMeta, publicRootIdx, syncPublic)
+
+ pkg := r.pkg()
+ assert(pkg == importpkg)
+
+ if r.bool() {
+ sym := pkg.Lookup(".inittask")
+ task := ir.NewNameAt(src.NoXPos, sym)
+ task.Class = ir.PEXTERN
+ sym.Def = task
+ }
+
+ for i, n := 0, r.len(); i < n; i++ {
+ r.sync(syncObject)
+ assert(!r.bool())
+ idx := r.reloc(relocObj)
+ assert(r.len() == 0)
+
+ path, name, code := r.p.peekObj(idx)
+ if code != objStub {
+ objReader[types.NewPkg(path, "").Lookup(name)] = pkgReaderIndex{pr, idx, nil}
+ }
+ }
+}
+
+func writeNewExport(out io.Writer) {
+ l := linker{
+ pw: newPkgEncoder(),
+
+ pkgs: make(map[string]int),
+ decls: make(map[*types.Sym]int),
+ }
+
+ publicRootWriter := l.pw.newEncoder(relocMeta, syncPublic)
+ assert(publicRootWriter.idx == publicRootIdx)
+
+ var selfPkgIdx int
+
+ {
+ pr := localPkgReader
+ r := pr.newDecoder(relocMeta, publicRootIdx, syncPublic)
+
+ r.sync(syncPkg)
+ selfPkgIdx = l.relocIdx(pr, relocPkg, r.reloc(relocPkg))
+
+ r.bool() // has init
+
+ for i, n := 0, r.len(); i < n; i++ {
+ r.sync(syncObject)
+ assert(!r.bool())
+ idx := r.reloc(relocObj)
+ assert(r.len() == 0)
+
+ xpath, xname, xtag := pr.peekObj(idx)
+ assert(xpath == pr.pkgPath)
+ assert(xtag != objStub)
+
+ if types.IsExported(xname) {
+ l.relocIdx(pr, relocObj, idx)
+ }
+ }
+
+ r.sync(syncEOF)
+ }
+
+ {
+ var idxs []int
+ for _, idx := range l.decls {
+ idxs = append(idxs, idx)
+ }
+ sort.Ints(idxs)
+
+ w := publicRootWriter
+
+ w.sync(syncPkg)
+ w.reloc(relocPkg, selfPkgIdx)
+
+ w.bool(typecheck.Lookup(".inittask").Def != nil)
+
+ w.len(len(idxs))
+ for _, idx := range idxs {
+ w.sync(syncObject)
+ w.bool(false)
+ w.reloc(relocObj, idx)
+ w.len(0)
+ }
+
+ w.sync(syncEOF)
+ w.flush()
+ }
+
+ l.pw.dump(out)
+}
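
The freePackage trick above generalizes to any "prove this is collectible" check: set a finalizer, break the cycles that would keep the finalizer from running, then force a few GC cycles. A minimal sketch under those assumptions:

    // Minimal sketch of the collectibility check used by freePackage:
    // register a finalizer, zero the value to break its cycles, drop the
    // reference, then run the GC a few times and see if the finalizer fired.
    package main

    import (
    	"fmt"
    	"runtime"
    )

    type node struct {
    	name string
    	next *node // potential cycle
    }

    func main() {
    	n := &node{name: "pkg"}
    	n.next = n // cycle: objects in cycles are not finalized

    	done := make(chan struct{})
    	runtime.SetFinalizer(n, func(*node) { close(done) })

    	*n = node{} // break the cycle so the finalizer can run
    	n = nil     // drop our own reference

    	for i := 0; i < 10; i++ {
    		select {
    		case <-done:
    			fmt.Println("collected")
    			return
    		default:
    			runtime.GC()
    		}
    	}
    	fmt.Println("not collected")
    }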
diff --git a/src/cmd/compile/internal/noder/unified_test.go b/src/cmd/compile/internal/noder/unified_test.go
new file mode 100644
index 0000000000..96cc66f775
--- /dev/null
+++ b/src/cmd/compile/internal/noder/unified_test.go
@@ -0,0 +1,153 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder_test
+
+import (
+ "encoding/json"
+ "flag"
+ exec "internal/execabs"
+ "os"
+ "reflect"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+var (
+ flagPkgs = flag.String("pkgs", "std", "list of packages to compare (ignored in -short mode)")
+ flagAll = flag.Bool("all", false, "enable testing of all GOOS/GOARCH targets")
+ flagParallel = flag.Bool("parallel", false, "test GOOS/GOARCH targets in parallel")
+)
+
+// TestUnifiedCompare implements a test similar to running:
+//
+// $ go build -toolexec="toolstash -cmp" std
+//
+// The -pkgs flag controls the list of packages tested.
+//
+// By default, only the native GOOS/GOARCH target is enabled. The -all
+// flag enables testing of non-native targets. The -parallel flag
+// additionally enables testing of targets in parallel.
+//
+// Caution: Testing all targets is very resource intensive! On an IBM
+// P920 (dual Intel Xeon Gold 6154 CPUs; 36 cores, 192GB RAM), testing
+// all targets in parallel takes about 5 minutes. Using the 'go test'
+// command's -run flag for subtest matching is recommended for less
+// powerful machines.
+func TestUnifiedCompare(t *testing.T) {
+ targets, err := exec.Command("go", "tool", "dist", "list").Output()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, target := range strings.Fields(string(targets)) {
+ t.Run(target, func(t *testing.T) {
+ parts := strings.Split(target, "/")
+ goos, goarch := parts[0], parts[1]
+
+ if !(*flagAll || goos == runtime.GOOS && goarch == runtime.GOARCH) {
+ t.Skip("skipping non-native target (use -all to enable)")
+ }
+ if *flagParallel {
+ t.Parallel()
+ }
+
+ pkgs1 := loadPackages(t, goos, goarch, "-d=unified=0 -d=inlfuncswithclosures=0 -d=unifiedquirks=1 -G=0")
+ pkgs2 := loadPackages(t, goos, goarch, "-d=unified=1 -d=inlfuncswithclosures=0 -d=unifiedquirks=1 -G=0")
+
+ if len(pkgs1) != len(pkgs2) {
+ t.Fatalf("length mismatch: %v != %v", len(pkgs1), len(pkgs2))
+ }
+
+ for i := range pkgs1 {
+ pkg1 := pkgs1[i]
+ pkg2 := pkgs2[i]
+
+ path := pkg1.ImportPath
+ if path != pkg2.ImportPath {
+ t.Fatalf("mismatched paths: %q != %q", path, pkg2.ImportPath)
+ }
+
+ // Packages that don't have any source files (e.g., packages
+ // unsafe, embed/internal/embedtest, and cmd/internal/moddeps).
+ if pkg1.Export == "" && pkg2.Export == "" {
+ continue
+ }
+
+ if pkg1.BuildID == pkg2.BuildID {
+ t.Errorf("package %q: build IDs unexpectedly matched", path)
+ }
+
+ // Unlike toolstash -cmp, we're comparing the same compiler
+ // binary against itself, just with different flags. So we
+ // don't need to worry about skipping over mismatched version
+ // strings, but we do need to account for differing build IDs.
+ //
+ // Fortunately, build IDs are cryptographic 256-bit hashes,
+ // and cmd/go provides us with them up front. So we can just
+ // use them as delimiters to split the files, and then check
+ // that the substrings are all equal.
+ file1 := strings.Split(readFile(t, pkg1.Export), pkg1.BuildID)
+ file2 := strings.Split(readFile(t, pkg2.Export), pkg2.BuildID)
+ if !reflect.DeepEqual(file1, file2) {
+ t.Errorf("package %q: compile output differs", path)
+ }
+ }
+ })
+ }
+}
+
+type pkg struct {
+ ImportPath string
+ Export string
+ BuildID string
+ Incomplete bool
+}
+
+func loadPackages(t *testing.T, goos, goarch, gcflags string) []pkg {
+ args := []string{"list", "-e", "-export", "-json", "-gcflags=all=" + gcflags, "--"}
+ if testing.Short() {
+ t.Log("short testing mode; only testing package runtime")
+ args = append(args, "runtime")
+ } else {
+ args = append(args, strings.Fields(*flagPkgs)...)
+ }
+
+ cmd := exec.Command("go", args...)
+ cmd.Env = append(os.Environ(), "GOOS="+goos, "GOARCH="+goarch)
+ cmd.Stderr = os.Stderr
+ t.Logf("running %v", cmd)
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := cmd.Start(); err != nil {
+ t.Fatal(err)
+ }
+
+ var res []pkg
+ for dec := json.NewDecoder(stdout); dec.More(); {
+ var pkg pkg
+ if err := dec.Decode(&pkg); err != nil {
+ t.Fatal(err)
+ }
+ if pkg.Incomplete {
+ t.Fatalf("incomplete package: %q", pkg.ImportPath)
+ }
+ res = append(res, pkg)
+ }
+ if err := cmd.Wait(); err != nil {
+ t.Fatal(err)
+ }
+ return res
+}
+
+func readFile(t *testing.T, name string) string {
+ buf, err := os.ReadFile(name)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return string(buf)
+}
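
The build-ID comparison in TestUnifiedCompare boils down to: split each file on its own build ID and compare the remaining pieces. The same idea in isolation:

    // sameModuloID reports whether a and b are identical except for their
    // embedded IDs, by splitting each on its own ID and comparing the parts.
    package main

    import (
    	"fmt"
    	"reflect"
    	"strings"
    )

    func sameModuloID(a, aID, b, bID string) bool {
    	return reflect.DeepEqual(strings.Split(a, aID), strings.Split(b, bID))
    }

    func main() {
    	a := "header ID1 body ID1 trailer"
    	b := "header ID2 body ID2 trailer"
    	fmt.Println(sameModuloID(a, "ID1", b, "ID2"))     // true
    	fmt.Println(sameModuloID(a, "ID1", b+"!", "ID2")) // false
    }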
diff --git a/src/cmd/compile/internal/noder/validate.go b/src/cmd/compile/internal/noder/validate.go
index b926222c89..68a059b96f 100644
--- a/src/cmd/compile/internal/noder/validate.go
+++ b/src/cmd/compile/internal/noder/validate.go
@@ -55,7 +55,15 @@ func (g *irgen) validate(n syntax.Node) {
case *syntax.CallExpr:
tv := g.info.Types[n.Fun]
if tv.IsBuiltin() {
- switch builtin := n.Fun.(type) {
+ fun := n.Fun
+ for {
+ builtin, ok := fun.(*syntax.ParenExpr)
+ if !ok {
+ break
+ }
+ fun = builtin.X
+ }
+ switch builtin := fun.(type) {
case *syntax.Name:
g.validateBuiltin(builtin.Value, n)
case *syntax.SelectorExpr:
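
The validate.go change unwraps any parentheses around a builtin before switching on its form. The same shape written against the importable go/ast package (the compiler uses its internal syntax package, but the loop is identical in spirit):

    package main

    import (
    	"fmt"
    	"go/ast"
    	"go/parser"
    )

    // unparen strips any number of enclosing parentheses from an expression.
    func unparen(e ast.Expr) ast.Expr {
    	for {
    		p, ok := e.(*ast.ParenExpr)
    		if !ok {
    			return e
    		}
    		e = p.X
    	}
    }

    func main() {
    	expr, err := parser.ParseExpr("((len))(x)")
    	if err != nil {
    		panic(err)
    	}
    	call := expr.(*ast.CallExpr)
    	fmt.Printf("%T\n", call.Fun)          // *ast.ParenExpr
    	fmt.Printf("%T\n", unparen(call.Fun)) // *ast.Ident
    }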
diff --git a/src/cmd/compile/internal/noder/writer.go b/src/cmd/compile/internal/noder/writer.go
new file mode 100644
index 0000000000..b5028e7f69
--- /dev/null
+++ b/src/cmd/compile/internal/noder/writer.go
@@ -0,0 +1,1885 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "fmt"
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types2"
+)
+
+type pkgWriter struct {
+ pkgEncoder
+
+ m posMap
+ curpkg *types2.Package
+ info *types2.Info
+
+ posBasesIdx map[*syntax.PosBase]int
+ pkgsIdx map[*types2.Package]int
+ typsIdx map[types2.Type]int
+ globalsIdx map[types2.Object]int
+
+ funDecls map[*types2.Func]*syntax.FuncDecl
+ typDecls map[*types2.TypeName]typeDeclGen
+
+ linknames map[types2.Object]string
+ cgoPragmas [][]string
+
+ dups dupTypes
+}
+
+func newPkgWriter(m posMap, pkg *types2.Package, info *types2.Info) *pkgWriter {
+ return &pkgWriter{
+ pkgEncoder: newPkgEncoder(),
+
+ m: m,
+ curpkg: pkg,
+ info: info,
+
+ pkgsIdx: make(map[*types2.Package]int),
+ globalsIdx: make(map[types2.Object]int),
+ typsIdx: make(map[types2.Type]int),
+
+ posBasesIdx: make(map[*syntax.PosBase]int),
+
+ funDecls: make(map[*types2.Func]*syntax.FuncDecl),
+ typDecls: make(map[*types2.TypeName]typeDeclGen),
+
+ linknames: make(map[types2.Object]string),
+ }
+}
+
+func (pw *pkgWriter) errorf(p poser, msg string, args ...interface{}) {
+ base.ErrorfAt(pw.m.pos(p), msg, args...)
+}
+
+func (pw *pkgWriter) fatalf(p poser, msg string, args ...interface{}) {
+ base.FatalfAt(pw.m.pos(p), msg, args...)
+}
+
+func (pw *pkgWriter) unexpected(what string, p poser) {
+ pw.fatalf(p, "unexpected %s: %v (%T)", what, p, p)
+}
+
+type writer struct {
+ p *pkgWriter
+
+ encoder
+
+ // For writing out object descriptions, ext points to the extension
+ // writer for where we can write the compiler's private extension
+ // details for the object.
+ //
+ // TODO(mdempsky): This is a little hacky, but works easiest with
+ // the way things are currently.
+ ext *writer
+
+ // TODO(mdempsky): We should be able to prune localsIdx whenever a
+ // scope closes, and then maybe we can just use the same map for
+ // storing the TypeParams too (as their TypeName instead).
+
+ // variables declared within this function
+ localsIdx map[*types2.Var]int
+
+ closureVars []posObj
+ closureVarsIdx map[*types2.Var]int
+
+ dict *writerDict
+ derived bool
+}
+
+// A writerDict tracks types and objects that are used by a declaration.
+type writerDict struct {
+ implicits []*types2.TypeName
+
+ // derived is a slice of type indices for computing derived types
+ // (i.e., types that depend on the declaration's type parameters).
+ derived []derivedInfo
+
+ // derivedIdx maps a Type to its corresponding index within the
+ // derived slice, if present.
+ derivedIdx map[types2.Type]int
+
+ // funcs lists references to generic functions that were
+ // instantiated with derived types (i.e., that require
+ // sub-dictionaries when called at run time).
+ funcs []objInfo
+}
+
+type derivedInfo struct {
+ idx int
+ needed bool
+}
+
+type typeInfo struct {
+ idx int
+ derived bool
+}
+
+type objInfo struct {
+ idx int // index for the generic function declaration
+ explicits []typeInfo // info for the type arguments
+}
+
+func (info objInfo) anyDerived() bool {
+ for _, explicit := range info.explicits {
+ if explicit.derived {
+ return true
+ }
+ }
+ return false
+}
+
+func (info objInfo) equals(other objInfo) bool {
+ if info.idx != other.idx {
+ return false
+ }
+ assert(len(info.explicits) == len(other.explicits))
+ for i, targ := range info.explicits {
+ if targ != other.explicits[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (pw *pkgWriter) newWriter(k reloc, marker syncMarker) *writer {
+ return &writer{
+ encoder: pw.newEncoder(k, marker),
+ p: pw,
+ }
+}
+
+// @@@ Positions
+
+func (w *writer) pos(p poser) {
+ w.sync(syncPos)
+ pos := p.Pos()
+
+ // TODO(mdempsky): Track down the remaining cases here and fix them.
+ if !w.bool(pos.IsKnown()) {
+ return
+ }
+
+ // TODO(mdempsky): Delta encoding. Also, if there's a b-side, update
+ // its position base too (but not vice versa!).
+ w.posBase(pos.Base())
+ w.uint(pos.Line())
+ w.uint(pos.Col())
+}
+
+func (w *writer) posBase(b *syntax.PosBase) {
+ w.reloc(relocPosBase, w.p.posBaseIdx(b))
+}
+
+func (pw *pkgWriter) posBaseIdx(b *syntax.PosBase) int {
+ if idx, ok := pw.posBasesIdx[b]; ok {
+ return idx
+ }
+
+ w := pw.newWriter(relocPosBase, syncPosBase)
+ w.p.posBasesIdx[b] = w.idx
+
+ // TODO(mdempsky): What exactly does "fileh" do anyway? Is writing
+ // out both of these strings really the right thing to do here?
+ fn := b.Filename()
+ w.string(fn)
+ w.string(fileh(fn))
+
+ if !w.bool(b.IsFileBase()) {
+ w.pos(b)
+ w.uint(b.Line())
+ w.uint(b.Col())
+ }
+
+ return w.flush()
+}
+
+// @@@ Packages
+
+func (w *writer) pkg(pkg *types2.Package) {
+ w.sync(syncPkg)
+ w.reloc(relocPkg, w.p.pkgIdx(pkg))
+}
+
+func (pw *pkgWriter) pkgIdx(pkg *types2.Package) int {
+ if idx, ok := pw.pkgsIdx[pkg]; ok {
+ return idx
+ }
+
+ w := pw.newWriter(relocPkg, syncPkgDef)
+ pw.pkgsIdx[pkg] = w.idx
+
+ if pkg == nil {
+ w.string("builtin")
+ } else {
+ var path string
+ if pkg != w.p.curpkg {
+ path = pkg.Path()
+ }
+ w.string(path)
+ w.string(pkg.Name())
+ w.len(pkg.Height())
+
+ w.len(len(pkg.Imports()))
+ for _, imp := range pkg.Imports() {
+ w.pkg(imp)
+ }
+ }
+
+ return w.flush()
+}
+
+// @@@ Types
+
+func (w *writer) typ(typ types2.Type) {
+ w.typInfo(w.p.typIdx(typ, w.dict))
+}
+
+func (w *writer) typInfo(info typeInfo) {
+ w.sync(syncType)
+ if w.bool(info.derived) {
+ w.len(info.idx)
+ w.derived = true
+ } else {
+ w.reloc(relocType, info.idx)
+ }
+}
+
+// typIdx returns the index where the export data description of type
+// can be read back in. If no such index exists yet, it's created.
+//
+// typIdx also reports whether typ is a derived type; that is, whether
+// its identity depends on type parameters.
+func (pw *pkgWriter) typIdx(typ types2.Type, dict *writerDict) typeInfo {
+ if quirksMode() {
+ typ = pw.dups.orig(typ)
+ }
+
+ if idx, ok := pw.typsIdx[typ]; ok {
+ return typeInfo{idx: idx, derived: false}
+ }
+ if dict != nil {
+ if idx, ok := dict.derivedIdx[typ]; ok {
+ return typeInfo{idx: idx, derived: true}
+ }
+ }
+
+ w := pw.newWriter(relocType, syncTypeIdx)
+ w.dict = dict
+
+ switch typ := typ.(type) {
+ default:
+ base.Fatalf("unexpected type: %v (%T)", typ, typ)
+
+ case *types2.Basic:
+ switch kind := typ.Kind(); {
+ case kind == types2.Invalid:
+ base.Fatalf("unexpected types2.Invalid")
+
+ case types2.Typ[kind] == typ:
+ w.code(typeBasic)
+ w.len(int(kind))
+
+ default:
+ // Handle "byte" and "rune" as references to their TypeName.
+ obj := types2.Universe.Lookup(typ.Name())
+ assert(obj.Type() == typ)
+
+ w.code(typeNamed)
+ w.obj(obj, nil)
+ }
+
+ case *types2.Named:
+ // Type aliases can refer to uninstantiated generic types, so we
+ // might see len(TParams) != 0 && len(TArgs) == 0 here.
+ // TODO(mdempsky): Revisit after #46477 is resolved.
+ assert(typ.TParams().Len() == len(typ.TArgs()) || len(typ.TArgs()) == 0)
+
+ // TODO(mdempsky): Why do we need to loop here?
+ orig := typ
+ for orig.TArgs() != nil {
+ orig = orig.Orig()
+ }
+
+ w.code(typeNamed)
+ w.obj(orig.Obj(), typ.TArgs())
+
+ case *types2.TypeParam:
+ index := func() int {
+ for idx, name := range w.dict.implicits {
+ if name.Type().(*types2.TypeParam) == typ {
+ return idx
+ }
+ }
+
+ return len(w.dict.implicits) + typ.Index()
+ }()
+
+ w.derived = true
+ w.code(typeTypeParam)
+ w.len(index)
+
+ case *types2.Array:
+ w.code(typeArray)
+ w.uint64(uint64(typ.Len()))
+ w.typ(typ.Elem())
+
+ case *types2.Chan:
+ w.code(typeChan)
+ w.len(int(typ.Dir()))
+ w.typ(typ.Elem())
+
+ case *types2.Map:
+ w.code(typeMap)
+ w.typ(typ.Key())
+ w.typ(typ.Elem())
+
+ case *types2.Pointer:
+ w.code(typePointer)
+ w.typ(typ.Elem())
+
+ case *types2.Signature:
+ assert(typ.TParams() == nil)
+ w.code(typeSignature)
+ w.signature(typ)
+
+ case *types2.Slice:
+ w.code(typeSlice)
+ w.typ(typ.Elem())
+
+ case *types2.Struct:
+ w.code(typeStruct)
+ w.structType(typ)
+
+ case *types2.Interface:
+ w.code(typeInterface)
+ w.interfaceType(typ)
+
+ case *types2.Union:
+ w.code(typeUnion)
+ w.unionType(typ)
+ }
+
+ if w.derived {
+ idx := len(dict.derived)
+ dict.derived = append(dict.derived, derivedInfo{idx: w.flush()})
+ dict.derivedIdx[typ] = idx
+ return typeInfo{idx: idx, derived: true}
+ }
+
+ pw.typsIdx[typ] = w.idx
+ return typeInfo{idx: w.flush(), derived: false}
+}
+
+func (w *writer) structType(typ *types2.Struct) {
+ w.len(typ.NumFields())
+ for i := 0; i < typ.NumFields(); i++ {
+ f := typ.Field(i)
+ w.pos(f)
+ w.selector(f)
+ w.typ(f.Type())
+ w.string(typ.Tag(i))
+ w.bool(f.Embedded())
+ }
+}
+
+func (w *writer) unionType(typ *types2.Union) {
+ w.len(typ.Len())
+ for i := 0; i < typ.Len(); i++ {
+ t := typ.Term(i)
+ w.bool(t.Tilde())
+ w.typ(t.Type())
+ }
+}
+
+func (w *writer) interfaceType(typ *types2.Interface) {
+ w.len(typ.NumExplicitMethods())
+ w.len(typ.NumEmbeddeds())
+
+ for i := 0; i < typ.NumExplicitMethods(); i++ {
+ m := typ.ExplicitMethod(i)
+ sig := m.Type().(*types2.Signature)
+ assert(sig.TParams() == nil)
+
+ w.pos(m)
+ w.selector(m)
+ w.signature(sig)
+ }
+
+ for i := 0; i < typ.NumEmbeddeds(); i++ {
+ w.typ(typ.EmbeddedType(i))
+ }
+}
+
+func (w *writer) signature(sig *types2.Signature) {
+ w.sync(syncSignature)
+ w.params(sig.Params())
+ w.params(sig.Results())
+ w.bool(sig.Variadic())
+}
+
+func (w *writer) params(typ *types2.Tuple) {
+ w.sync(syncParams)
+ w.len(typ.Len())
+ for i := 0; i < typ.Len(); i++ {
+ w.param(typ.At(i))
+ }
+}
+
+func (w *writer) param(param *types2.Var) {
+ w.sync(syncParam)
+ w.pos(param)
+ w.localIdent(param)
+ w.typ(param.Type())
+}
+
+// @@@ Objects
+
+func (w *writer) obj(obj types2.Object, explicits []types2.Type) {
+ explicitInfos := make([]typeInfo, len(explicits))
+ for i, explicit := range explicits {
+ explicitInfos[i] = w.p.typIdx(explicit, w.dict)
+ }
+ info := objInfo{idx: w.p.objIdx(obj), explicits: explicitInfos}
+
+ if _, ok := obj.(*types2.Func); ok && info.anyDerived() {
+ idx := -1
+ for i, prev := range w.dict.funcs {
+ if prev.equals(info) {
+ idx = i
+ }
+ }
+ if idx < 0 {
+ idx = len(w.dict.funcs)
+ w.dict.funcs = append(w.dict.funcs, info)
+ }
+
+ // TODO(mdempsky): Push up into expr; this shouldn't appear
+ // outside of expression context.
+ w.sync(syncObject)
+ w.bool(true)
+ w.len(idx)
+ return
+ }
+
+ // TODO(mdempsky): Push up into typIdx; this shouldn't be needed
+ // except while writing out types.
+ if isDefinedType(obj) && obj.Pkg() == w.p.curpkg {
+ decl, ok := w.p.typDecls[obj.(*types2.TypeName)]
+ assert(ok)
+ if len(decl.implicits) != 0 {
+ w.derived = true
+ }
+ }
+
+ w.sync(syncObject)
+ w.bool(false)
+ w.reloc(relocObj, info.idx)
+
+ w.len(len(info.explicits))
+ for _, info := range info.explicits {
+ w.typInfo(info)
+ }
+}
+
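+// objIdx returns the index where the given object's declaration is
+// written, writing it out first (across the relocObj, relocObjExt,
+// relocName, and relocObjDict sections) if it hasn't been already.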
+func (pw *pkgWriter) objIdx(obj types2.Object) int {
+ if idx, ok := pw.globalsIdx[obj]; ok {
+ return idx
+ }
+
+ dict := &writerDict{
+ derivedIdx: make(map[types2.Type]int),
+ }
+
+ if isDefinedType(obj) && obj.Pkg() == pw.curpkg {
+ decl, ok := pw.typDecls[obj.(*types2.TypeName)]
+ assert(ok)
+ dict.implicits = decl.implicits
+ }
+
+ w := pw.newWriter(relocObj, syncObject1)
+ w.ext = pw.newWriter(relocObjExt, syncObject1)
+ wname := pw.newWriter(relocName, syncObject1)
+ wdict := pw.newWriter(relocObjDict, syncObject1)
+
+ pw.globalsIdx[obj] = w.idx // break cycles
+ assert(w.ext.idx == w.idx)
+ assert(wname.idx == w.idx)
+ assert(wdict.idx == w.idx)
+
+ w.dict = dict
+ w.ext.dict = dict
+
+ code := w.doObj(obj)
+ w.flush()
+ w.ext.flush()
+
+ wname.qualifiedIdent(obj)
+ wname.code(code)
+ wname.flush()
+
+ wdict.objDict(obj, w.dict)
+ wdict.flush()
+
+ return w.idx
+}
+
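+// doObj writes out the object's declaration details and reports the
+// codeObj tag describing what kind of object it is. Objects declared
+// in other packages are written as stubs.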
+func (w *writer) doObj(obj types2.Object) codeObj {
+ if obj.Pkg() != w.p.curpkg {
+ return objStub
+ }
+
+ switch obj := obj.(type) {
+ default:
+ w.p.unexpected("object", obj)
+ panic("unreachable")
+
+ case *types2.Const:
+ w.pos(obj)
+ w.value(obj.Type(), obj.Val())
+ return objConst
+
+ case *types2.Func:
+ decl, ok := w.p.funDecls[obj]
+ assert(ok)
+ sig := obj.Type().(*types2.Signature)
+
+ w.pos(obj)
+ w.typeParamNames(sig.TParams())
+ w.signature(sig)
+ w.pos(decl)
+ w.ext.funcExt(obj)
+ return objFunc
+
+ case *types2.TypeName:
+ decl, ok := w.p.typDecls[obj]
+ assert(ok)
+
+ if obj.IsAlias() {
+ w.pos(obj)
+ w.typ(obj.Type())
+ return objAlias
+ }
+
+ named := obj.Type().(*types2.Named)
+ assert(named.TArgs() == nil)
+
+ w.pos(obj)
+ w.typeParamNames(named.TParams())
+ w.ext.typeExt(obj)
+ w.typExpr(decl.Type)
+
+ w.len(named.NumMethods())
+ for i := 0; i < named.NumMethods(); i++ {
+ w.method(named.Method(i))
+ }
+
+ return objType
+
+ case *types2.Var:
+ w.pos(obj)
+ w.typ(obj.Type())
+ w.ext.varExt(obj)
+ return objVar
+ }
+}
+
+// typExpr writes the type represented by the given expression.
+func (w *writer) typExpr(expr syntax.Expr) {
+ tv, ok := w.p.info.Types[expr]
+ assert(ok)
+ assert(tv.IsType())
+ w.typ(tv.Type)
+}
+
+func (w *writer) value(typ types2.Type, val constant.Value) {
+ w.sync(syncValue)
+ w.typ(typ)
+ w.rawValue(val)
+}
+
+// objDict writes the dictionary needed for reading the given object.
+func (w *writer) objDict(obj types2.Object, dict *writerDict) {
+ // TODO(mdempsky): Split objDict into multiple entries? reader.go
+ // doesn't care about the type parameter bounds, and reader2.go
+ // doesn't care about referenced functions.
+
+ w.dict = dict // TODO(mdempsky): This is a bit sketchy.
+
+ w.len(len(dict.implicits))
+
+ tparams := objTypeParams(obj)
+ ntparams := tparams.Len()
+ w.len(ntparams)
+ for i := 0; i < ntparams; i++ {
+ w.typ(tparams.At(i).Type().(*types2.TypeParam).Constraint())
+ }
+
+ nderived := len(dict.derived)
+ w.len(nderived)
+ for _, typ := range dict.derived {
+ w.reloc(relocType, typ.idx)
+ w.bool(typ.needed)
+ }
+
+ nfuncs := len(dict.funcs)
+ w.len(nfuncs)
+ for _, fn := range dict.funcs {
+ w.reloc(relocObj, fn.idx)
+ w.len(len(fn.explicits))
+ for _, targ := range fn.explicits {
+ w.typInfo(targ)
+ }
+ }
+
+ assert(len(dict.derived) == nderived)
+ assert(len(dict.funcs) == nfuncs)
+}
+
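+// typeParamNames writes the position and name of each of the given
+// type parameters. Their constraints are written separately, as part
+// of the object's dictionary.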
+func (w *writer) typeParamNames(tparams *types2.TypeParams) {
+ w.sync(syncTypeParamNames)
+
+ ntparams := tparams.Len()
+ for i := 0; i < ntparams; i++ {
+ tparam := tparams.At(i)
+ w.pos(tparam)
+ w.localIdent(tparam)
+ }
+}
+
+func (w *writer) method(meth *types2.Func) {
+ decl, ok := w.p.funDecls[meth]
+ assert(ok)
+ sig := meth.Type().(*types2.Signature)
+
+ w.sync(syncMethod)
+ w.pos(meth)
+ w.selector(meth)
+ w.typeParamNames(sig.RParams())
+ w.param(sig.Recv())
+ w.signature(sig)
+
+ w.pos(decl) // XXX: Hack to work around linker limitations.
+ w.ext.funcExt(meth)
+}
+
+// qualifiedIdent writes out the name of an object declared at package
+// scope. (For now, it's also used to refer to local defined types.)
+func (w *writer) qualifiedIdent(obj types2.Object) {
+ w.sync(syncSym)
+
+ name := obj.Name()
+ if isDefinedType(obj) && obj.Pkg() == w.p.curpkg {
+ decl, ok := w.p.typDecls[obj.(*types2.TypeName)]
+ assert(ok)
+ if decl.gen != 0 {
+ // TODO(mdempsky): Find a better solution than embedding middle
+ // dot in the symbol name; this is terrible.
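+ //
+ // For example, a function-scoped type T that is the second
+ // function-scoped defined type in this package is written as "T·2".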
+ name = fmt.Sprintf("%s·%v", name, decl.gen)
+ }
+ }
+
+ w.pkg(obj.Pkg())
+ w.string(name)
+}
+
+// TODO(mdempsky): We should be able to omit pkg from both localIdent
+// and selector, because they should always be known from context.
+// However, past frustrations with this optimization in iexport make
+// me a little nervous to try it again.
+
+// localIdent writes the name of a locally declared object (i.e.,
+// objects that can only be accessed by name, within the context of a
+// particular function).
+func (w *writer) localIdent(obj types2.Object) {
+ assert(!isGlobal(obj))
+ w.sync(syncLocalIdent)
+ w.pkg(obj.Pkg())
+ w.string(obj.Name())
+}
+
+// selector writes the name of a field or method (i.e., objects that
+// can only be accessed using selector expressions).
+func (w *writer) selector(obj types2.Object) {
+ w.sync(syncSelector)
+ w.pkg(obj.Pkg())
+ w.string(obj.Name())
+}
+
+// @@@ Compiler extensions
+
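+// funcExt validates the function's pragmas and body, then writes its
+// compiler-specific extension data: pragma flags, linkname, and a
+// reference to its body.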
+func (w *writer) funcExt(obj *types2.Func) {
+ decl, ok := w.p.funDecls[obj]
+ assert(ok)
+
+ // TODO(mdempsky): Extend these pragma validation flags to account
+ // for generics. E.g., at the least, linkname probably doesn't make
+ // sense for generic functions.
+
+ pragma := asPragmaFlag(decl.Pragma)
+ if pragma&ir.Systemstack != 0 && pragma&ir.Nosplit != 0 {
+ w.p.errorf(decl, "go:nosplit and go:systemstack cannot be combined")
+ }
+
+ if decl.Body != nil {
+ if pragma&ir.Noescape != 0 {
+ w.p.errorf(decl, "can only use //go:noescape with external func implementations")
+ }
+ } else {
+ if base.Flag.Complete || decl.Name.Value == "init" {
+ // Linknamed functions are allowed to have no body. Hopefully
+ // the linkname target has a body. See issue 23311.
+ if _, ok := w.p.linknames[obj]; !ok {
+ w.p.errorf(decl, "missing function body")
+ }
+ }
+ }
+
+ sig, block := obj.Type().(*types2.Signature), decl.Body
+ body, closureVars := w.p.bodyIdx(w.p.curpkg, sig, block, w.dict)
+ assert(len(closureVars) == 0)
+
+ w.sync(syncFuncExt)
+ w.pragmaFlag(pragma)
+ w.linkname(obj)
+ w.bool(false) // stub extension
+ w.reloc(relocBody, body)
+ w.sync(syncEOF)
+}
+
+func (w *writer) typeExt(obj *types2.TypeName) {
+ decl, ok := w.p.typDecls[obj]
+ assert(ok)
+
+ w.sync(syncTypeExt)
+
+ w.pragmaFlag(asPragmaFlag(decl.Pragma))
+
+ // No LSym.SymIdx info yet.
+ w.int64(-1)
+ w.int64(-1)
+}
+
+func (w *writer) varExt(obj *types2.Var) {
+ w.sync(syncVarExt)
+ w.linkname(obj)
+}
+
+func (w *writer) linkname(obj types2.Object) {
+ w.sync(syncLinkname)
+ w.int64(-1)
+ w.string(w.p.linknames[obj])
+}
+
+func (w *writer) pragmaFlag(p ir.PragmaFlag) {
+ w.sync(syncPragma)
+ w.int(int(p))
+}
+
+// @@@ Function bodies
+
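+// bodyIdx writes the given function body (if any) into its own
+// section and returns its index, along with any variables from
+// enclosing functions that the body captures as closure variables.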
+func (pw *pkgWriter) bodyIdx(pkg *types2.Package, sig *types2.Signature, block *syntax.BlockStmt, dict *writerDict) (idx int, closureVars []posObj) {
+ w := pw.newWriter(relocBody, syncFuncBody)
+ w.dict = dict
+
+ w.funcargs(sig)
+ if w.bool(block != nil) {
+ w.stmts(block.List)
+ w.pos(block.Rbrace)
+ }
+
+ return w.flush(), w.closureVars
+}
+
+func (w *writer) funcargs(sig *types2.Signature) {
+ do := func(params *types2.Tuple, result bool) {
+ for i := 0; i < params.Len(); i++ {
+ w.funcarg(params.At(i), result)
+ }
+ }
+
+ if recv := sig.Recv(); recv != nil {
+ w.funcarg(recv, false)
+ }
+ do(sig.Params(), false)
+ do(sig.Results(), true)
+}
+
+func (w *writer) funcarg(param *types2.Var, result bool) {
+ if param.Name() != "" || result {
+ w.addLocal(param)
+ }
+}
+
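+// addLocal records obj as a new local variable of the current
+// function, assigning it the next index in the locals table.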
+func (w *writer) addLocal(obj *types2.Var) {
+ w.sync(syncAddLocal)
+ idx := len(w.localsIdx)
+ if enableSync {
+ w.int(idx)
+ }
+ if w.localsIdx == nil {
+ w.localsIdx = make(map[*types2.Var]int)
+ }
+ w.localsIdx[obj] = idx
+}
+
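+// useLocal writes a reference to the given local variable: either its
+// locals-table index or, for variables captured from an enclosing
+// function, its closure-variable index (allocating one if needed).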
+func (w *writer) useLocal(pos syntax.Pos, obj *types2.Var) {
+ w.sync(syncUseObjLocal)
+
+ if idx, ok := w.localsIdx[obj]; w.bool(ok) {
+ w.len(idx)
+ return
+ }
+
+ idx, ok := w.closureVarsIdx[obj]
+ if !ok {
+ if w.closureVarsIdx == nil {
+ w.closureVarsIdx = make(map[*types2.Var]int)
+ }
+ idx = len(w.closureVars)
+ w.closureVars = append(w.closureVars, posObj{pos, obj})
+ w.closureVarsIdx[obj] = idx
+ }
+ w.len(idx)
+}
+
+func (w *writer) openScope(pos syntax.Pos) {
+ w.sync(syncOpenScope)
+ w.pos(pos)
+}
+
+func (w *writer) closeScope(pos syntax.Pos) {
+ w.sync(syncCloseScope)
+ w.pos(pos)
+ w.closeAnotherScope()
+}
+
+func (w *writer) closeAnotherScope() {
+ w.sync(syncCloseAnotherScope)
+}
+
+// @@@ Statements
+
+func (w *writer) stmt(stmt syntax.Stmt) {
+ var stmts []syntax.Stmt
+ if stmt != nil {
+ stmts = []syntax.Stmt{stmt}
+ }
+ w.stmts(stmts)
+}
+
+func (w *writer) stmts(stmts []syntax.Stmt) {
+ w.sync(syncStmts)
+ for _, stmt := range stmts {
+ w.stmt1(stmt)
+ }
+ w.code(stmtEnd)
+ w.sync(syncStmtsEnd)
+}
+
+func (w *writer) stmt1(stmt syntax.Stmt) {
+ switch stmt := stmt.(type) {
+ default:
+ w.p.unexpected("statement", stmt)
+
+ case nil, *syntax.EmptyStmt:
+ return
+
+ case *syntax.AssignStmt:
+ switch {
+ case stmt.Rhs == nil:
+ w.code(stmtIncDec)
+ w.op(binOps[stmt.Op])
+ w.expr(stmt.Lhs)
+ w.pos(stmt)
+
+ case stmt.Op != 0 && stmt.Op != syntax.Def:
+ w.code(stmtAssignOp)
+ w.op(binOps[stmt.Op])
+ w.expr(stmt.Lhs)
+ w.pos(stmt)
+ w.expr(stmt.Rhs)
+
+ default:
+ w.code(stmtAssign)
+ w.pos(stmt)
+ w.exprList(stmt.Rhs)
+ w.assignList(stmt.Lhs)
+ }
+
+ case *syntax.BlockStmt:
+ w.code(stmtBlock)
+ w.blockStmt(stmt)
+
+ case *syntax.BranchStmt:
+ w.code(stmtBranch)
+ w.pos(stmt)
+ w.op(branchOps[stmt.Tok])
+ w.optLabel(stmt.Label)
+
+ case *syntax.CallStmt:
+ w.code(stmtCall)
+ w.pos(stmt)
+ w.op(callOps[stmt.Tok])
+ w.expr(stmt.Call)
+
+ case *syntax.DeclStmt:
+ for _, decl := range stmt.DeclList {
+ w.declStmt(decl)
+ }
+
+ case *syntax.ExprStmt:
+ w.code(stmtExpr)
+ w.expr(stmt.X)
+
+ case *syntax.ForStmt:
+ w.code(stmtFor)
+ w.forStmt(stmt)
+
+ case *syntax.IfStmt:
+ w.code(stmtIf)
+ w.ifStmt(stmt)
+
+ case *syntax.LabeledStmt:
+ w.code(stmtLabel)
+ w.pos(stmt)
+ w.label(stmt.Label)
+ w.stmt1(stmt.Stmt)
+
+ case *syntax.ReturnStmt:
+ w.code(stmtReturn)
+ w.pos(stmt)
+ w.exprList(stmt.Results)
+
+ case *syntax.SelectStmt:
+ w.code(stmtSelect)
+ w.selectStmt(stmt)
+
+ case *syntax.SendStmt:
+ w.code(stmtSend)
+ w.pos(stmt)
+ w.expr(stmt.Chan)
+ w.expr(stmt.Value)
+
+ case *syntax.SwitchStmt:
+ w.code(stmtSwitch)
+ w.switchStmt(stmt)
+ }
+}
+
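+// assignList writes the assigned-to operands of an assignment. Each
+// operand is either a newly declared variable (written with its
+// position, name, and type, and added to the locals table) or an
+// ordinary assignable expression.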
+func (w *writer) assignList(expr syntax.Expr) {
+ exprs := unpackListExpr(expr)
+ w.len(len(exprs))
+
+ for _, expr := range exprs {
+ if name, ok := expr.(*syntax.Name); ok && name.Value != "_" {
+ if obj, ok := w.p.info.Defs[name]; ok {
+ obj := obj.(*types2.Var)
+
+ w.bool(true)
+ w.pos(obj)
+ w.localIdent(obj)
+ w.typ(obj.Type())
+
+ // TODO(mdempsky): Minimize locals index size by deferring
+ // this until the variables actually come into scope.
+ w.addLocal(obj)
+ continue
+ }
+ }
+
+ w.bool(false)
+ w.expr(expr)
+ }
+}
+
+func (w *writer) declStmt(decl syntax.Decl) {
+ switch decl := decl.(type) {
+ default:
+ w.p.unexpected("declaration", decl)
+
+ case *syntax.ConstDecl:
+
+ case *syntax.TypeDecl:
+ // Quirk: The legacy inliner doesn't support inlining functions
+ // with type declarations. Unified IR doesn't have any need to
+ // write out type declarations explicitly (they're always looked
+ // up via global index tables instead), so we just write out a
+ // marker so the reader knows to synthesize a fake declaration to
+ // prevent inlining.
+ if quirksMode() {
+ w.code(stmtTypeDeclHack)
+ }
+
+ case *syntax.VarDecl:
+ values := unpackListExpr(decl.Values)
+
+ // Quirk: When N variables are declared with N initialization
+ // values, we need to decompose that into N interleaved
+ // declarations+initializations, because it leads to different
+ // (albeit semantically equivalent) code generation.
+ if quirksMode() && len(decl.NameList) == len(values) {
+ for i, name := range decl.NameList {
+ w.code(stmtAssign)
+ w.pos(decl)
+ w.exprList(values[i])
+ w.assignList(name)
+ }
+ break
+ }
+
+ w.code(stmtAssign)
+ w.pos(decl)
+ w.exprList(decl.Values)
+ w.assignList(namesAsExpr(decl.NameList))
+ }
+}
+
+func (w *writer) blockStmt(stmt *syntax.BlockStmt) {
+ w.sync(syncBlockStmt)
+ w.openScope(stmt.Pos())
+ w.stmts(stmt.List)
+ w.closeScope(stmt.Rbrace)
+}
+
+func (w *writer) forStmt(stmt *syntax.ForStmt) {
+ w.sync(syncForStmt)
+ w.openScope(stmt.Pos())
+
+ if rang, ok := stmt.Init.(*syntax.RangeClause); w.bool(ok) {
+ w.pos(rang)
+ w.expr(rang.X)
+ w.assignList(rang.Lhs)
+ } else {
+ w.pos(stmt)
+ w.stmt(stmt.Init)
+ w.expr(stmt.Cond)
+ w.stmt(stmt.Post)
+ }
+
+ w.blockStmt(stmt.Body)
+ w.closeAnotherScope()
+}
+
+func (w *writer) ifStmt(stmt *syntax.IfStmt) {
+ w.sync(syncIfStmt)
+ w.openScope(stmt.Pos())
+ w.pos(stmt)
+ w.stmt(stmt.Init)
+ w.expr(stmt.Cond)
+ w.blockStmt(stmt.Then)
+ w.stmt(stmt.Else)
+ w.closeAnotherScope()
+}
+
+func (w *writer) selectStmt(stmt *syntax.SelectStmt) {
+ w.sync(syncSelectStmt)
+
+ w.pos(stmt)
+ w.len(len(stmt.Body))
+ for i, clause := range stmt.Body {
+ if i > 0 {
+ w.closeScope(clause.Pos())
+ }
+ w.openScope(clause.Pos())
+
+ w.pos(clause)
+ w.stmt(clause.Comm)
+ w.stmts(clause.Body)
+ }
+ if len(stmt.Body) > 0 {
+ w.closeScope(stmt.Rbrace)
+ }
+}
+
+func (w *writer) switchStmt(stmt *syntax.SwitchStmt) {
+ w.sync(syncSwitchStmt)
+
+ w.openScope(stmt.Pos())
+ w.pos(stmt)
+ w.stmt(stmt.Init)
+
+ if guard, ok := stmt.Tag.(*syntax.TypeSwitchGuard); w.bool(ok) {
+ w.pos(guard)
+ if tag := guard.Lhs; w.bool(tag != nil) {
+ w.pos(tag)
+ w.string(tag.Value)
+ }
+ w.expr(guard.X)
+ } else {
+ w.expr(stmt.Tag)
+ }
+
+ w.len(len(stmt.Body))
+ for i, clause := range stmt.Body {
+ if i > 0 {
+ w.closeScope(clause.Pos())
+ }
+ w.openScope(clause.Pos())
+
+ w.pos(clause)
+ w.exprList(clause.Cases)
+
+ if obj, ok := w.p.info.Implicits[clause]; ok {
+ // TODO(mdempsky): These pos details are quirkish, but also
+ // necessary so the variable's position is correct for DWARF
+ // scope assignment later. It would probably be better for us to
+ // instead just set the variable's DWARF scoping info earlier so
+ // we can give it the correct position information.
+ pos := clause.Pos()
+ if typs := unpackListExpr(clause.Cases); len(typs) != 0 {
+ pos = typeExprEndPos(typs[len(typs)-1])
+ }
+ w.pos(pos)
+
+ obj := obj.(*types2.Var)
+ w.typ(obj.Type())
+ w.addLocal(obj)
+ }
+
+ w.stmts(clause.Body)
+ }
+ if len(stmt.Body) > 0 {
+ w.closeScope(stmt.Rbrace)
+ }
+
+ w.closeScope(stmt.Rbrace)
+}
+
+func (w *writer) label(label *syntax.Name) {
+ w.sync(syncLabel)
+
+ // TODO(mdempsky): Replace label strings with dense indices.
+ w.string(label.Value)
+}
+
+func (w *writer) optLabel(label *syntax.Name) {
+ w.sync(syncOptLabel)
+ if w.bool(label != nil) {
+ w.label(label)
+ }
+}
+
+// @@@ Expressions
+
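+// expr writes the given expression: as a type, a constant value, a
+// reference to a named object, or a structural breakdown of the
+// expression itself.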
+func (w *writer) expr(expr syntax.Expr) {
+ expr = unparen(expr) // skip parens; unneeded after typecheck
+
+ obj, targs := lookupObj(w.p.info, expr)
+
+ if tv, ok := w.p.info.Types[expr]; ok {
+ // TODO(mdempsky): Be more judicious about which types are marked as "needed".
+ w.needType(tv.Type)
+
+ if tv.IsType() {
+ w.code(exprType)
+ w.typ(tv.Type)
+ return
+ }
+
+ if tv.Value != nil {
+ pos := expr.Pos()
+ if quirksMode() {
+ if obj != nil {
+ // Quirk: IR (and thus iexport) doesn't track position
+ // information for uses of declared objects.
+ pos = syntax.Pos{}
+ } else if tv.Value.Kind() == constant.String {
+ // Quirk: noder.sum picks a particular position for certain
+ // string concatenations.
+ pos = sumPos(expr)
+ }
+ }
+
+ w.code(exprConst)
+ w.pos(pos)
+ w.value(tv.Type, tv.Value)
+
+ // TODO(mdempsky): These details are only important for backend
+ // diagnostics. Explore writing them out separately.
+ w.op(constExprOp(expr))
+ w.string(syntax.String(expr))
+ return
+ }
+ }
+
+ if obj != nil {
+ if isGlobal(obj) {
+ w.code(exprName)
+ w.obj(obj, targs)
+ return
+ }
+
+ obj := obj.(*types2.Var)
+ assert(len(targs) == 0)
+
+ w.code(exprLocal)
+ w.useLocal(expr.Pos(), obj)
+ return
+ }
+
+ switch expr := expr.(type) {
+ default:
+ w.p.unexpected("expression", expr)
+
+ case nil: // absent slice index, for condition, or switch tag
+ w.code(exprNone)
+
+ case *syntax.Name:
+ assert(expr.Value == "_")
+ w.code(exprBlank)
+
+ case *syntax.CompositeLit:
+ w.code(exprCompLit)
+ w.compLit(expr)
+
+ case *syntax.FuncLit:
+ w.code(exprFuncLit)
+ w.funcLit(expr)
+
+ case *syntax.SelectorExpr:
+ sel, ok := w.p.info.Selections[expr]
+ assert(ok)
+
+ w.code(exprSelector)
+ w.expr(expr.X)
+ w.pos(expr)
+ w.selector(sel.Obj())
+
+ case *syntax.IndexExpr:
+ tv, ok := w.p.info.Types[expr.Index]
+ assert(ok && tv.IsValue())
+
+ w.code(exprIndex)
+ w.expr(expr.X)
+ w.pos(expr)
+ w.expr(expr.Index)
+
+ case *syntax.SliceExpr:
+ w.code(exprSlice)
+ w.expr(expr.X)
+ w.pos(expr)
+ for _, n := range &expr.Index {
+ w.expr(n)
+ }
+
+ case *syntax.AssertExpr:
+ w.code(exprAssert)
+ w.expr(expr.X)
+ w.pos(expr)
+ w.expr(expr.Type)
+
+ case *syntax.Operation:
+ if expr.Y == nil {
+ w.code(exprUnaryOp)
+ w.op(unOps[expr.Op])
+ w.pos(expr)
+ w.expr(expr.X)
+ break
+ }
+
+ w.code(exprBinaryOp)
+ w.op(binOps[expr.Op])
+ w.expr(expr.X)
+ w.pos(expr)
+ w.expr(expr.Y)
+
+ case *syntax.CallExpr:
+ tv, ok := w.p.info.Types[expr.Fun]
+ assert(ok)
+ if tv.IsType() {
+ assert(len(expr.ArgList) == 1)
+ assert(!expr.HasDots)
+
+ w.code(exprConvert)
+ w.typ(tv.Type)
+ w.pos(expr)
+ w.expr(expr.ArgList[0])
+ break
+ }
+
+ writeFunExpr := func() {
+ if selector, ok := unparen(expr.Fun).(*syntax.SelectorExpr); ok {
+ if sel, ok := w.p.info.Selections[selector]; ok && sel.Kind() == types2.MethodVal {
+ w.expr(selector.X)
+ w.bool(true) // method call
+ w.pos(selector)
+ w.selector(sel.Obj())
+ return
+ }
+ }
+
+ if inf, ok := w.p.info.Inferred[expr]; ok {
+ obj, _ := lookupObj(w.p.info, expr.Fun)
+ assert(obj != nil)
+
+ // As if w.expr(expr.Fun), but using inf.TArgs instead.
+ w.code(exprName)
+ w.obj(obj, inf.TArgs)
+ } else {
+ w.expr(expr.Fun)
+ }
+ w.bool(false) // not a method call (i.e., normal function call)
+ }
+
+ w.code(exprCall)
+ writeFunExpr()
+ w.pos(expr)
+ w.exprs(expr.ArgList)
+ w.bool(expr.HasDots)
+ }
+}
+
+func (w *writer) compLit(lit *syntax.CompositeLit) {
+ tv, ok := w.p.info.Types[lit]
+ assert(ok)
+
+ w.sync(syncCompLit)
+ w.pos(lit)
+ w.typ(tv.Type)
+
+ typ := tv.Type
+ if ptr, ok := typ.Underlying().(*types2.Pointer); ok {
+ typ = ptr.Elem()
+ }
+ str, isStruct := typ.Underlying().(*types2.Struct)
+
+ w.len(len(lit.ElemList))
+ for i, elem := range lit.ElemList {
+ if isStruct {
+ if kv, ok := elem.(*syntax.KeyValueExpr); ok {
+ // use position of kv.Key rather than of elem (which has position of ':')
+ w.pos(kv.Key)
+ w.len(fieldIndex(w.p.info, str, kv.Key.(*syntax.Name)))
+ elem = kv.Value
+ } else {
+ w.pos(elem)
+ w.len(i)
+ }
+ } else {
+ if kv, ok := elem.(*syntax.KeyValueExpr); w.bool(ok) {
+ // use position of kv.Key rather than of elem (which has position of ':')
+ w.pos(kv.Key)
+ w.expr(kv.Key)
+ elem = kv.Value
+ }
+ }
+ w.pos(elem)
+ w.expr(elem)
+ }
+}
+
+func (w *writer) funcLit(expr *syntax.FuncLit) {
+ tv, ok := w.p.info.Types[expr]
+ assert(ok)
+ sig := tv.Type.(*types2.Signature)
+
+ body, closureVars := w.p.bodyIdx(w.p.curpkg, sig, expr.Body, w.dict)
+
+ w.sync(syncFuncLit)
+ w.pos(expr)
+ w.pos(expr.Type) // for QuirksMode
+ w.signature(sig)
+
+ w.len(len(closureVars))
+ for _, cv := range closureVars {
+ w.pos(cv.pos)
+ if quirksMode() {
+ cv.pos = expr.Body.Rbrace
+ }
+ w.useLocal(cv.pos, cv.obj)
+ }
+
+ w.reloc(relocBody, body)
+}
+
+type posObj struct {
+ pos syntax.Pos
+ obj *types2.Var
+}
+
+func (w *writer) exprList(expr syntax.Expr) {
+ w.sync(syncExprList)
+ w.exprs(unpackListExpr(expr))
+}
+
+func (w *writer) exprs(exprs []syntax.Expr) {
+ if len(exprs) == 0 {
+ assert(exprs == nil)
+ }
+
+ w.sync(syncExprs)
+ w.len(len(exprs))
+ for _, expr := range exprs {
+ w.expr(expr)
+ }
+}
+
+func (w *writer) op(op ir.Op) {
+ // TODO(mdempsky): Remove in favor of explicit codes? Would make
+ // export data more stable against internal refactorings, but low
+ // priority at the moment.
+ assert(op != 0)
+ w.sync(syncOp)
+ w.len(int(op))
+}
+
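+// needType marks typ as needed. For derived types (types that depend
+// on in-scope type parameters), this sets the needed flag on their
+// entry in the enclosing declaration's dictionary.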
+func (w *writer) needType(typ types2.Type) {
+ // Decompose tuple into component element types.
+ if typ, ok := typ.(*types2.Tuple); ok {
+ for i := 0; i < typ.Len(); i++ {
+ w.needType(typ.At(i).Type())
+ }
+ return
+ }
+
+ if info := w.p.typIdx(typ, w.dict); info.derived {
+ w.dict.derived[info.idx].needed = true
+ }
+}
+
+// @@@ Package initialization
+
+// Caution: This code is still clumsy, because toolstash -cmp is
+// particularly sensitive to it.
+
+type typeDeclGen struct {
+ *syntax.TypeDecl
+ gen int
+
+ // Implicit type parameters in scope at this type declaration.
+ implicits []*types2.TypeName
+}
+
+type fileImports struct {
+ importedEmbed, importedUnsafe bool
+}
+
+type declCollector struct {
+ pw *pkgWriter
+ typegen *int
+ file *fileImports
+ withinFunc bool
+ implicits []*types2.TypeName
+}
+
+func (c *declCollector) withTParams(obj types2.Object) *declCollector {
+ tparams := objTypeParams(obj)
+ n := tparams.Len()
+ if n == 0 {
+ return c
+ }
+
+ copy := *c
+ copy.implicits = copy.implicits[:len(copy.implicits):len(copy.implicits)]
+ for i := 0; i < n; i++ {
+ copy.implicits = append(copy.implicits, tparams.At(i))
+ }
+ return &copy
+}
+
+func (c *declCollector) Visit(n syntax.Node) syntax.Visitor {
+ pw := c.pw
+
+ switch n := n.(type) {
+ case *syntax.File:
+ pw.checkPragmas(n.Pragma, ir.GoBuildPragma, false)
+
+ case *syntax.ImportDecl:
+ pw.checkPragmas(n.Pragma, 0, false)
+
+ switch pkgNameOf(pw.info, n).Imported().Path() {
+ case "embed":
+ c.file.importedEmbed = true
+ case "unsafe":
+ c.file.importedUnsafe = true
+ }
+
+ case *syntax.ConstDecl:
+ pw.checkPragmas(n.Pragma, 0, false)
+
+ case *syntax.FuncDecl:
+ pw.checkPragmas(n.Pragma, funcPragmas, false)
+
+ obj := pw.info.Defs[n.Name].(*types2.Func)
+ pw.funDecls[obj] = n
+
+ return c.withTParams(obj)
+
+ case *syntax.TypeDecl:
+ obj := pw.info.Defs[n.Name].(*types2.TypeName)
+ d := typeDeclGen{TypeDecl: n, implicits: c.implicits}
+
+ if n.Alias {
+ pw.checkPragmas(n.Pragma, 0, false)
+ } else {
+ pw.checkPragmas(n.Pragma, typePragmas, false)
+
+ // Assign a unique ID to function-scoped defined types.
+ if c.withinFunc {
+ *c.typegen++
+ d.gen = *c.typegen
+ }
+ }
+
+ pw.typDecls[obj] = d
+
+ // TODO(mdempsky): Omit? Not strictly necessary; only matters for
+ // type declarations within function literals within parameterized
+ // type declarations, but types2 will have constant folded those
+ // function literals away anyway.
+ return c.withTParams(obj)
+
+ case *syntax.VarDecl:
+ pw.checkPragmas(n.Pragma, 0, true)
+
+ if p, ok := n.Pragma.(*pragmas); ok && len(p.Embeds) > 0 {
+ if err := checkEmbed(n, c.file.importedEmbed, c.withinFunc); err != nil {
+ pw.errorf(p.Embeds[0].Pos, "%s", err)
+ }
+ }
+
+ // Workaround for #46208. For variable declarations that
+ // declare multiple variables and have an explicit type
+ // expression, the type expression is evaluated multiple
+ // times. This affects toolstash -cmp, because iexport is
+ // sensitive to *types.Type pointer identity.
+ if quirksMode() && n.Type != nil {
+ tv, ok := pw.info.Types[n.Type]
+ assert(ok)
+ assert(tv.IsType())
+ for _, name := range n.NameList {
+ obj := pw.info.Defs[name].(*types2.Var)
+ pw.dups.add(obj.Type(), tv.Type)
+ }
+ }
+
+ case *syntax.BlockStmt:
+ if !c.withinFunc {
+ copy := *c
+ copy.withinFunc = true
+ return &copy
+ }
+ }
+
+ return c
+}
+
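+// collectDecls walks the parsed source files to record package-level
+// declarations, pragmas, cgo directives, and //go:linkname directives
+// before writing begins.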
+func (pw *pkgWriter) collectDecls(noders []*noder) {
+ var typegen int
+ for _, p := range noders {
+ var file fileImports
+
+ syntax.Walk(p.file, &declCollector{
+ pw: pw,
+ typegen: &typegen,
+ file: &file,
+ })
+
+ pw.cgoPragmas = append(pw.cgoPragmas, p.pragcgobuf...)
+
+ for _, l := range p.linknames {
+ if !file.importedUnsafe {
+ pw.errorf(l.pos, "//go:linkname only allowed in Go files that import \"unsafe\"")
+ continue
+ }
+
+ switch obj := pw.curpkg.Scope().Lookup(l.local).(type) {
+ case *types2.Func, *types2.Var:
+ if _, ok := pw.linknames[obj]; !ok {
+ pw.linknames[obj] = l.remote
+ } else {
+ pw.errorf(l.pos, "duplicate //go:linkname for %s", l.local)
+ }
+
+ default:
+ // TODO(mdempsky): Enable after #42938 is fixed.
+ if false {
+ pw.errorf(l.pos, "//go:linkname must refer to declared function or variable")
+ }
+ }
+ }
+ }
+}
+
+func (pw *pkgWriter) checkPragmas(p syntax.Pragma, allowed ir.PragmaFlag, embedOK bool) {
+ if p == nil {
+ return
+ }
+ pragma := p.(*pragmas)
+
+ for _, pos := range pragma.Pos {
+ if pos.Flag&^allowed != 0 {
+ pw.errorf(pos.Pos, "misplaced compiler directive")
+ }
+ }
+
+ if !embedOK {
+ for _, e := range pragma.Embeds {
+ pw.errorf(e.Pos, "misplaced go:embed directive")
+ }
+ }
+}
+
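+// pkgInit writes out the package's initialization details: position
+// bases and imported objects (in quirks mode only), cgo pragmas, and
+// each file's package-level declarations.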
+func (w *writer) pkgInit(noders []*noder) {
+ if quirksMode() {
+ posBases := posBasesOf(noders)
+ w.len(len(posBases))
+ for _, posBase := range posBases {
+ w.posBase(posBase)
+ }
+
+ objs := importedObjsOf(w.p.curpkg, w.p.info, noders)
+ w.len(len(objs))
+ for _, obj := range objs {
+ w.qualifiedIdent(obj)
+ }
+ }
+
+ w.len(len(w.p.cgoPragmas))
+ for _, cgoPragma := range w.p.cgoPragmas {
+ w.strings(cgoPragma)
+ }
+
+ w.sync(syncDecls)
+ for _, p := range noders {
+ for _, decl := range p.file.DeclList {
+ w.pkgDecl(decl)
+ }
+ }
+ w.code(declEnd)
+
+ w.sync(syncEOF)
+}
+
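+// pkgDecl writes out a single package-level declaration. Blank and
+// (for now) generic declarations are skipped.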
+func (w *writer) pkgDecl(decl syntax.Decl) {
+ switch decl := decl.(type) {
+ default:
+ w.p.unexpected("declaration", decl)
+
+ case *syntax.ImportDecl:
+
+ case *syntax.ConstDecl:
+ w.code(declOther)
+ w.pkgObjs(decl.NameList...)
+
+ case *syntax.FuncDecl:
+ if decl.Name.Value == "_" {
+ break // skip blank functions
+ }
+
+ obj := w.p.info.Defs[decl.Name].(*types2.Func)
+ sig := obj.Type().(*types2.Signature)
+
+ if sig.RParams() != nil || sig.TParams() != nil {
+ break // skip generic functions
+ }
+
+ if recv := sig.Recv(); recv != nil {
+ w.code(declMethod)
+ w.typ(recvBase(recv))
+ w.selector(obj)
+ break
+ }
+
+ w.code(declFunc)
+ w.pkgObjs(decl.Name)
+
+ case *syntax.TypeDecl:
+ if len(decl.TParamList) != 0 {
+ break // skip generic type decls
+ }
+
+ if decl.Name.Value == "_" {
+ break // skip blank type decls
+ }
+
+ name := w.p.info.Defs[decl.Name].(*types2.TypeName)
+ // Skip type declarations for interfaces that are only usable as
+ // type parameter bounds.
+ if iface, ok := name.Type().Underlying().(*types2.Interface); ok && iface.IsConstraint() {
+ break
+ }
+
+ // Skip aliases to uninstantiated generic types.
+ // TODO(mdempsky): Revisit after #46477 is resolved.
+ if name.IsAlias() {
+ named, ok := name.Type().(*types2.Named)
+ if ok && named.TParams().Len() != 0 && len(named.TArgs()) == 0 {
+ break
+ }
+ }
+
+ w.code(declOther)
+ w.pkgObjs(decl.Name)
+
+ case *syntax.VarDecl:
+ w.code(declVar)
+ w.pos(decl)
+ w.pkgObjs(decl.NameList...)
+ w.exprList(decl.Values)
+
+ var embeds []pragmaEmbed
+ if p, ok := decl.Pragma.(*pragmas); ok {
+ embeds = p.Embeds
+ }
+ w.len(len(embeds))
+ for _, embed := range embeds {
+ w.pos(embed.Pos)
+ w.strings(embed.Patterns)
+ }
+ }
+}
+
+func (w *writer) pkgObjs(names ...*syntax.Name) {
+ w.sync(syncDeclNames)
+ w.len(len(names))
+
+ for _, name := range names {
+ obj, ok := w.p.info.Defs[name]
+ assert(ok)
+
+ w.sync(syncDeclName)
+ w.obj(obj, nil)
+ }
+}
+
+// @@@ Helpers
+
+// isDefinedType reports whether obj is a defined type.
+func isDefinedType(obj types2.Object) bool {
+ if obj, ok := obj.(*types2.TypeName); ok {
+ return !obj.IsAlias()
+ }
+ return false
+}
+
+// isGlobal reports whether obj was declared at package scope.
+//
+// Caveat: blank objects are not declared.
+func isGlobal(obj types2.Object) bool {
+ return obj.Parent() == obj.Pkg().Scope()
+}
+
+// lookupObj returns the object that expr refers to, if any. If expr
+// is an explicit instantiation of a generic object, then the type
+// arguments are returned as well.
+func lookupObj(info *types2.Info, expr syntax.Expr) (obj types2.Object, targs []types2.Type) {
+ if index, ok := expr.(*syntax.IndexExpr); ok {
+ if inf, ok := info.Inferred[index]; ok {
+ targs = inf.TArgs
+ } else {
+ args := unpackListExpr(index.Index)
+
+ if len(args) == 1 {
+ tv, ok := info.Types[args[0]]
+ assert(ok)
+ if tv.IsValue() {
+ return // normal index expression
+ }
+ }
+
+ targs = make([]types2.Type, len(args))
+ for i, arg := range args {
+ tv, ok := info.Types[arg]
+ assert(ok)
+ assert(tv.IsType())
+ targs[i] = tv.Type
+ }
+ }
+
+ expr = index.X
+ }
+
+ // Strip package qualifier, if present.
+ if sel, ok := expr.(*syntax.SelectorExpr); ok {
+ if !isPkgQual(info, sel) {
+ return // normal selector expression
+ }
+ expr = sel.Sel
+ }
+
+ if name, ok := expr.(*syntax.Name); ok {
+ obj, _ = info.Uses[name]
+ }
+ return
+}
+
+// isPkgQual reports whether the given selector expression is a
+// package-qualified identifier.
+func isPkgQual(info *types2.Info, sel *syntax.SelectorExpr) bool {
+ if name, ok := sel.X.(*syntax.Name); ok {
+ _, isPkgName := info.Uses[name].(*types2.PkgName)
+ return isPkgName
+ }
+ return false
+}
+
+// recvBase returns the base type for the given receiver parameter.
+func recvBase(recv *types2.Var) *types2.Named {
+ typ := recv.Type()
+ if ptr, ok := typ.(*types2.Pointer); ok {
+ typ = ptr.Elem()
+ }
+ return typ.(*types2.Named)
+}
+
+// namesAsExpr returns a list of names as a syntax.Expr.
+func namesAsExpr(names []*syntax.Name) syntax.Expr {
+ if len(names) == 1 {
+ return names[0]
+ }
+
+ exprs := make([]syntax.Expr, len(names))
+ for i, name := range names {
+ exprs[i] = name
+ }
+ return &syntax.ListExpr{ElemList: exprs}
+}
+
+// fieldIndex returns the index of the struct field named by key.
+func fieldIndex(info *types2.Info, str *types2.Struct, key *syntax.Name) int {
+ field := info.Uses[key].(*types2.Var)
+
+ for i := 0; i < str.NumFields(); i++ {
+ if str.Field(i) == field {
+ return i
+ }
+ }
+
+ panic(fmt.Sprintf("%s: %v is not a field of %v", key.Pos(), field, str))
+}
+
+// objTypeParams returns the type parameters on the given object.
+func objTypeParams(obj types2.Object) *types2.TypeParams {
+ switch obj := obj.(type) {
+ case *types2.Func:
+ sig := obj.Type().(*types2.Signature)
+ if sig.Recv() != nil {
+ return sig.RParams()
+ }
+ return sig.TParams()
+ case *types2.TypeName:
+ if !obj.IsAlias() {
+ return obj.Type().(*types2.Named).TParams()
+ }
+ }
+ return nil
+}
+
+func asPragmaFlag(p syntax.Pragma) ir.PragmaFlag {
+ if p == nil {
+ return 0
+ }
+ return p.(*pragmas).Flag
+}
diff --git a/src/cmd/compile/internal/pkginit/initorder.go b/src/cmd/compile/internal/pkginit/initorder.go
index 97d69629fb..0aad63a69f 100644
--- a/src/cmd/compile/internal/pkginit/initorder.go
+++ b/src/cmd/compile/internal/pkginit/initorder.go
@@ -304,7 +304,7 @@ func (d *initDeps) visit(n ir.Node) {
n := n.(*ir.ClosureExpr)
d.inspectList(n.Func.Body)
- case ir.ODOTMETH, ir.OCALLPART, ir.OMETHEXPR:
+ case ir.ODOTMETH, ir.OMETHVALUE, ir.OMETHEXPR:
d.foundDep(ir.MethodExprName(n))
}
}
diff --git a/src/cmd/compile/internal/ppc64/galign.go b/src/cmd/compile/internal/ppc64/galign.go
index 590290fa37..bff3e38f42 100644
--- a/src/cmd/compile/internal/ppc64/galign.go
+++ b/src/cmd/compile/internal/ppc64/galign.go
@@ -20,7 +20,6 @@ func Init(arch *ssagen.ArchInfo) {
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
- arch.Ginsnopdefer = ginsnopdefer
arch.SSAMarkMoves = ssaMarkMoves
arch.SSAGenValue = ssaGenValue
diff --git a/src/cmd/compile/internal/ppc64/ggen.go b/src/cmd/compile/internal/ppc64/ggen.go
index c76962cfb8..3ae6422bf9 100644
--- a/src/cmd/compile/internal/ppc64/ggen.go
+++ b/src/cmd/compile/internal/ppc64/ggen.go
@@ -53,30 +53,3 @@ func ginsnop(pp *objw.Progs) *obj.Prog {
p.To.Reg = ppc64.REG_R0
return p
}
-
-func ginsnopdefer(pp *objw.Progs) *obj.Prog {
- // On PPC64 two nops are required in the defer case.
- //
- // (see gc/cgen.go, gc/plive.go -- copy of comment below)
- //
- // On ppc64, when compiling Go into position
- // independent code on ppc64le we insert an
- // instruction to reload the TOC pointer from the
- // stack as well. See the long comment near
- // jmpdefer in runtime/asm_ppc64.s for why.
- // If the MOVD is not needed, insert a hardware NOP
- // so that the same number of instructions are used
- // on ppc64 in both shared and non-shared modes.
-
- ginsnop(pp)
- if base.Ctxt.Flag_shared {
- p := pp.Prog(ppc64.AMOVD)
- p.From.Type = obj.TYPE_MEM
- p.From.Offset = 24
- p.From.Reg = ppc64.REGSP
- p.To.Type = obj.TYPE_REG
- p.To.Reg = ppc64.REG_R2
- return p
- }
- return ginsnop(pp)
-}
diff --git a/src/cmd/compile/internal/reflectdata/alg.go b/src/cmd/compile/internal/reflectdata/alg.go
index 0707e0b61c..36ad389647 100644
--- a/src/cmd/compile/internal/reflectdata/alg.go
+++ b/src/cmd/compile/internal/reflectdata/alg.go
@@ -679,8 +679,7 @@ func EqString(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
fn := typecheck.LookupRuntime("memequal")
fn = typecheck.SubstArgTypes(fn, types.Types[types.TUINT8], types.Types[types.TUINT8])
- call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, []ir.Node{sptr, tptr, ir.Copy(slen)})
- typecheck.Call(call)
+ call := typecheck.Call(base.Pos, fn, []ir.Node{sptr, tptr, ir.Copy(slen)}, false).(*ir.CallExpr)
cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, slen, tlen)
cmp = typecheck.Expr(cmp).(*ir.BinaryExpr)
@@ -716,8 +715,7 @@ func EqInterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
sdata.SetTypecheck(1)
tdata.SetTypecheck(1)
- call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, []ir.Node{stab, sdata, tdata})
- typecheck.Call(call)
+ call := typecheck.Call(base.Pos, fn, []ir.Node{stab, sdata, tdata}, false).(*ir.CallExpr)
cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, stab, ttab)
cmp = typecheck.Expr(cmp).(*ir.BinaryExpr)
diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go
index eb9a8a6c9b..b04e4d684f 100644
--- a/src/cmd/compile/internal/reflectdata/reflect.go
+++ b/src/cmd/compile/internal/reflectdata/reflect.go
@@ -28,23 +28,13 @@ import (
"cmd/internal/src"
)
-type itabEntry struct {
- t, itype *types.Type
- lsym *obj.LSym // symbol of the itab itself
-
- // symbols of each method in
- // the itab, sorted by byte offset;
- // filled in by CompileITabs
- entries []*obj.LSym
-}
-
type ptabEntry struct {
s *types.Sym
t *types.Type
}
-func CountTabs() (numPTabs, numITabs int) {
- return len(ptabs), len(itabs)
+func CountPTabs() int {
+ return len(ptabs)
}
// runtime interface and reflection data structures
@@ -56,7 +46,6 @@ var (
gcsymmu sync.Mutex // protects gcsymset and gcsymslice
gcsymset = make(map[*types.Type]struct{})
- itabs []itabEntry
ptabs []*ir.Name
)
@@ -313,6 +302,10 @@ func MapIterType(t *types.Type) *types.Type {
// methods returns the methods of the non-interface type t, sorted by name.
// Generates stub functions as needed.
func methods(t *types.Type) []*typeSig {
+ if t.HasShape() {
+ // Shape types have no methods.
+ return nil
+ }
// method type
mt := types.ReceiverBaseType(t)
@@ -321,13 +314,6 @@ func methods(t *types.Type) []*typeSig {
}
typecheck.CalcMethods(mt)
- // type stored in interface word
- it := t
-
- if !types.IsDirectIface(it) {
- it = types.NewPtr(t)
- }
-
// make list of methods for t,
// generating code if necessary.
var ms []*typeSig
@@ -355,8 +341,8 @@ func methods(t *types.Type) []*typeSig {
sig := &typeSig{
name: f.Sym,
- isym: methodWrapper(it, f),
- tsym: methodWrapper(t, f),
+ isym: methodWrapper(t, f, true),
+ tsym: methodWrapper(t, f, false),
type_: typecheck.NewMethodType(f.Type, t),
mtype: typecheck.NewMethodType(f.Type, nil),
}
@@ -394,7 +380,7 @@ func imethods(t *types.Type) []*typeSig {
// IfaceType.Method is not in the reflect data.
// Generate the method body, so that compiled
// code can refer to it.
- methodWrapper(t, f)
+ methodWrapper(t, f, false)
}
return methods
@@ -735,7 +721,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
}
exported := false
- p := t.LongString()
+ p := t.NameString()
// If we're writing out type T,
// we are very likely to write out type *T as well.
// Use the string "*T"[1:] for "T", so that the two
@@ -799,11 +785,11 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
// TrackSym returns the symbol for tracking use of field/method f, assumed
// to be a member of struct/interface type t.
func TrackSym(t *types.Type, f *types.Field) *obj.LSym {
- return base.PkgLinksym("go.track", t.ShortString()+"."+f.Sym.Name, obj.ABI0)
+ return base.PkgLinksym("go.track", t.LinkString()+"."+f.Sym.Name, obj.ABI0)
}
func TypeSymPrefix(prefix string, t *types.Type) *types.Sym {
- p := prefix + "." + t.ShortString()
+ p := prefix + "." + t.LinkString()
s := types.TypeSymLookup(p)
// This function is for looking up type-related generated functions
@@ -848,16 +834,28 @@ func TypePtr(t *types.Type) *ir.AddrExpr {
return typecheck.Expr(typecheck.NodAddr(n)).(*ir.AddrExpr)
}
-func ITabAddr(t, itype *types.Type) *ir.AddrExpr {
- if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
- base.Fatalf("ITabAddr(%v, %v)", t, itype)
- }
- s, existed := ir.Pkgs.Itab.LookupOK(t.ShortString() + "," + itype.ShortString())
+// ITabLsym returns the LSym representing the itab for concrete type typ
+// implementing interface iface.
+func ITabLsym(typ, iface *types.Type) *obj.LSym {
+ s, existed := ir.Pkgs.Itab.LookupOK(typ.LinkString() + "," + iface.LinkString())
+ lsym := s.Linksym()
+
if !existed {
- itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()})
+ writeITab(lsym, typ, iface)
}
+ return lsym
+}
+// ITabAddr returns an expression representing a pointer to the itab
+// for concrete type typ implementing interface iface.
+func ITabAddr(typ, iface *types.Type) *ir.AddrExpr {
+ s, existed := ir.Pkgs.Itab.LookupOK(typ.LinkString() + "," + iface.LinkString())
lsym := s.Linksym()
+
+ if !existed {
+ writeITab(lsym, typ, iface)
+ }
+
n := ir.NewLinksymExpr(base.Pos, lsym, types.Types[types.TUINT8])
return typecheck.Expr(typecheck.NodAddr(n)).(*ir.AddrExpr)
}
@@ -945,25 +943,27 @@ func writeType(t *types.Type) *obj.LSym {
if t.IsPtr() && t.Sym() == nil && t.Elem().Sym() != nil {
tbase = t.Elem()
}
+ if tbase.Kind() == types.TFORW {
+ base.Fatalf("unresolved defined type: %v", tbase)
+ }
+
dupok := 0
- if tbase.Sym() == nil {
+ if tbase.Sym() == nil || tbase.HasShape() { // TODO(mdempsky): Probably need DUPOK for instantiated types too.
dupok = obj.DUPOK
}
- if base.Ctxt.Pkgpath != "runtime" || (tbase != types.Types[tbase.Kind()] && tbase != types.ByteType && tbase != types.RuneType && tbase != types.ErrorType) { // int, float, etc
- // named types from other files are defined only by those files
- if tbase.Sym() != nil && tbase.Sym().Pkg != types.LocalPkg {
- if i := typecheck.BaseTypeIndex(t); i >= 0 {
- lsym.Pkg = tbase.Sym().Pkg.Prefix
- lsym.SymIdx = int32(i)
- lsym.Set(obj.AttrIndexed, true)
- }
- return lsym
- }
- // TODO(mdempsky): Investigate whether this can happen.
- if tbase.Kind() == types.TFORW {
- return lsym
+ if !NeedEmit(tbase) {
+ if i := typecheck.BaseTypeIndex(t); i >= 0 {
+ lsym.Pkg = tbase.Sym().Pkg.Prefix
+ lsym.SymIdx = int32(i)
+ lsym.Set(obj.AttrIndexed, true)
}
+
+ // TODO(mdempsky): Investigate whether this still happens.
+ // If we know we don't need to emit code for a type,
+ // we should have a link-symbol index for it.
+ // See also TODO in NeedEmit.
+ return lsym
}
ot := 0
@@ -1226,88 +1226,11 @@ func InterfaceMethodOffset(ityp *types.Type, i int64) int64 {
return int64(commonSize()+4*types.PtrSize+uncommonSize(ityp)) + i*8
}
-// for each itabEntry, gather the methods on
-// the concrete type that implement the interface
-func CompileITabs() {
- for i := range itabs {
- tab := &itabs[i]
- methods := genfun(tab.t, tab.itype)
- if len(methods) == 0 {
- continue
- }
- tab.entries = methods
- }
-}
-
-// for the given concrete type and interface
-// type, return the (sorted) set of methods
-// on the concrete type that implement the interface
-func genfun(t, it *types.Type) []*obj.LSym {
- if t == nil || it == nil {
- return nil
- }
- sigs := imethods(it)
- methods := methods(t)
- out := make([]*obj.LSym, 0, len(sigs))
- // TODO(mdempsky): Short circuit before calling methods(t)?
- // See discussion on CL 105039.
- if len(sigs) == 0 {
- return nil
- }
-
- // both sigs and methods are sorted by name,
- // so we can find the intersect in a single pass
- for _, m := range methods {
- if m.name == sigs[0].name {
- out = append(out, m.isym)
- sigs = sigs[1:]
- if len(sigs) == 0 {
- break
- }
- }
- }
-
- if len(sigs) != 0 {
- base.Fatalf("incomplete itab")
- }
-
- return out
-}
-
-// ITabSym uses the information gathered in
-// CompileITabs to de-virtualize interface methods.
-// Since this is called by the SSA backend, it shouldn't
-// generate additional Nodes, Syms, etc.
-func ITabSym(it *obj.LSym, offset int64) *obj.LSym {
- var syms []*obj.LSym
- if it == nil {
- return nil
- }
-
- for i := range itabs {
- e := &itabs[i]
- if e.lsym == it {
- syms = e.entries
- break
- }
- }
- if syms == nil {
- return nil
- }
-
- // keep this arithmetic in sync with *itab layout
- methodnum := int((offset - 2*int64(types.PtrSize) - 8) / int64(types.PtrSize))
- if methodnum >= len(syms) {
- return nil
- }
- return syms[methodnum]
-}
-
// NeedRuntimeType ensures that a runtime type descriptor is emitted for t.
func NeedRuntimeType(t *types.Type) {
if t.HasTParam() {
- // Generic types don't have a runtime type descriptor (but will
- // have a dictionary)
+ // Generic types don't really exist at run-time and have no runtime
+ // type descriptor. But we do write out shape types.
return
}
if _, ok := signatset[t]; !ok {
@@ -1349,29 +1272,66 @@ func WriteRuntimeTypes() {
}
}
-func WriteTabs() {
- // process itabs
- for _, i := range itabs {
- // dump empty itab symbol into i.sym
- // type itab struct {
- // inter *interfacetype
- // _type *_type
- // hash uint32
- // _ [4]byte
- // fun [1]uintptr // variable sized
- // }
- o := objw.SymPtr(i.lsym, 0, writeType(i.itype), 0)
- o = objw.SymPtr(i.lsym, o, writeType(i.t), 0)
- o = objw.Uint32(i.lsym, o, types.TypeHash(i.t)) // copy of type hash
- o += 4 // skip unused field
- for _, fn := range genfun(i.t, i.itype) {
- o = objw.SymPtrWeak(i.lsym, o, fn, 0) // method pointer for each method
- }
- // Nothing writes static itabs, so they are read only.
- objw.Global(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
- i.lsym.Set(obj.AttrContentAddressable, true)
+// writeITab writes the itab for concrete type typ implementing
+// interface iface.
+func writeITab(lsym *obj.LSym, typ, iface *types.Type) {
+ // TODO(mdempsky): Fix methodWrapper, geneq, and genhash (and maybe
+ // others) to stop clobbering these.
+ oldpos, oldfn := base.Pos, ir.CurFunc
+ defer func() { base.Pos, ir.CurFunc = oldpos, oldfn }()
+
+ if typ == nil || (typ.IsPtr() && typ.Elem() == nil) || typ.IsUntyped() || iface == nil || !iface.IsInterface() || iface.IsEmptyInterface() {
+ base.Fatalf("writeITab(%v, %v)", typ, iface)
}
+ sigs := iface.AllMethods().Slice()
+ entries := make([]*obj.LSym, 0, len(sigs))
+
+ // both sigs and methods are sorted by name,
+ // so we can find the intersection in a single pass
+ for _, m := range methods(typ) {
+ if m.name == sigs[0].Sym {
+ entries = append(entries, m.isym)
+ if m.isym == nil {
+ panic("NO ISYM")
+ }
+ sigs = sigs[1:]
+ if len(sigs) == 0 {
+ break
+ }
+ }
+ if sigs[0].Sym.Name == "==" {
+ sigs = sigs[1:]
+ if len(sigs) == 0 {
+ break
+ }
+ }
+ }
+ if len(sigs) != 0 {
+ base.Fatalf("incomplete itab")
+ }
+
+ // dump itab symbol contents into lsym
+ // type itab struct {
+ // inter *interfacetype
+ // _type *_type
+ // hash uint32
+ // _ [4]byte
+ // fun [1]uintptr // variable sized
+ // }
+ o := objw.SymPtr(lsym, 0, writeType(iface), 0)
+ o = objw.SymPtr(lsym, o, writeType(typ), 0)
+ o = objw.Uint32(lsym, o, types.TypeHash(typ)) // copy of type hash
+ o += 4 // skip unused field
+ for _, fn := range entries {
+ o = objw.SymPtrWeak(lsym, o, fn, 0) // method pointer for each method
+ }
+ // Nothing writes static itabs, so they are read only.
+ objw.Global(lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
+ lsym.Set(obj.AttrContentAddressable, true)
+}
+
+func WriteTabs() {
// process ptabs
if types.LocalPkg.Name == "main" && len(ptabs) > 0 {
ot := 0
@@ -1453,7 +1413,7 @@ func WriteBasicTypes() {
type typeAndStr struct {
t *types.Type
- short string
+ short string // "short" here means NameString
regular string
}
@@ -1466,8 +1426,13 @@ func (a typesByString) Less(i, j int) bool {
}
// When the only difference between the types is whether
// they refer to byte or uint8, such as **byte vs **uint8,
- // the types' ShortStrings can be identical.
+ // the types' NameStrings can be identical.
// To preserve deterministic sort ordering, sort these by String().
+ //
+ // TODO(mdempsky): This all seems suspect. Using LinkString would
+ // avoid naming collisions, and there shouldn't be a reason to care
+ // about "byte" vs "uint8": they share the same runtime type
+ // descriptor anyway.
if a[i].regular != a[j].regular {
return a[i].regular < a[j].regular
}
@@ -1741,6 +1706,49 @@ func CollectPTabs() {
}
}
+// NeedEmit reports whether typ is a type that we need to emit code
+// for (e.g., runtime type descriptors, method wrappers).
+func NeedEmit(typ *types.Type) bool {
+ // TODO(mdempsky): Export data should keep track of which anonymous
+ // and instantiated types were emitted, so at least downstream
+ // packages can skip re-emitting them.
+ //
+ // Perhaps we can just generalize the linker-symbol indexing to
+ // track the index of arbitrary types, not just defined types, and
+ // use its presence to detect this. The same idea would work for
+ // instantiated generic functions too.
+
+ switch sym := typ.Sym(); {
+ case sym == nil:
+ // Anonymous type; possibly never seen before or ever again.
+ // Need to emit to be safe (however, see TODO above).
+ return true
+
+ case sym.Pkg == types.LocalPkg:
+ // Local defined type; our responsibility.
+ return true
+
+ case base.Ctxt.Pkgpath == "runtime" && (sym.Pkg == types.BuiltinPkg || sym.Pkg == ir.Pkgs.Unsafe):
+ // Package runtime is responsible for including code for builtin
+ // types (predeclared and package unsafe).
+ return true
+
+ case typ.IsFullyInstantiated():
+ // Instantiated type; possibly instantiated with unique type arguments.
+ // Need to emit to be safe (however, see TODO above).
+ return true
+
+ case typ.HasShape():
+ // Shape type; need to emit even though it lives in the .shape package.
+ // TODO: make sure the linker deduplicates them (see dupok in writeType above).
+ return true
+
+ default:
+ // Should have been emitted by an imported package.
+ return false
+ }
+}
+
// Generate a wrapper function to convert from
// a receiver of type T to a receiver of type U.
// That is,
@@ -1761,7 +1769,45 @@ func CollectPTabs() {
//
// rcvr - U
// method - M func (t T)(), a TFIELD type struct
-func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym {
+//
+// Also wraps methods on instantiated generic types for use in itab entries.
+// For an instantiated generic type G[int], we generate wrappers like:
+// G[int] pointer shaped:
+// func (x G[int]) f(arg) {
+// .inst.G[int].f(dictionary, x, arg)
+// }
+// G[int] not pointer shaped:
+// func (x *G[int]) f(arg) {
+// .inst.G[int].f(dictionary, *x, arg)
+// }
+// These wrappers are always fully stenciled.
+func methodWrapper(rcvr *types.Type, method *types.Field, forItab bool) *obj.LSym {
+ orig := rcvr
+ if forItab && !types.IsDirectIface(rcvr) {
+ rcvr = rcvr.PtrTo()
+ }
+
+ generic := false
+ // We don't need a dictionary if we are reaching a method (possibly via an
+ // embedded field) which is an interface method.
+ if !types.IsInterfaceMethod(method.Type) {
+ rcvr1 := rcvr
+ if rcvr1.IsPtr() {
+ rcvr1 = rcvr.Elem()
+ }
+ if len(rcvr1.RParams()) > 0 {
+ // If rcvr has rparams, remember that the method is generic, which
+ // means we need to add a dictionary to the wrapper.
+ generic = true
+ targs := rcvr1.RParams()
+ for _, t := range targs {
+ if t.HasShape() {
+ base.Fatalf("method on type instantiated with shapes targ:%+v rcvr:%+v", t, rcvr)
+ }
+ }
+ }
+ }
+
newnam := ir.MethodSym(rcvr, method.Sym)
lsym := newnam.Linksym()
if newnam.Siggen() {
@@ -1769,19 +1815,18 @@ func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym {
}
newnam.SetSiggen(true)
- if types.Identical(rcvr, method.Type.Recv().Type) {
+ // Except in quirks mode, unified IR creates its own wrappers.
+ if base.Debug.Unified != 0 && base.Debug.UnifiedQuirks == 0 {
return lsym
}
- // Only generate (*T).M wrappers for T.M in T's own package.
- if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type &&
- rcvr.Elem().Sym() != nil && rcvr.Elem().Sym().Pkg != types.LocalPkg {
+ // For generic methods, we need to generate the wrapper even if the receiver
+ // types are identical, because we want to add the dictionary.
+ if !generic && types.Identical(rcvr, method.Type.Recv().Type) {
return lsym
}
- // Only generate I.M wrappers for I in I's own package
- // but keep doing it for error.Error (was issue #29304).
- if rcvr.IsInterface() && rcvr.Sym() != nil && rcvr.Sym().Pkg != types.LocalPkg && rcvr != types.ErrorType {
+ if !NeedEmit(rcvr) || rcvr.IsPtr() && !NeedEmit(rcvr.Elem()) {
return lsym
}
@@ -1802,9 +1847,10 @@ func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym {
nthis := ir.AsNode(tfn.Type().Recv().Nname)
methodrcvr := method.Type.Recv().Type
+ indirect := rcvr.IsPtr() && rcvr.Elem() == methodrcvr
// generate nil pointer check for better error
- if rcvr.IsPtr() && rcvr.Elem() == methodrcvr {
+ if indirect {
// generating wrapper from *T to T.
n := ir.NewIfStmt(base.Pos, nil, nil, nil)
n.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, nthis, typecheck.NodNil())
@@ -1814,7 +1860,6 @@ func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym {
}
dot := typecheck.AddImplicitDots(ir.NewSelectorExpr(base.Pos, ir.OXDOT, nthis, method.Sym))
-
// generate call
// It's not possible to use a tail call when dynamic linking on ppc64le. The
// bad scenario is when a local call is made to the wrapper: the wrapper will
@@ -1826,7 +1871,7 @@ func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym {
// Disable tailcall for RegabiArgs for now. The IR does not connect the
// arguments with the OTAILCALL node, and the arguments are not marshaled
// correctly.
- if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) && !(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) && !buildcfg.Experiment.RegabiArgs {
+ if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) && !(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) && !buildcfg.Experiment.RegabiArgs && !generic {
// generate tail call: adjust pointer receiver and jump to embedded method.
left := dot.X // skip final .M
if !left.Type().IsPtr() {
@@ -1837,8 +1882,72 @@ func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym {
fn.Body.Append(ir.NewTailCallStmt(base.Pos, method.Nname.(*ir.Name)))
} else {
fn.SetWrapper(true) // ignore frame for panic+recover matching
- call := ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil)
- call.Args = ir.ParamNames(tfn.Type())
+ var call *ir.CallExpr
+
+ if generic && dot.X != nthis {
+ // TODO: for now, we don't try to generate dictionary wrappers for
+ // any methods involving embedded fields, because we're not
+ // generating the needed dictionaries in instantiateMethods.
+ generic = false
+ }
+
+ if generic {
+ var args []ir.Node
+ var targs []*types.Type
+ if rcvr.IsPtr() {
+ targs = rcvr.Elem().RParams()
+ } else {
+ targs = rcvr.RParams()
+ }
+ if strings.HasPrefix(ir.MethodSym(orig, method.Sym).Name, ".inst.") {
+ fmt.Printf("%s\n", ir.MethodSym(orig, method.Sym).Name)
+ panic("multiple .inst.")
+ }
+ // The wrapper for an auto-generated pointer/non-pointer
+ // receiver method should share the same dictionary as the
+ // corresponding original (user-written) method.
+ baseOrig := orig
+ if baseOrig.IsPtr() && !method.Type.Recv().Type.IsPtr() {
+ baseOrig = baseOrig.Elem()
+ } else if !baseOrig.IsPtr() && method.Type.Recv().Type.IsPtr() {
+ baseOrig = types.NewPtr(baseOrig)
+ }
+ args = append(args, getDictionary(ir.MethodSym(baseOrig, method.Sym), targs))
+ if indirect {
+ args = append(args, ir.NewStarExpr(base.Pos, dot.X))
+ } else if methodrcvr.IsPtr() && methodrcvr.Elem() == dot.X.Type() {
+ // Case where method call is via a non-pointer
+ // embedded field with a pointer method.
+ args = append(args, typecheck.NodAddrAt(base.Pos, dot.X))
+ } else {
+ args = append(args, dot.X)
+ }
+ args = append(args, ir.ParamNames(tfn.Type())...)
+
+ // Target method uses shaped names.
+ targs2 := make([]*types.Type, len(targs))
+ for i, t := range targs {
+ targs2[i] = typecheck.Shapify(t)
+ }
+ targs = targs2
+
+ sym := typecheck.MakeInstName(ir.MethodSym(methodrcvr, method.Sym), targs, true)
+ if sym.Def == nil {
+ // Currently we make sure that we have all the instantiations
+ // we need by generating them all in ../noder/stencil.go:instantiateMethods
+ // TODO: maybe there's a better, more incremental way to generate
+ // only the instantiations we need?
+ base.Fatalf("instantiation %s not found", sym.Name)
+ }
+ target := ir.AsNode(sym.Def)
+ call = ir.NewCallExpr(base.Pos, ir.OCALL, target, args)
+ // Fill in the generic method node that was not filled in
+ // by instantiateMethod.
+ method.Nname = fn.Nname
+ } else {
+ call = ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil)
+ call.Args = ir.ParamNames(tfn.Type())
+ }
call.IsDDD = tfn.Type().IsVariadic()
if method.Type.NumResults() > 0 {
ret := ir.NewReturnStmt(base.Pos, nil)
@@ -1858,13 +1967,10 @@ func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym {
ir.CurFunc = fn
typecheck.Stmts(fn.Body)
- // Inline calls within (*T).M wrappers. This is safe because we only
- // generate those wrappers within the same compilation unit as (T).M.
- // TODO(mdempsky): Investigate why we can't enable this more generally.
- if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym() != nil {
+ if AfterGlobalEscapeAnalysis {
inline.InlineCalls(fn)
+ escape.Batch([]*ir.Func{fn}, false)
}
- escape.Batch([]*ir.Func{fn}, false)
ir.CurFunc = nil
typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
@@ -1872,11 +1978,21 @@ func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym {
return lsym
}
+// AfterGlobalEscapeAnalysis tracks whether package gc has already
+// performed the main, global escape analysis pass. If so,
+// methodWrapper takes responsibility for escape analyzing any
+// generated wrappers.
+var AfterGlobalEscapeAnalysis bool
+
var ZeroSize int64
// MarkTypeUsedInInterface marks that type t is converted to an interface.
// This information is used in the linker in dead method elimination.
func MarkTypeUsedInInterface(t *types.Type, from *obj.LSym) {
+ if t.HasShape() {
+ // Shape types shouldn't be put in interfaces, so we shouldn't ever get here.
+ base.Fatalf("shape types have no methods %+v", t)
+ }
tsym := TypeLinksym(t)
// Emit a marker relocation. The linker will know the type is converted
// to an interface if "from" is reachable.
@@ -1897,9 +2013,55 @@ func MarkUsedIfaceMethod(n *ir.CallExpr) {
tsym := TypeLinksym(ityp)
r := obj.Addrel(ir.CurFunc.LSym)
r.Sym = tsym
- // dot.Xoffset is the method index * PtrSize (the offset of code pointer
+ // dot.Offset() is the method index * PtrSize (the offset of code pointer
// in itab).
midx := dot.Offset() / int64(types.PtrSize)
r.Add = InterfaceMethodOffset(ityp, midx)
r.Type = objabi.R_USEIFACEMETHOD
}
+
+// MarkUsedIfaceMethodIndex marks that method number ix (in the AllMethods list)
+// of interface type ityp is used, and should be attached to lsym.
+func MarkUsedIfaceMethodIndex(lsym *obj.LSym, ityp *types.Type, ix int) {
+ tsym := TypeLinksym(ityp)
+ r := obj.Addrel(lsym)
+ r.Sym = tsym
+ r.Add = InterfaceMethodOffset(ityp, int64(ix))
+ r.Type = objabi.R_USEIFACEMETHOD
+}
+
+// getDictionary returns the dictionary for the given named generic function
+// or method, with the given type arguments.
+func getDictionary(gf *types.Sym, targs []*types.Type) ir.Node {
+ if len(targs) == 0 {
+ base.Fatalf("%s should have type arguments", gf.Name)
+ }
+ for _, t := range targs {
+ if t.HasShape() {
+ base.Fatalf("dictionary for %s should only use concrete types: %+v", gf.Name, t)
+ }
+ }
+
+ sym := typecheck.MakeDictName(gf, targs, true)
+
+	// The dictionary must already have been generated by this point.
+ if lsym := sym.Linksym(); len(lsym.P) == 0 {
+ base.Fatalf("Dictionary should have already been generated: %s.%s", sym.Pkg.Path, sym.Name)
+ }
+
+ // Make a node referencing the dictionary symbol.
+ n := typecheck.NewName(sym)
+ n.SetType(types.Types[types.TUINTPTR]) // should probably be [...]uintptr, but doesn't really matter
+ n.SetTypecheck(1)
+ n.Class = ir.PEXTERN
+ sym.Def = n
+
+ // Return the address of the dictionary.
+ np := typecheck.NodAddr(n)
+ // Note: treat dictionary pointers as uintptrs, so they aren't pointers
+ // with respect to GC. That saves on stack scanning work, write barriers, etc.
+ // We can get away with it because dictionaries are global variables.
+ np.SetType(types.Types[types.TUINTPTR])
+ np.SetTypecheck(1)
+ return np
+}
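The lookup-and-fail pattern in getDictionary above — build a mangled name from the concrete type arguments, then require that the dictionary was generated up front — can be pictured outside the compiler with a minimal standalone sketch. All names here (dictName, the dictionaries map) are hypothetical stand-ins, not the compiler's API.

package main

import (
	"fmt"
	"log"
	"strings"
)

// dictionaries stands in for the pre-generated, read-only dictionary symbols.
var dictionaries = map[string][]uintptr{
	"main.Max[int,int]": {0x1000, 0x1008},
}

// dictName builds an instantiation key from the base symbol and type arguments.
func dictName(base string, targs ...string) string {
	return base + "[" + strings.Join(targs, ",") + "]"
}

// getDictionary mirrors the compiler's check: the entry must already exist.
func getDictionary(base string, targs ...string) []uintptr {
	if len(targs) == 0 {
		log.Fatalf("%s should have type arguments", base)
	}
	name := dictName(base, targs...)
	d, ok := dictionaries[name]
	if !ok {
		log.Fatalf("dictionary should have already been generated: %s", name)
	}
	return d
}

func main() {
	fmt.Println(getDictionary("main.Max", "int", "int"))
}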
diff --git a/src/cmd/compile/internal/riscv64/galign.go b/src/cmd/compile/internal/riscv64/galign.go
index 338248a7cf..846ed8fb38 100644
--- a/src/cmd/compile/internal/riscv64/galign.go
+++ b/src/cmd/compile/internal/riscv64/galign.go
@@ -16,7 +16,6 @@ func Init(arch *ssagen.ArchInfo) {
arch.MAXWIDTH = 1 << 50
arch.Ginsnop = ginsnop
- arch.Ginsnopdefer = ginsnop
arch.ZeroRange = zeroRange
arch.SSAMarkMoves = ssaMarkMoves
diff --git a/src/cmd/compile/internal/s390x/galign.go b/src/cmd/compile/internal/s390x/galign.go
index b004a2db0a..d880834c22 100644
--- a/src/cmd/compile/internal/s390x/galign.go
+++ b/src/cmd/compile/internal/s390x/galign.go
@@ -16,7 +16,6 @@ func Init(arch *ssagen.ArchInfo) {
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
- arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = ssaMarkMoves
arch.SSAGenValue = ssaGenValue
diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go
index a8393a1999..32e3a0860e 100644
--- a/src/cmd/compile/internal/ssa/config.go
+++ b/src/cmd/compile/internal/ssa/config.go
@@ -149,12 +149,6 @@ type Frontend interface {
// for the parts of that compound type.
SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot
- // DerefItab dereferences an itab function
- // entry, given the symbol of the itab and
- // the byte offset of the function pointer.
- // It may return nil.
- DerefItab(sym *obj.LSym, offset int64) *obj.LSym
-
// Line returns a string describing the given position.
Line(src.XPos) string
@@ -177,7 +171,7 @@ type Frontend interface {
}
// NewConfig returns a new configuration object for the given architecture.
-func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config {
+func NewConfig(arch string, types Types, ctxt *obj.Link, optimize, softfloat bool) *Config {
c := &Config{arch: arch, Types: types}
c.useAvg = true
c.useHmul = true
@@ -196,7 +190,7 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config
c.floatParamRegs = paramFloatRegAMD64
c.FPReg = framepointerRegAMD64
c.LinkReg = linkRegAMD64
- c.hasGReg = buildcfg.Experiment.RegabiG
+ c.hasGReg = true
case "386":
c.PtrSize = 4
c.RegSize = 4
@@ -228,6 +222,8 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config
c.registers = registersARM64[:]
c.gpRegMask = gpRegMaskARM64
c.fpRegMask = fpRegMaskARM64
+ c.intParamRegs = paramIntRegARM64
+ c.floatParamRegs = paramFloatRegARM64
c.FPReg = framepointerRegARM64
c.LinkReg = linkRegARM64
c.hasGReg = true
@@ -324,6 +320,10 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config
c.optimize = optimize
c.useSSE = true
c.UseFMA = true
+ c.SoftFloat = softfloat
+ if softfloat {
+ c.floatParamRegs = nil // no FP registers in softfloat mode
+ }
c.ABI0 = abi.NewABIConfig(0, 0, ctxt.FixedFrameSize())
c.ABI1 = abi.NewABIConfig(len(c.intParamRegs), len(c.floatParamRegs), ctxt.FixedFrameSize())
diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go
index 7e973ab205..a8c6c26dad 100644
--- a/src/cmd/compile/internal/ssa/expand_calls.go
+++ b/src/cmd/compile/internal/ssa/expand_calls.go
@@ -215,7 +215,7 @@ func (x *expandState) isAlreadyExpandedAggregateType(t *types.Type) bool {
return false
}
return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice() ||
- t.Size() > x.regSize && t.IsInteger()
+ (t.Size() > x.regSize && (t.IsInteger() || (x.f.Config.SoftFloat && t.IsFloat())))
}
// offsetFrom creates an offset from a pointer, simplifying chained offsets and offsets from SP
@@ -380,6 +380,12 @@ func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64,
// The OpLoad was created to load the single field of the IData
// This case removes that StructSelect.
if leafType != selector.Type {
+ if x.f.Config.SoftFloat && selector.Type.IsFloat() {
+ if x.debug {
+ x.Printf("---OpLoad, break\n")
+ }
+ break // softfloat pass will take care of that
+ }
x.f.Fatalf("Unexpected Load as selector, leaf=%s, selector=%s\n", leaf.LongString(), selector.LongString())
}
leaf.copyOf(selector)
@@ -525,11 +531,11 @@ func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64,
case OpComplexReal:
ls := x.rewriteSelect(leaf, selector.Args[0], offset, regOffset)
- locs = x.splitSlots(ls, ".real", 0, leafType)
+ locs = x.splitSlots(ls, ".real", 0, selector.Type)
case OpComplexImag:
- ls := x.rewriteSelect(leaf, selector.Args[0], offset+leafType.Width, regOffset+RO_complex_imag) // result is FloatNN, width of result is offset of imaginary part.
- locs = x.splitSlots(ls, ".imag", leafType.Width, leafType)
+ ls := x.rewriteSelect(leaf, selector.Args[0], offset+selector.Type.Width, regOffset+RO_complex_imag) // result is FloatNN, width of result is offset of imaginary part.
+ locs = x.splitSlots(ls, ".imag", selector.Type.Width, selector.Type)
case OpStringLen, OpSliceLen:
ls := x.rewriteSelect(leaf, selector.Args[0], offset+x.ptrSize, regOffset+RO_slice_len)
diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go
index 8ed8a0c4a6..6d3c0f3ccb 100644
--- a/src/cmd/compile/internal/ssa/export_test.go
+++ b/src/cmd/compile/internal/ssa/export_test.go
@@ -39,7 +39,7 @@ func testConfigArch(tb testing.TB, arch string) *Conf {
tb.Fatal("testTypes is 64-bit only")
}
c := &Conf{
- config: NewConfig(arch, testTypes, ctxt, true),
+ config: NewConfig(arch, testTypes, ctxt, true, false),
tb: tb,
}
return c
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index 4cd00732fc..45c0238317 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -460,7 +460,7 @@
(IsInBounds idx len) => (SETB (CMPQ idx len))
(IsSliceInBounds idx len) => (SETBE (CMPQ idx len))
(NilCheck ...) => (LoweredNilCheck ...)
-(GetG mem) && !(buildcfg.Experiment.RegabiG && v.Block.Func.OwnAux.Fn.ABI() == obj.ABIInternal) => (LoweredGetG mem) // only lower in old ABI. in new ABI we have a G register.
+(GetG mem) && v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal => (LoweredGetG mem) // only lower in old ABI. in new ABI we have a G register.
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules
index 62699f290c..530e48bcb2 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules
@@ -2868,3 +2868,12 @@
&& isInlinableMemmove(dst, src, sz, config)
&& clobber(s1, s2, s3, call)
=> (Move [sz] dst src mem)
+
+// Match post-lowering calls, register version.
+(SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem))
+ && sz >= 0
+ && isSameCall(sym, "runtime.memmove")
+ && call.Uses == 1
+ && isInlinableMemmove(dst, src, sz, config)
+ && clobber(call)
+ => (Move [sz] dst src mem)
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
index 18a5666b40..5de0b5f020 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
@@ -482,9 +482,9 @@ func init() {
{name: "CSETM", argLength: 1, reg: readflags, asm: "CSETM", aux: "CCop"}, // auxint(flags) ? -1 : 0
// function calls
- {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
- {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R26"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
- {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+ {name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{gpsp, buildReg("R26"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, last arg=mem, auxint=argsize, returns mem
// pseudo-ops
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem.
@@ -759,15 +759,17 @@ func init() {
}
archs = append(archs, arch{
- name: "ARM64",
- pkg: "cmd/internal/obj/arm64",
- genfile: "../../arm64/ssa.go",
- ops: ops,
- blocks: blocks,
- regnames: regNamesARM64,
- gpregmask: gp,
- fpregmask: fp,
- framepointerreg: -1, // not used
- linkreg: int8(num["R30"]),
+ name: "ARM64",
+ pkg: "cmd/internal/obj/arm64",
+ genfile: "../../arm64/ssa.go",
+ ops: ops,
+ blocks: blocks,
+ regnames: regNamesARM64,
+ ParamIntRegNames: "R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15",
+ ParamFloatRegNames: "F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15",
+ gpregmask: gp,
+ fpregmask: fp,
+ framepointerreg: -1, // not used
+ linkreg: int8(num["R30"]),
})
}
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index 1c37fbe0db..df15c2edda 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -20664,7 +20664,7 @@ var opcodeTable = [...]opInfo{
{
name: "CALLstatic",
auxType: auxCallOff,
- argLen: 1,
+ argLen: -1,
clobberFlags: true,
call: true,
reg: regInfo{
@@ -20674,7 +20674,7 @@ var opcodeTable = [...]opInfo{
{
name: "CALLclosure",
auxType: auxCallOff,
- argLen: 3,
+ argLen: -1,
clobberFlags: true,
call: true,
reg: regInfo{
@@ -20688,7 +20688,7 @@ var opcodeTable = [...]opInfo{
{
name: "CALLinter",
auxType: auxCallOff,
- argLen: 2,
+ argLen: -1,
clobberFlags: true,
call: true,
reg: regInfo{
@@ -36400,8 +36400,8 @@ var registersARM64 = [...]Register{
{62, arm64.REG_F31, -1, "F31"},
{63, 0, -1, "SB"},
}
-var paramIntRegARM64 = []int8(nil)
-var paramFloatRegARM64 = []int8(nil)
+var paramIntRegARM64 = []int8{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
+var paramFloatRegARM64 = []int8{31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46}
var gpRegMaskARM64 = regMask(670826495)
var fpRegMaskARM64 = regMask(9223372034707292160)
var specialRegMaskARM64 = regMask(0)
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index 375c4d5a56..115d563933 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -745,27 +745,21 @@ func uaddOvf(a, b int64) bool {
return uint64(a)+uint64(b) < uint64(a)
}
-// de-virtualize an InterCall
-// 'sym' is the symbol for the itab
-func devirt(v *Value, aux Aux, sym Sym, offset int64) *AuxCall {
- f := v.Block.Func
- n, ok := sym.(*obj.LSym)
- if !ok {
+// loadLSymOffset simulates reading a word at an offset into a
+// read-only symbol's runtime memory. If it would read a pointer to
+// another symbol, that symbol is returned. Otherwise, it returns nil.
+func loadLSymOffset(lsym *obj.LSym, offset int64) *obj.LSym {
+ if lsym.Type != objabi.SRODATA {
return nil
}
- lsym := f.fe.DerefItab(n, offset)
- if f.pass.debug > 0 {
- if lsym != nil {
- f.Warnl(v.Pos, "de-virtualizing call")
- } else {
- f.Warnl(v.Pos, "couldn't de-virtualize call")
+
+ for _, r := range lsym.R {
+ if int64(r.Off) == offset && r.Type&^objabi.R_WEAK == objabi.R_ADDR && r.Add == 0 {
+ return r.Sym
}
}
- if lsym == nil {
- return nil
- }
- va := aux.(*AuxCall)
- return StaticAuxCall(lsym, va.abiInfo)
+
+ return nil
}
// de-virtualize an InterLECall
@@ -776,18 +770,14 @@ func devirtLESym(v *Value, aux Aux, sym Sym, offset int64) *obj.LSym {
return nil
}
- f := v.Block.Func
- lsym := f.fe.DerefItab(n, offset)
- if f.pass.debug > 0 {
+ lsym := loadLSymOffset(n, offset)
+ if f := v.Block.Func; f.pass.debug > 0 {
if lsym != nil {
f.Warnl(v.Pos, "de-virtualizing call")
} else {
f.Warnl(v.Pos, "couldn't de-virtualize call")
}
}
- if lsym == nil {
- return nil
- }
return lsym
}
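The new loadLSymOffset above replaces the frontend's DerefItab hook: it "reads" a pointer-sized word out of a read-only symbol by scanning that symbol's relocations for a plain R_ADDR at the requested offset. A standalone analogue, using invented record types rather than cmd/internal/obj, shows the same scan:

package main

import "fmt"

type reloc struct {
	off int64  // byte offset the relocation patches
	typ string // relocation kind, e.g. "R_ADDR"
	add int64  // addend
	sym string // target symbol name
}

type symbol struct {
	readOnly bool
	relocs   []reloc
}

// loadSymOffset returns the symbol "stored" at the given byte offset of a
// read-only symbol, or "" if no plain address relocation matches there.
func loadSymOffset(s symbol, offset int64) string {
	if !s.readOnly {
		return ""
	}
	for _, r := range s.relocs {
		if r.off == offset && r.typ == "R_ADDR" && r.add == 0 {
			return r.sym
		}
	}
	return ""
}

func main() {
	itab := symbol{readOnly: true, relocs: []reloc{
		{off: 0, typ: "R_ADDR", sym: "type.main.T"},
		{off: 24, typ: "R_ADDR", sym: "main.(*T).Read"},
	}}
	fmt.Println(loadSymOffset(itab, 24)) // main.(*T).Read — the devirtualized target
}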
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index 5045ba7351..89d32c0657 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -3,7 +3,6 @@
package ssa
-import "internal/buildcfg"
import "math"
import "cmd/internal/obj"
import "cmd/compile/internal/types"
@@ -29339,11 +29338,11 @@ func rewriteValueAMD64_OpFloor(v *Value) bool {
func rewriteValueAMD64_OpGetG(v *Value) bool {
v_0 := v.Args[0]
// match: (GetG mem)
- // cond: !(buildcfg.Experiment.RegabiG && v.Block.Func.OwnAux.Fn.ABI() == obj.ABIInternal)
+ // cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal
// result: (LoweredGetG mem)
for {
mem := v_0
- if !(!(buildcfg.Experiment.RegabiG && v.Block.Func.OwnAux.Fn.ABI() == obj.ABIInternal)) {
+ if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) {
break
}
v.reset(OpAMD64LoweredGetG)
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index 3cdc4d36cb..f7840c5503 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -25997,7 +25997,7 @@ func rewriteValueARM64_OpSelectN(v *Value) bool {
break
}
call := v_0
- if call.Op != OpARM64CALLstatic {
+ if call.Op != OpARM64CALLstatic || len(call.Args) != 1 {
break
}
sym := auxToCall(call.Aux)
@@ -26031,6 +26031,34 @@ func rewriteValueARM64_OpSelectN(v *Value) bool {
v.AddArg3(dst, src, mem)
return true
}
+ // match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem))
+ // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)
+ // result: (Move [sz] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpARM64CALLstatic || len(call.Args) != 4 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ mem := call.Args[3]
+ dst := call.Args[0]
+ src := call.Args[1]
+ call_2 := call.Args[2]
+ if call_2.Op != OpARM64MOVDconst {
+ break
+ }
+ sz := auxIntToInt64(call_2.AuxInt)
+ if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(sz)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
return false
}
func rewriteValueARM64_OpSlicemask(v *Value) bool {
diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go
index 4e3e5e75e3..c5130b2ee5 100644
--- a/src/cmd/compile/internal/ssa/schedule.go
+++ b/src/cmd/compile/internal/ssa/schedule.go
@@ -220,7 +220,7 @@ func schedule(f *Func) {
// unless they are phi values (which must be first).
// OpArg also goes first -- if it is stack it register allocates
// to a LoadReg, if it is register it is from the beginning anyway.
- if c.Op == OpPhi || c.Op == OpArg {
+ if score[c.ID] == ScorePhi || score[c.ID] == ScoreArg {
continue
}
score[c.ID] = ScoreControl
diff --git a/src/cmd/compile/internal/ssa/softfloat.go b/src/cmd/compile/internal/ssa/softfloat.go
index a8a8f83629..351f824a9f 100644
--- a/src/cmd/compile/internal/ssa/softfloat.go
+++ b/src/cmd/compile/internal/ssa/softfloat.go
@@ -63,6 +63,7 @@ func softfloat(f *Func) {
v.Aux = f.Config.Types.UInt32
case 8:
v.Aux = f.Config.Types.UInt64
+ newInt64 = true
default:
v.Fatalf("bad float type with size %d", size)
}
diff --git a/src/cmd/compile/internal/ssagen/abi.go b/src/cmd/compile/internal/ssagen/abi.go
index e460adaf95..6d8c53e722 100644
--- a/src/cmd/compile/internal/ssagen/abi.go
+++ b/src/cmd/compile/internal/ssagen/abi.go
@@ -152,6 +152,9 @@ func (s *SymABIs) GenABIWrappers() {
// Apply definitions.
defABI, hasDefABI := s.defs[symName]
if hasDefABI {
+ if len(fn.Body) != 0 {
+ base.ErrorfAt(fn.Pos(), "%v defined in both Go and assembly", fn)
+ }
fn.ABI = defABI
}
diff --git a/src/cmd/compile/internal/ssagen/arch.go b/src/cmd/compile/internal/ssagen/arch.go
index 7215f42c05..483e45cad4 100644
--- a/src/cmd/compile/internal/ssagen/arch.go
+++ b/src/cmd/compile/internal/ssagen/arch.go
@@ -29,8 +29,7 @@ type ArchInfo struct {
// at function entry, and it is ok to clobber registers.
ZeroRange func(*objw.Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog
- Ginsnop func(*objw.Progs) *obj.Prog
- Ginsnopdefer func(*objw.Progs) *obj.Prog // special ginsnop for deferreturn
+ Ginsnop func(*objw.Progs) *obj.Prog
// SSAMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
SSAMarkMoves func(*State, *ssa.Block)
@@ -42,10 +41,10 @@ type ArchInfo struct {
// for all values in the block before SSAGenBlock.
SSAGenBlock func(s *State, b, next *ssa.Block)
- // LoadRegResults emits instructions that loads register-assigned results
- // into registers. They are already in memory (PPARAMOUT nodes).
- // Used in open-coded defer return path.
- LoadRegResults func(s *State, f *ssa.Func)
+	// LoadRegResult emits instructions that load the register-assigned result
+ // at n+off (n is PPARAMOUT) to register reg. The result is already in
+ // memory. Used in open-coded defer return path.
+ LoadRegResult func(s *State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog
// SpillArgReg emits instructions that spill reg to n+off.
SpillArgReg func(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog
diff --git a/src/cmd/compile/internal/ssagen/pgen.go b/src/cmd/compile/internal/ssagen/pgen.go
index 62567535d7..93157bfa11 100644
--- a/src/cmd/compile/internal/ssagen/pgen.go
+++ b/src/cmd/compile/internal/ssagen/pgen.go
@@ -114,7 +114,10 @@ func (s *ssafn) AllocFrame(f *ssa.Func) {
}
}
- sort.Sort(byStackVar(fn.Dcl))
+ // Use sort.Stable instead of sort.Sort so stack layout (and thus
+ // compiler output) is less sensitive to frontend changes that
+ // introduce or remove unused variables.
+ sort.Stable(byStackVar(fn.Dcl))
// Reassign stack offsets of the locals that are used.
lastHasPtr := false
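The switch to a stable sort above matters only when sort keys tie: with equal keys, sort.Stable keeps the slice's original (declaration) order, so adding or removing an unrelated variable does not reshuffle the rest of the frame. A tiny demonstration of that property, using sort.SliceStable and a made-up slot type for brevity rather than the compiler's byStackVar:

package main

import (
	"fmt"
	"sort"
)

type slot struct {
	name string
	size int64
}

func main() {
	slots := []slot{{"a", 8}, {"b", 8}, {"c", 16}, {"d", 8}}
	// Sort by descending size; a, b and d compare equal, so a stable sort
	// keeps them in declaration order.
	sort.SliceStable(slots, func(i, j int) bool { return slots[i].size > slots[j].size })
	fmt.Println(slots) // [{c 16} {a 8} {b 8} {d 8}]
}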
diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
index dfa76006de..b0f2585e3a 100644
--- a/src/cmd/compile/internal/ssagen/ssa.go
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -87,8 +87,7 @@ func InitConfig() {
_ = types.NewPtr(types.Types[types.TINT64]) // *int64
_ = types.NewPtr(types.ErrorType) // *error
types.NewPtrCacheEnabled = false
- ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0)
- ssaConfig.SoftFloat = Arch.SoftFloat
+ ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0, Arch.SoftFloat)
ssaConfig.Race = base.Flag.Race
ssaCaches = make([]ssa.Cache, base.Flag.LowerC)
@@ -279,18 +278,6 @@ func regAbiForFuncType(ft *types.Func) bool {
return np > 0 && strings.Contains(ft.Params.FieldType(np-1).String(), magicLastTypeName)
}
-// getParam returns the Field of ith param of node n (which is a
-// function/method/interface call), where the receiver of a method call is
-// considered as the 0th parameter. This does not include the receiver of an
-// interface call.
-func getParam(n *ir.CallExpr, i int) *types.Field {
- t := n.X.Type()
- if n.Op() == ir.OCALLMETH {
- base.Fatalf("OCALLMETH missed by walkCall")
- }
- return t.Params().Field(i)
-}
-
// dvarint writes a varint v to the funcdata in symbol x and returns the new offset
func dvarint(x *obj.LSym, off int, v int64) int {
if v < 0 || v > 1e9 {
@@ -324,66 +311,21 @@ func dvarint(x *obj.LSym, off int, v int64) int {
// for stack variables are specified as the number of bytes below varp (pointer to the
// top of the local variables) for their starting address. The format is:
//
-// - Max total argument size among all the defers
// - Offset of the deferBits variable
// - Number of defers in the function
// - Information about each defer call, in reverse order of appearance in the function:
-// - Total argument size of the call
// - Offset of the closure value to call
-// - Number of arguments (including interface receiver or method receiver as first arg)
-// - Information about each argument
-// - Offset of the stored defer argument in this function's frame
-// - Size of the argument
-// - Offset of where argument should be placed in the args frame when making call
func (s *state) emitOpenDeferInfo() {
x := base.Ctxt.Lookup(s.curfn.LSym.Name + ".opendefer")
s.curfn.LSym.Func().OpenCodedDeferInfo = x
off := 0
-
- // Compute maxargsize (max size of arguments for all defers)
- // first, so we can output it first to the funcdata
- var maxargsize int64
- for i := len(s.openDefers) - 1; i >= 0; i-- {
- r := s.openDefers[i]
- argsize := r.n.X.Type().ArgWidth() // TODO register args: but maybe use of abi0 will make this easy
- if argsize > maxargsize {
- maxargsize = argsize
- }
- }
- off = dvarint(x, off, maxargsize)
off = dvarint(x, off, -s.deferBitsTemp.FrameOffset())
off = dvarint(x, off, int64(len(s.openDefers)))
// Write in reverse-order, for ease of running in that order at runtime
for i := len(s.openDefers) - 1; i >= 0; i-- {
r := s.openDefers[i]
- off = dvarint(x, off, r.n.X.Type().ArgWidth())
off = dvarint(x, off, -r.closureNode.FrameOffset())
- numArgs := len(r.argNodes)
- if r.rcvrNode != nil {
- // If there's an interface receiver, treat/place it as the first
- // arg. (If there is a method receiver, it's already included as
- // first arg in r.argNodes.)
- numArgs++
- }
- off = dvarint(x, off, int64(numArgs))
- argAdjust := 0 // presence of receiver offsets the parameter count.
- if r.rcvrNode != nil {
- off = dvarint(x, off, -okOffset(r.rcvrNode.FrameOffset()))
- off = dvarint(x, off, s.config.PtrSize)
- off = dvarint(x, off, 0) // This is okay because defer records use ABI0 (for now)
- argAdjust++
- }
-
- // TODO(register args) assume abi0 for this?
- ab := s.f.ABI0
- pri := ab.ABIAnalyzeFuncType(r.n.X.Type().FuncType())
- for j, arg := range r.argNodes {
- f := getParam(r.n, j)
- off = dvarint(x, off, -okOffset(arg.FrameOffset()))
- off = dvarint(x, off, f.Type.Size())
- off = dvarint(x, off, okOffset(pri.InParam(j+argAdjust).FrameOffset(pri)))
- }
}
}
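After this change the open-defer funcdata described above is just a short varint sequence: the deferBits slot offset, the defer count, and one closure-slot offset per defer in reverse source order. A standalone sketch of that trimmed layout, with made-up offsets and uvarint encoding via encoding/binary (an approximation of the compiler's dvarint/funcdata writer, not the real one):

package main

import (
	"encoding/binary"
	"fmt"
)

func encodeOpenDeferInfo(deferBitsOff uint64, closureOffs []uint64) []byte {
	var buf []byte
	buf = binary.AppendUvarint(buf, deferBitsOff)
	buf = binary.AppendUvarint(buf, uint64(len(closureOffs)))
	for i := len(closureOffs) - 1; i >= 0; i-- { // reverse order, as the runtime walks it
		buf = binary.AppendUvarint(buf, closureOffs[i])
	}
	return buf
}

func main() {
	data := encodeOpenDeferInfo(8, []uint64{16, 24, 32})
	deferBitsOff, n := binary.Uvarint(data)
	data = data[n:]
	count, n := binary.Uvarint(data)
	data = data[n:]
	fmt.Println("deferBits at", deferBitsOff, "defers:", count)
	for i := uint64(0); i < count; i++ {
		off, n := binary.Uvarint(data)
		data = data[n:]
		fmt.Println("closure slot at", off)
	}
}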
@@ -580,7 +522,7 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
}
// Populate closure variables.
- if !fn.ClosureCalled() {
+ if fn.Needctxt() {
clo := s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)
offset := int64(types.PtrSize) // PtrSize to skip past function entry PC field
for _, n := range fn.ClosureVars {
@@ -650,7 +592,6 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
// it mimics the behavior of the former ABI (everything stored) and because it's not 100%
// clear if naming conventions are respected in autogenerated code.
// TODO figure out exactly what's unused, don't spill it. Make liveness fine-grained, also.
- // TODO non-amd64 architectures have link registers etc that may require adjustment here.
for _, p := range params.InParams() {
typs, offs := p.RegisterTypesAndOffsets()
for i, t := range typs {
@@ -865,16 +806,6 @@ type openDeferInfo struct {
// function, method, or interface call, to store a closure that panic
// processing can use for this defer.
closureNode *ir.Name
- // If defer call is interface call, the address of the argtmp where the
- // receiver is stored
- rcvr *ssa.Value
- // The node representing the argtmp where the receiver is stored
- rcvrNode *ir.Name
- // The addresses of the argtmps where the evaluated arguments of the defer
- // function call are stored.
- argVals []*ssa.Value
- // The nodes representing the argtmps where the args of the defer are stored
- argNodes []*ir.Name
}
type state struct {
@@ -1491,7 +1422,12 @@ func (s *state) stmt(n ir.Node) {
case ir.OAS2DOTTYPE:
n := n.(*ir.AssignListStmt)
- res, resok := s.dottype(n.Rhs[0].(*ir.TypeAssertExpr), true)
+ var res, resok *ssa.Value
+ if n.Rhs[0].Op() == ir.ODOTTYPE2 {
+ res, resok = s.dottype(n.Rhs[0].(*ir.TypeAssertExpr), true)
+ } else {
+ res, resok = s.dynamicDottype(n.Rhs[0].(*ir.DynamicTypeAssertExpr), true)
+ }
deref := false
if !TypeOK(n.Rhs[0].Type()) {
if res.Op != ssa.OpLoad {
@@ -2748,6 +2684,11 @@ func (s *state) expr(n ir.Node) *ssa.Value {
res, _ := s.dottype(n, false)
return res
+ case ir.ODYNAMICDOTTYPE:
+ n := n.(*ir.DynamicTypeAssertExpr)
+ res, _ := s.dynamicDottype(n, false)
+ return res
+
// binary ops
case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
n := n.(*ir.BinaryExpr)
@@ -3183,7 +3124,7 @@ func (s *state) expr(n ir.Node) *ssa.Value {
}
fallthrough
- case ir.OCALLINTER, ir.OCALLMETH:
+ case ir.OCALLINTER:
n := n.(*ir.CallExpr)
return s.callResult(n, callNormal)
@@ -3191,6 +3132,14 @@ func (s *state) expr(n ir.Node) *ssa.Value {
n := n.(*ir.CallExpr)
return s.newValue1(ssa.OpGetG, n.Type(), s.mem())
+ case ir.OGETCALLERPC:
+ n := n.(*ir.CallExpr)
+ return s.newValue0(ssa.OpGetCallerPC, n.Type())
+
+ case ir.OGETCALLERSP:
+ n := n.(*ir.CallExpr)
+ return s.newValue0(ssa.OpGetCallerSP, n.Type())
+
case ir.OAPPEND:
return s.append(n.(*ir.CallExpr), false)
@@ -3703,6 +3652,16 @@ func softfloatInit() {
// TODO: do not emit sfcall if operation can be optimized to constant in later
// opt phase
func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) {
+ f2i := func(t *types.Type) *types.Type {
+ switch t.Kind() {
+ case types.TFLOAT32:
+ return types.Types[types.TUINT32]
+ case types.TFLOAT64:
+ return types.Types[types.TUINT64]
+ }
+ return t
+ }
+
if callDef, ok := softFloatOps[op]; ok {
switch op {
case ssa.OpLess32F,
@@ -3715,7 +3674,19 @@ func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) {
args[1] = s.newValue1(s.ssaOp(ir.ONEG, types.Types[callDef.rtype]), args[1].Type, args[1])
}
- result := s.rtcall(callDef.rtfn, true, []*types.Type{types.Types[callDef.rtype]}, args...)[0]
+	// runtime functions take uints for floats and return uints.
+ // Convert to uints so we use the right calling convention.
+ for i, a := range args {
+ if a.Type.IsFloat() {
+ args[i] = s.newValue1(ssa.OpCopy, f2i(a.Type), a)
+ }
+ }
+
+ rt := types.Types[callDef.rtype]
+ result := s.rtcall(callDef.rtfn, true, []*types.Type{f2i(rt)}, args...)[0]
+ if rt.IsFloat() {
+ result = s.newValue1(ssa.OpCopy, rt, result)
+ }
if op == ssa.OpNeq32F || op == ssa.OpNeq64F {
result = s.newValue1(ssa.OpNot, result.Type, result)
}
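The OpCopy reinterpretations sfcall inserts above correspond, at the Go level, to bit-for-bit conversions between floats and same-size unsigned integers — the shape the soft-float runtime routines expect. A small standalone sketch of that reinterpretation:

package main

import (
	"fmt"
	"math"
)

func main() {
	f := 1.5
	u := math.Float64bits(f)     // pass to a runtime routine as uint64
	g := math.Float64frombits(u) // interpret the uint64 result as float64 again
	fmt.Printf("%v -> %#x -> %v\n", f, u, g)

	f32 := float32(2.25)
	u32 := math.Float32bits(f32) // float32 arguments travel as uint32
	fmt.Printf("%v -> %#x\n", f32, u32)
}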
@@ -4687,17 +4658,14 @@ func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value {
return args
}
-// openDeferRecord adds code to evaluate and store the args for an open-code defer
+// openDeferRecord adds code to evaluate and store the function for an open-coded defer
// call, and records info about the defer, so we can generate proper code on the
// exit paths. n is the sub-node of the defer node that is the actual function
-// call. We will also record funcdata information on where the args are stored
+// call. We will also record funcdata information on where the function is stored
// (as well as the deferBits variable), and this will enable us to run the proper
// defer calls during panics.
func (s *state) openDeferRecord(n *ir.CallExpr) {
- var args []*ssa.Value
- var argNodes []*ir.Name
-
- if buildcfg.Experiment.RegabiDefer && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) {
+ if len(n.Args) != 0 || n.Op() != ir.OCALLFUNC || n.X.Type().NumResults() != 0 {
s.Fatalf("defer call with arguments or results: %v", n)
}
@@ -4705,48 +4673,20 @@ func (s *state) openDeferRecord(n *ir.CallExpr) {
n: n,
}
fn := n.X
- if n.Op() == ir.OCALLFUNC {
- // We must always store the function value in a stack slot for the
- // runtime panic code to use. But in the defer exit code, we will
- // call the function directly if it is a static function.
- closureVal := s.expr(fn)
- closure := s.openDeferSave(nil, fn.Type(), closureVal)
- opendefer.closureNode = closure.Aux.(*ir.Name)
- if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC) {
- opendefer.closure = closure
- }
- } else if n.Op() == ir.OCALLMETH {
- base.Fatalf("OCALLMETH missed by walkCall")
- } else {
- if fn.Op() != ir.ODOTINTER {
- base.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
- }
- fn := fn.(*ir.SelectorExpr)
- closure, rcvr := s.getClosureAndRcvr(fn)
- opendefer.closure = s.openDeferSave(nil, closure.Type, closure)
- // Important to get the receiver type correct, so it is recognized
- // as a pointer for GC purposes.
- opendefer.rcvr = s.openDeferSave(nil, fn.Type().Recv().Type, rcvr)
- opendefer.closureNode = opendefer.closure.Aux.(*ir.Name)
- opendefer.rcvrNode = opendefer.rcvr.Aux.(*ir.Name)
- }
- for _, argn := range n.Args {
- var v *ssa.Value
- if TypeOK(argn.Type()) {
- v = s.openDeferSave(nil, argn.Type(), s.expr(argn))
- } else {
- v = s.openDeferSave(argn, argn.Type(), nil)
- }
- args = append(args, v)
- argNodes = append(argNodes, v.Aux.(*ir.Name))
+ // We must always store the function value in a stack slot for the
+ // runtime panic code to use. But in the defer exit code, we will
+ // call the function directly if it is a static function.
+ closureVal := s.expr(fn)
+ closure := s.openDeferSave(fn.Type(), closureVal)
+ opendefer.closureNode = closure.Aux.(*ir.Name)
+ if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC) {
+ opendefer.closure = closure
}
- opendefer.argVals = args
- opendefer.argNodes = argNodes
index := len(s.openDefers)
s.openDefers = append(s.openDefers, opendefer)
// Update deferBits only after evaluation and storage to stack of
- // args/receiver/interface is successful.
+ // the function is successful.
bitvalue := s.constInt8(types.Types[types.TUINT8], 1<<uint(index))
newDeferBits := s.newValue2(ssa.OpOr8, types.Types[types.TUINT8], s.variable(deferBitsVar, types.Types[types.TUINT8]), bitvalue)
s.vars[deferBitsVar] = newDeferBits
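The deferBits update above only happens after the closure has been stored, and the exit path later tests and clears each bit in reverse order before calling the saved closure. A standalone simulation of that scheme (plain Go, no SSA):

package main

import "fmt"

func main() {
	var deferBits uint8
	var closures [8]func()

	record := func(i int, fn func()) {
		closures[i] = fn          // "store to the stack slot" first …
		deferBits |= 1 << uint(i) // … then mark the defer as active
	}

	record(0, func() { fmt.Println("defer 0") })
	record(1, func() { fmt.Println("defer 1") })

	// Exit path: run active defers in reverse order of registration,
	// clearing each bit before its call.
	for i := len(closures) - 1; i >= 0; i-- {
		if deferBits&(1<<uint(i)) == 0 {
			continue
		}
		deferBits &^= 1 << uint(i)
		closures[i]()
	}
}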
@@ -4755,57 +4695,47 @@ func (s *state) openDeferRecord(n *ir.CallExpr) {
// openDeferSave generates SSA nodes to store a value (with type t) for an
// open-coded defer at an explicit autotmp location on the stack, so it can be
-// reloaded and used for the appropriate call on exit. If type t is SSAable, then
-// val must be non-nil (and n should be nil) and val is the value to be stored. If
-// type t is non-SSAable, then n must be non-nil (and val should be nil) and n is
-// evaluated (via s.addr() below) to get the value that is to be stored. The
-// function returns an SSA value representing a pointer to the autotmp location.
-func (s *state) openDeferSave(n ir.Node, t *types.Type, val *ssa.Value) *ssa.Value {
- canSSA := TypeOK(t)
- var pos src.XPos
- if canSSA {
- pos = val.Pos
- } else {
- pos = n.Pos()
+// reloaded and used for the appropriate call on exit. Type t must be a function type
+// (therefore SSAable). val is the value to be stored. The function returns an SSA
+// value representing a pointer to the autotmp location.
+func (s *state) openDeferSave(t *types.Type, val *ssa.Value) *ssa.Value {
+ if !TypeOK(t) {
+ s.Fatalf("openDeferSave of non-SSA-able type %v val=%v", t, val)
}
- argTemp := typecheck.TempAt(pos.WithNotStmt(), s.curfn, t)
- argTemp.SetOpenDeferSlot(true)
- var addrArgTemp *ssa.Value
- // Use OpVarLive to make sure stack slots for the args, etc. are not
- // removed by dead-store elimination
+ if !t.HasPointers() {
+ s.Fatalf("openDeferSave of pointerless type %v val=%v", t, val)
+ }
+ pos := val.Pos
+ temp := typecheck.TempAt(pos.WithNotStmt(), s.curfn, t)
+ temp.SetOpenDeferSlot(true)
+ var addrTemp *ssa.Value
+ // Use OpVarLive to make sure stack slot for the closure is not removed by
+ // dead-store elimination
if s.curBlock.ID != s.f.Entry.ID {
- // Force the argtmp storing this defer function/receiver/arg to be
- // declared in the entry block, so that it will be live for the
- // defer exit code (which will actually access it only if the
- // associated defer call has been activated).
- s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarDef, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar])
- s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarLive, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar])
- addrArgTemp = s.f.Entry.NewValue2A(src.NoXPos, ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.defvars[s.f.Entry.ID][memVar])
+ // Force the tmp storing this defer function to be declared in the entry
+ // block, so that it will be live for the defer exit code (which will
+ // actually access it only if the associated defer call has been activated).
+ s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarDef, types.TypeMem, temp, s.defvars[s.f.Entry.ID][memVar])
+ s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarLive, types.TypeMem, temp, s.defvars[s.f.Entry.ID][memVar])
+ addrTemp = s.f.Entry.NewValue2A(src.NoXPos, ssa.OpLocalAddr, types.NewPtr(temp.Type()), temp, s.sp, s.defvars[s.f.Entry.ID][memVar])
} else {
// Special case if we're still in the entry block. We can't use
// the above code, since s.defvars[s.f.Entry.ID] isn't defined
// until we end the entry block with s.endBlock().
- s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, argTemp, s.mem(), false)
- s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argTemp, s.mem(), false)
- addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.mem(), false)
- }
- if t.HasPointers() {
- // Since we may use this argTemp during exit depending on the
- // deferBits, we must define it unconditionally on entry.
- // Therefore, we must make sure it is zeroed out in the entry
- // block if it contains pointers, else GC may wrongly follow an
- // uninitialized pointer value.
- argTemp.SetNeedzero(true)
- }
- if !canSSA {
- a := s.addr(n)
- s.move(t, addrArgTemp, a)
- return addrArgTemp
- }
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, temp, s.mem(), false)
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, temp, s.mem(), false)
+ addrTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(temp.Type()), temp, s.sp, s.mem(), false)
+ }
+ // Since we may use this temp during exit depending on the
+ // deferBits, we must define it unconditionally on entry.
+ // Therefore, we must make sure it is zeroed out in the entry
+ // block if it contains pointers, else GC may wrongly follow an
+ // uninitialized pointer value.
+ temp.SetNeedzero(true)
// We are storing to the stack, hence we can avoid the full checks in
// storeType() (no write barrier) and do a simple store().
- s.store(t, addrArgTemp, val)
- return addrArgTemp
+ s.store(t, addrTemp, val)
+ return addrTemp
}
// openDeferExit generates SSA for processing all the open coded defers at exit.
@@ -4849,45 +4779,26 @@ func (s *state) openDeferExit() {
s.vars[deferBitsVar] = maskedval
// Generate code to call the function call of the defer, using the
- // closure/receiver/args that were stored in argtmps at the point
- // of the defer statement.
+	// closure that was stored in an argtmp at the point of the defer
+ // statement.
fn := r.n.X
stksize := fn.Type().ArgWidth()
- var ACArgs []*types.Type
- var ACResults []*types.Type
var callArgs []*ssa.Value
- if r.rcvr != nil {
- // rcvr in case of OCALLINTER
- v := s.load(r.rcvr.Type.Elem(), r.rcvr)
- ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
- callArgs = append(callArgs, v)
- }
- for j, argAddrVal := range r.argVals {
- f := getParam(r.n, j)
- ACArgs = append(ACArgs, f.Type)
- var a *ssa.Value
- if !TypeOK(f.Type) {
- a = s.newValue2(ssa.OpDereference, f.Type, argAddrVal, s.mem())
- } else {
- a = s.load(f.Type, argAddrVal)
- }
- callArgs = append(callArgs, a)
- }
var call *ssa.Value
if r.closure != nil {
v := s.load(r.closure.Type.Elem(), r.closure)
s.maybeNilCheckClosure(v, callDefer)
codeptr := s.rawLoad(types.Types[types.TUINTPTR], v)
- aux := ssa.ClosureAuxCall(s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
+ aux := ssa.ClosureAuxCall(s.f.ABIDefault.ABIAnalyzeTypes(nil, nil, nil))
call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v)
} else {
- aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
+ aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), s.f.ABIDefault.ABIAnalyzeTypes(nil, nil, nil))
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
}
callArgs = append(callArgs, s.mem())
call.AddArgs(callArgs...)
call.AuxInt = stksize
- s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
+ s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, 0, call)
// Make sure that the stack slots with pointers are kept live
// through the call (which is a pre-emption point). Also, we will
// use the first call of the last defer exit to compute liveness
@@ -4895,16 +4806,6 @@ func (s *state) openDeferExit() {
if r.closureNode != nil {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
}
- if r.rcvrNode != nil {
- if r.rcvrNode.Type().HasPointers() {
- s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false)
- }
- }
- for _, argNode := range r.argNodes {
- if argNode.Type().HasPointers() {
- s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false)
- }
- }
s.endBlock()
s.startBlock(bEnd)
@@ -4952,7 +4853,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
}
}
- if buildcfg.Experiment.RegabiDefer && k != callNormal && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) {
+ if k != callNormal && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) {
s.Fatalf("go/defer call with arguments: %v", n)
}
@@ -4986,8 +4887,6 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
// not the point of defer statement.
s.maybeNilCheckClosure(closure, k)
}
- case ir.OCALLMETH:
- base.Fatalf("OCALLMETH missed by walkCall")
case ir.OCALLINTER:
if fn.Op() != ir.ODOTINTER {
s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
@@ -5023,55 +4922,31 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
var call *ssa.Value
if k == callDeferStack {
// Make a defer struct d on the stack.
- t := deferstruct(stksize)
+ if stksize != 0 {
+ s.Fatalf("deferprocStack with non-zero stack size %d: %v", stksize, n)
+ }
+
+ t := deferstruct()
d := typecheck.TempAt(n.Pos(), s.curfn, t)
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
addr := s.addr(d)
- // Must match reflect.go:deferstruct and src/runtime/runtime2.go:_defer.
- // 0: siz
- s.store(types.Types[types.TUINT32],
- s.newValue1I(ssa.OpOffPtr, types.Types[types.TUINT32].PtrTo(), t.FieldOff(0), addr),
- s.constInt32(types.Types[types.TUINT32], int32(stksize)))
- // 1: started, set in deferprocStack
- // 2: heap, set in deferprocStack
- // 3: openDefer
- // 4: sp, set in deferprocStack
- // 5: pc, set in deferprocStack
- // 6: fn
+ // Must match deferstruct() below and src/runtime/runtime2.go:_defer.
+ // 0: started, set in deferprocStack
+ // 1: heap, set in deferprocStack
+ // 2: openDefer
+ // 3: sp, set in deferprocStack
+ // 4: pc, set in deferprocStack
+ // 5: fn
s.store(closure.Type,
- s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(6), addr),
+ s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(5), addr),
closure)
- // 7: panic, set in deferprocStack
- // 8: link, set in deferprocStack
- // 9: framepc
- // 10: varp
- // 11: fd
-
- // Then, store all the arguments of the defer call.
- ft := fn.Type()
- off := t.FieldOff(12) // TODO register args: be sure this isn't a hardcoded param stack offset.
- args := n.Args
- i0 := 0
-
- // Set receiver (for interface calls). Always a pointer.
- if rcvr != nil {
- p := s.newValue1I(ssa.OpOffPtr, ft.Recv().Type.PtrTo(), off, addr)
- s.store(types.Types[types.TUINTPTR], p, rcvr)
- i0 = 1
- }
- // Set receiver (for method calls).
- if n.Op() == ir.OCALLMETH {
- base.Fatalf("OCALLMETH missed by walkCall")
- }
- // Set other args.
- // This code is only used when RegabiDefer is not enabled, and arguments are always
- // passed on stack.
- for i, f := range ft.Params().Fields().Slice() {
- s.storeArgWithBase(args[0], f.Type, addr, off+params.InParam(i+i0).FrameOffset(params))
- args = args[1:]
- }
+ // 6: panic, set in deferprocStack
+ // 7: link, set in deferprocStack
+ // 8: fd
+ // 9: varp
+ // 10: framepc
// Call runtime.deferprocStack with pointer to _defer record.
ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
@@ -5079,26 +4954,18 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
callArgs = append(callArgs, addr, s.mem())
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...)
- if stksize < int64(types.PtrSize) {
- // We need room for both the call to deferprocStack and the call to
- // the deferred function.
- stksize = int64(types.PtrSize)
- }
- call.AuxInt = stksize
+ call.AuxInt = int64(types.PtrSize) // deferprocStack takes a *_defer arg
} else {
// Store arguments to stack, including defer/go arguments and receiver for method calls.
// These are written in SP-offset order.
argStart := base.Ctxt.FixedFrameSize()
// Defer/go args.
if k != callNormal {
- // Write argsize and closure (args to newproc/deferproc).
- argsize := s.constInt32(types.Types[types.TUINT32], int32(stksize))
- ACArgs = append(ACArgs, types.Types[types.TUINT32]) // not argExtra
- callArgs = append(callArgs, argsize)
- ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
+ // Write closure (arg to newproc/deferproc).
+ ACArgs = append(ACArgs, types.Types[types.TUINTPTR]) // not argExtra
callArgs = append(callArgs, closure)
- stksize += 2 * int64(types.PtrSize)
- argStart += 2 * int64(types.PtrSize)
+ stksize += int64(types.PtrSize)
+ argStart += int64(types.PtrSize)
}
// Set receiver (for interface calls).
@@ -5109,9 +4976,6 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
// Write args.
t := n.X.Type()
args := n.Args
- if n.Op() == ir.OCALLMETH {
- base.Fatalf("OCALLMETH missed by walkCall")
- }
for _, p := range params.InParams() { // includes receiver for interface calls
ACArgs = append(ACArgs, p.Type)
@@ -5314,9 +5178,13 @@ func (s *state) addr(n ir.Node) *ssa.Value {
case ir.OCALLFUNC, ir.OCALLINTER:
n := n.(*ir.CallExpr)
return s.callAddr(n, callNormal)
- case ir.ODOTTYPE:
- n := n.(*ir.TypeAssertExpr)
- v, _ := s.dottype(n, false)
+ case ir.ODOTTYPE, ir.ODYNAMICDOTTYPE:
+ var v *ssa.Value
+ if n.Op() == ir.ODOTTYPE {
+ v, _ = s.dottype(n.(*ir.TypeAssertExpr), false)
+ } else {
+ v, _ = s.dynamicDottype(n.(*ir.DynamicTypeAssertExpr), false)
+ }
if v.Op != ssa.OpLoad {
s.Fatalf("dottype of non-load")
}
@@ -6210,14 +6078,38 @@ func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *
func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
iface := s.expr(n.X) // input interface
target := s.reflectType(n.Type()) // target type
- byteptr := s.f.Config.Types.BytePtr
+ var targetItab *ssa.Value
+ if n.Itab != nil {
+ targetItab = s.expr(n.Itab)
+ }
+ return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, target, targetItab, commaok)
+}
+
+func (s *state) dynamicDottype(n *ir.DynamicTypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
+ iface := s.expr(n.X)
+ target := s.expr(n.T)
+ var itab *ssa.Value
+ if !n.X.Type().IsEmptyInterface() && !n.Type().IsInterface() {
+ byteptr := s.f.Config.Types.BytePtr
+ itab = target
+ target = s.load(byteptr, s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)) // itab.typ
+ }
+ return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, target, itab, commaok)
+}
- if n.Type().IsInterface() {
- if n.Type().IsEmptyInterface() {
+// dottype1 implements an x.(T) operation. iface is the argument (x), dst is the type we're asserting to (T)
+// and src is the type we're asserting from.
+// target is the *runtime._type of dst.
+// If src is a nonempty interface and dst is not an interface, targetItab is an itab representing (dst, src). Otherwise it is nil.
+// commaok is true if the caller wants a boolean success value. Otherwise, the generated code panics if the conversion fails.
+func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, target, targetItab *ssa.Value, commaok bool) (res, resok *ssa.Value) {
+ byteptr := s.f.Config.Types.BytePtr
+ if dst.IsInterface() {
+ if dst.IsEmptyInterface() {
// Converting to an empty interface.
// Input could be an empty or nonempty interface.
if base.Debug.TypeAssert > 0 {
- base.WarnfAt(n.Pos(), "type assertion inlined")
+ base.WarnfAt(pos, "type assertion inlined")
}
// Get itab/type field from input.
@@ -6225,7 +6117,7 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
// Conversion succeeds iff that field is not nil.
cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr))
- if n.X.Type().IsEmptyInterface() && commaok {
+ if src.IsEmptyInterface() && commaok {
// Converting empty interface to empty interface with ,ok is just a nil check.
return iface, cond
}
@@ -6247,7 +6139,7 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
// On success, return (perhaps modified) input interface.
s.startBlock(bOk)
- if n.X.Type().IsEmptyInterface() {
+ if src.IsEmptyInterface() {
res = iface // Use input interface unchanged.
return
}
@@ -6255,7 +6147,7 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)
typ := s.load(byteptr, off)
idata := s.newValue1(ssa.OpIData, byteptr, iface)
- res = s.newValue2(ssa.OpIMake, n.Type(), typ, idata)
+ res = s.newValue2(ssa.OpIMake, dst, typ, idata)
return
}
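At the source level, the two assertion forms that dottype1 lowers behave as in this small example, independent of the compiler internals above: the comma-ok form reports failure, while the plain form panics (panicdottypeE/panicdottypeI at runtime).

package main

import "fmt"

func main() {
	var x interface{} = "hello"

	s, ok := x.(string) // comma-ok form: never panics
	fmt.Println(s, ok)  // hello true

	n, ok := x.(int)   // failed assertion: zero value, ok == false
	fmt.Println(n, ok) // 0 false

	defer func() { fmt.Println("recovered:", recover()) }()
	_ = x.(int) // plain form: panics with a *runtime.TypeAssertionError
}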
@@ -6277,62 +6169,62 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
bFail.AddEdgeTo(bEnd)
s.startBlock(bEnd)
idata := s.newValue1(ssa.OpIData, byteptr, iface)
- res = s.newValue2(ssa.OpIMake, n.Type(), s.variable(typVar, byteptr), idata)
+ res = s.newValue2(ssa.OpIMake, dst, s.variable(typVar, byteptr), idata)
resok = cond
delete(s.vars, typVar)
return
}
// converting to a nonempty interface needs a runtime call.
if base.Debug.TypeAssert > 0 {
- base.WarnfAt(n.Pos(), "type assertion not inlined")
+ base.WarnfAt(pos, "type assertion not inlined")
}
if !commaok {
fn := ir.Syms.AssertI2I
- if n.X.Type().IsEmptyInterface() {
+ if src.IsEmptyInterface() {
fn = ir.Syms.AssertE2I
}
data := s.newValue1(ssa.OpIData, types.Types[types.TUNSAFEPTR], iface)
tab := s.newValue1(ssa.OpITab, byteptr, iface)
tab = s.rtcall(fn, true, []*types.Type{byteptr}, target, tab)[0]
- return s.newValue2(ssa.OpIMake, n.Type(), tab, data), nil
+ return s.newValue2(ssa.OpIMake, dst, tab, data), nil
}
fn := ir.Syms.AssertI2I2
- if n.X.Type().IsEmptyInterface() {
+ if src.IsEmptyInterface() {
fn = ir.Syms.AssertE2I2
}
- res = s.rtcall(fn, true, []*types.Type{n.Type()}, target, iface)[0]
- resok = s.newValue2(ssa.OpNeqInter, types.Types[types.TBOOL], res, s.constInterface(n.Type()))
+ res = s.rtcall(fn, true, []*types.Type{dst}, target, iface)[0]
+ resok = s.newValue2(ssa.OpNeqInter, types.Types[types.TBOOL], res, s.constInterface(dst))
return
}
if base.Debug.TypeAssert > 0 {
- base.WarnfAt(n.Pos(), "type assertion inlined")
+ base.WarnfAt(pos, "type assertion inlined")
}
// Converting to a concrete type.
- direct := types.IsDirectIface(n.Type())
+ direct := types.IsDirectIface(dst)
itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
if base.Debug.TypeAssert > 0 {
- base.WarnfAt(n.Pos(), "type assertion inlined")
+ base.WarnfAt(pos, "type assertion inlined")
}
- var targetITab *ssa.Value
- if n.X.Type().IsEmptyInterface() {
+ var wantedFirstWord *ssa.Value
+ if src.IsEmptyInterface() {
// Looking for pointer to target type.
- targetITab = target
+ wantedFirstWord = target
} else {
// Looking for pointer to itab for target type and source interface.
- targetITab = s.expr(n.Itab)
+ wantedFirstWord = targetItab
}
var tmp ir.Node // temporary for use with large types
var addr *ssa.Value // address of tmp
- if commaok && !TypeOK(n.Type()) {
+ if commaok && !TypeOK(dst) {
// unSSAable type, use temporary.
// TODO: get rid of some of these temporaries.
- tmp, addr = s.temp(n.Pos(), n.Type())
+ tmp, addr = s.temp(pos, dst)
}
- cond := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], itab, targetITab)
+ cond := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], itab, wantedFirstWord)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cond)
@@ -6346,8 +6238,8 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
if !commaok {
// on failure, panic by calling panicdottype
s.startBlock(bFail)
- taddr := s.reflectType(n.X.Type())
- if n.X.Type().IsEmptyInterface() {
+ taddr := s.reflectType(src)
+ if src.IsEmptyInterface() {
s.rtcall(ir.Syms.PanicdottypeE, false, nil, itab, target, taddr)
} else {
s.rtcall(ir.Syms.PanicdottypeI, false, nil, itab, target, taddr)
@@ -6356,10 +6248,10 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
// on success, return data from interface
s.startBlock(bOk)
if direct {
- return s.newValue1(ssa.OpIData, n.Type(), iface), nil
+ return s.newValue1(ssa.OpIData, dst, iface), nil
}
- p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
- return s.load(n.Type(), p), nil
+ p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface)
+ return s.load(dst, p), nil
}
// commaok is the more complicated case because we have
@@ -6373,14 +6265,14 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
s.startBlock(bOk)
if tmp == nil {
if direct {
- s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type(), iface)
+ s.vars[valVar] = s.newValue1(ssa.OpIData, dst, iface)
} else {
- p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
- s.vars[valVar] = s.load(n.Type(), p)
+ p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface)
+ s.vars[valVar] = s.load(dst, p)
}
} else {
- p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
- s.move(n.Type(), addr, p)
+ p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface)
+ s.move(dst, addr, p)
}
s.vars[okVar] = s.constBool(true)
s.endBlock()
@@ -6389,9 +6281,9 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
// type assertion failed
s.startBlock(bFail)
if tmp == nil {
- s.vars[valVar] = s.zeroVal(n.Type())
+ s.vars[valVar] = s.zeroVal(dst)
} else {
- s.zero(n.Type(), addr)
+ s.zero(dst, addr)
}
s.vars[okVar] = s.constBool(false)
s.endBlock()
@@ -6400,10 +6292,10 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
// merge point
s.startBlock(bEnd)
if tmp == nil {
- res = s.variable(valVar, n.Type())
+ res = s.variable(valVar, dst)
delete(s.vars, valVar)
} else {
- res = s.load(n.Type(), addr)
+ res = s.load(dst, addr)
s.vars[memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp.(*ir.Name), s.mem())
}
resok = s.variable(okVar, types.Types[types.TBOOL])
@@ -6921,8 +6813,12 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
// recovers a panic, it will return to caller with right results.
// The results are already in memory, because they are not SSA'd
// when the function has defers (see canSSAName).
- if f.OwnAux.ABIInfo().OutRegistersUsed() != 0 {
- Arch.LoadRegResults(&s, f)
+ for _, o := range f.OwnAux.ABIInfo().OutParams() {
+ n := o.Name.(*ir.Name)
+ rts, offs := o.RegisterTypesAndOffsets()
+ for i := range o.Registers {
+ Arch.LoadRegResult(&s, f, rts[i], ssa.ObjRegForAbiReg(o.Registers[i], f.Config), n, offs[i])
+ }
}
pp.Prog(obj.ARET)
@@ -7460,18 +7356,6 @@ func (s *State) PrepareCall(v *ssa.Value) {
call, ok := v.Aux.(*ssa.AuxCall)
- if ok && call.Fn == ir.Syms.Deferreturn {
- // Deferred calls will appear to be returning to
- // the CALL deferreturn(SB) that we are about to emit.
- // However, the stack trace code will show the line
- // of the instruction byte before the return PC.
- // To avoid that being an unrelated instruction,
- // insert an actual hardware NOP that will have the right line number.
- // This is different from obj.ANOP, which is a virtual no-op
- // that doesn't make it into the instruction stream.
- Arch.Ginsnopdefer(s.pp)
- }
-
if ok {
// Record call graph information for nowritebarrierrec
// analysis.
@@ -7542,10 +7426,6 @@ func (e *ssafn) Auto(pos src.XPos, t *types.Type) *ir.Name {
return typecheck.TempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
}
-func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
- return reflectdata.ITabSym(it, offset)
-}
-
// SplitSlot returns a slot representing the data of parent starting at offset.
func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
node := parent.N
@@ -7676,9 +7556,8 @@ func max8(a, b int8) int8 {
return b
}
-// deferstruct makes a runtime._defer structure, with additional space for
-// stksize bytes of args.
-func deferstruct(stksize int64) *types.Type {
+// deferstruct makes a runtime._defer structure.
+func deferstruct() *types.Type {
makefield := func(name string, typ *types.Type) *types.Field {
// Unlike the global makefield function, this one needs to set Pkg
// because these types might be compared (in SSA CSE sorting).
@@ -7686,13 +7565,9 @@ func deferstruct(stksize int64) *types.Type {
sym := &types.Sym{Name: name, Pkg: types.LocalPkg}
return types.NewField(src.NoXPos, sym, typ)
}
- argtype := types.NewArray(types.Types[types.TUINT8], stksize)
- argtype.Width = stksize
- argtype.Align = 1
// These fields must match the ones in runtime/runtime2.go:_defer and
- // cmd/compile/internal/gc/ssa.go:(*state).call.
+ // (*state).call above.
fields := []*types.Field{
- makefield("siz", types.Types[types.TUINT32]),
makefield("started", types.Types[types.TBOOL]),
makefield("heap", types.Types[types.TBOOL]),
makefield("openDefer", types.Types[types.TBOOL]),
@@ -7704,10 +7579,9 @@ func deferstruct(stksize int64) *types.Type {
makefield("fn", types.Types[types.TUINTPTR]),
makefield("_panic", types.Types[types.TUINTPTR]),
makefield("link", types.Types[types.TUINTPTR]),
- makefield("framepc", types.Types[types.TUINTPTR]),
- makefield("varp", types.Types[types.TUINTPTR]),
makefield("fd", types.Types[types.TUINTPTR]),
- makefield("args", argtype),
+ makefield("varp", types.Types[types.TUINTPTR]),
+ makefield("framepc", types.Types[types.TUINTPTR]),
}
// build struct holding the above fields
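The field list deferstruct builds above must mirror runtime._defer, and the byte offsets the compiler computes via t.FieldOff(i) line up with unsafe.Offsetof on an equivalent struct. An illustrative mirror (pointer fields flattened to uintptr, exactly as deferstruct does) shows why FieldOff(5) in (*state).call is the fn field:

package main

import (
	"fmt"
	"unsafe"
)

type deferRecord struct {
	started   bool
	heap      bool
	openDefer bool
	sp        uintptr
	pc        uintptr
	fn        uintptr // field index 5, written by (*state).call
	_panic    uintptr
	link      uintptr
	fd        uintptr
	varp      uintptr
	framepc   uintptr
}

func main() {
	var d deferRecord
	fmt.Println("fn offset:", unsafe.Offsetof(d.fn)) // 24 on 64-bit targets
}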
diff --git a/src/cmd/compile/internal/staticdata/embed.go b/src/cmd/compile/internal/staticdata/embed.go
index 8936c4f5b4..0730d346b2 100644
--- a/src/cmd/compile/internal/staticdata/embed.go
+++ b/src/cmd/compile/internal/staticdata/embed.go
@@ -108,13 +108,6 @@ func WriteEmbed(v *ir.Name) {
// TODO(mdempsky): User errors should be reported by the frontend.
commentPos := (*v.Embed)[0].Pos
- if !types.AllowsGoVersion(types.LocalPkg, 1, 16) {
- prevPos := base.Pos
- base.Pos = commentPos
- base.ErrorfVers("go1.16", "go:embed")
- base.Pos = prevPos
- return
- }
if base.Flag.Cfg.Embed.Patterns == nil {
base.ErrorfAt(commentPos, "invalid go:embed: build system did not supply embed configuration")
return
diff --git a/src/cmd/compile/internal/staticinit/sched.go b/src/cmd/compile/internal/staticinit/sched.go
index 0c97b6de74..9329a46989 100644
--- a/src/cmd/compile/internal/staticinit/sched.go
+++ b/src/cmd/compile/internal/staticinit/sched.go
@@ -403,10 +403,10 @@ func (s *Schedule) initplan(n ir.Node) {
base.Fatalf("initplan structlit")
}
a := a.(*ir.StructKeyExpr)
- if a.Field.IsBlank() {
+ if a.Sym().IsBlank() {
continue
}
- s.addvalue(p, a.Offset, a.Value)
+ s.addvalue(p, a.Field.Offset, a.Value)
}
case ir.OMAPLIT:
diff --git a/src/cmd/compile/internal/syntax/parser.go b/src/cmd/compile/internal/syntax/parser.go
index e7b8840b33..acffd84885 100644
--- a/src/cmd/compile/internal/syntax/parser.go
+++ b/src/cmd/compile/internal/syntax/parser.go
@@ -604,7 +604,7 @@ func (p *parser) typeDecl(group *Group) Decl {
} else {
// x is the array length expression
if debug && x == nil {
- panic("internal error: nil expression")
+ panic("length expression is nil")
}
d.Type = p.arrayType(pos, x)
}
@@ -1100,7 +1100,7 @@ loop:
complit_ok = true
}
case *IndexExpr:
- if p.xnest >= 0 {
+ if p.xnest >= 0 && !isValue(t) {
// x is possibly a composite literal type
complit_ok = true
}
@@ -1127,6 +1127,21 @@ loop:
return x
}
+// isValue reports whether x syntactically must be a value (and not a type) expression.
+func isValue(x Expr) bool {
+ switch x := x.(type) {
+ case *BasicLit, *CompositeLit, *FuncLit, *SliceExpr, *AssertExpr, *TypeSwitchGuard, *CallExpr:
+ return true
+ case *Operation:
+ return x.Op != Mul || x.Y != nil // *T may be a type
+ case *ParenExpr:
+ return isValue(x.X)
+ case *IndexExpr:
+ return isValue(x.X) || isValue(x.Index)
+ }
+ return false
+}
+
// Element = Expression | LiteralValue .
func (p *parser) bare_complitexpr() Expr {
if trace {
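An aside on the isValue guard added above (illustration only, not additional patch content): the parser treats x[i] followed by "{" as a possible composite literal of an instantiated generic type only when x[i] could syntactically be a type. The new test issue46558.src further below exercises the value case:

    // case s[2] { ...     // s[2] indexes with a basic literal, so isValue reports
    //                     // true and the stray "{" is diagnosed as "unexpected {"
    //
    // type T[P any] struct{}
    // _ = T[int]{}        // neither T nor int is syntactically a value, so the
    //                     // "{" may open a composite literal of T[int]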
@@ -1443,6 +1458,18 @@ func (p *parser) interfaceType() *InterfaceType {
}
return false
}
+
+ default:
+ if p.mode&AllowGenerics != 0 {
+ pos := p.pos()
+ if t := p.typeOrNil(); t != nil {
+ f := new(Field)
+ f.pos = pos
+ f.Type = t
+ typ.MethodList = append(typ.MethodList, p.embeddedElem(f))
+ return false
+ }
+ }
}
if p.mode&AllowGenerics != 0 {
diff --git a/src/cmd/compile/internal/syntax/positions.go b/src/cmd/compile/internal/syntax/positions.go
index b00f86c67c..93596559a0 100644
--- a/src/cmd/compile/internal/syntax/positions.go
+++ b/src/cmd/compile/internal/syntax/positions.go
@@ -12,7 +12,7 @@ func StartPos(n Node) Pos {
for m := n; ; {
switch n := m.(type) {
case nil:
- panic("internal error: nil")
+ panic("nil node")
// packages
case *File:
@@ -124,7 +124,7 @@ func EndPos(n Node) Pos {
for m := n; ; {
switch n := m.(type) {
case nil:
- panic("internal error: nil")
+ panic("nil node")
// packages
case *File:
diff --git a/src/cmd/compile/internal/syntax/testdata/go2/linalg.go2 b/src/cmd/compile/internal/syntax/testdata/go2/linalg.go2
index 0d27603a58..822d0287e7 100644
--- a/src/cmd/compile/internal/syntax/testdata/go2/linalg.go2
+++ b/src/cmd/compile/internal/syntax/testdata/go2/linalg.go2
@@ -9,10 +9,10 @@ import "math"
// Numeric is a type bound that matches any numeric type.
// It would likely be in a constraints package in the standard library.
type Numeric interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64,
- complex64, complex128
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~complex64 | ~complex128
}
func DotProduct[T Numeric](s1, s2 []T) T {
@@ -42,14 +42,14 @@ func AbsDifference[T NumericAbs[T]](a, b T) T {
// OrderedNumeric is a type bound that matches numeric types that support the < operator.
type OrderedNumeric interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64
}
// Complex is a type bound that matches the two complex types, which do not have a < operator.
type Complex interface {
- type complex64, complex128
+ ~complex64 | ~complex128
}
// OrderedAbs is a helper type that defines an Abs method for
diff --git a/src/cmd/compile/internal/syntax/testdata/go2/smoketest.go2 b/src/cmd/compile/internal/syntax/testdata/go2/smoketest.go2
index e5cfba0612..42efb42527 100644
--- a/src/cmd/compile/internal/syntax/testdata/go2/smoketest.go2
+++ b/src/cmd/compile/internal/syntax/testdata/go2/smoketest.go2
@@ -46,12 +46,12 @@ type _ struct{ T[int] }
// interfaces
type _ interface{
m()
- type int
+ ~int
}
type _ interface{
- type int, float, string
- type complex128
+ ~int | ~float | ~string
+ ~complex128
underlying(underlying underlying) underlying
}
diff --git a/src/cmd/compile/internal/syntax/testdata/go2/typeinst2.go2 b/src/cmd/compile/internal/syntax/testdata/go2/typeinst2.go2
index 6e2104a515..f3deb703b6 100644
--- a/src/cmd/compile/internal/syntax/testdata/go2/typeinst2.go2
+++ b/src/cmd/compile/internal/syntax/testdata/go2/typeinst2.go2
@@ -175,12 +175,12 @@ type _ interface {
// Interface type lists can contain any type, incl. *Named types.
// Verify that we use the underlying type to compute the operational type.
type MyInt int
-func add1[T interface{type MyInt}](x T) T {
+func add1[T interface{ ~MyInt }](x T) T {
return x + 1
}
type MyString string
-func double[T interface{type MyInt, MyString}](x T) T {
+func double[T interface{ ~MyInt | ~MyString }](x T) T {
return x + x
}
@@ -189,15 +189,15 @@ func double[T interface{type MyInt, MyString}](x T) T {
// type lists.
type E0 interface {
- type int, bool, string
+ ~int | ~bool | ~string
}
type E1 interface {
- type int, float64, string
+ ~int | ~float64 | ~string
}
type E2 interface {
- type float64
+ ~float64
}
type I0 interface {
diff --git a/src/cmd/compile/internal/syntax/testdata/go2/typeparams.go2 b/src/cmd/compile/internal/syntax/testdata/go2/typeparams.go2
index f78037f0f5..111f7c1004 100644
--- a/src/cmd/compile/internal/syntax/testdata/go2/typeparams.go2
+++ b/src/cmd/compile/internal/syntax/testdata/go2/typeparams.go2
@@ -48,22 +48,22 @@ func swapswap[A, B any](a A, b B) (A, B) {
type F[A, B any] func(A, B) (B, A)
-func min[T interface{ type int }](x, y T) T {
+func min[T interface{ ~int }](x, y T) T {
if x < y {
return x
}
return y
}
-func _[T interface{type int, float32}](x, y T) bool { return x < y }
+func _[T interface{ ~int | ~float32 }](x, y T) bool { return x < y }
func _[T any](x, y T) bool { return x /* ERROR cannot compare */ < y }
-func _[T interface{type int, float32, bool}](x, y T) bool { return x /* ERROR cannot compare */ < y }
+func _[T interface{ ~int | ~float32 | ~bool }](x, y T) bool { return x /* ERROR cannot compare */ < y }
func _[T C1[T]](x, y T) bool { return x /* ERROR cannot compare */ < y }
func _[T C2[T]](x, y T) bool { return x < y }
type C1[T any] interface{}
-type C2[T any] interface{ type int, float32 }
+type C2[T any] interface{ ~int | ~float32 }
func new[T any]() *T {
var x T
@@ -91,40 +91,40 @@ var _ = f3[int, rune, bool](1, struct{x rune}{}, nil)
// indexing
func _[T any] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
-func _[T interface{ type int }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
-func _[T interface{ type string }] (x T, i int) { _ = x[i] }
-func _[T interface{ type []int }] (x T, i int) { _ = x[i] }
-func _[T interface{ type [10]int, *[20]int, map[string]int }] (x T, i int) { _ = x[i] }
-func _[T interface{ type string, []byte }] (x T, i int) { _ = x[i] }
-func _[T interface{ type []int, [1]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
-func _[T interface{ type string, []rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~int }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~string }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[]int }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[10]int | ~*[20]int | ~map[string]int }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~string | ~[]byte }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[]int | ~[1]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~string | ~[]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
// slicing
// TODO(gri) implement this
-func _[T interface{ type string }] (x T, i, j, k int) { _ = x /* ERROR invalid operation */ [i:j:k] }
+func _[T interface{ ~string }] (x T, i, j, k int) { _ = x /* ERROR invalid operation */ [i:j:k] }
// len/cap built-ins
func _[T any](x T) { _ = len(x /* ERROR invalid argument */ ) }
-func _[T interface{ type int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string, []byte, int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string }](x T) { _ = len(x) }
-func _[T interface{ type [10]int }](x T) { _ = len(x) }
-func _[T interface{ type []byte }](x T) { _ = len(x) }
-func _[T interface{ type map[int]int }](x T) { _ = len(x) }
-func _[T interface{ type chan int }](x T) { _ = len(x) }
-func _[T interface{ type string, []byte, chan int }](x T) { _ = len(x) }
+func _[T interface{ ~int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string | ~[]byte | ~int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string }](x T) { _ = len(x) }
+func _[T interface{ ~[10]int }](x T) { _ = len(x) }
+func _[T interface{ ~[]byte }](x T) { _ = len(x) }
+func _[T interface{ ~map[int]int }](x T) { _ = len(x) }
+func _[T interface{ ~chan int }](x T) { _ = len(x) }
+func _[T interface{ ~string | ~[]byte | ~chan int }](x T) { _ = len(x) }
func _[T any](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string, []byte, int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type [10]int }](x T) { _ = cap(x) }
-func _[T interface{ type []byte }](x T) { _ = cap(x) }
-func _[T interface{ type map[int]int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type chan int }](x T) { _ = cap(x) }
-func _[T interface{ type []byte, chan int }](x T) { _ = cap(x) }
+func _[T interface{ ~int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string | ~[]byte | ~int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~[10]int }](x T) { _ = cap(x) }
+func _[T interface{ ~[]byte }](x T) { _ = cap(x) }
+func _[T interface{ ~map[int]int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~chan int }](x T) { _ = cap(x) }
+func _[T interface{ ~[]byte | ~chan int }](x T) { _ = cap(x) }
// range iteration
@@ -132,7 +132,7 @@ func _[T interface{}](x T) {
for range x /* ERROR cannot range */ {}
}
-func _[T interface{ type string, []string }](x T) {
+func _[T interface{ ~string | ~[]string }](x T) {
for range x {}
for i := range x { _ = i }
for i, _ := range x { _ = i }
@@ -144,23 +144,23 @@ func _[T interface{ type string, []string }](x T) {
}
-func _[T interface{ type string, []rune, map[int]rune }](x T) {
+func _[T interface{ ~string | ~[]rune | ~map[int]rune }](x T) {
for _, e := range x { _ = e }
for i, e := range x { _ = i; _ = e }
}
-func _[T interface{ type string, []rune, map[string]rune }](x T) {
+func _[T interface{ ~string | ~[]rune | ~map[string]rune }](x T) {
for _, e := range x { _ = e }
for i, e := range x /* ERROR must have the same key type */ { _ = e }
}
-func _[T interface{ type string, chan int }](x T) {
+func _[T interface{ ~string | ~chan int }](x T) {
for range x {}
for i := range x { _ = i }
for i, _ := range x { _ = i } // TODO(gri) should get an error here: channels only return one value
}
-func _[T interface{ type string, chan<-int }](x T) {
+func _[T interface{ ~string | ~chan<-int }](x T) {
for i := range x /* ERROR send-only channel */ { _ = i }
}
@@ -388,7 +388,7 @@ func _[T any](x T) {
}
}
-func _[T interface{type int}](x T) {
+func _[T interface{ ~int }](x T) {
_ = x /* ERROR not an interface */ .(int)
switch x /* ERROR not an interface */ .(type) {
}
diff --git a/src/cmd/compile/internal/syntax/testdata/interface.go2 b/src/cmd/compile/internal/syntax/testdata/interface.go2
index a817327a43..b399d75148 100644
--- a/src/cmd/compile/internal/syntax/testdata/interface.go2
+++ b/src/cmd/compile/internal/syntax/testdata/interface.go2
@@ -25,7 +25,6 @@ type _ interface {
~int | ~string
}
-
type _ interface {
m()
~int
@@ -34,3 +33,48 @@ type _ interface {
~int | ~string
type bool, int, float64
}
+
+type _ interface {
+ int
+ []byte
+ [10]int
+ struct{}
+ *int
+ func()
+ interface{}
+ map[string]int
+ chan T
+ chan<- T
+ <-chan T
+ T[int]
+}
+
+type _ interface {
+ int | string
+ []byte | string
+ [10]int | string
+ struct{} | string
+ *int | string
+ func() | string
+ interface{} | string
+ map[string]int | string
+ chan T | string
+ chan<- T | string
+ <-chan T | string
+ T[int] | string
+}
+
+type _ interface {
+ ~int | string
+ ~[]byte | string
+ ~[10]int | string
+ ~struct{} | string
+ ~*int | string
+ ~func() | string
+ ~interface{} | string
+ ~map[string]int | string
+ ~chan T | string
+ ~chan<- T | string
+ ~<-chan T | string
+ ~T[int] | string
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/issue46558.src b/src/cmd/compile/internal/syntax/testdata/issue46558.src
new file mode 100644
index 0000000000..a22b600825
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue46558.src
@@ -0,0 +1,14 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func F(s string) {
+ switch s[0] {
+ case 'a':
+ case s[2] { // ERROR unexpected {
+ case 'b':
+ }
+ }
+} // ERROR non-declaration statement
diff --git a/src/cmd/compile/internal/syntax/walk.go b/src/cmd/compile/internal/syntax/walk.go
index c26e97a0d8..b025844204 100644
--- a/src/cmd/compile/internal/syntax/walk.go
+++ b/src/cmd/compile/internal/syntax/walk.go
@@ -8,31 +8,73 @@ package syntax
import "fmt"
-// Walk traverses a syntax in pre-order: It starts by calling f(root);
-// root must not be nil. If f returns false (== "continue"), Walk calls
+// Inspect traverses an AST in pre-order: It starts by calling
+// f(node); node must not be nil. If f returns true, Inspect invokes f
+// recursively for each of the non-nil children of node, followed by a
+// call of f(nil).
+//
+// See Walk for caveats about shared nodes.
+func Inspect(root Node, f func(Node) bool) {
+ Walk(root, inspector(f))
+}
+
+type inspector func(Node) bool
+
+func (v inspector) Visit(node Node) Visitor {
+ if v(node) {
+ return v
+ }
+ return nil
+}
+
+// Crawl traverses a syntax in pre-order: It starts by calling f(root);
+// root must not be nil. If f returns false (== "continue"), Crawl calls
// f recursively for each of the non-nil children of that node; if f
-// returns true (== "stop"), Walk does not traverse the respective node's
+// returns true (== "stop"), Crawl does not traverse the respective node's
// children.
+//
+// See Walk for caveats about shared nodes.
+//
+// Deprecated: Use Inspect instead.
+func Crawl(root Node, f func(Node) bool) {
+ Inspect(root, func(node Node) bool {
+ return node != nil && !f(node)
+ })
+}
+
+// Walk traverses an AST in pre-order: It starts by calling
+// v.Visit(node); node must not be nil. If the visitor w returned by
+// v.Visit(node) is not nil, Walk is invoked recursively with visitor
+// w for each of the non-nil children of node, followed by a call of
+// w.Visit(nil).
+//
// Some nodes may be shared among multiple parent nodes (e.g., types in
// field lists such as type T in "a, b, c T"). Such shared nodes are
// walked multiple times.
// TODO(gri) Revisit this design. It may make sense to walk those nodes
// only once. A place where this matters is types2.TestResolveIdents.
-func Walk(root Node, f func(Node) bool) {
- w := walker{f}
- w.node(root)
+func Walk(root Node, v Visitor) {
+ walker{v}.node(root)
+}
+
+// A Visitor's Visit method is invoked for each node encountered by Walk.
+// If the result visitor w is not nil, Walk visits each of the children
+// of node with the visitor w, followed by a call of w.Visit(nil).
+type Visitor interface {
+ Visit(node Node) (w Visitor)
}
type walker struct {
- f func(Node) bool
+ v Visitor
}
-func (w *walker) node(n Node) {
+func (w walker) node(n Node) {
if n == nil {
- panic("invalid syntax tree: nil node")
+ panic("nil node")
}
- if w.f(n) {
+ w.v = w.v.Visit(n)
+ if w.v == nil {
return
}
@@ -285,33 +327,35 @@ func (w *walker) node(n Node) {
default:
panic(fmt.Sprintf("internal error: unknown node type %T", n))
}
+
+ w.v.Visit(nil)
}
-func (w *walker) declList(list []Decl) {
+func (w walker) declList(list []Decl) {
for _, n := range list {
w.node(n)
}
}
-func (w *walker) exprList(list []Expr) {
+func (w walker) exprList(list []Expr) {
for _, n := range list {
w.node(n)
}
}
-func (w *walker) stmtList(list []Stmt) {
+func (w walker) stmtList(list []Stmt) {
for _, n := range list {
w.node(n)
}
}
-func (w *walker) nameList(list []*Name) {
+func (w walker) nameList(list []*Name) {
for _, n := range list {
w.node(n)
}
}
-func (w *walker) fieldList(list []*Field) {
+func (w walker) fieldList(list []*Field) {
for _, n := range list {
w.node(n)
}
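As a usage sketch of the reworked traversal API above (illustrative code written as if inside the syntax package, which is internal to cmd/compile; the helper names are not part of the patch), the same node count can be expressed against both entry points:

    // countNodes uses the callback-based Inspect. The callback also receives a
    // trailing nil after each node's children, which we skip.
    func countNodes(root Node) int {
        n := 0
        Inspect(root, func(node Node) bool {
            if node != nil {
                n++
            }
            return true // keep descending
        })
        return n
    }

    // nodeCounter does the same with the lower-level Walk/Visitor pair.
    type nodeCounter struct{ n *int }

    func (c nodeCounter) Visit(node Node) Visitor {
        if node == nil { // invoked once after the node's children have been walked
            return nil
        }
        (*c.n)++
        return c // non-nil: also visit this node's children
    }

    func countNodesWalk(root Node) int {
        n := 0
        Walk(root, nodeCounter{&n})
        return n
    }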
diff --git a/src/cmd/compile/internal/test/abiutils_test.go b/src/cmd/compile/internal/test/abiutils_test.go
index b752c48612..839546bcb8 100644
--- a/src/cmd/compile/internal/test/abiutils_test.go
+++ b/src/cmd/compile/internal/test/abiutils_test.go
@@ -33,6 +33,8 @@ func TestMain(m *testing.M) {
base.Ctxt.DiagFunc = base.Errorf
base.Ctxt.DiagFlush = base.FlushErrors
base.Ctxt.Bso = bufio.NewWriter(os.Stdout)
+ types.LocalPkg = types.NewPkg("", "local")
+ types.LocalPkg.Prefix = `""`
types.PtrSize = ssagen.Arch.LinkArch.PtrSize
types.RegSize = ssagen.Arch.LinkArch.RegSize
typecheck.InitUniverse()
@@ -309,8 +311,8 @@ func TestABIUtilsInterfaces(t *testing.T) {
ei := types.Types[types.TINTER] // interface{}
pei := types.NewPtr(ei) // *interface{}
fldt := mkFuncType(types.FakeRecvType(), []*types.Type{},
- []*types.Type{types.UntypedString})
- field := types.NewField(src.NoXPos, nil, fldt)
+ []*types.Type{types.Types[types.TSTRING]})
+ field := types.NewField(src.NoXPos, typecheck.Lookup("F"), fldt)
nei := types.NewInterface(types.LocalPkg, []*types.Field{field})
i16 := types.Types[types.TINT16]
tb := types.Types[types.TBOOL]
@@ -322,12 +324,12 @@ func TestABIUtilsInterfaces(t *testing.T) {
IN 0: R{ I0 I1 I2 } spilloffset: 0 typ: struct { int16; int16; bool }
IN 1: R{ I3 I4 } spilloffset: 8 typ: interface {}
IN 2: R{ I5 I6 } spilloffset: 24 typ: interface {}
- IN 3: R{ I7 I8 } spilloffset: 40 typ: interface { () untyped string }
+ IN 3: R{ I7 I8 } spilloffset: 40 typ: interface { F() string }
IN 4: R{ } offset: 0 typ: *interface {}
- IN 5: R{ } offset: 8 typ: interface { () untyped string }
+ IN 5: R{ } offset: 8 typ: interface { F() string }
IN 6: R{ } offset: 24 typ: int16
OUT 0: R{ I0 I1 } spilloffset: -1 typ: interface {}
- OUT 1: R{ I2 I3 } spilloffset: -1 typ: interface { () untyped string }
+ OUT 1: R{ I2 I3 } spilloffset: -1 typ: interface { F() string }
OUT 2: R{ I4 } spilloffset: -1 typ: *interface {}
offsetToSpillArea: 32 spillAreaSize: 56
`)
diff --git a/src/cmd/compile/internal/test/inl_test.go b/src/cmd/compile/internal/test/inl_test.go
index 6f100033cf..bbdbe0c37c 100644
--- a/src/cmd/compile/internal/test/inl_test.go
+++ b/src/cmd/compile/internal/test/inl_test.go
@@ -42,13 +42,10 @@ func TestIntendedInlining(t *testing.T) {
"bucketMask",
"bucketShift",
"chanbuf",
- "deferArgs",
- "deferclass",
"evacuated",
"fastlog2",
"fastrand",
"float64bits",
- "funcPC",
"getArgInfoFast",
"getm",
"getMCache",
@@ -65,7 +62,6 @@ func TestIntendedInlining(t *testing.T) {
"subtract1",
"subtractb",
"tophash",
- "totaldefersize",
"(*bmap).keys",
"(*bmap).overflow",
"(*waitq).enqueue",
diff --git a/src/cmd/compile/internal/typecheck/bexport.go b/src/cmd/compile/internal/typecheck/bexport.go
index 4a84bb13fa..cc7f91f937 100644
--- a/src/cmd/compile/internal/typecheck/bexport.go
+++ b/src/cmd/compile/internal/typecheck/bexport.go
@@ -96,6 +96,9 @@ func predeclared() []*types.Type {
// any type, for builtin export data
types.Types[types.TANY],
+
+ // comparable
+ types.ComparableType,
}
}
return predecl
diff --git a/src/cmd/compile/internal/typecheck/builtin.go b/src/cmd/compile/internal/typecheck/builtin.go
index 833b17b414..3f177d9173 100644
--- a/src/cmd/compile/internal/typecheck/builtin.go
+++ b/src/cmd/compile/internal/typecheck/builtin.go
@@ -71,137 +71,135 @@ var runtimeDecls = [...]struct {
{"slicecopy", funcTag, 54},
{"decoderune", funcTag, 55},
{"countrunes", funcTag, 56},
- {"convI2I", funcTag, 57},
- {"convT16", funcTag, 59},
- {"convT32", funcTag, 61},
- {"convT64", funcTag, 62},
- {"convTstring", funcTag, 63},
- {"convTslice", funcTag, 66},
- {"convT2E", funcTag, 67},
- {"convT2Enoptr", funcTag, 67},
- {"convT2I", funcTag, 67},
- {"convT2Inoptr", funcTag, 67},
- {"assertE2I", funcTag, 68},
- {"assertE2I2", funcTag, 57},
- {"assertI2I", funcTag, 68},
- {"assertI2I2", funcTag, 57},
- {"panicdottypeE", funcTag, 69},
- {"panicdottypeI", funcTag, 69},
- {"panicnildottype", funcTag, 70},
- {"ifaceeq", funcTag, 72},
- {"efaceeq", funcTag, 72},
- {"fastrand", funcTag, 73},
- {"makemap64", funcTag, 75},
- {"makemap", funcTag, 76},
- {"makemap_small", funcTag, 77},
- {"mapaccess1", funcTag, 78},
- {"mapaccess1_fast32", funcTag, 79},
- {"mapaccess1_fast64", funcTag, 80},
- {"mapaccess1_faststr", funcTag, 81},
- {"mapaccess1_fat", funcTag, 82},
- {"mapaccess2", funcTag, 83},
- {"mapaccess2_fast32", funcTag, 84},
- {"mapaccess2_fast64", funcTag, 85},
- {"mapaccess2_faststr", funcTag, 86},
- {"mapaccess2_fat", funcTag, 87},
- {"mapassign", funcTag, 78},
- {"mapassign_fast32", funcTag, 79},
- {"mapassign_fast32ptr", funcTag, 88},
- {"mapassign_fast64", funcTag, 80},
- {"mapassign_fast64ptr", funcTag, 88},
- {"mapassign_faststr", funcTag, 81},
- {"mapiterinit", funcTag, 89},
- {"mapdelete", funcTag, 89},
- {"mapdelete_fast32", funcTag, 90},
- {"mapdelete_fast64", funcTag, 91},
- {"mapdelete_faststr", funcTag, 92},
- {"mapiternext", funcTag, 93},
- {"mapclear", funcTag, 94},
- {"makechan64", funcTag, 96},
- {"makechan", funcTag, 97},
- {"chanrecv1", funcTag, 99},
- {"chanrecv2", funcTag, 100},
- {"chansend1", funcTag, 102},
+ {"convI2I", funcTag, 58},
+ {"convT", funcTag, 59},
+ {"convTnoptr", funcTag, 59},
+ {"convT16", funcTag, 61},
+ {"convT32", funcTag, 63},
+ {"convT64", funcTag, 64},
+ {"convTstring", funcTag, 65},
+ {"convTslice", funcTag, 68},
+ {"assertE2I", funcTag, 69},
+ {"assertE2I2", funcTag, 70},
+ {"assertI2I", funcTag, 69},
+ {"assertI2I2", funcTag, 70},
+ {"panicdottypeE", funcTag, 71},
+ {"panicdottypeI", funcTag, 71},
+ {"panicnildottype", funcTag, 72},
+ {"ifaceeq", funcTag, 73},
+ {"efaceeq", funcTag, 73},
+ {"fastrand", funcTag, 74},
+ {"makemap64", funcTag, 76},
+ {"makemap", funcTag, 77},
+ {"makemap_small", funcTag, 78},
+ {"mapaccess1", funcTag, 79},
+ {"mapaccess1_fast32", funcTag, 80},
+ {"mapaccess1_fast64", funcTag, 81},
+ {"mapaccess1_faststr", funcTag, 82},
+ {"mapaccess1_fat", funcTag, 83},
+ {"mapaccess2", funcTag, 84},
+ {"mapaccess2_fast32", funcTag, 85},
+ {"mapaccess2_fast64", funcTag, 86},
+ {"mapaccess2_faststr", funcTag, 87},
+ {"mapaccess2_fat", funcTag, 88},
+ {"mapassign", funcTag, 79},
+ {"mapassign_fast32", funcTag, 80},
+ {"mapassign_fast32ptr", funcTag, 89},
+ {"mapassign_fast64", funcTag, 81},
+ {"mapassign_fast64ptr", funcTag, 89},
+ {"mapassign_faststr", funcTag, 82},
+ {"mapiterinit", funcTag, 90},
+ {"mapdelete", funcTag, 90},
+ {"mapdelete_fast32", funcTag, 91},
+ {"mapdelete_fast64", funcTag, 92},
+ {"mapdelete_faststr", funcTag, 93},
+ {"mapiternext", funcTag, 94},
+ {"mapclear", funcTag, 95},
+ {"makechan64", funcTag, 97},
+ {"makechan", funcTag, 98},
+ {"chanrecv1", funcTag, 100},
+ {"chanrecv2", funcTag, 101},
+ {"chansend1", funcTag, 103},
{"closechan", funcTag, 30},
- {"writeBarrier", varTag, 104},
- {"typedmemmove", funcTag, 105},
- {"typedmemclr", funcTag, 106},
- {"typedslicecopy", funcTag, 107},
- {"selectnbsend", funcTag, 108},
- {"selectnbrecv", funcTag, 109},
- {"selectsetpc", funcTag, 110},
- {"selectgo", funcTag, 111},
+ {"writeBarrier", varTag, 105},
+ {"typedmemmove", funcTag, 106},
+ {"typedmemclr", funcTag, 107},
+ {"typedslicecopy", funcTag, 108},
+ {"selectnbsend", funcTag, 109},
+ {"selectnbrecv", funcTag, 110},
+ {"selectsetpc", funcTag, 111},
+ {"selectgo", funcTag, 112},
{"block", funcTag, 9},
- {"makeslice", funcTag, 112},
- {"makeslice64", funcTag, 113},
- {"makeslicecopy", funcTag, 114},
- {"growslice", funcTag, 116},
- {"unsafeslice", funcTag, 117},
- {"unsafeslice64", funcTag, 118},
- {"unsafeslicecheckptr", funcTag, 118},
- {"memmove", funcTag, 119},
- {"memclrNoHeapPointers", funcTag, 120},
- {"memclrHasPointers", funcTag, 120},
- {"memequal", funcTag, 121},
- {"memequal0", funcTag, 122},
- {"memequal8", funcTag, 122},
- {"memequal16", funcTag, 122},
- {"memequal32", funcTag, 122},
- {"memequal64", funcTag, 122},
- {"memequal128", funcTag, 122},
- {"f32equal", funcTag, 123},
- {"f64equal", funcTag, 123},
- {"c64equal", funcTag, 123},
- {"c128equal", funcTag, 123},
- {"strequal", funcTag, 123},
- {"interequal", funcTag, 123},
- {"nilinterequal", funcTag, 123},
- {"memhash", funcTag, 124},
- {"memhash0", funcTag, 125},
- {"memhash8", funcTag, 125},
- {"memhash16", funcTag, 125},
- {"memhash32", funcTag, 125},
- {"memhash64", funcTag, 125},
- {"memhash128", funcTag, 125},
- {"f32hash", funcTag, 125},
- {"f64hash", funcTag, 125},
- {"c64hash", funcTag, 125},
- {"c128hash", funcTag, 125},
- {"strhash", funcTag, 125},
- {"interhash", funcTag, 125},
- {"nilinterhash", funcTag, 125},
- {"int64div", funcTag, 126},
- {"uint64div", funcTag, 127},
- {"int64mod", funcTag, 126},
- {"uint64mod", funcTag, 127},
- {"float64toint64", funcTag, 128},
- {"float64touint64", funcTag, 129},
- {"float64touint32", funcTag, 130},
- {"int64tofloat64", funcTag, 131},
- {"uint64tofloat64", funcTag, 132},
- {"uint32tofloat64", funcTag, 133},
- {"complex128div", funcTag, 134},
- {"getcallerpc", funcTag, 135},
- {"getcallersp", funcTag, 135},
+ {"makeslice", funcTag, 113},
+ {"makeslice64", funcTag, 114},
+ {"makeslicecopy", funcTag, 115},
+ {"growslice", funcTag, 117},
+ {"unsafeslice", funcTag, 118},
+ {"unsafeslice64", funcTag, 119},
+ {"unsafeslicecheckptr", funcTag, 119},
+ {"memmove", funcTag, 120},
+ {"memclrNoHeapPointers", funcTag, 121},
+ {"memclrHasPointers", funcTag, 121},
+ {"memequal", funcTag, 122},
+ {"memequal0", funcTag, 123},
+ {"memequal8", funcTag, 123},
+ {"memequal16", funcTag, 123},
+ {"memequal32", funcTag, 123},
+ {"memequal64", funcTag, 123},
+ {"memequal128", funcTag, 123},
+ {"f32equal", funcTag, 124},
+ {"f64equal", funcTag, 124},
+ {"c64equal", funcTag, 124},
+ {"c128equal", funcTag, 124},
+ {"strequal", funcTag, 124},
+ {"interequal", funcTag, 124},
+ {"nilinterequal", funcTag, 124},
+ {"memhash", funcTag, 125},
+ {"memhash0", funcTag, 126},
+ {"memhash8", funcTag, 126},
+ {"memhash16", funcTag, 126},
+ {"memhash32", funcTag, 126},
+ {"memhash64", funcTag, 126},
+ {"memhash128", funcTag, 126},
+ {"f32hash", funcTag, 126},
+ {"f64hash", funcTag, 126},
+ {"c64hash", funcTag, 126},
+ {"c128hash", funcTag, 126},
+ {"strhash", funcTag, 126},
+ {"interhash", funcTag, 126},
+ {"nilinterhash", funcTag, 126},
+ {"int64div", funcTag, 127},
+ {"uint64div", funcTag, 128},
+ {"int64mod", funcTag, 127},
+ {"uint64mod", funcTag, 128},
+ {"float64toint64", funcTag, 129},
+ {"float64touint64", funcTag, 130},
+ {"float64touint32", funcTag, 131},
+ {"int64tofloat64", funcTag, 132},
+ {"uint64tofloat64", funcTag, 133},
+ {"uint32tofloat64", funcTag, 134},
+ {"complex128div", funcTag, 135},
+ {"getcallerpc", funcTag, 136},
+ {"getcallersp", funcTag, 136},
{"racefuncenter", funcTag, 31},
{"racefuncexit", funcTag, 9},
{"raceread", funcTag, 31},
{"racewrite", funcTag, 31},
- {"racereadrange", funcTag, 136},
- {"racewriterange", funcTag, 136},
- {"msanread", funcTag, 136},
- {"msanwrite", funcTag, 136},
- {"msanmove", funcTag, 137},
- {"checkptrAlignment", funcTag, 138},
- {"checkptrArithmetic", funcTag, 140},
- {"libfuzzerTraceCmp1", funcTag, 141},
- {"libfuzzerTraceCmp2", funcTag, 142},
- {"libfuzzerTraceCmp4", funcTag, 143},
- {"libfuzzerTraceCmp8", funcTag, 144},
- {"libfuzzerTraceConstCmp1", funcTag, 141},
- {"libfuzzerTraceConstCmp2", funcTag, 142},
- {"libfuzzerTraceConstCmp4", funcTag, 143},
- {"libfuzzerTraceConstCmp8", funcTag, 144},
+ {"racereadrange", funcTag, 137},
+ {"racewriterange", funcTag, 137},
+ {"msanread", funcTag, 137},
+ {"msanwrite", funcTag, 137},
+ {"msanmove", funcTag, 138},
+ {"checkptrAlignment", funcTag, 139},
+ {"checkptrArithmetic", funcTag, 141},
+ {"libfuzzerTraceCmp1", funcTag, 142},
+ {"libfuzzerTraceCmp2", funcTag, 143},
+ {"libfuzzerTraceCmp4", funcTag, 144},
+ {"libfuzzerTraceCmp8", funcTag, 145},
+ {"libfuzzerTraceConstCmp1", funcTag, 142},
+ {"libfuzzerTraceConstCmp2", funcTag, 143},
+ {"libfuzzerTraceConstCmp4", funcTag, 144},
+ {"libfuzzerTraceConstCmp8", funcTag, 145},
{"x86HasPOPCNT", varTag, 6},
{"x86HasSSE41", varTag, 6},
{"x86HasFMA", varTag, 6},
@@ -224,7 +222,7 @@ func params(tlist ...*types.Type) []*types.Field {
}
func runtimeTypes() []*types.Type {
- var typs [145]*types.Type
+ var typs [146]*types.Type
typs[0] = types.ByteType
typs[1] = types.NewPtr(typs[0])
typs[2] = types.Types[types.TANY]
@@ -282,93 +280,94 @@ func runtimeTypes() []*types.Type {
typs[54] = newSig(params(typs[3], typs[15], typs[3], typs[15], typs[5]), params(typs[15]))
typs[55] = newSig(params(typs[28], typs[15]), params(typs[46], typs[15]))
typs[56] = newSig(params(typs[28]), params(typs[15]))
- typs[57] = newSig(params(typs[1], typs[2]), params(typs[2]))
- typs[58] = types.Types[types.TUINT16]
- typs[59] = newSig(params(typs[58]), params(typs[7]))
- typs[60] = types.Types[types.TUINT32]
+ typs[57] = types.NewPtr(typs[5])
+ typs[58] = newSig(params(typs[1], typs[57]), params(typs[57]))
+ typs[59] = newSig(params(typs[1], typs[3]), params(typs[7]))
+ typs[60] = types.Types[types.TUINT16]
typs[61] = newSig(params(typs[60]), params(typs[7]))
- typs[62] = newSig(params(typs[24]), params(typs[7]))
- typs[63] = newSig(params(typs[28]), params(typs[7]))
- typs[64] = types.Types[types.TUINT8]
- typs[65] = types.NewSlice(typs[64])
- typs[66] = newSig(params(typs[65]), params(typs[7]))
- typs[67] = newSig(params(typs[1], typs[3]), params(typs[2]))
- typs[68] = newSig(params(typs[1], typs[1]), params(typs[1]))
- typs[69] = newSig(params(typs[1], typs[1], typs[1]), nil)
- typs[70] = newSig(params(typs[1]), nil)
- typs[71] = types.NewPtr(typs[5])
- typs[72] = newSig(params(typs[71], typs[7], typs[7]), params(typs[6]))
- typs[73] = newSig(nil, params(typs[60]))
- typs[74] = types.NewMap(typs[2], typs[2])
- typs[75] = newSig(params(typs[1], typs[22], typs[3]), params(typs[74]))
- typs[76] = newSig(params(typs[1], typs[15], typs[3]), params(typs[74]))
- typs[77] = newSig(nil, params(typs[74]))
- typs[78] = newSig(params(typs[1], typs[74], typs[3]), params(typs[3]))
- typs[79] = newSig(params(typs[1], typs[74], typs[60]), params(typs[3]))
- typs[80] = newSig(params(typs[1], typs[74], typs[24]), params(typs[3]))
- typs[81] = newSig(params(typs[1], typs[74], typs[28]), params(typs[3]))
- typs[82] = newSig(params(typs[1], typs[74], typs[3], typs[1]), params(typs[3]))
- typs[83] = newSig(params(typs[1], typs[74], typs[3]), params(typs[3], typs[6]))
- typs[84] = newSig(params(typs[1], typs[74], typs[60]), params(typs[3], typs[6]))
- typs[85] = newSig(params(typs[1], typs[74], typs[24]), params(typs[3], typs[6]))
- typs[86] = newSig(params(typs[1], typs[74], typs[28]), params(typs[3], typs[6]))
- typs[87] = newSig(params(typs[1], typs[74], typs[3], typs[1]), params(typs[3], typs[6]))
- typs[88] = newSig(params(typs[1], typs[74], typs[7]), params(typs[3]))
- typs[89] = newSig(params(typs[1], typs[74], typs[3]), nil)
- typs[90] = newSig(params(typs[1], typs[74], typs[60]), nil)
- typs[91] = newSig(params(typs[1], typs[74], typs[24]), nil)
- typs[92] = newSig(params(typs[1], typs[74], typs[28]), nil)
- typs[93] = newSig(params(typs[3]), nil)
- typs[94] = newSig(params(typs[1], typs[74]), nil)
- typs[95] = types.NewChan(typs[2], types.Cboth)
- typs[96] = newSig(params(typs[1], typs[22]), params(typs[95]))
- typs[97] = newSig(params(typs[1], typs[15]), params(typs[95]))
- typs[98] = types.NewChan(typs[2], types.Crecv)
- typs[99] = newSig(params(typs[98], typs[3]), nil)
- typs[100] = newSig(params(typs[98], typs[3]), params(typs[6]))
- typs[101] = types.NewChan(typs[2], types.Csend)
- typs[102] = newSig(params(typs[101], typs[3]), nil)
- typs[103] = types.NewArray(typs[0], 3)
- typs[104] = types.NewStruct(types.NoPkg, []*types.Field{types.NewField(src.NoXPos, Lookup("enabled"), typs[6]), types.NewField(src.NoXPos, Lookup("pad"), typs[103]), types.NewField(src.NoXPos, Lookup("needed"), typs[6]), types.NewField(src.NoXPos, Lookup("cgo"), typs[6]), types.NewField(src.NoXPos, Lookup("alignme"), typs[24])})
- typs[105] = newSig(params(typs[1], typs[3], typs[3]), nil)
- typs[106] = newSig(params(typs[1], typs[3]), nil)
- typs[107] = newSig(params(typs[1], typs[3], typs[15], typs[3], typs[15]), params(typs[15]))
- typs[108] = newSig(params(typs[101], typs[3]), params(typs[6]))
- typs[109] = newSig(params(typs[3], typs[98]), params(typs[6], typs[6]))
- typs[110] = newSig(params(typs[71]), nil)
- typs[111] = newSig(params(typs[1], typs[1], typs[71], typs[15], typs[15], typs[6]), params(typs[15], typs[6]))
- typs[112] = newSig(params(typs[1], typs[15], typs[15]), params(typs[7]))
- typs[113] = newSig(params(typs[1], typs[22], typs[22]), params(typs[7]))
- typs[114] = newSig(params(typs[1], typs[15], typs[15], typs[7]), params(typs[7]))
- typs[115] = types.NewSlice(typs[2])
- typs[116] = newSig(params(typs[1], typs[115], typs[15]), params(typs[115]))
- typs[117] = newSig(params(typs[1], typs[7], typs[15]), nil)
- typs[118] = newSig(params(typs[1], typs[7], typs[22]), nil)
- typs[119] = newSig(params(typs[3], typs[3], typs[5]), nil)
- typs[120] = newSig(params(typs[7], typs[5]), nil)
- typs[121] = newSig(params(typs[3], typs[3], typs[5]), params(typs[6]))
- typs[122] = newSig(params(typs[3], typs[3]), params(typs[6]))
- typs[123] = newSig(params(typs[7], typs[7]), params(typs[6]))
- typs[124] = newSig(params(typs[7], typs[5], typs[5]), params(typs[5]))
- typs[125] = newSig(params(typs[7], typs[5]), params(typs[5]))
- typs[126] = newSig(params(typs[22], typs[22]), params(typs[22]))
- typs[127] = newSig(params(typs[24], typs[24]), params(typs[24]))
- typs[128] = newSig(params(typs[20]), params(typs[22]))
- typs[129] = newSig(params(typs[20]), params(typs[24]))
- typs[130] = newSig(params(typs[20]), params(typs[60]))
- typs[131] = newSig(params(typs[22]), params(typs[20]))
- typs[132] = newSig(params(typs[24]), params(typs[20]))
- typs[133] = newSig(params(typs[60]), params(typs[20]))
- typs[134] = newSig(params(typs[26], typs[26]), params(typs[26]))
- typs[135] = newSig(nil, params(typs[5]))
- typs[136] = newSig(params(typs[5], typs[5]), nil)
- typs[137] = newSig(params(typs[5], typs[5], typs[5]), nil)
- typs[138] = newSig(params(typs[7], typs[1], typs[5]), nil)
- typs[139] = types.NewSlice(typs[7])
- typs[140] = newSig(params(typs[7], typs[139]), nil)
- typs[141] = newSig(params(typs[64], typs[64]), nil)
- typs[142] = newSig(params(typs[58], typs[58]), nil)
+ typs[62] = types.Types[types.TUINT32]
+ typs[63] = newSig(params(typs[62]), params(typs[7]))
+ typs[64] = newSig(params(typs[24]), params(typs[7]))
+ typs[65] = newSig(params(typs[28]), params(typs[7]))
+ typs[66] = types.Types[types.TUINT8]
+ typs[67] = types.NewSlice(typs[66])
+ typs[68] = newSig(params(typs[67]), params(typs[7]))
+ typs[69] = newSig(params(typs[1], typs[1]), params(typs[1]))
+ typs[70] = newSig(params(typs[1], typs[2]), params(typs[2]))
+ typs[71] = newSig(params(typs[1], typs[1], typs[1]), nil)
+ typs[72] = newSig(params(typs[1]), nil)
+ typs[73] = newSig(params(typs[57], typs[7], typs[7]), params(typs[6]))
+ typs[74] = newSig(nil, params(typs[62]))
+ typs[75] = types.NewMap(typs[2], typs[2])
+ typs[76] = newSig(params(typs[1], typs[22], typs[3]), params(typs[75]))
+ typs[77] = newSig(params(typs[1], typs[15], typs[3]), params(typs[75]))
+ typs[78] = newSig(nil, params(typs[75]))
+ typs[79] = newSig(params(typs[1], typs[75], typs[3]), params(typs[3]))
+ typs[80] = newSig(params(typs[1], typs[75], typs[62]), params(typs[3]))
+ typs[81] = newSig(params(typs[1], typs[75], typs[24]), params(typs[3]))
+ typs[82] = newSig(params(typs[1], typs[75], typs[28]), params(typs[3]))
+ typs[83] = newSig(params(typs[1], typs[75], typs[3], typs[1]), params(typs[3]))
+ typs[84] = newSig(params(typs[1], typs[75], typs[3]), params(typs[3], typs[6]))
+ typs[85] = newSig(params(typs[1], typs[75], typs[62]), params(typs[3], typs[6]))
+ typs[86] = newSig(params(typs[1], typs[75], typs[24]), params(typs[3], typs[6]))
+ typs[87] = newSig(params(typs[1], typs[75], typs[28]), params(typs[3], typs[6]))
+ typs[88] = newSig(params(typs[1], typs[75], typs[3], typs[1]), params(typs[3], typs[6]))
+ typs[89] = newSig(params(typs[1], typs[75], typs[7]), params(typs[3]))
+ typs[90] = newSig(params(typs[1], typs[75], typs[3]), nil)
+ typs[91] = newSig(params(typs[1], typs[75], typs[62]), nil)
+ typs[92] = newSig(params(typs[1], typs[75], typs[24]), nil)
+ typs[93] = newSig(params(typs[1], typs[75], typs[28]), nil)
+ typs[94] = newSig(params(typs[3]), nil)
+ typs[95] = newSig(params(typs[1], typs[75]), nil)
+ typs[96] = types.NewChan(typs[2], types.Cboth)
+ typs[97] = newSig(params(typs[1], typs[22]), params(typs[96]))
+ typs[98] = newSig(params(typs[1], typs[15]), params(typs[96]))
+ typs[99] = types.NewChan(typs[2], types.Crecv)
+ typs[100] = newSig(params(typs[99], typs[3]), nil)
+ typs[101] = newSig(params(typs[99], typs[3]), params(typs[6]))
+ typs[102] = types.NewChan(typs[2], types.Csend)
+ typs[103] = newSig(params(typs[102], typs[3]), nil)
+ typs[104] = types.NewArray(typs[0], 3)
+ typs[105] = types.NewStruct(types.NoPkg, []*types.Field{types.NewField(src.NoXPos, Lookup("enabled"), typs[6]), types.NewField(src.NoXPos, Lookup("pad"), typs[104]), types.NewField(src.NoXPos, Lookup("needed"), typs[6]), types.NewField(src.NoXPos, Lookup("cgo"), typs[6]), types.NewField(src.NoXPos, Lookup("alignme"), typs[24])})
+ typs[106] = newSig(params(typs[1], typs[3], typs[3]), nil)
+ typs[107] = newSig(params(typs[1], typs[3]), nil)
+ typs[108] = newSig(params(typs[1], typs[3], typs[15], typs[3], typs[15]), params(typs[15]))
+ typs[109] = newSig(params(typs[102], typs[3]), params(typs[6]))
+ typs[110] = newSig(params(typs[3], typs[99]), params(typs[6], typs[6]))
+ typs[111] = newSig(params(typs[57]), nil)
+ typs[112] = newSig(params(typs[1], typs[1], typs[57], typs[15], typs[15], typs[6]), params(typs[15], typs[6]))
+ typs[113] = newSig(params(typs[1], typs[15], typs[15]), params(typs[7]))
+ typs[114] = newSig(params(typs[1], typs[22], typs[22]), params(typs[7]))
+ typs[115] = newSig(params(typs[1], typs[15], typs[15], typs[7]), params(typs[7]))
+ typs[116] = types.NewSlice(typs[2])
+ typs[117] = newSig(params(typs[1], typs[116], typs[15]), params(typs[116]))
+ typs[118] = newSig(params(typs[1], typs[7], typs[15]), nil)
+ typs[119] = newSig(params(typs[1], typs[7], typs[22]), nil)
+ typs[120] = newSig(params(typs[3], typs[3], typs[5]), nil)
+ typs[121] = newSig(params(typs[7], typs[5]), nil)
+ typs[122] = newSig(params(typs[3], typs[3], typs[5]), params(typs[6]))
+ typs[123] = newSig(params(typs[3], typs[3]), params(typs[6]))
+ typs[124] = newSig(params(typs[7], typs[7]), params(typs[6]))
+ typs[125] = newSig(params(typs[7], typs[5], typs[5]), params(typs[5]))
+ typs[126] = newSig(params(typs[7], typs[5]), params(typs[5]))
+ typs[127] = newSig(params(typs[22], typs[22]), params(typs[22]))
+ typs[128] = newSig(params(typs[24], typs[24]), params(typs[24]))
+ typs[129] = newSig(params(typs[20]), params(typs[22]))
+ typs[130] = newSig(params(typs[20]), params(typs[24]))
+ typs[131] = newSig(params(typs[20]), params(typs[62]))
+ typs[132] = newSig(params(typs[22]), params(typs[20]))
+ typs[133] = newSig(params(typs[24]), params(typs[20]))
+ typs[134] = newSig(params(typs[62]), params(typs[20]))
+ typs[135] = newSig(params(typs[26], typs[26]), params(typs[26]))
+ typs[136] = newSig(nil, params(typs[5]))
+ typs[137] = newSig(params(typs[5], typs[5]), nil)
+ typs[138] = newSig(params(typs[5], typs[5], typs[5]), nil)
+ typs[139] = newSig(params(typs[7], typs[1], typs[5]), nil)
+ typs[140] = types.NewSlice(typs[7])
+ typs[141] = newSig(params(typs[7], typs[140]), nil)
+ typs[142] = newSig(params(typs[66], typs[66]), nil)
typs[143] = newSig(params(typs[60], typs[60]), nil)
- typs[144] = newSig(params(typs[24], typs[24]), nil)
+ typs[144] = newSig(params(typs[62], typs[62]), nil)
+ typs[145] = newSig(params(typs[24], typs[24]), nil)
return typs[:]
}
diff --git a/src/cmd/compile/internal/typecheck/builtin/runtime.go b/src/cmd/compile/internal/typecheck/builtin/runtime.go
index 2b29ea3c08..605b904288 100644
--- a/src/cmd/compile/internal/typecheck/builtin/runtime.go
+++ b/src/cmd/compile/internal/typecheck/builtin/runtime.go
@@ -84,10 +84,15 @@ func decoderune(string, int) (retv rune, retk int)
func countrunes(string) int
// Non-empty-interface to non-empty-interface conversion.
-func convI2I(typ *byte, elem any) (ret any)
+func convI2I(typ *byte, itab *uintptr) (ret *uintptr)
-// Specialized type-to-interface conversion.
-// These return only a data pointer.
+// Convert non-interface type to the data word of an (empty or nonempty) interface.
+func convT(typ *byte, elem *any) unsafe.Pointer
+
+// Same as convT, for types with no pointers in them.
+func convTnoptr(typ *byte, elem *any) unsafe.Pointer
+
+// Specialized versions of convT for specific types.
// These functions take concrete types in the runtime. But they may
// be used for a wider range of types, which have the same memory
// layout as the parameter type. The compiler converts the
@@ -99,14 +104,6 @@ func convT64(val uint64) unsafe.Pointer
func convTstring(val string) unsafe.Pointer
func convTslice(val []uint8) unsafe.Pointer
-// Type to empty-interface conversion.
-func convT2E(typ *byte, elem *any) (ret any)
-func convT2Enoptr(typ *byte, elem *any) (ret any)
-
-// Type to non-empty-interface conversion.
-func convT2I(tab *byte, elem *any) (ret any)
-func convT2Inoptr(tab *byte, elem *any) (ret any)
-
// interface type assertions x.(T)
func assertE2I(inter *byte, typ *byte) *byte
func assertE2I2(inter *byte, eface any) (ret any)
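A conceptual sketch of how the reshaped conversion helpers are used (placeholder names; the actual lowering lives in the compiler's walk phase, not in this file): the compiler now supplies the type or itab word itself and asks the runtime only for the data word, instead of having the removed convT2E/convT2I build the whole interface value.

    // var v T
    // var e interface{} = v
    // is lowered roughly to:
    //
    //	e._type = <T's type descriptor>
    //	e.data  = convT(<T's type descriptor>, &v) // convTnoptr when T has no pointers
    //
    // Converting one non-empty interface to another keeps the data word and
    // recomputes only the itab:
    //
    //	j.itab = convI2I(<J's interface type descriptor>, i.itab)
    //	j.data = i.data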
diff --git a/src/cmd/compile/internal/typecheck/const.go b/src/cmd/compile/internal/typecheck/const.go
index 761b043794..f8150d249a 100644
--- a/src/cmd/compile/internal/typecheck/const.go
+++ b/src/cmd/compile/internal/typecheck/const.go
@@ -901,7 +901,7 @@ func evalunsafe(n ir.Node) int64 {
switch tsel.Op() {
case ir.ODOT, ir.ODOTPTR:
break
- case ir.OCALLPART:
+ case ir.OMETHVALUE:
base.Errorf("invalid expression %v: argument is a method value", n)
return 0
default:
diff --git a/src/cmd/compile/internal/typecheck/crawler.go b/src/cmd/compile/internal/typecheck/crawler.go
new file mode 100644
index 0000000000..9a348b9f37
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/crawler.go
@@ -0,0 +1,231 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+)
+
+// crawlExports crawls the type/object graph rooted at the given list
+// of exported objects. Any functions that are found to be potentially
+// callable by importers are marked with ExportInline so that
+// iexport.go knows to re-export their inline body.
+func crawlExports(exports []*ir.Name) {
+ p := crawler{
+ marked: make(map[*types.Type]bool),
+ embedded: make(map[*types.Type]bool),
+ }
+ for _, n := range exports {
+ p.markObject(n)
+ }
+}
+
+type crawler struct {
+ marked map[*types.Type]bool // types already seen by markType
+ embedded map[*types.Type]bool // types already seen by markEmbed
+}
+
+// markObject visits a reachable object.
+func (p *crawler) markObject(n *ir.Name) {
+ if n.Op() == ir.ONAME && n.Class == ir.PFUNC {
+ p.markInlBody(n)
+ }
+
+ // If a declared type name is reachable, users can embed it in their
+ // own types, which makes even its unexported methods reachable.
+ if n.Op() == ir.OTYPE {
+ p.markEmbed(n.Type())
+ }
+
+ p.markType(n.Type())
+}
+
+// markType recursively visits types reachable from t to identify
+// functions whose inline bodies may be needed.
+func (p *crawler) markType(t *types.Type) {
+ if t.IsInstantiatedGeneric() {
+ // Re-instantiated types don't add anything new, so don't follow them.
+ return
+ }
+ if p.marked[t] {
+ return
+ }
+ p.marked[t] = true
+
+ // If this is a defined type, mark all of its associated
+ // methods. Skip interface types because t.Methods contains
+ // only their unexpanded method set (i.e., exclusive of
+ // interface embeddings), and the switch statement below
+ // handles their full method set.
+ if t.Sym() != nil && t.Kind() != types.TINTER {
+ for _, m := range t.Methods().Slice() {
+ if types.IsExported(m.Sym.Name) {
+ p.markObject(m.Nname.(*ir.Name))
+ }
+ }
+ }
+
+ // Recursively mark any types that can be produced given a
+ // value of type t: dereferencing a pointer; indexing or
+ // iterating over an array, slice, or map; receiving from a
+ // channel; accessing a struct field or interface method; or
+ // calling a function.
+ //
+ // Notably, we don't mark function parameter types, because
+ // the user already needs some way to construct values of
+ // those types.
+ switch t.Kind() {
+ case types.TPTR, types.TARRAY, types.TSLICE:
+ p.markType(t.Elem())
+
+ case types.TCHAN:
+ if t.ChanDir().CanRecv() {
+ p.markType(t.Elem())
+ }
+
+ case types.TMAP:
+ p.markType(t.Key())
+ p.markType(t.Elem())
+
+ case types.TSTRUCT:
+ for _, f := range t.FieldSlice() {
+ if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
+ p.markType(f.Type)
+ }
+ }
+
+ case types.TFUNC:
+ for _, f := range t.Results().FieldSlice() {
+ p.markType(f.Type)
+ }
+
+ case types.TINTER:
+ // TODO(danscales) - will have to deal with the types in interface
+ // elements here when implemented in types2 and represented in types1.
+ for _, f := range t.AllMethods().Slice() {
+ if types.IsExported(f.Sym.Name) {
+ p.markType(f.Type)
+ }
+ }
+
+ case types.TTYPEPARAM:
+ // No other type that needs to be followed.
+ }
+}
+
+// markEmbed is similar to markType, but handles finding methods that
+// need to be re-exported because t can be embedded in user code
+// (possibly transitively).
+func (p *crawler) markEmbed(t *types.Type) {
+ if t.IsPtr() {
+ // Defined pointer type; not allowed to embed anyway.
+ if t.Sym() != nil {
+ return
+ }
+ t = t.Elem()
+ }
+
+ if t.IsInstantiatedGeneric() {
+ // Re-instantiated types don't add anything new, so don't follow them.
+ return
+ }
+
+ if p.embedded[t] {
+ return
+ }
+ p.embedded[t] = true
+
+ // If t is a defined type, then re-export all of its methods. Unlike
+ // in markType, we include even unexported methods here, because we
+ // still need to generate wrappers for them, even if the user can't
+ // refer to them directly.
+ if t.Sym() != nil && t.Kind() != types.TINTER {
+ for _, m := range t.Methods().Slice() {
+ p.markObject(m.Nname.(*ir.Name))
+ }
+ }
+
+ // If t is a struct, recursively visit its embedded fields.
+ if t.IsStruct() {
+ for _, f := range t.FieldSlice() {
+ if f.Embedded != 0 {
+ p.markEmbed(f.Type)
+ }
+ }
+ }
+}
+
+// markInlBody marks n's inline body for export and recursively
+// ensures all called functions are marked too.
+func (p *crawler) markInlBody(n *ir.Name) {
+ if n == nil {
+ return
+ }
+ if n.Op() != ir.ONAME || n.Class != ir.PFUNC {
+ base.Fatalf("markInlBody: unexpected %v, %v, %v", n, n.Op(), n.Class)
+ }
+ fn := n.Func
+ if fn == nil {
+ base.Fatalf("markInlBody: missing Func on %v", n)
+ }
+ if fn.Inl == nil {
+ return
+ }
+
+ if fn.ExportInline() {
+ return
+ }
+ fn.SetExportInline(true)
+
+ ImportedBody(fn)
+
+ var doFlood func(n ir.Node)
+ doFlood = func(n ir.Node) {
+ switch n.Op() {
+ case ir.OMETHEXPR, ir.ODOTMETH:
+ p.markInlBody(ir.MethodExprName(n))
+
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ switch n.Class {
+ case ir.PFUNC:
+ p.markInlBody(n)
+ Export(n)
+ case ir.PEXTERN:
+ Export(n)
+ }
+ p.checkGenericType(n.Type())
+ case ir.OTYPE:
+ p.checkGenericType(n.Type())
+ case ir.OMETHVALUE:
+ // Okay, because we don't yet inline indirect
+ // calls to method values.
+ case ir.OCLOSURE:
+ // VisitList doesn't visit closure bodies, so force a
+ // recursive call to VisitList on the body of the closure.
+ ir.VisitList(n.(*ir.ClosureExpr).Func.Body, doFlood)
+ }
+ }
+
+ // Recursively identify all referenced functions for
+ // reexport. We want to include even non-called functions,
+ // because after inlining they might be callable.
+ ir.VisitList(fn.Inl.Body, doFlood)
+}
+
+// checkGenericType ensures that we call markType() on any base generic type that
+// is written to the export file (even if not explicitly marked
+// for export), so its methods will be available for inlining if needed.
+func (p *crawler) checkGenericType(t *types.Type) {
+ if t != nil && t.HasTParam() {
+ if t.OrigSym != nil {
+ // Convert to the base generic type.
+ t = t.OrigSym.Def.Type()
+ }
+ p.markType(t)
+ }
+}
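A small illustration of what the crawler is for (a hypothetical package, not part of the patch): any function whose inline body can become reachable through an exported object must be re-exported, even if the function itself is unexported.

    package q

    type Point struct{ x, y int } // exported type: importers may embed it

    // Scale is exported and cheap enough to get an inline body, so crawlExports
    // reaches it via markObject/markInlBody.
    func (p Point) Scale(k int) Point { return Point{mul(p.x, k), mul(p.y, k)} }

    // mul is unexported, but it is referenced from Scale's inline body, so
    // markInlBody's doFlood marks and exports it too: an importer that inlines
    // Scale needs mul's body as well.
    func mul(a, b int) int { return a * b }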
diff --git a/src/cmd/compile/internal/typecheck/dcl.go b/src/cmd/compile/internal/typecheck/dcl.go
index 5b771e3c0b..11e20f0f07 100644
--- a/src/cmd/compile/internal/typecheck/dcl.go
+++ b/src/cmd/compile/internal/typecheck/dcl.go
@@ -6,7 +6,7 @@ package typecheck
import (
"fmt"
- "strconv"
+ "sync"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
@@ -363,12 +363,10 @@ func funcargs(nt *ir.FuncType) {
}
// declare the out arguments.
- gen := len(nt.Params)
- for _, n := range nt.Results {
+ for i, n := range nt.Results {
if n.Sym == nil {
// Name so that escape analysis can track it. ~r stands for 'result'.
- n.Sym = LookupNum("~r", gen)
- gen++
+ n.Sym = LookupNum("~r", i)
}
if n.Sym.IsBlank() {
// Give it a name so we can assign to it during return. ~b stands for 'blank'.
@@ -377,8 +375,7 @@ func funcargs(nt *ir.FuncType) {
// func g() int
// f is allowed to use a plain 'return' with no arguments, while g is not.
// So the two cases must be distinguished.
- n.Sym = LookupNum("~b", gen)
- gen++
+ n.Sym = LookupNum("~b", i)
}
funcarg(n, ir.PPARAMOUT)
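The renumbering above only changes the compiler-internal names given to unnamed results; a sketch of the effect:

    // func f(a, b int) (int, string)
    //
    // old: results named ~r2, ~r3 (the counter continued after the parameters)
    // new: results named ~r0, ~r1 (indexed independently of the parameters)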
@@ -431,6 +428,7 @@ func TempAt(pos src.XPos, curfn *ir.Func, t *types.Type) *ir.Name {
n := ir.NewNameAt(pos, s)
s.Def = n
n.SetType(t)
+ n.SetTypecheck(1)
n.Class = ir.PAUTO
n.SetEsc(ir.EscNever)
n.Curfn = curfn
@@ -443,15 +441,39 @@ func TempAt(pos src.XPos, curfn *ir.Func, t *types.Type) *ir.Name {
return n
}
+var (
+ autotmpnamesmu sync.Mutex
+ autotmpnames []string
+)
+
// autotmpname returns the name for an autotmp variable numbered n.
func autotmpname(n int) string {
- // Give each tmp a different name so that they can be registerized.
- // Add a preceding . to avoid clashing with legal names.
- const prefix = ".autotmp_"
- // Start with a buffer big enough to hold a large n.
- b := []byte(prefix + " ")[:len(prefix)]
- b = strconv.AppendInt(b, int64(n), 10)
- return types.InternString(b)
+ autotmpnamesmu.Lock()
+ defer autotmpnamesmu.Unlock()
+
+ // Grow autotmpnames, if needed.
+ if n >= len(autotmpnames) {
+ autotmpnames = append(autotmpnames, make([]string, n+1-len(autotmpnames))...)
+ autotmpnames = autotmpnames[:cap(autotmpnames)]
+ }
+
+ s := autotmpnames[n]
+ if s == "" {
+ // Give each tmp a different name so that they can be registerized.
+ // Add a preceding . to avoid clashing with legal names.
+ prefix := ".autotmp_%d"
+
+ // In quirks mode, pad out the number to stabilize variable
+ // sorting. This ensures autotmps 8 and 9 sort the same way even
+ // if they get renumbered to 9 and 10, respectively.
+ if base.Debug.UnifiedQuirks != 0 {
+ prefix = ".autotmp_%06d"
+ }
+
+ s = fmt.Sprintf(prefix, n)
+ autotmpnames[n] = s
+ }
+ return s
}
// f is method type, with receiver.
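For illustration, the two formats above produce (same index, different modes):

    // autotmpname(8) == ".autotmp_8"      // default
    // autotmpname(8) == ".autotmp_000008" // quirks mode (base.Debug.UnifiedQuirks != 0):
    //                                     // zero padding keeps autotmp names sorting the
    //                                     // same way even after renumbering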
diff --git a/src/cmd/compile/internal/typecheck/export.go b/src/cmd/compile/internal/typecheck/export.go
index 63d0a1ec6c..30726d4327 100644
--- a/src/cmd/compile/internal/typecheck/export.go
+++ b/src/cmd/compile/internal/typecheck/export.go
@@ -15,22 +15,22 @@ import (
// importalias declares symbol s as an imported type alias with type t.
// ipkg is the package being imported
-func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
- return importobj(ipkg, pos, s, ir.OTYPE, ir.PEXTERN, t)
+func importalias(pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
+ return importobj(pos, s, ir.OTYPE, ir.PEXTERN, t)
}
// importconst declares symbol s as an imported constant with type t and value val.
// ipkg is the package being imported
-func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val constant.Value) *ir.Name {
- n := importobj(ipkg, pos, s, ir.OLITERAL, ir.PEXTERN, t)
+func importconst(pos src.XPos, s *types.Sym, t *types.Type, val constant.Value) *ir.Name {
+ n := importobj(pos, s, ir.OLITERAL, ir.PEXTERN, t)
n.SetVal(val)
return n
}
// importfunc declares symbol s as an imported function with type t.
// ipkg is the package being imported
-func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
- n := importobj(ipkg, pos, s, ir.ONAME, ir.PFUNC, t)
+func importfunc(pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
+ n := importobj(pos, s, ir.ONAME, ir.PFUNC, t)
n.Func = ir.NewFunc(pos)
n.Func.Nname = n
return n
@@ -38,8 +38,8 @@ func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.
// importobj declares symbol s as an imported object representable by op.
// ipkg is the package being imported
-func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) *ir.Name {
- n := importsym(ipkg, pos, s, op, ctxt)
+func importobj(pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) *ir.Name {
+ n := importsym(pos, s, op, ctxt)
n.SetType(t)
if ctxt == ir.PFUNC {
n.Sym().SetFunc(true)
@@ -47,7 +47,7 @@ func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Cl
return n
}
-func importsym(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class) *ir.Name {
+func importsym(pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class) *ir.Name {
if n := s.PkgDef(); n != nil {
base.Fatalf("importsym of symbol that already exists: %v", n)
}
@@ -61,14 +61,14 @@ func importsym(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Cl
// importtype returns the named type declared by symbol s.
// If no such type has been declared yet, a forward declaration is returned.
// ipkg is the package being imported
-func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *ir.Name {
- n := importsym(ipkg, pos, s, ir.OTYPE, ir.PEXTERN)
+func importtype(pos src.XPos, s *types.Sym) *ir.Name {
+ n := importsym(pos, s, ir.OTYPE, ir.PEXTERN)
n.SetType(types.NewNamed(n))
return n
}
// importvar declares symbol s as an imported variable with type t.
// ipkg is the package being imported
-func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
- return importobj(ipkg, pos, s, ir.ONAME, ir.PEXTERN, t)
+func importvar(pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
+ return importobj(pos, s, ir.ONAME, ir.PEXTERN, t)
}
diff --git a/src/cmd/compile/internal/typecheck/expr.go b/src/cmd/compile/internal/typecheck/expr.go
index 24d141e8a2..7e974dfda8 100644
--- a/src/cmd/compile/internal/typecheck/expr.go
+++ b/src/cmd/compile/internal/typecheck/expr.go
@@ -311,14 +311,23 @@ func tcCompLit(n *ir.CompLitExpr) (res ir.Node) {
f := t.Field(i)
s := f.Sym
- if s != nil && !types.IsExported(s.Name) && s.Pkg != types.LocalPkg {
- base.Errorf("implicit assignment of unexported field '%s' in %v literal", s.Name, t)
+
+ // Do the test for assigning to unexported fields.
+ // But if this is an instantiated function, then
+ // the function has already been typechecked. In
+ // that case, don't do the test, since it can fail
+ // for the closure structs created in
+ // walkClosure(), because the instantiated
+ // function is compiled as if in the source
+ // package of the generic function.
+ if !(ir.CurFunc != nil && strings.Index(ir.CurFunc.Nname.Sym().Name, "[") >= 0) {
+ if s != nil && !types.IsExported(s.Name) && s.Pkg != types.LocalPkg {
+ base.Errorf("implicit assignment of unexported field '%s' in %v literal", s.Name, t)
+ }
}
// No pushtype allowed here. Must name fields for that.
n1 = AssignConv(n1, f.Type, "field value")
- sk := ir.NewStructKeyExpr(base.Pos, f.Sym, n1)
- sk.Offset = f.Offset
- ls[i] = sk
+ ls[i] = ir.NewStructKeyExpr(base.Pos, f, n1)
}
if len(ls) < t.NumFields() {
base.Errorf("too few values in %v", n)
@@ -328,77 +337,33 @@ func tcCompLit(n *ir.CompLitExpr) (res ir.Node) {
// keyed list
ls := n.List
- for i, l := range ls {
- ir.SetPos(l)
-
- if l.Op() == ir.OKEY {
- kv := l.(*ir.KeyExpr)
- key := kv.Key
-
- // Sym might have resolved to name in other top-level
- // package, because of import dot. Redirect to correct sym
- // before we do the lookup.
- s := key.Sym()
- if id, ok := key.(*ir.Ident); ok && DotImportRefs[id] != nil {
- s = Lookup(s.Name)
- }
-
- // An OXDOT uses the Sym field to hold
- // the field to the right of the dot,
- // so s will be non-nil, but an OXDOT
- // is never a valid struct literal key.
- if s == nil || s.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || s.IsBlank() {
- base.Errorf("invalid field name %v in struct initializer", key)
- continue
- }
-
- l = ir.NewStructKeyExpr(l.Pos(), s, kv.Value)
- ls[i] = l
- }
-
- if l.Op() != ir.OSTRUCTKEY {
- if !errored {
- base.Errorf("mixture of field:value and value initializers")
- errored = true
- }
- ls[i] = Expr(ls[i])
- continue
- }
- l := l.(*ir.StructKeyExpr)
-
- f := Lookdot1(nil, l.Field, t, t.Fields(), 0)
- if f == nil {
- if ci := Lookdot1(nil, l.Field, t, t.Fields(), 2); ci != nil { // Case-insensitive lookup.
- if visible(ci.Sym) {
- base.Errorf("unknown field '%v' in struct literal of type %v (but does have %v)", l.Field, t, ci.Sym)
- } else if nonexported(l.Field) && l.Field.Name == ci.Sym.Name { // Ensure exactness before the suggestion.
- base.Errorf("cannot refer to unexported field '%v' in struct literal of type %v", l.Field, t)
- } else {
- base.Errorf("unknown field '%v' in struct literal of type %v", l.Field, t)
+ for i, n := range ls {
+ ir.SetPos(n)
+
+ sk, ok := n.(*ir.StructKeyExpr)
+ if !ok {
+ kv, ok := n.(*ir.KeyExpr)
+ if !ok {
+ if !errored {
+ base.Errorf("mixture of field:value and value initializers")
+ errored = true
}
+ ls[i] = Expr(n)
continue
}
- var f *types.Field
- p, _ := dotpath(l.Field, t, &f, true)
- if p == nil || f.IsMethod() {
- base.Errorf("unknown field '%v' in struct literal of type %v", l.Field, t)
+
+ sk = tcStructLitKey(t, kv)
+ if sk == nil {
continue
}
- // dotpath returns the parent embedded types in reverse order.
- var ep []string
- for ei := len(p) - 1; ei >= 0; ei-- {
- ep = append(ep, p[ei].field.Sym.Name)
- }
- ep = append(ep, l.Field.Name)
- base.Errorf("cannot use promoted field %v in struct literal of type %v", strings.Join(ep, "."), t)
- continue
+
+ fielddup(sk.Sym().Name, hash)
}
- fielddup(f.Sym.Name, hash)
- l.Offset = f.Offset
// No pushtype allowed here. Tried and rejected.
- l.Value = Expr(l.Value)
- l.Value = AssignConv(l.Value, f.Type, "field value")
+ sk.Value = Expr(sk.Value)
+ sk.Value = AssignConv(sk.Value, sk.Field.Type, "field value")
+ ls[i] = sk
}
}
@@ -409,6 +374,60 @@ func tcCompLit(n *ir.CompLitExpr) (res ir.Node) {
return n
}
+// tcStructLitKey typechecks an OKEY node that appeared within a
+// struct literal.
+func tcStructLitKey(typ *types.Type, kv *ir.KeyExpr) *ir.StructKeyExpr {
+ key := kv.Key
+
+ // Sym might have resolved to name in other top-level
+ // package, because of import dot. Redirect to correct sym
+ // before we do the lookup.
+ sym := key.Sym()
+ if id, ok := key.(*ir.Ident); ok && DotImportRefs[id] != nil {
+ sym = Lookup(sym.Name)
+ }
+
+ // An OXDOT uses the Sym field to hold
+ // the field to the right of the dot,
+ // so s will be non-nil, but an OXDOT
+ // is never a valid struct literal key.
+ if sym == nil || sym.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || sym.IsBlank() {
+ base.Errorf("invalid field name %v in struct initializer", key)
+ return nil
+ }
+
+ if f := Lookdot1(nil, sym, typ, typ.Fields(), 0); f != nil {
+ return ir.NewStructKeyExpr(kv.Pos(), f, kv.Value)
+ }
+
+ if ci := Lookdot1(nil, sym, typ, typ.Fields(), 2); ci != nil { // Case-insensitive lookup.
+ if visible(ci.Sym) {
+ base.Errorf("unknown field '%v' in struct literal of type %v (but does have %v)", sym, typ, ci.Sym)
+ } else if nonexported(sym) && sym.Name == ci.Sym.Name { // Ensure exactness before the suggestion.
+ base.Errorf("cannot refer to unexported field '%v' in struct literal of type %v", sym, typ)
+ } else {
+ base.Errorf("unknown field '%v' in struct literal of type %v", sym, typ)
+ }
+ return nil
+ }
+
+ var f *types.Field
+ p, _ := dotpath(sym, typ, &f, true)
+ if p == nil || f.IsMethod() {
+ base.Errorf("unknown field '%v' in struct literal of type %v", sym, typ)
+ return nil
+ }
+
+ // dotpath returns the parent embedded types in reverse order.
+ var ep []string
+ for ei := len(p) - 1; ei >= 0; ei-- {
+ ep = append(ep, p[ei].field.Sym.Name)
+ }
+ ep = append(ep, sym.Name)
+ base.Errorf("cannot use promoted field %v in struct literal of type %v", strings.Join(ep, "."), typ)
+ return nil
+}
+
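As context for the refactored key checking above, here is a small standalone Go snippet (an illustration of the language rules, not compiler code) showing the source-level cases tcStructLitKey distinguishes; the rejected literals are kept as comments so the example compiles.
package main

import "fmt"

type Inner struct{ N int }

type Outer struct {
	Inner        // embedded; N is promoted
	Label string
}

func main() {
	// Valid keyed literal: each key names a field declared directly on Outer.
	o := Outer{Inner: Inner{N: 1}, Label: "ok"}

	// Keys that tcStructLitKey rejects (kept as comments so this compiles):
	//   Outer{N: 1}        // cannot use promoted field Inner.N in struct literal of type Outer
	//   Outer{label: "x"}  // unknown field 'label' in struct literal of type Outer (but does have Label)
	fmt.Println(o)
}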
// tcConv typechecks an OCONV node.
func tcConv(n *ir.ConvExpr) ir.Node {
types.CheckSize(n.Type()) // ensure width is calculated for backend
@@ -522,8 +541,8 @@ func tcDot(n *ir.SelectorExpr, top int) ir.Node {
}
if (n.Op() == ir.ODOTINTER || n.Op() == ir.ODOTMETH) && top&ctxCallee == 0 {
- n.SetOp(ir.OCALLPART)
- n.SetType(MethodValueWrapper(n).Type())
+ n.SetOp(ir.OMETHVALUE)
+ n.SetType(NewMethodType(n.Type(), nil))
}
return n
}
diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go
index fbcc784627..7dec65c1d6 100644
--- a/src/cmd/compile/internal/typecheck/func.go
+++ b/src/cmd/compile/internal/typecheck/func.go
@@ -8,28 +8,29 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
+ "cmd/internal/src"
"fmt"
"go/constant"
"go/token"
)
-// package all the arguments that match a ... T parameter into a []T.
-func MakeDotArgs(typ *types.Type, args []ir.Node) ir.Node {
+// MakeDotArgs packages all the arguments that match a ... T parameter into a []T.
+func MakeDotArgs(pos src.XPos, typ *types.Type, args []ir.Node) ir.Node {
var n ir.Node
if len(args) == 0 {
- n = NodNil()
+ n = ir.NewNilExpr(pos)
n.SetType(typ)
} else {
- lit := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ), nil)
- lit.List.Append(args...)
+ args = append([]ir.Node(nil), args...)
+ lit := ir.NewCompLitExpr(pos, ir.OCOMPLIT, ir.TypeNode(typ), args)
lit.SetImplicit(true)
n = lit
}
n = Expr(n)
if n.Type() == nil {
- base.Fatalf("mkdotargslice: typecheck failed")
+ base.FatalfAt(pos, "mkdotargslice: typecheck failed")
}
return n
}
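As a source-level illustration of what MakeDotArgs/FixVariadicCall produce (a hedged sketch, not compiler internals): a call that passes loose arguments to a variadic parameter behaves as if the trailing arguments had been packed into a slice literal.
package main

import "fmt"

func sum(xs ...int) int {
	total := 0
	for _, x := range xs {
		total += x
	}
	return total
}

func main() {
	// The front end turns the loose arguments of the first call into a
	// single []int for the ...int parameter, so the two calls are equivalent.
	fmt.Println(sum(1, 2, 3))           // arguments packed into a []int
	fmt.Println(sum([]int{1, 2, 3}...)) // slice passed through unchanged
}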
@@ -47,7 +48,7 @@ func FixVariadicCall(call *ir.CallExpr) {
args := call.Args
extra := args[vi:]
- slice := MakeDotArgs(vt, extra)
+ slice := MakeDotArgs(call.Pos(), vt, extra)
for i := range extra {
extra[i] = nil // allow GC
}
@@ -56,6 +57,25 @@ func FixVariadicCall(call *ir.CallExpr) {
call.IsDDD = true
}
+// FixMethodCall rewrites a method call t.M(...) into a function call T.M(t, ...).
+func FixMethodCall(call *ir.CallExpr) {
+ if call.X.Op() != ir.ODOTMETH {
+ return
+ }
+
+ dot := call.X.(*ir.SelectorExpr)
+
+ fn := Expr(ir.NewSelectorExpr(dot.Pos(), ir.OXDOT, ir.TypeNode(dot.X.Type()), dot.Selection.Sym))
+
+ args := make([]ir.Node, 1+len(call.Args))
+ args[0] = dot.X
+ copy(args[1:], call.Args)
+
+ call.SetOp(ir.OCALLFUNC)
+ call.X = fn
+ call.Args = args
+}
+
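The rewrite FixMethodCall performs corresponds to Go's method-expression form, where the receiver becomes the first argument. A minimal standalone example (illustrative only, not the IR transformation itself):
package main

import "fmt"

type Counter struct{ n int }

func (c Counter) Add(d int) int { return c.n + d }

func main() {
	c := Counter{n: 10}

	// Ordinary method call: t.M(...).
	fmt.Println(c.Add(5)) // 15

	// Method-expression form: T.M(t, ...), with the receiver as the first
	// argument. FixMethodCall rewrites the IR of the call above into this shape.
	fmt.Println(Counter.Add(c, 5)) // 15
}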
// ClosureType returns the struct type used to hold all the information
// needed in the closure for clo (clo must be a OCLOSURE node).
// The address of a variable of the returned type can be cast to a func.
@@ -73,8 +93,25 @@ func ClosureType(clo *ir.ClosureExpr) *types.Type {
// The information appears in the binary in the form of type descriptors;
// the struct is unnamed so that closures in multiple packages with the
// same struct type can share the descriptor.
+
+ // Make sure the .F field is in the same package as the rest of the
+ // fields. This deals with closures in instantiated functions, which are
+ // compiled as if from the source package of the generic function.
+ var pkg *types.Pkg
+ if len(clo.Func.ClosureVars) == 0 {
+ pkg = types.LocalPkg
+ } else {
+ for _, v := range clo.Func.ClosureVars {
+ if pkg == nil {
+ pkg = v.Sym().Pkg
+ } else if pkg != v.Sym().Pkg {
+ base.Fatalf("Closure variables from multiple packages")
+ }
+ }
+ }
+
fields := []*types.Field{
- types.NewField(base.Pos, Lookup(".F"), types.Types[types.TUINTPTR]),
+ types.NewField(base.Pos, pkg.Lookup(".F"), types.Types[types.TUINTPTR]),
}
for _, v := range clo.Func.ClosureVars {
typ := v.Type()
@@ -88,10 +125,10 @@ func ClosureType(clo *ir.ClosureExpr) *types.Type {
return typ
}
-// PartialCallType returns the struct type used to hold all the information
-// needed in the closure for n (n must be a OCALLPART node).
-// The address of a variable of the returned type can be cast to a func.
-func PartialCallType(n *ir.SelectorExpr) *types.Type {
+// MethodValueType returns the struct type used to hold all the information
+// needed in the closure for an OMETHVALUE node. The address of a variable of
+// the returned type can be cast to a func.
+func MethodValueType(n *ir.SelectorExpr) *types.Type {
t := types.NewStruct(types.NoPkg, []*types.Field{
types.NewField(base.Pos, Lookup("F"), types.Types[types.TUINTPTR]),
types.NewField(base.Pos, Lookup("R"), n.X.Type()),
@@ -181,153 +218,38 @@ func fnpkg(fn *ir.Name) *types.Pkg {
return fn.Sym().Pkg
}
-// ClosureName generates a new unique name for a closure within
-// outerfunc.
-func ClosureName(outerfunc *ir.Func) *types.Sym {
- outer := "glob."
- prefix := "func"
- gen := &globClosgen
-
- if outerfunc != nil {
- if outerfunc.OClosure != nil {
- prefix = ""
- }
-
- outer = ir.FuncName(outerfunc)
-
- // There may be multiple functions named "_". In those
- // cases, we can't use their individual Closgens as it
- // would lead to name clashes.
- if !ir.IsBlank(outerfunc.Nname) {
- gen = &outerfunc.Closgen
- }
- }
-
- *gen++
- return Lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen))
-}
-
-// globClosgen is like Func.Closgen, but for the global scope.
-var globClosgen int32
-
-// MethodValueWrapper returns the DCLFUNC node representing the
-// wrapper function (*-fm) needed for the given method value. If the
-// wrapper function hasn't already been created yet, it's created and
-// added to Target.Decls.
-//
-// TODO(mdempsky): Move into walk. This isn't part of type checking.
-func MethodValueWrapper(dot *ir.SelectorExpr) *ir.Func {
- if dot.Op() != ir.OCALLPART {
- base.Fatalf("MethodValueWrapper: unexpected %v (%v)", dot, dot.Op())
- }
-
- t0 := dot.Type()
- meth := dot.Sel
- rcvrtype := dot.X.Type()
- sym := ir.MethodSymSuffix(rcvrtype, meth, "-fm")
-
- if sym.Uniq() {
- return sym.Def.(*ir.Func)
- }
- sym.SetUniq(true)
-
- savecurfn := ir.CurFunc
- saveLineNo := base.Pos
- ir.CurFunc = nil
-
- // Set line number equal to the line number where the method is declared.
- if pos := dot.Selection.Pos; pos.IsKnown() {
- base.Pos = pos
- }
- // Note: !dot.Selection.Pos.IsKnown() happens for method expressions where
- // the method is implicitly declared. The Error method of the
- // built-in error type is one such method. We leave the line
- // number at the use of the method expression in this
- // case. See issue 29389.
-
- tfn := ir.NewFuncType(base.Pos, nil,
- NewFuncParams(t0.Params(), true),
- NewFuncParams(t0.Results(), false))
-
- fn := DeclFunc(sym, tfn)
- fn.SetDupok(true)
- fn.SetNeedctxt(true)
- fn.SetWrapper(true)
-
- // Declare and initialize variable holding receiver.
- ptr := ir.NewNameAt(base.Pos, Lookup(".this"))
- ptr.Class = ir.PAUTOHEAP
- ptr.SetType(rcvrtype)
- ptr.Curfn = fn
- ptr.SetIsClosureVar(true)
- ptr.SetByval(true)
- fn.ClosureVars = append(fn.ClosureVars, ptr)
-
- call := ir.NewCallExpr(base.Pos, ir.OCALL, ir.NewSelectorExpr(base.Pos, ir.OXDOT, ptr, meth), nil)
- call.Args = ir.ParamNames(tfn.Type())
- call.IsDDD = tfn.Type().IsVariadic()
-
- var body ir.Node = call
- if t0.NumResults() != 0 {
- ret := ir.NewReturnStmt(base.Pos, nil)
- ret.Results = []ir.Node{call}
- body = ret
- }
-
- fn.Body = []ir.Node{body}
- FinishFuncBody()
-
- Func(fn)
- // Need to typecheck the body of the just-generated wrapper.
- // typecheckslice() requires that Curfn is set when processing an ORETURN.
- ir.CurFunc = fn
- Stmts(fn.Body)
- sym.Def = fn
- Target.Decls = append(Target.Decls, fn)
- ir.CurFunc = savecurfn
- base.Pos = saveLineNo
-
- return fn
-}
-
// tcClosure typechecks an OCLOSURE node. It also creates the named
// function associated with the closure.
// TODO: This creation of the named function should probably really be done in a
// separate pass from type-checking.
-func tcClosure(clo *ir.ClosureExpr, top int) {
+func tcClosure(clo *ir.ClosureExpr, top int) ir.Node {
fn := clo.Func
+
+ // We used to allow IR builders to typecheck the underlying Func
+ // themselves, but that led to too much variety and inconsistency
+ // around who's responsible for naming the function, typechecking
+ // it, or adding it to Target.Decls.
+ //
+ // It's now all or nothing. Callers are still allowed to do these
+ // themselves, but then they assume responsibility for all of them.
+ if fn.Typecheck() == 1 {
+ base.FatalfAt(fn.Pos(), "underlying closure func already typechecked: %v", fn)
+ }
+
// Set current associated iota value, so iota can be used inside
// function in ConstSpec, see issue #22344
if x := getIotaValue(); x >= 0 {
fn.Iota = x
}
- fn.SetClosureCalled(top&ctxCallee != 0)
-
- // Do not typecheck fn twice, otherwise, we will end up pushing
- // fn to Target.Decls multiple times, causing InitLSym called twice.
- // See #30709
- if fn.Typecheck() == 1 {
- clo.SetType(fn.Type())
- return
- }
-
- // Don't give a name and add to Target.Decls if we are typechecking an inlined
- // body in ImportedBody(), since we only want to create the named function
- // when the closure is actually inlined (and then we force a typecheck
- // explicitly in (*inlsubst).node()).
- if !inTypeCheckInl {
- fn.Nname.SetSym(ClosureName(ir.CurFunc))
- ir.MarkFunc(fn.Nname)
- }
+ ir.NameClosure(clo, ir.CurFunc)
Func(fn)
- clo.SetType(fn.Type())
// Type check the body now, but only if we're inside a function.
// At top level (in a variable initialization: curfn==nil) we're not
// ready to type check code yet; we'll check it later, because the
// underlying closure function we create is added to Target.Decls.
- if ir.CurFunc != nil && clo.Type() != nil {
+ if ir.CurFunc != nil {
oldfn := ir.CurFunc
ir.CurFunc = fn
Stmts(fn.Body)
@@ -353,14 +275,17 @@ func tcClosure(clo *ir.ClosureExpr, top int) {
}
fn.ClosureVars = fn.ClosureVars[:out]
- if base.Flag.W > 1 {
- s := fmt.Sprintf("New closure func: %s", ir.FuncName(fn))
- ir.Dump(s, fn)
- }
- if !inTypeCheckInl {
- // Add function to Target.Decls once only when we give it a name
- Target.Decls = append(Target.Decls, fn)
+ clo.SetType(fn.Type())
+
+ target := Target
+ if inTypeCheckInl {
+ // We're typechecking an imported function, so it's not actually
+ // part of Target. Skip adding it to Target.Decls so we don't
+ // compile it again.
+ target = nil
}
+
+ return ir.UseClosure(clo, target)
}
// type check function definition
@@ -390,10 +315,6 @@ func tcFunc(n *ir.Func) {
// tcCall typechecks an OCALL node.
func tcCall(n *ir.CallExpr, top int) ir.Node {
- n.Use = ir.CallUseExpr
- if top == ctxStmt {
- n.Use = ir.CallUseStmt
- }
Stmts(n.Init()) // imported rewritten f(g()) calls (#30907)
n.X = typecheck(n.X, ctxExpr|ctxType|ctxCallee)
if n.X.Diag() {
@@ -509,6 +430,7 @@ func tcCall(n *ir.CallExpr, top int) ir.Node {
}
typecheckaste(ir.OCALL, n.X, n.IsDDD, t.Params(), n.Args, func() string { return fmt.Sprintf("argument to %v", n.X) })
+ FixMethodCall(n)
if t.NumResults() == 0 {
return n
}
@@ -979,6 +901,21 @@ func tcRecover(n *ir.CallExpr) ir.Node {
return n
}
+// tcRecoverFP typechecks an ORECOVERFP node.
+func tcRecoverFP(n *ir.CallExpr) ir.Node {
+ if len(n.Args) != 1 {
+ base.FatalfAt(n.Pos(), "wrong number of arguments: %v", n)
+ }
+
+ n.Args[0] = Expr(n.Args[0])
+ if !n.Args[0].Type().IsPtrShaped() {
+ base.FatalfAt(n.Pos(), "%L is not pointer shaped", n.Args[0])
+ }
+
+ n.SetType(types.Types[types.TINTER])
+ return n
+}
+
// tcUnsafeAdd typechecks an OUNSAFEADD node.
func tcUnsafeAdd(n *ir.BinaryExpr) *ir.BinaryExpr {
if !types.AllowsGoVersion(curpkg(), 1, 17) {
diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go
index 64d68ef625..75b4931c31 100644
--- a/src/cmd/compile/internal/typecheck/iexport.go
+++ b/src/cmd/compile/internal/typecheck/iexport.go
@@ -173,6 +173,8 @@
// }
//
//
+// TODO(danscales): fill in doc for 'type TypeParamType' and 'type InstType'
+//
// type Signature struct {
// Params []Param
// Results []Param
@@ -202,7 +204,6 @@
package typecheck
import (
- "bufio"
"bytes"
"crypto/md5"
"encoding/binary"
@@ -221,9 +222,18 @@ import (
)
// Current indexed export format version. Increase with each format change.
-// 1: added column details to Pos
// 0: Go1.11 encoding
-const iexportVersion = 1
+// 1: added column details to Pos
+// 2: added information for generic function/types (currently unstable)
+const (
+ iexportVersionGo1_11 = 0
+ iexportVersionPosCol = 1
+ // TODO: before release, change this back to 2. Kept at previous version
+ // for now (for testing).
+ iexportVersionGenerics = iexportVersionPosCol
+
+ iexportVersionCurrent = iexportVersionGenerics
+)
// predeclReserved is the number of type offsets reserved for types
// implicitly declared in the universe block.
@@ -244,6 +254,9 @@ const (
signatureType
structType
interfaceType
+ typeParamType
+ instType
+ unionType
)
const (
@@ -251,13 +264,22 @@ const (
magic = 0x6742937dc293105
)
-func WriteExports(out *bufio.Writer) {
+// WriteExports writes the indexed export format to out. If extensions
+// is true, then the compiler-only extensions are included.
+func WriteExports(out io.Writer, extensions bool) {
+ if extensions {
+ // If we're exporting inline bodies, invoke the crawler to mark
+ // which bodies to include.
+ crawlExports(Target.Exports)
+ }
+
p := iexporter{
allPkgs: map[*types.Pkg]bool{},
stringIndex: map[string]uint64{},
declIndex: map[*types.Sym]uint64{},
inlineIndex: map[*types.Sym]uint64{},
typIndex: map[*types.Type]uint64{},
+ extensions: extensions,
}
for i, pt := range predeclared() {
@@ -293,7 +315,7 @@ func WriteExports(out *bufio.Writer) {
// Assemble header.
var hdr intWriter
hdr.WriteByte('i')
- hdr.uint64(iexportVersion)
+ hdr.uint64(iexportVersionCurrent)
hdr.uint64(uint64(p.strings.Len()))
hdr.uint64(dataLen)
@@ -379,6 +401,8 @@ type iexporter struct {
declIndex map[*types.Sym]uint64
inlineIndex map[*types.Sym]uint64
typIndex map[*types.Type]uint64
+
+ extensions bool
}
// stringOff returns the offset of s within the string section.
@@ -449,7 +473,9 @@ func (p *iexporter) doDecl(n *ir.Name) {
w.tag('V')
w.pos(n.Pos())
w.typ(n.Type())
- w.varExt(n)
+ if w.p.extensions {
+ w.varExt(n)
+ }
case ir.PFUNC:
if ir.IsMethod(n) {
@@ -457,10 +483,25 @@ func (p *iexporter) doDecl(n *ir.Name) {
}
// Function.
- w.tag('F')
+ if n.Type().TParams().NumFields() == 0 {
+ w.tag('F')
+ } else {
+ w.tag('G')
+ }
w.pos(n.Pos())
+ // The tparam list of the function type is the
+ // declaration of the type params. So, write out the type
+ // params right now. Then those type params will be
+ // referenced via their type offset (via typOff) in all
+ // other places in the signature and function where they
+ // are used.
+ if n.Type().TParams().NumFields() > 0 {
+ w.tparamList(n.Type().TParams().FieldSlice())
+ }
w.signature(n.Type())
- w.funcExt(n)
+ if w.p.extensions {
+ w.funcExt(n)
+ }
default:
base.Fatalf("unexpected class: %v, %v", n, n.Class)
@@ -476,10 +517,25 @@ func (p *iexporter) doDecl(n *ir.Name) {
w.tag('C')
w.pos(n.Pos())
w.value(n.Type(), n.Val())
- w.constExt(n)
+ if w.p.extensions {
+ w.constExt(n)
+ }
case ir.OTYPE:
- if types.IsDotAlias(n.Sym()) {
+ if n.Type().IsTypeParam() && n.Type().Underlying() == n.Type() {
+ // Even though it has local scope, a typeparam requires a
+ // declaration via its package and unique name, because it
+ // may be referenced within its type bound during its own
+ // definition.
+ w.tag('P')
+ // A typeparam has a name, and has a type bound rather
+ // than an underlying type.
+ w.pos(n.Pos())
+ w.typ(n.Type().Bound())
+ break
+ }
+
+ if n.Alias() {
// Alias.
w.tag('A')
w.pos(n.Pos())
@@ -488,9 +544,18 @@ func (p *iexporter) doDecl(n *ir.Name) {
}
// Defined type.
- w.tag('T')
+ if len(n.Type().RParams()) == 0 {
+ w.tag('T')
+ } else {
+ w.tag('U')
+ }
w.pos(n.Pos())
+ if len(n.Type().RParams()) > 0 {
+ // Export type parameters, if any, needed for this type
+ w.typeList(n.Type().RParams())
+ }
+
underlying := n.Type().Underlying()
if underlying == types.ErrorType.Underlying() {
// For "type T error", use error as the
@@ -505,22 +570,29 @@ func (p *iexporter) doDecl(n *ir.Name) {
t := n.Type()
if t.IsInterface() {
- w.typeExt(t)
+ if w.p.extensions {
+ w.typeExt(t)
+ }
break
}
- ms := t.Methods()
- w.uint64(uint64(ms.Len()))
- for _, m := range ms.Slice() {
+ // Sort methods, for consistency with types2.
+ methods := append([]*types.Field(nil), t.Methods().Slice()...)
+ sort.Sort(types.MethodsByName(methods))
+
+ w.uint64(uint64(len(methods)))
+ for _, m := range methods {
w.pos(m.Pos)
w.selector(m.Sym)
w.param(m.Type.Recv())
w.signature(m.Type)
}
- w.typeExt(t)
- for _, m := range ms.Slice() {
- w.methExt(m)
+ if w.p.extensions {
+ w.typeExt(t)
+ for _, m := range methods {
+ w.methExt(m)
+ }
}
default:
@@ -803,8 +875,46 @@ func (w *exportWriter) startType(k itag) {
}
func (w *exportWriter) doTyp(t *types.Type) {
- if t.Sym() != nil {
- if t.Sym().Pkg == types.BuiltinPkg || t.Sym().Pkg == ir.Pkgs.Unsafe {
+ s := t.Sym()
+ if s != nil && t.OrigSym != nil {
+ assert(base.Flag.G > 0)
+ // This is an instantiated type - could be a re-instantiation like
+ // Value[T2] or a full instantiation like Value[int].
+ if strings.Index(s.Name, "[") < 0 {
+ base.Fatalf("incorrect name for instantiated type")
+ }
+ w.startType(instType)
+ w.pos(t.Pos())
+ // Export the type arguments for the instantiated type. The
+ // instantiated type could be in a method header (e.g. "func (v
+ // *Value[T2]) set (...) { ... }"), so the type args are "new"
+ // typeparams. Or the instantiated type could be in a
+ // function/method body, so the type args are either concrete
+ // types or existing typeparams from the function/method header.
+ w.typeList(t.RParams())
+ // Export a reference to the base type.
+ baseType := t.OrigSym.Def.(*ir.Name).Type()
+ w.typ(baseType)
+ return
+ }
+
+ // The 't.Underlying() == t' check is to confirm this is a base typeparam
+ // type, rather than a defined type with typeparam underlying type, like:
+ // type orderedAbs[T any] T
+ if t.IsTypeParam() && t.Underlying() == t {
+ assert(base.Flag.G > 0)
+ if s.Pkg == types.BuiltinPkg || s.Pkg == ir.Pkgs.Unsafe {
+ base.Fatalf("builtin type missing from typIndex: %v", t)
+ }
+ // Write out the first use of a type param as a qualified ident.
+ // This will force a "declaration" of the type param.
+ w.startType(typeParamType)
+ w.qualifiedIdent(t.Obj().(*ir.Name))
+ return
+ }
+
+ if s != nil {
+ if s.Pkg == types.BuiltinPkg || s.Pkg == ir.Pkgs.Unsafe {
base.Fatalf("builtin type missing from typIndex: %v", t)
}
@@ -865,6 +975,12 @@ func (w *exportWriter) doTyp(t *types.Type) {
}
}
+ // Sort methods and embedded types, for consistency with types2.
+ // Note: embedded types may be anonymous, and types2 sorts them
+ // with sort.Stable too.
+ sort.Sort(types.MethodsByName(methods))
+ sort.Stable(types.EmbeddedsByName(embeddeds))
+
w.startType(interfaceType)
w.setPkg(t.Pkg(), true)
@@ -881,6 +997,19 @@ func (w *exportWriter) doTyp(t *types.Type) {
w.signature(f.Type)
}
+ case types.TUNION:
+ assert(base.Flag.G > 0)
+ // TODO(danscales): possibly write out the tilde bools in a more
+ // compact form.
+ w.startType(unionType)
+ nt := t.NumTerms()
+ w.uint64(uint64(nt))
+ for i := 0; i < nt; i++ {
+ typ, tilde := t.Term(i)
+ w.bool(tilde)
+ w.typ(typ)
+ }
+
default:
base.Fatalf("unexpected type: %v", t)
}
@@ -906,6 +1035,23 @@ func (w *exportWriter) signature(t *types.Type) {
}
}
+func (w *exportWriter) typeList(ts []*types.Type) {
+ w.uint64(uint64(len(ts)))
+ for _, rparam := range ts {
+ w.typ(rparam)
+ }
+}
+
+func (w *exportWriter) tparamList(fs []*types.Field) {
+ w.uint64(uint64(len(fs)))
+ for _, f := range fs {
+ if !f.Type.IsTypeParam() {
+ base.Fatalf("unexpected non-typeparam")
+ }
+ w.typ(f.Type)
+ }
+}
+
func (w *exportWriter) paramList(fs []*types.Field) {
w.uint64(uint64(len(fs)))
for _, f := range fs {
@@ -948,26 +1094,50 @@ func constTypeOf(typ *types.Type) constant.Kind {
}
func (w *exportWriter) value(typ *types.Type, v constant.Value) {
- ir.AssertValidTypeForConst(typ, v)
w.typ(typ)
+ var kind constant.Kind
+ var valType *types.Type
+
+ if typ.IsTypeParam() {
+ // A constant will have a TYPEPARAM type if it appears in a place
+ // where it must match that typeparam type (e.g. in a binary
+ // operation with a variable of that typeparam type). If so, then
+ // we must write out its actual constant kind as well, so its
+ // constant val can be read in properly during import.
+ kind = v.Kind()
+ w.int64(int64(kind))
+
+ switch kind {
+ case constant.Int:
+ valType = types.Types[types.TINT64]
+ case constant.Float:
+ valType = types.Types[types.TFLOAT64]
+ case constant.Complex:
+ valType = types.Types[types.TCOMPLEX128]
+ }
+ } else {
+ ir.AssertValidTypeForConst(typ, v)
+ kind = constTypeOf(typ)
+ valType = typ
+ }
- // Each type has only one admissible constant representation,
- // so we could type switch directly on v.U here. However,
- // switching on the type increases symmetry with import logic
- // and provides a useful consistency check.
+ // Each type has only one admissible constant representation, so we could
+ // type switch directly on v.Kind() here. However, switching on the type
+ // (in the non-typeparam case) increases symmetry with import logic and
+ // provides a useful consistency check.
- switch constTypeOf(typ) {
+ switch kind {
case constant.Bool:
w.bool(constant.BoolVal(v))
case constant.String:
w.string(constant.StringVal(v))
case constant.Int:
- w.mpint(v, typ)
+ w.mpint(v, valType)
case constant.Float:
- w.mpfloat(v, typ)
+ w.mpfloat(v, valType)
case constant.Complex:
- w.mpfloat(constant.Real(v), typ)
- w.mpfloat(constant.Imag(v), typ)
+ w.mpfloat(constant.Real(v), valType)
+ w.mpfloat(constant.Imag(v), valType)
}
}
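A minimal sketch of the kind-tagged encoding described in the comments above, using go/constant: when the static type is a type parameter and does not pin down a representation, the constant's kind is recorded before the value. This is an illustrative stand-in, not the compiler's exportWriter (which uses mpint/mpfloat encodings rather than native int64/float64 values).
package main

import (
	"fmt"
	"go/constant"
)

// encodeConst writes a kind tag first, then a kind-appropriate payload.
func encodeConst(v constant.Value) []interface{} {
	out := []interface{}{int64(v.Kind())} // kind tag written first
	switch v.Kind() {
	case constant.Bool:
		out = append(out, constant.BoolVal(v))
	case constant.String:
		out = append(out, constant.StringVal(v))
	case constant.Int:
		i, _ := constant.Int64Val(v)
		out = append(out, i)
	case constant.Float:
		f, _ := constant.Float64Val(v)
		out = append(out, f)
	}
	return out
}

func main() {
	fmt.Println(encodeConst(constant.MakeInt64(42)))    // kind tag for constant.Int, then 42
	fmt.Println(encodeConst(constant.MakeString("hi"))) // kind tag for constant.String, then "hi"
}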
@@ -1185,10 +1355,14 @@ func (w *exportWriter) funcExt(n *ir.Name) {
}
}
- // Inline body.
+ // Write out inline body or body of a generic function/method.
+ if n.Type().HasTParam() && n.Func.Body != nil && n.Func.Inl == nil {
+ base.FatalfAt(n.Pos(), "generic function is not marked inlineable")
+ }
if n.Func.Inl != nil {
w.uint64(1 + uint64(n.Func.Inl.Cost))
- if n.Func.ExportInline() {
+ w.bool(n.Func.Inl.CanDelayResults)
+ if n.Func.ExportInline() || n.Type().HasTParam() {
w.p.doInline(n)
}
@@ -1432,7 +1606,12 @@ func (w *exportWriter) commList(cases []*ir.CommClause) {
w.uint64(uint64(len(cases)))
for _, cas := range cases {
w.pos(cas.Pos())
- w.node(cas.Comm)
+ defaultCase := cas.Comm == nil
+ w.bool(defaultCase)
+ if !defaultCase {
+ // Only call w.node for a non-default case (cas.Comm is non-nil)
+ w.node(cas.Comm)
+ }
w.stmtList(cas.Body)
}
}
@@ -1460,7 +1639,9 @@ func (w *exportWriter) expr(n ir.Node) {
// (somewhat closely following the structure of exprfmt in fmt.go)
case ir.ONIL:
n := n.(*ir.NilExpr)
- if !n.Type().HasNil() {
+ // If n is a typeparam, it will have already been checked
+ // for proper use by the types2 typechecker.
+ if !n.Type().IsTypeParam() && !n.Type().HasNil() {
base.Fatalf("unexpected type for nil: %v", n.Type())
}
w.op(ir.ONIL)
@@ -1469,7 +1650,11 @@ func (w *exportWriter) expr(n ir.Node) {
case ir.OLITERAL:
w.op(ir.OLITERAL)
- w.pos(n.Pos())
+ if ir.HasUniquePos(n) {
+ w.pos(n.Pos())
+ } else {
+ w.pos(src.NoXPos)
+ }
w.value(n.Type(), n.Val())
case ir.ONAME:
@@ -1488,6 +1673,16 @@ func (w *exportWriter) expr(n ir.Node) {
// We don't need a type here, as the type will be provided at the
// declaration of n.
w.op(ir.ONAME)
+
+ // This handles the case where we haven't yet transformed a call
+ // to a builtin, so we must write out the builtin as a name in the
+ // builtin package.
+ isBuiltin := n.BuiltinOp != ir.OXXX
+ w.bool(isBuiltin)
+ if isBuiltin {
+ w.string(n.Sym().Name)
+ break
+ }
w.localName(n)
// case OPACK, ONONAME:
@@ -1585,12 +1780,11 @@ func (w *exportWriter) expr(n ir.Node) {
// case OSTRUCTKEY:
// unreachable - handled in case OSTRUCTLIT by elemList
- case ir.OXDOT, ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH, ir.OCALLPART, ir.OMETHEXPR:
+ case ir.OXDOT, ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH, ir.OMETHVALUE, ir.OMETHEXPR:
n := n.(*ir.SelectorExpr)
if go117ExportTypes {
- if n.Op() == ir.OXDOT {
- base.Fatalf("shouldn't encounter XDOT in new exporter")
- }
+ // For go117ExportTypes, we usually see all ops except
+ // OXDOT, but we can see OXDOT for generic functions.
w.op(n.Op())
} else {
w.op(ir.OXDOT)
@@ -1600,11 +1794,16 @@ func (w *exportWriter) expr(n ir.Node) {
w.exoticSelector(n.Sel)
if go117ExportTypes {
w.exoticType(n.Type())
- if n.Op() == ir.ODOT || n.Op() == ir.ODOTPTR || n.Op() == ir.ODOTINTER {
+ if n.Op() == ir.OXDOT {
+ // n.Selection for method references will be
+ // reconstructed during import.
+ w.bool(n.Selection != nil)
+ } else if n.Op() == ir.ODOT || n.Op() == ir.ODOTPTR || n.Op() == ir.ODOTINTER {
w.exoticField(n.Selection)
}
- // n.Selection is not required for OMETHEXPR, ODOTMETH, and OCALLPART. It will
- // be reconstructed during import.
+ // n.Selection is not required for OMETHEXPR, ODOTMETH, and OMETHVALUE. It will
+ // be reconstructed during import. n.Selection is computed during
+ // transformDot() for OXDOT.
}
case ir.ODOTTYPE, ir.ODOTTYPE2:
@@ -1629,7 +1828,7 @@ func (w *exportWriter) expr(n ir.Node) {
w.expr(n.X)
w.expr(n.Index)
if go117ExportTypes {
- w.typ(n.Type())
+ w.exoticType(n.Type())
if n.Op() == ir.OINDEXMAP {
w.bool(n.Assigned)
}
@@ -1677,7 +1876,7 @@ func (w *exportWriter) expr(n ir.Node) {
w.op(ir.OEND)
}
- case ir.OCONV, ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2RUNES, ir.ORUNESTR, ir.OSLICE2ARRPTR:
+ case ir.OCONV, ir.OCONVIFACE, ir.OCONVIDATA, ir.OCONVNOP, ir.OBYTES2STR, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2RUNES, ir.ORUNESTR, ir.OSLICE2ARRPTR:
n := n.(*ir.ConvExpr)
if go117ExportTypes {
w.op(n.Op())
@@ -1732,7 +1931,6 @@ func (w *exportWriter) expr(n ir.Node) {
w.bool(n.IsDDD)
if go117ExportTypes {
w.exoticType(n.Type())
- w.uint64(uint64(n.Use))
}
case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE:
@@ -1759,8 +1957,16 @@ func (w *exportWriter) expr(n ir.Node) {
w.op(ir.OEND)
}
+ case ir.OLINKSYMOFFSET:
+ n := n.(*ir.LinksymOffsetExpr)
+ w.op(ir.OLINKSYMOFFSET)
+ w.pos(n.Pos())
+ w.string(n.Linksym.Name)
+ w.uint64(uint64(n.Offset_))
+ w.typ(n.Type())
+
// unary expressions
- case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.ORECV:
+ case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.ORECV, ir.OIDATA:
n := n.(*ir.UnaryExpr)
w.op(n.Op())
w.pos(n.Pos())
@@ -1796,7 +2002,7 @@ func (w *exportWriter) expr(n ir.Node) {
// binary expressions
case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT,
- ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.ORSH, ir.OSUB, ir.OXOR:
+ ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.ORSH, ir.OSUB, ir.OXOR, ir.OEFACE:
n := n.(*ir.BinaryExpr)
w.op(n.Op())
w.pos(n.Pos())
@@ -1829,6 +2035,26 @@ func (w *exportWriter) expr(n ir.Node) {
// if exporting, DCLCONST should just be removed as its usage
// has already been replaced with literals
+ case ir.OFUNCINST:
+ n := n.(*ir.InstExpr)
+ w.op(ir.OFUNCINST)
+ w.pos(n.Pos())
+ w.expr(n.X)
+ w.uint64(uint64(len(n.Targs)))
+ for _, targ := range n.Targs {
+ w.typ(targ.Type())
+ }
+ if go117ExportTypes {
+ w.typ(n.Type())
+ }
+
+ case ir.OSELRECV2:
+ n := n.(*ir.AssignListStmt)
+ w.op(ir.OSELRECV2)
+ w.pos(n.Pos())
+ w.exprList(n.Lhs)
+ w.exprList(n.Rhs)
+
default:
base.Fatalf("cannot export %v (%d) node\n"+
"\t==> please file an issue and assign to gri@", n.Op(), int(n.Op()))
@@ -1864,11 +2090,8 @@ func (w *exportWriter) fieldList(list ir.Nodes) {
for _, n := range list {
n := n.(*ir.StructKeyExpr)
w.pos(n.Pos())
- w.selector(n.Field)
+ w.exoticField(n.Field)
w.expr(n.Value)
- if go117ExportTypes {
- w.uint64(uint64(n.Offset))
- }
}
}
@@ -1902,8 +2125,15 @@ func (w *exportWriter) localIdent(s *types.Sym) {
return
}
- // TODO(mdempsky): Fix autotmp hack.
- if i := strings.LastIndex(name, "."); i >= 0 && !strings.HasPrefix(name, ".autotmp_") {
+ // The names of autotmp variables aren't important; they just need to
+ // be unique. To stabilize the export data, simply write out "$autotmp"
+ // as a marker and let the importer generate its own unique name.
+ if strings.HasPrefix(name, ".autotmp_") {
+ w.string("$autotmp")
+ return
+ }
+
+ if i := strings.LastIndex(name, "."); i >= 0 && !strings.HasPrefix(name, ".dict") { // TODO: just use autotmp names for dictionaries?
base.Fatalf("unexpected dot in identifier: %v", name)
}
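A standalone sketch of the renaming round trip described above: the exporter collapses every ".autotmp_N" name to the "$autotmp" marker, and the importer hands out its own locally unique names. All three functions here, including autotmpname, are hypothetical stand-ins, not the compiler's helpers.
package main

import (
	"fmt"
	"strings"
)

func exportLocalIdent(name string) string {
	if strings.HasPrefix(name, ".autotmp_") {
		return "$autotmp"
	}
	return name
}

func importLocalIdent(name string, gen *int) string {
	if name == "$autotmp" {
		n := autotmpname(*gen)
		*gen++
		return n
	}
	return name
}

func autotmpname(n int) string { return fmt.Sprintf(".autotmp_%d", n) }

func main() {
	var gen int
	for _, name := range []string{".autotmp_17", "x", ".autotmp_99"} {
		fmt.Println(importLocalIdent(exportLocalIdent(name), &gen))
	}
	// Prints .autotmp_0, x, .autotmp_1 regardless of the original numbering,
	// so the export data no longer depends on how temporaries were numbered.
}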
diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go
index 37f5a7bba0..2e3fdbc1bc 100644
--- a/src/cmd/compile/internal/typecheck/iimport.go
+++ b/src/cmd/compile/internal/typecheck/iimport.go
@@ -8,10 +8,10 @@
package typecheck
import (
+ "bytes"
"encoding/binary"
"fmt"
"go/constant"
- "io"
"math/big"
"os"
"strings"
@@ -19,8 +19,6 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
- "cmd/internal/bio"
- "cmd/internal/goobj"
"cmd/internal/obj"
"cmd/internal/src"
)
@@ -94,7 +92,7 @@ func importReaderFor(sym *types.Sym, importers map[*types.Sym]iimporterAndOffset
}
type intReader struct {
- *bio.Reader
+ *strings.Reader
pkg *types.Pkg
}
@@ -116,33 +114,34 @@ func (r *intReader) uint64() uint64 {
return i
}
-func ReadImports(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) {
- ird := &intReader{in, pkg}
+func ReadImports(pkg *types.Pkg, data string) {
+ ird := &intReader{strings.NewReader(data), pkg}
version := ird.uint64()
- if version != iexportVersion {
- base.Errorf("import %q: unknown export format version %d", pkg.Path, version)
+ switch version {
+ case /* iexportVersionGenerics, */ iexportVersionPosCol, iexportVersionGo1_11:
+ default:
+ if version > iexportVersionGenerics {
+ base.Errorf("import %q: unstable export format version %d, just recompile", pkg.Path, version)
+ } else {
+ base.Errorf("import %q: unknown export format version %d", pkg.Path, version)
+ }
base.ErrorExit()
}
- sLen := ird.uint64()
- dLen := ird.uint64()
-
- // Map string (and data) section into memory as a single large
- // string. This reduces heap fragmentation and allows
- // returning individual substrings very efficiently.
- data, err := mapFile(in.File(), in.Offset(), int64(sLen+dLen))
- if err != nil {
- base.Errorf("import %q: mapping input: %v", pkg.Path, err)
- base.ErrorExit()
- }
- stringData := data[:sLen]
- declData := data[sLen:]
+ sLen := int64(ird.uint64())
+ dLen := int64(ird.uint64())
- in.MustSeek(int64(sLen+dLen), os.SEEK_CUR)
+ // TODO(mdempsky): Replace os.SEEK_CUR with io.SeekCurrent after
+ // #44505 is fixed.
+ whence, _ := ird.Seek(0, os.SEEK_CUR)
+ stringData := data[whence : whence+sLen]
+ declData := data[whence+sLen : whence+sLen+dLen]
+ ird.Seek(sLen+dLen, os.SEEK_CUR)
p := &iimporter{
- ipkg: pkg,
+ exportVersion: version,
+ ipkg: pkg,
pkgCache: map[uint64]*types.Pkg{},
posBaseCache: map[uint64]*src.PosBase{},
@@ -200,18 +199,11 @@ func ReadImports(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintT
}
}
}
-
- // Fingerprint.
- _, err = io.ReadFull(in, fingerprint[:])
- if err != nil {
- base.Errorf("import %s: error reading fingerprint", pkg.Path)
- base.ErrorExit()
- }
- return fingerprint
}
type iimporter struct {
- ipkg *types.Pkg
+ exportVersion uint64
+ ipkg *types.Pkg
pkgCache map[uint64]*types.Pkg
posBaseCache map[uint64]*src.PosBase
@@ -273,6 +265,7 @@ type importReader struct {
// Slice of all dcls for function, including any interior closures
allDcls []*ir.Name
allClosureVars []*ir.Name
+ autotmpgen int
}
func (p *iimporter) newReader(off uint64, pkg *types.Pkg) *importReader {
@@ -302,37 +295,53 @@ func (r *importReader) doDecl(sym *types.Sym) *ir.Name {
case 'A':
typ := r.typ()
- return importalias(r.p.ipkg, pos, sym, typ)
+ return importalias(pos, sym, typ)
case 'C':
typ := r.typ()
val := r.value(typ)
- n := importconst(r.p.ipkg, pos, sym, typ, val)
+ n := importconst(pos, sym, typ, val)
r.constExt(n)
return n
- case 'F':
- typ := r.signature(nil)
+ case 'F', 'G':
+ var tparams []*types.Field
+ if tag == 'G' {
+ tparams = r.tparamList()
+ }
+ typ := r.signature(nil, tparams)
- n := importfunc(r.p.ipkg, pos, sym, typ)
+ n := importfunc(pos, sym, typ)
r.funcExt(n)
return n
- case 'T':
+ case 'T', 'U':
+ var rparams []*types.Type
+ if tag == 'U' {
+ rparams = r.typeList()
+ }
+
// Types can be recursive. We need to setup a stub
// declaration before recursing.
- n := importtype(r.p.ipkg, pos, sym)
+ n := importtype(pos, sym)
t := n.Type()
+ if tag == 'U' {
+ t.SetRParams(rparams)
+ }
// We also need to defer width calculations until
// after the underlying type has been assigned.
types.DeferCheckSize()
+ deferDoInst()
underlying := r.typ()
t.SetUnderlying(underlying)
- types.ResumeCheckSize()
if underlying.IsInterface() {
+ // Finish up all type instantiations and CheckSize calls
+ // now that a top-level type is fully constructed.
+ resumeDoInst()
+ types.ResumeCheckSize()
r.typeExt(t)
return n
}
@@ -342,7 +351,7 @@ func (r *importReader) doDecl(sym *types.Sym) *ir.Name {
mpos := r.pos()
msym := r.selector()
recv := r.param()
- mtyp := r.signature(recv)
+ mtyp := r.signature(recv, nil)
// MethodSym already marked m.Sym as a function.
m := ir.NewNameAt(mpos, ir.MethodSym(recv.Type, msym))
@@ -358,16 +367,43 @@ func (r *importReader) doDecl(sym *types.Sym) *ir.Name {
}
t.Methods().Set(ms)
+ // Finish up all instantiations and CheckSize calls now
+ // that a top-level type is fully constructed.
+ resumeDoInst()
+ types.ResumeCheckSize()
+
r.typeExt(t)
for _, m := range ms {
r.methExt(m)
}
return n
+ case 'P':
+ if r.p.exportVersion < iexportVersionGenerics {
+ base.Fatalf("unexpected type param type")
+ }
+ if sym.Def != nil {
+ // Make sure we use the same type param type for the same
+ // name, whether it is created during types1-import or
+ // this types2-to-types1 translation.
+ return sym.Def.(*ir.Name)
+ }
+ // The typeparam index is set at the point where the containing type
+ // param list is imported.
+ t := types.NewTypeParam(sym, 0)
+ // Nname needed to save the pos.
+ nname := ir.NewDeclNameAt(pos, ir.OTYPE, sym)
+ sym.Def = nname
+ nname.SetType(t)
+ t.SetNod(nname)
+
+ t.SetBound(r.typ())
+ return nname
+
case 'V':
typ := r.typ()
- n := importvar(r.p.ipkg, pos, sym, typ)
+ n := importvar(pos, sym, typ)
r.varExt(n)
return n
@@ -377,27 +413,47 @@ func (r *importReader) doDecl(sym *types.Sym) *ir.Name {
}
}
-func (p *importReader) value(typ *types.Type) constant.Value {
- switch constTypeOf(typ) {
+func (r *importReader) value(typ *types.Type) constant.Value {
+ var kind constant.Kind
+ var valType *types.Type
+
+ if typ.IsTypeParam() {
+ // If a constant had a typeparam type, then we wrote out its
+ // actual constant kind as well.
+ kind = constant.Kind(r.int64())
+ switch kind {
+ case constant.Int:
+ valType = types.Types[types.TINT64]
+ case constant.Float:
+ valType = types.Types[types.TFLOAT64]
+ case constant.Complex:
+ valType = types.Types[types.TCOMPLEX128]
+ }
+ } else {
+ kind = constTypeOf(typ)
+ valType = typ
+ }
+
+ switch kind {
case constant.Bool:
- return constant.MakeBool(p.bool())
+ return constant.MakeBool(r.bool())
case constant.String:
- return constant.MakeString(p.string())
+ return constant.MakeString(r.string())
case constant.Int:
var i big.Int
- p.mpint(&i, typ)
+ r.mpint(&i, valType)
return constant.Make(&i)
case constant.Float:
- return p.float(typ)
+ return r.float(valType)
case constant.Complex:
- return makeComplex(p.float(typ), p.float(typ))
+ return makeComplex(r.float(valType), r.float(valType))
}
base.Fatalf("unexpected value type: %v", typ)
panic("unreachable")
}
-func (p *importReader) mpint(x *big.Int, typ *types.Type) {
+func (r *importReader) mpint(x *big.Int, typ *types.Type) {
signed, maxBytes := intSize(typ)
maxSmall := 256 - maxBytes
@@ -408,7 +464,7 @@ func (p *importReader) mpint(x *big.Int, typ *types.Type) {
maxSmall = 256
}
- n, _ := p.ReadByte()
+ n, _ := r.ReadByte()
if uint(n) < maxSmall {
v := int64(n)
if signed {
@@ -429,30 +485,30 @@ func (p *importReader) mpint(x *big.Int, typ *types.Type) {
base.Fatalf("weird decoding: %v, %v => %v", n, signed, v)
}
b := make([]byte, v)
- p.Read(b)
+ r.Read(b)
x.SetBytes(b)
if signed && n&1 != 0 {
x.Neg(x)
}
}
-func (p *importReader) float(typ *types.Type) constant.Value {
+func (r *importReader) float(typ *types.Type) constant.Value {
var mant big.Int
- p.mpint(&mant, typ)
+ r.mpint(&mant, typ)
var f big.Float
f.SetInt(&mant)
if f.Sign() != 0 {
- f.SetMantExp(&f, int(p.int64()))
+ f.SetMantExp(&f, int(r.int64()))
}
return constant.Make(&f)
}
-func (p *importReader) mprat(orig constant.Value) constant.Value {
- if !p.bool() {
+func (r *importReader) mprat(orig constant.Value) constant.Value {
+ if !r.bool() {
return orig
}
var rat big.Rat
- rat.SetString(p.string())
+ rat.SetString(r.string())
return constant.Make(&rat)
}
@@ -462,8 +518,15 @@ func (r *importReader) ident(selector bool) *types.Sym {
return nil
}
pkg := r.currPkg
- if selector && types.IsExported(name) {
- pkg = types.LocalPkg
+ if selector {
+ if types.IsExported(name) {
+ pkg = types.LocalPkg
+ }
+ } else {
+ if name == "$autotmp" {
+ name = autotmpname(r.autotmpgen)
+ r.autotmpgen++
+ }
}
return pkg.Lookup(name)
}
@@ -503,7 +566,14 @@ func (r *importReader) pos() src.XPos {
}
func (r *importReader) typ() *types.Type {
- return r.p.typAt(r.uint64())
+ // If this is a top-level type call, defer type instantiations until the
+ // type is fully constructed.
+ types.DeferCheckSize()
+ deferDoInst()
+ t := r.p.typAt(r.uint64())
+ resumeDoInst()
+ types.ResumeCheckSize()
+ return t
}
func (r *importReader) exoticType() *types.Type {
@@ -641,7 +711,13 @@ func (p *iimporter) typAt(off uint64) *types.Type {
// are pushed to compile queue, then draining from the queue for compiling.
// During this process, the size calculation is disabled, so it is not safe for
// calculating size during SSA generation anymore. See issue #44732.
- types.CheckSize(t)
+ //
+ // No need to calc sizes for re-instantiated generic types, and
+ // they are not necessarily resolved until the top-level type is
+ // defined (because of recursive types).
+ if t.OrigSym == nil || !t.HasTParam() {
+ types.CheckSize(t)
+ }
p.typCache[off] = t
}
return t
@@ -680,7 +756,7 @@ func (r *importReader) typ1() *types.Type {
case signatureType:
r.setPkg()
- return r.signature(nil)
+ return r.signature(nil, nil)
case structType:
r.setPkg()
@@ -718,16 +794,64 @@ func (r *importReader) typ1() *types.Type {
for i := range methods {
pos := r.pos()
sym := r.selector()
- typ := r.signature(fakeRecvField())
+ typ := r.signature(fakeRecvField(), nil)
methods[i] = types.NewField(pos, sym, typ)
}
+ if len(embeddeds)+len(methods) == 0 {
+ return types.Types[types.TINTER]
+ }
+
t := types.NewInterface(r.currPkg, append(embeddeds, methods...))
// Ensure we expand the interface in the frontend (#25055).
types.CheckSize(t)
return t
+
+ case typeParamType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ base.Fatalf("unexpected type param type")
+ }
+ // Similar to code for defined types, since we "declared"
+ // typeparams to deal with recursion (typeparam is used within its
+ // own type bound).
+ ident := r.qualifiedIdent()
+ if ident.Sym().Def != nil {
+ return ident.Sym().Def.(*ir.Name).Type()
+ }
+ n := expandDecl(ident)
+ if n.Op() != ir.OTYPE {
+ base.Fatalf("expected OTYPE, got %v: %v, %v", n.Op(), n.Sym(), n)
+ }
+ return n.Type()
+
+ case instType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ base.Fatalf("unexpected instantiation type")
+ }
+ pos := r.pos()
+ len := r.uint64()
+ targs := make([]*types.Type, len)
+ for i := range targs {
+ targs[i] = r.typ()
+ }
+ baseType := r.typ()
+ t := Instantiate(pos, baseType, targs)
+ return t
+
+ case unionType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ base.Fatalf("unexpected instantiation type")
+ }
+ nt := int(r.uint64())
+ terms := make([]*types.Type, nt)
+ tildes := make([]bool, nt)
+ for i := range terms {
+ tildes[i] = r.bool()
+ terms[i] = r.typ()
+ }
+ return types.NewUnion(terms, tildes)
}
}
@@ -735,13 +859,42 @@ func (r *importReader) kind() itag {
return itag(r.uint64())
}
-func (r *importReader) signature(recv *types.Field) *types.Type {
+func (r *importReader) signature(recv *types.Field, tparams []*types.Field) *types.Type {
params := r.paramList()
results := r.paramList()
if n := len(params); n > 0 {
params[n-1].SetIsDDD(r.bool())
}
- return types.NewSignature(r.currPkg, recv, nil, params, results)
+ return types.NewSignature(r.currPkg, recv, tparams, params, results)
+}
+
+func (r *importReader) typeList() []*types.Type {
+ n := r.uint64()
+ if n == 0 {
+ return nil
+ }
+ ts := make([]*types.Type, n)
+ for i := range ts {
+ ts[i] = r.typ()
+ if ts[i].IsTypeParam() {
+ ts[i].SetIndex(i)
+ }
+ }
+ return ts
+}
+
+func (r *importReader) tparamList() []*types.Field {
+ n := r.uint64()
+ if n == 0 {
+ return nil
+ }
+ fs := make([]*types.Field, n)
+ for i := range fs {
+ typ := r.typ()
+ typ.SetIndex(i)
+ fs[i] = types.NewField(typ.Pos(), typ.Sym(), typ)
+ }
+ return fs
}
func (r *importReader) paramList() []*types.Field {
@@ -809,7 +962,9 @@ func (r *importReader) funcExt(n *ir.Name) {
n.Func.ABI = obj.ABI(r.uint64())
- n.SetPragma(ir.PragmaFlag(r.uint64()))
+ // Make sure the //go:noinline pragma is imported (so stenciled functions
+ // have the same noinline status as the corresponding generic function).
+ n.Func.Pragma = ir.PragmaFlag(r.uint64())
// Escape analysis.
for _, fs := range &types.RecvsParams {
@@ -821,7 +976,8 @@ func (r *importReader) funcExt(n *ir.Name) {
// Inline body.
if u := r.uint64(); u > 0 {
n.Func.Inl = &ir.Inline{
- Cost: int32(u - 1),
+ Cost: int32(u - 1),
+ CanDelayResults: r.bool(),
}
n.Func.Endlineno = r.pos()
}
@@ -852,7 +1008,13 @@ func (r *importReader) symIdx(s *types.Sym) {
func (r *importReader) typeExt(t *types.Type) {
t.SetNotInHeap(r.bool())
- i, pi := r.int64(), r.int64()
+ SetBaseTypeIndex(t, r.int64(), r.int64())
+}
+
+func SetBaseTypeIndex(t *types.Type, i, pi int64) {
+ if t.Obj() == nil {
+ base.Fatalf("SetBaseTypeIndex on non-defined type %v", t)
+ }
if i != -1 && pi != -1 {
typeSymIdx[t] = [2]int64{i, pi}
}
@@ -860,6 +1022,7 @@ func (r *importReader) typeExt(t *types.Type) {
// Map imported type T to the index of type descriptor symbols of T and *T,
// so we can use index to reference the symbol.
+// TODO(mdempsky): Store this information directly in the Type's Name.
var typeSymIdx = make(map[*types.Type][2]int64)
func BaseTypeIndex(t *types.Type) int64 {
@@ -936,6 +1099,10 @@ func (r *importReader) funcBody(fn *ir.Func) {
fn.Inl.Body = body
r.curfn = outerfn
+ if base.Flag.W >= 3 {
+ fmt.Printf("Imported for %v", fn)
+ ir.DumpList("", fn.Inl.Body)
+ }
}
func (r *importReader) readNames(fn *ir.Func) []*ir.Name {
@@ -1032,7 +1199,13 @@ func (r *importReader) caseList(switchExpr ir.Node) []*ir.CaseClause {
func (r *importReader) commList() []*ir.CommClause {
cases := make([]*ir.CommClause, r.uint64())
for i := range cases {
- cases[i] = ir.NewCommStmt(r.pos(), r.node(), r.stmtList())
+ pos := r.pos()
+ defaultCase := r.bool()
+ var comm ir.Node
+ if !defaultCase {
+ comm = r.node()
+ }
+ cases[i] = ir.NewCommStmt(pos, comm, r.stmtList())
}
return cases
}
@@ -1095,6 +1268,10 @@ func (r *importReader) node() ir.Node {
return n
case ir.ONAME:
+ isBuiltin := r.bool()
+ if isBuiltin {
+ return types.BuiltinPkg.Lookup(r.string()).Def.(*ir.Name)
+ }
return r.localName()
// case OPACK, ONONAME:
@@ -1117,28 +1294,18 @@ func (r *importReader) node() ir.Node {
case ir.OCLOSURE:
//println("Importing CLOSURE")
pos := r.pos()
- typ := r.signature(nil)
+ typ := r.signature(nil, nil)
// All the remaining code below is similar to (*noder).funcLit(), but
// with Dcls and ClosureVars lists already set up
- fn := ir.NewFunc(pos)
- fn.SetIsHiddenClosure(true)
- fn.Nname = ir.NewNameAt(pos, ir.BlankNode.Sym())
- fn.Nname.Func = fn
- fn.Nname.Ntype = ir.TypeNode(typ)
- fn.Nname.Defn = fn
+ fn := ir.NewClosureFunc(pos, true)
fn.Nname.SetType(typ)
cvars := make([]*ir.Name, r.int64())
for i := range cvars {
cvars[i] = ir.CaptureName(r.pos(), fn, r.localName().Canonical())
- if go117ExportTypes {
- if cvars[i].Type() != nil || cvars[i].Defn == nil {
- base.Fatalf("bad import of closure variable")
- }
- // Closure variable should have Defn set, which is its captured
- // variable, and it gets the same type as the captured variable.
- cvars[i].SetType(cvars[i].Defn.Type())
+ if go117ExportTypes && cvars[i].Defn == nil {
+ base.Fatalf("bad import of closure variable")
}
}
fn.ClosureVars = cvars
@@ -1159,12 +1326,10 @@ func (r *importReader) node() ir.Node {
ir.FinishCaptureNames(pos, r.curfn, fn)
- clo := ir.NewClosureExpr(pos, fn)
- fn.OClosure = clo
+ clo := fn.OClosure
if go117ExportTypes {
clo.SetType(typ)
}
-
return clo
case ir.OSTRUCTLIT:
@@ -1202,35 +1367,54 @@ func (r *importReader) node() ir.Node {
// case OSTRUCTKEY:
// unreachable - handled in case OSTRUCTLIT by elemList
- case ir.OXDOT:
- // see parser.new_dotname
- if go117ExportTypes {
- base.Fatalf("shouldn't encounter XDOT in new importer")
- }
- return ir.NewSelectorExpr(r.pos(), ir.OXDOT, r.expr(), r.exoticSelector())
-
- case ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH, ir.OCALLPART, ir.OMETHEXPR:
- if !go117ExportTypes {
- // unreachable - mapped to case OXDOT by exporter
+ case ir.OXDOT, ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH, ir.OMETHVALUE, ir.OMETHEXPR:
+ // For !go117ExportTypes, we should only see OXDOT.
+ // For go117ExportTypes, we usually see all the other ops, but can see
+ // OXDOT for generic functions.
+ if op != ir.OXDOT && !go117ExportTypes {
goto error
}
pos := r.pos()
expr := r.expr()
sel := r.exoticSelector()
n := ir.NewSelectorExpr(pos, op, expr, sel)
- n.SetType(r.exoticType())
- switch op {
- case ir.ODOT, ir.ODOTPTR, ir.ODOTINTER:
- n.Selection = r.exoticField()
- case ir.ODOTMETH, ir.OCALLPART, ir.OMETHEXPR:
- // These require a Lookup to link to the correct declaration.
- rcvrType := expr.Type()
- typ := n.Type()
- n.Selection = Lookdot(n, rcvrType, 1)
- if op == ir.OCALLPART || op == ir.OMETHEXPR {
- // Lookdot clobbers the opcode and type, undo that.
- n.SetOp(op)
- n.SetType(typ)
+ if go117ExportTypes {
+ n.SetType(r.exoticType())
+ switch op {
+ case ir.OXDOT:
+ hasSelection := r.bool()
+ // We reconstruct n.Selection for method calls on
+ // generic types and method calls due to type param
+ // bounds. Otherwise, n.Selection is nil.
+ if hasSelection {
+ n1 := ir.NewSelectorExpr(pos, op, expr, sel)
+ AddImplicitDots(n1)
+ var m *types.Field
+ if n1.X.Type().IsTypeParam() {
+ genType := n1.X.Type().Bound()
+ m = Lookdot1(n1, sel, genType, genType.AllMethods(), 1)
+ } else {
+ genType := types.ReceiverBaseType(n1.X.Type())
+ if genType.IsInstantiatedGeneric() {
+ genType = genType.OrigSym.Def.Type()
+ }
+ m = Lookdot1(n1, sel, genType, genType.Methods(), 1)
+ }
+ assert(m != nil)
+ n.Selection = m
+ }
+ case ir.ODOT, ir.ODOTPTR, ir.ODOTINTER:
+ n.Selection = r.exoticField()
+ case ir.ODOTMETH, ir.OMETHVALUE, ir.OMETHEXPR:
+ // These require a Lookup to link to the correct declaration.
+ rcvrType := expr.Type()
+ typ := n.Type()
+ n.Selection = Lookdot(n, rcvrType, 1)
+ if op == ir.OMETHVALUE || op == ir.OMETHEXPR {
+ // Lookdot clobbers the opcode and type, undo that.
+ n.SetOp(op)
+ n.SetType(typ)
+ }
}
}
return n
@@ -1247,7 +1431,7 @@ func (r *importReader) node() ir.Node {
n := ir.NewIndexExpr(r.pos(), r.expr(), r.expr())
if go117ExportTypes {
n.SetOp(op)
- n.SetType(r.typ())
+ n.SetType(r.exoticType())
if op == ir.OINDEXMAP {
n.Assigned = r.bool()
}
@@ -1267,7 +1451,7 @@ func (r *importReader) node() ir.Node {
}
return n
- case ir.OCONV, ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2RUNES, ir.ORUNESTR, ir.OSLICE2ARRPTR:
+ case ir.OCONV, ir.OCONVIFACE, ir.OCONVIDATA, ir.OCONVNOP, ir.OBYTES2STR, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2RUNES, ir.ORUNESTR, ir.OSLICE2ARRPTR:
if !go117ExportTypes && op != ir.OCONV {
// unreachable - mapped to OCONV case by exporter
goto error
@@ -1318,7 +1502,6 @@ func (r *importReader) node() ir.Node {
n.IsDDD = r.bool()
if go117ExportTypes {
n.SetType(r.exoticType())
- n.Use = ir.CallUse(r.uint64())
}
return n
@@ -1343,8 +1526,15 @@ func (r *importReader) node() ir.Node {
n.Args.Append(r.exprList()...)
return n
+ case ir.OLINKSYMOFFSET:
+ pos := r.pos()
+ name := r.string()
+ off := r.uint64()
+ typ := r.typ()
+ return ir.NewLinksymOffsetExpr(pos, Lookup(name).Linksym(), int64(off), typ)
+
// unary expressions
- case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.ORECV:
+ case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.ORECV, ir.OIDATA:
n := ir.NewUnaryExpr(r.pos(), op, r.expr())
if go117ExportTypes {
n.SetType(r.typ())
@@ -1368,7 +1558,7 @@ func (r *importReader) node() ir.Node {
// binary expressions
case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT,
- ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.ORSH, ir.OSUB, ir.OXOR:
+ ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.ORSH, ir.OSUB, ir.OXOR, ir.OEFACE:
n := ir.NewBinaryExpr(r.pos(), op, r.expr(), r.expr())
if go117ExportTypes {
n.SetType(r.typ())
@@ -1496,6 +1686,26 @@ func (r *importReader) node() ir.Node {
case ir.OEND:
return nil
+ case ir.OFUNCINST:
+ pos := r.pos()
+ x := r.expr()
+ ntargs := r.uint64()
+ var targs []ir.Node
+ if ntargs > 0 {
+ targs = make([]ir.Node, ntargs)
+ for i := range targs {
+ targs[i] = ir.TypeNode(r.typ())
+ }
+ }
+ n := ir.NewInstExpr(pos, ir.OFUNCINST, x, targs)
+ if go117ExportTypes {
+ n.SetType(r.typ())
+ }
+ return n
+
+ case ir.OSELRECV2:
+ return ir.NewAssignListStmt(r.pos(), ir.OSELRECV2, r.exprList(), r.exprList())
+
default:
base.Fatalf("cannot import %v (%d) node\n"+
"\t==> please file an issue and assign to gri@", op, int(op))
@@ -1517,11 +1727,7 @@ func (r *importReader) op() ir.Op {
func (r *importReader) fieldList() []ir.Node {
list := make([]ir.Node, r.uint64())
for i := range list {
- x := ir.NewStructKeyExpr(r.pos(), r.selector(), r.expr())
- if go117ExportTypes {
- x.Offset = int64(r.uint64())
- }
- list[i] = x
+ list[i] = ir.NewStructKeyExpr(r.pos(), r.exoticField(), r.expr())
}
return list
}
@@ -1544,3 +1750,146 @@ func builtinCall(pos src.XPos, op ir.Op) *ir.CallExpr {
}
return ir.NewCallExpr(pos, ir.OCALL, ir.NewIdent(base.Pos, types.BuiltinPkg.Lookup(ir.OpNames[op])), nil)
}
+
+// InstTypeName creates a name for an instantiated type, based on the name of the
+// generic type and the type args.
+func InstTypeName(name string, targs []*types.Type) string {
+ b := bytes.NewBufferString(name)
+ b.WriteByte('[')
+ for i, targ := range targs {
+ if i > 0 {
+ b.WriteByte(',')
+ }
+ // WriteString() does not include the package name for the local
+ // package, but we include it to make sure type arguments (including
+ // type params) are uniquely specified.
+ if targ.Sym() != nil && targ.Sym().Pkg == types.LocalPkg {
+ b.WriteString(targ.Sym().Pkg.Name)
+ b.WriteByte('.')
+ }
+ // types1 uses "interface {" and types2 uses "interface{" - convert
+ // to consistent types2 format.
+ tstring := targ.String()
+ tstring = strings.Replace(tstring, "interface {", "interface{", -1)
+ b.WriteString(tstring)
+ }
+ b.WriteByte(']')
+ return b.String()
+}
+
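A simplified, standalone sketch of the naming scheme InstTypeName implements: the instantiated type's name is the base name followed by the bracketed, comma-separated type arguments. This version omits the package qualification and "interface {" normalization that the real function performs.
package main

import (
	"fmt"
	"strings"
)

func instTypeName(name string, targs []string) string {
	var b strings.Builder
	b.WriteString(name)
	b.WriteByte('[')
	for i, targ := range targs {
		if i > 0 {
			b.WriteByte(',')
		}
		b.WriteString(targ)
	}
	b.WriteByte(']')
	return b.String()
}

func main() {
	fmt.Println(instTypeName("Value", []string{"int"}))             // Value[int]
	fmt.Println(instTypeName("Pair", []string{"main.T", "string"})) // Pair[main.T,string]
}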
+// NewIncompleteNamedType returns a TFORW type t with name specified by sym, such
+// that t.nod and sym.Def are set correctly.
+func NewIncompleteNamedType(pos src.XPos, sym *types.Sym) *types.Type {
+ name := ir.NewDeclNameAt(pos, ir.OTYPE, sym)
+ forw := types.NewNamed(name)
+ name.SetType(forw)
+ sym.Def = name
+ return forw
+}
+
+// Instantiate creates a new named type which is the instantiation of the base
+// named generic type, with the specified type args.
+func Instantiate(pos src.XPos, baseType *types.Type, targs []*types.Type) *types.Type {
+ baseSym := baseType.Sym()
+ if strings.Index(baseSym.Name, "[") >= 0 {
+ base.Fatalf("arg to Instantiate is not a base generic type")
+ }
+ name := InstTypeName(baseSym.Name, targs)
+ instSym := baseSym.Pkg.Lookup(name)
+ if instSym.Def != nil {
+ // May match existing type from previous import or
+ // types2-to-types1 conversion, or from in-progress instantiation
+ // in the current type import stack.
+ return instSym.Def.Type()
+ }
+
+ t := NewIncompleteNamedType(baseType.Pos(), instSym)
+ t.SetRParams(targs)
+ t.OrigSym = baseSym
+
+ // baseType may still be TFORW or its methods may not be fully filled in
+ // (since we are in the middle of importing it). So, delay call to
+ // substInstType until we get back up to the top of the current top-most
+ // type import.
+ deferredInstStack = append(deferredInstStack, t)
+
+ return t
+}
+
+var deferredInstStack []*types.Type
+var deferInst int
+
+// deferDoInst defers substitution on instantiated types until we are at the
+// top-most defined type, so the base types are fully defined.
+func deferDoInst() {
+ deferInst++
+}
+
+func resumeDoInst() {
+ if deferInst == 1 {
+ for len(deferredInstStack) > 0 {
+ t := deferredInstStack[0]
+ deferredInstStack = deferredInstStack[1:]
+ substInstType(t, t.OrigSym.Def.(*ir.Name).Type(), t.RParams())
+ }
+ }
+ deferInst--
+}
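
The deferral above is a depth-counted work queue: work queued while nested type imports are in progress is flushed only by the outermost resume. A minimal standalone sketch of the same pattern (deferredWork is a made-up type, not the compiler's):

    package main

    import "fmt"

    type deferredWork struct {
        depth int
        queue []func()
    }

    func (d *deferredWork) enter() { d.depth++ }

    func (d *deferredWork) leave() {
        if d.depth == 1 {
            // Drain at the top level; the loop re-checks len, so queued
            // work may safely queue more work.
            for len(d.queue) > 0 {
                f := d.queue[0]
                d.queue = d.queue[1:]
                f()
            }
        }
        d.depth--
    }

    func (d *deferredWork) add(f func()) { d.queue = append(d.queue, f) }

    func main() {
        var d deferredWork
        d.enter()
        d.add(func() { fmt.Println("completed after outermost leave") })
        d.enter() // nested import
        d.leave() // depth is 2 here, so nothing is flushed yet
        d.leave() // depth is 1, so the queue is drained now
    }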
+
+// doInst creates a new instantiation type (which will be added to
+// deferredInstStack for completion later) for an incomplete type encountered
+// during a type substitution for an instantiation. This is needed for
+// instantiations of mutually recursive types.
+func doInst(t *types.Type) *types.Type {
+ return Instantiate(t.Pos(), t.OrigSym.Def.(*ir.Name).Type(), t.RParams())
+}
+
+// substInstType completes the instantiation of a generic type by doing a
+// substitution on the underlying type itself and any methods. t is the
+// instantiation being created, baseType is the base generic type, and targs are
+// the type arguments that baseType is being instantiated with.
+func substInstType(t *types.Type, baseType *types.Type, targs []*types.Type) {
+ subst := Tsubster{
+ Tparams: baseType.RParams(),
+ Targs: targs,
+ SubstForwFunc: doInst,
+ }
+ t.SetUnderlying(subst.Typ(baseType.Underlying()))
+
+ newfields := make([]*types.Field, baseType.Methods().Len())
+ for i, f := range baseType.Methods().Slice() {
+ if !f.IsMethod() || types.IsInterfaceMethod(f.Type) {
+ // Do a normal substitution if this is a non-method (which
+ // means this must be an interface used as a constraint) or
+ // an interface method.
+ t2 := subst.Typ(f.Type)
+ newfields[i] = types.NewField(f.Pos, f.Sym, t2)
+ continue
+ }
+ recvType := f.Type.Recv().Type
+ if recvType.IsPtr() {
+ recvType = recvType.Elem()
+ }
+ // Substitute in the method using the type params used in the
+ // method (not the type params in the definition of the generic type).
+ msubst := Tsubster{
+ Tparams: recvType.RParams(),
+ Targs: targs,
+ SubstForwFunc: doInst,
+ }
+ t2 := msubst.Typ(f.Type)
+ oldsym := f.Nname.Sym()
+ newsym := MakeInstName(oldsym, targs, true)
+ var nname *ir.Name
+ if newsym.Def != nil {
+ nname = newsym.Def.(*ir.Name)
+ } else {
+ nname = ir.NewNameAt(f.Pos, newsym)
+ nname.SetType(t2)
+ newsym.Def = nname
+ }
+ newfields[i] = types.NewField(f.Pos, f.Sym, t2)
+ newfields[i].Nname = nname
+ }
+ t.Methods().Set(newfields)
+}
diff --git a/src/cmd/compile/internal/typecheck/stmt.go b/src/cmd/compile/internal/typecheck/stmt.go
index 922a01bfbe..c322d490e5 100644
--- a/src/cmd/compile/internal/typecheck/stmt.go
+++ b/src/cmd/compile/internal/typecheck/stmt.go
@@ -172,6 +172,10 @@ assignOK:
r := r.(*ir.TypeAssertExpr)
stmt.SetOp(ir.OAS2DOTTYPE)
r.SetOp(ir.ODOTTYPE2)
+ case ir.ODYNAMICDOTTYPE:
+ r := r.(*ir.DynamicTypeAssertExpr)
+ stmt.SetOp(ir.OAS2DOTTYPE)
+ r.SetOp(ir.ODYNAMICDOTTYPE2)
default:
break assignOK
}
@@ -201,7 +205,6 @@ assignOK:
stmt := stmt.(*ir.AssignListStmt)
stmt.SetOp(ir.OAS2FUNC)
r := rhs[0].(*ir.CallExpr)
- r.Use = ir.CallUseList
rtyp := r.Type()
mismatched := false
@@ -217,7 +220,7 @@ assignOK:
}
}
if mismatched && !failed {
- rewriteMultiValueCall(stmt, r)
+ RewriteMultiValueCall(stmt, r)
}
return
}
@@ -237,6 +240,15 @@ func plural(n int) string {
return "s"
}
+// tcCheckNil typechecks an OCHECKNIL node.
+func tcCheckNil(n *ir.UnaryExpr) ir.Node {
+ n.X = Expr(n.X)
+ if !n.X.Type().IsPtrShaped() {
+ base.FatalfAt(n.Pos(), "%L is not pointer shaped", n.X)
+ }
+ return n
+}
+
// tcFor typechecks an OFOR node.
func tcFor(n *ir.ForStmt) ir.Node {
Stmts(n.Init())
@@ -653,29 +665,18 @@ func tcSwitchType(n *ir.SwitchStmt) {
}
type typeSet struct {
- m map[string][]typeSetEntry
-}
-
-type typeSetEntry struct {
- pos src.XPos
- typ *types.Type
+ m map[string]src.XPos
}
func (s *typeSet) add(pos src.XPos, typ *types.Type) {
if s.m == nil {
- s.m = make(map[string][]typeSetEntry)
+ s.m = make(map[string]src.XPos)
}
- // LongString does not uniquely identify types, so we need to
- // disambiguate collisions with types.Identical.
- // TODO(mdempsky): Add a method that *is* unique.
- ls := typ.LongString()
- prevs := s.m[ls]
- for _, prev := range prevs {
- if types.Identical(typ, prev.typ) {
- base.ErrorfAt(pos, "duplicate case %v in type switch\n\tprevious case at %s", typ, base.FmtPos(prev.pos))
- return
- }
+ ls := typ.LinkString()
+ if prev, ok := s.m[ls]; ok {
+ base.ErrorfAt(pos, "duplicate case %v in type switch\n\tprevious case at %s", typ, base.FmtPos(prev))
+ return
}
- s.m[ls] = append(prevs, typeSetEntry{pos, typ})
+ s.m[ls] = pos
}
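
Since LinkString uniquely identifies a type within one compile process, duplicate detection becomes a plain map lookup keyed by that string, with the first position as the value. A standalone sketch of the resulting shape (pos is a made-up stand-in for src.XPos):

    package main

    import "fmt"

    type pos struct{ line int }

    type typeSet struct {
        m map[string]pos
    }

    func (s *typeSet) add(p pos, key string) error {
        if s.m == nil {
            s.m = make(map[string]pos)
        }
        if prev, ok := s.m[key]; ok {
            return fmt.Errorf("duplicate case %s in type switch; previous case at line %d", key, prev.line)
        }
        s.m[key] = p
        return nil
    }

    func main() {
        var s typeSet
        fmt.Println(s.add(pos{3}, `"".myInt`)) // <nil>
        fmt.Println(s.add(pos{7}, `"".myInt`)) // duplicate case ...
    }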
diff --git a/src/cmd/compile/internal/typecheck/subr.go b/src/cmd/compile/internal/typecheck/subr.go
index 9ee7a94b1f..e86c4c6bca 100644
--- a/src/cmd/compile/internal/typecheck/subr.go
+++ b/src/cmd/compile/internal/typecheck/subr.go
@@ -5,6 +5,7 @@
package typecheck
import (
+ "bytes"
"fmt"
"sort"
"strconv"
@@ -352,9 +353,10 @@ func Assignop(src, dst *types.Type) (ir.Op, string) {
return ir.OCONVNOP, ""
}
- // 2. src and dst have identical underlying types
- // and either src or dst is not a named type or
- // both are empty interface types.
+ // 2. src and dst have identical underlying types and
+ // a. either src or dst is not a named type, or
+ // b. both are empty interface types, or
+ // c. at least one is a gcshape type.
// For assignable but different non-empty interface types,
// we want to recompute the itab. Recomputing the itab ensures
// that itabs are unique (thus an interface with a compile-time
@@ -371,21 +373,24 @@ func Assignop(src, dst *types.Type) (ir.Op, string) {
// which need to have their itab updated.
return ir.OCONVNOP, ""
}
+ if src.IsShape() || dst.IsShape() {
+ // Conversion between a shape type and one of the types
+ // it represents also needs no conversion.
+ return ir.OCONVNOP, ""
+ }
}
// 3. dst is an interface type and src implements dst.
if dst.IsInterface() && src.Kind() != types.TNIL {
var missing, have *types.Field
var ptr int
+ if src.IsShape() {
+ // Shape types implement things they have already
+ // been typechecked to implement, even if they
+ // don't have the methods for them.
+ return ir.OCONVIFACE, ""
+ }
if implements(src, dst, &missing, &have, &ptr) {
- // Call NeedITab/ITabAddr so that (src, dst)
- // gets added to itabs early, which allows
- // us to de-virtualize calls through this
- // type/interface pair later. See CompileITabs in reflect.go
- if types.IsDirectIface(src) && !dst.IsEmptyInterface() {
- NeedITab(src, dst)
- }
-
return ir.OCONVIFACE, ""
}
@@ -722,13 +727,23 @@ func ifacelookdot(s *types.Sym, t *types.Type, ignorecase bool) (m *types.Field,
return m, followptr
}
+// implements reports whether t implements the interface iface. t can be
+// an interface, a type parameter, or a concrete type. If implements returns
+// false, it stores in *m a method of iface that is not implemented. If the
+// method name matches but the type is wrong, it additionally stores in
+// *samename the almost-matching method found on t.
func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool {
t0 := t
if t == nil {
return false
}
- if t.IsInterface() {
+ if t.IsInterface() || t.IsTypeParam() {
+ if t.IsTypeParam() {
+ // A typeparam satisfies an interface if its type bound
+ // has all the methods of that interface.
+ t = t.Bound()
+ }
i := 0
tms := t.AllMethods().Slice()
for _, im := range iface.AllMethods().Slice() {
@@ -874,3 +889,515 @@ var slist []symlink
type symlink struct {
field *types.Field
}
+
+// TypesOf converts a list of nodes to a list
+// of types of those nodes.
+func TypesOf(x []ir.Node) []*types.Type {
+ r := make([]*types.Type, len(x))
+ for i, n := range x {
+ r[i] = n.Type()
+ }
+ return r
+}
+
+// makeGenericName returns the name of the generic function instantiated
+// with the given types.
+// name is the name of the generic function or method.
+func makeGenericName(name string, targs []*types.Type, hasBrackets bool) string {
+ b := bytes.NewBufferString("")
+
+ // Determine if the type args are concrete types or new typeparams.
+ hasTParam := false
+ for _, targ := range targs {
+ if hasTParam {
+ assert(targ.HasTParam() || targ.HasShape())
+ } else if targ.HasTParam() || targ.HasShape() {
+ hasTParam = true
+ }
+ }
+
+ // Marker to distinguish generic instantiations from fully stenciled wrapper functions.
+ // Once we move to GC shape implementations, this prefix will not be necessary as the
+ // GC shape naming will distinguish them.
+ // e.g. f[8bytenonpointer] vs. f[int].
+ // For now, we use .inst.f[int] vs. f[int].
+ if !hasTParam {
+ b.WriteString(".inst.")
+ }
+
+ i := strings.Index(name, "[")
+ assert(hasBrackets == (i >= 0))
+ if i >= 0 {
+ b.WriteString(name[0:i])
+ } else {
+ b.WriteString(name)
+ }
+ b.WriteString("[")
+ for i, targ := range targs {
+ if i > 0 {
+ b.WriteString(",")
+ }
+ // WriteString() does not include the package name for the local
+ // package, but we want it for uniqueness.
+ if targ.Sym() != nil && targ.Sym().Pkg == types.LocalPkg {
+ b.WriteString(targ.Sym().Pkg.Name)
+ b.WriteByte('.')
+ }
+ // types1 uses "interface {" and types2 uses "interface{" - convert
+ // to consistent types2 format.
+ tstring := targ.String()
+ tstring = strings.Replace(tstring, "interface {", "interface{", -1)
+ b.WriteString(tstring)
+ }
+ b.WriteString("]")
+ if i >= 0 {
+ i2 := strings.LastIndex(name[i:], "]")
+ assert(i2 >= 0)
+ b.WriteString(name[i+i2+1:])
+ }
+ if strings.HasPrefix(b.String(), ".inst..inst.") {
+ panic(fmt.Sprintf("multiple .inst. prefix in %s", b.String()))
+ }
+ return b.String()
+}
+
+// MakeInstName makes the unique name for a stenciled generic function or method,
+// based on the name of the generic function or method gf and the type args targs.
+// It replaces any existing bracket type list in the name. MakeInstName asserts that
+// gf has brackets in its name if and only if hasBrackets is true.
+//
+// Names of declared generic functions have no brackets originally, so hasBrackets
+// should be false. Names of generic methods already have brackets, since the new
+// type parameter is specified in the generic type of the receiver (e.g. func
+// (v *value[T]) set(...) { ... } has the original name (*value[T]).set).
+//
+// The standard naming is something like: 'genFn[int,bool]' for functions and
+// '(*genType[int,bool]).methodName' for methods.
+func MakeInstName(gf *types.Sym, targs []*types.Type, hasBrackets bool) *types.Sym {
+ return gf.Pkg.Lookup(makeGenericName(gf.Name, targs, hasBrackets))
+}
+
+func MakeDictName(gf *types.Sym, targs []*types.Type, hasBrackets bool) *types.Sym {
+ for _, targ := range targs {
+ if targ.HasTParam() {
+ fmt.Printf("FUNCTION %s\n", gf.Name)
+ for _, targ := range targs {
+ fmt.Printf(" PARAM %+v\n", targ)
+ }
+ panic("dictionary should always have concrete type args")
+ }
+ }
+ name := makeGenericName(gf.Name, targs, hasBrackets)
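+ // Strip the first 6 bytes (the ".inst." prefix added by makeGenericName) and use a ".dict." prefix instead.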
+ name = ".dict." + name[6:]
+ return gf.Pkg.Lookup(name)
+}
+
+func assert(p bool) {
+ base.Assert(p)
+}
+
+// General type substituter, for replacing typeparams with type args.
+type Tsubster struct {
+ Tparams []*types.Type
+ Targs []*types.Type
+ // If non-nil, the substitution map from name nodes in the generic function to the
+ // name nodes in the new stenciled function.
+ Vars map[*ir.Name]*ir.Name
+ // New fully-instantiated generic types whose methods should be instantiated.
+ InstTypeList []*types.Type
+ // If non-nil, function to substitute an incomplete (TFORW) type.
+ SubstForwFunc func(*types.Type) *types.Type
+}
+
+// Typ computes the type obtained by substituting any type parameter in t with the
+// corresponding type argument in ts.Targs. If t contains no type parameters, the
+// result is t; otherwise the result is a new type. It deals with recursive types
+// by using TFORW types and finding partially or fully created types via sym.Def.
+func (ts *Tsubster) Typ(t *types.Type) *types.Type {
+ if !t.HasTParam() && !t.HasShape() && t.Kind() != types.TFUNC {
+ // Note: function types need to be copied regardless, as the
+ // types of closures may contain declarations that need
+ // to be copied. See #45738.
+ return t
+ }
+
+ if t.IsTypeParam() || t.IsShape() {
+ for i, tp := range ts.Tparams {
+ if tp == t {
+ return ts.Targs[i]
+ }
+ }
+ // If t is a simple typeparam T, then t has the name/symbol 'T'
+ // and t.Underlying() == t.
+ //
+ // However, consider the type definition: 'type P[T any] T'. We
+ // might use this definition so we can have a variant of type T
+ // that we can add new methods to. Suppose t is a reference to
+ // P[T]. t has the name 'P[T]', but its kind is TTYPEPARAM,
+ // because P[T] is defined as T. If we look at t.Underlying(), it
+ // is different, because the name of t.Underlying() is 'T' rather
+ // than 'P[T]'. But the kind of t.Underlying() is also TTYPEPARAM.
+ // In this case, we do the needed recursive substitution in the
+ // case statement below.
+ if t.Underlying() == t {
+ // t is a simple typeparam that didn't match anything in tparam
+ return t
+ }
+ // t is a more complex typeparam (e.g. P[T], as above, whose
+ // definition is just T).
+ assert(t.Sym() != nil)
+ }
+
+ var newsym *types.Sym
+ var neededTargs []*types.Type
+ var targsChanged bool
+ var forw *types.Type
+
+ if t.Sym() != nil {
+ // Translate the type params for this type according to
+ // the tparam/targs mapping from subst.
+ neededTargs = make([]*types.Type, len(t.RParams()))
+ for i, rparam := range t.RParams() {
+ neededTargs[i] = ts.Typ(rparam)
+ if !types.Identical(neededTargs[i], rparam) {
+ targsChanged = true
+ }
+ }
+ // For a named (defined) type, we have to change the name of the
+ // type as well. We do this first, so we can look up if we've
+ // already seen this type during this substitution or other
+ // definitions/substitutions.
+ genName := genericTypeName(t.Sym())
+ newsym = t.Sym().Pkg.Lookup(InstTypeName(genName, neededTargs))
+ if newsym.Def != nil {
+ // We've already created this instantiated defined type.
+ return newsym.Def.Type()
+ }
+
+ // In order to deal with recursive generic types, create a TFORW
+ // type initially and set the Def field of its sym, so it can be
+ // found if this type appears recursively within the type.
+ forw = NewIncompleteNamedType(t.Pos(), newsym)
+ //println("Creating new type by sub", newsym.Name, forw.HasTParam())
+ forw.SetRParams(neededTargs)
+ // Copy the OrigSym from the re-instantiated type (which is the sym of
+ // the base generic type).
+ assert(t.OrigSym != nil)
+ forw.OrigSym = t.OrigSym
+ }
+
+ var newt *types.Type
+
+ switch t.Kind() {
+ case types.TTYPEPARAM:
+ if t.Sym() == newsym && !targsChanged {
+ // The substitution did not change the type.
+ return t
+ }
+ // Substitute the underlying typeparam (e.g. T in P[T], see
+ // the example describing type P[T] above).
+ newt = ts.Typ(t.Underlying())
+ assert(newt != t)
+
+ case types.TARRAY:
+ elem := t.Elem()
+ newelem := ts.Typ(elem)
+ if newelem != elem || targsChanged {
+ newt = types.NewArray(newelem, t.NumElem())
+ }
+
+ case types.TPTR:
+ elem := t.Elem()
+ newelem := ts.Typ(elem)
+ if newelem != elem || targsChanged {
+ newt = types.NewPtr(newelem)
+ }
+
+ case types.TSLICE:
+ elem := t.Elem()
+ newelem := ts.Typ(elem)
+ if newelem != elem || targsChanged {
+ newt = types.NewSlice(newelem)
+ }
+
+ case types.TSTRUCT:
+ newt = ts.tstruct(t, targsChanged)
+ if newt == t {
+ newt = nil
+ }
+
+ case types.TFUNC:
+ newrecvs := ts.tstruct(t.Recvs(), false)
+ newparams := ts.tstruct(t.Params(), false)
+ newresults := ts.tstruct(t.Results(), false)
+ // Translate the tparams of a signature.
+ newtparams := ts.tstruct(t.TParams(), false)
+ if newrecvs != t.Recvs() || newparams != t.Params() ||
+ newresults != t.Results() || newtparams != t.TParams() || targsChanged {
+ // If any types have changed, then all the fields of
+ // recv, params, and results must be copied, because they have
+ // offset fields that are dependent, and so must have an
+ // independent copy for each new signature.
+ var newrecv *types.Field
+ if newrecvs.NumFields() > 0 {
+ if newrecvs == t.Recvs() {
+ newrecvs = ts.tstruct(t.Recvs(), true)
+ }
+ newrecv = newrecvs.Field(0)
+ }
+ if newparams == t.Params() {
+ newparams = ts.tstruct(t.Params(), true)
+ }
+ if newresults == t.Results() {
+ newresults = ts.tstruct(t.Results(), true)
+ }
+ var tparamfields []*types.Field
+ if newtparams.HasTParam() {
+ tparamfields = newtparams.FieldSlice()
+ } else {
+ // Completely remove the tparams from the resulting
+ // signature, if the tparams are now concrete types.
+ tparamfields = nil
+ }
+ newt = types.NewSignature(t.Pkg(), newrecv, tparamfields,
+ newparams.FieldSlice(), newresults.FieldSlice())
+ }
+
+ case types.TINTER:
+ newt = ts.tinter(t)
+ if newt == t && !targsChanged {
+ newt = nil
+ }
+
+ case types.TMAP:
+ newkey := ts.Typ(t.Key())
+ newval := ts.Typ(t.Elem())
+ if newkey != t.Key() || newval != t.Elem() || targsChanged {
+ newt = types.NewMap(newkey, newval)
+ }
+
+ case types.TCHAN:
+ elem := t.Elem()
+ newelem := ts.Typ(elem)
+ if newelem != elem || targsChanged {
+ newt = types.NewChan(newelem, t.ChanDir())
+ if !newt.HasTParam() {
+ // TODO(danscales): not sure why I have to do this
+ // only for channels.....
+ types.CheckSize(newt)
+ }
+ }
+ case types.TFORW:
+ if ts.SubstForwFunc != nil {
+ newt = ts.SubstForwFunc(t)
+ } else {
+ assert(false)
+ }
+ case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64,
+ types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64,
+ types.TUINTPTR, types.TBOOL, types.TSTRING, types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128:
+ newt = t.Underlying()
+ case types.TUNION:
+ nt := t.NumTerms()
+ newterms := make([]*types.Type, nt)
+ tildes := make([]bool, nt)
+ changed := false
+ for i := 0; i < nt; i++ {
+ term, tilde := t.Term(i)
+ tildes[i] = tilde
+ newterms[i] = ts.Typ(term)
+ if newterms[i] != term {
+ changed = true
+ }
+ }
+ if changed {
+ newt = types.NewUnion(newterms, tildes)
+ }
+ default:
+ panic(fmt.Sprintf("Bad type in (*Tsubster).Typ: %v", t.Kind()))
+ }
+ if newt == nil {
+ // Even though there were typeparams in the type, there may be no
+ // change if this is a function type for a function call (which will
+ // have its own tparams/targs in the function instantiation).
+ return t
+ }
+
+ if t.Sym() == nil && t.Kind() != types.TINTER {
+ // Not a named type or interface type, so there was no forwarding type
+ // and there are no methods to substitute.
+ assert(t.Methods().Len() == 0)
+ return newt
+ }
+
+ if forw != nil {
+ forw.SetUnderlying(newt)
+ newt = forw
+ }
+
+ if t.Kind() != types.TINTER && t.Methods().Len() > 0 {
+ // Fill in the method info for the new type.
+ var newfields []*types.Field
+ newfields = make([]*types.Field, t.Methods().Len())
+ for i, f := range t.Methods().Slice() {
+ t2 := ts.Typ(f.Type)
+ oldsym := f.Nname.Sym()
+ newsym := MakeInstName(oldsym, ts.Targs, true)
+ var nname *ir.Name
+ if newsym.Def != nil {
+ nname = newsym.Def.(*ir.Name)
+ } else {
+ nname = ir.NewNameAt(f.Pos, newsym)
+ nname.SetType(t2)
+ newsym.Def = nname
+ }
+ newfields[i] = types.NewField(f.Pos, f.Sym, t2)
+ newfields[i].Nname = nname
+ }
+ newt.Methods().Set(newfields)
+ if !newt.HasTParam() && !newt.HasShape() {
+ // Generate all the methods for a new fully-instantiated type.
+ ts.InstTypeList = append(ts.InstTypeList, newt)
+ }
+ }
+ return newt
+}
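
The structural cases above (array, pointer, slice, map, chan, and so on) all follow one convention: recurse into the component types and allocate a new type only when something actually changed. A toy standalone sketch of that convention, using a made-up type hierarchy rather than the compiler's *types.Type (named types, methods, and TFORW recursion are left out):

    package main

    import "fmt"

    type typ interface{ String() string }

    type param struct{ name string }
    type slice struct{ elem typ }
    type ptr struct{ elem typ }
    type basic struct{ name string }

    func (p param) String() string { return p.name }
    func (s slice) String() string { return "[]" + s.elem.String() }
    func (p ptr) String() string   { return "*" + p.elem.String() }
    func (b basic) String() string { return b.name }

    // subst replaces occurrences of tparam with targ, returning t itself
    // when nothing underneath was substituted.
    func subst(t typ, tparam param, targ typ) typ {
        switch t := t.(type) {
        case param:
            if t == tparam {
                return targ
            }
        case slice:
            if e := subst(t.elem, tparam, targ); e != t.elem {
                return slice{e}
            }
        case ptr:
            if e := subst(t.elem, tparam, targ); e != t.elem {
                return ptr{e}
            }
        }
        return t
    }

    func main() {
        T := param{"T"}
        fmt.Println(subst(slice{ptr{T}}, T, basic{"int"}))        // []*int
        fmt.Println(subst(slice{basic{"byte"}}, T, basic{"int"})) // []byte, unchanged
    }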
+
+// tstruct substitutes type params in types of the fields of a structure type. For
+// each field, tstruct copies the Nname, and translates it if Nname is in
+// ts.vars. To always force the creation of a new (top-level) struct,
+// regardless of whether anything changed with the types or names of the struct's
+// fields, set force to true.
+func (ts *Tsubster) tstruct(t *types.Type, force bool) *types.Type {
+ if t.NumFields() == 0 {
+ if t.HasTParam() {
+ // For an empty struct, we need to return a new type,
+ // since it may now be fully instantiated (HasTParam
+ // becomes false).
+ return types.NewStruct(t.Pkg(), nil)
+ }
+ return t
+ }
+ var newfields []*types.Field
+ if force {
+ newfields = make([]*types.Field, t.NumFields())
+ }
+ for i, f := range t.Fields().Slice() {
+ t2 := ts.Typ(f.Type)
+ if (t2 != f.Type || f.Nname != nil) && newfields == nil {
+ newfields = make([]*types.Field, t.NumFields())
+ for j := 0; j < i; j++ {
+ newfields[j] = t.Field(j)
+ }
+ }
+ if newfields != nil {
+ // TODO(danscales): make sure this works for the field
+ // names of embedded types (which should keep the name of
+ // the type param, not the instantiated type).
+ newfields[i] = types.NewField(f.Pos, f.Sym, t2)
+ newfields[i].Embedded = f.Embedded
+ if f.IsDDD() {
+ newfields[i].SetIsDDD(true)
+ }
+ if f.Nointerface() {
+ newfields[i].SetNointerface(true)
+ }
+ if f.Nname != nil && ts.Vars != nil {
+ v := ts.Vars[f.Nname.(*ir.Name)]
+ if v != nil {
+ // This is the case where we are
+ // translating the type of the function we
+ // are substituting, so its dcls are in
+ // the ts.Vars table, and we want to
+ // change to reference the new dcl.
+ newfields[i].Nname = v
+ } else {
+ // This is the case where we are
+ // translating the type of a function
+ // reference inside the function we are
+ // substituting, so we leave the Nname
+ // value as is.
+ newfields[i].Nname = f.Nname
+ }
+ }
+ }
+ }
+ if newfields != nil {
+ return types.NewStruct(t.Pkg(), newfields)
+ }
+ return t
+
+}
+
+// tinter substitutes type params in types of the methods of an interface type.
+func (ts *Tsubster) tinter(t *types.Type) *types.Type {
+ if t.Methods().Len() == 0 {
+ return t
+ }
+ var newfields []*types.Field
+ for i, f := range t.Methods().Slice() {
+ t2 := ts.Typ(f.Type)
+ if (t2 != f.Type || f.Nname != nil) && newfields == nil {
+ newfields = make([]*types.Field, t.Methods().Len())
+ for j := 0; j < i; j++ {
+ newfields[j] = t.Methods().Index(j)
+ }
+ }
+ if newfields != nil {
+ newfields[i] = types.NewField(f.Pos, f.Sym, t2)
+ }
+ }
+ if newfields != nil {
+ return types.NewInterface(t.Pkg(), newfields)
+ }
+ return t
+}
+
+// genericTypeName returns the name of the base generic type for the type named by
+// sym. It simply returns the name obtained by removing everything after the
+// first bracket ("[").
+func genericTypeName(sym *types.Sym) string {
+ return sym.Name[0:strings.Index(sym.Name, "[")]
+}
+
+// Shapify takes a concrete type and returns a GCshape type that can
+// be used in place of the input type and still generate identical code.
+// No methods are added - all method calls directly on a shape should
+// be done by converting to an interface using the dictionary.
+//
+// TODO: this could take the generic function and base its decisions
+// on how that generic function uses this type argument. For instance,
+// if it doesn't use it as a function argument/return value, then
+// we don't need to distinguish int64 and float64 (because they only
+// differ in how they get passed as arguments). For now, we only
+// unify two different types if they are identical in every possible way.
+func Shapify(t *types.Type) *types.Type {
+ assert(!t.HasShape())
+ // Map all types with the same underlying type to the same shape.
+ u := t.Underlying()
+
+ // All pointers have the same shape.
+ // TODO: Make unsafe.Pointer the same shape as normal pointers.
+ if u.Kind() == types.TPTR {
+ u = types.Types[types.TUINT8].PtrTo()
+ }
+
+ if s := shaped[u]; s != nil {
+ return s
+ }
+
+ sym := shapePkg.Lookup(u.LinkString())
+ name := ir.NewDeclNameAt(u.Pos(), ir.OTYPE, sym)
+ s := types.NewNamed(name)
+ s.SetUnderlying(u)
+ s.SetIsShape(true)
+ s.SetHasShape(true)
+ name.SetType(s)
+ name.SetTypecheck(1)
+ shaped[u] = s
+ return s
+}
+
+var shaped = map[*types.Type]*types.Type{}
+
+var shapePkg = types.NewPkg(".shape", ".shape")
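
Shapify's core idea is to canonicalize each type argument by its underlying type (with all pointers collapsed to a single shape) and memoize the result. A standalone sketch with strings standing in for *types.Type and LinkString keys; shapify and its parameters are made up for the example:

    package main

    import "fmt"

    var shaped = map[string]string{}

    // shapify returns a canonical shape key for a type described by its kind
    // and the string form of its underlying type.
    func shapify(kind, underlying string) string {
        // All pointers share one shape, mirroring the TPTR special case above.
        if kind == "ptr" {
            underlying = "*uint8"
        }
        if s, ok := shaped[underlying]; ok {
            return s
        }
        s := ".shape." + underlying
        shaped[underlying] = s
        return s
    }

    func main() {
        fmt.Println(shapify("named", "int"))   // .shape.int
        fmt.Println(shapify("basic", "int"))   // .shape.int (memoized, same shape)
        fmt.Println(shapify("ptr", "*MyType")) // .shape.*uint8
    }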
diff --git a/src/cmd/compile/internal/typecheck/syms.go b/src/cmd/compile/internal/typecheck/syms.go
index f29af82db2..ed3aaecc5a 100644
--- a/src/cmd/compile/internal/typecheck/syms.go
+++ b/src/cmd/compile/internal/typecheck/syms.go
@@ -75,9 +75,9 @@ func InitRuntime() {
typ := typs[d.typ]
switch d.tag {
case funcTag:
- importfunc(ir.Pkgs.Runtime, src.NoXPos, sym, typ)
+ importfunc(src.NoXPos, sym, typ)
case varTag:
- importvar(ir.Pkgs.Runtime, src.NoXPos, sym, typ)
+ importvar(src.NoXPos, sym, typ)
default:
base.Fatalf("unhandled declaration tag %v", d.tag)
}
diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go
index 359f662369..db1b11c4cf 100644
--- a/src/cmd/compile/internal/typecheck/typecheck.go
+++ b/src/cmd/compile/internal/typecheck/typecheck.go
@@ -13,6 +13,7 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
+ "cmd/internal/src"
)
// Function collecting autotmps generated during typechecking,
@@ -24,7 +25,6 @@ var inimport bool // set during import
var TypecheckAllowed bool
var (
- NeedITab = func(t, itype *types.Type) {}
NeedRuntimeType = func(*types.Type) {}
)
@@ -35,18 +35,10 @@ func Stmt(n ir.Node) ir.Node { return typecheck(n, ctxStmt) }
func Exprs(exprs []ir.Node) { typecheckslice(exprs, ctxExpr) }
func Stmts(stmts []ir.Node) { typecheckslice(stmts, ctxStmt) }
-func Call(call *ir.CallExpr) {
- t := call.X.Type()
- if t == nil {
- panic("misuse of Call")
- }
- ctx := ctxStmt
- if t.NumResults() > 0 {
- ctx = ctxExpr | ctxMultiOK
- }
- if typecheck(call, ctx) != call {
- panic("bad typecheck")
- }
+func Call(pos src.XPos, callee ir.Node, args []ir.Node, dots bool) ir.Node {
+ call := ir.NewCallExpr(pos, ir.OCALL, callee, args)
+ call.IsDDD = dots
+ return typecheck(call, ctxStmt|ctxExpr)
}
func Callee(n ir.Node) ir.Node {
@@ -59,8 +51,8 @@ func FuncBody(n *ir.Func) {
Stmts(n.Body)
CheckUnused(n)
CheckReturn(n)
- if base.Errors() > errorsBefore {
- n.Body = nil // type errors; do not compile
+ if ir.IsBlank(n.Nname) || base.Errors() > errorsBefore {
+ n.Body = nil // blank function or type errors; do not compile
}
}
@@ -777,6 +769,10 @@ func typecheck1(n ir.Node, top int) ir.Node {
n := n.(*ir.CallExpr)
return tcRecover(n)
+ case ir.ORECOVERFP:
+ n := n.(*ir.CallExpr)
+ return tcRecoverFP(n)
+
case ir.OUNSAFEADD:
n := n.(*ir.BinaryExpr)
return tcUnsafeAdd(n)
@@ -787,11 +783,7 @@ func typecheck1(n ir.Node, top int) ir.Node {
case ir.OCLOSURE:
n := n.(*ir.ClosureExpr)
- tcClosure(n, top)
- if n.Type() == nil {
- return n
- }
- return n
+ return tcClosure(n, top)
case ir.OITAB:
n := n.(*ir.UnaryExpr)
@@ -814,6 +806,14 @@ func typecheck1(n ir.Node, top int) ir.Node {
n.SetType(types.Types[types.TUINTPTR])
return n
+ case ir.OGETCALLERPC, ir.OGETCALLERSP:
+ n := n.(*ir.CallExpr)
+ if len(n.Args) != 0 {
+ base.FatalfAt(n.Pos(), "unexpected arguments: %v", n)
+ }
+ n.SetType(types.Types[types.TUINTPTR])
+ return n
+
case ir.OCONVNOP:
n := n.(*ir.ConvExpr)
n.X = Expr(n.X)
@@ -881,6 +881,10 @@ func typecheck1(n ir.Node, top int) ir.Node {
n := n.(*ir.TailCallStmt)
return n
+ case ir.OCHECKNIL:
+ n := n.(*ir.UnaryExpr)
+ return tcCheckNil(n)
+
case ir.OSELECT:
tcSelect(n.(*ir.SelectStmt))
return n
@@ -951,12 +955,12 @@ func typecheckargs(n ir.InitNode) {
}
// Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...).
- rewriteMultiValueCall(n, list[0])
+ RewriteMultiValueCall(n, list[0])
}
-// rewriteMultiValueCall rewrites multi-valued f() to use temporaries,
+// RewriteMultiValueCall rewrites multi-valued f() to use temporaries,
// so the backend wouldn't need to worry about tuple-valued expressions.
-func rewriteMultiValueCall(n ir.InitNode, call ir.Node) {
+func RewriteMultiValueCall(n ir.InitNode, call ir.Node) {
// If we're outside of function context, then this call will
// be executed during the generated init function. However,
// init.go hasn't yet created it. Instead, associate the
@@ -1913,11 +1917,6 @@ func typecheckdef(n *ir.Name) {
n.SetDiag(true)
goto ret
}
- // For package-level type aliases, set n.Sym.Def so we can identify
- // it as a type alias during export. See also #31959.
- if n.Curfn == nil {
- n.Sym().Def = n.Ntype
- }
}
break
}
diff --git a/src/cmd/compile/internal/typecheck/universe.go b/src/cmd/compile/internal/typecheck/universe.go
index de185ab944..54f3c89c24 100644
--- a/src/cmd/compile/internal/typecheck/universe.go
+++ b/src/cmd/compile/internal/typecheck/universe.go
@@ -158,6 +158,15 @@ func InitUniverse() {
s.Def = n
types.CalcSize(types.ErrorType)
+ // comparable type (interface)
+ s = types.BuiltinPkg.Lookup("comparable")
+ n = ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, s)
+ types.ComparableType = types.NewNamed(n)
+ types.ComparableType.SetUnderlying(makeComparableInterface())
+ n.SetType(types.ComparableType)
+ s.Def = n
+ types.CalcSize(types.ComparableType)
+
types.Types[types.TUNSAFEPTR] = defBasic(types.TUNSAFEPTR, ir.Pkgs.Unsafe, "Pointer")
// simple aliases
@@ -338,6 +347,12 @@ func makeErrorInterface() *types.Type {
return types.NewInterface(types.NoPkg, []*types.Field{method})
}
+func makeComparableInterface() *types.Type {
+ sig := types.NewSignature(types.NoPkg, fakeRecvField(), nil, nil, nil)
+ method := types.NewField(src.NoXPos, Lookup("=="), sig)
+ return types.NewInterface(types.NoPkg, []*types.Field{method})
+}
+
// DeclareUniverse makes the universe block visible within the current package.
func DeclareUniverse() {
// Operationally, this is similar to a dot import of builtinpkg, except
diff --git a/src/cmd/compile/internal/types/fmt.go b/src/cmd/compile/internal/types/fmt.go
index 8b988952a7..0824f6d093 100644
--- a/src/cmd/compile/internal/types/fmt.go
+++ b/src/cmd/compile/internal/types/fmt.go
@@ -239,17 +239,37 @@ func (t *Type) String() string {
return tconv(t, 0, fmtGo)
}
-// ShortString generates a short description of t.
-// It is used in autogenerated method names, reflection,
-// and itab names.
-func (t *Type) ShortString() string {
+// LinkString returns an unexpanded string description of t, suitable
+// for use in link symbols. "Unexpanded" here means that the
+// description uses `"".` to qualify identifiers from the current
+// package, and "expansion" refers to the renaming step performed by
+// the linker to replace these qualifiers with proper `path/to/pkg.`
+// qualifiers.
+//
+// After expansion, the description corresponds to type identity. That
+// is, for any pair of types t1 and t2, Identical(t1, t2) and
+// expand(t1.LinkString()) == expand(t2.LinkString()) report the same
+// value.
+//
+// Within a single compilation unit, LinkString always returns the
+// same unexpanded description for identical types. Thus it's safe to
+// use as a map key to implement a type-identity-keyed map. However,
+// make sure all LinkString calls used for this purpose happen within
+// the same compile process; the string keys are not stable across
+// multiple processes.
+func (t *Type) LinkString() string {
return tconv(t, 0, fmtTypeID)
}
-// LongString generates a complete description of t.
-// It is useful for reflection,
-// or when a unique fingerprint or hash of a type is required.
-func (t *Type) LongString() string {
+// NameString generates a user-readable, mostly unique string
+// description of t. NameString always returns the same description
+// for identical types, even across compilation units.
+//
+// NameString qualifies identifiers by package name, so it has
+// collisions when different packages share the same names and
+// identifiers. It also does not distinguish function-scope defined
+// types from package-scoped defined types or from each other.
+func (t *Type) NameString() string {
return tconv(t, 0, fmtTypeIDName)
}
@@ -316,31 +336,34 @@ func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type
// Unless the 'L' flag was specified, if the type has a name, just print that name.
if verb != 'L' && t.Sym() != nil && t != Types[t.Kind()] {
- switch mode {
- case fmtTypeID, fmtTypeIDName:
- if verb == 'S' {
- if t.Vargen != 0 {
- sconv2(b, t.Sym(), 'S', mode)
- fmt.Fprintf(b, "·%d", t.Vargen)
- return
- }
- sconv2(b, t.Sym(), 'S', mode)
- return
- }
+ // Default to 'v' if verb is invalid.
+ if verb != 'S' {
+ verb = 'v'
+ }
- if mode == fmtTypeIDName {
- sconv2(b, t.Sym(), 'v', fmtTypeIDName)
- return
+ // In unified IR, function-scope defined types will have a ·N
+ // suffix embedded directly in their Name. Trim this off for
+ // non-fmtTypeID modes.
+ sym := t.Sym()
+ if mode != fmtTypeID {
+ i := len(sym.Name)
+ for i > 0 && sym.Name[i-1] >= '0' && sym.Name[i-1] <= '9' {
+ i--
}
-
- if t.Sym().Pkg == LocalPkg && t.Vargen != 0 {
- sconv2(b, t.Sym(), 'v', mode)
- fmt.Fprintf(b, "·%d", t.Vargen)
- return
+ const dot = "·"
+ if i >= len(dot) && sym.Name[i-len(dot):i] == dot {
+ sym = &Sym{Pkg: sym.Pkg, Name: sym.Name[:i-len(dot)]}
}
}
-
- sconv2(b, t.Sym(), 'v', mode)
+ sconv2(b, sym, verb, mode)
+
+ // TODO(mdempsky): Investigate including Vargen in fmtTypeIDName
+ // output too. It seems like it should, but that mode is currently
+ // used in string representation used by reflection, which is
+ // user-visible and doesn't expect this.
+ if mode == fmtTypeID && t.Vargen != 0 {
+ fmt.Fprintf(b, "·%d", t.Vargen)
+ }
return
}
@@ -567,6 +590,18 @@ func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type
b.WriteString(fmt.Sprintf("%p", t))
}
+ case TUNION:
+ for i := 0; i < t.NumTerms(); i++ {
+ if i > 0 {
+ b.WriteString("|")
+ }
+ elem, tilde := t.Term(i)
+ if tilde {
+ b.WriteString("~")
+ }
+ tconv2(b, elem, 0, mode, visited)
+ }
+
case Txxx:
b.WriteString("Txxx")
@@ -671,7 +706,7 @@ func FmtConst(v constant.Value, sharp bool) string {
// TypeHash computes a hash value for type t to use in type switch statements.
func TypeHash(t *Type) uint32 {
- p := t.LongString()
+ p := t.NameString()
// Using MD5 is overkill, but reduces accidental collisions.
h := md5.Sum([]byte(p))
diff --git a/src/cmd/compile/internal/types/identity.go b/src/cmd/compile/internal/types/identity.go
index dde9f51856..dc39acced8 100644
--- a/src/cmd/compile/internal/types/identity.go
+++ b/src/cmd/compile/internal/types/identity.go
@@ -29,6 +29,14 @@ func identical(t1, t2 *Type, cmpTags bool, assumedEqual map[typePair]struct{}) b
return false
}
if t1.sym != nil || t2.sym != nil {
+ if t1.HasShape() || t2.HasShape() {
+ switch t1.kind {
+ case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TINT, TUINT, TUINTPTR, TCOMPLEX64, TCOMPLEX128, TFLOAT32, TFLOAT64, TBOOL, TSTRING, TPTR, TUNSAFEPTR:
+ return true
+ }
+ // fall through to unnamed type comparison for complex types.
+ goto cont
+ }
// Special case: we keep byte/uint8 and rune/int32
// separate for error messages. Treat them as equal.
switch t1.kind {
@@ -40,6 +48,7 @@ func identical(t1, t2 *Type, cmpTags bool, assumedEqual map[typePair]struct{}) b
return false
}
}
+cont:
// Any cyclic type must go through a named type, and if one is
// named, it is only identical to the other if they are the
diff --git a/src/cmd/compile/internal/types/kind_string.go b/src/cmd/compile/internal/types/kind_string.go
index ae24a58b92..3e6a8bc064 100644
--- a/src/cmd/compile/internal/types/kind_string.go
+++ b/src/cmd/compile/internal/types/kind_string.go
@@ -38,20 +38,21 @@ func _() {
_ = x[TSTRING-27]
_ = x[TUNSAFEPTR-28]
_ = x[TTYPEPARAM-29]
- _ = x[TIDEAL-30]
- _ = x[TNIL-31]
- _ = x[TBLANK-32]
- _ = x[TFUNCARGS-33]
- _ = x[TCHANARGS-34]
- _ = x[TSSA-35]
- _ = x[TTUPLE-36]
- _ = x[TRESULTS-37]
- _ = x[NTYPE-38]
+ _ = x[TUNION-30]
+ _ = x[TIDEAL-31]
+ _ = x[TNIL-32]
+ _ = x[TBLANK-33]
+ _ = x[TFUNCARGS-34]
+ _ = x[TCHANARGS-35]
+ _ = x[TSSA-36]
+ _ = x[TTUPLE-37]
+ _ = x[TRESULTS-38]
+ _ = x[NTYPE-39]
}
-const _Kind_name = "xxxINT8UINT8INT16UINT16INT32UINT32INT64UINT64INTUINTUINTPTRCOMPLEX64COMPLEX128FLOAT32FLOAT64BOOLPTRFUNCSLICEARRAYSTRUCTCHANMAPINTERFORWANYSTRINGUNSAFEPTRTYPEPARAMIDEALNILBLANKFUNCARGSCHANARGSSSATUPLERESULTSNTYPE"
+const _Kind_name = "xxxINT8UINT8INT16UINT16INT32UINT32INT64UINT64INTUINTUINTPTRCOMPLEX64COMPLEX128FLOAT32FLOAT64BOOLPTRFUNCSLICEARRAYSTRUCTCHANMAPINTERFORWANYSTRINGUNSAFEPTRTYPEPARAMUNIONIDEALNILBLANKFUNCARGSCHANARGSSSATUPLERESULTSNTYPE"
-var _Kind_index = [...]uint8{0, 3, 7, 12, 17, 23, 28, 34, 39, 45, 48, 52, 59, 68, 78, 85, 92, 96, 99, 103, 108, 113, 119, 123, 126, 131, 135, 138, 144, 153, 162, 167, 170, 175, 183, 191, 194, 199, 206, 211}
+var _Kind_index = [...]uint8{0, 3, 7, 12, 17, 23, 28, 34, 39, 45, 48, 52, 59, 68, 78, 85, 92, 96, 99, 103, 108, 113, 119, 123, 126, 131, 135, 138, 144, 153, 162, 167, 172, 175, 180, 188, 196, 199, 204, 211, 216}
func (i Kind) String() string {
if i >= Kind(len(_Kind_index)-1) {
diff --git a/src/cmd/compile/internal/types/pkg.go b/src/cmd/compile/internal/types/pkg.go
index a6d2e2007b..f63a357f0d 100644
--- a/src/cmd/compile/internal/types/pkg.go
+++ b/src/cmd/compile/internal/types/pkg.go
@@ -137,7 +137,3 @@ func CleanroomDo(f func()) {
f()
pkgMap = saved
}
-
-func IsDotAlias(sym *Sym) bool {
- return sym.Def != nil && sym.Def.Sym() != sym
-}
diff --git a/src/cmd/compile/internal/types/size.go b/src/cmd/compile/internal/types/size.go
index f0e695ab96..89391ade68 100644
--- a/src/cmd/compile/internal/types/size.go
+++ b/src/cmd/compile/internal/types/size.go
@@ -90,6 +90,26 @@ func expandiface(t *Type) {
methods = append(methods, m)
}
+ {
+ methods := t.Methods().Slice()
+ sort.SliceStable(methods, func(i, j int) bool {
+ mi, mj := methods[i], methods[j]
+
+ // Sort embedded types by type name (if any).
+ if mi.Sym == nil && mj.Sym == nil {
+ return mi.Type.Sym().Less(mj.Type.Sym())
+ }
+
+ // Sort methods before embedded types.
+ if mi.Sym == nil || mj.Sym == nil {
+ return mi.Sym != nil
+ }
+
+ // Sort methods by symbol name.
+ return mi.Sym.Less(mj.Sym)
+ })
+ }
+
for _, m := range t.Methods().Slice() {
if m.Sym == nil {
continue
@@ -104,8 +124,17 @@ func expandiface(t *Type) {
continue
}
+ if m.Type.IsUnion() {
+ continue
+ }
+
+ // In Go 1.18, embedded types can be anything. In Go 1.17, we disallow
+ // embedding anything other than interfaces.
if !m.Type.IsInterface() {
- base.ErrorfAt(m.Pos, "interface contains embedded non-interface %v", m.Type)
+ if AllowsGoVersion(t.Pkg(), 1, 18) {
+ continue
+ }
+ base.ErrorfAt(m.Pos, "interface contains embedded non-interface, non-union %v", m.Type)
m.SetBroke(true)
t.SetBroke(true)
// Add to fields so that error messages
@@ -120,10 +149,15 @@ func expandiface(t *Type) {
// (including broken ones, if any) and add to t's
// method set.
for _, t1 := range m.Type.AllMethods().Slice() {
- // Use m.Pos rather than t1.Pos to preserve embedding position.
f := NewField(m.Pos, t1.Sym, t1.Type)
addMethod(f, false)
+
+ // Clear position after typechecking, for consistency with types2.
+ f.Pos = src.NoXPos
}
+
+ // Clear position after typechecking, for consistency with types2.
+ m.Pos = src.NoXPos
}
sort.Sort(MethodsByName(methods))
@@ -405,6 +439,12 @@ func CalcSize(t *Type) {
t.Align = uint8(PtrSize)
expandiface(t)
+ case TUNION:
+ // Always part of an interface for now, so size/align don't matter.
+ // Pretend a union is represented like an interface.
+ w = 2 * int64(PtrSize)
+ t.Align = uint8(PtrSize)
+
case TCHAN: // implemented as pointer
w = int64(PtrSize)
diff --git a/src/cmd/compile/internal/types/sizeof_test.go b/src/cmd/compile/internal/types/sizeof_test.go
index 7028938742..7349e52a73 100644
--- a/src/cmd/compile/internal/types/sizeof_test.go
+++ b/src/cmd/compile/internal/types/sizeof_test.go
@@ -21,7 +21,7 @@ func TestSizeof(t *testing.T) {
_64bit uintptr // size on 64bit platforms
}{
{Sym{}, 44, 72},
- {Type{}, 60, 104},
+ {Type{}, 64, 112},
{Map{}, 20, 40},
{Forward{}, 20, 32},
{Func{}, 28, 48},
diff --git a/src/cmd/compile/internal/types/sort.go b/src/cmd/compile/internal/types/sort.go
index dc59b06415..765c070cd9 100644
--- a/src/cmd/compile/internal/types/sort.go
+++ b/src/cmd/compile/internal/types/sort.go
@@ -4,11 +4,16 @@
package types
-// MethodsByName sorts methods by symbol.
+// MethodsByName sorts methods by name.
type MethodsByName []*Field
-func (x MethodsByName) Len() int { return len(x) }
+func (x MethodsByName) Len() int { return len(x) }
+func (x MethodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x MethodsByName) Less(i, j int) bool { return x[i].Sym.Less(x[j].Sym) }
-func (x MethodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+// EmbeddedsByName sorts embedded types by name.
+type EmbeddedsByName []*Field
-func (x MethodsByName) Less(i, j int) bool { return x[i].Sym.Less(x[j].Sym) }
+func (x EmbeddedsByName) Len() int { return len(x) }
+func (x EmbeddedsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x EmbeddedsByName) Less(i, j int) bool { return x[i].Type.Sym().Less(x[j].Type.Sym()) }
diff --git a/src/cmd/compile/internal/types/sym.go b/src/cmd/compile/internal/types/sym.go
index 534cf7e237..fb642f52f8 100644
--- a/src/cmd/compile/internal/types/sym.go
+++ b/src/cmd/compile/internal/types/sym.go
@@ -110,6 +110,14 @@ func (a *Sym) Less(b *Sym) bool {
return false
}
+ // Nil before non-nil.
+ if a == nil {
+ return true
+ }
+ if b == nil {
+ return false
+ }
+
// Exported symbols before non-exported.
ea := IsExported(a.Name)
eb := IsExported(b.Name)
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
index 1a9aa6916a..875b0ba82f 100644
--- a/src/cmd/compile/internal/types/type.go
+++ b/src/cmd/compile/internal/types/type.go
@@ -8,6 +8,7 @@ import (
"cmd/compile/internal/base"
"cmd/internal/src"
"fmt"
+ "strings"
"sync"
)
@@ -73,6 +74,7 @@ const (
TSTRING
TUNSAFEPTR
TTYPEPARAM
+ TUNION
// pseudo-types for literals
TIDEAL // untyped numeric constants
@@ -121,6 +123,8 @@ var (
// Predeclared error interface type.
ErrorType *Type
+ // Predeclared comparable interface type.
+ ComparableType *Type
// Types to represent untyped string and boolean constants.
UntypedString = New(TSTRING)
@@ -151,7 +155,7 @@ type Type struct {
// TARRAY: *Array
// TSLICE: Slice
// TSSA: string
- // TTYPEPARAM: *Interface (though we may not need to store/use the Interface info)
+ // TTYPEPARAM: *Typeparam
Extra interface{}
// Width is the width of this Type in bytes.
@@ -182,12 +186,19 @@ type Type struct {
flags bitset8
// For defined (named) generic types, a pointer to the list of type params
- // (in order) of this type that need to be instantiated. For
- // fully-instantiated generic types, this is the targs used to instantiate
- // them (which are used when generating the corresponding instantiated
- // methods). rparams is only set for named types that are generic or are
- // fully-instantiated from a generic type, and is otherwise set to nil.
+ // (in order) of this type that need to be instantiated. For instantiated
+ // generic types, this is the targs used to instantiate them. These targs
+ // may be typeparams (for re-instantiated types such as Value[T2]) or
+ // concrete types (for fully instantiated types such as Value[int]).
+ // rparams is only set for named types that are generic or are fully
+ // instantiated from a generic type, and is otherwise set to nil.
+ // TODO(danscales): choose a better name.
rparams *[]*Type
+
+ // For an instantiated generic type, the symbol for the base generic type.
+ // This backpointer is useful, because the base type is the type that has
+ // the method bodies.
+ OrigSym *Sym
}
func (*Type) CanBeAnSSAAux() {}
@@ -199,6 +210,8 @@ const (
typeDeferwidth // width computation has been deferred and type is on deferredTypeStack
typeRecur
typeHasTParam // there is a typeparam somewhere in the type (generic function or type)
+ typeIsShape // represents a set of closely related types, for generics
+ typeHasShape // there is a shape somewhere in the type
)
func (t *Type) NotInHeap() bool { return t.flags&typeNotInHeap != 0 }
@@ -207,13 +220,21 @@ func (t *Type) Noalg() bool { return t.flags&typeNoalg != 0 }
func (t *Type) Deferwidth() bool { return t.flags&typeDeferwidth != 0 }
func (t *Type) Recur() bool { return t.flags&typeRecur != 0 }
func (t *Type) HasTParam() bool { return t.flags&typeHasTParam != 0 }
+func (t *Type) IsShape() bool { return t.flags&typeIsShape != 0 }
+func (t *Type) HasShape() bool { return t.flags&typeHasShape != 0 }
func (t *Type) SetNotInHeap(b bool) { t.flags.set(typeNotInHeap, b) }
func (t *Type) SetBroke(b bool) { t.flags.set(typeBroke, b) }
func (t *Type) SetNoalg(b bool) { t.flags.set(typeNoalg, b) }
func (t *Type) SetDeferwidth(b bool) { t.flags.set(typeDeferwidth, b) }
func (t *Type) SetRecur(b bool) { t.flags.set(typeRecur, b) }
-func (t *Type) SetHasTParam(b bool) { t.flags.set(typeHasTParam, b) }
+
+// Generic types should never have alg functions.
+func (t *Type) SetHasTParam(b bool) { t.flags.set(typeHasTParam, b); t.flags.set(typeNoalg, b) }
+
+// Should always do SetHasShape(true) when doing SetIsShape(true).
+func (t *Type) SetIsShape(b bool) { t.flags.set(typeIsShape, b) }
+func (t *Type) SetHasShape(b bool) { t.flags.set(typeHasShape, b) }
// Kind returns the kind of type t.
func (t *Type) Kind() Kind { return t.kind }
@@ -255,9 +276,6 @@ func (t *Type) SetRParams(rparams []*Type) {
base.Fatalf("Setting nil or zero-length rparams")
}
t.rparams = &rparams
- if t.HasTParam() {
- return
- }
// HasTParam should be set if any rparam is or has a type param. This is
// to handle the case of a generic type which doesn't reference any of its
// type params (e.g. most commonly, an empty struct).
@@ -266,9 +284,33 @@ func (t *Type) SetRParams(rparams []*Type) {
t.SetHasTParam(true)
break
}
+ if rparam.HasShape() {
+ t.SetHasShape(true)
+ break
+ }
}
}
+// IsBaseGeneric reports whether t is a base generic type (one that has not been
+// reinstantiated with other type params or fully instantiated).
+func (t *Type) IsBaseGeneric() bool {
+ return len(t.RParams()) > 0 && strings.Index(t.Sym().Name, "[") < 0
+}
+
+// IsInstantiatedGeneric reports whether t is a generic type that has been
+// reinstantiated with new typeparams (i.e. is not fully instantiated).
+func (t *Type) IsInstantiatedGeneric() bool {
+ return len(t.RParams()) > 0 && strings.Index(t.Sym().Name, "[") >= 0 &&
+ t.HasTParam()
+}
+
+// IsFullyInstantiated reports whether t is a fully instantiated generic type; i.e. an
+// instantiated generic type where all type arguments are non-generic or fully
+// instantiated generic types.
+func (t *Type) IsFullyInstantiated() bool {
+ return len(t.RParams()) > 0 && !t.HasTParam()
+}
+
// NoPkg is a nil *Pkg value for clarity.
// It's intended for use when constructing types that aren't exported
// and thus don't need to be associated with any package.
@@ -377,6 +419,18 @@ type Interface struct {
pkg *Pkg
}
+// Typeparam contains Type fields specific to typeparam types.
+type Typeparam struct {
+ index int // type parameter index in source order, starting at 0
+ bound *Type
+}
+
+// Union contains Type fields specific to union types.
+type Union struct {
+ terms []*Type
+ tildes []bool // whether terms[i] is of form ~T
+}
+
// Ptr contains Type fields specific to pointer types.
type Ptr struct {
Elem *Type // element type
@@ -558,7 +612,9 @@ func New(et Kind) *Type {
case TRESULTS:
t.Extra = new(Results)
case TTYPEPARAM:
- t.Extra = new(Interface)
+ t.Extra = new(Typeparam)
+ case TUNION:
+ t.Extra = new(Union)
}
return t
}
@@ -574,6 +630,9 @@ func NewArray(elem *Type, bound int64) *Type {
if elem.HasTParam() {
t.SetHasTParam(true)
}
+ if elem.HasShape() {
+ t.SetHasShape(true)
+ }
return t
}
@@ -592,6 +651,9 @@ func NewSlice(elem *Type) *Type {
if elem.HasTParam() {
t.SetHasTParam(true)
}
+ if elem.HasShape() {
+ t.SetHasShape(true)
+ }
return t
}
@@ -604,6 +666,9 @@ func NewChan(elem *Type, dir ChanDir) *Type {
if elem.HasTParam() {
t.SetHasTParam(true)
}
+ if elem.HasShape() {
+ t.SetHasShape(true)
+ }
return t
}
@@ -614,6 +679,9 @@ func NewTuple(t1, t2 *Type) *Type {
if t1.HasTParam() || t2.HasTParam() {
t.SetHasTParam(true)
}
+ if t1.HasShape() || t2.HasShape() {
+ t.SetHasShape(true)
+ }
return t
}
@@ -645,6 +713,9 @@ func NewMap(k, v *Type) *Type {
if k.HasTParam() || v.HasTParam() {
t.SetHasTParam(true)
}
+ if k.HasShape() || v.HasShape() {
+ t.SetHasShape(true)
+ }
return t
}
@@ -669,6 +740,9 @@ func NewPtr(elem *Type) *Type {
// when this entry was cached.
t.SetHasTParam(true)
}
+ if elem.HasShape() {
+ t.SetHasShape(true)
+ }
return t
}
@@ -682,6 +756,9 @@ func NewPtr(elem *Type) *Type {
if elem.HasTParam() {
t.SetHasTParam(true)
}
+ if elem.HasShape() {
+ t.SetHasShape(true)
+ }
return t
}
@@ -825,6 +902,8 @@ func (t *Type) copy() *Type {
case TARRAY:
x := *t.Extra.(*Array)
nt.Extra = &x
+ case TTYPEPARAM:
+ base.Fatalf("typeparam types cannot be copied")
case TTUPLE, TSSA, TRESULTS:
base.Fatalf("ssa types cannot be copied")
}
@@ -925,7 +1004,7 @@ func (t *Type) FuncArgs() *Type {
return t.Extra.(FuncArgs).T
}
-// IsFuncArgStruct reports whether t is a struct representing function parameters.
+// IsFuncArgStruct reports whether t is a struct representing function parameters or results.
func (t *Type) IsFuncArgStruct() bool {
return t.kind == TSTRUCT && t.Extra.(*Struct).Funarg != FunargNone
}
@@ -1436,6 +1515,14 @@ func (t *Type) IsInterface() bool {
return t.kind == TINTER
}
+func (t *Type) IsUnion() bool {
+ return t.kind == TUNION
+}
+
+func (t *Type) IsTypeParam() bool {
+ return t.kind == TTYPEPARAM
+}
+
// IsEmptyInterface reports whether t is an empty interface type.
func (t *Type) IsEmptyInterface() bool {
return t.IsInterface() && t.AllMethods().Len() == 0
@@ -1708,6 +1795,9 @@ func (t *Type) SetUnderlying(underlying *Type) {
if underlying.HasTParam() {
t.SetHasTParam(true)
}
+ if underlying.HasShape() {
+ t.SetHasShape(true)
+ }
// spec: "The declared type does not inherit any methods bound
// to the existing type, but the method set of an interface
@@ -1739,6 +1829,15 @@ func fieldsHasTParam(fields []*Field) bool {
return false
}
+func fieldsHasShape(fields []*Field) bool {
+ for _, f := range fields {
+ if f.Type != nil && f.Type.HasShape() {
+ return true
+ }
+ }
+ return false
+}
+
// NewBasic returns a new basic type of the given kind.
func NewBasic(kind Kind, obj Object) *Type {
t := New(kind)
@@ -1758,6 +1857,10 @@ func NewInterface(pkg *Pkg, methods []*Field) *Type {
t.SetHasTParam(true)
break
}
+ if f.Type != nil && f.Type.HasShape() {
+ t.SetHasShape(true)
+ break
+ }
}
if anyBroke(methods) {
t.SetBroke(true)
@@ -1766,14 +1869,75 @@ func NewInterface(pkg *Pkg, methods []*Field) *Type {
return t
}
-// NewTypeParam returns a new type param.
-func NewTypeParam(pkg *Pkg) *Type {
+// NewTypeParam returns a new type param with the specified sym (package and name)
+// and specified index within the typeparam list.
+func NewTypeParam(sym *Sym, index int) *Type {
t := New(TTYPEPARAM)
- t.Extra.(*Interface).pkg = pkg
+ t.sym = sym
+ t.Extra.(*Typeparam).index = index
t.SetHasTParam(true)
return t
}
+// Index returns the index of the type param within its param list.
+func (t *Type) Index() int {
+ t.wantEtype(TTYPEPARAM)
+ return t.Extra.(*Typeparam).index
+}
+
+// SetIndex sets the index of the type param within its param list.
+func (t *Type) SetIndex(i int) {
+ t.wantEtype(TTYPEPARAM)
+ t.Extra.(*Typeparam).index = i
+}
+
+// SetBound sets the bound of a typeparam.
+func (t *Type) SetBound(bound *Type) {
+ t.wantEtype(TTYPEPARAM)
+ t.Extra.(*Typeparam).bound = bound
+}
+
+// Bound returns the bound of a typeparam.
+func (t *Type) Bound() *Type {
+ t.wantEtype(TTYPEPARAM)
+ return t.Extra.(*Typeparam).bound
+}
+
+// NewUnion returns a new union with the specified set of terms (types). If
+// tildes[i] is true, then terms[i] represents ~T, rather than just T.
+func NewUnion(terms []*Type, tildes []bool) *Type {
+ t := New(TUNION)
+ if len(terms) != len(tildes) {
+ base.Fatalf("Mismatched terms and tildes for NewUnion")
+ }
+ t.Extra.(*Union).terms = terms
+ t.Extra.(*Union).tildes = tildes
+ nt := len(terms)
+ for i := 0; i < nt; i++ {
+ if terms[i].HasTParam() {
+ t.SetHasTParam(true)
+ }
+ if terms[i].HasShape() {
+ t.SetHasShape(true)
+ }
+ }
+ return t
+}
+
+// NumTerms returns the number of terms in a union type.
+func (t *Type) NumTerms() int {
+ t.wantEtype(TUNION)
+ return len(t.Extra.(*Union).terms)
+}
+
+// Term returns the ith term of a union type as (term, tilde). If tilde is true, term
+// represents ~T, rather than just T.
+func (t *Type) Term(i int) (*Type, bool) {
+ t.wantEtype(TUNION)
+ u := t.Extra.(*Union)
+ return u.terms[i], u.tildes[i]
+}
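
The union representation is just parallel slices of terms and tilde flags, and the accessors above expose them pairwise. A standalone sketch with strings in place of *types.Type, including the "~T1|T2|..." rendering that the fmt.go change in this patch produces:

    package main

    import "fmt"

    type union struct {
        terms  []string
        tildes []bool
    }

    func newUnion(terms []string, tildes []bool) *union {
        if len(terms) != len(tildes) {
            panic("mismatched terms and tildes")
        }
        return &union{terms: terms, tildes: tildes}
    }

    func (u *union) numTerms() int             { return len(u.terms) }
    func (u *union) term(i int) (string, bool) { return u.terms[i], u.tildes[i] }

    func main() {
        u := newUnion([]string{"int8", "int16", "int32"}, []bool{true, true, true})
        for i := 0; i < u.numTerms(); i++ {
            if i > 0 {
                fmt.Print("|")
            }
            t, tilde := u.term(i)
            if tilde {
                fmt.Print("~")
            }
            fmt.Print(t)
        }
        fmt.Println() // ~int8|~int16|~int32
    }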
+
const BOGUS_FUNARG_OFFSET = -1000000000
func unzeroFieldOffsets(f []*Field) {
@@ -1817,6 +1981,9 @@ func NewSignature(pkg *Pkg, recv *Field, tparams, params, results []*Field) *Typ
fieldsHasTParam(results) {
t.SetHasTParam(true)
}
+ if fieldsHasShape(recvs) || fieldsHasShape(params) || fieldsHasShape(results) {
+ t.SetHasShape(true)
+ }
return t
}
@@ -1832,6 +1999,9 @@ func NewStruct(pkg *Pkg, fields []*Field) *Type {
if fieldsHasTParam(fields) {
t.SetHasTParam(true)
}
+ if fieldsHasShape(fields) {
+ t.SetHasShape(true)
+ }
return t
}
@@ -2028,7 +2198,7 @@ func TypeSymLookup(name string) *Sym {
}
func TypeSymName(t *Type) string {
- name := t.ShortString()
+ name := t.LinkString()
// Use a separate symbol name for Noalg types for #17752.
if TypeHasNoAlg(t) {
name = "noalg." + name
diff --git a/src/cmd/compile/internal/types2/api.go b/src/cmd/compile/internal/types2/api.go
index 2939dcc0bd..ae4fb6ad10 100644
--- a/src/cmd/compile/internal/types2/api.go
+++ b/src/cmd/compile/internal/types2/api.go
@@ -125,6 +125,12 @@ type Config struct {
// TODO(gri) Consolidate error messages and remove this flag.
CompilerErrorMessages bool
+ // If AllowTypeLists is set, the type list syntax is permitted
+ // in an interface in addition to the type set syntax.
+ // TODO(gri) Remove once type lists are no longer supported by
+ // the parser.
+ AllowTypeLists bool
+
// If go115UsesCgo is set, the type checker expects the
// _cgo_gotypes.go file generated by running cmd/cgo to be
// provided as a package source file. Qualified identifiers
@@ -355,7 +361,7 @@ func (tv TypeAndValue) HasOk() bool {
// Inferred reports the inferred type arguments and signature
// for a parameterized function call that uses type inference.
type Inferred struct {
- Targs []Type
+ TArgs []Type
Sig *Signature
}
@@ -424,11 +430,11 @@ func Implements(V Type, T *Interface) bool {
// Identical reports whether x and y are identical types.
// Receivers of Signature types are ignored.
func Identical(x, y Type) bool {
- return (*Checker)(nil).identical(x, y)
+ return identical(x, y, true, nil)
}
// IdenticalIgnoreTags reports whether x and y are identical types if tags are ignored.
// Receivers of Signature types are ignored.
func IdenticalIgnoreTags(x, y Type) bool {
- return (*Checker)(nil).identicalIgnoreTags(x, y)
+ return identical(x, y, false, nil)
}
diff --git a/src/cmd/compile/internal/types2/api_test.go b/src/cmd/compile/internal/types2/api_test.go
index 873390c1e9..d8844956af 100644
--- a/src/cmd/compile/internal/types2/api_test.go
+++ b/src/cmd/compile/internal/types2/api_test.go
@@ -329,27 +329,26 @@ func TestTypesInfo(t *testing.T) {
{brokenPkg + `x5; func _() { var x map[string][...]int; x = map[string][...]int{"": {1,2,3}} }`, `x`, `map[string]invalid type`},
// parameterized functions
- {genericPkg + `p0; func f[T any](T); var _ = f[int]`, `f`, `func[T₁ interface{}](T₁)`},
- {genericPkg + `p1; func f[T any](T); var _ = f[int]`, `f[int]`, `func(int)`},
- {genericPkg + `p2; func f[T any](T); func _() { f(42) }`, `f`, `func[T₁ interface{}](T₁)`},
- {genericPkg + `p3; func f[T any](T); func _() { f(42) }`, `f(42)`, `()`},
+ {genericPkg + `p0; func f[T any](T) {}; var _ = f[int]`, `f`, `func[generic_p0.T₁ interface{}](generic_p0.T₁)`},
+ {genericPkg + `p1; func f[T any](T) {}; var _ = f[int]`, `f[int]`, `func(int)`},
+ {genericPkg + `p2; func f[T any](T) {}; func _() { f(42) }`, `f`, `func[generic_p2.T₁ interface{}](generic_p2.T₁)`},
+ {genericPkg + `p3; func f[T any](T) {}; func _() { f(42) }`, `f(42)`, `()`},
// type parameters
{genericPkg + `t0; type t[] int; var _ t`, `t`, `generic_t0.t`}, // t[] is a syntax error that is ignored in this test in favor of t
- {genericPkg + `t1; type t[P any] int; var _ t[int]`, `t`, `generic_t1.t[P₁ interface{}]`},
- {genericPkg + `t2; type t[P interface{}] int; var _ t[int]`, `t`, `generic_t2.t[P₁ interface{}]`},
- {genericPkg + `t3; type t[P, Q interface{}] int; var _ t[int, int]`, `t`, `generic_t3.t[P₁, Q₂ interface{}]`},
- {brokenPkg + `t4; type t[P, Q interface{ m() }] int; var _ t[int, int]`, `t`, `broken_t4.t[P₁, Q₂ interface{m()}]`},
+ {genericPkg + `t1; type t[P any] int; var _ t[int]`, `t`, `generic_t1.t[generic_t1.P₁ interface{}]`},
+ {genericPkg + `t2; type t[P interface{}] int; var _ t[int]`, `t`, `generic_t2.t[generic_t2.P₁ interface{}]`},
+ {genericPkg + `t3; type t[P, Q interface{}] int; var _ t[int, int]`, `t`, `generic_t3.t[generic_t3.P₁, generic_t3.Q₂ interface{}]`},
+ {brokenPkg + `t4; type t[P, Q interface{ m() }] int; var _ t[int, int]`, `t`, `broken_t4.t[broken_t4.P₁, broken_t4.Q₂ interface{m()}]`},
// instantiated types must be sanitized
{genericPkg + `g0; type t[P any] int; var x struct{ f t[int] }; var _ = x.f`, `x.f`, `generic_g0.t[int]`},
// issue 45096
- {genericPkg + `issue45096; func _[T interface{ type int8, int16, int32 }](x T) { _ = x < 0 }`, `0`, `T₁`},
+ {genericPkg + `issue45096; func _[T interface{ ~int8 | ~int16 | ~int32 }](x T) { _ = x < 0 }`, `0`, `generic_issue45096.T₁`},
}
for _, test := range tests {
- ResetId() // avoid renumbering of type parameter ids when adding tests
info := Info{Types: make(map[syntax.Expr]TypeAndValue)}
var name string
if strings.HasPrefix(test.src, brokenPkg) {
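
The rewritten test cases above track the switch from the old type-list syntax (`type int8, int16, int32`) to type sets with tilde terms and unions. A language-level sketch of what the updated sources now exercise (assuming Go 1.18 semantics; names are illustrative):

    package sketch

    // A constraint written with the type-set syntax used in the tests above.
    type Small interface {
        ~int8 | ~int16 | ~int32
    }

    // Abs compiles because every type in Small's type set is a signed integer,
    // so the comparison and negation below are valid for all of them.
    func Abs[T Small](x T) T {
        if x < 0 {
            return -x
        }
        return x
    }
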
@@ -390,60 +389,60 @@ func TestInferredInfo(t *testing.T) {
targs []string
sig string
}{
- {genericPkg + `p0; func f[T any](T); func _() { f(42) }`,
+ {genericPkg + `p0; func f[T any](T) {}; func _() { f(42) }`,
`f`,
[]string{`int`},
`func(int)`,
},
- {genericPkg + `p1; func f[T any](T) T; func _() { f('@') }`,
+ {genericPkg + `p1; func f[T any](T) T { panic(0) }; func _() { f('@') }`,
`f`,
[]string{`rune`},
`func(rune) rune`,
},
- {genericPkg + `p2; func f[T any](...T) T; func _() { f(0i) }`,
+ {genericPkg + `p2; func f[T any](...T) T { panic(0) }; func _() { f(0i) }`,
`f`,
[]string{`complex128`},
`func(...complex128) complex128`,
},
- {genericPkg + `p3; func f[A, B, C any](A, *B, []C); func _() { f(1.2, new(string), []byte{}) }`,
+ {genericPkg + `p3; func f[A, B, C any](A, *B, []C) {}; func _() { f(1.2, new(string), []byte{}) }`,
`f`,
[]string{`float64`, `string`, `byte`},
`func(float64, *string, []byte)`,
},
- {genericPkg + `p4; func f[A, B any](A, *B, ...[]B); func _() { f(1.2, new(byte)) }`,
+ {genericPkg + `p4; func f[A, B any](A, *B, ...[]B) {}; func _() { f(1.2, new(byte)) }`,
`f`,
[]string{`float64`, `byte`},
`func(float64, *byte, ...[]byte)`,
},
// we don't know how to translate these but we can type-check them
- {genericPkg + `q0; type T struct{}; func (T) m[P any](P); func _(x T) { x.m(42) }`,
+ {genericPkg + `q0; type T struct{}; func (T) m[P any](P) {}; func _(x T) { x.m(42) }`,
`x.m`,
[]string{`int`},
`func(int)`,
},
- {genericPkg + `q1; type T struct{}; func (T) m[P any](P) P; func _(x T) { x.m(42) }`,
+ {genericPkg + `q1; type T struct{}; func (T) m[P any](P) P { panic(0) }; func _(x T) { x.m(42) }`,
`x.m`,
[]string{`int`},
`func(int) int`,
},
- {genericPkg + `q2; type T struct{}; func (T) m[P any](...P) P; func _(x T) { x.m(42) }`,
+ {genericPkg + `q2; type T struct{}; func (T) m[P any](...P) P { panic(0) }; func _(x T) { x.m(42) }`,
`x.m`,
[]string{`int`},
`func(...int) int`,
},
- {genericPkg + `q3; type T struct{}; func (T) m[A, B, C any](A, *B, []C); func _(x T) { x.m(1.2, new(string), []byte{}) }`,
+ {genericPkg + `q3; type T struct{}; func (T) m[A, B, C any](A, *B, []C) {}; func _(x T) { x.m(1.2, new(string), []byte{}) }`,
`x.m`,
[]string{`float64`, `string`, `byte`},
`func(float64, *string, []byte)`,
},
- {genericPkg + `q4; type T struct{}; func (T) m[A, B any](A, *B, ...[]B); func _(x T) { x.m(1.2, new(byte)) }`,
+ {genericPkg + `q4; type T struct{}; func (T) m[A, B any](A, *B, ...[]B) {}; func _(x T) { x.m(1.2, new(byte)) }`,
`x.m`,
[]string{`float64`, `byte`},
`func(float64, *byte, ...[]byte)`,
},
- {genericPkg + `r0; type T[P any] struct{}; func (_ T[P]) m[Q any](Q); func _[P any](x T[P]) { x.m(42) }`,
+ {genericPkg + `r0; type T[P any] struct{}; func (_ T[P]) m[Q any](Q) {}; func _[P any](x T[P]) { x.m(42) }`,
`x.m`,
[]string{`int`},
`func(int)`,
@@ -455,38 +454,38 @@ func TestInferredInfo(t *testing.T) {
// `func(float64)`,
// },
- {genericPkg + `s1; func f[T any, P interface{type *T}](x T); func _(x string) { f(x) }`,
+ {genericPkg + `s1; func f[T any, P interface{~*T}](x T) {}; func _(x string) { f(x) }`,
`f`,
[]string{`string`, `*string`},
`func(x string)`,
},
- {genericPkg + `s2; func f[T any, P interface{type *T}](x []T); func _(x []int) { f(x) }`,
+ {genericPkg + `s2; func f[T any, P interface{~*T}](x []T) {}; func _(x []int) { f(x) }`,
`f`,
[]string{`int`, `*int`},
`func(x []int)`,
},
- {genericPkg + `s3; type C[T any] interface{type chan<- T}; func f[T any, P C[T]](x []T); func _(x []int) { f(x) }`,
+ {genericPkg + `s3; type C[T any] interface{~chan<- T}; func f[T any, P C[T]](x []T) {}; func _(x []int) { f(x) }`,
`f`,
[]string{`int`, `chan<- int`},
`func(x []int)`,
},
- {genericPkg + `s4; type C[T any] interface{type chan<- T}; func f[T any, P C[T], Q C[[]*P]](x []T); func _(x []int) { f(x) }`,
+ {genericPkg + `s4; type C[T any] interface{~chan<- T}; func f[T any, P C[T], Q C[[]*P]](x []T) {}; func _(x []int) { f(x) }`,
`f`,
[]string{`int`, `chan<- int`, `chan<- []*chan<- int`},
`func(x []int)`,
},
- {genericPkg + `t1; func f[T any, P interface{type *T}]() T; func _() { _ = f[string] }`,
+ {genericPkg + `t1; func f[T any, P interface{~*T}]() T { panic(0) }; func _() { _ = f[string] }`,
`f`,
[]string{`string`, `*string`},
`func() string`,
},
- {genericPkg + `t2; type C[T any] interface{type chan<- T}; func f[T any, P C[T]]() []T; func _() { _ = f[int] }`,
+ {genericPkg + `t2; type C[T any] interface{~chan<- T}; func f[T any, P C[T]]() []T { return nil }; func _() { _ = f[int] }`,
`f`,
[]string{`int`, `chan<- int`},
`func() []int`,
},
- {genericPkg + `t3; type C[T any] interface{type chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T; func _() { _ = f[int] }`,
+ {genericPkg + `t3; type C[T any] interface{~chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T { return nil }; func _() { _ = f[int] }`,
`f`,
[]string{`int`, `chan<- int`, `chan<- []*chan<- int`},
`func() []int`,
@@ -515,7 +514,7 @@ func TestInferredInfo(t *testing.T) {
panic(fmt.Sprintf("unexpected call expression type %T", call))
}
if syntax.String(fun) == test.fun {
- targs = inf.Targs
+ targs = inf.TArgs
sig = inf.Sig
break
}
@@ -1568,6 +1567,14 @@ func F(){
}
}
+var nopos syntax.Pos
+
+// newDefined creates a new defined type named T with the given underlying type.
+func newDefined(underlying Type) *Named {
+ tname := NewTypeName(nopos, nil, "T", nil)
+ return NewNamed(tname, underlying, nil)
+}
+
func TestConvertibleTo(t *testing.T) {
for _, test := range []struct {
v, t Type
@@ -1847,3 +1854,28 @@ func f(x T) T { return foo.F(x) }
}
}
}
+
+func TestInstantiate(t *testing.T) {
+ // eventually we'd like more tests, but this is a start
+ const src = genericPkg + "p; type T[P any] *T[P]"
+ pkg, err := pkgFor(".", src, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // type T should have one type parameter
+ T := pkg.Scope().Lookup("T").Type().(*Named)
+ if n := T.TParams().Len(); n != 1 {
+ t.Fatalf("expected 1 type parameter; found %d", n)
+ }
+
+ // instantiation should succeed (no endless recursion)
+ // even with a nil *Checker
+ var check *Checker
+ res := check.Instantiate(nopos, T, []Type{Typ[Int]}, nil, false)
+
+ // instantiated type should point to itself
+ if res.Underlying().(*Pointer).Elem() != res {
+ t.Fatalf("unexpected result type: %s", res)
+ }
+}
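
TestInstantiate above instantiates a self-referential generic type with a nil *Checker. At the language level, the declaration it checks looks like this (illustrative sketch):

    package sketch

    // A generic type whose definition refers to its own instantiation.
    // Instantiation must terminate, and T[int]'s underlying pointer type
    // points back to T[int] itself.
    type T[P any] *T[P]

    var _ T[int]
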
diff --git a/src/cmd/compile/internal/types2/array.go b/src/cmd/compile/internal/types2/array.go
new file mode 100644
index 0000000000..502d49bc25
--- /dev/null
+++ b/src/cmd/compile/internal/types2/array.go
@@ -0,0 +1,25 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// An Array represents an array type.
+type Array struct {
+ len int64
+ elem Type
+}
+
+// NewArray returns a new array type for the given element type and length.
+// A negative length indicates an unknown length.
+func NewArray(elem Type, len int64) *Array { return &Array{len: len, elem: elem} }
+
+// Len returns the length of array a.
+// A negative result indicates an unknown length.
+func (a *Array) Len() int64 { return a.len }
+
+// Elem returns the element type of array a.
+func (a *Array) Elem() Type { return a.elem }
+
+func (a *Array) Underlying() Type { return a }
+func (a *Array) String() string { return TypeString(a, nil) }
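
The new array.go mirrors the exported go/types API; a tiny usage sketch of the constructor and accessors (via go/types, since types2 is internal to the compiler):

    package main

    import (
        "fmt"
        "go/types"
    )

    func main() {
        arr := types.NewArray(types.Typ[types.Int], 16)
        fmt.Println(arr.Len())  // 16
        fmt.Println(arr.Elem()) // int
        fmt.Println(arr)        // [16]int
    }
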
diff --git a/src/cmd/compile/internal/types2/assignments.go b/src/cmd/compile/internal/types2/assignments.go
index 583118c8b2..6184fc2ea5 100644
--- a/src/cmd/compile/internal/types2/assignments.go
+++ b/src/cmd/compile/internal/types2/assignments.go
@@ -68,7 +68,7 @@ func (check *Checker) assignment(x *operand, T Type, context string) {
// x.typ is typed
// A generic (non-instantiated) function value cannot be assigned to a variable.
- if sig := asSignature(x.typ); sig != nil && len(sig.tparams) > 0 {
+ if sig := asSignature(x.typ); sig != nil && sig.TParams().Len() > 0 {
check.errorf(x, "cannot use generic function %s without instantiation in %s", x, context)
}
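
The rule enforced above, sketched at the language level (assuming Go 1.18 semantics; names are illustrative): a generic function must be instantiated before it can be used as a value.

    package sketch

    func minOf[T ~int | ~float64](x, y T) T {
        if x < y {
            return x
        }
        return y
    }

    // var f = minOf      // error: cannot use generic function minOf without instantiation
    var f = minOf[int] // OK: instantiate first, then use as an ordinary function value
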
diff --git a/src/cmd/compile/internal/types2/basic.go b/src/cmd/compile/internal/types2/basic.go
new file mode 100644
index 0000000000..2fd973cafb
--- /dev/null
+++ b/src/cmd/compile/internal/types2/basic.go
@@ -0,0 +1,82 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// BasicKind describes the kind of basic type.
+type BasicKind int
+
+const (
+ Invalid BasicKind = iota // type is invalid
+
+ // predeclared types
+ Bool
+ Int
+ Int8
+ Int16
+ Int32
+ Int64
+ Uint
+ Uint8
+ Uint16
+ Uint32
+ Uint64
+ Uintptr
+ Float32
+ Float64
+ Complex64
+ Complex128
+ String
+ UnsafePointer
+
+ // types for untyped values
+ UntypedBool
+ UntypedInt
+ UntypedRune
+ UntypedFloat
+ UntypedComplex
+ UntypedString
+ UntypedNil
+
+ // aliases
+ Byte = Uint8
+ Rune = Int32
+)
+
+// BasicInfo is a set of flags describing properties of a basic type.
+type BasicInfo int
+
+// Properties of basic types.
+const (
+ IsBoolean BasicInfo = 1 << iota
+ IsInteger
+ IsUnsigned
+ IsFloat
+ IsComplex
+ IsString
+ IsUntyped
+
+ IsOrdered = IsInteger | IsFloat | IsString
+ IsNumeric = IsInteger | IsFloat | IsComplex
+ IsConstType = IsBoolean | IsNumeric | IsString
+)
+
+// A Basic represents a basic type.
+type Basic struct {
+ kind BasicKind
+ info BasicInfo
+ name string
+}
+
+// Kind returns the kind of basic type b.
+func (b *Basic) Kind() BasicKind { return b.kind }
+
+// Info returns information about properties of basic type b.
+func (b *Basic) Info() BasicInfo { return b.info }
+
+// Name returns the name of basic type b.
+func (b *Basic) Name() string { return b.name }
+
+func (b *Basic) Underlying() Type { return b }
+func (b *Basic) String() string { return TypeString(b, nil) }
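
As with array.go, the exported go/types API shows how BasicKind and BasicInfo are meant to be queried; a brief sketch:

    package main

    import (
        "fmt"
        "go/types"
    )

    func main() {
        b := types.Typ[types.Float64]
        fmt.Println(b.Kind() == types.Float64)     // true
        fmt.Println(b.Info()&types.IsNumeric != 0) // true: float64 is numeric
        fmt.Println(b.Info()&types.IsOrdered != 0) // true: float64 supports < <= > >=
        fmt.Println(b.Name())                      // float64
    }
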
diff --git a/src/cmd/compile/internal/types2/builtins.go b/src/cmd/compile/internal/types2/builtins.go
index f90e06f226..da2dcf54aa 100644
--- a/src/cmd/compile/internal/types2/builtins.go
+++ b/src/cmd/compile/internal/types2/builtins.go
@@ -46,7 +46,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
default:
// make argument getter
xlist, _ := check.exprList(call.ArgList, false)
- arg = func(x *operand, i int) { *x = *xlist[i]; x.typ = expand(x.typ) }
+ arg = func(x *operand, i int) { *x = *xlist[i] }
nargs = len(xlist)
// evaluate first argument, if present
if nargs > 0 {
@@ -144,7 +144,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
mode := invalid
var typ Type
var val constant.Value
- switch typ = implicitArrayDeref(optype(x.typ)); t := typ.(type) {
+ switch typ = arrayPtrDeref(under(x.typ)); t := typ.(type) {
case *Basic:
if isString(t) && id == _Len {
if x.mode == constant_ {
@@ -178,9 +178,9 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
mode = value
}
- case *Sum:
- if t.is(func(t Type) bool {
- switch t := under(t).(type) {
+ case *TypeParam:
+ if t.underIs(func(t Type) bool {
+ switch t := arrayPtrDeref(t).(type) {
case *Basic:
if isString(t) && id == _Len {
return true
@@ -212,19 +212,23 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
case _Close:
// close(c)
- c := asChan(x.typ)
- if c == nil {
- check.errorf(x, invalidArg+"%s is not a channel", x)
- return
- }
- if c.dir == RecvOnly {
- check.errorf(x, invalidArg+"%s must not be a receive-only channel", x)
+ if !underIs(x.typ, func(u Type) bool {
+ uch, _ := u.(*Chan)
+ if uch == nil {
+ check.errorf(x, invalidOp+"cannot close non-channel %s", x)
+ return false
+ }
+ if uch.dir == RecvOnly {
+ check.errorf(x, invalidOp+"cannot close receive-only channel %s", x)
+ return false
+ }
+ return true
+ }) {
return
}
-
x.mode = novalue
if check.Types != nil {
- check.recordBuiltinType(call.Fun, makeSig(nil, c))
+ check.recordBuiltinType(call.Fun, makeSig(nil, x.typ))
}
case _Complex:
@@ -281,7 +285,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
}
// both argument types must be identical
- if !check.identical(x.typ, y.typ) {
+ if !Identical(x.typ, y.typ) {
check.errorf(x, invalidOp+"%v (mismatched types %s and %s)", call, x.typ, y.typ)
return
}
@@ -332,13 +336,15 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
return
}
var src Type
- switch t := optype(y.typ).(type) {
+ switch t := under(y.typ).(type) {
case *Basic:
if isString(y.typ) {
src = universeByte
}
case *Slice:
src = t.elem
+ case *TypeParam:
+ check.error(x, "copy on generic operands not yet implemented")
}
if dst == nil || src == nil {
@@ -346,7 +352,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
return
}
- if !check.identical(dst, src) {
+ if !Identical(dst, src) {
check.errorf(x, invalidArg+"arguments to copy %s and %s have different element types %s and %s", x, &y, dst, src)
return
}
@@ -358,25 +364,40 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
x.typ = Typ[Int]
case _Delete:
- // delete(m, k)
- m := asMap(x.typ)
- if m == nil {
- check.errorf(x, invalidArg+"%s is not a map", x)
+ // delete(map_, key)
+ // map_ must be a map type or a type parameter describing map types.
+ // The key cannot be a type parameter for now.
+ map_ := x.typ
+ var key Type
+ if !underIs(map_, func(u Type) bool {
+ map_, _ := u.(*Map)
+ if map_ == nil {
+ check.errorf(x, invalidArg+"%s is not a map", x)
+ return false
+ }
+ if key != nil && !Identical(map_.key, key) {
+ check.errorf(x, invalidArg+"maps of %s must have identical key types", x)
+ return false
+ }
+ key = map_.key
+ return true
+ }) {
return
}
+
arg(x, 1) // k
if x.mode == invalid {
return
}
- check.assignment(x, m.key, "argument to delete")
+ check.assignment(x, key, "argument to delete")
if x.mode == invalid {
return
}
x.mode = novalue
if check.Types != nil {
- check.recordBuiltinType(call.Fun, makeSig(nil, m, m.key))
+ check.recordBuiltinType(call.Fun, makeSig(nil, map_, key))
}
case _Imag, _Real:
@@ -451,39 +472,21 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
return
}
- min, max := -1, 10
- var valid func(t Type) bool
- valid = func(t Type) bool {
- var m int
- switch t := optype(t).(type) {
- case *Slice:
- m = 2
- case *Map, *Chan:
- m = 1
- case *Sum:
- return t.is(valid)
- default:
- return false
- }
- if m > min {
- min = m
- }
- if m+1 < max {
- max = m + 1
- }
- return true
- }
-
- if !valid(T) {
+ var min int // minimum number of arguments
+ switch optype(T).(type) {
+ case *Slice:
+ min = 2
+ case *Map, *Chan:
+ min = 1
+ case *top:
+ check.errorf(arg0, invalidArg+"cannot make %s; type parameter has no structural type", arg0)
+ return
+ default:
check.errorf(arg0, invalidArg+"cannot make %s; type must be slice, map, or channel", arg0)
return
}
- if nargs < min || max < nargs {
- if min == max {
- check.errorf(call, "%v expects %d arguments; found %d", call, min, nargs)
- } else {
- check.errorf(call, "%v expects %d or %d arguments; found %d", call, min, max, nargs)
- }
+ if nargs < min || min+1 < nargs {
+ check.errorf(call, invalidOp+"%v expects %d or %d arguments; found %d", call, min, min+1, nargs)
return
}
@@ -603,19 +606,22 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
case _Alignof:
// unsafe.Alignof(x T) uintptr
- if asTypeParam(x.typ) != nil {
- check.errorf(call, invalidOp+"unsafe.Alignof undefined for %s", x)
- return
- }
check.assignment(x, nil, "argument to unsafe.Alignof")
if x.mode == invalid {
return
}
- x.mode = constant_
- x.val = constant.MakeInt64(check.conf.alignof(x.typ))
+ if hasVarSize(x.typ) {
+ x.mode = value
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(Typ[Uintptr], x.typ))
+ }
+ } else {
+ x.mode = constant_
+ x.val = constant.MakeInt64(check.conf.alignof(x.typ))
+ // result is constant - no need to record signature
+ }
x.typ = Typ[Uintptr]
- // result is constant - no need to record signature
case _Offsetof:
// unsafe.Offsetof(x T) uintptr, where x must be a selector
@@ -635,7 +641,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
base := derefStructPtr(x.typ)
sel := selx.Sel.Value
- obj, index, indirect := check.lookupFieldOrMethod(base, false, check.pkg, sel)
+ obj, index, indirect := LookupFieldOrMethod(base, false, check.pkg, sel)
switch obj.(type) {
case nil:
check.errorf(x, invalidArg+"%s has no single field %s", base, sel)
@@ -653,30 +659,43 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
return
}
- // TODO(gri) Should we pass x.typ instead of base (and indirect report if derefStructPtr indirected)?
+ // TODO(gri) Should we pass x.typ instead of base (and have indirect report if derefStructPtr indirected)?
check.recordSelection(selx, FieldVal, base, obj, index, false)
- offs := check.conf.offsetof(base, index)
- x.mode = constant_
- x.val = constant.MakeInt64(offs)
+ // The field offset is considered a variable even if the field is declared before
+ // the part of the struct which is variable-sized. This makes both the rules
+ // simpler and also permits (or at least doesn't prevent) a compiler from re-
+ // arranging struct fields if it wanted to.
+ if hasVarSize(base) {
+ x.mode = value
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(Typ[Uintptr], obj.Type()))
+ }
+ } else {
+ x.mode = constant_
+ x.val = constant.MakeInt64(check.conf.offsetof(base, index))
+ // result is constant - no need to record signature
+ }
x.typ = Typ[Uintptr]
- // result is constant - no need to record signature
case _Sizeof:
// unsafe.Sizeof(x T) uintptr
- if asTypeParam(x.typ) != nil {
- check.errorf(call, invalidOp+"unsafe.Sizeof undefined for %s", x)
- return
- }
check.assignment(x, nil, "argument to unsafe.Sizeof")
if x.mode == invalid {
return
}
- x.mode = constant_
- x.val = constant.MakeInt64(check.conf.sizeof(x.typ))
+ if hasVarSize(x.typ) {
+ x.mode = value
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(Typ[Uintptr], x.typ))
+ }
+ } else {
+ x.mode = constant_
+ x.val = constant.MakeInt64(check.conf.sizeof(x.typ))
+ // result is constant - no need to record signature
+ }
x.typ = Typ[Uintptr]
- // result is constant - no need to record signature
case _Slice:
// unsafe.Slice(ptr *T, len IntegerType) []T
@@ -748,6 +767,25 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
return true
}
+// hasVarSize reports whether the size of type t is variable due to type parameters.
+func hasVarSize(t Type) bool {
+ switch t := under(t).(type) {
+ case *Array:
+ return hasVarSize(t.elem)
+ case *Struct:
+ for _, f := range t.fields {
+ if hasVarSize(f.typ) {
+ return true
+ }
+ }
+ case *TypeParam:
+ return true
+ case *Named, *Union, *top:
+ unreachable()
+ }
+ return false
+}
+
// applyTypeFunc applies f to x. If x is a type parameter,
// the result is a type parameter constrained by a new
// interface bound. The type bounds for that interface
@@ -759,10 +797,10 @@ func (check *Checker) applyTypeFunc(f func(Type) Type, x Type) Type {
if tp := asTypeParam(x); tp != nil {
// Test if t satisfies the requirements for the argument
// type and collect possible result types at the same time.
- var rtypes []Type
- if !tp.Bound().is(func(x Type) bool {
- if r := f(x); r != nil {
- rtypes = append(rtypes, r)
+ var terms []*Term
+ if !tp.iface().typeSet().is(func(t *term) bool {
+ if r := f(t.typ); r != nil {
+ terms = append(terms, NewTerm(t.tilde, r))
return true
}
return false
@@ -775,11 +813,12 @@ func (check *Checker) applyTypeFunc(f func(Type) Type, x Type) Type {
// uses of real() where the result is used to
// define type and initialize a variable?
- // construct a suitable new type parameter
- tpar := NewTypeName(nopos, nil /* = Universe pkg */, "<type parameter>", nil)
- ptyp := check.NewTypeParam(tpar, 0, &emptyInterface) // assigns type to tpar as a side-effect
- tsum := NewSum(rtypes)
- ptyp.bound = &Interface{types: tsum, allMethods: markComplete, allTypes: tsum}
+ // Construct a suitable new type parameter for the sum type. The
+ // type param is placed in the current package so export/import
+ // works as expected.
+ tpar := NewTypeName(nopos, check.pkg, "<type parameter>", nil)
+ ptyp := check.NewTypeParam(tpar, NewInterfaceType(nil, []Type{NewUnion(terms)})) // assigns type to tpar as a side-effect
+ ptyp.index = tp.index
return ptyp
}
@@ -803,10 +842,9 @@ func makeSig(res Type, args ...Type) *Signature {
return &Signature{params: params, results: result}
}
-// implicitArrayDeref returns A if typ is of the form *A and A is an array;
+// arrayPtrDeref returns A if typ is of the form *A and A is an array;
// otherwise it returns typ.
-//
-func implicitArrayDeref(typ Type) Type {
+func arrayPtrDeref(typ Type) Type {
if p, ok := typ.(*Pointer); ok {
if a := asArray(p.base); a != nil {
return a
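
The builtins.go changes above let len/cap, close, delete, and the unsafe builtins operate on operands of type parameter type, and make unsafe.Sizeof/Alignof/Offsetof non-constant when the size depends on a type parameter. A language-level sketch of the behavior this enables (assuming the eventual Go 1.18 rules; the names below are illustrative):

    package sketch

    import "unsafe"

    // The unsafe builtins still apply to operands whose size depends on a type
    // parameter, but the results are ordinary values rather than constants.
    func sizes[T any](x T) (uintptr, uintptr) {
        var s struct {
            a byte
            b T
        }
        _ = unsafe.Offsetof(s.b)                   // value, not constant: T's size is unknown
        return unsafe.Sizeof(x), unsafe.Alignof(x) // values, not constants
    }

    // close and delete accept type parameter operands as long as every type in
    // the type set is a suitable channel or map type.
    func closeAll[C ~chan int](c C)            { close(c) }
    func del[M ~map[string]int](m M, k string) { delete(m, k) }
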
diff --git a/src/cmd/compile/internal/types2/builtins_test.go b/src/cmd/compile/internal/types2/builtins_test.go
index 82c786b86e..52dbba1cb9 100644
--- a/src/cmd/compile/internal/types2/builtins_test.go
+++ b/src/cmd/compile/internal/types2/builtins_test.go
@@ -7,6 +7,7 @@ package types2_test
import (
"cmd/compile/internal/syntax"
"fmt"
+ "strings"
"testing"
. "cmd/compile/internal/types2"
@@ -111,12 +112,15 @@ var builtinCalls = []struct {
{"Alignof", `_ = unsafe.Alignof(0)`, `invalid type`}, // constant
{"Alignof", `var x struct{}; _ = unsafe.Alignof(x)`, `invalid type`}, // constant
+ {"Alignof", `var x P; _ = unsafe.Alignof(x)`, `func(p.P₁) uintptr`},
{"Offsetof", `var x struct{f bool}; _ = unsafe.Offsetof(x.f)`, `invalid type`}, // constant
{"Offsetof", `var x struct{_ int; f bool}; _ = unsafe.Offsetof((&x).f)`, `invalid type`}, // constant
+ {"Offsetof", `var x struct{_ int; f P}; _ = unsafe.Offsetof((&x).f)`, `func(p.P₁) uintptr`},
{"Sizeof", `_ = unsafe.Sizeof(0)`, `invalid type`}, // constant
{"Sizeof", `var x struct{}; _ = unsafe.Sizeof(x)`, `invalid type`}, // constant
+ {"Sizeof", `var x P; _ = unsafe.Sizeof(x)`, `func(p.P₁) uintptr`},
{"Slice", `var p *int; _ = unsafe.Slice(p, 1)`, `func(*int, int) []int`},
{"Slice", `var p *byte; var n uintptr; _ = unsafe.Slice(p, n)`, `func(*byte, uintptr) []byte`},
@@ -149,9 +153,14 @@ func TestBuiltinSignatures(t *testing.T) {
}
}
+func parseGenericSrc(path, src string) (*syntax.File, error) {
+ errh := func(error) {} // dummy error handler so that parsing continues in the presence of errors
+ return syntax.Parse(syntax.NewFileBase(path), strings.NewReader(src), errh, nil, syntax.AllowGenerics)
+}
+
func testBuiltinSignature(t *testing.T, name, src0, want string) {
- src := fmt.Sprintf(`package p; import "unsafe"; type _ unsafe.Pointer /* use unsafe */; func _() { %s }`, src0)
- f, err := parseSrc("", src)
+ src := fmt.Sprintf(`package p; import "unsafe"; type _ unsafe.Pointer /* use unsafe */; func _[P any]() { %s }`, src0)
+ f, err := parseGenericSrc("", src)
if err != nil {
t.Errorf("%s: %s", src0, err)
return
diff --git a/src/cmd/compile/internal/types2/call.go b/src/cmd/compile/internal/types2/call.go
index 6d149340b2..049d80dd9e 100644
--- a/src/cmd/compile/internal/types2/call.go
+++ b/src/cmd/compile/internal/types2/call.go
@@ -26,7 +26,7 @@ func (check *Checker) funcInst(x *operand, inst *syntax.IndexExpr) {
// check number of type arguments (got) vs number of type parameters (want)
sig := x.typ.(*Signature)
- got, want := len(targs), len(sig.tparams)
+ got, want := len(targs), sig.TParams().Len()
if !useConstraintTypeInference && got != want || got > want {
check.errorf(xlist[got-1], "got %d type arguments but want %d", got, want)
x.mode = invalid
@@ -37,7 +37,7 @@ func (check *Checker) funcInst(x *operand, inst *syntax.IndexExpr) {
// if we don't have enough type arguments, try type inference
inferred := false
if got < want {
- targs = check.infer(inst.Pos(), sig.tparams, targs, nil, nil, true)
+ targs = check.infer(inst.Pos(), sig.TParams().list(), targs, nil, nil, true)
if targs == nil {
// error was already reported
x.mode = invalid
@@ -56,8 +56,8 @@ func (check *Checker) funcInst(x *operand, inst *syntax.IndexExpr) {
}
// instantiate function signature
- res := check.instantiate(x.Pos(), sig, targs, poslist).(*Signature)
- assert(res.tparams == nil) // signature is not generic anymore
+ res := check.Instantiate(x.Pos(), sig, targs, poslist, true).(*Signature)
+ assert(res.TParams().Len() == 0) // signature is not generic anymore
if inferred {
check.recordInferred(inst, targs, res)
}
@@ -99,7 +99,6 @@ func (check *Checker) callExpr(x *operand, call *syntax.CallExpr) exprKind {
check.expr(x, call.ArgList[0])
if x.mode != invalid {
if t := asInterface(T); t != nil {
- check.completeInterface(nopos, t)
if t.IsConstraint() {
check.errorf(call, "cannot use interface %s in conversion (contains type list or is comparable)", T)
break
@@ -156,7 +155,7 @@ func (check *Checker) callExpr(x *operand, call *syntax.CallExpr) exprKind {
assert(len(targs) == len(xlist))
// check number of type arguments (got) vs number of type parameters (want)
- got, want := len(targs), len(sig.tparams)
+ got, want := len(targs), sig.TParams().Len()
if got > want {
check.errorf(xlist[want], "got %d type arguments but want %d", got, want)
check.use(call.ArgList...)
@@ -190,7 +189,7 @@ func (check *Checker) callExpr(x *operand, call *syntax.CallExpr) exprKind {
// if type inference failed, a parametrized result must be invalidated
// (operands cannot have a parametrized type)
- if x.mode == value && len(sig.tparams) > 0 && isParameterized(sig.tparams, x.typ) {
+ if x.mode == value && sig.TParams().Len() > 0 && isParameterized(sig.TParams().list(), x.typ) {
x.mode = invalid
}
@@ -318,24 +317,24 @@ func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, targs []T
}
// infer type arguments and instantiate signature if necessary
- if len(sig.tparams) > 0 {
+ if sig.TParams().Len() > 0 {
// TODO(gri) provide position information for targs so we can feed
// it to the instantiate call for better error reporting
- targs = check.infer(call.Pos(), sig.tparams, targs, sigParams, args, true)
+ targs := check.infer(call.Pos(), sig.TParams().list(), targs, sigParams, args, true)
if targs == nil {
return // error already reported
}
// compute result signature
- rsig = check.instantiate(call.Pos(), sig, targs, nil).(*Signature)
- assert(rsig.tparams == nil) // signature is not generic anymore
+ rsig = check.Instantiate(call.Pos(), sig, targs, nil, true).(*Signature)
+ assert(rsig.TParams().Len() == 0) // signature is not generic anymore
check.recordInferred(call, targs, rsig)
// Optimization: Only if the parameter list was adjusted do we
// need to compute it from the adjusted list; otherwise we can
// simply use the result signature's parameter list.
if adjusted {
- sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(sig.tparams, targs)).(*Tuple)
+ sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(sig.TParams().list(), targs)).(*Tuple)
} else {
sigParams = rsig.params
}
@@ -468,7 +467,7 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr) {
check.instantiatedOperand(x)
- obj, index, indirect = check.lookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel)
+ obj, index, indirect = LookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel)
if obj == nil {
switch {
case index != nil:
@@ -480,11 +479,10 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr) {
var why string
if tpar := asTypeParam(x.typ); tpar != nil {
// Type parameter bounds don't specify fields, so don't mention "field".
- switch obj := tpar.Bound().obj.(type) {
- case nil:
+ if tname := tpar.iface().obj; tname != nil {
+ why = check.sprintf("interface %s has no method %s", tname.name, sel)
+ } else {
why = check.sprintf("type bound for %s has no method %s", x.typ, sel)
- case *TypeName:
- why = check.sprintf("interface %s has no method %s", obj.name, sel)
}
} else {
why = check.sprintf("type %s has no field or method %s", x.typ, sel)
@@ -498,7 +496,7 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr) {
} else {
changeCase = string(unicode.ToUpper(r)) + sel[1:]
}
- if obj, _, _ = check.lookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, changeCase); obj != nil {
+ if obj, _, _ = LookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, changeCase); obj != nil {
why += ", but does have " + changeCase
}
}
@@ -518,7 +516,7 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr) {
// the signature accordingly.
// TODO(gri) factor this code out
sig := m.typ.(*Signature)
- if len(sig.rparams) > 0 {
+ if sig.RParams().Len() > 0 {
// For inference to work, we must use the receiver type
// matching the receiver in the actual method declaration.
// If the method is embedded, the matching receiver is the
@@ -547,7 +545,7 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr) {
// the receiver type arguments here, the receiver must be otherwise invalid
// and an error has been reported elsewhere.
arg := operand{mode: variable, expr: x.expr, typ: recv}
- targs := check.infer(m.pos, sig.rparams, nil, NewTuple(sig.recv), []*operand{&arg}, false /* no error reporting */)
+ targs := check.infer(m.pos, sig.RParams().list(), nil, NewTuple(sig.recv), []*operand{&arg}, false /* no error reporting */)
//check.dump("### inferred targs = %s", targs)
if targs == nil {
// We may reach here if there were other errors (see issue #40056).
@@ -557,7 +555,7 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr) {
// (If we modify m, some tests will fail, possibly because m is in use.)
// TODO(gri) investigate and provide a correct explanation here
copy := *m
- copy.typ = check.subst(e.Pos(), m.typ, makeSubstMap(sig.rparams, targs))
+ copy.typ = check.subst(e.Pos(), m.typ, makeSubstMap(sig.RParams().list(), targs))
obj = &copy
}
// TODO(gri) we also need to do substitution for parameterized interface methods
@@ -576,17 +574,37 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr) {
check.recordSelection(e, MethodExpr, x.typ, m, index, indirect)
- // the receiver type becomes the type of the first function
- // argument of the method expression's function type
- var params []*Var
sig := m.typ.(*Signature)
+ if sig.recv == nil {
+ check.error(e, "illegal cycle in method declaration")
+ goto Error
+ }
+
+ // The receiver type becomes the type of the first function
+ // argument of the method expression's function type.
+ var params []*Var
if sig.params != nil {
params = sig.params.vars
}
+ // Be consistent about named/unnamed parameters. This is not needed
+ // for type-checking, but the newly constructed signature may appear
+ // in an error message and then have mixed named/unnamed parameters.
+ // (An alternative would be to not print parameter names in errors,
+ // but it's useful to see them; this is cheap and method expressions
+ // are rare.)
+ name := ""
+ if len(params) > 0 && params[0].name != "" {
+ // name needed
+ name = sig.recv.name
+ if name == "" {
+ name = "_"
+ }
+ }
+ params = append([]*Var{NewVar(sig.recv.pos, sig.recv.pkg, name, x.typ)}, params...)
x.mode = value
x.typ = &Signature{
tparams: sig.tparams,
- params: NewTuple(append([]*Var{NewVar(nopos, check.pkg, "_", x.typ)}, params...)...),
+ params: NewTuple(params...),
results: sig.results,
variadic: sig.variadic,
}
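
The method-expression hunk above builds the first parameter from the receiver, reusing the receiver's name so the synthesized signature doesn't mix named and unnamed parameters. What that signature corresponds to in source (illustrative):

    package main

    import "fmt"

    type Point struct{ X, Y int }

    func (p Point) Add(q Point) Point { return Point{p.X + q.X, p.Y + q.Y} }

    func main() {
        // The method expression's type is func(p Point, q Point) Point:
        // the receiver becomes the first (named) parameter.
        add := Point.Add
        fmt.Println(add(Point{1, 2}, Point{3, 4})) // {4 6}
    }
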
diff --git a/src/cmd/compile/internal/types2/chan.go b/src/cmd/compile/internal/types2/chan.go
new file mode 100644
index 0000000000..77650dfb09
--- /dev/null
+++ b/src/cmd/compile/internal/types2/chan.go
@@ -0,0 +1,35 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// A Chan represents a channel type.
+type Chan struct {
+ dir ChanDir
+ elem Type
+}
+
+// A ChanDir value indicates a channel direction.
+type ChanDir int
+
+// The direction of a channel is indicated by one of these constants.
+const (
+ SendRecv ChanDir = iota
+ SendOnly
+ RecvOnly
+)
+
+// NewChan returns a new channel type for the given direction and element type.
+func NewChan(dir ChanDir, elem Type) *Chan {
+ return &Chan{dir: dir, elem: elem}
+}
+
+// Dir returns the direction of channel c.
+func (c *Chan) Dir() ChanDir { return c.dir }
+
+// Elem returns the element type of channel c.
+func (c *Chan) Elem() Type { return c.elem }
+
+func (c *Chan) Underlying() Type { return c }
+func (c *Chan) String() string { return TypeString(c, nil) }
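
And the matching go/types constructor for the new Chan type, for reference (a minimal sketch):

    package main

    import (
        "fmt"
        "go/types"
    )

    func main() {
        ch := types.NewChan(types.RecvOnly, types.Typ[types.String])
        fmt.Println(ch.Dir() == types.RecvOnly) // true
        fmt.Println(ch.Elem())                  // string
        fmt.Println(ch)                         // <-chan string
    }
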
diff --git a/src/cmd/compile/internal/types2/check.go b/src/cmd/compile/internal/types2/check.go
index 8d6cd1edab..6bc965c497 100644
--- a/src/cmd/compile/internal/types2/check.go
+++ b/src/cmd/compile/internal/types2/check.go
@@ -71,7 +71,7 @@ type importKey struct {
// A dotImportKey describes a dot-imported object in the given scope.
type dotImportKey struct {
scope *Scope
- obj Object
+ name string
}
// A Checker maintains the state of the type checker.
@@ -82,11 +82,11 @@ type Checker struct {
conf *Config
pkg *Package
*Info
- version version // accepted language version
- objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info
- impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package
- posMap map[*Interface][]syntax.Pos // maps interface types to lists of embedded interface positions
- typMap map[string]*Named // maps an instantiated named type hash to a *Named type
+ version version // accepted language version
+ nextID uint64 // unique Id for type parameters (first valid Id is 1)
+ objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info
+ impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package
+ typMap map[string]*Named // maps an instantiated named type hash to a *Named type
// pkgPathMap maps package names to the set of distinct import paths we've
// seen for that name, anywhere in the import graph. It is used for
@@ -188,7 +188,6 @@ func NewChecker(conf *Config, pkg *Package, info *Info) *Checker {
version: version,
objMap: make(map[Object]*declInfo),
impMap: make(map[importKey]*Package),
- posMap: make(map[*Interface][]syntax.Pos),
typMap: make(map[string]*Named),
}
}
@@ -283,11 +282,6 @@ func (check *Checker) checkFiles(files []*syntax.File) (err error) {
print("== recordUntyped ==")
check.recordUntyped()
- if check.Info != nil {
- print("== sanitizeInfo ==")
- sanitizeInfo(check.Info)
- }
-
check.pkg.complete = true
// no longer needed - release memory
diff --git a/src/cmd/compile/internal/types2/conversions.go b/src/cmd/compile/internal/types2/conversions.go
index 30201e2b7f..6c26a4c446 100644
--- a/src/cmd/compile/internal/types2/conversions.go
+++ b/src/cmd/compile/internal/types2/conversions.go
@@ -93,7 +93,7 @@ func (x *operand) convertibleTo(check *Checker, T Type) bool {
V := x.typ
Vu := under(V)
Tu := under(T)
- if check.identicalIgnoreTags(Vu, Tu) {
+ if IdenticalIgnoreTags(Vu, Tu) {
return true
}
@@ -101,7 +101,7 @@ func (x *operand) convertibleTo(check *Checker, T Type) bool {
// have identical underlying types if tags are ignored"
if V, ok := V.(*Pointer); ok {
if T, ok := T.(*Pointer); ok {
- if check.identicalIgnoreTags(under(V.base), under(T.base)) {
+ if IdenticalIgnoreTags(under(V.base), under(T.base)) {
return true
}
}
@@ -142,7 +142,7 @@ func (x *operand) convertibleTo(check *Checker, T Type) bool {
if s := asSlice(V); s != nil {
if p := asPointer(T); p != nil {
if a := asArray(p.Elem()); a != nil {
- if check.identical(s.Elem(), a.Elem()) {
+ if Identical(s.Elem(), a.Elem()) {
if check == nil || check.allowVersion(check.pkg, 1, 17) {
return true
}
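
The allowVersion(…, 1, 17) gate above corresponds to the Go 1.17 slice-to-array-pointer conversion; at the language level (illustrative):

    package sketch

    // Valid since Go 1.17: convert a slice to a pointer to an array with an
    // identical element type. The conversion panics at run time if the slice
    // is shorter than the array.
    func headPtr(b []byte) *[4]byte {
        return (*[4]byte)(b)
    }
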
diff --git a/src/cmd/compile/internal/types2/decl.go b/src/cmd/compile/internal/types2/decl.go
index 1333e4c0ec..bfccbc5dbf 100644
--- a/src/cmd/compile/internal/types2/decl.go
+++ b/src/cmd/compile/internal/types2/decl.go
@@ -317,6 +317,8 @@ func (check *Checker) validType(typ Type, path []Object) typeInfo {
}
case *Named:
+ t.expand()
+
// don't touch the type if it is from a different package or the Universe scope
// (doing so would lead to a race condition - was issue #35049)
if t.obj.pkg != check.pkg {
@@ -338,7 +340,7 @@ func (check *Checker) validType(typ Type, path []Object) typeInfo {
// cycle detected
for i, tn := range path {
if t.obj.pkg != check.pkg {
- panic("internal error: type cycle via package-external type")
+ panic("type cycle via package-external type")
}
if tn == t.obj {
check.cycleError(path[i:])
@@ -346,12 +348,9 @@ func (check *Checker) validType(typ Type, path []Object) typeInfo {
return t.info
}
}
- panic("internal error: cycle start not found")
+ panic("cycle start not found")
}
return t.info
-
- case *instance:
- return check.validType(t.expand(), path)
}
return valid
@@ -515,75 +514,6 @@ func (check *Checker) varDecl(obj *Var, lhs []*Var, typ, init syntax.Expr) {
check.initVars(lhs, []syntax.Expr{init}, nopos)
}
-// under returns the expanded underlying type of n0; possibly by following
-// forward chains of named types. If an underlying type is found, resolve
-// the chain by setting the underlying type for each defined type in the
-// chain before returning it. If no underlying type is found or a cycle
-// is detected, the result is Typ[Invalid]. If a cycle is detected and
-// n0.check != nil, the cycle is reported.
-func (n0 *Named) under() Type {
- u := n0.underlying
- if u == nil {
- return Typ[Invalid]
- }
-
- // If the underlying type of a defined type is not a defined
- // type, then that is the desired underlying type.
- n := asNamed(u)
- if n == nil {
- return u // common case
- }
-
- // Otherwise, follow the forward chain.
- seen := map[*Named]int{n0: 0}
- path := []Object{n0.obj}
- for {
- u = n.underlying
- if u == nil {
- u = Typ[Invalid]
- break
- }
- n1 := asNamed(u)
- if n1 == nil {
- break // end of chain
- }
-
- seen[n] = len(seen)
- path = append(path, n.obj)
- n = n1
-
- if i, ok := seen[n]; ok {
- // cycle
- // TODO(gri) revert this to a method on Checker. Having a possibly
- // nil Checker on Named and TypeParam is too subtle.
- if n0.check != nil {
- n0.check.cycleError(path[i:])
- }
- u = Typ[Invalid]
- break
- }
- }
-
- for n := range seen {
- // We should never have to update the underlying type of an imported type;
- // those underlying types should have been resolved during the import.
- // Also, doing so would lead to a race condition (was issue #31749).
- // Do this check always, not just in debug more (it's cheap).
- if n0.check != nil && n.obj.pkg != n0.check.pkg {
- panic("internal error: imported type with unresolved underlying type")
- }
- n.underlying = u
- }
-
- return u
-}
-
-func (n *Named) setUnderlying(typ Type) {
- if n != nil {
- n.underlying = typ
- }
-}
-
func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *Named) {
assert(obj.typ == nil)
@@ -599,8 +529,8 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *Named
alias = false
}
+ // alias declaration
if alias {
- // type alias declaration
if !check.allowVersion(check.pkg, 1, 9) {
if check.conf.CompilerErrorMessages {
check.error(tdecl, "type aliases only supported as of -lang=go1.9")
@@ -611,111 +541,94 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *Named
obj.typ = Typ[Invalid]
obj.typ = check.anyType(tdecl.Type)
+ return
+ }
- } else {
- // defined type declaration
-
- named := check.newNamed(obj, nil, nil, nil, nil)
- def.setUnderlying(named)
-
- if tdecl.TParamList != nil {
- check.openScope(tdecl, "type parameters")
- defer check.closeScope()
- named.tparams = check.collectTypeParams(tdecl.TParamList)
- }
-
- // determine underlying type of named
- named.fromRHS = check.definedType(tdecl.Type, named)
+ // type definition or generic type declaration
+ named := check.newNamed(obj, nil, nil, nil, nil)
+ def.setUnderlying(named)
- // The underlying type of named may be itself a named type that is
- // incomplete:
- //
- // type (
- // A B
- // B *C
- // C A
- // )
- //
- // The type of C is the (named) type of A which is incomplete,
- // and which has as its underlying type the named type B.
- // Determine the (final, unnamed) underlying type by resolving
- // any forward chain.
- // TODO(gri) Investigate if we can just use named.fromRHS here
- // and rely on lazy computation of the underlying type.
- named.underlying = under(named)
+ if tdecl.TParamList != nil {
+ check.openScope(tdecl, "type parameters")
+ defer check.closeScope()
+ named.tparams = check.collectTypeParams(tdecl.TParamList)
}
+ // determine underlying type of named
+ named.fromRHS = check.definedType(tdecl.Type, named)
+ assert(named.fromRHS != nil)
+ // The underlying type of named may be itself a named type that is
+ // incomplete:
+ //
+ // type (
+ // A B
+ // B *C
+ // C A
+ // )
+ //
+ // The type of C is the (named) type of A which is incomplete,
+ // and which has as its underlying type the named type B.
+ // Determine the (final, unnamed) underlying type by resolving
+ // any forward chain.
+ // TODO(gri) Investigate if we can just use named.fromRHS here
+ // and rely on lazy computation of the underlying type.
+ named.underlying = under(named)
+
+ // If the RHS is a type parameter, it must be from this type declaration.
+ if tpar, _ := named.underlying.(*TypeParam); tpar != nil && tparamIndex(named.TParams().list(), tpar) < 0 {
+ check.errorf(tdecl.Type, "cannot use function type parameter %s as RHS in type declaration", tpar)
+ named.underlying = Typ[Invalid]
+ }
}
-func (check *Checker) collectTypeParams(list []*syntax.Field) (tparams []*TypeName) {
- // Type parameter lists should not be empty. The parser will
- // complain but we still may get an incorrect AST: ignore it.
- if len(list) == 0 {
- return
- }
+func (check *Checker) collectTypeParams(list []*syntax.Field) *TypeParams {
+ tparams := make([]*TypeName, len(list))
- // Declare type parameters up-front, with empty interface as type bound.
+ // Declare type parameters up-front.
// The scope of type parameters starts at the beginning of the type parameter
- // list (so we can have mutually recursive parameterized interfaces).
- for _, f := range list {
- tparams = check.declareTypeParam(tparams, f.Name)
+ // list (so we can have mutually recursive parameterized type bounds).
+ for i, f := range list {
+ tparams[i] = check.declareTypeParam(f.Name)
}
var bound Type
- for i, j := 0, 0; i < len(list); i = j {
- f := list[i]
-
- // determine the range of type parameters list[i:j] with identical type bound
- // (declared as in (type a, b, c B))
- j = i + 1
- for j < len(list) && list[j].Type == f.Type {
- j++
- }
-
- // this should never be the case, but be careful
- if f.Type == nil {
- continue
- }
-
- // The predeclared identifier "any" is visible only as a constraint
- // in a type parameter list. Look for it before general constraint
- // resolution.
- if tident, _ := unparen(f.Type).(*syntax.Name); tident != nil && tident.Value == "any" && check.lookup("any") == nil {
- bound = universeAny
- } else {
- bound = check.typ(f.Type)
- }
-
- // type bound must be an interface
- // TODO(gri) We should delay the interface check because
- // we may not have a complete interface yet:
- // type C(type T C) interface {}
- // (issue #39724).
- if _, ok := under(bound).(*Interface); ok {
- // set the type bounds
- for i < j {
- tparams[i].typ.(*TypeParam).bound = bound
- i++
- }
- } else if bound != Typ[Invalid] {
- check.errorf(f.Type, "%s is not an interface", bound)
+ for i, f := range list {
+ // Optimization: Re-use the previous type bound if it hasn't changed.
+ // This also preserves the grouped output of type parameter lists
+ // when printing type strings.
+ if i == 0 || f.Type != list[i-1].Type {
+ bound = check.boundType(f.Type)
}
+ tparams[i].typ.(*TypeParam).bound = bound
}
- return
+ return bindTParams(tparams)
}
-func (check *Checker) declareTypeParam(tparams []*TypeName, name *syntax.Name) []*TypeName {
+func (check *Checker) declareTypeParam(name *syntax.Name) *TypeName {
tpar := NewTypeName(name.Pos(), check.pkg, name.Value, nil)
- check.NewTypeParam(tpar, len(tparams), &emptyInterface) // assigns type to tpar as a side-effect
+ check.NewTypeParam(tpar, nil) // assigns type to tpar as a side-effect
check.declare(check.scope, name, tpar, check.scope.pos) // TODO(gri) check scope position
- tparams = append(tparams, tpar)
+ return tpar
+}
- if check.conf.Trace {
- check.trace(name.Pos(), "type param = %v", tparams[len(tparams)-1])
+// boundType type-checks the type expression e and returns its type, or Typ[Invalid].
+// The type must be an interface, including the predeclared type "any".
+func (check *Checker) boundType(e syntax.Expr) Type {
+ // The predeclared identifier "any" is visible only as a type bound in a type parameter list.
+ // If we allow "any" for general use, this if-statement can be removed (issue #33232).
+ if name, _ := unparen(e).(*syntax.Name); name != nil && name.Value == "any" && check.lookup("any") == universeAny {
+ return universeAny.Type()
}
- return tparams
+ bound := check.typ(e)
+ check.later(func() {
+ u := under(bound)
+ if _, ok := u.(*Interface); !ok && u != Typ[Invalid] {
+ check.errorf(e, "%s is not an interface", bound)
+ }
+ })
+ return bound
}
func (check *Checker) collectMethods(obj *TypeName) {
@@ -737,7 +650,7 @@ func (check *Checker) collectMethods(obj *TypeName) {
// and field names must be distinct."
base := asNamed(obj.typ) // shouldn't fail but be conservative
if base != nil {
- if t, _ := base.underlying.(*Struct); t != nil {
+ if t, _ := base.Underlying().(*Struct); t != nil {
for _, fld := range t.fields {
if fld.name != "_" {
assert(mset.insert(fld) == nil)
@@ -779,6 +692,7 @@ func (check *Checker) collectMethods(obj *TypeName) {
}
if base != nil {
+ base.load() // TODO(mdempsky): Probably unnecessary.
base.methods = append(base.methods, m)
}
}
@@ -805,6 +719,10 @@ func (check *Checker) funcDecl(obj *Func, decl *declInfo) {
check.funcType(sig, fdecl.Recv, fdecl.TParamList, fdecl.Type)
obj.color_ = saved
+ if len(fdecl.TParamList) > 0 && fdecl.Body == nil {
+ check.softErrorf(fdecl, "parameterized function is missing function body")
+ }
+
// function body must be type-checked after global declarations
// (functions implemented elsewhere have no body)
if !check.conf.IgnoreFuncBodies && fdecl.Body != nil {
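
Two diagnostics are added above: a parameterized function declared without a body, and a type declaration whose RHS is one of the enclosing function's type parameters. Sketched in source form (illustrative; the commented lines show the reported errors):

    package sketch

    // func Missing[T any](T)   // error: parameterized function is missing function body

    func f[P any]() {
        // type local P         // error: cannot use function type parameter P as RHS in type declaration
        var _ []P // using P inside an ordinary composite type is fine
    }
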
diff --git a/src/cmd/compile/internal/types2/errorcalls_test.go b/src/cmd/compile/internal/types2/errorcalls_test.go
index 28bb33aaff..80b05f9f0f 100644
--- a/src/cmd/compile/internal/types2/errorcalls_test.go
+++ b/src/cmd/compile/internal/types2/errorcalls_test.go
@@ -18,7 +18,7 @@ func TestErrorCalls(t *testing.T) {
}
for _, file := range files {
- syntax.Walk(file, func(n syntax.Node) bool {
+ syntax.Crawl(file, func(n syntax.Node) bool {
call, _ := n.(*syntax.CallExpr)
if call == nil {
return false
diff --git a/src/cmd/compile/internal/types2/errors.go b/src/cmd/compile/internal/types2/errors.go
index af4ecb2300..a68273271b 100644
--- a/src/cmd/compile/internal/types2/errors.go
+++ b/src/cmd/compile/internal/types2/errors.go
@@ -88,7 +88,7 @@ func sprintf(qf Qualifier, format string, args ...interface{}) string {
case nil:
arg = "<nil>"
case operand:
- panic("internal error: should always pass *operand")
+ panic("got operand instead of *operand")
case *operand:
arg = operandString(a, qf)
case syntax.Pos:
@@ -111,7 +111,7 @@ func (check *Checker) qualifier(pkg *Package) string {
if check.pkgPathMap == nil {
check.pkgPathMap = make(map[string]map[string]bool)
check.seenPkgMap = make(map[*Package]bool)
- check.markImports(pkg)
+ check.markImports(check.pkg)
}
// If the same package name was used by multiple packages, display the full path.
if len(check.pkgPathMap[pkg.name]) > 1 {
@@ -148,7 +148,7 @@ func (check *Checker) sprintf(format string, args ...interface{}) string {
func (check *Checker) report(err *error_) {
if err.empty() {
- panic("internal error: reporting no error")
+ panic("no error to report")
}
check.err(err.pos(), err.msg(check.qualifier), err.soft)
}
diff --git a/src/cmd/compile/internal/types2/expr.go b/src/cmd/compile/internal/types2/expr.go
index 23b79656bb..3c2b10cd7e 100644
--- a/src/cmd/compile/internal/types2/expr.go
+++ b/src/cmd/compile/internal/types2/expr.go
@@ -157,6 +157,14 @@ var op2str2 = [...]string{
syntax.Shl: "shift",
}
+func underIs(typ Type, f func(Type) bool) bool {
+ u := under(typ)
+ if tpar, _ := u.(*TypeParam); tpar != nil {
+ return tpar.underIs(f)
+ }
+ return f(u)
+}
+
func (check *Checker) unary(x *operand, e *syntax.Operation) {
check.expr(x, e.X)
if x.mode == invalid {
@@ -177,19 +185,29 @@ func (check *Checker) unary(x *operand, e *syntax.Operation) {
return
case syntax.Recv:
- typ := asChan(x.typ)
- if typ == nil {
- check.errorf(x, invalidOp+"cannot receive from non-channel %s", x)
- x.mode = invalid
- return
- }
- if typ.dir == SendOnly {
- check.errorf(x, invalidOp+"cannot receive from send-only channel %s", x)
+ var elem Type
+ if !underIs(x.typ, func(u Type) bool {
+ ch, _ := u.(*Chan)
+ if ch == nil {
+ check.errorf(x, invalidOp+"cannot receive from non-channel %s", x)
+ return false
+ }
+ if ch.dir == SendOnly {
+ check.errorf(x, invalidOp+"cannot receive from send-only channel %s", x)
+ return false
+ }
+ if elem != nil && !Identical(ch.elem, elem) {
+ check.errorf(x, invalidOp+"channels of %s must have the same element type", x)
+ return false
+ }
+ elem = ch.elem
+ return true
+ }) {
x.mode = invalid
return
}
x.mode = commaok
- x.typ = typ.elem
+ x.typ = elem
check.hasCallOrRecv = true
return
}
@@ -664,7 +682,6 @@ func (check *Checker) convertUntyped(x *operand, target Type) {
// If x is a constant operand, the returned constant.Value will be the
// representation of x in this context.
func (check *Checker) implicitTypeAndValue(x *operand, target Type) (Type, constant.Value, errorCode) {
- target = expand(target)
if x.mode == invalid || isTyped(x.typ) || target == Typ[Invalid] {
return x.typ, nil, 0
}
@@ -691,7 +708,7 @@ func (check *Checker) implicitTypeAndValue(x *operand, target Type) (Type, const
return nil, nil, _InvalidUntypedConversion
}
- switch t := optype(target).(type) {
+ switch t := under(target).(type) {
case *Basic:
if x.mode == constant_ {
v, code := check.representation(x, t)
@@ -723,8 +740,8 @@ func (check *Checker) implicitTypeAndValue(x *operand, target Type) (Type, const
default:
return nil, nil, _InvalidUntypedConversion
}
- case *Sum:
- ok := t.is(func(t Type) bool {
+ case *TypeParam:
+ ok := t.underIs(func(t Type) bool {
target, _, _ := check.implicitTypeAndValue(x, t)
return target != nil
})
@@ -735,7 +752,6 @@ func (check *Checker) implicitTypeAndValue(x *operand, target Type) (Type, const
// Update operand types to the default type rather than the target
// (interface) type: values must have concrete dynamic types.
// Untyped nil was handled upfront.
- check.completeInterface(nopos, t)
if !t.Empty() {
return nil, nil, _InvalidUntypedConversion // cannot assign untyped values to non-empty interfaces
}
@@ -972,14 +988,28 @@ func (check *Checker) binary(x *operand, e syntax.Expr, lhs, rhs syntax.Expr, op
return
}
- check.convertUntyped(x, y.typ)
- if x.mode == invalid {
- return
+ canMix := func(x, y *operand) bool {
+ if IsInterface(x.typ) || IsInterface(y.typ) {
+ return true
+ }
+ if isBoolean(x.typ) != isBoolean(y.typ) {
+ return false
+ }
+ if isString(x.typ) != isString(y.typ) {
+ return false
+ }
+ return true
}
- check.convertUntyped(&y, x.typ)
- if y.mode == invalid {
- x.mode = invalid
- return
+ if canMix(x, &y) {
+ check.convertUntyped(x, y.typ)
+ if x.mode == invalid {
+ return
+ }
+ check.convertUntyped(&y, x.typ)
+ if y.mode == invalid {
+ x.mode = invalid
+ return
+ }
}
if isComparison(op) {
@@ -987,7 +1017,7 @@ func (check *Checker) binary(x *operand, e syntax.Expr, lhs, rhs syntax.Expr, op
return
}
- if !check.identical(x.typ, y.typ) {
+ if !Identical(x.typ, y.typ) {
// only report an error if we have valid types
// (otherwise we had an error reported elsewhere already)
if x.typ != Typ[Invalid] && y.typ != Typ[Invalid] {
@@ -1184,7 +1214,7 @@ func (check *Checker) exprInternal(x *operand, e syntax.Expr, hint Type) exprKin
goto Error
}
- switch utyp := optype(base).(type) {
+ switch utyp := under(base).(type) {
case *Struct:
if len(e.ElemList) == 0 {
break
@@ -1316,7 +1346,7 @@ func (check *Checker) exprInternal(x *operand, e syntax.Expr, hint Type) exprKin
xkey := keyVal(x.val)
if asInterface(utyp.key) != nil {
for _, vtyp := range visited[xkey] {
- if check.identical(vtyp, x.typ) {
+ if Identical(vtyp, x.typ) {
duplicate = true
break
}
@@ -1448,13 +1478,24 @@ func (check *Checker) exprInternal(x *operand, e syntax.Expr, hint Type) exprKin
case typexpr:
x.typ = &Pointer{base: x.typ}
default:
- if typ := asPointer(x.typ); typ != nil {
- x.mode = variable
- x.typ = typ.base
- } else {
- check.errorf(x, invalidOp+"cannot indirect %s", x)
+ var base Type
+ if !underIs(x.typ, func(u Type) bool {
+ p, _ := u.(*Pointer)
+ if p == nil {
+ check.errorf(x, invalidOp+"cannot indirect %s", x)
+ return false
+ }
+ if base != nil && !Identical(p.base, base) {
+ check.errorf(x, invalidOp+"pointers of %s must have identical base types", x)
+ return false
+ }
+ base = p.base
+ return true
+ }) {
goto Error
}
+ x.mode = variable
+ x.typ = base
}
break
}
@@ -1537,7 +1578,7 @@ func (check *Checker) typeAssertion(pos syntax.Pos, x *operand, xtyp *Interface,
}
var msg string
if wrongType != nil {
- if check.identical(method.typ, wrongType.typ) {
+ if Identical(method.typ, wrongType.typ) {
msg = fmt.Sprintf("missing method %s (%s has pointer receiver)", method.name, method.name)
} else {
msg = fmt.Sprintf("wrong type for method %s (have %s, want %s)", method.name, wrongType.typ, method.typ)
diff --git a/src/cmd/compile/internal/types2/index.go b/src/cmd/compile/internal/types2/index.go
index c94017a8fb..e8755a1a68 100644
--- a/src/cmd/compile/internal/types2/index.go
+++ b/src/cmd/compile/internal/types2/index.go
@@ -32,7 +32,7 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo
return false
case value:
- if sig := asSignature(x.typ); sig != nil && len(sig.tparams) > 0 {
+ if sig := asSignature(x.typ); sig != nil && sig.TParams().Len() > 0 {
// function instantiation
return true
}
@@ -41,7 +41,7 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo
// ordinary index expression
valid := false
length := int64(-1) // valid if >= 0
- switch typ := optype(x.typ).(type) {
+ switch typ := under(x.typ).(type) {
case *Basic:
if isString(typ) {
valid = true
@@ -80,7 +80,7 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo
index := check.singleIndex(e)
if index == nil {
x.mode = invalid
- return
+ return false
}
var key operand
check.expr(&key, index)
@@ -89,87 +89,80 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo
x.mode = mapindex
x.typ = typ.elem
x.expr = e
- return
+ return false
- case *Sum:
- // A sum type can be indexed if all of the sum's types
- // support indexing and have the same index and element
- // type. Special rules apply for maps in the sum type.
- var tkey, telem Type // key is for map types only
- nmaps := 0 // number of map types in sum type
- if typ.is(func(t Type) bool {
- var e Type
- switch t := under(t).(type) {
+ case *TypeParam:
+ // TODO(gri) report detailed failure cause for better error messages
+ var tkey, telem Type // tkey != nil if we have maps
+ if typ.underIs(func(u Type) bool {
+ var key, elem Type
+ alen := int64(-1) // valid if >= 0
+ switch t := u.(type) {
case *Basic:
- if isString(t) {
- e = universeByte
+ if !isString(t) {
+ return false
}
+ elem = universeByte
case *Array:
- e = t.elem
+ elem = t.elem
+ alen = t.len
case *Pointer:
- if t := asArray(t.base); t != nil {
- e = t.elem
+ a, _ := under(t.base).(*Array)
+ if a == nil {
+ return false
}
+ elem = a.elem
+ alen = a.len
case *Slice:
- e = t.elem
+ elem = t.elem
case *Map:
- // If there are multiple maps in the sum type,
- // they must have identical key types.
- // TODO(gri) We may be able to relax this rule
- // but it becomes complicated very quickly.
- if tkey != nil && !Identical(t.key, tkey) {
+ key = t.key
+ elem = t.elem
+ default:
+ return false
+ }
+ assert(elem != nil)
+ if telem == nil {
+ // first type
+ tkey, telem = key, elem
+ length = alen
+ } else {
+ // all map keys must be identical (incl. all nil)
+ if !Identical(key, tkey) {
return false
}
- tkey = t.key
- e = t.elem
- nmaps++
- case *TypeParam:
- check.errorf(x, "type of %s contains a type parameter - cannot index (implementation restriction)", x)
- case *instance:
- panic("unimplemented")
- }
- if e == nil || telem != nil && !Identical(e, telem) {
- return false
+ // all element types must be identical
+ if !Identical(elem, telem) {
+ return false
+ }
+ tkey, telem = key, elem
+ // track the minimal length for arrays
+ if alen >= 0 && alen < length {
+ length = alen
+ }
}
- telem = e
return true
}) {
- // If there are maps, the index expression must be assignable
- // to the map key type (as for simple map index expressions).
- if nmaps > 0 {
+ // For maps, the index expression must be assignable to the map key type.
+ if tkey != nil {
index := check.singleIndex(e)
if index == nil {
x.mode = invalid
- return
+ return false
}
var key operand
check.expr(&key, index)
check.assignment(&key, tkey, "map index")
// ok to continue even if indexing failed - map element type is known
-
- // If there are only maps, we are done.
- if nmaps == len(typ.types) {
- x.mode = mapindex
- x.typ = telem
- x.expr = e
- return
- }
-
- // Otherwise we have mix of maps and other types. For
- // now we require that the map key be an integer type.
- // TODO(gri) This is probably not good enough.
- valid = isInteger(tkey)
- // avoid 2nd indexing error if indexing failed above
- if !valid && key.mode == invalid {
- x.mode = invalid
- return
- }
- x.mode = value // map index expressions are not addressable
- } else {
- // no maps
- valid = true
- x.mode = variable
+ x.mode = mapindex
+ x.typ = telem
+ x.expr = e
+ return false
}
+
+ // no maps
+ valid = true
+ x.mode = variable
x.typ = telem
}
}
@@ -177,13 +170,13 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo
if !valid {
check.errorf(x, invalidOp+"cannot index %s", x)
x.mode = invalid
- return
+ return false
}
index := check.singleIndex(e)
if index == nil {
x.mode = invalid
- return
+ return false
}
// In pathological (invalid) cases (e.g.: type T1 [][[]T1{}[0][0]]T0)
@@ -206,7 +199,7 @@ func (check *Checker) sliceExpr(x *operand, e *syntax.SliceExpr) {
valid := false
length := int64(-1) // valid if >= 0
- switch typ := optype(x.typ).(type) {
+ switch typ := under(x.typ).(type) {
case *Basic:
if isString(typ) {
if e.Full {
@@ -246,7 +239,7 @@ func (check *Checker) sliceExpr(x *operand, e *syntax.SliceExpr) {
valid = true
// x.typ doesn't change
- case *Sum, *TypeParam:
+ case *TypeParam:
check.error(x, "generic slice expressions not yet implemented")
x.mode = invalid
return
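The rewritten *TypeParam case in indexExpr above requires that every type in the type set support indexing with identical element types, with maps handled separately and array lengths tracked for the minimal valid length. An illustrative language-level sketch of what that permits (not part of this change):

package main

import "fmt"

// Every type in P's type set can be indexed and yields the same element
// type (byte), so p[i] is valid; the minimal-length tracking above only
// matters when arrays appear in the type set.
func at[P interface{ []byte | string }](p P, i int) byte {
	return p[i]
}

func main() {
	fmt.Println(at("hello", 1))         // 101
	fmt.Println(at([]byte{7, 8, 9}, 2)) // 9
}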
diff --git a/src/cmd/compile/internal/types2/infer.go b/src/cmd/compile/internal/types2/infer.go
index f37d7f6477..ff4bb3ea17 100644
--- a/src/cmd/compile/internal/types2/infer.go
+++ b/src/cmd/compile/internal/types2/infer.go
@@ -83,7 +83,7 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeName, targs []Type, p
// Substitute type arguments for their respective type parameters in params,
// if any. Note that nil targs entries are ignored by check.subst.
- // TODO(gri) Can we avoid this (we're setting known type argumemts below,
+ // TODO(gri) Can we avoid this (we're setting known type arguments below,
// but that doesn't impact the isParameterized check for now).
if params.Len() > 0 {
smap := makeSubstMap(tparams, targs)
@@ -94,7 +94,7 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeName, targs []Type, p
// Unify parameter and argument types for generic parameters with typed arguments
// and collect the indices of generic parameters with untyped arguments.
// Terminology: generic parameter = function parameter with a type-parameterized type
- u := newUnifier(check, false)
+ u := newUnifier(false)
u.x.init(tparams)
// Set the type arguments which we know already.
@@ -280,7 +280,7 @@ func (w *tpWalker) isParameterized(typ Type) (res bool) {
}()
switch t := typ.(type) {
- case nil, *Basic: // TODO(gri) should nil be handled here?
+ case nil, *top, *Basic: // TODO(gri) should nil be handled here?
break
case *Array:
@@ -307,9 +307,6 @@ func (w *tpWalker) isParameterized(typ Type) (res bool) {
}
}
- case *Sum:
- return w.isParameterizedList(t.types)
-
case *Signature:
// t.tparams may not be nil if we are looking at a signature
// of a generic function type (or an interface method) that is
@@ -321,24 +318,15 @@ func (w *tpWalker) isParameterized(typ Type) (res bool) {
return w.isParameterized(t.params) || w.isParameterized(t.results)
case *Interface:
- if t.allMethods != nil {
- // interface is complete - quick test
- for _, m := range t.allMethods {
- if w.isParameterized(m.typ) {
- return true
- }
+ tset := t.typeSet()
+ for _, m := range tset.methods {
+ if w.isParameterized(m.typ) {
+ return true
}
- return w.isParameterizedList(unpack(t.allTypes))
}
-
- return t.iterate(func(t *Interface) bool {
- for _, m := range t.methods {
- if w.isParameterized(m.typ) {
- return true
- }
- }
- return w.isParameterizedList(unpack(t.types))
- }, nil)
+ return tset.is(func(t *term) bool {
+ return w.isParameterized(t.typ)
+ })
case *Map:
return w.isParameterized(t.key) || w.isParameterized(t.elem)
@@ -347,15 +335,12 @@ func (w *tpWalker) isParameterized(typ Type) (res bool) {
return w.isParameterized(t.elem)
case *Named:
- return w.isParameterizedList(t.targs)
+ return w.isParameterizedTypeList(t.targs)
case *TypeParam:
// t must be one of w.tparams
return t.index < len(w.tparams) && w.tparams[t.index].typ == t
- case *instance:
- return w.isParameterizedList(t.targs)
-
default:
unreachable()
}
@@ -363,7 +348,7 @@ func (w *tpWalker) isParameterized(typ Type) (res bool) {
return false
}
-func (w *tpWalker) isParameterizedList(list []Type) bool {
+func (w *tpWalker) isParameterizedTypeList(list []Type) bool {
for _, t := range list {
if w.isParameterized(t) {
return true
@@ -385,7 +370,7 @@ func (check *Checker) inferB(tparams []*TypeName, targs []Type, report bool) (ty
// Setup bidirectional unification between those structural bounds
// and the corresponding type arguments (which may be nil!).
- u := newUnifier(check, false)
+ u := newUnifier(false)
u.x.init(tparams)
u.y = u.x // type parameters between LHS and RHS of unification are identical
@@ -399,7 +384,7 @@ func (check *Checker) inferB(tparams []*TypeName, targs []Type, report bool) (ty
// Unify type parameters with their structural constraints, if any.
for _, tpar := range tparams {
typ := tpar.typ.(*TypeParam)
- sbound := check.structuralType(typ.bound)
+ sbound := typ.structuralType()
if sbound != nil {
if !u.unify(typ, sbound) {
if report {
@@ -412,8 +397,8 @@ func (check *Checker) inferB(tparams []*TypeName, targs []Type, report bool) (ty
// u.x.types() now contains the incoming type arguments plus any additional type
// arguments for which there were structural constraints. The newly inferred non-
- // nil entries may still contain references to other type parameters. For instance,
- // for [A any, B interface{type []C}, C interface{type *A}], if A == int
+ // nil entries may still contain references to other type parameters.
+ // For instance, for [A any, B interface{ []C }, C interface{ *A }], if A == int
// was given, unification produced the type list [int, []C, *A]. We eliminate the
// remaining type parameters by substituting the type parameters in this type list
// until nothing changes anymore.
@@ -472,16 +457,3 @@ func (check *Checker) inferB(tparams []*TypeName, targs []Type, report bool) (ty
return
}
-
-// structuralType returns the structural type of a constraint, if any.
-func (check *Checker) structuralType(constraint Type) Type {
- if iface, _ := under(constraint).(*Interface); iface != nil {
- check.completeInterface(nopos, iface)
- types := unpack(iface.allTypes)
- if len(types) == 1 {
- return types[0]
- }
- return nil
- }
- return constraint
-}
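The inferB logic above eliminates type parameters by unifying them with the single structural type of their constraint, as in the [A any, B interface{ []C }, C interface{ *A }] example in the comment. A small language-level sketch of the kind of inference this enables (illustrative only; assumes Go 1.18 syntax):

package main

import "fmt"

// S's constraint has the single structural type []E, so once S is known
// from the argument, E is inferred from it as well.
func sum[S interface{ ~[]E }, E ~int | ~float64](s S) E {
	var total E
	for _, v := range s {
		total += v
	}
	return total
}

func main() {
	fmt.Println(sum([]int{1, 2, 3}))      // 6 (E inferred as int)
	fmt.Println(sum([]float64{1.5, 2.5})) // 4 (E inferred as float64)
}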
diff --git a/src/cmd/compile/internal/types2/instantiate.go b/src/cmd/compile/internal/types2/instantiate.go
index 0df52e851c..0bb4ac956b 100644
--- a/src/cmd/compile/internal/types2/instantiate.go
+++ b/src/cmd/compile/internal/types2/instantiate.go
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// This file implements instantiation of generic types
+// through substitution of type parameters by type arguments.
+
package types2
import (
@@ -9,23 +12,26 @@ import (
"fmt"
)
-// Instantiate instantiates the type typ with the given type arguments.
-// typ must be a *Named or a *Signature type, it must be generic, and
-// its number of type parameters must match the number of provided type
-// arguments. The result is a new, instantiated (not generic) type of
-// the same kind (either a *Named or a *Signature). The type arguments
-// are not checked against the constraints of the type parameters.
+// Instantiate instantiates the type typ with the given type arguments
+// targs. To check type constraint satisfaction, verify must be set.
+// pos and posList correspond to the instantiation and type argument
+// positions respectively; posList may be nil or shorter than the number
+// of type arguments provided.
+// typ must be a *Named or a *Signature type, and its number of type
+// parameters must match the number of provided type arguments.
+// The receiver (check) may be nil if and only if verify is not set.
+// The result is a new, instantiated (not generic) type of the same kind
+// (either a *Named or a *Signature).
// Any methods attached to a *Named are simply copied; they are not
// instantiated.
-func Instantiate(pos syntax.Pos, typ Type, targs []Type) (res Type) {
- // TODO(gri) This code is basically identical to the prolog
- // in Checker.instantiate. Factor.
+func (check *Checker) Instantiate(pos syntax.Pos, typ Type, targs []Type, posList []syntax.Pos, verify bool) (res Type) {
+ // TODO(gri) What is better here: work with TypeParams, or work with TypeNames?
var tparams []*TypeName
switch t := typ.(type) {
case *Named:
- tparams = t.tparams
+ tparams = t.TParams().list()
case *Signature:
- tparams = t.tparams
+ tparams = t.TParams().list()
defer func() {
// If we had an unexpected failure somewhere don't panic below when
// asserting res.(*Signature). Check for *Signature in case Typ[Invalid]
@@ -44,20 +50,194 @@ func Instantiate(pos syntax.Pos, typ Type, targs []Type) (res Type) {
// anymore; we need to set tparams to nil.
res.(*Signature).tparams = nil
}()
-
default:
+ // only types and functions can be generic
panic(fmt.Sprintf("%v: cannot instantiate %v", pos, typ))
}
+ inst := check.instantiate(pos, typ, tparams, targs, posList)
+ if verify && len(tparams) == len(targs) {
+ check.verify(pos, tparams, targs, posList)
+ }
+ return inst
+}
+
+func (check *Checker) instantiate(pos syntax.Pos, typ Type, tparams []*TypeName, targs []Type, posList []syntax.Pos) (res Type) {
// the number of supplied types must match the number of type parameters
if len(targs) != len(tparams) {
+ // TODO(gri) provide better error message
+ if check != nil {
+ check.errorf(pos, "got %d arguments but %d type parameters", len(targs), len(tparams))
+ return Typ[Invalid]
+ }
panic(fmt.Sprintf("%v: got %d arguments but %d type parameters", pos, len(targs), len(tparams)))
}
+ if check != nil && check.conf.Trace {
+ check.trace(pos, "-- instantiating %s with %s", typ, typeListString(targs))
+ check.indent++
+ defer func() {
+ check.indent--
+ var under Type
+ if res != nil {
+ // Calling under() here may lead to endless instantiations.
+ // Test case: type T[P any] T[P]
+ // TODO(gri) investigate if that's a bug or to be expected.
+ under = res.Underlying()
+ }
+ check.trace(pos, "=> %s (under = %s)", res, under)
+ }()
+ }
+
+ assert(len(posList) <= len(targs))
+
if len(tparams) == 0 {
return typ // nothing to do (minor optimization)
}
+ return check.subst(pos, typ, makeSubstMap(tparams, targs))
+}
+
+// InstantiateLazy is like Instantiate, but avoids actually
+// instantiating the type until needed. typ must be a *Named
+// type.
+func (check *Checker) InstantiateLazy(pos syntax.Pos, typ Type, targs []Type, posList []syntax.Pos, verify bool) Type {
+ // Don't use asNamed here: we don't want to expand the base during lazy
+ // instantiation.
+ base := typ.(*Named)
+ if base == nil {
+ panic(fmt.Sprintf("%v: cannot instantiate %v", pos, typ))
+ }
+
+ if verify && base.TParams().Len() == len(targs) {
+ check.later(func() {
+ check.verify(pos, base.tparams.list(), targs, posList)
+ })
+ }
+
+ h := instantiatedHash(base, targs)
+ if check != nil {
+ // typ may already have been instantiated with identical type arguments. In
+ // that case, re-use the existing instance.
+ if named := check.typMap[h]; named != nil {
+ return named
+ }
+ }
+
+ tname := NewTypeName(pos, base.obj.pkg, base.obj.name, nil)
+ named := check.newNamed(tname, base, nil, nil, nil) // methods and tparams are set when named is loaded
+ named.targs = targs
+ named.instance = &instance{pos, posList}
+ if check != nil {
+ check.typMap[h] = named
+ }
+
+ return named
+}
+
+func (check *Checker) verify(pos syntax.Pos, tparams []*TypeName, targs []Type, posList []syntax.Pos) {
+ if check == nil {
+ panic("cannot have nil Checker if verifying constraints")
+ }
+
smap := makeSubstMap(tparams, targs)
- return (*Checker)(nil).subst(pos, typ, smap)
+ for i, tname := range tparams {
+ // best position for error reporting
+ pos := pos
+ if i < len(posList) {
+ pos = posList[i]
+ }
+
+ // stop checking bounds after the first failure
+ if !check.satisfies(pos, targs[i], tname.typ.(*TypeParam), smap) {
+ break
+ }
+ }
+}
+
+// satisfies reports whether the type argument targ satisfies the constraint of type
+// parameter tpar (after any of its type parameters have been substituted through smap).
+// A suitable error is reported if the result is false.
+// TODO(gri) This should be a method of interfaces or type sets.
+func (check *Checker) satisfies(pos syntax.Pos, targ Type, tpar *TypeParam, smap *substMap) bool {
+ iface := tpar.iface()
+ if iface.Empty() {
+ return true // no type bound
+ }
+
+ // The type parameter bound is parameterized with the same type parameters
+ // as the instantiated type; before we can use it for bounds checking we
+ // need to instantiate it with the type arguments with which we instantiate
+ // the parameterized type.
+ iface = check.subst(pos, iface, smap).(*Interface)
+
+ // if iface is comparable, targ must be comparable
+ // TODO(gri) the error message needs to be better here
+ if iface.IsComparable() && !Comparable(targ) {
+ if tpar := asTypeParam(targ); tpar != nil && tpar.iface().typeSet().IsAll() {
+ check.softErrorf(pos, "%s has no constraints", targ)
+ return false
+ }
+ check.softErrorf(pos, "%s does not satisfy comparable", targ)
+ return false
+ }
+
+ // targ must implement iface (methods)
+ // - check only if we have methods
+ if iface.NumMethods() > 0 {
+ // If the type argument is a pointer to a type parameter, the type argument's
+ // method set is empty.
+ // TODO(gri) is this what we want? (spec question)
+ if base, isPtr := deref(targ); isPtr && asTypeParam(base) != nil {
+ check.errorf(pos, "%s has no methods", targ)
+ return false
+ }
+ if m, wrong := check.missingMethod(targ, iface, true); m != nil {
+ // TODO(gri) needs to print updated name to avoid major confusion in error message!
+ // (print warning for now)
+ // Old warning:
+ // check.softErrorf(pos, "%s does not satisfy %s (warning: name not updated) = %s (missing method %s)", targ, tpar.bound, iface, m)
+ if wrong != nil {
+ // TODO(gri) This can still report uninstantiated types which makes the error message
+ // more difficult to read than necessary.
+ check.softErrorf(pos,
+ "%s does not satisfy %s: wrong method signature\n\tgot %s\n\twant %s",
+ targ, tpar.bound, wrong, m,
+ )
+ } else {
+ check.softErrorf(pos, "%s does not satisfy %s (missing method %s)", targ, tpar.bound, m.name)
+ }
+ return false
+ }
+ }
+
+ // targ's underlying type must also be one of the interface types listed, if any
+ if !iface.typeSet().hasTerms() {
+ return true // nothing to do
+ }
+
+ // If targ is itself a type parameter, each of its possible types, but at least one, must be in the
+ // list of iface types (i.e., the targ type list must be a non-empty subset of the iface types).
+ if targ := asTypeParam(targ); targ != nil {
+ targBound := targ.iface()
+ if !targBound.typeSet().hasTerms() {
+ check.softErrorf(pos, "%s does not satisfy %s (%s has no type constraints)", targ, tpar.bound, targ)
+ return false
+ }
+ if !targBound.typeSet().subsetOf(iface.typeSet()) {
+ // TODO(gri) need better error message
+ check.softErrorf(pos, "%s does not satisfy %s", targ, tpar.bound)
+ return false
+ }
+ return true
+ }
+
+ // Otherwise, targ's type or underlying type must also be one of the interface types listed, if any.
+ if !iface.typeSet().includes(targ) {
+ // TODO(gri) better error message
+ check.softErrorf(pos, "%s does not satisfy %s", targ, tpar.bound)
+ return false
+ }
+
+ return true
}
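The verify/satisfies path added above is what rejects a type argument that does not meet its constraint: first comparability, then required methods, then type-set membership. Roughly what it enforces, stated at the language level (illustrative sketch, not part of this change):

package main

import "fmt"

// The constraint requires comparability and a String method; satisfies
// checks these in the same order: comparable, methods, type set.
type Keyer interface {
	comparable
	String() string
}

type ID int

func (i ID) String() string { return fmt.Sprintf("id-%d", i) }

func name[T Keyer](v T) string { return v.String() }

func main() {
	fmt.Println(name(ID(7))) // id-7
	// name([]int{1})        // rejected: []int is not comparable and has no String method
}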
diff --git a/src/cmd/compile/internal/types2/interface.go b/src/cmd/compile/internal/types2/interface.go
new file mode 100644
index 0000000000..2617f748de
--- /dev/null
+++ b/src/cmd/compile/internal/types2/interface.go
@@ -0,0 +1,226 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import "cmd/compile/internal/syntax"
+
+// ----------------------------------------------------------------------------
+// API
+
+// An Interface represents an interface type.
+type Interface struct {
+ obj *TypeName // corresponding declared object; or nil (for better error messages)
+ methods []*Func // ordered list of explicitly declared methods
+ embeddeds []Type // ordered list of explicitly embedded elements
+ embedPos *[]syntax.Pos // positions of embedded elements; or nil (for error messages) - use pointer to save space
+ complete bool // indicates that all fields (except for tset) are set up
+
+ tset *TypeSet // type set described by this interface, computed lazily
+}
+
+// typeSet returns the type set for interface t.
+func (t *Interface) typeSet() *TypeSet { return computeInterfaceTypeSet(nil, nopos, t) }
+
+// emptyInterface represents the empty interface
+var emptyInterface = Interface{complete: true, tset: &topTypeSet}
+
+// NewInterface returns a new interface for the given methods and embedded types.
+// NewInterface takes ownership of the provided methods and may modify their types
+// by setting missing receivers.
+//
+// Deprecated: Use NewInterfaceType instead which allows arbitrary embedded types.
+func NewInterface(methods []*Func, embeddeds []*Named) *Interface {
+ tnames := make([]Type, len(embeddeds))
+ for i, t := range embeddeds {
+ tnames[i] = t
+ }
+ return NewInterfaceType(methods, tnames)
+}
+
+// NewInterfaceType returns a new interface for the given methods and embedded types.
+// NewInterfaceType takes ownership of the provided methods and may modify their types
+// by setting missing receivers.
+func NewInterfaceType(methods []*Func, embeddeds []Type) *Interface {
+ if len(methods) == 0 && len(embeddeds) == 0 {
+ return &emptyInterface
+ }
+
+ // set method receivers if necessary
+ typ := new(Interface)
+ for _, m := range methods {
+ if sig := m.typ.(*Signature); sig.recv == nil {
+ sig.recv = NewVar(m.pos, m.pkg, "", typ)
+ }
+ }
+
+ // sort for API stability
+ sortMethods(methods)
+
+ typ.methods = methods
+ typ.embeddeds = embeddeds
+ typ.complete = true
+
+ return typ
+}
+
+// NumExplicitMethods returns the number of explicitly declared methods of interface t.
+func (t *Interface) NumExplicitMethods() int { return len(t.methods) }
+
+// ExplicitMethod returns the i'th explicitly declared method of interface t for 0 <= i < t.NumExplicitMethods().
+// The methods are ordered by their unique Id.
+func (t *Interface) ExplicitMethod(i int) *Func { return t.methods[i] }
+
+// NumEmbeddeds returns the number of embedded types in interface t.
+func (t *Interface) NumEmbeddeds() int { return len(t.embeddeds) }
+
+// Embedded returns the i'th embedded defined (*Named) type of interface t for 0 <= i < t.NumEmbeddeds().
+// The result is nil if the i'th embedded type is not a defined type.
+//
+// Deprecated: Use EmbeddedType which is not restricted to defined (*Named) types.
+func (t *Interface) Embedded(i int) *Named { tname, _ := t.embeddeds[i].(*Named); return tname }
+
+// EmbeddedType returns the i'th embedded type of interface t for 0 <= i < t.NumEmbeddeds().
+func (t *Interface) EmbeddedType(i int) Type { return t.embeddeds[i] }
+
+// NumMethods returns the total number of methods of interface t.
+func (t *Interface) NumMethods() int { return t.typeSet().NumMethods() }
+
+// Method returns the i'th method of interface t for 0 <= i < t.NumMethods().
+// The methods are ordered by their unique Id.
+func (t *Interface) Method(i int) *Func { return t.typeSet().Method(i) }
+
+// Empty reports whether t is the empty interface.
+func (t *Interface) Empty() bool { return t.typeSet().IsAll() }
+
+// IsComparable reports whether each type in interface t's type set is comparable.
+func (t *Interface) IsComparable() bool { return t.typeSet().IsComparable() }
+
+// IsConstraint reports whether interface t is not just a method set.
+func (t *Interface) IsConstraint() bool { return !t.typeSet().IsMethodSet() }
+
+func (t *Interface) Underlying() Type { return t }
+func (t *Interface) String() string { return TypeString(t, nil) }
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType, def *Named) {
+ var tlist []syntax.Expr // types collected from all type lists
+ var tname *syntax.Name // most recent "type" name
+
+ addEmbedded := func(pos syntax.Pos, typ Type) {
+ ityp.embeddeds = append(ityp.embeddeds, typ)
+ if ityp.embedPos == nil {
+ ityp.embedPos = new([]syntax.Pos)
+ }
+ *ityp.embedPos = append(*ityp.embedPos, pos)
+ }
+
+ for _, f := range iface.MethodList {
+ if f.Name == nil {
+ // We have an embedded type; possibly a union of types.
+ addEmbedded(posFor(f.Type), parseUnion(check, flattenUnion(nil, f.Type)))
+ continue
+ }
+ // f.Name != nil
+
+ // We have a method with name f.Name, or a type of a type list (f.Name.Value == "type").
+ name := f.Name.Value
+ if name == "_" {
+ if check.conf.CompilerErrorMessages {
+ check.error(f.Name, "methods must have a unique non-blank name")
+ } else {
+ check.error(f.Name, "invalid method name _")
+ }
+ continue // ignore
+ }
+
+ // TODO(gri) Remove type list handling once the parser doesn't accept type lists anymore.
+ if name == "type" {
+ // Report an error for the first type list per interface
+ // if we don't allow type lists, but continue.
+ if !check.conf.AllowTypeLists && tlist == nil {
+ check.softErrorf(f.Name, "use generalized embedding syntax instead of a type list")
+ }
+ // For now, collect all type list entries as if it
+ // were a single union, where each union element is
+ // of the form ~T.
+ op := new(syntax.Operation)
+ // We should also set the position (but there is no setter);
+ // we don't care because this code will eventually go away.
+ op.Op = syntax.Tilde
+ op.X = f.Type
+ tlist = append(tlist, op)
+ // Report an error if we have multiple type lists in an
+ // interface, but only if they are permitted in the first place.
+ if check.conf.AllowTypeLists && tname != nil && tname != f.Name {
+ check.error(f.Name, "cannot have multiple type lists in an interface")
+ }
+ tname = f.Name
+ continue
+ }
+
+ typ := check.typ(f.Type)
+ sig, _ := typ.(*Signature)
+ if sig == nil {
+ if typ != Typ[Invalid] {
+ check.errorf(f.Type, invalidAST+"%s is not a method signature", typ)
+ }
+ continue // ignore
+ }
+
+ // Always type-check method type parameters but complain if they are not enabled.
+ // (This extra check is needed here because interface method signatures don't have
+ // a receiver specification.)
+ if sig.tparams != nil && !acceptMethodTypeParams {
+ check.error(f.Type, "methods cannot have type parameters")
+ }
+
+ // use named receiver type if available (for better error messages)
+ var recvTyp Type = ityp
+ if def != nil {
+ recvTyp = def
+ }
+ sig.recv = NewVar(f.Name.Pos(), check.pkg, "", recvTyp)
+
+ m := NewFunc(f.Name.Pos(), check.pkg, name, sig)
+ check.recordDef(f.Name, m)
+ ityp.methods = append(ityp.methods, m)
+ }
+
+ // If we saw a type list, add it like an embedded union.
+ if tlist != nil {
+ // Types T in a type list are added as ~T expressions but we don't
+ // have the position of the '~'. Use the first type position instead.
+ addEmbedded(tlist[0].(*syntax.Operation).X.Pos(), parseUnion(check, tlist))
+ }
+
+ // All methods and embedded elements for this interface are collected;
+ // i.e., this interface may be used in a type set computation.
+ ityp.complete = true
+
+ if len(ityp.methods) == 0 && len(ityp.embeddeds) == 0 {
+ // empty interface
+ ityp.tset = &topTypeSet
+ return
+ }
+
+ // sort for API stability
+ // (don't sort embeddeds: they must correspond to *embedPos entries)
+ sortMethods(ityp.methods)
+
+ // Compute type set with a non-nil *Checker as soon as possible
+ // to report any errors. Subsequent uses of type sets will use
+ // this computed type set and won't need to pass in a *Checker.
+ check.later(func() { computeInterfaceTypeSet(check, iface.Pos(), ityp) })
+}
+
+func flattenUnion(list []syntax.Expr, x syntax.Expr) []syntax.Expr {
+ if o, _ := x.(*syntax.Operation); o != nil && o.Op == syntax.Or {
+ list = flattenUnion(list, o.X)
+ x = o.Y
+ }
+ return append(list, x)
+}
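types2 is internal to the compiler, but the exported go/types package provides the same NewInterfaceType shape. A sketch of constructing an interface value programmatically, using go/types purely for illustration:

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	// Build the method String() string.
	res := types.NewTuple(types.NewVar(token.NoPos, nil, "", types.Typ[types.String]))
	sig := types.NewSignature(nil, nil, res, false)
	str := types.NewFunc(token.NoPos, nil, "String", sig)

	iface := types.NewInterfaceType([]*types.Func{str}, nil)
	iface.Complete() // per go/types docs, complete before further use
	fmt.Println(iface)              // interface{String() string}
	fmt.Println(iface.NumMethods()) // 1
}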
diff --git a/src/cmd/compile/internal/types2/issues_test.go b/src/cmd/compile/internal/types2/issues_test.go
index e716a48038..9890b79323 100644
--- a/src/cmd/compile/internal/types2/issues_test.go
+++ b/src/cmd/compile/internal/types2/issues_test.go
@@ -321,7 +321,7 @@ func TestIssue25627(t *testing.T) {
}
}
- syntax.Walk(f, func(n syntax.Node) bool {
+ syntax.Crawl(f, func(n syntax.Node) bool {
if decl, _ := n.(*syntax.TypeDecl); decl != nil {
if tv, ok := info.Types[decl.Type]; ok && decl.Name.Value == "T" {
want := strings.Count(src, ";") + 1
@@ -402,8 +402,9 @@ func TestIssue28282(t *testing.T) {
// create type interface { error }
et := Universe.Lookup("error").Type()
it := NewInterfaceType(nil, []Type{et})
- it.Complete()
// verify that after completing the interface, the embedded method remains unchanged
+ // (interfaces are "completed" lazily now, so the completion happens implicitly when
+ // accessing Method(0))
want := et.Underlying().(*Interface).Method(0)
got := it.Method(0)
if got != want {
diff --git a/src/cmd/compile/internal/types2/labels.go b/src/cmd/compile/internal/types2/labels.go
index d3206988b5..6f02e2fc96 100644
--- a/src/cmd/compile/internal/types2/labels.go
+++ b/src/cmd/compile/internal/types2/labels.go
@@ -32,7 +32,8 @@ func (check *Checker) labels(body *syntax.BlockStmt) {
}
// spec: "It is illegal to define a label that is never used."
- for _, obj := range all.elems {
+ for name, obj := range all.elems {
+ obj = resolve(name, obj)
if lbl := obj.(*Label); !lbl.used {
check.softErrorf(lbl.pos, "label %s declared but not used", lbl.name)
}
diff --git a/src/cmd/compile/internal/types2/lookup.go b/src/cmd/compile/internal/types2/lookup.go
index 78299502e9..0363008ad9 100644
--- a/src/cmd/compile/internal/types2/lookup.go
+++ b/src/cmd/compile/internal/types2/lookup.go
@@ -6,6 +6,11 @@
package types2
+// Internal use of LookupFieldOrMethod: If the obj result is a method
+// associated with a concrete (non-interface) type, the method's signature
+// may not be fully set up. Call Checker.objDecl(obj, nil) before accessing
+// the method's type.
+
// LookupFieldOrMethod looks up a field or method with given package and name
// in T and returns the corresponding *Var or *Func, an index sequence, and a
// bool indicating if there were any pointer indirections on the path to the
@@ -33,19 +38,6 @@ package types2
// the method's formal receiver base type, nor was the receiver addressable.
//
func LookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
- return (*Checker)(nil).lookupFieldOrMethod(T, addressable, pkg, name)
-}
-
-// Internal use of Checker.lookupFieldOrMethod: If the obj result is a method
-// associated with a concrete (non-interface) type, the method's signature
-// may not be fully set up. Call Checker.objDecl(obj, nil) before accessing
-// the method's type.
-// TODO(gri) Now that we provide the *Checker, we can probably remove this
-// caveat by calling Checker.objDecl from lookupFieldOrMethod. Investigate.
-
-// lookupFieldOrMethod is like the external version but completes interfaces
-// as necessary.
-func (check *Checker) lookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
// Methods cannot be associated to a named pointer type
// (spec: "The type denoted by T is called the receiver base type;
// it must not be a pointer or interface type and it must be declared
@@ -54,8 +46,8 @@ func (check *Checker) lookupFieldOrMethod(T Type, addressable bool, pkg *Package
// pointer type but discard the result if it is a method since we would
// not have found it for T (see also issue 8590).
if t := asNamed(T); t != nil {
- if p, _ := t.underlying.(*Pointer); p != nil {
- obj, index, indirect = check.rawLookupFieldOrMethod(p, false, pkg, name)
+ if p, _ := t.Underlying().(*Pointer); p != nil {
+ obj, index, indirect = lookupFieldOrMethod(p, false, pkg, name)
if _, ok := obj.(*Func); ok {
return nil, nil, false
}
@@ -63,7 +55,7 @@ func (check *Checker) lookupFieldOrMethod(T Type, addressable bool, pkg *Package
}
}
- return check.rawLookupFieldOrMethod(T, addressable, pkg, name)
+ return lookupFieldOrMethod(T, addressable, pkg, name)
}
// TODO(gri) The named type consolidation and seen maps below must be
@@ -71,10 +63,9 @@ func (check *Checker) lookupFieldOrMethod(T Type, addressable bool, pkg *Package
// types always have only one representation (even when imported
// indirectly via different packages.)
-// rawLookupFieldOrMethod should only be called by lookupFieldOrMethod and missingMethod.
-func (check *Checker) rawLookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
+// lookupFieldOrMethod should only be called by LookupFieldOrMethod and missingMethod.
+func lookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
// WARNING: The code in this function is extremely subtle - do not modify casually!
- // This function and NewMethodSet should be kept in sync.
if name == "_" {
return // blank fields/methods are never found
@@ -82,10 +73,12 @@ func (check *Checker) rawLookupFieldOrMethod(T Type, addressable bool, pkg *Pack
typ, isPtr := deref(T)
- // *typ where typ is an interface has no methods.
- // Be cautious: typ may be nil (issue 39634, crash #3).
- if typ == nil || isPtr && IsInterface(typ) {
- return
+ // *typ where typ is an interface or type parameter has no methods.
+ switch under(typ).(type) {
+ case *Interface, *TypeParam:
+ if isPtr {
+ return
+ }
}
// Start with typ as single entry at shallowest depth.
@@ -126,6 +119,7 @@ func (check *Checker) rawLookupFieldOrMethod(T Type, addressable bool, pkg *Pack
seen[named] = true
// look for a matching attached method
+ named.load()
if i, m := lookupMethod(named.methods, pkg, name); m != nil {
// potential match
// caution: method may not have a proper signature yet
@@ -181,9 +175,7 @@ func (check *Checker) rawLookupFieldOrMethod(T Type, addressable bool, pkg *Pack
case *Interface:
// look for a matching method
- // TODO(gri) t.allMethods is sorted - use binary search
- check.completeInterface(nopos, t)
- if i, m := lookupMethod(t.allMethods, pkg, name); m != nil {
+ if i, m := t.typeSet().LookupMethod(pkg, name); m != nil {
assert(m.typ != nil)
index = concat(e.index, i)
if obj != nil || e.multiples {
@@ -194,7 +186,7 @@ func (check *Checker) rawLookupFieldOrMethod(T Type, addressable bool, pkg *Pack
}
case *TypeParam:
- if i, m := lookupMethod(t.Bound().allMethods, pkg, name); m != nil {
+ if i, m := t.iface().typeSet().LookupMethod(pkg, name); m != nil {
assert(m.typ != nil)
index = concat(e.index, i)
if obj != nil || e.multiples {
@@ -229,7 +221,7 @@ func (check *Checker) rawLookupFieldOrMethod(T Type, addressable bool, pkg *Pack
return
}
- current = check.consolidateMultiples(next)
+ current = consolidateMultiples(next)
}
return nil, nil, false // not found
@@ -246,7 +238,7 @@ type embeddedType struct {
// consolidateMultiples collects multiple list entries with the same type
// into a single entry marked as containing multiples. The result is the
// consolidated list.
-func (check *Checker) consolidateMultiples(list []embeddedType) []embeddedType {
+func consolidateMultiples(list []embeddedType) []embeddedType {
if len(list) <= 1 {
return list // at most one entry - nothing to do
}
@@ -254,7 +246,7 @@ func (check *Checker) consolidateMultiples(list []embeddedType) []embeddedType {
n := 0 // number of entries w/ unique type
prev := make(map[Type]int) // index at which type was previously seen
for _, e := range list {
- if i, found := check.lookupType(prev, e.typ); found {
+ if i, found := lookupType(prev, e.typ); found {
list[i].multiples = true
// ignore this entry
} else {
@@ -266,14 +258,14 @@ func (check *Checker) consolidateMultiples(list []embeddedType) []embeddedType {
return list[:n]
}
-func (check *Checker) lookupType(m map[Type]int, typ Type) (int, bool) {
+func lookupType(m map[Type]int, typ Type) (int, bool) {
// fast path: maybe the types are equal
if i, found := m[typ]; found {
return i, true
}
for t, i := range m {
- if check.identical(t, typ) {
+ if Identical(t, typ) {
return i, true
}
}
@@ -306,22 +298,18 @@ func MissingMethod(V Type, T *Interface, static bool) (method *Func, wrongType b
// To improve error messages, also report the wrong signature
// when the method exists on *V instead of V.
func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method, wrongType *Func) {
- check.completeInterface(nopos, T)
-
// fast path for common case
if T.Empty() {
return
}
if ityp := asInterface(V); ityp != nil {
- check.completeInterface(nopos, ityp)
- // TODO(gri) allMethods is sorted - can do this more efficiently
- for _, m := range T.allMethods {
- _, f := lookupMethod(ityp.allMethods, m.pkg, m.name)
+ // TODO(gri) the methods are sorted - could do this more efficiently
+ for _, m := range T.typeSet().methods {
+ _, f := ityp.typeSet().LookupMethod(m.pkg, m.name)
if f == nil {
- // if m is the magic method == we're ok (interfaces are comparable)
- if m.name == "==" || !static {
+ if !static {
continue
}
return m, f
@@ -330,17 +318,20 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
// both methods must have the same number of type parameters
ftyp := f.typ.(*Signature)
mtyp := m.typ.(*Signature)
- if len(ftyp.tparams) != len(mtyp.tparams) {
+ if ftyp.TParams().Len() != mtyp.TParams().Len() {
return m, f
}
+ if !acceptMethodTypeParams && ftyp.TParams().Len() > 0 {
+ panic("method with type parameters")
+ }
// If the methods have type parameters we don't care whether they
// are the same or not, as long as they match up. Use unification
// to see if they can be made to match.
// TODO(gri) is this always correct? what about type bounds?
// (Alternative is to rename/subst type parameters and compare.)
- u := newUnifier(check, true)
- u.x.init(ftyp.tparams)
+ u := newUnifier(true)
+ u.x.init(ftyp.TParams().list())
if !u.unify(ftyp, mtyp) {
return m, f
}
@@ -352,14 +343,14 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
// A concrete type implements T if it implements all methods of T.
Vd, _ := deref(V)
Vn := asNamed(Vd)
- for _, m := range T.allMethods {
+ for _, m := range T.typeSet().methods {
// TODO(gri) should this be calling lookupFieldOrMethod instead (and why not)?
- obj, _, _ := check.rawLookupFieldOrMethod(V, false, m.pkg, m.name)
+ obj, _, _ := lookupFieldOrMethod(V, false, m.pkg, m.name)
// Check if *V implements this method of T.
if obj == nil {
ptr := NewPointer(V)
- obj, _, _ = check.rawLookupFieldOrMethod(ptr, false, m.pkg, m.name)
+ obj, _, _ = lookupFieldOrMethod(ptr, false, m.pkg, m.name)
if obj != nil {
return m, obj.(*Func)
}
@@ -368,10 +359,6 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
// we must have a method (not a field of matching function type)
f, _ := obj.(*Func)
if f == nil {
- // if m is the magic method == and V is comparable, we're ok
- if m.name == "==" && Comparable(V) {
- continue
- }
return m, nil
}
@@ -383,9 +370,12 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
// both methods must have the same number of type parameters
ftyp := f.typ.(*Signature)
mtyp := m.typ.(*Signature)
- if len(ftyp.tparams) != len(mtyp.tparams) {
+ if ftyp.TParams().Len() != mtyp.TParams().Len() {
return m, f
}
+ if !acceptMethodTypeParams && ftyp.TParams().Len() > 0 {
+ panic("method with type parameters")
+ }
// If V is a (instantiated) generic type, its methods are still
// parameterized using the original (declaration) receiver type
@@ -394,17 +384,17 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
// In order to compare the signatures, substitute the receiver
// type parameters of ftyp with V's instantiation type arguments.
// This lazily instantiates the signature of method f.
- if Vn != nil && len(Vn.tparams) > 0 {
+ if Vn != nil && Vn.TParams().Len() > 0 {
// Be careful: The number of type arguments may not match
// the number of receiver parameters. If so, an error was
// reported earlier but the length discrepancy is still
// here. Exit early in this case to prevent an assertion
// failure in makeSubstMap.
// TODO(gri) Can we avoid this check by fixing the lengths?
- if len(ftyp.rparams) != len(Vn.targs) {
+ if len(ftyp.RParams().list()) != len(Vn.targs) {
return
}
- ftyp = check.subst(nopos, ftyp, makeSubstMap(ftyp.rparams, Vn.targs)).(*Signature)
+ ftyp = check.subst(nopos, ftyp, makeSubstMap(ftyp.RParams().list(), Vn.targs)).(*Signature)
}
// If the methods have type parameters we don't care whether they
@@ -412,8 +402,21 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
// to see if they can be made to match.
// TODO(gri) is this always correct? what about type bounds?
// (Alternative is to rename/subst type parameters and compare.)
- u := newUnifier(check, true)
- u.x.init(ftyp.tparams)
+ u := newUnifier(true)
+ if ftyp.TParams().Len() > 0 {
+ // We reach here only if we accept method type parameters.
+ // In this case, unification must consider any receiver
+ // and method type parameters as "free" type parameters.
+ assert(acceptMethodTypeParams)
+ // We don't have a test case for this at the moment since
+ // we can't parse method type parameters. Keeping the
+ // unimplemented call so that we test this code if we
+ // enable method type parameters.
+ unimplemented()
+ u.x.init(append(ftyp.RParams().list(), ftyp.TParams().list()...))
+ } else {
+ u.x.init(ftyp.RParams().list())
+ }
if !u.unify(ftyp, mtyp) {
return m, f
}
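lookupFieldOrMethod is now a package-level function here, mirroring the exported go/types function of the same name. A small go/types sketch of such a lookup (output details not asserted; shown for illustration only):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p
type T struct{ N int }
func (*T) M() {}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	T := pkg.Scope().Lookup("T").Type()
	// With addressable == true, the pointer-receiver method M is found on T.
	obj, index, indirect := types.LookupFieldOrMethod(T, true, pkg, "M")
	fmt.Println(obj, index, indirect)
}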
diff --git a/src/cmd/compile/internal/types2/map.go b/src/cmd/compile/internal/types2/map.go
new file mode 100644
index 0000000000..0d3464caae
--- /dev/null
+++ b/src/cmd/compile/internal/types2/map.go
@@ -0,0 +1,24 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// A Map represents a map type.
+type Map struct {
+ key, elem Type
+}
+
+// NewMap returns a new map for the given key and element types.
+func NewMap(key, elem Type) *Map {
+ return &Map{key: key, elem: elem}
+}
+
+// Key returns the key type of map m.
+func (m *Map) Key() Type { return m.key }
+
+// Elem returns the element type of map m.
+func (m *Map) Elem() Type { return m.elem }
+
+func (t *Map) Underlying() Type { return t }
+func (t *Map) String() string { return TypeString(t, nil) }
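go/types exposes the same Map API as the new types2 file above; a trivial usage sketch for illustration:

package main

import (
	"fmt"
	"go/types"
)

func main() {
	m := types.NewMap(types.Typ[types.String], types.Typ[types.Int])
	fmt.Println(m)        // map[string]int
	fmt.Println(m.Key())  // string
	fmt.Println(m.Elem()) // int
}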
diff --git a/src/cmd/compile/internal/types2/named.go b/src/cmd/compile/internal/types2/named.go
new file mode 100644
index 0000000000..adf3eb3822
--- /dev/null
+++ b/src/cmd/compile/internal/types2/named.go
@@ -0,0 +1,278 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "sync"
+)
+
+// TODO(gri) Clean up Named struct below; specifically the fromRHS field (can we use underlying?).
+
+// A Named represents a named (defined) type.
+type Named struct {
+ check *Checker
+ info typeInfo // for cycle detection
+ obj *TypeName // corresponding declared object
+ orig *Named // original, uninstantiated type
+ fromRHS Type // type (on RHS of declaration) this *Named type is derived from (for cycle reporting)
+ underlying Type // possibly a *Named during setup; never a *Named once set up completely
+ instance *instance // position information for lazy instantiation, or nil
+ tparams *TypeParams // type parameters, or nil
+ targs []Type // type arguments (after instantiation), or nil
+ methods []*Func // methods declared for this type (not the method set of this type); signatures are type-checked lazily
+
+ resolve func(*Named) ([]*TypeName, Type, []*Func)
+ once sync.Once
+}
+
+// NewNamed returns a new named type for the given type name, underlying type, and associated methods.
+// If the given type name obj doesn't have a type yet, its type is set to the returned named type.
+// The underlying type must not be a *Named.
+func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
+ if _, ok := underlying.(*Named); ok {
+ panic("underlying type must not be *Named")
+ }
+ return (*Checker)(nil).newNamed(obj, nil, underlying, nil, methods)
+}
+
+func (t *Named) load() *Named {
+ // If t is an instantiated type, it derives its methods and tparams from its
+ // base type. Since we expect type parameters and methods to be set after a
+ // call to load, we must load the base and copy here.
+ //
+ // underlying is set when t is expanded.
+ //
+ // By convention, a type instance is loaded iff its tparams are set.
+ if len(t.targs) > 0 && t.tparams == nil {
+ t.orig.load()
+ t.tparams = t.orig.tparams
+ t.methods = t.orig.methods
+ }
+ if t.resolve == nil {
+ return t
+ }
+
+ t.once.Do(func() {
+ // TODO(mdempsky): Since we're passing t to resolve anyway
+ // (necessary because types2 expects the receiver type for methods
+ // on defined interface types to be the Named rather than the
+ // underlying Interface), maybe it should just handle calling
+ // SetTParams, SetUnderlying, and AddMethod instead? Those
+ // methods would need to support reentrant calls though. It would
+ // also make the API more future-proof towards further extensions
+ // (like SetTParams).
+
+ tparams, underlying, methods := t.resolve(t)
+
+ switch underlying.(type) {
+ case nil, *Named:
+ panic("invalid underlying type")
+ }
+
+ t.tparams = bindTParams(tparams)
+ t.underlying = underlying
+ t.methods = methods
+ })
+ return t
+}
+
+// newNamed is like NewNamed but with a *Checker receiver and additional orig argument.
+func (check *Checker) newNamed(obj *TypeName, orig *Named, underlying Type, tparams *TypeParams, methods []*Func) *Named {
+ typ := &Named{check: check, obj: obj, orig: orig, fromRHS: underlying, underlying: underlying, tparams: tparams, methods: methods}
+ if typ.orig == nil {
+ typ.orig = typ
+ }
+ if obj.typ == nil {
+ obj.typ = typ
+ }
+ // Ensure that typ is always expanded, at which point the check field can be
+ // nilled out.
+ //
+ // Note that currently we cannot nil out check inside typ.under(), because
+ // it's possible that typ is expanded multiple times.
+ //
+ // TODO(gri): clean this up so that under is the only function mutating
+ // named types.
+ if check != nil {
+ check.later(func() {
+ switch typ.under().(type) {
+ case *Named:
+ panic("unexpanded underlying type")
+ }
+ typ.check = nil
+ })
+ }
+ return typ
+}
+
+// Obj returns the type name for the named type t.
+func (t *Named) Obj() *TypeName { return t.obj }
+
+// Orig returns the original generic type an instantiated type is derived from.
+// If t is not an instantiated type, the result is t.
+func (t *Named) Orig() *Named { return t.orig }
+
+// TODO(gri) Come up with a better representation and API to distinguish
+// between parameterized instantiated and non-instantiated types.
+
+// TParams returns the type parameters of the named type t, or nil.
+// The result is non-nil for an (originally) parameterized type even if it is instantiated.
+func (t *Named) TParams() *TypeParams { return t.load().tparams }
+
+// SetTParams sets the type parameters of the named type t.
+func (t *Named) SetTParams(tparams []*TypeName) { t.load().tparams = bindTParams(tparams) }
+
+// TArgs returns the type arguments after instantiation of the named type t, or nil if not instantiated.
+func (t *Named) TArgs() []Type { return t.targs }
+
+// NumMethods returns the number of explicit methods whose receiver is named type t.
+func (t *Named) NumMethods() int { return len(t.load().methods) }
+
+// Method returns the i'th method of named type t for 0 <= i < t.NumMethods().
+func (t *Named) Method(i int) *Func { return t.load().methods[i] }
+
+// SetUnderlying sets the underlying type and marks t as complete.
+func (t *Named) SetUnderlying(underlying Type) {
+ if underlying == nil {
+ panic("underlying type must not be nil")
+ }
+ if _, ok := underlying.(*Named); ok {
+ panic("underlying type must not be *Named")
+ }
+ t.load().underlying = underlying
+}
+
+// AddMethod adds method m unless it is already in the method list.
+func (t *Named) AddMethod(m *Func) {
+ t.load()
+ if i, _ := lookupMethod(t.methods, m.pkg, m.name); i < 0 {
+ t.methods = append(t.methods, m)
+ }
+}
+
+func (t *Named) Underlying() Type { return t.load().underlying }
+func (t *Named) String() string { return TypeString(t, nil) }
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+// under returns the expanded underlying type of n0; possibly by following
+// forward chains of named types. If an underlying type is found, resolve
+// the chain by setting the underlying type for each defined type in the
+// chain before returning it. If no underlying type is found or a cycle
+// is detected, the result is Typ[Invalid]. If a cycle is detected and
+// n0.check != nil, the cycle is reported.
+func (n0 *Named) under() Type {
+ n0.expand()
+
+ u := n0.Underlying()
+
+ if u == Typ[Invalid] {
+ return u
+ }
+
+ // If the underlying type of a defined type is not a defined
+ // (incl. instance) type, then that is the desired underlying
+ // type.
+ switch u.(type) {
+ case nil:
+ return Typ[Invalid]
+ default:
+ // common case
+ return u
+ case *Named:
+ // handled below
+ }
+
+ if n0.check == nil {
+ panic("Named.check == nil but type is incomplete")
+ }
+
+ // Invariant: after this point n0 as well as any named types in its
+ // underlying chain should be set up when this function exits.
+ check := n0.check
+
+ // If we can't expand u at this point, it is invalid.
+ n := asNamed(u)
+ if n == nil {
+ n0.underlying = Typ[Invalid]
+ return n0.underlying
+ }
+
+ // Otherwise, follow the forward chain.
+ seen := map[*Named]int{n0: 0}
+ path := []Object{n0.obj}
+ for {
+ u = n.Underlying()
+ if u == nil {
+ u = Typ[Invalid]
+ break
+ }
+ var n1 *Named
+ switch u1 := u.(type) {
+ case *Named:
+ u1.expand()
+ n1 = u1
+ }
+ if n1 == nil {
+ break // end of chain
+ }
+
+ seen[n] = len(seen)
+ path = append(path, n.obj)
+ n = n1
+
+ if i, ok := seen[n]; ok {
+ // cycle
+ check.cycleError(path[i:])
+ u = Typ[Invalid]
+ break
+ }
+ }
+
+ for n := range seen {
+ // We should never have to update the underlying type of an imported type;
+ // those underlying types should have been resolved during the import.
+ // Also, doing so would lead to a race condition (was issue #31749).
+ // Do this check always, not just in debug mode (it's cheap).
+ if n.obj.pkg != check.pkg {
+ panic("imported type with unresolved underlying type")
+ }
+ n.underlying = u
+ }
+
+ return u
+}
+
+func (n *Named) setUnderlying(typ Type) {
+ if n != nil {
+ n.underlying = typ
+ }
+}
+
+// instance holds position information for use in lazy instantiation.
+//
+// TODO(rfindley): come up with a better name for this type, now that its usage
+// has changed.
+type instance struct {
+ pos syntax.Pos // position of type instantiation; for error reporting only
+ posList []syntax.Pos // position of each targ; for error reporting only
+}
+
+// expand ensures that the underlying type of n is instantiated.
+// The underlying type will be Typ[Invalid] if there was an error.
+func (n *Named) expand() {
+ if n.instance != nil {
+ // n must be loaded before instantiation, in order to have accurate
+ // tparams. This is done implicitly by the call to n.TParams, but making it
+ // explicit is harmless: load is idempotent.
+ n.load()
+ inst := n.check.instantiate(n.instance.pos, n.orig.underlying, n.TParams().list(), n.targs, n.instance.posList)
+ n.underlying = inst
+ n.fromRHS = inst
+ n.instance = nil
+ }
+}
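go/types exposes the same NewNamed/SetUnderlying shape as the types2 Named type above (without the lazy load/expand machinery, which is compiler-internal). A sketch building "type Celsius float64" programmatically, using go/types for illustration:

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	obj := types.NewTypeName(token.NoPos, nil, "Celsius", nil)
	named := types.NewNamed(obj, types.Typ[types.Float64], nil)
	fmt.Println(named)              // Celsius
	fmt.Println(named.Underlying()) // float64
	fmt.Println(named.NumMethods()) // 0
}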
diff --git a/src/cmd/compile/internal/types2/object.go b/src/cmd/compile/internal/types2/object.go
index 844bc34b6a..8263ccae0c 100644
--- a/src/cmd/compile/internal/types2/object.go
+++ b/src/cmd/compile/internal/types2/object.go
@@ -186,6 +186,45 @@ func (obj *object) sameId(pkg *Package, name string) bool {
return pkg.path == obj.pkg.path
}
+// less reports whether object a is ordered before object b.
+//
+// Objects are ordered nil before non-nil, exported before
+// non-exported, then by name, and finally (for non-exported
+// names) by package height and path.
+func (a *object) less(b *object) bool {
+ if a == b {
+ return false
+ }
+
+ // Nil before non-nil.
+ if a == nil {
+ return true
+ }
+ if b == nil {
+ return false
+ }
+
+ // Exported functions before non-exported.
+ ea := isExported(a.name)
+ eb := isExported(b.name)
+ if ea != eb {
+ return ea
+ }
+
+ // Order by name and then (for non-exported names) by package.
+ if a.name != b.name {
+ return a.name < b.name
+ }
+ if !ea {
+ if a.pkg.height != b.pkg.height {
+ return a.pkg.height < b.pkg.height
+ }
+ return a.pkg.path < b.pkg.path
+ }
+
+ return false
+}
+
// A PkgName represents an imported Go package.
// PkgNames don't have a type.
type PkgName struct {
@@ -237,6 +276,14 @@ func NewTypeName(pos syntax.Pos, pkg *Package, name string, typ Type) *TypeName
return &TypeName{object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}}
}
+// NewTypeNameLazy returns a new defined type like NewTypeName, but it
+// lazily calls resolve to finish constructing the Named object.
+func NewTypeNameLazy(pos syntax.Pos, pkg *Package, name string, resolve func(named *Named) (tparams []*TypeName, underlying Type, methods []*Func)) *TypeName {
+ obj := NewTypeName(pos, pkg, name, nil)
+ NewNamed(obj, nil, nil).resolve = resolve
+ return obj
+}
+
// IsAlias reports whether obj is an alias name for a type.
func (obj *TypeName) IsAlias() bool {
switch t := obj.typ.(type) {
@@ -329,36 +376,6 @@ func (obj *Func) FullName() string {
// Scope returns the scope of the function's body block.
func (obj *Func) Scope() *Scope { return obj.typ.(*Signature).scope }
-// Less reports whether function a is ordered before function b.
-//
-// Functions are ordered exported before non-exported, then by name,
-// and finally (for non-exported functions) by package path.
-//
-// TODO(gri) The compiler also sorts by package height before package
-// path for non-exported names.
-func (a *Func) less(b *Func) bool {
- if a == b {
- return false
- }
-
- // Exported functions before non-exported.
- ea := isExported(a.name)
- eb := isExported(b.name)
- if ea != eb {
- return ea
- }
-
- // Order by name and then (for non-exported names) by package.
- if a.name != b.name {
- return a.name < b.name
- }
- if !ea {
- return a.pkg.path < b.pkg.path
- }
-
- return false
-}
-
func (*Func) isDependency() {} // a function may be a dependency of an initialization expression
// A Label represents a declared label.
@@ -458,6 +475,9 @@ func writeObject(buf *bytes.Buffer, obj Object, qf Qualifier) {
if _, ok := typ.(*Basic); ok {
return
}
+ if named, _ := typ.(*Named); named != nil && named.TParams().Len() > 0 {
+ writeTParamList(buf, named.TParams().list(), qf, nil)
+ }
if tname.IsAlias() {
buf.WriteString(" =")
} else {
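A standalone restatement of the ordering that the new object.less implements (minus the nil handling), as a runnable sketch; the obj struct and helpers here are invented for illustration only:

package main

import (
	"fmt"
	"sort"
	"unicode"
	"unicode/utf8"
)

type obj struct {
	name      string
	pkgHeight int
	pkgPath   string
}

func exported(name string) bool {
	r, _ := utf8.DecodeRuneInString(name)
	return unicode.IsUpper(r)
}

// less: exported before non-exported, then by name, and (for non-exported
// names) by package height and then package path.
func less(a, b obj) bool {
	if ea, eb := exported(a.name), exported(b.name); ea != eb {
		return ea
	}
	if a.name != b.name {
		return a.name < b.name
	}
	if !exported(a.name) {
		if a.pkgHeight != b.pkgHeight {
			return a.pkgHeight < b.pkgHeight
		}
		return a.pkgPath < b.pkgPath
	}
	return false
}

func main() {
	objs := []obj{{"m", 2, "b"}, {"M", 0, "a"}, {"m", 1, "a"}}
	sort.Slice(objs, func(i, j int) bool { return less(objs[i], objs[j]) })
	fmt.Println(objs) // [{M 0 a} {m 1 a} {m 2 b}]
}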
diff --git a/src/cmd/compile/internal/types2/object_test.go b/src/cmd/compile/internal/types2/object_test.go
index 7f63c79332..a86733a5c9 100644
--- a/src/cmd/compile/internal/types2/object_test.go
+++ b/src/cmd/compile/internal/types2/object_test.go
@@ -25,7 +25,7 @@ func TestIsAlias(t *testing.T) {
check(Unsafe.Scope().Lookup("Pointer").(*TypeName), false)
for _, name := range Universe.Names() {
if obj, _ := Universe.Lookup(name).(*TypeName); obj != nil {
- check(obj, name == "byte" || name == "rune")
+ check(obj, name == "any" || name == "byte" || name == "rune")
}
}
diff --git a/src/cmd/compile/internal/types2/operand.go b/src/cmd/compile/internal/types2/operand.go
index 455d8b5dd1..8336451e9c 100644
--- a/src/cmd/compile/internal/types2/operand.go
+++ b/src/cmd/compile/internal/types2/operand.go
@@ -176,16 +176,20 @@ func operandString(x *operand, qf Qualifier) string {
if hasType {
if x.typ != Typ[Invalid] {
var intro string
- switch {
- case isGeneric(x.typ):
- intro = " of generic type "
- case asTypeParam(x.typ) != nil:
- intro = " of type parameter type "
- default:
+ var tpar *TypeParam
+ if isGeneric(x.typ) {
+ intro = " of parameterized type "
+ } else if tpar = asTypeParam(x.typ); tpar != nil {
+ intro = " of type parameter "
+ } else {
intro = " of type "
}
buf.WriteString(intro)
WriteType(&buf, x.typ, qf)
+ if tpar != nil {
+ buf.WriteString(" constrained by ")
+ WriteType(&buf, tpar.bound, qf) // do not compute interface type sets here
+ }
} else {
buf.WriteString(" with invalid type")
}
@@ -248,20 +252,35 @@ func (x *operand) assignableTo(check *Checker, T Type, reason *string) (bool, er
V := x.typ
+ const debugAssignableTo = false
+ if debugAssignableTo && check != nil {
+ check.dump("V = %s", V)
+ check.dump("T = %s", T)
+ }
+
// x's type is identical to T
- if check.identical(V, T) {
+ if Identical(V, T) {
return true, 0
}
Vu := optype(V)
Tu := optype(T)
+ if debugAssignableTo && check != nil {
+ check.dump("Vu = %s", Vu)
+ check.dump("Tu = %s", Tu)
+ }
+
// x is an untyped value representable by a value of type T.
if isUntyped(Vu) {
- if t, ok := Tu.(*Sum); ok {
- return t.is(func(t Type) bool {
+ if t, ok := Tu.(*TypeParam); ok {
+ return t.is(func(t *term) bool {
// TODO(gri) this could probably be more efficient
- ok, _ := x.assignableTo(check, t, reason)
+ if t.tilde {
+ // TODO(gri) We need to check assignability
+ // for the underlying type of x.
+ }
+ ok, _ := x.assignableTo(check, t.typ, reason)
return ok
}), _IncompatibleAssign
}
@@ -272,7 +291,7 @@ func (x *operand) assignableTo(check *Checker, T Type, reason *string) (bool, er
// x's type V and T have identical underlying types
// and at least one of V or T is not a named type
- if check.identical(Vu, Tu) && (!isNamed(V) || !isNamed(T)) {
+ if Identical(Vu, Tu) && (!isNamed(V) || !isNamed(T)) {
return true, 0
}
@@ -281,7 +300,7 @@ func (x *operand) assignableTo(check *Checker, T Type, reason *string) (bool, er
if m, wrongType := check.missingMethod(V, Ti, true); m != nil /* Implements(V, Ti) */ {
if reason != nil {
if wrongType != nil {
- if check.identical(m.typ, wrongType.typ) {
+ if Identical(m.typ, wrongType.typ) {
*reason = fmt.Sprintf("missing method %s (%s has pointer receiver)", m.name, m.name)
} else {
*reason = fmt.Sprintf("wrong type for method %s (have %s, want %s)", m.Name(), wrongType.typ, m.typ)
@@ -300,7 +319,7 @@ func (x *operand) assignableTo(check *Checker, T Type, reason *string) (bool, er
// type, x's type V and T have identical element types,
// and at least one of V or T is not a named type
if Vc, ok := Vu.(*Chan); ok && Vc.dir == SendRecv {
- if Tc, ok := Tu.(*Chan); ok && check.identical(Vc.elem, Tc.elem) {
+ if Tc, ok := Tu.(*Chan); ok && Identical(Vc.elem, Tc.elem) {
return !isNamed(V) || !isNamed(T), _InvalidChanAssign
}
}
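
assignableTo now uses the exported Identical instead of check.identical, and the untyped-constant case iterates the terms of a *TypeParam rather than a *Sum. The final hunk leaves the channel rule intact; a short sketch of that rule via go/types (the exported mirror of types2):

package main

import (
    "fmt"
    "go/types"
)

func main() {
    elem := types.Typ[types.Int]
    bidi := types.NewChan(types.SendRecv, elem) // chan int
    send := types.NewChan(types.SendOnly, elem) // chan<- int

    // A bidirectional channel is assignable to a directional channel type
    // when the element types are identical (both types are unnamed here),
    // but the two types are not identical because their directions differ.
    fmt.Println(types.Identical(bidi, send))    // false
    fmt.Println(types.AssignableTo(bidi, send)) // true
    fmt.Println(types.AssignableTo(send, bidi)) // false
}
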
diff --git a/src/cmd/compile/internal/types2/package.go b/src/cmd/compile/internal/types2/package.go
index 31b1e71787..8044e7e6a7 100644
--- a/src/cmd/compile/internal/types2/package.go
+++ b/src/cmd/compile/internal/types2/package.go
@@ -13,8 +13,9 @@ type Package struct {
path string
name string
scope *Scope
- complete bool
imports []*Package
+ height int
+ complete bool
fake bool // scope lookup errors are silently dropped if package is fake (internal use only)
cgo bool // uses of this package will be rewritten into uses of declarations from _cgo_gotypes.go
}
@@ -22,8 +23,14 @@ type Package struct {
// NewPackage returns a new Package for the given package path and name.
// The package is not complete and contains no explicit imports.
func NewPackage(path, name string) *Package {
+ return NewPackageHeight(path, name, 0)
+}
+
+// NewPackageHeight is like NewPackage, but allows specifying the
+// package's height.
+func NewPackageHeight(path, name string, height int) *Package {
scope := NewScope(Universe, nopos, nopos, fmt.Sprintf("package %q", path))
- return &Package{path: path, name: name, scope: scope}
+ return &Package{path: path, name: name, scope: scope, height: height}
}
// Path returns the package path.
@@ -32,13 +39,22 @@ func (pkg *Package) Path() string { return pkg.path }
// Name returns the package name.
func (pkg *Package) Name() string { return pkg.name }
+// Height returns the package height.
+func (pkg *Package) Height() int { return pkg.height }
+
// SetName sets the package name.
func (pkg *Package) SetName(name string) { pkg.name = name }
// Scope returns the (complete or incomplete) package scope
// holding the objects declared at package level (TypeNames,
// Consts, Vars, and Funcs).
-func (pkg *Package) Scope() *Scope { return pkg.scope }
+// For a nil pkg receiver, Scope returns the Universe scope.
+func (pkg *Package) Scope() *Scope {
+ if pkg != nil {
+ return pkg.scope
+ }
+ return Universe
+}
// A package is complete if its scope contains (at least) all
// exported objects; otherwise it is incomplete.
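
Package height is not part of the exported go/types API; the toy sketch below only mirrors the rule this patch applies when resolving imports (a package's height is one more than the maximum height of its imports, and package unsafe is ignored). The types here are hypothetical stand-ins, not the real API:

package main

import "fmt"

// pkg is a toy stand-in for *Package: only the path and the imports matter
// for this sketch.
type pkg struct {
    path    string
    height  int
    imports []*pkg
}

// setHeight applies the height rule used by the resolver later in this patch.
func setHeight(p *pkg) {
    p.height = 0
    for _, imp := range p.imports {
        if imp.path == "unsafe" {
            continue // imports of package unsafe do not contribute to height
        }
        if h := imp.height + 1; h > p.height {
            p.height = h
        }
    }
}

func main() {
    unsafePkg := &pkg{path: "unsafe"}
    a := &pkg{path: "a"}
    b := &pkg{path: "b", imports: []*pkg{a, unsafePkg}}
    c := &pkg{path: "c", imports: []*pkg{a, b}}
    for _, p := range []*pkg{a, b, c} {
        setHeight(p)
        fmt.Println(p.path, p.height) // a 0, b 1, c 2
    }
}
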
diff --git a/src/cmd/compile/internal/types2/pointer.go b/src/cmd/compile/internal/types2/pointer.go
new file mode 100644
index 0000000000..63055fc6b0
--- /dev/null
+++ b/src/cmd/compile/internal/types2/pointer.go
@@ -0,0 +1,19 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// A Pointer represents a pointer type.
+type Pointer struct {
+ base Type // element type
+}
+
+// NewPointer returns a new pointer type for the given element (base) type.
+func NewPointer(elem Type) *Pointer { return &Pointer{base: elem} }
+
+// Elem returns the element type for the given pointer p.
+func (p *Pointer) Elem() Type { return p.base }
+
+func (p *Pointer) Underlying() Type { return p }
+func (p *Pointer) String() string { return TypeString(p, nil) }
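
The new Pointer type matches the exported go/types API, so the constructor and accessor can be exercised through that mirror package:

package main

import (
    "fmt"
    "go/types"
)

func main() {
    p := types.NewPointer(types.Typ[types.Int]) // *int
    fmt.Println(p)        // *int
    fmt.Println(p.Elem()) // int
    fmt.Println(types.Identical(p.Elem(), types.Typ[types.Int])) // true
}
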
diff --git a/src/cmd/compile/internal/types2/predicates.go b/src/cmd/compile/internal/types2/predicates.go
index ae186a0b5d..1541b3f416 100644
--- a/src/cmd/compile/internal/types2/predicates.go
+++ b/src/cmd/compile/internal/types2/predicates.go
@@ -10,7 +10,7 @@ package types2
// isNamed may be called with types that are not fully set up.
func isNamed(typ Type) bool {
switch typ.(type) {
- case *Basic, *Named, *TypeParam, *instance:
+ case *Basic, *Named, *TypeParam:
return true
}
return false
@@ -21,15 +21,15 @@ func isNamed(typ Type) bool {
func isGeneric(typ Type) bool {
// A parameterized type is only instantiated if it doesn't have an instantiation already.
named, _ := typ.(*Named)
- return named != nil && named.obj != nil && named.tparams != nil && named.targs == nil
+ return named != nil && named.obj != nil && named.targs == nil && named.TParams() != nil
}
func is(typ Type, what BasicInfo) bool {
- switch t := optype(typ).(type) {
+ switch t := under(typ).(type) {
case *Basic:
return t.info&what != 0
- case *Sum:
- return t.is(func(typ Type) bool { return is(typ, what) })
+ case *TypeParam:
+ return t.underIs(func(t Type) bool { return is(t, what) })
}
return false
}
@@ -56,10 +56,7 @@ func isNumericOrString(typ Type) bool { return is(typ, IsNumeric|IsString) }
// are not fully set up.
func isTyped(typ Type) bool {
// isTyped is called with types that are not fully
- // set up. Must not call Basic()!
- // A *Named or *instance type is always typed, so
- // we only need to check if we have a true *Basic
- // type.
+ // set up. Must not call asBasic()!
t, _ := typ.(*Basic)
return t == nil || t.info&IsUntyped == 0
}
@@ -96,19 +93,7 @@ func comparable(T Type, seen map[Type]bool) bool {
}
seen[T] = true
- // If T is a type parameter not constrained by any type
- // list (i.e., it's underlying type is the top type),
- // T is comparable if it has the == method. Otherwise,
- // the underlying type "wins". For instance
- //
- // interface{ comparable; type []byte }
- //
- // is not comparable because []byte is not comparable.
- if t := asTypeParam(T); t != nil && optype(t) == theTop {
- return t.Bound().IsComparable()
- }
-
- switch t := optype(T).(type) {
+ switch t := under(T).(type) {
case *Basic:
// assume invalid types to be comparable
// to avoid follow-up errors
@@ -124,42 +109,25 @@ func comparable(T Type, seen map[Type]bool) bool {
return true
case *Array:
return comparable(t.elem, seen)
- case *Sum:
- pred := func(t Type) bool {
- return comparable(t, seen)
- }
- return t.is(pred)
case *TypeParam:
- return t.Bound().IsComparable()
+ return t.iface().IsComparable()
}
return false
}
// hasNil reports whether a type includes the nil value.
func hasNil(typ Type) bool {
- switch t := optype(typ).(type) {
+ switch t := under(typ).(type) {
case *Basic:
return t.kind == UnsafePointer
case *Slice, *Pointer, *Signature, *Interface, *Map, *Chan:
return true
- case *Sum:
- return t.is(hasNil)
+ case *TypeParam:
+ return t.underIs(hasNil)
}
return false
}
-// identical reports whether x and y are identical types.
-// Receivers of Signature types are ignored.
-func (check *Checker) identical(x, y Type) bool {
- return check.identical0(x, y, true, nil)
-}
-
-// identicalIgnoreTags reports whether x and y are identical types if tags are ignored.
-// Receivers of Signature types are ignored.
-func (check *Checker) identicalIgnoreTags(x, y Type) bool {
- return check.identical0(x, y, false, nil)
-}
-
// An ifacePair is a node in a stack of interface type pairs compared for identity.
type ifacePair struct {
x, y *Interface
@@ -171,11 +139,7 @@ func (p *ifacePair) identical(q *ifacePair) bool {
}
// For changes to this code the corresponding changes should be made to unifier.nify.
-func (check *Checker) identical0(x, y Type, cmpTags bool, p *ifacePair) bool {
- // types must be expanded for comparison
- x = expandf(x)
- y = expandf(y)
-
+func identical(x, y Type, cmpTags bool, p *ifacePair) bool {
if x == y {
return true
}
@@ -195,13 +159,13 @@ func (check *Checker) identical0(x, y Type, cmpTags bool, p *ifacePair) bool {
if y, ok := y.(*Array); ok {
// If one or both array lengths are unknown (< 0) due to some error,
// assume they are the same to avoid spurious follow-on errors.
- return (x.len < 0 || y.len < 0 || x.len == y.len) && check.identical0(x.elem, y.elem, cmpTags, p)
+ return (x.len < 0 || y.len < 0 || x.len == y.len) && identical(x.elem, y.elem, cmpTags, p)
}
case *Slice:
// Two slice types are identical if they have identical element types.
if y, ok := y.(*Slice); ok {
- return check.identical0(x.elem, y.elem, cmpTags, p)
+ return identical(x.elem, y.elem, cmpTags, p)
}
case *Struct:
@@ -216,7 +180,7 @@ func (check *Checker) identical0(x, y Type, cmpTags bool, p *ifacePair) bool {
if f.embedded != g.embedded ||
cmpTags && x.Tag(i) != y.Tag(i) ||
!f.sameId(g.pkg, g.name) ||
- !check.identical0(f.typ, g.typ, cmpTags, p) {
+ !identical(f.typ, g.typ, cmpTags, p) {
return false
}
}
@@ -227,7 +191,7 @@ func (check *Checker) identical0(x, y Type, cmpTags bool, p *ifacePair) bool {
case *Pointer:
// Two pointer types are identical if they have identical base types.
if y, ok := y.(*Pointer); ok {
- return check.identical0(x.base, y.base, cmpTags, p)
+ return identical(x.base, y.base, cmpTags, p)
}
case *Tuple:
@@ -238,7 +202,7 @@ func (check *Checker) identical0(x, y Type, cmpTags bool, p *ifacePair) bool {
if x != nil {
for i, v := range x.vars {
w := y.vars[i]
- if !check.identical0(v.typ, w.typ, cmpTags, p) {
+ if !identical(v.typ, w.typ, cmpTags, p) {
return false
}
}
@@ -256,49 +220,27 @@ func (check *Checker) identical0(x, y Type, cmpTags bool, p *ifacePair) bool {
// parameter names.
if y, ok := y.(*Signature); ok {
return x.variadic == y.variadic &&
- check.identicalTParams(x.tparams, y.tparams, cmpTags, p) &&
- check.identical0(x.params, y.params, cmpTags, p) &&
- check.identical0(x.results, y.results, cmpTags, p)
- }
-
- case *Sum:
- // Two sum types are identical if they contain the same types.
- // (Sum types always consist of at least two types. Also, the
- // set (list) of types in a sum type consists of unique
- // types - each type appears exactly once. Thus, two sum types
- // must contain the same number of types to have a chance of
- // being equal.
- if y, ok := y.(*Sum); ok && len(x.types) == len(y.types) {
- // Every type in x.types must be in y.types.
- // Quadratic algorithm, but probably good enough for now.
- // TODO(gri) we need a fast quick type ID/hash for all types.
- L:
- for _, x := range x.types {
- for _, y := range y.types {
- if Identical(x, y) {
- continue L // x is in y.types
- }
- }
- return false // x is not in y.types
- }
- return true
+ identicalTParams(x.TParams().list(), y.TParams().list(), cmpTags, p) &&
+ identical(x.params, y.params, cmpTags, p) &&
+ identical(x.results, y.results, cmpTags, p)
}
case *Interface:
+ // Two interface types are identical if they describe the same type sets.
+ // With the existing implementation restriction, this simplifies to:
+ //
// Two interface types are identical if they have the same set of methods with
- // the same names and identical function types. Lower-case method names from
- // different packages are always different. The order of the methods is irrelevant.
+ // the same names and identical function types, and if any type restrictions
+ // are the same. Lower-case method names from different packages are always
+ // different. The order of the methods is irrelevant.
if y, ok := y.(*Interface); ok {
- // If identical0 is called (indirectly) via an external API entry point
- // (such as Identical, IdenticalIgnoreTags, etc.), check is nil. But in
- // that case, interfaces are expected to be complete and lazy completion
- // here is not needed.
- if check != nil {
- check.completeInterface(nopos, x)
- check.completeInterface(nopos, y)
+ xset := x.typeSet()
+ yset := y.typeSet()
+ if !xset.terms.equal(yset.terms) {
+ return false
}
- a := x.allMethods
- b := y.allMethods
+ a := xset.methods
+ b := yset.methods
if len(a) == len(b) {
// Interface types are the only types where cycles can occur
// that are not "terminated" via named types; and such cycles
@@ -335,7 +277,7 @@ func (check *Checker) identical0(x, y Type, cmpTags bool, p *ifacePair) bool {
}
for i, f := range a {
g := b[i]
- if f.Id() != g.Id() || !check.identical0(f.typ, g.typ, cmpTags, q) {
+ if f.Id() != g.Id() || !identical(f.typ, g.typ, cmpTags, q) {
return false
}
}
@@ -346,20 +288,22 @@ func (check *Checker) identical0(x, y Type, cmpTags bool, p *ifacePair) bool {
case *Map:
// Two map types are identical if they have identical key and value types.
if y, ok := y.(*Map); ok {
- return check.identical0(x.key, y.key, cmpTags, p) && check.identical0(x.elem, y.elem, cmpTags, p)
+ return identical(x.key, y.key, cmpTags, p) && identical(x.elem, y.elem, cmpTags, p)
}
case *Chan:
// Two channel types are identical if they have identical value types
// and the same direction.
if y, ok := y.(*Chan); ok {
- return x.dir == y.dir && check.identical0(x.elem, y.elem, cmpTags, p)
+ return x.dir == y.dir && identical(x.elem, y.elem, cmpTags, p)
}
case *Named:
// Two named types are identical if their type names originate
// in the same type declaration.
if y, ok := y.(*Named); ok {
+ x.expand()
+ y.expand()
// TODO(gri) Why is x == y not sufficient? And if it is,
// we can just return false here because x == y
// is caught in the very beginning of this function.
@@ -369,13 +313,9 @@ func (check *Checker) identical0(x, y Type, cmpTags bool, p *ifacePair) bool {
case *TypeParam:
// nothing to do (x and y being equal is caught in the very beginning of this function)
- // case *instance:
- // unreachable since types are expanded
-
- case *bottom, *top:
- // Either both types are theBottom, or both are theTop in which
- // case the initial x == y check will have caught them. Otherwise
- // they are not identical.
+ case *top:
+ // Either both types are theTop in which case the initial x == y check
+ // will have caught them. Otherwise they are not identical.
case nil:
// avoid a crash in case of nil type
@@ -387,13 +327,13 @@ func (check *Checker) identical0(x, y Type, cmpTags bool, p *ifacePair) bool {
return false
}
-func (check *Checker) identicalTParams(x, y []*TypeName, cmpTags bool, p *ifacePair) bool {
+func identicalTParams(x, y []*TypeName, cmpTags bool, p *ifacePair) bool {
if len(x) != len(y) {
return false
}
for i, x := range x {
y := y[i]
- if !check.identical0(x.typ.(*TypeParam).bound, y.typ.(*TypeParam).bound, cmpTags, p) {
+ if !identical(x.typ.(*TypeParam).bound, y.typ.(*TypeParam).bound, cmpTags, p) {
return false
}
}
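
With identical0 now a package-level function, the exported entry points Identical and IdenticalIgnoreTags differ only in the cmpTags flag they pass down. A quick illustration with struct tags via go/types (the exported mirror of types2):

package main

import (
    "fmt"
    "go/token"
    "go/types"
)

func main() {
    field := func() *types.Var {
        return types.NewField(token.NoPos, nil, "X", types.Typ[types.Int], false)
    }
    a := types.NewStruct([]*types.Var{field()}, []string{`json:"x"`})
    b := types.NewStruct([]*types.Var{field()}, nil)

    fmt.Println(types.Identical(a, b))           // false: field tags differ
    fmt.Println(types.IdenticalIgnoreTags(a, b)) // true
}
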
diff --git a/src/cmd/compile/internal/types2/resolver.go b/src/cmd/compile/internal/types2/resolver.go
index fa30650bd4..018a20cfb2 100644
--- a/src/cmd/compile/internal/types2/resolver.go
+++ b/src/cmd/compile/internal/types2/resolver.go
@@ -196,6 +196,7 @@ func (check *Checker) importPackage(pos syntax.Pos, path, dir string) *Package {
// methods with receiver base type names.
func (check *Checker) collectObjects() {
pkg := check.pkg
+ pkg.height = 0
// pkgImports is the set of packages already imported by any package file seen
// so far. Used to avoid duplicate entries in pkg.imports. Allocate and populate
@@ -253,6 +254,15 @@ func (check *Checker) collectObjects() {
continue
}
+ if imp == Unsafe {
+ // typecheck ignores imports of package unsafe for
+ // calculating height.
+ // TODO(mdempsky): Revisit this. This seems fine, but I
+ // don't remember explicitly considering this case.
+ } else if h := imp.height + 1; h > pkg.height {
+ pkg.height = h
+ }
+
// local name overrides imported package name
name := imp.name
if s.LocalPkgName != nil {
@@ -265,7 +275,7 @@ func (check *Checker) collectObjects() {
}
if name == "init" {
- check.error(s.LocalPkgName, "cannot import package as init - init must be a func")
+ check.error(s, "cannot import package as init - init must be a func")
continue
}
@@ -298,22 +308,26 @@ func (check *Checker) collectObjects() {
check.dotImportMap = make(map[dotImportKey]*PkgName)
}
// merge imported scope with file scope
- for _, obj := range imp.scope.elems {
+ for name, obj := range imp.scope.elems {
+ // Note: Avoid eager resolve(name, obj) here, so we only
+ // resolve dot-imported objects as needed.
+
// A package scope may contain non-exported objects,
// do not import them!
- if obj.Exported() {
+ if isExported(name) {
// declare dot-imported object
// (Do not use check.declare because it modifies the object
// via Object.setScopePos, which leads to a race condition;
// the object may be imported into more than one file scope
// concurrently. See issue #32154.)
- if alt := fileScope.Insert(obj); alt != nil {
+ if alt := fileScope.Lookup(name); alt != nil {
var err error_
- err.errorf(s.LocalPkgName, "%s redeclared in this block", obj.Name())
+ err.errorf(s.LocalPkgName, "%s redeclared in this block", alt.Name())
err.recordAltDecl(alt)
check.report(&err)
} else {
- check.dotImportMap[dotImportKey{fileScope, obj}] = pkgName
+ fileScope.insert(name, obj)
+ check.dotImportMap[dotImportKey{fileScope, name}] = pkgName
}
}
}
@@ -459,8 +473,9 @@ func (check *Checker) collectObjects() {
// verify that objects in package and file scopes have different names
for _, scope := range fileScopes {
- for _, obj := range scope.elems {
- if alt := pkg.scope.Lookup(obj.Name()); alt != nil {
+ for name, obj := range scope.elems {
+ if alt := pkg.scope.Lookup(name); alt != nil {
+ obj = resolve(name, obj)
var err error_
if pkg, ok := obj.(*PkgName); ok {
err.errorf(alt, "%s already declared through import of %s", alt.Name(), pkg.Imported())
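
Dot-imported objects are now filtered by name with isExported(name) and inserted lazily, instead of eagerly resolving every imported object. The predicate is the usual exported-identifier test; go/token's IsExported applies the same upper-case-first-rune rule, shown here only as an illustration:

package main

import (
    "fmt"
    "go/token"
)

func main() {
    // Only exported names would be merged into a file scope for a dot-import.
    for _, name := range []string{"Buffer", "newReader", "_", "Éclair"} {
        fmt.Println(name, token.IsExported(name))
    }
}
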
diff --git a/src/cmd/compile/internal/types2/resolver_test.go b/src/cmd/compile/internal/types2/resolver_test.go
index aee435ff5f..a02abce081 100644
--- a/src/cmd/compile/internal/types2/resolver_test.go
+++ b/src/cmd/compile/internal/types2/resolver_test.go
@@ -143,7 +143,7 @@ func TestResolveIdents(t *testing.T) {
// check that qualified identifiers are resolved
for _, f := range files {
- syntax.Walk(f, func(n syntax.Node) bool {
+ syntax.Crawl(f, func(n syntax.Node) bool {
if s, ok := n.(*syntax.SelectorExpr); ok {
if x, ok := s.X.(*syntax.Name); ok {
obj := uses[x]
@@ -177,7 +177,7 @@ func TestResolveIdents(t *testing.T) {
foundDefs := make(map[*syntax.Name]bool)
var both []string
for _, f := range files {
- syntax.Walk(f, func(n syntax.Node) bool {
+ syntax.Crawl(f, func(n syntax.Node) bool {
if x, ok := n.(*syntax.Name); ok {
var objects int
if _, found := uses[x]; found {
diff --git a/src/cmd/compile/internal/types2/sanitize.go b/src/cmd/compile/internal/types2/sanitize.go
deleted file mode 100644
index 64a2dedc7d..0000000000
--- a/src/cmd/compile/internal/types2/sanitize.go
+++ /dev/null
@@ -1,202 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package types2
-
-// sanitizeInfo walks the types contained in info to ensure that all instances
-// are expanded.
-//
-// This includes some objects that may be shared across concurrent
-// type-checking passes (such as those in the universe scope), so we are
-// careful here not to write types that are already sanitized. This avoids a
-// data race as any shared types should already be sanitized.
-func sanitizeInfo(info *Info) {
- var s sanitizer = make(map[Type]Type)
-
- // Note: Some map entries are not references.
- // If modified, they must be assigned back.
-
- for e, tv := range info.Types {
- if typ := s.typ(tv.Type); typ != tv.Type {
- tv.Type = typ
- info.Types[e] = tv
- }
- }
-
- for e, inf := range info.Inferred {
- changed := false
- for i, targ := range inf.Targs {
- if typ := s.typ(targ); typ != targ {
- inf.Targs[i] = typ
- changed = true
- }
- }
- if typ := s.typ(inf.Sig); typ != inf.Sig {
- inf.Sig = typ.(*Signature)
- changed = true
- }
- if changed {
- info.Inferred[e] = inf
- }
- }
-
- for _, obj := range info.Defs {
- if obj != nil {
- if typ := s.typ(obj.Type()); typ != obj.Type() {
- obj.setType(typ)
- }
- }
- }
-
- for _, obj := range info.Uses {
- if obj != nil {
- if typ := s.typ(obj.Type()); typ != obj.Type() {
- obj.setType(typ)
- }
- }
- }
-
- // TODO(gri) sanitize as needed
- // - info.Implicits
- // - info.Selections
- // - info.Scopes
- // - info.InitOrder
-}
-
-type sanitizer map[Type]Type
-
-func (s sanitizer) typ(typ Type) Type {
- if typ == nil {
- return nil
- }
-
- if t, found := s[typ]; found {
- return t
- }
- s[typ] = typ
-
- switch t := typ.(type) {
- case *Basic, *bottom, *top:
- // nothing to do
-
- case *Array:
- if elem := s.typ(t.elem); elem != t.elem {
- t.elem = elem
- }
-
- case *Slice:
- if elem := s.typ(t.elem); elem != t.elem {
- t.elem = elem
- }
-
- case *Struct:
- s.varList(t.fields)
-
- case *Pointer:
- if base := s.typ(t.base); base != t.base {
- t.base = base
- }
-
- case *Tuple:
- s.tuple(t)
-
- case *Signature:
- s.var_(t.recv)
- s.tuple(t.params)
- s.tuple(t.results)
-
- case *Sum:
- s.typeList(t.types)
-
- case *Interface:
- s.funcList(t.methods)
- if types := s.typ(t.types); types != t.types {
- t.types = types
- }
- s.typeList(t.embeddeds)
- s.funcList(t.allMethods)
- if allTypes := s.typ(t.allTypes); allTypes != t.allTypes {
- t.allTypes = allTypes
- }
-
- case *Map:
- if key := s.typ(t.key); key != t.key {
- t.key = key
- }
- if elem := s.typ(t.elem); elem != t.elem {
- t.elem = elem
- }
-
- case *Chan:
- if elem := s.typ(t.elem); elem != t.elem {
- t.elem = elem
- }
-
- case *Named:
- if orig := s.typ(t.fromRHS); orig != t.fromRHS {
- t.fromRHS = orig
- }
- if under := s.typ(t.underlying); under != t.underlying {
- t.underlying = under
- }
- s.typeList(t.targs)
- s.funcList(t.methods)
-
- case *TypeParam:
- if bound := s.typ(t.bound); bound != t.bound {
- t.bound = bound
- }
-
- case *instance:
- typ = t.expand()
- s[t] = typ
-
- default:
- panic("unimplemented")
- }
-
- return typ
-}
-
-func (s sanitizer) var_(v *Var) {
- if v != nil {
- if typ := s.typ(v.typ); typ != v.typ {
- v.typ = typ
- }
- }
-}
-
-func (s sanitizer) varList(list []*Var) {
- for _, v := range list {
- s.var_(v)
- }
-}
-
-func (s sanitizer) tuple(t *Tuple) {
- if t != nil {
- s.varList(t.vars)
- }
-}
-
-func (s sanitizer) func_(f *Func) {
- if f != nil {
- if typ := s.typ(f.typ); typ != f.typ {
- f.typ = typ
- }
- }
-}
-
-func (s sanitizer) funcList(list []*Func) {
- for _, f := range list {
- s.func_(f)
- }
-}
-
-func (s sanitizer) typeList(list []Type) {
- for i, t := range list {
- if typ := s.typ(t); typ != t {
- list[i] = typ
- }
- }
-}
diff --git a/src/cmd/compile/internal/types2/scope.go b/src/cmd/compile/internal/types2/scope.go
index ade0a79b31..095875d94b 100644
--- a/src/cmd/compile/internal/types2/scope.go
+++ b/src/cmd/compile/internal/types2/scope.go
@@ -13,6 +13,7 @@ import (
"io"
"sort"
"strings"
+ "sync"
)
// A Scope maintains a set of objects and links to its containing
@@ -22,6 +23,7 @@ import (
type Scope struct {
parent *Scope
children []*Scope
+ number int // parent.children[number-1] is this scope; 0 if there is no parent
elems map[string]Object // lazily allocated
pos, end syntax.Pos // scope extent; may be invalid
comment string // for debugging only
@@ -31,10 +33,11 @@ type Scope struct {
// NewScope returns a new, empty scope contained in the given parent
// scope, if any. The comment is for debugging only.
func NewScope(parent *Scope, pos, end syntax.Pos, comment string) *Scope {
- s := &Scope{parent, nil, nil, pos, end, comment, false}
+ s := &Scope{parent, nil, 0, nil, pos, end, comment, false}
// don't add children to Universe scope!
if parent != nil && parent != Universe {
parent.children = append(parent.children, s)
+ s.number = len(parent.children)
}
return s
}
@@ -66,7 +69,7 @@ func (s *Scope) Child(i int) *Scope { return s.children[i] }
// Lookup returns the object in scope s with the given name if such an
// object exists; otherwise the result is nil.
func (s *Scope) Lookup(name string) Object {
- return s.elems[name]
+ return resolve(name, s.elems[name])
}
// LookupParent follows the parent chain of scopes starting with s until
@@ -81,7 +84,7 @@ func (s *Scope) Lookup(name string) Object {
// whose scope is the scope of the package that exported them.
func (s *Scope) LookupParent(name string, pos syntax.Pos) (*Scope, Object) {
for ; s != nil; s = s.parent {
- if obj := s.elems[name]; obj != nil && (!pos.IsKnown() || obj.scopePos().Cmp(pos) <= 0) {
+ if obj := s.Lookup(name); obj != nil && (!pos.IsKnown() || obj.scopePos().Cmp(pos) <= 0) {
return s, obj
}
}
@@ -95,19 +98,38 @@ func (s *Scope) LookupParent(name string, pos syntax.Pos) (*Scope, Object) {
// if not already set, and returns nil.
func (s *Scope) Insert(obj Object) Object {
name := obj.Name()
- if alt := s.elems[name]; alt != nil {
+ if alt := s.Lookup(name); alt != nil {
return alt
}
- if s.elems == nil {
- s.elems = make(map[string]Object)
- }
- s.elems[name] = obj
+ s.insert(name, obj)
if obj.Parent() == nil {
obj.setParent(s)
}
return nil
}
+// InsertLazy is like Insert, but allows deferring construction of the
+// inserted object until it's accessed with Lookup. The Object
+// returned by resolve must have the same name as given to InsertLazy.
+// If s already contains an alternative object with the same name,
+// InsertLazy leaves s unchanged and returns false. Otherwise it
+// records the binding and returns true. The object's parent scope
+// will be set to s after resolve is called.
+func (s *Scope) InsertLazy(name string, resolve func() Object) bool {
+ if s.elems[name] != nil {
+ return false
+ }
+ s.insert(name, &lazyObject{parent: s, resolve: resolve})
+ return true
+}
+
+func (s *Scope) insert(name string, obj Object) {
+ if s.elems == nil {
+ s.elems = make(map[string]Object)
+ }
+ s.elems[name] = obj
+}
+
// Squash merges s with its parent scope p by adding all
// objects of s to p, adding all children of s to the
// children of p, and removing s from p's children.
@@ -117,7 +139,8 @@ func (s *Scope) Insert(obj Object) Object {
func (s *Scope) Squash(err func(obj, alt Object)) {
p := s.parent
assert(p != nil)
- for _, obj := range s.elems {
+ for name, obj := range s.elems {
+ obj = resolve(name, obj)
obj.setParent(nil)
if alt := p.Insert(obj); alt != nil {
err(obj, alt)
@@ -196,7 +219,7 @@ func (s *Scope) WriteTo(w io.Writer, n int, recurse bool) {
indn1 := indn + ind
for _, name := range s.Names() {
- fmt.Fprintf(w, "%s%s\n", indn1, s.elems[name])
+ fmt.Fprintf(w, "%s%s\n", indn1, s.Lookup(name))
}
if recurse {
@@ -214,3 +237,57 @@ func (s *Scope) String() string {
s.WriteTo(&buf, 0, false)
return buf.String()
}
+
+// A lazyObject represents an imported Object that has not been fully
+// resolved yet by its importer.
+type lazyObject struct {
+ parent *Scope
+ resolve func() Object
+ obj Object
+ once sync.Once
+}
+
+// resolve returns the Object represented by obj, resolving lazy
+// objects as appropriate.
+func resolve(name string, obj Object) Object {
+ if lazy, ok := obj.(*lazyObject); ok {
+ lazy.once.Do(func() {
+ obj := lazy.resolve()
+
+ if _, ok := obj.(*lazyObject); ok {
+ panic("recursive lazy object")
+ }
+ if obj.Name() != name {
+ panic("lazy object has unexpected name")
+ }
+
+ if obj.Parent() == nil {
+ obj.setParent(lazy.parent)
+ }
+ lazy.obj = obj
+ })
+
+ obj = lazy.obj
+ }
+ return obj
+}
+
+// stub implementations so *lazyObject implements Object and we can
+// store them directly into Scope.elems.
+func (*lazyObject) Parent() *Scope { panic("unreachable") }
+func (*lazyObject) Pos() syntax.Pos { panic("unreachable") }
+func (*lazyObject) Pkg() *Package { panic("unreachable") }
+func (*lazyObject) Name() string { panic("unreachable") }
+func (*lazyObject) Type() Type { panic("unreachable") }
+func (*lazyObject) Exported() bool { panic("unreachable") }
+func (*lazyObject) Id() string { panic("unreachable") }
+func (*lazyObject) String() string { panic("unreachable") }
+func (*lazyObject) order() uint32 { panic("unreachable") }
+func (*lazyObject) color() color { panic("unreachable") }
+func (*lazyObject) setType(Type) { panic("unreachable") }
+func (*lazyObject) setOrder(uint32) { panic("unreachable") }
+func (*lazyObject) setColor(color color) { panic("unreachable") }
+func (*lazyObject) setParent(*Scope) { panic("unreachable") }
+func (*lazyObject) sameId(pkg *Package, name string) bool { panic("unreachable") }
+func (*lazyObject) scopePos() syntax.Pos { panic("unreachable") }
+func (*lazyObject) setScopePos(pos syntax.Pos) { panic("unreachable") }
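
A lazyObject defers construction of an imported object until its first Lookup, and sync.Once makes the deferred resolve callback safe under concurrent lookups. A toy sketch of the same pattern (hypothetical types, not the real scope implementation):

package main

import (
    "fmt"
    "sync"
)

// entry is either a concrete value or a lazy placeholder, much like
// Scope.elems may now hold a *lazyObject.
type entry struct {
    once    sync.Once
    resolve func() string // non-nil for lazy entries
    val     string
}

// get forces a lazy entry exactly once, like the resolve helper above.
func (e *entry) get() string {
    if e.resolve != nil {
        e.once.Do(func() { e.val = e.resolve() })
    }
    return e.val
}

func main() {
    scope := map[string]*entry{
        "Eager": {val: "eager object"},
        "Lazy": {resolve: func() string {
            fmt.Println("resolving Lazy...") // runs only on the first lookup
            return "lazily built object"
        }},
    }
    fmt.Println(scope["Eager"].get())
    fmt.Println(scope["Lazy"].get())
    fmt.Println(scope["Lazy"].get()) // already resolved; resolve does not run again
}
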
diff --git a/src/cmd/compile/internal/types2/signature.go b/src/cmd/compile/internal/types2/signature.go
new file mode 100644
index 0000000000..48b11b289c
--- /dev/null
+++ b/src/cmd/compile/internal/types2/signature.go
@@ -0,0 +1,393 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+)
+
+// ----------------------------------------------------------------------------
+// API
+
+// A Signature represents a (non-builtin) function or method type.
+// The receiver is ignored when comparing signatures for identity.
+type Signature struct {
+ // We need to keep the scope in Signature (rather than passing it around
+ // and store it in the Func Object) because when type-checking a function
+ // literal we call the general type checker which returns a general Type.
+ // We then unpack the *Signature and use the scope for the literal body.
+ rparams *TypeParams // receiver type parameters from left to right, or nil
+ tparams *TypeParams // type parameters from left to right, or nil
+ scope *Scope // function scope, present for package-local signatures
+ recv *Var // nil if not a method
+ params *Tuple // (incoming) parameters from left to right; or nil
+ results *Tuple // (outgoing) results from left to right; or nil
+ variadic bool // true if the last parameter's type is of the form ...T (or string, for append built-in only)
+}
+
+// NewSignature returns a new function type for the given receiver, parameters,
+// and results, either of which may be nil. If variadic is set, the function
+// is variadic, it must have at least one parameter, and the last parameter
+// must be of unnamed slice type.
+func NewSignature(recv *Var, params, results *Tuple, variadic bool) *Signature {
+ if variadic {
+ n := params.Len()
+ if n == 0 {
+ panic("variadic function must have at least one parameter")
+ }
+ if _, ok := params.At(n - 1).typ.(*Slice); !ok {
+ panic("variadic parameter must be of unnamed slice type")
+ }
+ }
+ return &Signature{recv: recv, params: params, results: results, variadic: variadic}
+}
+
+// Recv returns the receiver of signature s (if a method), or nil if a
+// function. It is ignored when comparing signatures for identity.
+//
+// For an abstract method, Recv returns the enclosing interface either
+// as a *Named or an *Interface. Due to embedding, an interface may
+// contain methods whose receiver type is a different interface.
+func (s *Signature) Recv() *Var { return s.recv }
+
+// TParams returns the type parameters of signature s, or nil.
+func (s *Signature) TParams() *TypeParams { return s.tparams }
+
+// SetTParams sets the type parameters of signature s.
+func (s *Signature) SetTParams(tparams []*TypeName) { s.tparams = bindTParams(tparams) }
+
+// RParams returns the receiver type parameters of signature s, or nil.
+func (s *Signature) RParams() *TypeParams { return s.rparams }
+
+// SetRParams sets the receiver type params of signature s.
+func (s *Signature) SetRParams(rparams []*TypeName) { s.rparams = bindTParams(rparams) }
+
+// Params returns the parameters of signature s, or nil.
+func (s *Signature) Params() *Tuple { return s.params }
+
+// Results returns the results of signature s, or nil.
+func (s *Signature) Results() *Tuple { return s.results }
+
+// Variadic reports whether the signature s is variadic.
+func (s *Signature) Variadic() bool { return s.variadic }
+
+func (s *Signature) Underlying() Type { return s }
+func (s *Signature) String() string { return TypeString(s, nil) }
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+// Disabled by default, but enabled when running tests (via types_test.go).
+var acceptMethodTypeParams bool
+
+// funcType type-checks a function or method type.
+func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams []*syntax.Field, ftyp *syntax.FuncType) {
+ check.openScope(ftyp, "function")
+ check.scope.isFunc = true
+ check.recordScope(ftyp, check.scope)
+ sig.scope = check.scope
+ defer check.closeScope()
+
+ var recvTyp syntax.Expr // rewritten receiver type; valid if != nil
+ if recvPar != nil {
+ // collect generic receiver type parameters, if any
+ // - a receiver type parameter is like any other type parameter, except that it is declared implicitly
+ // - the receiver specification acts as local declaration for its type parameters, which may be blank
+ _, rname, rparams := check.unpackRecv(recvPar.Type, true)
+ if len(rparams) > 0 {
+ // Blank identifiers don't get declared and regular type-checking of the instantiated
+ // parameterized receiver type expression fails in Checker.collectParams of receiver.
+ // Identify blank type parameters and substitute each with a unique new identifier named
+ // "n_" (where n is the parameter index) and which cannot conflict with any user-defined
+ // name.
+ var smap map[*syntax.Name]*syntax.Name // substitution map from "_" to "n_" identifiers
+ for i, p := range rparams {
+ if p.Value == "_" {
+ new := *p
+ new.Value = fmt.Sprintf("%d_", i)
+ rparams[i] = &new // use n_ identifier instead of _ so it can be looked up
+ if smap == nil {
+ smap = make(map[*syntax.Name]*syntax.Name)
+ }
+ smap[p] = &new
+ }
+ }
+ if smap != nil {
+ // blank identifiers were found => use rewritten receiver type
+ recvTyp = isubst(recvPar.Type, smap)
+ }
+ rlist := make([]*TypeName, len(rparams))
+ for i, rparam := range rparams {
+ rlist[i] = check.declareTypeParam(rparam)
+ }
+ sig.rparams = bindTParams(rlist)
+ // determine receiver type to get its type parameters
+ // and the respective type parameter bounds
+ var recvTParams []*TypeName
+ if rname != nil {
+ // recv should be a Named type (otherwise an error is reported elsewhere)
+ // Also: Don't report an error via genericType since it will be reported
+ // again when we type-check the signature.
+ // TODO(gri) maybe the receiver should be marked as invalid instead?
+ if recv := asNamed(check.genericType(rname, false)); recv != nil {
+ recvTParams = recv.TParams().list()
+ }
+ }
+ // provide type parameter bounds
+ // - only do this if we have the right number (otherwise an error is reported elsewhere)
+ if sig.RParams().Len() == len(recvTParams) {
+ // We have a list of *TypeNames but we need a list of Types.
+ list := make([]Type, sig.RParams().Len())
+ for i, t := range sig.RParams().list() {
+ list[i] = t.typ
+ }
+ smap := makeSubstMap(recvTParams, list)
+ for i, tname := range sig.RParams().list() {
+ bound := recvTParams[i].typ.(*TypeParam).bound
+ // bound is (possibly) parameterized in the context of the
+ // receiver type declaration. Substitute parameters for the
+ // current context.
+ // TODO(gri) should we assume now that bounds always exist?
+ // (no bound == empty interface)
+ if bound != nil {
+ bound = check.subst(tname.pos, bound, smap)
+ tname.typ.(*TypeParam).bound = bound
+ }
+ }
+ }
+ }
+ }
+
+ if tparams != nil {
+ sig.tparams = check.collectTypeParams(tparams)
+ // Always type-check method type parameters but complain if they are not enabled.
+ // (A separate check is needed when type-checking interface method signatures because
+ // they don't have a receiver specification.)
+ if recvPar != nil && !acceptMethodTypeParams {
+ check.error(ftyp, "methods cannot have type parameters")
+ }
+ }
+
+ // Value (non-type) parameters' scope starts in the function body. Use a temporary scope for their
+ // declarations and then squash that scope into the parent scope (and report any redeclarations at
+ // that time).
+ scope := NewScope(check.scope, nopos, nopos, "function body (temp. scope)")
+ var recvList []*Var // TODO(gri) remove the need for making a list here
+ if recvPar != nil {
+ recvList, _ = check.collectParams(scope, []*syntax.Field{recvPar}, recvTyp, false) // use rewritten receiver type, if any
+ }
+ params, variadic := check.collectParams(scope, ftyp.ParamList, nil, true)
+ results, _ := check.collectParams(scope, ftyp.ResultList, nil, false)
+ scope.Squash(func(obj, alt Object) {
+ var err error_
+ err.errorf(obj, "%s redeclared in this block", obj.Name())
+ err.recordAltDecl(alt)
+ check.report(&err)
+ })
+
+ if recvPar != nil {
+ // recv parameter list present (may be empty)
+ // spec: "The receiver is specified via an extra parameter section preceding the
+ // method name. That parameter section must declare a single parameter, the receiver."
+ var recv *Var
+ switch len(recvList) {
+ case 0:
+ // error reported by resolver
+ recv = NewParam(nopos, nil, "", Typ[Invalid]) // ignore recv below
+ default:
+ // more than one receiver
+ check.error(recvList[len(recvList)-1].Pos(), "method must have exactly one receiver")
+ fallthrough // continue with first receiver
+ case 1:
+ recv = recvList[0]
+ }
+
+ // TODO(gri) We should delay rtyp expansion to when we actually need the
+ // receiver; thus all checks here should be delayed to later.
+ rtyp, _ := deref(recv.typ)
+
+ // spec: "The receiver type must be of the form T or *T where T is a type name."
+ // (ignore invalid types - error was reported before)
+ if rtyp != Typ[Invalid] {
+ var err string
+ switch T := rtyp.(type) {
+ case *Named:
+ T.expand()
+ // spec: "The type denoted by T is called the receiver base type; it must not
+ // be a pointer or interface type and it must be declared in the same package
+ // as the method."
+ if T.obj.pkg != check.pkg {
+ err = "type not defined in this package"
+ if check.conf.CompilerErrorMessages {
+ check.errorf(recv.pos, "cannot define new methods on non-local type %s", recv.typ)
+ err = ""
+ }
+ } else {
+ // The underlying type of a receiver base type can be a type parameter;
+ // e.g. for methods with a generic receiver T[P] with type T[P any] P.
+ underIs(T, func(u Type) bool {
+ switch u := u.(type) {
+ case *Basic:
+ // unsafe.Pointer is treated like a regular pointer
+ if u.kind == UnsafePointer {
+ err = "unsafe.Pointer"
+ return false
+ }
+ case *Pointer, *Interface:
+ err = "pointer or interface type"
+ return false
+ }
+ return true
+ })
+ }
+ case *Basic:
+ err = "basic or unnamed type"
+ if check.conf.CompilerErrorMessages {
+ check.errorf(recv.pos, "cannot define new methods on non-local type %s", recv.typ)
+ err = ""
+ }
+ default:
+ check.errorf(recv.pos, "invalid receiver type %s", recv.typ)
+ }
+ if err != "" {
+ check.errorf(recv.pos, "invalid receiver type %s (%s)", recv.typ, err)
+ // ok to continue
+ }
+ }
+ sig.recv = recv
+ }
+
+ sig.params = NewTuple(params...)
+ sig.results = NewTuple(results...)
+ sig.variadic = variadic
+}
+
+// collectParams declares the parameters of list in scope and returns the corresponding
+// variable list. If type0 != nil, it is used instead of the first type in list.
+func (check *Checker) collectParams(scope *Scope, list []*syntax.Field, type0 syntax.Expr, variadicOk bool) (params []*Var, variadic bool) {
+ if list == nil {
+ return
+ }
+
+ var named, anonymous bool
+
+ var typ Type
+ var prev syntax.Expr
+ for i, field := range list {
+ ftype := field.Type
+ // type-check type of grouped fields only once
+ if ftype != prev {
+ prev = ftype
+ if i == 0 && type0 != nil {
+ ftype = type0
+ }
+ if t, _ := ftype.(*syntax.DotsType); t != nil {
+ ftype = t.Elem
+ if variadicOk && i == len(list)-1 {
+ variadic = true
+ } else {
+ check.softErrorf(t, "can only use ... with final parameter in list")
+ // ignore ... and continue
+ }
+ }
+ typ = check.varType(ftype)
+ }
+ // The parser ensures that f.Tag is nil and we don't
+ // care if a constructed AST contains a non-nil tag.
+ if field.Name != nil {
+ // named parameter
+ name := field.Name.Value
+ if name == "" {
+ check.error(field.Name, invalidAST+"anonymous parameter")
+ // ok to continue
+ }
+ par := NewParam(field.Name.Pos(), check.pkg, name, typ)
+ check.declare(scope, field.Name, par, scope.pos)
+ params = append(params, par)
+ named = true
+ } else {
+ // anonymous parameter
+ par := NewParam(field.Pos(), check.pkg, "", typ)
+ check.recordImplicit(field, par)
+ params = append(params, par)
+ anonymous = true
+ }
+ }
+
+ if named && anonymous {
+ check.error(list[0], invalidAST+"list contains both named and anonymous parameters")
+ // ok to continue
+ }
+
+ // For a variadic function, change the last parameter's type from T to []T.
+ // Since we type-checked T rather than ...T, we also need to retro-actively
+ // record the type for ...T.
+ if variadic {
+ last := params[len(params)-1]
+ last.typ = &Slice{elem: last.typ}
+ check.recordTypeAndValue(list[len(list)-1].Type, typexpr, last.typ, nil)
+ }
+
+ return
+}
+
+// isubst returns an x with identifiers substituted per the substitution map smap.
+// isubst only handles the case of (valid) method receiver type expressions correctly.
+func isubst(x syntax.Expr, smap map[*syntax.Name]*syntax.Name) syntax.Expr {
+ switch n := x.(type) {
+ case *syntax.Name:
+ if alt := smap[n]; alt != nil {
+ return alt
+ }
+ // case *syntax.StarExpr:
+ // X := isubst(n.X, smap)
+ // if X != n.X {
+ // new := *n
+ // new.X = X
+ // return &new
+ // }
+ case *syntax.Operation:
+ if n.Op == syntax.Mul && n.Y == nil {
+ X := isubst(n.X, smap)
+ if X != n.X {
+ new := *n
+ new.X = X
+ return &new
+ }
+ }
+ case *syntax.IndexExpr:
+ Index := isubst(n.Index, smap)
+ if Index != n.Index {
+ new := *n
+ new.Index = Index
+ return &new
+ }
+ case *syntax.ListExpr:
+ var elems []syntax.Expr
+ for i, elem := range n.ElemList {
+ new := isubst(elem, smap)
+ if new != elem {
+ if elems == nil {
+ elems = make([]syntax.Expr, len(n.ElemList))
+ copy(elems, n.ElemList)
+ }
+ elems[i] = new
+ }
+ }
+ if elems != nil {
+ new := *n
+ new.ElemList = elems
+ return &new
+ }
+ case *syntax.ParenExpr:
+ return isubst(n.X, smap) // no need to keep parentheses
+ default:
+ // Other receiver type expressions are invalid.
+ // It's fine to ignore those here as they will
+ // be checked elsewhere.
+ }
+ return x
+}
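
The exported go/types package has the same four-argument NewSignature constructor (later superseded by NewSignatureType but still available), so the API introduced here can be exercised as follows:

package main

import (
    "fmt"
    "go/token"
    "go/types"
)

func main() {
    // func(prefix string, args ...int) (n int, err error)
    params := types.NewTuple(
        types.NewParam(token.NoPos, nil, "prefix", types.Typ[types.String]),
        types.NewParam(token.NoPos, nil, "args", types.NewSlice(types.Typ[types.Int])),
    )
    results := types.NewTuple(
        types.NewParam(token.NoPos, nil, "n", types.Typ[types.Int]),
        types.NewParam(token.NoPos, nil, "err", types.Universe.Lookup("error").Type()),
    )
    // variadic requires the last parameter to be an unnamed slice type.
    sig := types.NewSignature(nil, params, results, true)
    fmt.Println(sig) // func(prefix string, args ...int) (n int, err error)
    fmt.Println(sig.Variadic(), sig.Params().Len(), sig.Results().Len())
}
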
diff --git a/src/cmd/compile/internal/types2/sizeof_test.go b/src/cmd/compile/internal/types2/sizeof_test.go
index 236feb0404..d2f53258f0 100644
--- a/src/cmd/compile/internal/types2/sizeof_test.go
+++ b/src/cmd/compile/internal/types2/sizeof_test.go
@@ -26,15 +26,14 @@ func TestSizeof(t *testing.T) {
{Struct{}, 24, 48},
{Pointer{}, 8, 16},
{Tuple{}, 12, 24},
- {Signature{}, 44, 88},
- {Sum{}, 12, 24},
- {Interface{}, 60, 120},
+ {Signature{}, 28, 56},
+ {Union{}, 16, 32},
+ {Interface{}, 40, 80},
{Map{}, 16, 32},
{Chan{}, 12, 24},
- {Named{}, 68, 136},
+ {Named{}, 80, 152},
{TypeParam{}, 28, 48},
- {instance{}, 52, 96},
- {bottom{}, 0, 0},
+ {term{}, 12, 24},
{top{}, 0, 0},
// Objects
@@ -48,8 +47,9 @@ func TestSizeof(t *testing.T) {
{Nil{}, 56, 88},
// Misc
- {Scope{}, 56, 96},
+ {Scope{}, 60, 104},
{Package{}, 40, 80},
+ {TypeSet{}, 28, 56},
}
for _, test := range tests {
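
The updated entries track the struct changes elsewhere in this patch; the test itself is a plain size-regression check. A minimal sketch of that pattern with a hypothetical struct (sizes quoted for 64-bit platforms):

package main

import (
    "fmt"
    "unsafe"
)

// node is a hypothetical struct used only to illustrate the idea: pin the
// expected in-memory size so that accidental field additions are caught.
type node struct {
    name string // 16 bytes on 64-bit (pointer + length)
    kind uint32 // 4 bytes
    used bool   // 1 byte, followed by 3 bytes of padding
}

func main() {
    const want = 24 // expected size on 64-bit platforms
    got := unsafe.Sizeof(node{})
    fmt.Println(got)
    if got != want {
        fmt.Println("unexpected size: struct layout changed")
    }
}
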
diff --git a/src/cmd/compile/internal/types2/sizes.go b/src/cmd/compile/internal/types2/sizes.go
index aa0fbf40fc..6a3d19d8ea 100644
--- a/src/cmd/compile/internal/types2/sizes.go
+++ b/src/cmd/compile/internal/types2/sizes.go
@@ -48,7 +48,7 @@ type StdSizes struct {
func (s *StdSizes) Alignof(T Type) int64 {
// For arrays and structs, alignment is defined in terms
// of alignment of the elements and fields, respectively.
- switch t := optype(T).(type) {
+ switch t := under(T).(type) {
case *Array:
// spec: "For a variable x of array type: unsafe.Alignof(x)
// is the same as unsafe.Alignof(x[0]), but at least 1."
@@ -73,6 +73,8 @@ func (s *StdSizes) Alignof(T Type) int64 {
if t.Info()&IsString != 0 {
return s.WordSize
}
+ case *TypeParam, *Union:
+ unreachable()
}
a := s.Sizeof(T) // may be 0
// spec: "For a variable x of any type: unsafe.Alignof(x) is at least 1."
@@ -118,7 +120,7 @@ var basicSizes = [...]byte{
}
func (s *StdSizes) Sizeof(T Type) int64 {
- switch t := optype(T).(type) {
+ switch t := under(T).(type) {
case *Basic:
assert(isTyped(T))
k := t.kind
@@ -148,10 +150,10 @@ func (s *StdSizes) Sizeof(T Type) int64 {
}
offsets := s.Offsetsof(t.fields)
return offsets[n-1] + s.Sizeof(t.fields[n-1].typ)
- case *Sum:
- panic("Sizeof unimplemented for type sum")
case *Interface:
return s.WordSize * 2
+ case *TypeParam, *Union:
+ unreachable()
}
return s.WordSize // catch-all
}
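
Alignof and Sizeof now switch on under(T) and mark *TypeParam and *Union as unreachable, since sizes are computed for concrete types only. The concrete-type behavior can be observed through the exported go/types Sizes API:

package main

import (
    "fmt"
    "go/token"
    "go/types"
)

func main() {
    sizes := types.SizesFor("gc", "amd64") // the gc compiler's size and alignment rules

    s := types.NewStruct([]*types.Var{
        types.NewField(token.NoPos, nil, "b", types.Typ[types.Bool], false),
        types.NewField(token.NoPos, nil, "i", types.Typ[types.Int64], false),
    }, nil)

    fmt.Println(sizes.Sizeof(s))  // 16: bool + 7 bytes of padding + int64
    fmt.Println(sizes.Alignof(s)) // 8: alignment of the largest field
}
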
diff --git a/src/cmd/compile/internal/types2/slice.go b/src/cmd/compile/internal/types2/slice.go
new file mode 100644
index 0000000000..9c22a6fb1b
--- /dev/null
+++ b/src/cmd/compile/internal/types2/slice.go
@@ -0,0 +1,19 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// A Slice represents a slice type.
+type Slice struct {
+ elem Type
+}
+
+// NewSlice returns a new slice type for the given element type.
+func NewSlice(elem Type) *Slice { return &Slice{elem: elem} }
+
+// Elem returns the element type of slice s.
+func (s *Slice) Elem() Type { return s.elem }
+
+func (s *Slice) Underlying() Type { return s }
+func (s *Slice) String() string { return TypeString(s, nil) }
diff --git a/src/cmd/compile/internal/types2/stmt.go b/src/cmd/compile/internal/types2/stmt.go
index c3e646c80c..7865c2d4f4 100644
--- a/src/cmd/compile/internal/types2/stmt.go
+++ b/src/cmd/compile/internal/types2/stmt.go
@@ -14,7 +14,7 @@ import (
func (check *Checker) funcBody(decl *declInfo, name string, sig *Signature, body *syntax.BlockStmt, iota constant.Value) {
if check.conf.IgnoreFuncBodies {
- panic("internal error: function body not ignored")
+ panic("function body not ignored")
}
if check.conf.Trace {
@@ -64,7 +64,8 @@ func (check *Checker) funcBody(decl *declInfo, name string, sig *Signature, body
func (check *Checker) usage(scope *Scope) {
var unused []*Var
- for _, elem := range scope.elems {
+ for name, elem := range scope.elems {
+ elem = resolve(name, elem)
if v, _ := elem.(*Var); v != nil && !v.used {
unused = append(unused, v)
}
@@ -255,7 +256,7 @@ L:
// look for duplicate types for a given value
// (quadratic algorithm, but these lists tend to be very short)
for _, vt := range seen[val] {
- if check.identical(v.typ, vt.typ) {
+ if Identical(v.typ, vt.typ) {
var err error_
err.errorf(&v, "duplicate case %s in expression switch", &v)
err.errorf(vt.pos, "previous case")
@@ -281,7 +282,7 @@ L:
// look for duplicate types
// (quadratic algorithm, but type switches tend to be reasonably small)
for t, other := range seen {
- if T == nil && t == nil || T != nil && t != nil && check.identical(T, t) {
+ if T == nil && t == nil || T != nil && t != nil && Identical(T, t) {
// talk about "case" rather than "type" because of nil case
Ts := "nil"
if T != nil {
@@ -351,25 +352,33 @@ func (check *Checker) stmt(ctxt stmtContext, s syntax.Stmt) {
check.errorf(&x, "%s %s", &x, msg)
case *syntax.SendStmt:
- var ch, x operand
+ var ch, val operand
check.expr(&ch, s.Chan)
- check.expr(&x, s.Value)
- if ch.mode == invalid || x.mode == invalid {
+ check.expr(&val, s.Value)
+ if ch.mode == invalid || val.mode == invalid {
return
}
-
- tch := asChan(ch.typ)
- if tch == nil {
- check.errorf(s, invalidOp+"cannot send to non-chan type %s", ch.typ)
- return
- }
-
- if tch.dir == RecvOnly {
- check.errorf(s, invalidOp+"cannot send to receive-only type %s", tch)
+ var elem Type
+ if !underIs(ch.typ, func(u Type) bool {
+ uch, _ := u.(*Chan)
+ if uch == nil {
+ check.errorf(s, invalidOp+"cannot send to non-channel %s", &ch)
+ return false
+ }
+ if uch.dir == RecvOnly {
+ check.errorf(s, invalidOp+"cannot send to receive-only channel %s", &ch)
+ return false
+ }
+ if elem != nil && !Identical(uch.elem, elem) {
+ check.errorf(s, invalidOp+"channels of %s must have the same element type", &ch)
+ return false
+ }
+ elem = uch.elem
+ return true
+ }) {
return
}
-
- check.assignment(&x, tch.elem, "send")
+ check.assignment(&val, elem, "send")
case *syntax.AssignStmt:
lhs := unpackExpr(s.Lhs)
@@ -780,9 +789,9 @@ func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *s
// determine key/value types
var key, val Type
if x.mode != invalid {
+ // Ranging over a type parameter is permitted if it has a structural type.
typ := optype(x.typ)
if _, ok := typ.(*Chan); ok && sValue != nil {
- // TODO(gri) this also needs to happen for channels in generic variables
check.softErrorf(sValue, "range over %s permits only one iteration variable", &x)
// ok to continue
}
@@ -891,7 +900,7 @@ func isVarName(x syntax.Expr) bool {
// variables are used or present; this matters if we range over a generic
// type where not all keys or values are of the same type.
func rangeKeyVal(typ Type, wantKey, wantVal bool) (Type, Type, string) {
- switch typ := typ.(type) {
+ switch typ := arrayPtrDeref(typ).(type) {
case *Basic:
if isString(typ) {
return Typ[Int], universeRune, "" // use 'rune' name
@@ -900,10 +909,6 @@ func rangeKeyVal(typ Type, wantKey, wantVal bool) (Type, Type, string) {
return Typ[Int], typ.elem, ""
case *Slice:
return Typ[Int], typ.elem, ""
- case *Pointer:
- if typ := asArray(typ.base); typ != nil {
- return Typ[Int], typ.elem, ""
- }
case *Map:
return typ.key, typ.elem, ""
case *Chan:
@@ -912,32 +917,9 @@ func rangeKeyVal(typ Type, wantKey, wantVal bool) (Type, Type, string) {
msg = "receive from send-only channel"
}
return typ.elem, Typ[Invalid], msg
- case *Sum:
- first := true
- var key, val Type
- var msg string
- typ.is(func(t Type) bool {
- k, v, m := rangeKeyVal(under(t), wantKey, wantVal)
- if k == nil || m != "" {
- key, val, msg = k, v, m
- return false
- }
- if first {
- key, val, msg = k, v, m
- first = false
- return true
- }
- if wantKey && !Identical(key, k) {
- key, val, msg = nil, nil, "all possible values must have the same key type"
- return false
- }
- if wantVal && !Identical(val, v) {
- key, val, msg = nil, nil, "all possible values must have the same element type"
- return false
- }
- return true
- })
- return key, val, msg
+ case *top:
+ // we have a type parameter with no structural type
+ return nil, nil, "no structural type"
}
return nil, nil, ""
}
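
The send-statement check now uses underIs, so a send on a value of type parameter type is accepted when every type in the constraint's type set is a channel that permits sends and all element types are identical. A source-level sketch of code this rule accepts (Go 1.18+):

package main

import "fmt"

// Every type in the constraint's type set is a channel with element type E
// that permits sends, so c <- v type-checks under the rule in this hunk.
type sendable[E any] interface {
    ~chan E | ~chan<- E
}

func send[C sendable[E], E any](c C, v E) {
    c <- v
}

func main() {
    ch := make(chan int, 1)
    send[chan int, int](ch, 42) // explicit type arguments for clarity
    fmt.Println(<-ch)           // 42
}
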
diff --git a/src/cmd/compile/internal/types2/struct.go b/src/cmd/compile/internal/types2/struct.go
new file mode 100644
index 0000000000..f0c27c0150
--- /dev/null
+++ b/src/cmd/compile/internal/types2/struct.go
@@ -0,0 +1,213 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "strconv"
+)
+
+// ----------------------------------------------------------------------------
+// API
+
+// A Struct represents a struct type.
+type Struct struct {
+ fields []*Var
+ tags []string // field tags; nil if there are no tags
+}
+
+// NewStruct returns a new struct with the given fields and corresponding field tags.
+// If a field with index i has a tag, tags[i] must be that tag, but len(tags) may be
+// only as long as required to hold the tag with the largest index i. Consequently,
+// if no field has a tag, tags may be nil.
+func NewStruct(fields []*Var, tags []string) *Struct {
+ var fset objset
+ for _, f := range fields {
+ if f.name != "_" && fset.insert(f) != nil {
+ panic("multiple fields with the same name")
+ }
+ }
+ if len(tags) > len(fields) {
+ panic("more tags than fields")
+ }
+ return &Struct{fields: fields, tags: tags}
+}
+
+// NumFields returns the number of fields in the struct (including blank and embedded fields).
+func (s *Struct) NumFields() int { return len(s.fields) }
+
+// Field returns the i'th field for 0 <= i < NumFields().
+func (s *Struct) Field(i int) *Var { return s.fields[i] }
+
+// Tag returns the i'th field tag for 0 <= i < NumFields().
+func (s *Struct) Tag(i int) string {
+ if i < len(s.tags) {
+ return s.tags[i]
+ }
+ return ""
+}
+
+func (s *Struct) Underlying() Type { return s }
+func (s *Struct) String() string { return TypeString(s, nil) }
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+func (check *Checker) structType(styp *Struct, e *syntax.StructType) {
+ if e.FieldList == nil {
+ return
+ }
+
+ // struct fields and tags
+ var fields []*Var
+ var tags []string
+
+ // for double-declaration checks
+ var fset objset
+
+ // current field typ and tag
+ var typ Type
+ var tag string
+ add := func(ident *syntax.Name, embedded bool, pos syntax.Pos) {
+ if tag != "" && tags == nil {
+ tags = make([]string, len(fields))
+ }
+ if tags != nil {
+ tags = append(tags, tag)
+ }
+
+ name := ident.Value
+ fld := NewField(pos, check.pkg, name, typ, embedded)
+ // spec: "Within a struct, non-blank field names must be unique."
+ if name == "_" || check.declareInSet(&fset, pos, fld) {
+ fields = append(fields, fld)
+ check.recordDef(ident, fld)
+ }
+ }
+
+ // addInvalid adds an embedded field of invalid type to the struct for
+ // fields with errors; this keeps the number of struct fields in sync
+ // with the source as long as the fields are _ or have different names
+ // (issue #25627).
+ addInvalid := func(ident *syntax.Name, pos syntax.Pos) {
+ typ = Typ[Invalid]
+ tag = ""
+ add(ident, true, pos)
+ }
+
+ var prev syntax.Expr
+ for i, f := range e.FieldList {
+ // Fields declared syntactically with the same type (e.g.: a, b, c T)
+ // share the same type expression. Only check type if it's a new type.
+ if i == 0 || f.Type != prev {
+ typ = check.varType(f.Type)
+ prev = f.Type
+ }
+ tag = ""
+ if i < len(e.TagList) {
+ tag = check.tag(e.TagList[i])
+ }
+ if f.Name != nil {
+ // named field
+ add(f.Name, false, f.Name.Pos())
+ } else {
+ // embedded field
+ // spec: "An embedded type must be specified as a type name T or as a
+ // pointer to a non-interface type name *T, and T itself may not be a
+ // pointer type."
+ pos := syntax.StartPos(f.Type)
+ name := embeddedFieldIdent(f.Type)
+ if name == nil {
+ check.errorf(pos, "invalid embedded field type %s", f.Type)
+ name = &syntax.Name{Value: "_"} // TODO(gri) need to set position to pos
+ addInvalid(name, pos)
+ continue
+ }
+ add(name, true, pos)
+
+ // Because we have a name, typ must be of the form T or *T, where T is the name
+ // of a (named or alias) type, and t (= deref(typ)) must be the type of T.
+ // We must delay this check to the end because we don't want to instantiate
+ // (via under(t)) a possibly incomplete type.
+ embeddedTyp := typ // for closure below
+ embeddedPos := pos
+ check.later(func() {
+ t, isPtr := deref(embeddedTyp)
+ switch t := under(t).(type) {
+ case *Basic:
+ if t == Typ[Invalid] {
+ // error was reported before
+ return
+ }
+ // unsafe.Pointer is treated like a regular pointer
+ if t.kind == UnsafePointer {
+ check.error(embeddedPos, "embedded field type cannot be unsafe.Pointer")
+ }
+ case *Pointer:
+ check.error(embeddedPos, "embedded field type cannot be a pointer")
+ case *TypeParam:
+ check.error(embeddedPos, "embedded field type cannot be a (pointer to a) type parameter")
+ case *Interface:
+ if isPtr {
+ check.error(embeddedPos, "embedded field type cannot be a pointer to an interface")
+ }
+ }
+ })
+ }
+ }
+
+ styp.fields = fields
+ styp.tags = tags
+}
+
+func embeddedFieldIdent(e syntax.Expr) *syntax.Name {
+ switch e := e.(type) {
+ case *syntax.Name:
+ return e
+ case *syntax.Operation:
+ if base := ptrBase(e); base != nil {
+ // *T is valid, but **T is not
+ if op, _ := base.(*syntax.Operation); op == nil || ptrBase(op) == nil {
+ return embeddedFieldIdent(e.X)
+ }
+ }
+ case *syntax.SelectorExpr:
+ return e.Sel
+ case *syntax.IndexExpr:
+ return embeddedFieldIdent(e.X)
+ }
+ return nil // invalid embedded field
+}
+
+func (check *Checker) declareInSet(oset *objset, pos syntax.Pos, obj Object) bool {
+ if alt := oset.insert(obj); alt != nil {
+ var err error_
+ err.errorf(pos, "%s redeclared", obj.Name())
+ err.recordAltDecl(alt)
+ check.report(&err)
+ return false
+ }
+ return true
+}
+
+func (check *Checker) tag(t *syntax.BasicLit) string {
+ // If t.Bad, an error was reported during parsing.
+ if t != nil && !t.Bad {
+ if t.Kind == syntax.StringLit {
+ if val, err := strconv.Unquote(t.Value); err == nil {
+ return val
+ }
+ }
+ check.errorf(t, invalidAST+"incorrect tag syntax: %q", t.Value)
+ }
+ return ""
+}
+
+func ptrBase(x *syntax.Operation) syntax.Expr {
+ if x.Op == syntax.Mul && x.Y == nil {
+ return x.X
+ }
+ return nil
+}
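
For orientation, a minimal standalone sketch (ordinary Go, not part of this patch) of the embedded-field rule quoted in the comment above: an embedded field must be a type name T or a pointer to a non-interface type name *T, with the pointer, interface-pointer, and unsafe.Pointer cases rejected by the delayed check.

package embeddemo

type T struct{ x int }
type U struct{ y int }
type P *int

// Accepted embedded-field forms.
type OK struct {
	T  // type name
	*U // pointer to a non-interface type name
}

// Forms rejected by embeddedFieldIdent or by the delayed check above
// (shown in a comment because they would not compile):
//
//	**T            // "invalid embedded field type **T"
//	P              // "embedded field type cannot be a pointer"
//	unsafe.Pointer // "embedded field type cannot be unsafe.Pointer"
//	*error         // "embedded field type cannot be a pointer to an interface"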
diff --git a/src/cmd/compile/internal/types2/subst.go b/src/cmd/compile/internal/types2/subst.go
index c8e428c183..26796fc604 100644
--- a/src/cmd/compile/internal/types2/subst.go
+++ b/src/cmd/compile/internal/types2/subst.go
@@ -2,9 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// This file implements instantiation of generic types
-// through substitution of type parameters by actual
-// types.
+// This file implements type parameter substitution.
package types2
@@ -28,12 +26,7 @@ func makeSubstMap(tpars []*TypeName, targs []Type) *substMap {
assert(len(tpars) == len(targs))
proj := make(map[*TypeParam]Type, len(tpars))
for i, tpar := range tpars {
- // We must expand type arguments otherwise *instance
- // types end up as components in composite types.
- // TODO(gri) explain why this causes problems, if it does
- targ := expand(targs[i]) // possibly nil
- targs[i] = targ
- proj[tpar.typ.(*TypeParam)] = targ
+ proj[tpar.typ.(*TypeParam)] = targs[i]
}
return &substMap{targs, proj}
}
@@ -53,158 +46,6 @@ func (m *substMap) lookup(tpar *TypeParam) Type {
return tpar
}
-func (check *Checker) instantiate(pos syntax.Pos, typ Type, targs []Type, poslist []syntax.Pos) (res Type) {
- if check.conf.Trace {
- check.trace(pos, "-- instantiating %s with %s", typ, typeListString(targs))
- check.indent++
- defer func() {
- check.indent--
- var under Type
- if res != nil {
- // Calling under() here may lead to endless instantiations.
- // Test case: type T[P any] T[P]
- // TODO(gri) investigate if that's a bug or to be expected.
- under = res.Underlying()
- }
- check.trace(pos, "=> %s (under = %s)", res, under)
- }()
- }
-
- assert(len(poslist) <= len(targs))
-
- // TODO(gri) What is better here: work with TypeParams, or work with TypeNames?
- var tparams []*TypeName
- switch t := typ.(type) {
- case *Named:
- tparams = t.tparams
- case *Signature:
- tparams = t.tparams
- defer func() {
- // If we had an unexpected failure somewhere don't panic below when
- // asserting res.(*Signature). Check for *Signature in case Typ[Invalid]
- // is returned.
- if _, ok := res.(*Signature); !ok {
- return
- }
- // If the signature doesn't use its type parameters, subst
- // will not make a copy. In that case, make a copy now (so
- // we can set tparams to nil w/o causing side-effects).
- if t == res {
- copy := *t
- res = &copy
- }
- // After instantiating a generic signature, it is not generic
- // anymore; we need to set tparams to nil.
- res.(*Signature).tparams = nil
- }()
-
- default:
- check.dump("%v: cannot instantiate %v", pos, typ)
- unreachable() // only defined types and (defined) functions can be generic
-
- }
-
- // the number of supplied types must match the number of type parameters
- if len(targs) != len(tparams) {
- // TODO(gri) provide better error message
- check.errorf(pos, "got %d arguments but %d type parameters", len(targs), len(tparams))
- return Typ[Invalid]
- }
-
- if len(tparams) == 0 {
- return typ // nothing to do (minor optimization)
- }
-
- smap := makeSubstMap(tparams, targs)
-
- // check bounds
- for i, tname := range tparams {
- tpar := tname.typ.(*TypeParam)
- iface := tpar.Bound()
- if iface.Empty() {
- continue // no type bound
- }
-
- targ := targs[i]
-
- // best position for error reporting
- pos := pos
- if i < len(poslist) {
- pos = poslist[i]
- }
-
- // The type parameter bound is parameterized with the same type parameters
- // as the instantiated type; before we can use it for bounds checking we
- // need to instantiate it with the type arguments with which we instantiate
- // the parameterized type.
- iface = check.subst(pos, iface, smap).(*Interface)
-
- // targ must implement iface (methods)
- // - check only if we have methods
- check.completeInterface(nopos, iface)
- if len(iface.allMethods) > 0 {
- // If the type argument is a pointer to a type parameter, the type argument's
- // method set is empty.
- // TODO(gri) is this what we want? (spec question)
- if base, isPtr := deref(targ); isPtr && asTypeParam(base) != nil {
- check.errorf(pos, "%s has no methods", targ)
- break
- }
- if m, wrong := check.missingMethod(targ, iface, true); m != nil {
- // TODO(gri) needs to print updated name to avoid major confusion in error message!
- // (print warning for now)
- // Old warning:
- // check.softErrorf(pos, "%s does not satisfy %s (warning: name not updated) = %s (missing method %s)", targ, tpar.bound, iface, m)
- if m.name == "==" {
- // We don't want to report "missing method ==".
- check.softErrorf(pos, "%s does not satisfy comparable", targ)
- } else if wrong != nil {
- // TODO(gri) This can still report uninstantiated types which makes the error message
- // more difficult to read then necessary.
- check.softErrorf(pos,
- "%s does not satisfy %s: wrong method signature\n\tgot %s\n\twant %s",
- targ, tpar.bound, wrong, m,
- )
- } else {
- check.softErrorf(pos, "%s does not satisfy %s (missing method %s)", targ, tpar.bound, m.name)
- }
- break
- }
- }
-
- // targ's underlying type must also be one of the interface types listed, if any
- if iface.allTypes == nil {
- continue // nothing to do
- }
-
- // If targ is itself a type parameter, each of its possible types, but at least one, must be in the
- // list of iface types (i.e., the targ type list must be a non-empty subset of the iface types).
- if targ := asTypeParam(targ); targ != nil {
- targBound := targ.Bound()
- if targBound.allTypes == nil {
- check.softErrorf(pos, "%s does not satisfy %s (%s has no type constraints)", targ, tpar.bound, targ)
- break
- }
- for _, t := range unpack(targBound.allTypes) {
- if !iface.isSatisfiedBy(t) {
- // TODO(gri) match this error message with the one below (or vice versa)
- check.softErrorf(pos, "%s does not satisfy %s (%s type constraint %s not found in %s)", targ, tpar.bound, targ, t, iface.allTypes)
- break
- }
- }
- break
- }
-
- // Otherwise, targ's type or underlying type must also be one of the interface types listed, if any.
- if !iface.isSatisfiedBy(targ) {
- check.softErrorf(pos, "%s does not satisfy %s (%s not found in %s)", targ, tpar.bound, under(targ), iface.allTypes)
- break
- }
- }
-
- return check.subst(pos, typ, smap)
-}
-
// subst returns the type typ with its type parameters tpars replaced by
// the corresponding type arguments targs, recursively.
// subst is functional in the sense that it doesn't modify the incoming
@@ -224,15 +65,28 @@ func (check *Checker) subst(pos syntax.Pos, typ Type, smap *substMap) Type {
}
// general case
- subst := subster{check, pos, make(map[Type]Type), smap}
+ var subst subster
+ subst.pos = pos
+ subst.smap = smap
+ if check != nil {
+ subst.check = check
+ subst.typMap = check.typMap
+ } else {
+ // If we don't have a *Checker and its global type map,
+ // use a local version. Besides avoiding duplicate work,
+ // the type map prevents infinite recursive substitution
+ // for recursive types (example: type T[P any] *T[P]).
+ subst.typMap = make(map[string]*Named)
+ }
+
return subst.typ(typ)
}
type subster struct {
- check *Checker
- pos syntax.Pos
- cache map[Type]Type
- smap *substMap
+ pos syntax.Pos
+ smap *substMap
+ check *Checker // nil if called via Instantiate
+ typMap map[string]*Named
}
func (subst *subster) typ(typ Type) Type {
@@ -241,7 +95,7 @@ func (subst *subster) typ(typ Type) Type {
// Call typOrNil if it's possible that typ is nil.
panic("nil typ")
- case *Basic, *bottom, *top:
+ case *Basic, *top:
// nothing to do
case *Array:
@@ -290,29 +144,20 @@ func (subst *subster) typ(typ Type) Type {
}
}
- case *Sum:
- types, copied := subst.typeList(t.types)
+ case *Union:
+ terms, copied := subst.termlist(t.terms)
if copied {
- // Don't do it manually, with a Sum literal: the new
- // types list may not be unique and NewSum may remove
- // duplicates.
- return NewSum(types)
+ // term list substitution may introduce duplicate terms (unlikely but possible).
+ // This is ok; lazy type set computation will determine the actual type set
+ // in normal form.
+ return &Union{terms, nil}
}
case *Interface:
methods, mcopied := subst.funcList(t.methods)
- types := t.types
- if t.types != nil {
- types = subst.typ(t.types)
- }
embeddeds, ecopied := subst.typeList(t.embeddeds)
- if mcopied || types != t.types || ecopied {
- iface := &Interface{methods: methods, types: types, embeddeds: embeddeds}
- if subst.check == nil {
- panic("internal error: cannot instantiate interfaces yet")
- }
- subst.check.posMap[iface] = subst.check.posMap[t] // satisfy completeInterface requirement
- subst.check.completeInterface(nopos, iface)
+ if mcopied || ecopied {
+ iface := &Interface{methods: methods, embeddeds: embeddeds, complete: t.complete}
return iface
}
@@ -342,7 +187,7 @@ func (subst *subster) typ(typ Type) Type {
}
}
- if t.tparams == nil {
+ if t.TParams().Len() == 0 {
dump(">>> %s is not parameterized", t)
return t // type is not parameterized
}
@@ -352,7 +197,7 @@ func (subst *subster) typ(typ Type) Type {
if len(t.targs) > 0 {
// already instantiated
dump(">>> %s already instantiated", t)
- assert(len(t.targs) == len(t.tparams))
+ assert(len(t.targs) == t.TParams().Len())
// For each (existing) type argument targ, determine if it needs
// to be substituted; i.e., if it is or contains a type parameter
// that has a type argument for it.
@@ -362,7 +207,7 @@ func (subst *subster) typ(typ Type) Type {
if new_targ != targ {
dump(">>> substituted %d targ %s => %s", i, targ, new_targ)
if new_targs == nil {
- new_targs = make([]Type, len(t.tparams))
+ new_targs = make([]Type, t.TParams().Len())
copy(new_targs, t.targs)
}
new_targs[i] = new_targ
@@ -382,26 +227,23 @@ func (subst *subster) typ(typ Type) Type {
// before creating a new named type, check if we have this one already
h := instantiatedHash(t, new_targs)
dump(">>> new type hash: %s", h)
- if subst.check != nil {
- if named, found := subst.check.typMap[h]; found {
- dump(">>> found %s", named)
- subst.cache[t] = named
- return named
- }
+ if named, found := subst.typMap[h]; found {
+ dump(">>> found %s", named)
+ return named
}
- // create a new named type and populate caches to avoid endless recursion
+ // create a new named type and populate typMap to avoid endless recursion
tname := NewTypeName(subst.pos, t.obj.pkg, t.obj.name, nil)
- named := subst.check.newNamed(tname, t, t.underlying, t.tparams, t.methods) // method signatures are updated lazily
+ named := subst.check.newNamed(tname, t, t.Underlying(), t.TParams(), t.methods) // method signatures are updated lazily
named.targs = new_targs
- if subst.check != nil {
- subst.check.typMap[h] = named
- }
- subst.cache[t] = named
+ subst.typMap[h] = named
+ t.expand() // must happen after typMap update to avoid infinite recursion
// do the substitution
dump(">>> subst %s with %s (new: %s)", t.underlying, subst.smap, new_targs)
- named.underlying = subst.typOrNil(t.underlying)
+ named.underlying = subst.typOrNil(t.Underlying())
+ dump(">>> underlying: %v", named.underlying)
+ assert(named.underlying != nil)
named.fromRHS = named.underlying // for cycle detection (Checker.validType)
return named
@@ -409,10 +251,6 @@ func (subst *subster) typ(typ Type) Type {
case *TypeParam:
return subst.smap.lookup(t)
- case *instance:
- // TODO(gri) can we avoid the expansion here and just substitute the type parameters?
- return subst.typ(t.expand())
-
default:
unimplemented()
}
@@ -420,14 +258,17 @@ func (subst *subster) typ(typ Type) Type {
return typ
}
-// TODO(gri) Eventually, this should be more sophisticated.
-// It won't work correctly for locally declared types.
+var instanceHashing = 0
+
func instantiatedHash(typ *Named, targs []Type) string {
+ assert(instanceHashing == 0)
+ instanceHashing++
var buf bytes.Buffer
writeTypeName(&buf, typ.obj, nil)
buf.WriteByte('[')
writeTypeList(&buf, targs, nil, nil)
buf.WriteByte(']')
+ instanceHashing--
// With respect to the represented type, whether a
// type is fully expanded or stored as instance
@@ -545,3 +386,21 @@ func (subst *subster) typeList(in []Type) (out []Type, copied bool) {
}
return
}
+
+func (subst *subster) termlist(in []*Term) (out []*Term, copied bool) {
+ out = in
+ for i, t := range in {
+ if u := subst.typ(t.typ); u != t.typ {
+ if !copied {
+ // first term that got substituted => allocate new out slice
+ // and copy all terms
+ new := make([]*Term, len(in))
+ copy(new, out)
+ out = new
+ copied = true
+ }
+ out[i] = NewTerm(t.tilde, u)
+ }
+ }
+ return
+}
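
A minimal sketch of the substitution idea implemented above, using a toy type representation rather than the types2 API (the names and shapes here are illustrative assumptions): a map from type parameters to type arguments drives a bottom-up rewrite of composite types.

package main

import "fmt"

// Type is a toy stand-in for the checker's type representation.
type Type interface{ String() string }

type TypeParam struct{ name string }
type Basic struct{ name string }
type Slice struct{ elem Type }

func (t TypeParam) String() string { return t.name }
func (t Basic) String() string     { return t.name }
func (t Slice) String() string     { return "[]" + t.elem.String() }

// substMap plays the role of makeSubstMap's projection: type parameter -> argument.
type substMap map[string]Type

// subst rewrites t bottom-up, replacing type parameters by their arguments
// and rebuilding composite types whose components changed.
func subst(t Type, m substMap) Type {
	switch t := t.(type) {
	case TypeParam:
		if targ, ok := m[t.name]; ok {
			return targ
		}
		return t
	case Slice:
		return Slice{elem: subst(t.elem, m)}
	default:
		return t // Basic types have no components to substitute
	}
}

func main() {
	m := substMap{"P": Basic{name: "int"}}
	fmt.Println(subst(Slice{elem: TypeParam{name: "P"}}, m)) // prints []int
}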
diff --git a/src/cmd/compile/internal/types2/termlist.go b/src/cmd/compile/internal/types2/termlist.go
new file mode 100644
index 0000000000..378ba6b8f4
--- /dev/null
+++ b/src/cmd/compile/internal/types2/termlist.go
@@ -0,0 +1,167 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import "bytes"
+
+// A termlist represents the type set represented by the union
+// t1 ∪ t2 ∪ ... ∪ tn of the type sets of the terms t1 to tn.
+// A termlist is in normal form if all terms are disjoint.
+// termlist operations don't require the operands to be in
+// normal form.
+type termlist []*term
+
+// allTermlist represents the set of all types.
+// It is in normal form.
+var allTermlist = termlist{new(term)}
+
+// String prints the termlist exactly (without normalization).
+func (xl termlist) String() string {
+ if len(xl) == 0 {
+ return "∅"
+ }
+ var buf bytes.Buffer
+ for i, x := range xl {
+ if i > 0 {
+ buf.WriteString(" ∪ ")
+ }
+ buf.WriteString(x.String())
+ }
+ return buf.String()
+}
+
+// isEmpty reports whether the termlist xl represents the empty set of types.
+func (xl termlist) isEmpty() bool {
+ // If there's a non-nil term, the entire list is not empty.
+ // If the termlist is in normal form, this requires at most
+ // one iteration.
+ for _, x := range xl {
+ if x != nil {
+ return false
+ }
+ }
+ return true
+}
+
+// isAll reports whether the termlist xl represents the set of all types.
+func (xl termlist) isAll() bool {
+ // If there's a 𝓤 term, the entire list is 𝓤.
+ // If the termlist is in normal form, this requires at most
+ // one iteration.
+ for _, x := range xl {
+ if x != nil && x.typ == nil {
+ return true
+ }
+ }
+ return false
+}
+
+// norm returns the normal form of xl.
+func (xl termlist) norm() termlist {
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ used := make([]bool, len(xl))
+ var rl termlist
+ for i, xi := range xl {
+ if xi == nil || used[i] {
+ continue
+ }
+ for j := i + 1; j < len(xl); j++ {
+ xj := xl[j]
+ if xj == nil || used[j] {
+ continue
+ }
+ if u1, u2 := xi.union(xj); u2 == nil {
+ // If we encounter a 𝓤 term, the entire list is 𝓤.
+ // Exit early.
+ // (Note that this is not just an optimization;
+ // if we continue, we may end up with a 𝓤 term
+ // and other terms and the result would not be
+ // in normal form.)
+ if u1.typ == nil {
+ return allTermlist
+ }
+ xi = u1
+ used[j] = true // xj is now unioned into xi - ignore it in future iterations
+ }
+ }
+ rl = append(rl, xi)
+ }
+ return rl
+}
+
+// If the type set represented by xl is specified by a single (non-𝓤) term,
+// structuralType returns that type. Otherwise it returns nil.
+func (xl termlist) structuralType() Type {
+ if nl := xl.norm(); len(nl) == 1 {
+ return nl[0].typ // if nl.isAll() then typ is nil, which is ok
+ }
+ return nil
+}
+
+// union returns the union xl ∪ yl.
+func (xl termlist) union(yl termlist) termlist {
+ return append(xl, yl...).norm()
+}
+
+// intersect returns the intersection xl ∩ yl.
+func (xl termlist) intersect(yl termlist) termlist {
+ if xl.isEmpty() || yl.isEmpty() {
+ return nil
+ }
+
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ var rl termlist
+ for _, x := range xl {
+ for _, y := range yl {
+ if r := x.intersect(y); r != nil {
+ rl = append(rl, r)
+ }
+ }
+ }
+ return rl.norm()
+}
+
+// equal reports whether xl and yl represent the same type set.
+func (xl termlist) equal(yl termlist) bool {
+ // TODO(gri) this should be more efficient
+ return xl.subsetOf(yl) && yl.subsetOf(xl)
+}
+
+// includes reports whether t ∈ xl.
+func (xl termlist) includes(t Type) bool {
+ for _, x := range xl {
+ if x.includes(t) {
+ return true
+ }
+ }
+ return false
+}
+
+// supersetOf reports whether y ⊆ xl.
+func (xl termlist) supersetOf(y *term) bool {
+ for _, x := range xl {
+ if y.subsetOf(x) {
+ return true
+ }
+ }
+ return false
+}
+
+// subsetOf reports whether xl ⊆ yl.
+func (xl termlist) subsetOf(yl termlist) bool {
+ if yl.isEmpty() {
+ return xl.isEmpty()
+ }
+
+ // each term x of xl must be a subset of yl
+ for _, x := range xl {
+ if !yl.supersetOf(x) {
+ return false // x is not a subset of yl
+ }
+ }
+ return true
+}
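
For context, the user-visible feature these term lists model is a union element in an interface used as a constraint; the interface's type set is the union of the terms' type sets, represented by a termlist and normalized lazily via norm. A minimal sketch in current Go syntax (not part of the patch):

package termdemo

// Integer's type set is ~int ∪ ~int8 ∪ ~int16 ∪ ~int32 ∪ ~int64.
type Integer interface {
	~int | ~int8 | ~int16 | ~int32 | ~int64
}

func Sum[T Integer](xs []T) T {
	var s T
	for _, x := range xs {
		s += x
	}
	return s
}

type MyInt int

var _ = Sum([]MyInt{1, 2, 3}) // MyInt is in the set via the ~int term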
diff --git a/src/cmd/compile/internal/types2/termlist_test.go b/src/cmd/compile/internal/types2/termlist_test.go
new file mode 100644
index 0000000000..2f3772ddeb
--- /dev/null
+++ b/src/cmd/compile/internal/types2/termlist_test.go
@@ -0,0 +1,313 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "strings"
+ "testing"
+)
+
+// maketl makes a termlist from a string representation of a term list.
+func maketl(s string) termlist {
+ s = strings.Replace(s, " ", "", -1)
+ names := strings.Split(s, "∪")
+ r := make(termlist, len(names))
+ for i, n := range names {
+ r[i] = testTerm(n)
+ }
+ return r
+}
+
+func TestTermlistTop(t *testing.T) {
+ if !allTermlist.isAll() {
+ t.Errorf("allTermlist is not the set of all types")
+ }
+}
+
+func TestTermlistString(t *testing.T) {
+ for _, want := range []string{
+ "∅",
+ "𝓤",
+ "int",
+ "~int",
+ "myInt",
+ "∅ ∪ ∅",
+ "𝓤 ∪ 𝓤",
+ "∅ ∪ 𝓤 ∪ int",
+ "∅ ∪ 𝓤 ∪ int ∪ myInt",
+ } {
+ if got := maketl(want).String(); got != want {
+ t.Errorf("(%v).String() == %v", want, got)
+ }
+ }
+}
+
+func TestTermlistIsEmpty(t *testing.T) {
+ for test, want := range map[string]bool{
+ "∅": true,
+ "∅ ∪ ∅": true,
+ "∅ ∪ ∅ ∪ 𝓤": false,
+ "∅ ∪ ∅ ∪ myInt": false,
+ "𝓤": false,
+ "𝓤 ∪ int": false,
+ "𝓤 ∪ myInt ∪ ∅": false,
+ } {
+ xl := maketl(test)
+ got := xl.isEmpty()
+ if got != want {
+ t.Errorf("(%v).isEmpty() == %v; want %v", test, got, want)
+ }
+ }
+}
+
+func TestTermlistIsAll(t *testing.T) {
+ for test, want := range map[string]bool{
+ "∅": false,
+ "∅ ∪ ∅": false,
+ "int ∪ ~string": false,
+ "~int ∪ myInt": false,
+ "∅ ∪ ∅ ∪ 𝓤": true,
+ "𝓤": true,
+ "𝓤 ∪ int": true,
+ "myInt ∪ 𝓤": true,
+ } {
+ xl := maketl(test)
+ got := xl.isAll()
+ if got != want {
+ t.Errorf("(%v).isAll() == %v; want %v", test, got, want)
+ }
+ }
+}
+
+func TestTermlistNorm(t *testing.T) {
+ for _, test := range []struct {
+ xl, want string
+ }{
+ {"∅", "∅"},
+ {"∅ ∪ ∅", "∅"},
+ {"∅ ∪ int", "int"},
+ {"∅ ∪ myInt", "myInt"},
+ {"𝓤 ∪ int", "𝓤"},
+ {"𝓤 ∪ myInt", "𝓤"},
+ {"int ∪ myInt", "int ∪ myInt"},
+ {"~int ∪ int", "~int"},
+ {"~int ∪ myInt", "~int"},
+ {"int ∪ ~string ∪ int", "int ∪ ~string"},
+ {"~int ∪ string ∪ 𝓤 ∪ ~string ∪ int", "𝓤"},
+ {"~int ∪ string ∪ myInt ∪ ~string ∪ int", "~int ∪ ~string"},
+ } {
+ xl := maketl(test.xl)
+ got := maketl(test.xl).norm()
+ if got.String() != test.want {
+ t.Errorf("(%v).norm() = %v; want %v", xl, got, test.want)
+ }
+ }
+}
+
+func TestTermlistStructuralType(t *testing.T) {
+ // helper to deal with nil types
+ tstring := func(typ Type) string {
+ if typ == nil {
+ return "nil"
+ }
+ return typ.String()
+ }
+
+ for test, want := range map[string]string{
+ "∅": "nil",
+ "𝓤": "nil",
+ "int": "int",
+ "myInt": "myInt",
+ "~int": "int",
+ "~int ∪ string": "nil",
+ "~int ∪ myInt": "int",
+ "∅ ∪ int": "int",
+ "∅ ∪ ~int": "int",
+ "∅ ∪ ~int ∪ string": "nil",
+ } {
+ xl := maketl(test)
+ got := tstring(xl.structuralType())
+ if got != want {
+ t.Errorf("(%v).structuralType() == %v; want %v", test, got, want)
+ }
+ }
+}
+
+func TestTermlistUnion(t *testing.T) {
+ for _, test := range []struct {
+ xl, yl, want string
+ }{
+
+ {"∅", "∅", "∅"},
+ {"∅", "𝓤", "𝓤"},
+ {"∅", "int", "int"},
+ {"𝓤", "~int", "𝓤"},
+ {"int", "~int", "~int"},
+ {"int", "string", "int ∪ string"},
+ {"int", "myInt", "int ∪ myInt"},
+ {"~int", "myInt", "~int"},
+ {"int ∪ string", "~string", "int ∪ ~string"},
+ {"~int ∪ string", "~string ∪ int", "~int ∪ ~string"},
+ {"~int ∪ string ∪ ∅", "~string ∪ int", "~int ∪ ~string"},
+ {"~int ∪ myInt ∪ ∅", "~string ∪ int", "~int ∪ ~string"},
+ {"~int ∪ string ∪ 𝓤", "~string ∪ int", "𝓤"},
+ {"~int ∪ string ∪ myInt", "~string ∪ int", "~int ∪ ~string"},
+ } {
+ xl := maketl(test.xl)
+ yl := maketl(test.yl)
+ got := xl.union(yl).String()
+ if got != test.want {
+ t.Errorf("(%v).union(%v) = %v; want %v", test.xl, test.yl, got, test.want)
+ }
+ }
+}
+
+func TestTermlistIntersect(t *testing.T) {
+ for _, test := range []struct {
+ xl, yl, want string
+ }{
+
+ {"∅", "∅", "∅"},
+ {"∅", "𝓤", "∅"},
+ {"∅", "int", "∅"},
+ {"∅", "myInt", "∅"},
+ {"𝓤", "~int", "~int"},
+ {"𝓤", "myInt", "myInt"},
+ {"int", "~int", "int"},
+ {"int", "string", "∅"},
+ {"int", "myInt", "∅"},
+ {"~int", "myInt", "myInt"},
+ {"int ∪ string", "~string", "string"},
+ {"~int ∪ string", "~string ∪ int", "int ∪ string"},
+ {"~int ∪ string ∪ ∅", "~string ∪ int", "int ∪ string"},
+ {"~int ∪ myInt ∪ ∅", "~string ∪ int", "int"},
+ {"~int ∪ string ∪ 𝓤", "~string ∪ int", "int ∪ ~string"},
+ {"~int ∪ string ∪ myInt", "~string ∪ int", "int ∪ string"},
+ } {
+ xl := maketl(test.xl)
+ yl := maketl(test.yl)
+ got := xl.intersect(yl).String()
+ if got != test.want {
+ t.Errorf("(%v).intersect(%v) = %v; want %v", test.xl, test.yl, got, test.want)
+ }
+ }
+}
+
+func TestTermlistEqual(t *testing.T) {
+ for _, test := range []struct {
+ xl, yl string
+ want bool
+ }{
+ {"∅", "∅", true},
+ {"∅", "𝓤", false},
+ {"𝓤", "𝓤", true},
+ {"𝓤 ∪ int", "𝓤", true},
+ {"𝓤 ∪ int", "string ∪ 𝓤", true},
+ {"𝓤 ∪ myInt", "string ∪ 𝓤", true},
+ {"int ∪ ~string", "string ∪ int", false},
+ {"~int ∪ string", "string ∪ myInt", false},
+ {"int ∪ ~string ∪ ∅", "string ∪ int ∪ ~string", true},
+ } {
+ xl := maketl(test.xl)
+ yl := maketl(test.yl)
+ got := xl.equal(yl)
+ if got != test.want {
+ t.Errorf("(%v).equal(%v) = %v; want %v", test.xl, test.yl, got, test.want)
+ }
+ }
+}
+
+func TestTermlistIncludes(t *testing.T) {
+ for _, test := range []struct {
+ xl, typ string
+ want bool
+ }{
+ {"∅", "int", false},
+ {"𝓤", "int", true},
+ {"~int", "int", true},
+ {"int", "string", false},
+ {"~int", "string", false},
+ {"~int", "myInt", true},
+ {"int ∪ string", "string", true},
+ {"~int ∪ string", "int", true},
+ {"~int ∪ string", "myInt", true},
+ {"~int ∪ myInt ∪ ∅", "myInt", true},
+ {"myInt ∪ ∅ ∪ 𝓤", "int", true},
+ } {
+ xl := maketl(test.xl)
+ yl := testTerm(test.typ).typ
+ got := xl.includes(yl)
+ if got != test.want {
+ t.Errorf("(%v).includes(%v) = %v; want %v", test.xl, yl, got, test.want)
+ }
+ }
+}
+
+func TestTermlistSupersetOf(t *testing.T) {
+ for _, test := range []struct {
+ xl, typ string
+ want bool
+ }{
+ {"∅", "∅", true},
+ {"∅", "𝓤", false},
+ {"∅", "int", false},
+ {"𝓤", "∅", true},
+ {"𝓤", "𝓤", true},
+ {"𝓤", "int", true},
+ {"𝓤", "~int", true},
+ {"𝓤", "myInt", true},
+ {"~int", "int", true},
+ {"~int", "~int", true},
+ {"~int", "myInt", true},
+ {"int", "~int", false},
+ {"myInt", "~int", false},
+ {"int", "string", false},
+ {"~int", "string", false},
+ {"int ∪ string", "string", true},
+ {"int ∪ string", "~string", false},
+ {"~int ∪ string", "int", true},
+ {"~int ∪ string", "myInt", true},
+ {"~int ∪ string ∪ ∅", "string", true},
+ {"~string ∪ ∅ ∪ 𝓤", "myInt", true},
+ } {
+ xl := maketl(test.xl)
+ y := testTerm(test.typ)
+ got := xl.supersetOf(y)
+ if got != test.want {
+ t.Errorf("(%v).supersetOf(%v) = %v; want %v", test.xl, y, got, test.want)
+ }
+ }
+}
+
+func TestTermlistSubsetOf(t *testing.T) {
+ for _, test := range []struct {
+ xl, yl string
+ want bool
+ }{
+ {"∅", "∅", true},
+ {"∅", "𝓤", true},
+ {"𝓤", "∅", false},
+ {"𝓤", "𝓤", true},
+ {"int", "int ∪ string", true},
+ {"~int", "int ∪ string", false},
+ {"~int", "myInt ∪ string", false},
+ {"myInt", "~int ∪ string", true},
+ {"~int", "string ∪ string ∪ int ∪ ~int", true},
+ {"myInt", "string ∪ string ∪ ~int", true},
+ {"int ∪ string", "string", false},
+ {"int ∪ string", "string ∪ int", true},
+ {"int ∪ ~string", "string ∪ int", false},
+ {"myInt ∪ ~string", "string ∪ int ∪ 𝓤", true},
+ {"int ∪ ~string", "string ∪ int ∪ ∅ ∪ string", false},
+ {"int ∪ myInt", "string ∪ ~int ∪ ∅ ∪ string", true},
+ } {
+ xl := maketl(test.xl)
+ yl := maketl(test.yl)
+ got := xl.subsetOf(yl)
+ if got != test.want {
+ t.Errorf("(%v).subsetOf(%v) = %v; want %v", test.xl, test.yl, got, test.want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/builtins.go2 b/src/cmd/compile/internal/types2/testdata/check/builtins.go2
index 3918d836b5..0cfea93bf6 100644
--- a/src/cmd/compile/internal/types2/testdata/check/builtins.go2
+++ b/src/cmd/compile/internal/types2/testdata/check/builtins.go2
@@ -6,48 +6,222 @@
package builtins
-type Bmc interface {
- type map[rune]string, chan int
+import "unsafe"
+
+// close
+
+type C0 interface{ int }
+type C1 interface{ chan int }
+type C2 interface{ chan int | <-chan int }
+type C3 interface{ chan int | chan float32 }
+type C4 interface{ chan int | chan<- int }
+type C5[T any] interface{ ~chan T | chan<- T }
+
+func _[T any](ch T) {
+ close(ch /* ERROR cannot close non-channel */)
+}
+
+func _[T C0](ch T) {
+ close(ch /* ERROR cannot close non-channel */)
+}
+
+func _[T C1](ch T) {
+ close(ch)
+}
+
+func _[T C2](ch T) {
+ close(ch /* ERROR cannot close receive-only channel */)
+}
+
+func _[T C3](ch T) {
+ close(ch)
+}
+
+func _[T C4](ch T) {
+ close(ch)
+}
+
+func _[T C5[X], X any](ch T) {
+ close(ch)
+}
+
+// delete
+
+type M0 interface{ int }
+type M1 interface{ map[string]int }
+type M2 interface { map[string]int | map[string]float64 }
+type M3 interface{ map[string]int | map[rune]int }
+type M4[K comparable, V any] interface{ map[K]V | map[rune]V }
+
+func _[T any](m T) {
+ delete(m /* ERROR not a map */, "foo")
}
-type Bms interface {
- type map[string]int, []int
+func _[T M0](m T) {
+ delete(m /* ERROR not a map */, "foo")
}
-type Bcs interface {
- type chan bool, []float64
+func _[T M1](m T) {
+ delete(m, "foo")
}
-type Bss interface {
- type []int, []string
+func _[T M2](m T) {
+ delete(m, "foo")
+ delete(m, 0 /* ERROR cannot use .* as string */)
}
-func _[T any] () {
- _ = make(T /* ERROR invalid argument */ )
- _ = make(T /* ERROR invalid argument */ , 10)
- _ = make(T /* ERROR invalid argument */ , 10, 20)
+func _[T M3](m T) {
+ delete(m /* ERROR must have identical key types */, "foo")
}
-func _[T Bmc] () {
- _ = make(T)
- _ = make(T, 10)
- _ = make /* ERROR expects 1 or 2 arguments */ (T, 10, 20)
+func _[T M4[rune, V], V any](m T) {
+ delete(m, 'k')
}
-func _[T Bms] () {
- _ = make /* ERROR expects 2 arguments */ (T)
- _ = make(T, 10)
- _ = make /* ERROR expects 2 arguments */ (T, 10, 20)
+func _[T M4[K, V], K comparable, V any](m T) {
+ delete(m /* ERROR must have identical key types */, "foo")
}
-func _[T Bcs] () {
- _ = make /* ERROR expects 2 arguments */ (T)
- _ = make(T, 10)
- _ = make /* ERROR expects 2 arguments */ (T, 10, 20)
+// make
+
+func _[
+ S1 interface{ []int },
+ S2 interface{ []int | chan int },
+
+ M1 interface{ map[string]int },
+ M2 interface{ map[string]int | chan int },
+
+ C1 interface{ chan int },
+ C2 interface{ chan int | chan string },
+]() {
+ type S0 []int
+ _ = make([]int, 10)
+ _ = make(S0, 10)
+ _ = make(S1, 10)
+ _ = make /* ERROR not enough arguments */ ()
+ _ = make /* ERROR expects 2 or 3 arguments */ (S1)
+ _ = make(S1, 10, 20)
+ _ = make /* ERROR expects 2 or 3 arguments */ (S1, 10, 20, 30)
+ _ = make(S2 /* ERROR cannot make .* no structural type */ , 10)
+
+ type M0 map[string]int
+ _ = make(map[string]int)
+ _ = make(M0)
+ _ = make(M1)
+ _ = make(M1, 10)
+ _ = make/* ERROR expects 1 or 2 arguments */(M1, 10, 20)
+ _ = make(M2 /* ERROR cannot make .* no structural type */ )
+
+ type C0 chan int
+ _ = make(chan int)
+ _ = make(C0)
+ _ = make(C1)
+ _ = make(C1, 10)
+ _ = make/* ERROR expects 1 or 2 arguments */(C1, 10, 20)
+ _ = make(C2 /* ERROR cannot make .* no structural type */ )
}
-func _[T Bss] () {
- _ = make /* ERROR expects 2 or 3 arguments */ (T)
- _ = make(T, 10)
- _ = make(T, 10, 20)
+// unsafe.Alignof
+
+func _[T comparable]() {
+ var (
+ b int64
+ a [10]T
+ s struct{ f T }
+ p *T
+ l []T
+ f func(T)
+ i interface{ m() T }
+ c chan T
+ m map[T]T
+ t T
+ )
+
+ const bb = unsafe.Alignof(b)
+ assert(bb == 8)
+ const _ = unsafe /* ERROR not constant */ .Alignof(a)
+ const _ = unsafe /* ERROR not constant */ .Alignof(s)
+ const pp = unsafe.Alignof(p)
+ assert(pp == 8)
+ const ll = unsafe.Alignof(l)
+ assert(ll == 8)
+ const ff = unsafe.Alignof(f)
+ assert(ff == 8)
+ const ii = unsafe.Alignof(i)
+ assert(ii == 8)
+ const cc = unsafe.Alignof(c)
+ assert(cc == 8)
+ const mm = unsafe.Alignof(m)
+ assert(mm == 8)
+ const _ = unsafe /* ERROR not constant */ .Alignof(t)
+}
+
+// unsafe.Offsetof
+
+func _[T comparable]() {
+ var (
+ b struct{ _, f int64 }
+ a struct{ _, f [10]T }
+ s struct{ _, f struct{ f T } }
+ p struct{ _, f *T }
+ l struct{ _, f []T }
+ f struct{ _, f func(T) }
+ i struct{ _, f interface{ m() T } }
+ c struct{ _, f chan T }
+ m struct{ _, f map[T]T }
+ t struct{ _, f T }
+ )
+
+ const bb = unsafe.Offsetof(b.f)
+ assert(bb == 8)
+ const _ = unsafe /* ERROR not constant */ .Alignof(a)
+ const _ = unsafe /* ERROR not constant */ .Alignof(s)
+ const pp = unsafe.Offsetof(p.f)
+ assert(pp == 8)
+ const ll = unsafe.Offsetof(l.f)
+ assert(ll == 24)
+ const ff = unsafe.Offsetof(f.f)
+ assert(ff == 8)
+ const ii = unsafe.Offsetof(i.f)
+ assert(ii == 16)
+ const cc = unsafe.Offsetof(c.f)
+ assert(cc == 8)
+ const mm = unsafe.Offsetof(m.f)
+ assert(mm == 8)
+ const _ = unsafe /* ERROR not constant */ .Alignof(t)
+}
+
+// unsafe.Sizeof
+
+func _[T comparable]() {
+ var (
+ b int64
+ a [10]T
+ s struct{ f T }
+ p *T
+ l []T
+ f func(T)
+ i interface{ m() T }
+ c chan T
+ m map[T]T
+ t T
+ )
+
+ const bb = unsafe.Sizeof(b)
+ assert(bb == 8)
+ const _ = unsafe /* ERROR not constant */ .Alignof(a)
+ const _ = unsafe /* ERROR not constant */ .Alignof(s)
+ const pp = unsafe.Sizeof(p)
+ assert(pp == 8)
+ const ll = unsafe.Sizeof(l)
+ assert(ll == 24)
+ const ff = unsafe.Sizeof(f)
+ assert(ff == 8)
+ const ii = unsafe.Sizeof(i)
+ assert(ii == 16)
+ const cc = unsafe.Sizeof(c)
+ assert(cc == 8)
+ const mm = unsafe.Sizeof(m)
+ assert(mm == 8)
+ const _ = unsafe /* ERROR not constant */ .Alignof(t)
}
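
The "cannot make ... no structural type" errors above correspond to constraints whose terms do not share a single underlying type (cf. structuralType in termlist.go). A hedged sketch of the accepted case, in current Go syntax and with illustrative names:

package makedemo

// SliceOf's type set has the single structural type []T, so make is permitted.
type SliceOf[T any] interface {
	~[]T
}

func Make[S SliceOf[T], T any](n int) S {
	return make(S, n)
}

var _ = Make[[]int, int](10)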
diff --git a/src/cmd/compile/internal/types2/testdata/check/builtins.src b/src/cmd/compile/internal/types2/testdata/check/builtins.src
index 6d1f47129b..17e4068d65 100644
--- a/src/cmd/compile/internal/types2/testdata/check/builtins.src
+++ b/src/cmd/compile/internal/types2/testdata/check/builtins.src
@@ -144,7 +144,7 @@ func close1() {
var r <-chan int
close() // ERROR not enough arguments
close(1, 2) // ERROR too many arguments
- close(42 /* ERROR not a channel */)
+ close(42 /* ERROR cannot close non-channel */)
close(r /* ERROR receive-only channel */)
close(c)
_ = close /* ERROR used as value */ (c)
diff --git a/src/cmd/compile/internal/types2/testdata/check/const0.src b/src/cmd/compile/internal/types2/testdata/check/const0.src
index 5608b1549b..3cffdf904c 100644
--- a/src/cmd/compile/internal/types2/testdata/check/const0.src
+++ b/src/cmd/compile/internal/types2/testdata/check/const0.src
@@ -27,7 +27,7 @@ const (
ub1 = true
ub2 = 2 < 1
ub3 = ui1 == uf1
- ub4 = true /* ERROR "cannot convert" */ == 0
+ ub4 = true /* ERROR "mismatched types untyped bool and untyped int" */ == 0
// integer values
ui0 = 0
diff --git a/src/cmd/compile/internal/types2/testdata/check/cycles4.src b/src/cmd/compile/internal/types2/testdata/check/cycles4.src
index 445babca68..924aabf475 100644
--- a/src/cmd/compile/internal/types2/testdata/check/cycles4.src
+++ b/src/cmd/compile/internal/types2/testdata/check/cycles4.src
@@ -4,6 +4,8 @@
package p
+import "unsafe"
+
// Check that all methods of T are collected before
// determining the result type of m (which embeds
// all methods of T).
@@ -13,7 +15,7 @@ type T interface {
E
}
-var _ = T.m(nil).m().e()
+var _ int = T.m(nil).m().e()
type E interface {
e() int
@@ -22,7 +24,7 @@ type E interface {
// Check that unresolved forward chains are followed
// (see also comment in resolver.go, checker.typeDecl).
-var _ = C.m(nil).m().e()
+var _ int = C.m(nil).m().e()
type A B
@@ -108,3 +110,12 @@ type Element interface {
type Event interface {
Target() Element
}
+
+// Check that accessing an interface method too early doesn't lead
+// to follow-on errors due to an incorrectly computed type set.
+
+type T8 interface {
+ m() [unsafe.Sizeof(T8.m /* ERROR undefined */ )]int
+}
+
+var _ = T8.m // no error expected here
diff --git a/src/cmd/compile/internal/types2/testdata/check/decls0.src b/src/cmd/compile/internal/types2/testdata/check/decls0.src
index e78d8867e0..f051a4f2ac 100644
--- a/src/cmd/compile/internal/types2/testdata/check/decls0.src
+++ b/src/cmd/compile/internal/types2/testdata/check/decls0.src
@@ -4,7 +4,7 @@
// type declarations
-package decls0
+package go1_17 // don't permit non-interface elements in interfaces
import "unsafe"
@@ -185,10 +185,10 @@ func f2(x *f2 /* ERROR "not a type" */ ) {}
func f3() (x f3 /* ERROR "not a type" */ ) { return }
func f4() (x *f4 /* ERROR "not a type" */ ) { return }
-func (S0) m1(x S0 /* ERROR value .* is not a type */ .m1) {}
-func (S0) m2(x *S0 /* ERROR value .* is not a type */ .m2) {}
-func (S0) m3() (x S0 /* ERROR value .* is not a type */ .m3) { return }
-func (S0) m4() (x *S0 /* ERROR value .* is not a type */ .m4) { return }
+func (S0) m1(x S0 /* ERROR illegal cycle in method declaration */ .m1) {}
+func (S0) m2(x *S0 /* ERROR illegal cycle in method declaration */ .m2) {}
+func (S0) m3() (x S0 /* ERROR illegal cycle in method declaration */ .m3) { return }
+func (S0) m4() (x *S0 /* ERROR illegal cycle in method declaration */ .m4) { return }
// interfaces may not have any blank methods
type BlankI interface {
diff --git a/src/cmd/compile/internal/types2/testdata/check/decls1.src b/src/cmd/compile/internal/types2/testdata/check/decls1.src
index e6beb78358..1167ced366 100644
--- a/src/cmd/compile/internal/types2/testdata/check/decls1.src
+++ b/src/cmd/compile/internal/types2/testdata/check/decls1.src
@@ -83,7 +83,7 @@ var (
// Constant expression initializations
var (
- v1 = 1 /* ERROR "cannot convert" */ + "foo"
+ v1 = 1 /* ERROR "mismatched types untyped int and untyped string" */ + "foo"
v2 = c + 255
v3 = c + 256 /* ERROR "overflows" */
v4 = r + 2147483647
diff --git a/src/cmd/compile/internal/types2/testdata/check/expr1.src b/src/cmd/compile/internal/types2/testdata/check/expr1.src
index 4ead815158..85ad234bbb 100644
--- a/src/cmd/compile/internal/types2/testdata/check/expr1.src
+++ b/src/cmd/compile/internal/types2/testdata/check/expr1.src
@@ -111,10 +111,10 @@ type mystring string
func _(x, y string, z mystring) {
x = x + "foo"
x = x /* ERROR not defined */ - "foo"
- x = x + 1 // ERROR cannot convert
+ x = x + 1 // ERROR mismatched types string and untyped int
x = x + y
x = x /* ERROR not defined */ - y
- x = x * 10 // ERROR cannot convert
+ x = x * 10 // ERROR mismatched types string and untyped int
}
func f() (a, b int) { return }
diff --git a/src/cmd/compile/internal/types2/testdata/check/expr2.src b/src/cmd/compile/internal/types2/testdata/check/expr2.src
index 0c959e8011..f9726b5de5 100644
--- a/src/cmd/compile/internal/types2/testdata/check/expr2.src
+++ b/src/cmd/compile/internal/types2/testdata/check/expr2.src
@@ -10,7 +10,7 @@ func _bool() {
const t = true == true
const f = true == false
_ = t /* ERROR "cannot compare" */ < f
- _ = 0 /* ERROR "cannot convert" */ == t
+ _ = 0 /* ERROR "mismatched types untyped int and untyped bool" */ == t
var b bool
var x, y float32
b = x < y
diff --git a/src/cmd/compile/internal/types2/testdata/check/expr3.src b/src/cmd/compile/internal/types2/testdata/check/expr3.src
index eab3f72c4d..fd28421dc8 100644
--- a/src/cmd/compile/internal/types2/testdata/check/expr3.src
+++ b/src/cmd/compile/internal/types2/testdata/check/expr3.src
@@ -104,7 +104,7 @@ func indexes() {
var ok mybool
_, ok = m["bar"]
_ = ok
- _ = m[0 /* ERROR "cannot use 0" */ ] + "foo" // ERROR "cannot convert"
+ _ = m[0 /* ERROR "cannot use 0" */ ] + "foo" // ERROR "mismatched types int and untyped string"
var t string
_ = t[- /* ERROR "negative" */ 1]
diff --git a/src/cmd/compile/internal/types2/testdata/check/issues.go2 b/src/cmd/compile/internal/types2/testdata/check/issues.go2
index 1c73b5da92..effc2db7ae 100644
--- a/src/cmd/compile/internal/types2/testdata/check/issues.go2
+++ b/src/cmd/compile/internal/types2/testdata/check/issues.go2
@@ -24,25 +24,23 @@ func _() {
eql[io.Reader](nil, nil)
}
-// If we have a receiver of pointer type (below: *T) we must ignore
-// the pointer in the implementation of the method lookup because
-// the type bound of T is an interface and pointer to interface types
-// have no methods and then the lookup would fail.
+// If we have a receiver of pointer to type parameter type (below: *T)
+// we don't have any methods, like for interfaces.
type C[T any] interface {
m()
}
// using type bound C
func _[T C[T]](x *T) {
- x.m()
+ x.m /* ERROR x\.m undefined */ ()
}
// using an interface literal as bound
func _[T interface{ m() }](x *T) {
- x.m()
+ x.m /* ERROR x\.m undefined */ ()
}
-func f2[_ interface{ m1(); m2() }]()
+func f2[_ interface{ m1(); m2() }]() {}
type T struct{}
func (T) m1()
@@ -57,15 +55,15 @@ func _() {
// type with a type list constraint, all of the type argument's types in its
// bound, but at least one (!), must be in the type list of the bound of the
// corresponding parameterized type's type parameter.
-type T1[P interface{type uint}] struct{}
+type T1[P interface{~uint}] struct{}
func _[P any]() {
- _ = T1[P /* ERROR P has no type constraints */ ]{}
+ _ = T1[P /* ERROR P has no constraints */ ]{}
}
// This is the original (simplified) program causing the same issue.
type Unsigned interface {
- type uint
+ ~uint
}
type T2[U Unsigned] struct {
@@ -76,8 +74,8 @@ func (u T2[U]) Add1() U {
return u.s + 1
}
-func NewT2[U any]() T2[U /* ERROR U has no type constraints */ ] {
- return T2[U /* ERROR U has no type constraints */ ]{}
+func NewT2[U any]() T2[U /* ERROR U has no constraints */ ] {
+ return T2[U /* ERROR U has no constraints */ ]{}
}
func _() {
@@ -156,7 +154,7 @@ type inf2[T any] struct{ inf2 /* ERROR illegal cycle */ [T] }
// predicate disjunction in the implementation was wrong because if a type list
// contains both an integer and a floating-point type, the type parameter is
// neither an integer or a floating-point number.
-func convert[T1, T2 interface{type int, uint, float32}](v T1) T2 {
+func convert[T1, T2 interface{~int | ~uint | ~float32}](v T1) T2 {
return T2(v)
}
@@ -168,12 +166,12 @@ func _() {
// both numeric, or both strings. The implementation had the same problem
// with this check as the conversion issue above (issue #39623).
-func issue39623[T interface{type int, string}](x, y T) T {
+func issue39623[T interface{~int | ~string}](x, y T) T {
return x + y
}
// Simplified, from https://go2goplay.golang.org/p/efS6x6s-9NI:
-func Sum[T interface{type int, string}](s []T) (sum T) {
+func Sum[T interface{~int | ~string}](s []T) (sum T) {
for _, v := range s {
sum += v
}
@@ -182,19 +180,19 @@ func Sum[T interface{type int, string}](s []T) (sum T) {
// Assignability of an unnamed pointer type to a type parameter that
// has a matching underlying type.
-func _[T interface{}, PT interface{type *T}] (x T) PT {
+func _[T interface{}, PT interface{~*T}] (x T) PT {
return &x
}
// Indexing of generic types containing type parameters in their type list:
-func at[T interface{ type []E }, E interface{}](x T, i int) E {
+func at[T interface{ ~[]E }, E interface{}](x T, i int) E {
return x[i]
}
// A generic type inside a function acts like a named type. Its underlying
// type is itself, its "operational type" is defined by the type list in
// the type bound, if any.
-func _[T interface{type int}](x T) {
+func _[T interface{~int}](x T) {
type myint int
var _ int = int(x)
var _ T = 42
@@ -203,24 +201,24 @@ func _[T interface{type int}](x T) {
// Indexing a generic type with an array type bound checks length.
// (Example by mdempsky@.)
-func _[T interface { type [10]int }](x T) {
+func _[T interface { ~[10]int }](x T) {
_ = x[9] // ok
_ = x[20 /* ERROR out of bounds */ ]
}
// Pointer indirection of a generic type.
-func _[T interface{ type *int }](p T) int {
+func _[T interface{ ~*int }](p T) int {
return *p
}
// Channel sends and receives on generic types.
-func _[T interface{ type chan int }](ch T) int {
+func _[T interface{ ~chan int }](ch T) int {
ch <- 0
return <- ch
}
// Calling of a generic variable.
-func _[T interface{ type func() }](f T) {
+func _[T interface{ ~func() }](f T) {
f()
go f()
}
@@ -232,9 +230,9 @@ func _[T interface{ type func() }](f T) {
// type parameter that was substituted with a defined type.
// Test case from an (originally) failing example.
-type sliceOf[E any] interface{ type []E }
+type sliceOf[E any] interface{ ~[]E }
-func append[T interface{}, S sliceOf[T], T2 interface{ type T }](s S, t ...T2) S
+func append[T interface{}, S sliceOf[T], T2 interface{}](s S, t ...T2) S { panic(0) }
var f func()
var cancelSlice []context.CancelFunc
@@ -242,7 +240,7 @@ var _ = append[context.CancelFunc, []context.CancelFunc, context.CancelFunc](can
// A generic function must be instantiated with a type, not a value.
-func g[T any](T) T
+func g[T any](T) T { panic(0) }
var _ = g[int]
var _ = g[nil /* ERROR is not a type */ ]
diff --git a/src/cmd/compile/internal/types2/testdata/check/issues.src b/src/cmd/compile/internal/types2/testdata/check/issues.src
index 21aa208cc7..692ed37ef4 100644
--- a/src/cmd/compile/internal/types2/testdata/check/issues.src
+++ b/src/cmd/compile/internal/types2/testdata/check/issues.src
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package issues
+package go1_17 // don't permit non-interface elements in interfaces
import (
"fmt"
- syn "cmd/compile/internal/syntax"
+ syn "regexp/syntax"
t1 "text/template"
t2 "html/template"
)
@@ -329,10 +329,10 @@ func (... /* ERROR can only use ... with final parameter in list */ TT) f()
func issue28281g() (... /* ERROR can only use ... with final parameter in list */ TT)
// Issue #26234: Make various field/method lookup errors easier to read by matching cmd/compile's output
-func issue26234a(f *syn.File) {
+func issue26234a(f *syn.Prog) {
// The error message below should refer to the actual package name (syntax)
// not the local package name (syn).
- f.foo /* ERROR f.foo undefined \(type \*syntax.File has no field or method foo\) */
+ f.foo /* ERROR f\.foo undefined \(type \*syntax\.Prog has no field or method foo\) */
}
type T struct {
@@ -357,11 +357,11 @@ func issue35895() {
var _ T = 0 // ERROR cannot use 0 \(untyped int constant\) as T
// There is only one package with name syntax imported, only use the (global) package name in error messages.
- var _ *syn.File = 0 // ERROR cannot use 0 \(untyped int constant\) as \*syntax.File
+ var _ *syn.Prog = 0 // ERROR cannot use 0 \(untyped int constant\) as \*syntax.Prog
// Because both t1 and t2 have the same global package name (template),
// qualify packages with full path name in this case.
- var _ t1.Template = t2 /* ERROR cannot use .* \(value of type "html/template".Template\) as "text/template".Template */ .Template{}
+ var _ t1.Template = t2 /* ERROR cannot use .* \(value of type .html/template.\.Template\) as .text/template.\.Template */ .Template{}
}
func issue42989(s uint) {
diff --git a/src/cmd/compile/internal/types2/testdata/check/linalg.go2 b/src/cmd/compile/internal/types2/testdata/check/linalg.go2
index 0d27603a58..efc090a1d1 100644
--- a/src/cmd/compile/internal/types2/testdata/check/linalg.go2
+++ b/src/cmd/compile/internal/types2/testdata/check/linalg.go2
@@ -9,10 +9,10 @@ import "math"
// Numeric is a type bound that matches any numeric type.
// It would likely be in a constraints package in the standard library.
type Numeric interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64,
- complex64, complex128
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~complex64 | ~complex128
}
func DotProduct[T Numeric](s1, s2 []T) T {
@@ -42,14 +42,14 @@ func AbsDifference[T NumericAbs[T]](a, b T) T {
// OrderedNumeric is a type bound that matches numeric types that support the < operator.
type OrderedNumeric interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64
}
// Complex is a type bound that matches the two complex types, which do not have a < operator.
type Complex interface {
- type complex64, complex128
+ ~complex64 | ~complex128
}
// OrderedAbs is a helper type that defines an Abs method for
diff --git a/src/cmd/compile/internal/types2/testdata/check/map2.go2 b/src/cmd/compile/internal/types2/testdata/check/map2.go2
index 2833445662..be2c49f621 100644
--- a/src/cmd/compile/internal/types2/testdata/check/map2.go2
+++ b/src/cmd/compile/internal/types2/testdata/check/map2.go2
@@ -114,7 +114,7 @@ func (it *Iterator[K, V]) Next() (K, V, bool) {
// chans
-func chans_Ranger[T any]() (*chans_Sender[T], *chans_Receiver[T])
+func chans_Ranger[T any]() (*chans_Sender[T], *chans_Receiver[T]) { panic(0) }
// A sender is used to send values to a Receiver.
type chans_Sender[T any] struct {
diff --git a/src/cmd/compile/internal/types2/testdata/check/mtypeparams.go2 b/src/cmd/compile/internal/types2/testdata/check/mtypeparams.go2
index c2f282bae1..1b406593f8 100644
--- a/src/cmd/compile/internal/types2/testdata/check/mtypeparams.go2
+++ b/src/cmd/compile/internal/types2/testdata/check/mtypeparams.go2
@@ -10,7 +10,7 @@ package p
type S struct{}
-func (S) m[T any](v T)
+func (S) m[T any](v T) {}
// TODO(gri) Once we collect interface method type parameters
// in the parser, we can enable these tests again.
diff --git a/src/cmd/compile/internal/types2/testdata/check/stmt0.src b/src/cmd/compile/internal/types2/testdata/check/stmt0.src
index bedcbe5fce..d744f2ba81 100644
--- a/src/cmd/compile/internal/types2/testdata/check/stmt0.src
+++ b/src/cmd/compile/internal/types2/testdata/check/stmt0.src
@@ -49,18 +49,18 @@ func assignments1() {
b = true
i += 1
- i += "foo" /* ERROR "cannot convert.*int" */
+ i += "foo" /* ERROR "mismatched types int and untyped string" */
f -= 1
f /= 0
f = float32(0)/0 /* ERROR "division by zero" */
- f -= "foo" /* ERROR "cannot convert.*float64" */
+ f -= "foo" /* ERROR "mismatched types float64 and untyped string" */
c *= 1
c /= 0
s += "bar"
- s += 1 /* ERROR "cannot convert.*string" */
+ s += 1 /* ERROR "mismatched types string and untyped int" */
var u64 uint64
u64 += 1<<u64
@@ -937,13 +937,13 @@ func issue6766b() {
// errors reported).
func issue10148() {
for y /* ERROR declared but not used */ := range "" {
- _ = "" /* ERROR cannot convert */ + 1
+ _ = "" /* ERROR mismatched types untyped string and untyped int*/ + 1
}
for range 1 /* ERROR cannot range over 1 */ {
- _ = "" /* ERROR cannot convert */ + 1
+ _ = "" /* ERROR mismatched types untyped string and untyped int*/ + 1
}
for y := range 1 /* ERROR cannot range over 1 */ {
- _ = "" /* ERROR cannot convert */ + 1
+ _ = "" /* ERROR mismatched types untyped string and untyped int*/ + 1
}
}
diff --git a/src/cmd/compile/internal/types2/testdata/check/tinference.go2 b/src/cmd/compile/internal/types2/testdata/check/tinference.go2
index a53fde0a2a..0afb77c1e4 100644
--- a/src/cmd/compile/internal/types2/testdata/check/tinference.go2
+++ b/src/cmd/compile/internal/types2/testdata/check/tinference.go2
@@ -8,36 +8,38 @@ import "strconv"
type any interface{}
-func f0[A any, B interface{type C}, C interface{type D}, D interface{type A}](A, B, C, D)
-func _() {
- f := f0[string]
- f("a", "b", "c", "d")
- f0("a", "b", "c", "d")
-}
-
-func f1[A any, B interface{type A}](A, B)
-func _() {
- f := f1[int]
- f(int(0), int(0))
- f1(int(0), int(0))
-}
-
-func f2[A any, B interface{type []A}](A, B)
+// Embedding stand-alone type parameters is not permitted for now. Disabled.
+// func f0[A any, B interface{~C}, C interface{~D}, D interface{~A}](A, B, C, D)
+// func _() {
+// f := f0[string]
+// f("a", "b", "c", "d")
+// f0("a", "b", "c", "d")
+// }
+//
+// func f1[A any, B interface{~A}](A, B)
+// func _() {
+// f := f1[int]
+// f(int(0), int(0))
+// f1(int(0), int(0))
+// }
+
+func f2[A any, B interface{~[]A}](A, B) {}
func _() {
f := f2[byte]
f(byte(0), []byte{})
f2(byte(0), []byte{})
}
-func f3[A any, B interface{type C}, C interface{type *A}](A, B, C)
-func _() {
- f := f3[int]
- var x int
- f(x, &x, &x)
- f3(x, &x, &x)
-}
+// Embedding stand-alone type parameters is not permitted for now. Disabled.
+// func f3[A any, B interface{~C}, C interface{~*A}](A, B, C)
+// func _() {
+// f := f3[int]
+// var x int
+// f(x, &x, &x)
+// f3(x, &x, &x)
+// }
-func f4[A any, B interface{type []C}, C interface{type *A}](A, B, C)
+func f4[A any, B interface{~[]C}, C interface{~*A}](A, B, C) {}
func _() {
f := f4[int]
var x int
@@ -45,14 +47,14 @@ func _() {
f4(x, []*int{}, &x)
}
-func f5[A interface{type struct{b B; c C}}, B any, C interface{type *B}](x B) A
+func f5[A interface{~struct{b B; c C}}, B any, C interface{~*B}](x B) A { panic(0) }
func _() {
x := f5(1.2)
var _ float64 = x.b
var _ float64 = *x.c
}
-func f6[A any, B interface{type struct{f []A}}](B) A
+func f6[A any, B interface{~struct{f []A}}](B) A { panic(0) }
func _() {
x := f6(struct{f []string}{})
var _ string = x
@@ -60,11 +62,11 @@ func _() {
// TODO(gri) Need to flag invalid recursive constraints. At the
// moment these cause infinite recursions and stack overflow.
-// func f7[A interface{type B}, B interface{type A}]()
+// func f7[A interface{type B}, B interface{~A}]()
// More realistic examples
-func Double[S interface{ type []E }, E interface{ type int, int8, int16, int32, int64 }](s S) S {
+func Double[S interface{ ~[]E }, E interface{ ~int | ~int8 | ~int16 | ~int32 | ~int64 }](s S) S {
r := make(S, len(s))
for i, v := range s {
r[i] = v + v
@@ -80,7 +82,7 @@ var _ = Double(MySlice{1})
type Setter[B any] interface {
Set(string)
- type *B
+ ~*B
}
func FromStrings[T interface{}, PT Setter[T]](s []string) []T {
diff --git a/src/cmd/compile/internal/types2/testdata/check/typeinst2.go2 b/src/cmd/compile/internal/types2/testdata/check/typeinst2.go2
index 6e2104a515..d087c26a47 100644
--- a/src/cmd/compile/internal/types2/testdata/check/typeinst2.go2
+++ b/src/cmd/compile/internal/types2/testdata/check/typeinst2.go2
@@ -85,7 +85,7 @@ type NumericAbs[T any] interface {
Abs() T
}
-func AbsDifference[T NumericAbs[T]](x T)
+func AbsDifference[T NumericAbs[T]](x T) { panic(0) }
type OrderedAbs[T any] T
@@ -97,7 +97,7 @@ func OrderedAbsDifference[T any](x T) {
// same code, reduced to essence
-func g[P interface{ m() P }](x P)
+func g[P interface{ m() P }](x P) { panic(0) }
type T4[P any] P
@@ -148,15 +148,15 @@ func _[T any](r R2[T, int], p *R2[string, T]) {
p.pm()
}
-// An interface can (explicitly) declare at most one type list.
+// It is ok to have multiple embedded unions.
type _ interface {
m0()
- type int, string, bool
- type /* ERROR multiple type lists */ float32, float64
+ ~int | ~string | ~bool
+ ~float32 | ~float64
m1()
m2()
- type /* ERROR multiple type lists */ complex64, complex128
- type /* ERROR multiple type lists */ rune
+ ~complex64 | ~complex128
+ ~rune
}
// Interface type lists may contain each type at most once.
@@ -164,23 +164,24 @@ type _ interface {
// for them to be all in a single list, and we report the error
// as well.)
type _ interface {
- type int, int /* ERROR duplicate type int */
- type /* ERROR multiple type lists */ int /* ERROR duplicate type int */
+ ~int|~int /* ERROR overlapping terms ~int */
+ ~int|int /* ERROR overlapping terms int */
+ int|int /* ERROR overlapping terms int */
}
type _ interface {
- type struct{f int}, struct{g int}, struct /* ERROR duplicate type */ {f int}
+ ~struct{f int} | ~struct{g int} | ~struct /* ERROR overlapping terms */ {f int}
}
// Interface type lists can contain any type, incl. *Named types.
// Verify that we use the underlying type to compute the operational type.
type MyInt int
-func add1[T interface{type MyInt}](x T) T {
+func add1[T interface{MyInt}](x T) T {
return x + 1
}
type MyString string
-func double[T interface{type MyInt, MyString}](x T) T {
+func double[T interface{MyInt|MyString}](x T) T {
return x + x
}
@@ -189,22 +190,22 @@ func double[T interface{type MyInt, MyString}](x T) T {
// type lists.
type E0 interface {
- type int, bool, string
+ ~int | ~bool | ~string
}
type E1 interface {
- type int, float64, string
+ ~int | ~float64 | ~string
}
type E2 interface {
- type float64
+ ~float64
}
type I0 interface {
E0
}
-func f0[T I0]()
+func f0[T I0]() {}
var _ = f0[int]
var _ = f0[bool]
var _ = f0[string]
@@ -215,7 +216,7 @@ type I01 interface {
E1
}
-func f01[T I01]()
+func f01[T I01]() {}
var _ = f01[int]
var _ = f01[bool /* ERROR does not satisfy I0 */ ]
var _ = f01[string]
@@ -227,7 +228,7 @@ type I012 interface {
E2
}
-func f012[T I012]()
+func f012[T I012]() {}
var _ = f012[int /* ERROR does not satisfy I012 */ ]
var _ = f012[bool /* ERROR does not satisfy I012 */ ]
var _ = f012[string /* ERROR does not satisfy I012 */ ]
@@ -238,7 +239,7 @@ type I12 interface {
E2
}
-func f12[T I12]()
+func f12[T I12]() {}
var _ = f12[int /* ERROR does not satisfy I12 */ ]
var _ = f12[bool /* ERROR does not satisfy I12 */ ]
var _ = f12[string /* ERROR does not satisfy I12 */ ]
@@ -246,10 +247,10 @@ var _ = f12[float64]
type I0_ interface {
E0
- type int
+ ~int
}
-func f0_[T I0_]()
+func f0_[T I0_]() {}
var _ = f0_[int]
var _ = f0_[bool /* ERROR does not satisfy I0_ */ ]
var _ = f0_[string /* ERROR does not satisfy I0_ */ ]
diff --git a/src/cmd/compile/internal/types2/testdata/check/typeparams.go2 b/src/cmd/compile/internal/types2/testdata/check/typeparams.go2
index badda01105..1ad80b1e1b 100644
--- a/src/cmd/compile/internal/types2/testdata/check/typeparams.go2
+++ b/src/cmd/compile/internal/types2/testdata/check/typeparams.go2
@@ -6,18 +6,18 @@ package p
// import "io" // for type assertion tests
-// The predeclared identifier "any" is only visible as a constraint
+// The predeclared identifier "any" can only be used as a constraint
// in a type parameter list.
-var _ any // ERROR undeclared
-func _[_ any /* ok here */ , _ interface{any /* ERROR undeclared */ }](any /* ERROR undeclared */ ) {
- var _ any /* ERROR undeclared */
+var _ any // ERROR cannot use any outside constraint position
+func _[_ any /* ok here */ , _ interface{any /* ERROR constraint */ }](any /* ERROR constraint */ ) {
+ var _ any /* ERROR constraint */
}
func identity[T any](x T) T { return x }
-func _[_ any](x int) int
-func _[T any](T /* ERROR redeclared */ T)()
-func _[T, T /* ERROR redeclared */ any]()
+func _[_ any](x int) int { panic(0) }
+func _[T any](T /* ERROR redeclared */ T)() {}
+func _[T, T /* ERROR redeclared */ any]() {}
// Constraints (incl. any) may be parenthesized.
func _[_ (any)]() {}
@@ -52,22 +52,22 @@ func swapswap[A, B any](a A, b B) (A, B) {
type F[A, B any] func(A, B) (B, A)
-func min[T interface{ type int }](x, y T) T {
+func min[T interface{ ~int }](x, y T) T {
if x < y {
return x
}
return y
}
-func _[T interface{type int, float32}](x, y T) bool { return x < y }
+func _[T interface{~int | ~float32}](x, y T) bool { return x < y }
func _[T any](x, y T) bool { return x /* ERROR cannot compare */ < y }
-func _[T interface{type int, float32, bool}](x, y T) bool { return x /* ERROR cannot compare */ < y }
+func _[T interface{~int | ~float32 | ~bool}](x, y T) bool { return x /* ERROR cannot compare */ < y }
func _[T C1[T]](x, y T) bool { return x /* ERROR cannot compare */ < y }
func _[T C2[T]](x, y T) bool { return x < y }
type C1[T any] interface{}
-type C2[T any] interface{ type int, float32 }
+type C2[T any] interface{ ~int | ~float32 }
func new[T any]() *T {
var x T
@@ -77,66 +77,71 @@ func new[T any]() *T {
var _ = new /* ERROR cannot use generic function new */
var _ *int = new[int]()
-func _[T any](map[T /* ERROR invalid map key type T \(missing comparable constraint\) */]int) // w/o constraint we don't know if T is comparable
+func _[T any](map[T /* ERROR invalid map key type T \(missing comparable constraint\) */]int) {} // w/o constraint we don't know if T is comparable
-func f1[T1 any](struct{T1}) int
+func f1[T1 any](struct{T1 /* ERROR cannot be a .* type parameter */ }) int { panic(0) }
var _ = f1[int](struct{T1}{})
type T1 = int
-func f2[t1 any](struct{t1; x float32}) int
+func f2[t1 any](struct{t1 /* ERROR cannot be a .* type parameter */ ; x float32}) int { panic(0) }
var _ = f2[t1](struct{t1; x float32}{})
type t1 = int
-func f3[A, B, C any](A, struct{x B}, func(A, struct{x B}, *C)) int
+func f3[A, B, C any](A, struct{x B}, func(A, struct{x B}, *C)) int { panic(0) }
var _ = f3[int, rune, bool](1, struct{x rune}{}, nil)
// indexing
func _[T any] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
-func _[T interface{ type int }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
-func _[T interface{ type string }] (x T, i int) { _ = x[i] }
-func _[T interface{ type []int }] (x T, i int) { _ = x[i] }
-func _[T interface{ type [10]int, *[20]int, map[int]int }] (x T, i int) { _ = x[i] }
-func _[T interface{ type string, []byte }] (x T, i int) { _ = x[i] }
-func _[T interface{ type []int, [1]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
-func _[T interface{ type string, []rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
-
-// indexing with various combinations of map types in type lists (see issue #42616)
-func _[T interface{ type []E, map[int]E }, E any](x T, i int) { _ = x[i] }
-func _[T interface{ type []E }, E any](x T, i int) { _ = &x[i] }
-func _[T interface{ type map[int]E }, E any](x T, i int) { _, _ = x[i] } // comma-ok permitted
-func _[T interface{ type []E, map[int]E }, E any](x T, i int) { _ = &x /* ERROR cannot take address */ [i] }
-func _[T interface{ type []E, map[int]E, map[uint]E }, E any](x T, i int) { _ = x /* ERROR cannot index */ [i] } // different map element types
-func _[T interface{ type []E, map[string]E }, E any](x T, i int) { _ = x[i /* ERROR cannot use i */ ] }
+func _[T interface{ ~int }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~string }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[]int }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[10]int | ~*[20]int | ~map[int]int }] (x T, i int) { _ = x /* ERROR cannot index */ [i] } // map and non-map types
+func _[T interface{ ~string | ~[]byte }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[]int | ~[1]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~string | ~[]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+
+// indexing with various combinations of map types in type sets (see issue #42616)
+func _[T interface{ ~[]E | ~map[int]E }, E any](x T, i int) { _ = x /* ERROR cannot index */ [i] } // map and non-map types
+func _[T interface{ ~[]E }, E any](x T, i int) { _ = &x[i] }
+func _[T interface{ ~map[int]E }, E any](x T, i int) { _, _ = x[i] } // comma-ok permitted
+func _[T interface{ ~map[int]E }, E any](x T, i int) { _ = &x /* ERROR cannot take address */ [i] }
+func _[T interface{ ~map[int]E | ~map[uint]E }, E any](x T, i int) { _ = x /* ERROR cannot index */ [i] } // different map element types
+func _[T interface{ ~[]E | ~map[string]E }, E any](x T, i int) { _ = x /* ERROR cannot index */ [i] } // map and non-map types
+
+// indexing with various combinations of array and other types in type sets
+func _[T interface{ [10]int }](x T, i int) { _ = x[i]; _ = x[9]; _ = x[10 /* ERROR out of bounds */ ] }
+func _[T interface{ [10]byte | string }](x T, i int) { _ = x[i]; _ = x[9]; _ = x[10 /* ERROR out of bounds */ ] }
+func _[T interface{ [10]int | *[20]int | []int }](x T, i int) { _ = x[i]; _ = x[9]; _ = x[10 /* ERROR out of bounds */ ] }
// slicing
// TODO(gri) implement this
-func _[T interface{ type string }] (x T, i, j, k int) { _ = x /* ERROR invalid operation */ [i:j:k] }
+func _[T interface{ ~string }] (x T, i, j, k int) { _ = x /* ERROR generic slice expressions not yet implemented */ [i:j:k] }
// len/cap built-ins
func _[T any](x T) { _ = len(x /* ERROR invalid argument */ ) }
-func _[T interface{ type int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string, []byte, int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string }](x T) { _ = len(x) }
-func _[T interface{ type [10]int }](x T) { _ = len(x) }
-func _[T interface{ type []byte }](x T) { _ = len(x) }
-func _[T interface{ type map[int]int }](x T) { _ = len(x) }
-func _[T interface{ type chan int }](x T) { _ = len(x) }
-func _[T interface{ type string, []byte, chan int }](x T) { _ = len(x) }
+func _[T interface{ ~int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string | ~[]byte | ~int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string }](x T) { _ = len(x) }
+func _[T interface{ ~[10]int }](x T) { _ = len(x) }
+func _[T interface{ ~[]byte }](x T) { _ = len(x) }
+func _[T interface{ ~map[int]int }](x T) { _ = len(x) }
+func _[T interface{ ~chan int }](x T) { _ = len(x) }
+func _[T interface{ ~string | ~[]byte | ~chan int }](x T) { _ = len(x) }
func _[T any](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string, []byte, int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type [10]int }](x T) { _ = cap(x) }
-func _[T interface{ type []byte }](x T) { _ = cap(x) }
-func _[T interface{ type map[int]int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type chan int }](x T) { _ = cap(x) }
-func _[T interface{ type []byte, chan int }](x T) { _ = cap(x) }
+func _[T interface{ ~int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string | ~[]byte | ~int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~[10]int }](x T) { _ = cap(x) }
+func _[T interface{ ~[]byte }](x T) { _ = cap(x) }
+func _[T interface{ ~map[int]int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~chan int }](x T) { _ = cap(x) }
+func _[T interface{ ~[]byte | ~chan int }](x T) { _ = cap(x) }
// range iteration
@@ -144,64 +149,136 @@ func _[T interface{}](x T) {
for range x /* ERROR cannot range */ {}
}
-func _[T interface{ type string, []string }](x T) {
- for range x {}
- for i := range x { _ = i }
- for i, _ := range x { _ = i }
- for i, e := range x /* ERROR must have the same element type */ { _ = i }
- for _, e := range x /* ERROR must have the same element type */ {}
- var e rune
- _ = e
- for _, (e) = range x /* ERROR must have the same element type */ {}
-}
+type myString string
+func _[
+ B1 interface{ string },
+ B2 interface{ string | myString },
-func _[T interface{ type string, []rune, map[int]rune }](x T) {
- for _, e := range x { _ = e }
- for i, e := range x { _ = i; _ = e }
-}
+ C1 interface{ chan int },
+ C2 interface{ chan int | <-chan int },
+ C3 interface{ chan<- int },
-func _[T interface{ type string, []rune, map[string]rune }](x T) {
- for _, e := range x { _ = e }
- for i, e := range x /* ERROR must have the same key type */ { _ = e }
-}
+ S1 interface{ []int },
+ S2 interface{ []int | [10]int },
-func _[T interface{ type string, chan int }](x T) {
- for range x {}
- for i := range x { _ = i }
- for i, _ := range x { _ = i } // TODO(gri) should get an error here: channels only return one value
-}
+ A1 interface{ [10]int },
+ A2 interface{ [10]int | []int },
+
+ P1 interface{ *[10]int },
+ P2 interface{ *[10]int | *[]int },
+
+ M1 interface{ map[string]int },
+ M2 interface{ map[string]int | map[string]string },
+]() {
+ var b0 string
+ for range b0 {}
+ for _ = range b0 {}
+ for _, _ = range b0 {}
+
+ var b1 B1
+ for range b1 {}
+ for _ = range b1 {}
+ for _, _ = range b1 {}
+
+ var b2 B2
+ for range b2 /* ERROR cannot range over b2 .* no structural type */ {}
+
+ var c0 chan int
+ for range c0 {}
+ for _ = range c0 {}
+ for _, _ /* ERROR permits only one iteration variable */ = range c0 {}
+
+ var c1 C1
+ for range c1 {}
+ for _ = range c1 {}
+ for _, _ /* ERROR permits only one iteration variable */ = range c1 {}
+
+ var c2 C2
+ for range c2 /* ERROR cannot range over c2 .* no structural type */ {}
+
+ var c3 C3
+ for range c3 /* ERROR receive from send-only channel */ {}
+
+ var s0 []int
+ for range s0 {}
+ for _ = range s0 {}
+ for _, _ = range s0 {}
+
+ var s1 S1
+ for range s1 {}
+ for _ = range s1 {}
+ for _, _ = range s1 {}
+
+ var s2 S2
+ for range s2 /* ERROR cannot range over s2 .* no structural type */ {}
+
+ var a0 []int
+ for range a0 {}
+ for _ = range a0 {}
+ for _, _ = range a0 {}
+
+ var a1 A1
+ for range a1 {}
+ for _ = range a1 {}
+ for _, _ = range a1 {}
+
+ var a2 A2
+ for range a2 /* ERROR cannot range over a2 .* no structural type */ {}
+
+ var p0 *[10]int
+ for range p0 {}
+ for _ = range p0 {}
+ for _, _ = range p0 {}
+
+ var p1 P1
+ for range p1 {}
+ for _ = range p1 {}
+ for _, _ = range p1 {}
+
+ var p2 P2
+ for range p2 /* ERROR cannot range over p2 .* no structural type */ {}
+
+ var m0 map[string]int
+ for range m0 {}
+ for _ = range m0 {}
+ for _, _ = range m0 {}
+
+ var m1 M1
+ for range m1 {}
+ for _ = range m1 {}
+ for _, _ = range m1 {}
-func _[T interface{ type string, chan<-int }](x T) {
- for i := range x /* ERROR send-only channel */ { _ = i }
+ var m2 M2
+ for range m2 /* ERROR cannot range over m2 .* no structural type */ {}
}
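
For context, a minimal standalone sketch of what the range rules above permit (Go 1.18+ syntax; later releases say "core type" where this patch says "structural type"): ranging over a type parameter works when its type set shares a single type that supports range, as with ~[]int below. The identifiers sum and scores are illustrative, not part of the patch.

package main

import "fmt"

// Ranging over s is allowed because S's type set has the single
// structural (core) type []int.
func sum[S interface{ ~[]int }](s S) int {
	total := 0
	for _, v := range s {
		total += v
	}
	return total
}

type scores []int

func main() {
	fmt.Println(sum(scores{1, 2, 3})) // 6
	fmt.Println(sum([]int{4, 5}))     // 9
}
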
// type inference checks
var _ = new() /* ERROR cannot infer T */
-func f4[A, B, C any](A, B) C
+func f4[A, B, C any](A, B) C { panic(0) }
var _ = f4(1, 2) /* ERROR cannot infer C */
var _ = f4[int, float32, complex128](1, 2)
-func f5[A, B, C any](A, []*B, struct{f []C}) int
+func f5[A, B, C any](A, []*B, struct{f []C}) int { panic(0) }
var _ = f5[int, float32, complex128](0, nil, struct{f []complex128}{})
var _ = f5(0, nil, struct{f []complex128}{}) // ERROR cannot infer
var _ = f5(0, []*float32{new[float32]()}, struct{f []complex128}{})
-func f6[A any](A, []A) int
+func f6[A any](A, []A) int { panic(0) }
var _ = f6(0, nil)
-func f6nil[A any](A) int
+func f6nil[A any](A) int { panic(0) }
var _ = f6nil(nil) // ERROR cannot infer
// type inference with variadic functions
-func f7[T any](...T) T
+func f7[T any](...T) T { panic(0) }
var _ int = f7() /* ERROR cannot infer T */
var _ int = f7(1)
@@ -214,7 +291,7 @@ var _ = f7(float64(1), 2.3)
var _ = f7(1, 2.3 /* ERROR does not match */ )
var _ = f7(1.2, 3 /* ERROR does not match */ )
-func f8[A, B any](A, B, ...B) int
+func f8[A, B any](A, B, ...B) int { panic(0) }
var _ = f8(1) /* ERROR not enough arguments */
var _ = f8(1, 2.3)
@@ -241,7 +318,7 @@ func (T) m3[P any]() {}
type S1[P any] struct { f P }
-func f9[P any](x S1[P])
+func f9[P any](x S1[P]) {}
func _() {
f9[int](S1[int]{42})
@@ -250,7 +327,7 @@ func _() {
type S2[A, B, C any] struct{}
-func f10[X, Y, Z any](a S2[X, int, Z], b S2[X, Y, bool])
+func f10[X, Y, Z any](a S2[X, int, Z], b S2[X, Y, bool]) {}
func _[P any]() {
f10[int, float32, string](S2[int, int, string]{}, S2[int, float32, bool]{})
@@ -261,7 +338,7 @@ func _[P any]() {
// corner case for type inference
// (was bug: after instantiating f11, the type-checker didn't mark f11 as non-generic)

-func f11[T any]()
+func f11[T any]() {}
func _() {
f11[int]()
@@ -269,7 +346,7 @@ func _() {
// the previous example was extracted from
-func f12[T interface{m() T}]()
+func f12[T interface{m() T}]() {}
type A[T any] T
@@ -297,15 +374,15 @@ func _[T any] (x T) {
type R0 struct{}
-func (R0) _[T any](x T)
-func (R0 /* ERROR invalid receiver */ ) _[R0 any]() // scope of type parameters starts at "func"
+func (R0) _[T any](x T) {}
+func (R0 /* ERROR invalid receiver */ ) _[R0 any]() {} // scope of type parameters starts at "func"
type R1[A, B any] struct{}
func (_ R1[A, B]) m0(A, B)
-func (_ R1[A, B]) m1[T any](A, B, T) T
+func (_ R1[A, B]) m1[T any](A, B, T) T { panic(0) }
func (_ R1 /* ERROR not a generic type */ [R1, _]) _()
-func (_ R1[A, B]) _[A /* ERROR redeclared */ any](B)
+func (_ R1[A, B]) _[A /* ERROR redeclared */ any](B) {}
func _() {
var r R1[int, string]
@@ -400,7 +477,7 @@ func _[T any](x T) {
}
}
-func _[T interface{type int}](x T) {
+func _[T interface{~int}](x T) {
_ = x /* ERROR not an interface */ .(int)
switch x /* ERROR not an interface */ .(type) {
}
diff --git a/src/cmd/compile/internal/types2/testdata/check/unions.go2 b/src/cmd/compile/internal/types2/testdata/check/unions.go2
new file mode 100644
index 0000000000..bcd7de6644
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/unions.go2
@@ -0,0 +1,66 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check that overlong unions don't bog down type checking.
+// Disallow them for now.
+
+package p
+
+type t int
+
+type (
+ t00 t; t01 t; t02 t; t03 t; t04 t; t05 t; t06 t; t07 t; t08 t; t09 t
+ t10 t; t11 t; t12 t; t13 t; t14 t; t15 t; t16 t; t17 t; t18 t; t19 t
+ t20 t; t21 t; t22 t; t23 t; t24 t; t25 t; t26 t; t27 t; t28 t; t29 t
+ t30 t; t31 t; t32 t; t33 t; t34 t; t35 t; t36 t; t37 t; t38 t; t39 t
+ t40 t; t41 t; t42 t; t43 t; t44 t; t45 t; t46 t; t47 t; t48 t; t49 t
+ t50 t; t51 t; t52 t; t53 t; t54 t; t55 t; t56 t; t57 t; t58 t; t59 t
+ t60 t; t61 t; t62 t; t63 t; t64 t; t65 t; t66 t; t67 t; t68 t; t69 t
+ t70 t; t71 t; t72 t; t73 t; t74 t; t75 t; t76 t; t77 t; t78 t; t79 t
+ t80 t; t81 t; t82 t; t83 t; t84 t; t85 t; t86 t; t87 t; t88 t; t89 t
+ t90 t; t91 t; t92 t; t93 t; t94 t; t95 t; t96 t; t97 t; t98 t; t99 t
+)
+
+type u99 interface {
+ t00|t01|t02|t03|t04|t05|t06|t07|t08|t09|
+ t10|t11|t12|t13|t14|t15|t16|t17|t18|t19|
+ t20|t21|t22|t23|t24|t25|t26|t27|t28|t29|
+ t30|t31|t32|t33|t34|t35|t36|t37|t38|t39|
+ t40|t41|t42|t43|t44|t45|t46|t47|t48|t49|
+ t50|t51|t52|t53|t54|t55|t56|t57|t58|t59|
+ t60|t61|t62|t63|t64|t65|t66|t67|t68|t69|
+ t70|t71|t72|t73|t74|t75|t76|t77|t78|t79|
+ t80|t81|t82|t83|t84|t85|t86|t87|t88|t89|
+ t90|t91|t92|t93|t94|t95|t96|t97|t98
+}
+
+type u100a interface {
+ u99|float32
+}
+
+type u100b interface {
+ u99|float64
+}
+
+type u101 interface {
+ t00|t01|t02|t03|t04|t05|t06|t07|t08|t09|
+ t10|t11|t12|t13|t14|t15|t16|t17|t18|t19|
+ t20|t21|t22|t23|t24|t25|t26|t27|t28|t29|
+ t30|t31|t32|t33|t34|t35|t36|t37|t38|t39|
+ t40|t41|t42|t43|t44|t45|t46|t47|t48|t49|
+ t50|t51|t52|t53|t54|t55|t56|t57|t58|t59|
+ t60|t61|t62|t63|t64|t65|t66|t67|t68|t69|
+ t70|t71|t72|t73|t74|t75|t76|t77|t78|t79|
+ t80|t81|t82|t83|t84|t85|t86|t87|t88|t89|
+ t90|t91|t92|t93|t94|t95|t96|t97|t98|t99|
+ int // ERROR cannot handle more than 100 union terms
+}
+
+type u102 interface {
+ int /* ERROR cannot handle more than 100 union terms */ |string|u100a
+}
+
+type u200 interface {
+ u100a /* ERROR cannot handle more than 100 union terms */ |u100b
+}
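
For context, a minimal standalone sketch of a union constraint well under the 100-term limit exercised above; note that embedded unions count transitively toward the limit, which is why u200 above is rejected. Small and sum are illustrative names (Go 1.18+ syntax).

package main

import "fmt"

// A small union: every type in the set supports +, so sum type-checks.
type Small interface {
	~int | ~int8 | ~int16 | ~int32 | ~int64
}

func sum[T Small](xs ...T) T {
	var s T
	for _, x := range xs {
		s += x
	}
	return s
}

func main() {
	fmt.Println(sum(1, 2, 3))    // 6
	fmt.Println(sum[int8](1, 2)) // 3
}
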
diff --git a/src/cmd/compile/internal/types2/testdata/examples/constraints.go2 b/src/cmd/compile/internal/types2/testdata/examples/constraints.go2
new file mode 100644
index 0000000000..f40d18c63e
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/examples/constraints.go2
@@ -0,0 +1,91 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file shows some examples of generic constraint interfaces.
+
+package p
+
+type (
+ // Type lists are processed as unions but an error is reported.
+ // TODO(gri) remove this once the parser doesn't accept type lists anymore.
+ _ interface{
+ type /* ERROR use generalized embedding syntax instead of a type list */ int
+ }
+ _ interface{
+ type /* ERROR use generalized embedding syntax instead of a type list */ int
+ type float32
+ }
+)
+
+type MyInt int
+
+type (
+ // Arbitrary types may be embedded like interfaces.
+ _ interface{int}
+ _ interface{~int}
+
+ // Types may be combined into a union.
+ union interface{int|~string}
+
+ // Union terms must describe disjoint (non-overlapping) type sets.
+ _ interface{int|int /* ERROR overlapping terms int */ }
+ _ interface{int|~ /* ERROR overlapping terms ~int */ int }
+ _ interface{~int|~ /* ERROR overlapping terms ~int */ int }
+ _ interface{~int|MyInt /* ERROR overlapping terms p.MyInt and ~int */ }
+ _ interface{int|interface{}}
+ _ interface{int|~string|union}
+ _ interface{int|~string|interface{int}}
+ _ interface{union|union /* ERROR overlapping terms p.union and p.union */ }
+
+ // For now we do not permit interfaces with methods in unions.
+ _ interface{~ /* ERROR invalid use of ~ */ interface{}}
+ _ interface{int|interface /* ERROR cannot use .* in union */ { m() }}
+)
+
+type (
+ // Tilde is not permitted on defined types or interfaces.
+ foo int
+ bar interface{}
+ _ interface{foo}
+ _ interface{~ /* ERROR invalid use of ~ */ foo }
+ _ interface{~ /* ERROR invalid use of ~ */ bar }
+)
+
+// Stand-alone type parameters are not permitted as elements or terms in unions.
+type (
+ _[T interface{ *T } ] struct{} // ok
+ _[T interface{ int | *T } ] struct{} // ok
+ _[T interface{ T /* ERROR cannot embed a type parameter */ } ] struct{}
+ _[T interface{ ~T /* ERROR cannot embed a type parameter */ } ] struct{}
+ _[T interface{ int|T /* ERROR cannot embed a type parameter */ }] struct{}
+)
+
+// Multiple embedded union elements are intersected. The order in which they
+// appear in the interface doesn't matter since intersection is a symmetric
+// operation.
+
+type myInt1 int
+type myInt2 int
+
+func _[T interface{ myInt1|myInt2; ~int }]() T { return T(0) }
+func _[T interface{ ~int; myInt1|myInt2 }]() T { return T(0) }
+
+// Here the intersections are empty - there's no type that's in the type set of T.
+func _[T interface{ myInt1|myInt2; int }]() T { return T(0 /* ERROR cannot convert */ ) }
+func _[T interface{ int; myInt1|myInt2 }]() T { return T(0 /* ERROR cannot convert */ ) }
+
+// Union elements may be interfaces as long as they don't define
+// any methods or embed comparable.
+
+type (
+ Integer interface{ ~int|~int8|~int16|~int32|~int64 }
+ Unsigned interface{ ~uint|~uint8|~uint16|~uint32|~uint64 }
+ Floats interface{ ~float32|~float64 }
+ Complex interface{ ~complex64|~complex128 }
+ Number interface{ Integer|Unsigned|Floats|Complex }
+ Ordered interface{ Integer|Unsigned|Floats|~string }
+
+ _ interface{ Number | error /* ERROR cannot use error in union */ }
+ _ interface{ Ordered | comparable /* ERROR cannot use comparable in union */ }
+)
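
For context, a minimal standalone sketch of the Integer/Unsigned/Floats/Number pattern shown above: method-free interfaces may be combined into further unions and used as constraints. Celsius and double are illustrative names (Go 1.18+ syntax).

package main

import "fmt"

type Integer interface{ ~int | ~int8 | ~int16 | ~int32 | ~int64 }
type Float interface{ ~float32 | ~float64 }
type Number interface{ Integer | Float } // union of method-free interfaces

type Celsius float64

// + is defined for every type in Number's type set.
func double[T Number](x T) T { return x + x }

func main() {
	fmt.Println(double(21))            // T = int
	fmt.Println(double(Celsius(18.5))) // T = Celsius (underlying float64)
}
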
diff --git a/src/cmd/compile/internal/types2/testdata/examples/functions.go2 b/src/cmd/compile/internal/types2/testdata/examples/functions.go2
index 0c2a408f02..ef8953cb43 100644
--- a/src/cmd/compile/internal/types2/testdata/examples/functions.go2
+++ b/src/cmd/compile/internal/types2/testdata/examples/functions.go2
@@ -66,7 +66,7 @@ var _ float64 = foo(42, []float64{1.0}, &s)
// Type inference works in a straight-forward manner even
// for variadic functions.
-func variadic[A, B any](A, B, ...B) int
+func variadic[A, B any](A, B, ...B) int { panic(0) }
// var _ = variadic(1) // ERROR not enough arguments
var _ = variadic(1, 2.3)
@@ -98,7 +98,7 @@ func g2b[P, Q any](x P, y Q) {
// Here's an example of a recursive function call with variadic
// arguments and type inference inferring the type parameter of
// the caller (i.e., itself).
-func max[T interface{ type int }](x ...T) T {
+func max[T interface{ ~int }](x ...T) T {
var x0 T
if len(x) > 0 {
x0 = x[0]
@@ -118,9 +118,9 @@ func max[T interface{ type int }](x ...T) T {
// Thus even if a type can be inferred successfully, the function
// call may not be valid.
-func fboth[T any](chan T)
-func frecv[T any](<-chan T)
-func fsend[T any](chan<- T)
+func fboth[T any](chan T) {}
+func frecv[T any](<-chan T) {}
+func fsend[T any](chan<- T) {}
func _() {
var both chan int
@@ -140,9 +140,9 @@ func _() {
fsend(send)
}
-func ffboth[T any](func(chan T))
-func ffrecv[T any](func(<-chan T))
-func ffsend[T any](func(chan<- T))
+func ffboth[T any](func(chan T)) {}
+func ffrecv[T any](func(<-chan T)) {}
+func ffsend[T any](func(chan<- T)) {}
func _() {
var both func(chan int)
@@ -169,9 +169,9 @@ func _() {
// assignment is permitted, parameter passing is permitted as well,
// so type inference should be able to handle these cases well.
-func g1[T any]([]T)
-func g2[T any]([]T, T)
-func g3[T any](*T, ...T)
+func g1[T any]([]T) {}
+func g2[T any]([]T, T) {}
+func g3[T any](*T, ...T) {}
func _() {
type intSlize []int
@@ -195,7 +195,7 @@ func _() {
// Here's a realistic example.
-func append[T any](s []T, t ...T) []T
+func append[T any](s []T, t ...T) []T { panic(0) }
func _() {
var f func()
@@ -208,8 +208,12 @@ func _() {
// (that would indicate a slice type). Thus, generic functions cannot
// have empty type parameter lists, either. This is a syntax error.
-func h[] /* ERROR empty type parameter list */ ()
+func h[] /* ERROR empty type parameter list */ () {}
func _() {
h[] /* ERROR operand */ ()
}
+
+// Parameterized functions must have a function body.
+
+func _ /* ERROR missing function body */ [P any]()
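
For context, a minimal standalone sketch of the "realistic example" above with a body: T is inferred from the slice argument, and the variadic arguments must be assignable to it. appendAll is an illustrative name, not part of the patch.

package main

import "fmt"

// A user-level generic append; T is inferred from s.
func appendAll[T any](s []T, t ...T) []T {
	return append(s, t...)
}

func main() {
	fmt.Println(appendAll([]int{1, 2}, 3, 4))       // [1 2 3 4]
	fmt.Println(appendAll([]string{"a"}, "b", "c")) // [a b c]
}
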
diff --git a/src/cmd/compile/internal/types2/testdata/examples/inference.go2 b/src/cmd/compile/internal/types2/testdata/examples/inference.go2
index b47ce75805..e169aec746 100644
--- a/src/cmd/compile/internal/types2/testdata/examples/inference.go2
+++ b/src/cmd/compile/internal/types2/testdata/examples/inference.go2
@@ -7,10 +7,10 @@
package p
type Ordered interface {
- type int, float64, string
+ ~int|~float64|~string
}
-func min[T Ordered](x, y T) T
+func min[T Ordered](x, y T) T { panic(0) }
func _() {
// min can be called with explicit instantiation.
@@ -37,7 +37,7 @@ func _() {
_ = min("foo", "bar")
}
-func mixed[T1, T2, T3 any](T1, T2, T3)
+func mixed[T1, T2, T3 any](T1, T2, T3) {}
func _() {
// mixed can be called with explicit instantiation.
@@ -54,7 +54,7 @@ func _() {
mixed[int, string](1.1 /* ERROR cannot use 1.1 */ , "", false)
}
-func related1[Slice interface{type []Elem}, Elem any](s Slice, e Elem)
+func related1[Slice interface{~[]Elem}, Elem any](s Slice, e Elem) {}
func _() {
// related1 can be called with explicit instantiation.
@@ -78,7 +78,7 @@ func _() {
related1(si, "foo" /* ERROR cannot use "foo" */ )
}
-func related2[Elem any, Slice interface{type []Elem}](e Elem, s Slice)
+func related2[Elem any, Slice interface{~[]Elem}](e Elem, s Slice) {}
func _() {
// related2 can be called with explicit instantiation.
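
For context, a minimal standalone sketch of constraint type inference in the style of related1/related2 above: once Slice is known from the argument, Elem is derived from the ~[]Elem constraint. lastOf and ints are illustrative names (Go 1.18+ syntax).

package main

import "fmt"

// Elem is inferred from Slice via the structural constraint ~[]Elem.
func lastOf[Slice interface{ ~[]Elem }, Elem any](s Slice) Elem {
	return s[len(s)-1]
}

type ints []int

func main() {
	fmt.Println(lastOf([]string{"a", "b"})) // Slice = []string, Elem = string
	fmt.Println(lastOf(ints{1, 2, 3}))      // Slice = ints, Elem = int
}
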
diff --git a/src/cmd/compile/internal/types2/testdata/examples/methods.go2 b/src/cmd/compile/internal/types2/testdata/examples/methods.go2
index 76c6539e1b..4e87041e54 100644
--- a/src/cmd/compile/internal/types2/testdata/examples/methods.go2
+++ b/src/cmd/compile/internal/types2/testdata/examples/methods.go2
@@ -6,6 +6,8 @@
package p
+import "unsafe"
+
// Parameterized types may have methods.
type T1[A any] struct{ a A }
@@ -94,3 +96,18 @@ func (_ T2[_, _, _]) _() int { return 42 }
type T0 struct{}
func (T0) _() {}
func (T1[A]) _() {}
+
+// A generic receiver type may constrain its type parameter such
+// that it must be a pointer type. Such receiver types are not
+// permitted.
+type T3a[P interface{ ~int | ~string | ~float64 }] P
+
+func (T3a[_]) m() {} // this is ok
+
+type T3b[P interface{ ~unsafe.Pointer }] P
+
+func (T3b /* ERROR invalid receiver */ [_]) m() {}
+
+type T3c[P interface{ *int | *string }] P
+
+func (T3c /* ERROR invalid receiver */ [_]) m() {}
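
For context, a minimal standalone sketch of the permitted case: a parameterized type may declare methods as long as its constraint does not force the receiver's base type to be a pointer type (contrast with T3b/T3c above). Box is an illustrative name (Go 1.18+ syntax).

package main

import "fmt"

// The constraint admits only non-pointer types, so Box is a valid
// method receiver base type.
type Box[P interface{ ~int | ~string }] struct{ v P }

func (b Box[P]) Get() P { return b.v }

func main() {
	fmt.Println(Box[int]{v: 1}.Get())       // 1
	fmt.Println(Box[string]{v: "hi"}.Get()) // hi
}
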
diff --git a/src/cmd/compile/internal/types2/testdata/examples/operations.go2 b/src/cmd/compile/internal/types2/testdata/examples/operations.go2
new file mode 100644
index 0000000000..18e4d6080c
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/examples/operations.go2
@@ -0,0 +1,29 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// indirection
+
+func _[P any](p P) {
+ _ = *p // ERROR cannot indirect p
+}
+
+func _[P interface{ int }](p P) {
+ _ = *p // ERROR cannot indirect p
+}
+
+func _[P interface{ *int }](p P) {
+ _ = *p
+}
+
+func _[P interface{ *int | *string }](p P) {
+ _ = *p // ERROR must have identical base types
+}
+
+type intPtr *int
+
+func _[P interface{ *int | intPtr } ](p P) {
+ var _ int = *p
+}
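
For context, a minimal standalone sketch of the last case above: indirection through a type parameter is permitted because every type in the set (*int and intPtr) has the identical base type int. deref is an illustrative name.

package main

import "fmt"

type intPtr *int

// *p is valid: all pointer types in P's type set share the base type int.
func deref[P interface{ *int | intPtr }](p P) int {
	return *p
}

func main() {
	x := 42
	fmt.Println(deref(&x))         // P = *int
	fmt.Println(deref(intPtr(&x))) // P = intPtr
}
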
diff --git a/src/cmd/compile/internal/types2/testdata/examples/types.go2 b/src/cmd/compile/internal/types2/testdata/examples/types.go2
index a7825ed2d9..d662444ead 100644
--- a/src/cmd/compile/internal/types2/testdata/examples/types.go2
+++ b/src/cmd/compile/internal/types2/testdata/examples/types.go2
@@ -155,30 +155,40 @@ type _ struct {
List /* ERROR List redeclared */ [int]
}
+// Issue #45639: We don't allow this anymore. Keep this code
+// in case we decide to revisit this decision.
+//
// It's possible to declare local types whose underlying types
// are type parameters. As with ordinary type definitions, the
// types underlying properties are "inherited" but the methods
// are not.
-func _[T interface{ m(); type int }]() {
- type L T
- var x L
-
- // m is not defined on L (it is not "inherited" from
- // its underlying type).
- x.m /* ERROR x.m undefined */ ()
-
- // But the properties of T, such as that it supports
- // the operations of the types given by its type bound,
- // are also the properties of L.
- x++
- _ = x - x
-
- // On the other hand, if we define a local alias for T,
- // that alias stands for T as expected.
- type A = T
- var y A
- y.m()
- _ = y < 0
+// func _[T interface{ m(); ~int }]() {
+// type L T
+// var x L
+//
+// // m is not defined on L (it is not "inherited" from
+// // its underlying type).
+// x.m /* ERROR x.m undefined */ ()
+//
+// // But the properties of T, such as that it supports
+// // the operations of the types given by its type bound,
+// // are also the properties of L.
+// x++
+// _ = x - x
+//
+// // On the other hand, if we define a local alias for T,
+// // that alias stands for T as expected.
+// type A = T
+// var y A
+// y.m()
+// _ = y < 0
+// }
+
+// It is not permitted to declare a local type whose underlying
+// type is a type parameter not declared by that type declaration.
+func _[T any]() {
+ type _ T // ERROR cannot use function type parameter T as RHS in type declaration
+ type _ [_ any] T // ERROR cannot use function type parameter T as RHS in type declaration
}
// As a special case, an explicit type argument may be omitted
@@ -206,15 +216,15 @@ type B0 interface {}
type B1[_ any] interface{}
type B2[_, _ any] interface{}
-func _[T1 B0]()
-func _[T1 B1[T1]]()
-func _[T1 B2 /* ERROR cannot use generic type .* without instantiation */ ]()
+func _[T1 B0]() {}
+func _[T1 B1[T1]]() {}
+func _[T1 B2 /* ERROR cannot use generic type .* without instantiation */ ]() {}
-func _[T1, T2 B0]()
-func _[T1 B1[T1], T2 B1[T2]]()
-func _[T1, T2 B2 /* ERROR cannot use generic type .* without instantiation */ ]()
+func _[T1, T2 B0]() {}
+func _[T1 B1[T1], T2 B1[T2]]() {}
+func _[T1, T2 B2 /* ERROR cannot use generic type .* without instantiation */ ]() {}
-func _[T1 B0, T2 B1[T2]]() // here B1 applies to T2
+func _[T1 B0, T2 B1[T2]]() {} // here B1 applies to T2
// When the type argument is left away, the type bound is
// instantiated for each type parameter with that type
@@ -232,11 +242,11 @@ func _[A Adder[A], B Adder[B], C Adder[A]]() {
// The type of variables (incl. parameters and return values) cannot
// be an interface with type constraints or be/embed comparable.
type I interface {
- type int
+ ~int
}
var (
- _ interface /* ERROR contains type constraints */ {type int}
+ _ interface /* ERROR contains type constraints */ {~int}
_ I /* ERROR contains type constraints */
)
@@ -267,7 +277,7 @@ func _() {
// (If a type list contains just a single const type, we could
// allow it, but such type lists don't make much sense in the
// first place.)
-func _[T interface { type int, float64 }]() {
+func _[T interface{~int|~float64}]() {
// not valid
const _ = T /* ERROR not constant */ (0)
const _ T /* ERROR invalid constant type T */ = 1
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39634.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39634.go2
index 2c1299feb0..8d14f8acaf 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39634.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39634.go2
@@ -31,13 +31,14 @@ type x7[A any] struct{ foo7 }
func main7() { var _ foo7 = x7[int]{} }
// crash 8
-type foo8[A any] interface { type A }
-func bar8[A foo8[A]](a A) {}
-func main8() {}
+// Embedding stand-alone type parameters is not permitted for now. Disabled.
+// type foo8[A any] interface { ~A }
+// func bar8[A foo8[A]](a A) {}
+// func main8() {}
// crash 9
-type foo9[A any] interface { type foo9 /* ERROR interface contains type constraints */ [A] }
-func _() { var _ = new(foo9 /* ERROR interface contains type constraints */ [int]) }
+type foo9[A any] interface { foo9 /* ERROR illegal cycle */ [A] }
+func _() { var _ = new(foo9 /* ERROR illegal cycle */ [int]) }
// crash 12
var u /* ERROR cycle */ , i [func /* ERROR used as value */ /* ERROR used as value */ (u, c /* ERROR undeclared */ /* ERROR undeclared */ ) {}(0, len /* ERROR must be called */ /* ERROR must be called */ )]c /* ERROR undeclared */ /* ERROR undeclared */
@@ -49,7 +50,7 @@ func (G15 /* ERROR generic type .* without instantiation */ ) p()
// crash 16
type Foo16[T any] r16 /* ERROR not a type */
-func r16[T any]() Foo16[Foo16[T]]
+func r16[T any]() Foo16[Foo16[T]] { panic(0) }
// crash 17
type Y17 interface{ c() }
@@ -57,7 +58,7 @@ type Z17 interface {
c() Y17
Y17 /* ERROR duplicate method */
}
-func F17[T Z17](T)
+func F17[T Z17](T) {}
// crash 18
type o18[T any] []func(_ o18[[]_ /* ERROR cannot use _ */ ])
@@ -87,5 +88,5 @@ type T26 = interface{ F26[ /* ERROR cannot have type parameters */ Z any]() }
func F26[Z any]() T26 { return F26 /* ERROR without instantiation */ /* ERROR missing method */ [] /* ERROR operand */ }
// crash 27
-func e27[T any]() interface{ x27 /* ERROR not a type */ }
+func e27[T any]() interface{ x27 /* ERROR not a type */ } { panic(0) }
func x27() { e27( /* ERROR cannot infer T */ ) } \ No newline at end of file
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39680.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39680.go2
index 9bc26f3546..e56bc35475 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39680.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39680.go2
@@ -4,16 +4,19 @@
package p
+// Embedding stand-alone type parameters is not permitted for now. Disabled.
+
+/*
import "fmt"
// Minimal test case.
-func _[T interface{type T}](x T) T{
+func _[T interface{~T}](x T) T{
return x
}
// Test case from issue.
type constr[T any] interface {
- type T
+ ~T
}
func Print[T constr[T]](s []T) {
@@ -25,3 +28,4 @@ func Print[T constr[T]](s []T) {
func f() {
Print([]string{"Hello, ", "playground\n"})
}
+*/
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39693.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39693.go2
index 316ab1982e..301c13be41 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39693.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39693.go2
@@ -4,11 +4,20 @@
package p
-type Number interface {
- int /* ERROR int is not an interface */
- float64 /* ERROR float64 is not an interface */
+type Number1 interface {
+ // embedding non-interface types is permitted
+ int
+ float64
}
-func Add[T Number](a, b T) T {
+func Add1[T Number1](a, b T) T {
return a /* ERROR not defined */ + b
}
+
+type Number2 interface {
+ int|float64
+}
+
+func Add2[T Number2](a, b T) T {
+ return a + b
+}
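
For context, a minimal standalone sketch of the Number2/Add2 pattern above: with a union constraint, + type-checks because it is defined for every type in the type set. Add is an illustrative name (Go 1.18+ syntax).

package main

import "fmt"

// Every type in the set (int, float64) supports +, so a + b is valid.
func Add[T interface{ int | float64 }](a, b T) T {
	return a + b
}

func main() {
	fmt.Println(Add(1, 2))     // 3
	fmt.Println(Add(1.5, 2.5)) // 4
}
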
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39699.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39699.go2
index 75491e7e26..72f83997c2 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39699.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39699.go2
@@ -8,7 +8,7 @@ type T0 interface{
}
type T1 interface{
- type int
+ ~int
}
type T2 interface{
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39711.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39711.go2
index df621a4c17..85eb0a78fe 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39711.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39711.go2
@@ -7,5 +7,7 @@ package p
// Do not report a duplicate type error for this type list.
// (Check types after interfaces have been completed.)
type _ interface {
- type interface{ Error() string }, interface{ String() string }
+ // TODO(gri) Once we have full type sets we can enable this again.
+ // For now we don't permit interfaces in type lists.
+ // type interface{ Error() string }, interface{ String() string }
}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39723.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39723.go2
index 55464e6b77..d5311ed3e7 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39723.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39723.go2
@@ -6,4 +6,4 @@ package p
// A constraint must be an interface; it cannot
// be a type parameter, for instance.
-func _[A interface{ type interface{} }, B A /* ERROR not an interface */ ]()
+func _[A interface{ ~int }, B A /* ERROR not an interface */ ]() {}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39725.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39725.go2
index e19b6770bf..62dc45a596 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39725.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39725.go2
@@ -4,13 +4,13 @@
package p
-func f1[T1, T2 any](T1, T2, struct{a T1; b T2})
+func f1[T1, T2 any](T1, T2, struct{a T1; b T2}) {}
func _() {
f1(42, string("foo"), struct /* ERROR does not match inferred type struct\{a int; b string\} */ {a, b int}{})
}
// simplified test case from issue
-func f2[T any](_ []T, _ func(T))
+func f2[T any](_ []T, _ func(T)) {}
func _() {
f2([]string{}, func /* ERROR does not match inferred type func\(string\) */ (f []byte) {})
}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39755.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39755.go2
index b7ab68818e..257b73a2fb 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39755.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39755.go2
@@ -4,14 +4,14 @@
package p
-func _[T interface{type map[string]int}](x T) {
+func _[T interface{~map[string]int}](x T) {
_ = x == nil
}
// simplified test case from issue
type PathParamsConstraint interface {
- type map[string]string, []struct{key, value string}
+ ~map[string]string | ~[]struct{key, value string}
}
type PathParams[T PathParamsConstraint] struct {
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39938.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39938.go2
index 76e7e369ca..0da6e103fd 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39938.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39938.go2
@@ -8,8 +8,8 @@ package p
type E0[P any] P
type E1[P any] *P
-type E2[P any] struct{ P }
-type E3[P any] struct{ *P }
+type E2[P any] struct{ _ P }
+type E3[P any] struct{ _ *P }
type T0 /* ERROR illegal cycle */ struct {
_ E0[T0]
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39948.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39948.go2
index c2b460902c..e38e57268d 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39948.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39948.go2
@@ -5,5 +5,5 @@
package p
type T[P any] interface{
- P // ERROR P is a type parameter, not an interface
+ P // ERROR cannot embed a type parameter
}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39976.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39976.go2
index 3db4eae012..d703da90a2 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39976.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39976.go2
@@ -7,7 +7,7 @@ package p
type policy[K, V any] interface{}
type LRU[K, V any] struct{}
-func NewCache[K, V any](p policy[K, V])
+func NewCache[K, V any](p policy[K, V]) {}
func _() {
var lru LRU[int, string]
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40038.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40038.go2
index 8948d61caa..0981a335da 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40038.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40038.go2
@@ -8,7 +8,7 @@ type A[T any] int
func (A[T]) m(A[T])
-func f[P interface{m(P)}]()
+func f[P interface{m(P)}]() {}
func _() {
_ = f[A[int]]
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40056.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40056.go2
index 747aab49dd..a3f3eecca0 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40056.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40056.go2
@@ -10,6 +10,6 @@ func _() {
type S struct {}
-func NewS[T any]() *S
+func NewS[T any]() *S { panic(0) }
func (_ *S /* ERROR S is not a generic type */ [T]) M()
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40301.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40301.go2
index 5d97855f8a..c78f9a1fa0 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40301.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40301.go2
@@ -7,6 +7,6 @@ package p
import "unsafe"
func _[T any](x T) {
- _ = unsafe /* ERROR undefined */ .Alignof(x)
- _ = unsafe /* ERROR undefined */ .Sizeof(x)
+ _ = unsafe.Alignof(x)
+ _ = unsafe.Sizeof(x)
}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40684.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40684.go2
index 0269c3a62c..58d0f69f65 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40684.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40684.go2
@@ -6,8 +6,8 @@ package p
type T[_ any] int
-func f[_ any]()
-func g[_, _ any]()
+func f[_ any]() {}
+func g[_, _ any]() {}
func _() {
_ = f[T /* ERROR without instantiation */ ]
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40789.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40789.go2
new file mode 100644
index 0000000000..9eea4ad60a
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40789.go2
@@ -0,0 +1,37 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "fmt"
+
+func main() {
+ m := map[string]int{
+ "a": 6,
+ "b": 7,
+ }
+ fmt.Println(copyMap[map[string]int, string, int](m))
+}
+
+type Map[K comparable, V any] interface {
+ map[K] V
+}
+
+func copyMap[M Map[K, V], K comparable, V any](m M) M {
+ m1 := make(M)
+ for k, v := range m {
+ m1[k] = v
+ }
+ return m1
+}
+
+// simpler test case from the same issue
+
+type A[X comparable] interface {
+ []X
+}
+
+func f[B A[X], X comparable]() B {
+ return nil
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue41124.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue41124.go2
index 61f766bcbd..4642ab60fc 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue41124.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue41124.go2
@@ -7,7 +7,7 @@ package p
// Test case from issue.
type Nat interface {
- type Zero, Succ
+ Zero|Succ
}
type Zero struct{}
@@ -22,7 +22,7 @@ type I1 interface {
}
type I2 interface {
- type int
+ ~int
}
type I3 interface {
@@ -47,7 +47,7 @@ type _ struct{
}
type _ struct{
- I3 // ERROR interface contains type constraints
+ I3 // ERROR interface is .* comparable
}
// General composite types.
@@ -59,19 +59,19 @@ type (
_ []I1 // ERROR interface is .* comparable
_ []I2 // ERROR interface contains type constraints
- _ *I3 // ERROR interface contains type constraints
+ _ *I3 // ERROR interface is .* comparable
_ map[I1 /* ERROR interface is .* comparable */ ]I2 // ERROR interface contains type constraints
- _ chan I3 // ERROR interface contains type constraints
+ _ chan I3 // ERROR interface is .* comparable
_ func(I1 /* ERROR interface is .* comparable */ )
_ func() I2 // ERROR interface contains type constraints
)
// Other cases.
-var _ = [...]I3 /* ERROR interface contains type constraints */ {}
+var _ = [...]I3 /* ERROR interface is .* comparable */ {}
func _(x interface{}) {
- _ = x.(I3 /* ERROR interface contains type constraints */ )
+ _ = x.(I3 /* ERROR interface is .* comparable */ )
}
type T1[_ any] struct{}
@@ -79,9 +79,9 @@ type T3[_, _, _ any] struct{}
var _ T1[I2 /* ERROR interface contains type constraints */ ]
var _ T3[int, I2 /* ERROR interface contains type constraints */ , float32]
-func f1[_ any]() int
+func f1[_ any]() int { panic(0) }
var _ = f1[I2 /* ERROR interface contains type constraints */ ]()
-func f3[_, _, _ any]() int
+func f3[_, _, _ any]() int { panic(0) }
var _ = f3[int, I2 /* ERROR interface contains type constraints */ , float32]()
func _(x interface{}) {
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue42758.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue42758.go2
index 698cb8a16b..bf0031f5d2 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue42758.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue42758.go2
@@ -17,7 +17,7 @@ func _[T any](x interface{}){
}
type constraint interface {
- type int
+ ~int
}
func _[T constraint](x interface{}){
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43671.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43671.go2
new file mode 100644
index 0000000000..6cc3801cc9
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43671.go2
@@ -0,0 +1,58 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type C0 interface{ int }
+type C1 interface{ chan int }
+type C2 interface{ chan int | <-chan int }
+type C3 interface{ chan int | chan float32 }
+type C4 interface{ chan int | chan<- int }
+type C5[T any] interface{ ~chan T | <-chan T }
+
+func _[T any](ch T) {
+ <-ch // ERROR cannot receive from non-channel
+}
+
+func _[T C0](ch T) {
+ <-ch // ERROR cannot receive from non-channel
+}
+
+func _[T C1](ch T) {
+ <-ch
+}
+
+func _[T C2](ch T) {
+ <-ch
+}
+
+func _[T C3](ch T) {
+ <-ch // ERROR channels of ch .* must have the same element type
+}
+
+func _[T C4](ch T) {
+ <-ch // ERROR cannot receive from send-only channel
+}
+
+func _[T C5[X], X any](ch T, x X) {
+ x = <-ch
+}
+
+// test case from issue, slightly modified
+type RecvChan[T any] interface {
+ ~chan T | ~<-chan T
+}
+
+func _[T any, C RecvChan[T]](ch C) T {
+ return <-ch
+}
+
+func f[T any, C interface{ chan T }](ch C) T {
+ return <-ch
+}
+
+func _(ch chan int) {
+ var x int = f(ch) // test constraint type inference for this case
+ _ = x
+}
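
For context, a minimal standalone sketch mirroring the RecvChan case above: receiving through a type parameter is allowed when every channel type in the set permits receives and they share one element type. recvOne is an illustrative name (Go 1.18+ syntax).

package main

import "fmt"

// Both ~chan int and ~<-chan int permit receives of int, so <-ch is valid.
func recvOne[C interface{ ~chan int | ~<-chan int }](ch C) int {
	return <-ch
}

func main() {
	ch := make(chan int, 1)
	ch <- 7
	fmt.Println(recvOne(ch)) // 7
}
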
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45548.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45548.go2
index b1e42497e8..b8ba0ad4a7 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45548.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45548.go2
@@ -4,7 +4,7 @@
package p
-func f[F interface{type *Q}, G interface{type *R}, Q, R any](q Q, r R) {}
+func f[F interface{~*Q}, G interface{~*R}, Q, R any](q Q, r R) {}
func _() {
f[*float64, *int](1, 2)
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45635.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45635.go2
index 65662cdc76..2937959105 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45635.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45635.go2
@@ -13,7 +13,7 @@ type N[T any] struct{}
var _ N[] /* ERROR expecting type */
type I interface {
- type map[int]int, []int
+ ~[]int
}
func _[T I](i, j int) {
@@ -27,6 +27,5 @@ func _[T I](i, j int) {
_ = s[i, j /* ERROR more than one index */ ]
var t T
- // TODO(gri) fix multiple error below
- _ = t[i, j /* ERROR more than one index */ /* ERROR more than one index */ ]
+ _ = t[i, j /* ERROR more than one index */ ]
}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45639.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45639.go2
new file mode 100644
index 0000000000..441fb4cb34
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45639.go2
@@ -0,0 +1,12 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package P
+
+// It is not permitted to declare a local type whose underlying
+// type is a type parameter not declared by that type declaration.
+func _[T any]() {
+ type _ T // ERROR cannot use function type parameter T as RHS in type declaration
+ type _ [_ any] T // ERROR cannot use function type parameter T as RHS in type declaration
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45985.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45985.go2
index 7678e348ef..f25b9d2b26 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45985.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45985.go2
@@ -5,7 +5,7 @@
package issue45985
// TODO(gri): this error should be on app[int] below.
-func app[S /* ERROR "type S = S does not match" */ interface{ type []T }, T any](s S, e T) S {
+func app[S /* ERROR "type S = S does not match" */ interface{ ~[]T }, T any](s S, e T) S {
return append(s, e)
}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46090.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46090.go2
new file mode 100644
index 0000000000..81b31974c8
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46090.go2
@@ -0,0 +1,9 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The predeclared type comparable is not visible before Go 1.18.
+
+package go1_17
+
+type _ comparable // ERROR undeclared
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46275.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46275.go2
new file mode 100644
index 0000000000..f41ae26e4b
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46275.go2
@@ -0,0 +1,26 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue46275
+
+type N[T any] struct {
+ *N[T]
+ t T
+}
+
+func (n *N[T]) Elem() T {
+ return n.t
+}
+
+type I interface {
+ Elem() string
+}
+
+func _() {
+ var n1 *N[string]
+ var _ I = n1
+ type NS N[string]
+ var n2 *NS
+ var _ I = n2
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46583.src b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46583.src
new file mode 100644
index 0000000000..da1f1ffbba
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46583.src
@@ -0,0 +1,28 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T1 struct{}
+func (t T1) m(int) {}
+var f1 func(T1)
+
+type T2 struct{}
+func (t T2) m(x int) {}
+var f2 func(T2)
+
+type T3 struct{}
+func (T3) m(int) {}
+var f3 func(T3)
+
+type T4 struct{}
+func (T4) m(x int) {}
+var f4 func(T4)
+
+func _() {
+ f1 = T1 /* ERROR func\(T1, int\) */ .m
+ f2 = T2 /* ERROR func\(t T2, x int\) */ .m
+ f3 = T3 /* ERROR func\(T3, int\) */ .m
+ f4 = T4 /* ERROR func\(_ T4, x int\) */ .m
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47031.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47031.go2
new file mode 100644
index 0000000000..b184f9b5b7
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47031.go2
@@ -0,0 +1,20 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Mer interface { M() }
+
+func F[T Mer](p *T) {
+ p.M /* ERROR p\.M undefined */ ()
+}
+
+type MyMer int
+
+func (MyMer) M() {}
+
+func _() {
+ F(new(MyMer))
+ F[Mer](nil)
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47115.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47115.go2
new file mode 100644
index 0000000000..00828eb997
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47115.go2
@@ -0,0 +1,40 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type C0 interface{ int }
+type C1 interface{ chan int }
+type C2 interface{ chan int | <-chan int }
+type C3 interface{ chan int | chan float32 }
+type C4 interface{ chan int | chan<- int }
+type C5[T any] interface{ ~chan T | chan<- T }
+
+func _[T any](ch T) {
+ ch /* ERROR cannot send to non-channel */ <- 0
+}
+
+func _[T C0](ch T) {
+ ch /* ERROR cannot send to non-channel */ <- 0
+}
+
+func _[T C1](ch T) {
+ ch <- 0
+}
+
+func _[T C2](ch T) {
+ ch /* ERROR cannot send to receive-only channel */ <- 0
+}
+
+func _[T C3](ch T) {
+ ch /* ERROR channels of ch .* must have the same element type */ <- 0
+}
+
+func _[T C4](ch T) {
+ ch <- 0
+}
+
+func _[T C5[X], X any](ch T, x X) {
+ ch <- x
+}
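
For context, a minimal standalone sketch mirroring the C4/C5 cases above: sending through a type parameter is allowed when every channel type in the set permits sends of the same element type. sendOne is an illustrative name (Go 1.18+ syntax).

package main

import "fmt"

// Both ~chan int and ~chan<- int permit sends of int, so ch <- v is valid.
func sendOne[C interface{ ~chan int | ~chan<- int }](ch C, v int) {
	ch <- v
}

func main() {
	ch := make(chan int, 1)
	sendOne(ch, 42)
	fmt.Println(<-ch) // 42
}
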
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47127.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47127.go2
new file mode 100644
index 0000000000..108d600a38
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47127.go2
@@ -0,0 +1,37 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Embedding of stand-alone type parameters is not permitted.
+
+package p
+
+type (
+ _[P any] interface{ *P | []P | chan P | map[string]P }
+ _[P any] interface{ P /* ERROR "cannot embed a type parameter" */ }
+ _[P any] interface{ ~P /* ERROR "cannot embed a type parameter" */ }
+ _[P any] interface{ int | P /* ERROR "cannot embed a type parameter" */ }
+ _[P any] interface{ int | ~P /* ERROR "cannot embed a type parameter" */ }
+)
+
+func _[P any]() {
+ type (
+ _[P any] interface{ *P | []P | chan P | map[string]P }
+ _[P any] interface{ P /* ERROR "cannot embed a type parameter" */ }
+ _[P any] interface{ ~P /* ERROR "cannot embed a type parameter" */ }
+ _[P any] interface{ int | P /* ERROR "cannot embed a type parameter" */ }
+ _[P any] interface{ int | ~P /* ERROR "cannot embed a type parameter" */ }
+
+ _ interface{ *P | []P | chan P | map[string]P }
+ _ interface{ P /* ERROR "cannot embed a type parameter" */ }
+ _ interface{ ~P /* ERROR "cannot embed a type parameter" */ }
+ _ interface{ int | P /* ERROR "cannot embed a type parameter" */ }
+ _ interface{ int | ~P /* ERROR "cannot embed a type parameter" */ }
+ )
+}
+
+func _[P any, Q interface{ *P | []P | chan P | map[string]P }]() {}
+func _[P any, Q interface{ P /* ERROR "cannot embed a type parameter" */ }]() {}
+func _[P any, Q interface{ ~P /* ERROR "cannot embed a type parameter" */ }]() {}
+func _[P any, Q interface{ int | P /* ERROR "cannot embed a type parameter" */ }]() {}
+func _[P any, Q interface{ int | ~P /* ERROR "cannot embed a type parameter" */ }]() {}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47411.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47411.go2
new file mode 100644
index 0000000000..77281a19a2
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47411.go2
@@ -0,0 +1,26 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f[_ comparable]() {}
+func g[_ interface{interface{comparable; ~int|~string}}]() {}
+
+func _[P comparable,
+ Q interface{ comparable; ~int|~string },
+ R any, // not comparable
+ S interface{ comparable; ~func() }, // not comparable
+]() {
+ _ = f[int]
+ _ = f[P]
+ _ = f[Q]
+ _ = f[func( /* ERROR does not satisfy comparable */ )]
+ _ = f[R /* ERROR R has no constraints */ ]
+
+ _ = g[int]
+ _ = g[P /* ERROR P has no type constraints */ ]
+ _ = g[Q]
+ _ = g[func( /* ERROR does not satisfy comparable */ )]
+ _ = g[R /* ERROR R has no constraints */ ]
+}
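
For context, a minimal standalone sketch of the comparable constraint being exercised above: it is satisfied by type arguments whose values can be compared with ==, such as int and string, but not by func types. indexOf is an illustrative name.

package main

import "fmt"

// == on x and target is permitted because T is constrained to comparable.
func indexOf[T comparable](xs []T, target T) int {
	for i, x := range xs {
		if x == target {
			return i
		}
	}
	return -1
}

func main() {
	fmt.Println(indexOf([]string{"a", "b", "c"}, "b")) // 1
	fmt.Println(indexOf([]int{1, 2, 3}, 4))            // -1
}
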
diff --git a/src/cmd/compile/internal/types2/tuple.go b/src/cmd/compile/internal/types2/tuple.go
new file mode 100644
index 0000000000..a3946beab5
--- /dev/null
+++ b/src/cmd/compile/internal/types2/tuple.go
@@ -0,0 +1,36 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// A Tuple represents an ordered list of variables; a nil *Tuple is a valid (empty) tuple.
+// Tuples are used as components of signatures and to represent the type of multiple
+// assignments; they are not first class types of Go.
+type Tuple struct {
+ vars []*Var
+}
+
+// NewTuple returns a new tuple for the given variables.
+func NewTuple(x ...*Var) *Tuple {
+ if len(x) > 0 {
+ return &Tuple{vars: x}
+ }
+ // TODO(gri) Don't represent empty tuples with a (*Tuple)(nil) pointer;
+ // it's too subtle and causes problems.
+ return nil
+}
+
+// Len returns the number of variables of tuple t.
+func (t *Tuple) Len() int {
+ if t != nil {
+ return len(t.vars)
+ }
+ return 0
+}
+
+// At returns the i'th variable of tuple t.
+func (t *Tuple) At(i int) *Var { return t.vars[i] }
+
+func (t *Tuple) Underlying() Type { return t }
+func (t *Tuple) String() string { return TypeString(t, nil) }
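
For context, a minimal standalone sketch using the exported go/types package, whose Tuple API (NewTuple, Len, At) mirrors the types2 code above; as noted there, a nil *Tuple is a valid empty tuple, so Len on it reports 0.

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	x := types.NewVar(token.NoPos, nil, "x", types.Typ[types.Int])
	y := types.NewVar(token.NoPos, nil, "y", types.Typ[types.String])
	tup := types.NewTuple(x, y)
	for i := 0; i < tup.Len(); i++ {
		fmt.Println(tup.At(i).Name(), tup.At(i).Type()) // x int, y string
	}
	fmt.Println(types.NewTuple().Len()) // 0: empty tuple is represented as nil
}
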
diff --git a/src/cmd/compile/internal/types2/type.go b/src/cmd/compile/internal/types2/type.go
index e6c260ff67..637829613b 100644
--- a/src/cmd/compile/internal/types2/type.go
+++ b/src/cmd/compile/internal/types2/type.go
@@ -4,12 +4,6 @@
package types2
-import (
- "cmd/compile/internal/syntax"
- "fmt"
- "sync/atomic"
-)
-
// A Type represents a type of Go.
// All types implement the Type interface.
type Type interface {
@@ -22,895 +16,55 @@ type Type interface {
String() string
}
-// BasicKind describes the kind of basic type.
-type BasicKind int
-
-const (
- Invalid BasicKind = iota // type is invalid
-
- // predeclared types
- Bool
- Int
- Int8
- Int16
- Int32
- Int64
- Uint
- Uint8
- Uint16
- Uint32
- Uint64
- Uintptr
- Float32
- Float64
- Complex64
- Complex128
- String
- UnsafePointer
-
- // types for untyped values
- UntypedBool
- UntypedInt
- UntypedRune
- UntypedFloat
- UntypedComplex
- UntypedString
- UntypedNil
-
- // aliases
- Byte = Uint8
- Rune = Int32
-)
-
-// BasicInfo is a set of flags describing properties of a basic type.
-type BasicInfo int
-
-// Properties of basic types.
-const (
- IsBoolean BasicInfo = 1 << iota
- IsInteger
- IsUnsigned
- IsFloat
- IsComplex
- IsString
- IsUntyped
-
- IsOrdered = IsInteger | IsFloat | IsString
- IsNumeric = IsInteger | IsFloat | IsComplex
- IsConstType = IsBoolean | IsNumeric | IsString
-)
-
-// A Basic represents a basic type.
-type Basic struct {
- kind BasicKind
- info BasicInfo
- name string
-}
-
-// Kind returns the kind of basic type b.
-func (b *Basic) Kind() BasicKind { return b.kind }
-
-// Info returns information about properties of basic type b.
-func (b *Basic) Info() BasicInfo { return b.info }
-
-// Name returns the name of basic type b.
-func (b *Basic) Name() string { return b.name }
-
-// An Array represents an array type.
-type Array struct {
- len int64
- elem Type
-}
-
-// NewArray returns a new array type for the given element type and length.
-// A negative length indicates an unknown length.
-func NewArray(elem Type, len int64) *Array { return &Array{len: len, elem: elem} }
-
-// Len returns the length of array a.
-// A negative result indicates an unknown length.
-func (a *Array) Len() int64 { return a.len }
-
-// Elem returns element type of array a.
-func (a *Array) Elem() Type { return a.elem }
-
-// A Slice represents a slice type.
-type Slice struct {
- elem Type
-}
-
-// NewSlice returns a new slice type for the given element type.
-func NewSlice(elem Type) *Slice { return &Slice{elem: elem} }
-
-// Elem returns the element type of slice s.
-func (s *Slice) Elem() Type { return s.elem }
-
-// A Struct represents a struct type.
-type Struct struct {
- fields []*Var
- tags []string // field tags; nil if there are no tags
-}
-
-// NewStruct returns a new struct with the given fields and corresponding field tags.
-// If a field with index i has a tag, tags[i] must be that tag, but len(tags) may be
-// only as long as required to hold the tag with the largest index i. Consequently,
-// if no field has a tag, tags may be nil.
-func NewStruct(fields []*Var, tags []string) *Struct {
- var fset objset
- for _, f := range fields {
- if f.name != "_" && fset.insert(f) != nil {
- panic("multiple fields with the same name")
- }
- }
- if len(tags) > len(fields) {
- panic("more tags than fields")
- }
- return &Struct{fields: fields, tags: tags}
-}
-
-// NumFields returns the number of fields in the struct (including blank and embedded fields).
-func (s *Struct) NumFields() int { return len(s.fields) }
-
-// Field returns the i'th field for 0 <= i < NumFields().
-func (s *Struct) Field(i int) *Var { return s.fields[i] }
-
-// Tag returns the i'th field tag for 0 <= i < NumFields().
-func (s *Struct) Tag(i int) string {
- if i < len(s.tags) {
- return s.tags[i]
- }
- return ""
-}
-
-// A Pointer represents a pointer type.
-type Pointer struct {
- base Type // element type
-}
-
-// NewPointer returns a new pointer type for the given element (base) type.
-func NewPointer(elem Type) *Pointer { return &Pointer{base: elem} }
-
-// Elem returns the element type for the given pointer p.
-func (p *Pointer) Elem() Type { return p.base }
-
-// A Tuple represents an ordered list of variables; a nil *Tuple is a valid (empty) tuple.
-// Tuples are used as components of signatures and to represent the type of multiple
-// assignments; they are not first class types of Go.
-type Tuple struct {
- vars []*Var
-}
-
-// NewTuple returns a new tuple for the given variables.
-func NewTuple(x ...*Var) *Tuple {
- if len(x) > 0 {
- return &Tuple{vars: x}
- }
- // TODO(gri) Don't represent empty tuples with a (*Tuple)(nil) pointer;
- // it's too subtle and causes problems.
- return nil
-}
-
-// Len returns the number variables of tuple t.
-func (t *Tuple) Len() int {
- if t != nil {
- return len(t.vars)
- }
- return 0
-}
-
-// At returns the i'th variable of tuple t.
-func (t *Tuple) At(i int) *Var { return t.vars[i] }
-
-// A Signature represents a (non-builtin) function or method type.
-// The receiver is ignored when comparing signatures for identity.
-type Signature struct {
- // We need to keep the scope in Signature (rather than passing it around
- // and store it in the Func Object) because when type-checking a function
- // literal we call the general type checker which returns a general Type.
- // We then unpack the *Signature and use the scope for the literal body.
- rparams []*TypeName // receiver type parameters from left to right; or nil
- tparams []*TypeName // type parameters from left to right; or nil
- scope *Scope // function scope, present for package-local signatures
- recv *Var // nil if not a method
- params *Tuple // (incoming) parameters from left to right; or nil
- results *Tuple // (outgoing) results from left to right; or nil
- variadic bool // true if the last parameter's type is of the form ...T (or string, for append built-in only)
-}
-
-// NewSignature returns a new function type for the given receiver, parameters,
-// and results, either of which may be nil. If variadic is set, the function
-// is variadic, it must have at least one parameter, and the last parameter
-// must be of unnamed slice type.
-func NewSignature(recv *Var, params, results *Tuple, variadic bool) *Signature {
- if variadic {
- n := params.Len()
- if n == 0 {
- panic("types2.NewSignature: variadic function must have at least one parameter")
- }
- if _, ok := params.At(n - 1).typ.(*Slice); !ok {
- panic("types2.NewSignature: variadic parameter must be of unnamed slice type")
- }
- }
- return &Signature{recv: recv, params: params, results: results, variadic: variadic}
-}
-
-// Recv returns the receiver of signature s (if a method), or nil if a
-// function. It is ignored when comparing signatures for identity.
-//
-// For an abstract method, Recv returns the enclosing interface either
-// as a *Named or an *Interface. Due to embedding, an interface may
-// contain methods whose receiver type is a different interface.
-func (s *Signature) Recv() *Var { return s.recv }
-
-// TParams returns the type parameters of signature s, or nil.
-func (s *Signature) TParams() []*TypeName { return s.tparams }
-
-// RParams returns the receiver type params of signature s, or nil.
-func (s *Signature) RParams() []*TypeName { return s.rparams }
-
-// SetTParams sets the type parameters of signature s.
-func (s *Signature) SetTParams(tparams []*TypeName) { s.tparams = tparams }
-
-// Params returns the parameters of signature s, or nil.
-func (s *Signature) Params() *Tuple { return s.params }
-
-// Results returns the results of signature s, or nil.
-func (s *Signature) Results() *Tuple { return s.results }
-
-// Variadic reports whether the signature s is variadic.
-func (s *Signature) Variadic() bool { return s.variadic }
-
-// A Sum represents a set of possible types.
-// Sums are currently used to represent type lists of interfaces
-// and thus the underlying types of type parameters; they are not
-// first class types of Go.
-type Sum struct {
- types []Type // types are unique
-}
-
-// NewSum returns a new Sum type consisting of the provided
-// types if there are more than one. If there is exactly one
-// type, it returns that type. If the list of types is empty
-// the result is nil.
-func NewSum(types []Type) Type {
- if len(types) == 0 {
- return nil
- }
-
- // What should happen if types contains a sum type?
- // Do we flatten the types list? For now we check
- // and panic. This should not be possible for the
- // current use case of type lists.
- // TODO(gri) Come up with the rules for sum types.
- for _, t := range types {
- if _, ok := t.(*Sum); ok {
- panic("sum type contains sum type - unimplemented")
- }
- }
-
- if len(types) == 1 {
- return types[0]
- }
- return &Sum{types: types}
-}
-
-// is reports whether all types in t satisfy pred.
-func (s *Sum) is(pred func(Type) bool) bool {
- if s == nil {
- return false
- }
- for _, t := range s.types {
- if !pred(t) {
- return false
- }
- }
- return true
-}
-
-// An Interface represents an interface type.
-type Interface struct {
- methods []*Func // ordered list of explicitly declared methods
- types Type // (possibly a Sum) type declared with a type list (TODO(gri) need better field name)
- embeddeds []Type // ordered list of explicitly embedded types
-
- allMethods []*Func // ordered list of methods declared with or embedded in this interface (TODO(gri): replace with mset)
- allTypes Type // intersection of all embedded and locally declared types (TODO(gri) need better field name)
-
- obj Object // type declaration defining this interface; or nil (for better error messages)
-}
-
-// unpack unpacks a type into a list of types.
-// TODO(gri) Try to eliminate the need for this function.
-func unpack(typ Type) []Type {
- if typ == nil {
- return nil
- }
- if sum := asSum(typ); sum != nil {
- return sum.types
- }
- return []Type{typ}
-}
-
-// is reports whether interface t represents types that all satisfy pred.
-func (t *Interface) is(pred func(Type) bool) bool {
- if t.allTypes == nil {
- return false // we must have at least one type! (was bug)
- }
- for _, t := range unpack(t.allTypes) {
- if !pred(t) {
- return false
- }
- }
- return true
-}
-
-// emptyInterface represents the empty (completed) interface
-var emptyInterface = Interface{allMethods: markComplete}
-
-// markComplete is used to mark an empty interface as completely
-// set up by setting the allMethods field to a non-nil empty slice.
-var markComplete = make([]*Func, 0)
-
-// NewInterface returns a new (incomplete) interface for the given methods and embedded types.
-// Each embedded type must have an underlying type of interface type.
-// NewInterface takes ownership of the provided methods and may modify their types by setting
-// missing receivers. To compute the method set of the interface, Complete must be called.
-//
-// Deprecated: Use NewInterfaceType instead which allows any (even non-defined) interface types
-// to be embedded. This is necessary for interfaces that embed alias type names referring to
-// non-defined (literal) interface types.
-func NewInterface(methods []*Func, embeddeds []*Named) *Interface {
- tnames := make([]Type, len(embeddeds))
- for i, t := range embeddeds {
- tnames[i] = t
- }
- return NewInterfaceType(methods, tnames)
-}
-
-// NewInterfaceType returns a new (incomplete) interface for the given methods and embedded types.
-// Each embedded type must have an underlying type of interface type (this property is not
-// verified for defined types, which may be in the process of being set up and which don't
-// have a valid underlying type yet).
-// NewInterfaceType takes ownership of the provided methods and may modify their types by setting
-// missing receivers. To compute the method set of the interface, Complete must be called.
-func NewInterfaceType(methods []*Func, embeddeds []Type) *Interface {
- if len(methods) == 0 && len(embeddeds) == 0 {
- return &emptyInterface
- }
-
- // set method receivers if necessary
- typ := new(Interface)
- for _, m := range methods {
- if sig := m.typ.(*Signature); sig.recv == nil {
- sig.recv = NewVar(m.pos, m.pkg, "", typ)
- }
- }
-
- // All embedded types should be interfaces; however, defined types
- // may not yet be fully resolved. Only verify that non-defined types
- // are interfaces. This matches the behavior of the code before the
- // fix for #25301 (issue #25596).
- for _, t := range embeddeds {
- if _, ok := t.(*Named); !ok && !IsInterface(t) {
- panic("embedded type is not an interface")
- }
- }
-
- // sort for API stability
- sortMethods(methods)
- sortTypes(embeddeds)
-
- typ.methods = methods
- typ.embeddeds = embeddeds
- return typ
-}
-
-// NumExplicitMethods returns the number of explicitly declared methods of interface t.
-func (t *Interface) NumExplicitMethods() int { return len(t.methods) }
-
-// ExplicitMethod returns the i'th explicitly declared method of interface t for 0 <= i < t.NumExplicitMethods().
-// The methods are ordered by their unique Id.
-func (t *Interface) ExplicitMethod(i int) *Func { return t.methods[i] }
-
-// NumEmbeddeds returns the number of embedded types in interface t.
-func (t *Interface) NumEmbeddeds() int { return len(t.embeddeds) }
-
-// Embedded returns the i'th embedded defined (*Named) type of interface t for 0 <= i < t.NumEmbeddeds().
-// The result is nil if the i'th embedded type is not a defined type.
-//
-// Deprecated: Use EmbeddedType which is not restricted to defined (*Named) types.
-func (t *Interface) Embedded(i int) *Named { tname, _ := t.embeddeds[i].(*Named); return tname }
-
-// EmbeddedType returns the i'th embedded type of interface t for 0 <= i < t.NumEmbeddeds().
-func (t *Interface) EmbeddedType(i int) Type { return t.embeddeds[i] }
-
-// NumMethods returns the total number of methods of interface t.
-// The interface must have been completed.
-func (t *Interface) NumMethods() int { t.assertCompleteness(); return len(t.allMethods) }
-
-func (t *Interface) assertCompleteness() {
- if t.allMethods == nil {
- panic("interface is incomplete")
- }
-}
-
-// Method returns the i'th method of interface t for 0 <= i < t.NumMethods().
-// The methods are ordered by their unique Id.
-// The interface must have been completed.
-func (t *Interface) Method(i int) *Func { t.assertCompleteness(); return t.allMethods[i] }
-
-// Empty reports whether t is the empty interface.
-func (t *Interface) Empty() bool {
- if t.allMethods != nil {
- // interface is complete - quick test
- // A non-nil allTypes may still be empty and represents the bottom type.
- return len(t.allMethods) == 0 && t.allTypes == nil
- }
- return !t.iterate(func(t *Interface) bool {
- return len(t.methods) > 0 || t.types != nil
- }, nil)
-}
-
-// HasTypeList reports whether interface t has a type list, possibly from an embedded type.
-func (t *Interface) HasTypeList() bool {
- if t.allMethods != nil {
- // interface is complete - quick test
- return t.allTypes != nil
- }
-
- return t.iterate(func(t *Interface) bool {
- return t.types != nil
- }, nil)
-}
-
-// IsComparable reports whether interface t is or embeds the predeclared interface "comparable".
-func (t *Interface) IsComparable() bool {
- if t.allMethods != nil {
- // interface is complete - quick test
- _, m := lookupMethod(t.allMethods, nil, "==")
- return m != nil
- }
-
- return t.iterate(func(t *Interface) bool {
- _, m := lookupMethod(t.methods, nil, "==")
- return m != nil
- }, nil)
-}
-
-// IsConstraint reports t.HasTypeList() || t.IsComparable().
-func (t *Interface) IsConstraint() bool {
- if t.allMethods != nil {
- // interface is complete - quick test
- if t.allTypes != nil {
- return true
- }
- _, m := lookupMethod(t.allMethods, nil, "==")
- return m != nil
- }
-
- return t.iterate(func(t *Interface) bool {
- if t.types != nil {
- return true
- }
- _, m := lookupMethod(t.methods, nil, "==")
- return m != nil
- }, nil)
-}
-
-// iterate calls f with t and then with any embedded interface of t, recursively, until f returns true.
-// iterate reports whether any call to f returned true.
-func (t *Interface) iterate(f func(*Interface) bool, seen map[*Interface]bool) bool {
- if f(t) {
- return true
- }
- for _, e := range t.embeddeds {
- // e should be an interface but be careful (it may be invalid)
- if e := asInterface(e); e != nil {
- // Cyclic interfaces such as "type E interface { E }" are not permitted
- // but they are still constructed and we need to detect such cycles.
- if seen[e] {
- continue
- }
- if seen == nil {
- seen = make(map[*Interface]bool)
- }
- seen[e] = true
- if e.iterate(f, seen) {
- return true
- }
- }
- }
- return false
-}
-
-// isSatisfiedBy reports whether interface t's type list is satisfied by the type typ.
-// If the type list is empty (absent), typ trivially satisfies the interface.
-// TODO(gri) This is not a great name. Eventually, we should have a more comprehensive
-// "implements" predicate.
-func (t *Interface) isSatisfiedBy(typ Type) bool {
- t.Complete()
- if t.allTypes == nil {
- return true
- }
- types := unpack(t.allTypes)
- return includes(types, typ) || includes(types, under(typ))
-}
-
-// Complete computes the interface's method set. It must be called by users of
-// NewInterfaceType and NewInterface after the interface's embedded types are
-// fully defined and before using the interface type in any way other than to
-// form other types. The interface must not contain duplicate methods or a
-// panic occurs. Complete returns the receiver.
-func (t *Interface) Complete() *Interface {
- // TODO(gri) consolidate this method with Checker.completeInterface
- if t.allMethods != nil {
- return t
- }
-
- t.allMethods = markComplete // avoid infinite recursion
-
- var todo []*Func
- var methods []*Func
- var seen objset
- addMethod := func(m *Func, explicit bool) {
- switch other := seen.insert(m); {
- case other == nil:
- methods = append(methods, m)
- case explicit:
- panic("duplicate method " + m.name)
- default:
- // check method signatures after all locally embedded interfaces are computed
- todo = append(todo, m, other.(*Func))
- }
- }
-
- for _, m := range t.methods {
- addMethod(m, true)
- }
-
- allTypes := t.types
+// top represents the top of the type lattice.
+// It is the underlying type of a type parameter that
+// can be satisfied by any type (ignoring methods),
+// because its type constraint contains no restrictions
+// besides methods.
+type top struct{}
- for _, typ := range t.embeddeds {
- utyp := under(typ)
- etyp := asInterface(utyp)
- if etyp == nil {
- if utyp != Typ[Invalid] {
- panic(fmt.Sprintf("%s is not an interface", typ))
- }
- continue
- }
- etyp.Complete()
- for _, m := range etyp.allMethods {
- addMethod(m, false)
- }
- allTypes = intersect(allTypes, etyp.allTypes)
- }
+// theTop is the singleton top type.
+var theTop = &top{}
- for i := 0; i < len(todo); i += 2 {
- m := todo[i]
- other := todo[i+1]
- if !Identical(m.typ, other.typ) {
- panic("duplicate method " + m.name)
- }
- }
+func (t *top) Underlying() Type { return t }
+func (t *top) String() string { return TypeString(t, nil) }
- if methods != nil {
- sortMethods(methods)
- t.allMethods = methods
+// under returns the true expanded underlying type.
+// If it doesn't exist, the result is Typ[Invalid].
+// under must only be called when a type is known
+// to be fully set up.
+func under(t Type) Type {
+ // TODO(gri) is this correct for *Union?
+ if n := asNamed(t); n != nil {
+ return n.under()
}
- t.allTypes = allTypes
-
return t
}
-// A Map represents a map type.
-type Map struct {
- key, elem Type
-}
-
-// NewMap returns a new map for the given key and element types.
-func NewMap(key, elem Type) *Map {
- return &Map{key: key, elem: elem}
-}
-
-// Key returns the key type of map m.
-func (m *Map) Key() Type { return m.key }
-
-// Elem returns the element type of map m.
-func (m *Map) Elem() Type { return m.elem }
-
-// A Chan represents a channel type.
-type Chan struct {
- dir ChanDir
- elem Type
-}
-
-// A ChanDir value indicates a channel direction.
-type ChanDir int
-
-// The direction of a channel is indicated by one of these constants.
-const (
- SendRecv ChanDir = iota
- SendOnly
- RecvOnly
-)
-
-// NewChan returns a new channel type for the given direction and element type.
-func NewChan(dir ChanDir, elem Type) *Chan {
- return &Chan{dir: dir, elem: elem}
-}
-
-// Dir returns the direction of channel c.
-func (c *Chan) Dir() ChanDir { return c.dir }
-
-// Elem returns the element type of channel c.
-func (c *Chan) Elem() Type { return c.elem }
-
-// TODO(gri) Clean up Named struct below; specifically the fromRHS field (can we use underlying?).
-
-// A Named represents a named (defined) type.
-type Named struct {
- check *Checker // for Named.under implementation
- info typeInfo // for cycle detection
- obj *TypeName // corresponding declared object
- orig *Named // original, uninstantiated type
- fromRHS Type // type (on RHS of declaration) this *Named type is derived from (for cycle reporting)
- underlying Type // possibly a *Named during setup; never a *Named once set up completely
- tparams []*TypeName // type parameters, or nil
- targs []Type // type arguments (after instantiation), or nil
- methods []*Func // methods declared for this type (not the method set of this type); signatures are type-checked lazily
-}
-
-// NewNamed returns a new named type for the given type name, underlying type, and associated methods.
-// If the given type name obj doesn't have a type yet, its type is set to the returned named type.
-// The underlying type must not be a *Named.
-func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
- if _, ok := underlying.(*Named); ok {
- panic("types2.NewNamed: underlying type must not be *Named")
- }
- return (*Checker)(nil).newNamed(obj, nil, underlying, nil, methods)
-}
-
-// newNamed is like NewNamed but with a *Checker receiver and additional orig argument.
-func (check *Checker) newNamed(obj *TypeName, orig *Named, underlying Type, tparams []*TypeName, methods []*Func) *Named {
- typ := &Named{check: check, obj: obj, orig: orig, fromRHS: underlying, underlying: underlying, tparams: tparams, methods: methods}
- if typ.orig == nil {
- typ.orig = typ
- }
- if obj.typ == nil {
- obj.typ = typ
- }
- return typ
-}
-
-// Obj returns the type name for the named type t.
-func (t *Named) Obj() *TypeName { return t.obj }
-
-// Orig returns the original generic type an instantiated type is derived from.
-// If t is not an instantiated type, the result is t.
-func (t *Named) Orig() *Named { return t.orig }
-
-// TODO(gri) Come up with a better representation and API to distinguish
-// between parameterized instantiated and non-instantiated types.
-
-// TParams returns the type parameters of the named type t, or nil.
-// The result is non-nil for an (originally) parameterized type even if it is instantiated.
-func (t *Named) TParams() []*TypeName { return t.tparams }
-
-// SetTParams sets the type parameters of the named type t.
-func (t *Named) SetTParams(tparams []*TypeName) { t.tparams = tparams }
-
-// TArgs returns the type arguments after instantiation of the named type t, or nil if not instantiated.
-func (t *Named) TArgs() []Type { return t.targs }
-
-// SetTArgs sets the type arguments of the named type t.
-func (t *Named) SetTArgs(args []Type) { t.targs = args }
-
-// NumMethods returns the number of explicit methods whose receiver is named type t.
-func (t *Named) NumMethods() int { return len(t.methods) }
-
-// Method returns the i'th method of named type t for 0 <= i < t.NumMethods().
-func (t *Named) Method(i int) *Func { return t.methods[i] }
-
-// SetUnderlying sets the underlying type and marks t as complete.
-func (t *Named) SetUnderlying(underlying Type) {
- if underlying == nil {
- panic("types2.Named.SetUnderlying: underlying type must not be nil")
- }
- if _, ok := underlying.(*Named); ok {
- panic("types2.Named.SetUnderlying: underlying type must not be *Named")
- }
- t.underlying = underlying
-}
-
-// AddMethod adds method m unless it is already in the method list.
-func (t *Named) AddMethod(m *Func) {
- if i, _ := lookupMethod(t.methods, m.pkg, m.name); i < 0 {
- t.methods = append(t.methods, m)
- }
-}
-
-// Note: This is a uint32 rather than a uint64 because the
-// respective 64 bit atomic instructions are not available
-// on all platforms.
-var lastId uint32
-
-// nextId returns a value increasing monotonically by 1 with
-// each call, starting with 1. It may be called concurrently.
-func nextId() uint64 { return uint64(atomic.AddUint32(&lastId, 1)) }
-
-// A TypeParam represents a type parameter type.
-type TypeParam struct {
- check *Checker // for lazy type bound completion
- id uint64 // unique id, for debugging only
- obj *TypeName // corresponding type name
- index int // type parameter index in source order, starting at 0
- bound Type // *Named or *Interface; underlying type is always *Interface
-}
-
-// Obj returns the type name for the type parameter t.
-func (t *TypeParam) Obj() *TypeName { return t.obj }
-
-// NewTypeParam returns a new TypeParam.
-func (check *Checker) NewTypeParam(obj *TypeName, index int, bound Type) *TypeParam {
- assert(bound != nil)
- typ := &TypeParam{check: check, id: nextId(), obj: obj, index: index, bound: bound}
- if obj.typ == nil {
- obj.typ = typ
- }
- return typ
-}
-
-func (t *TypeParam) Bound() *Interface {
- iface := asInterface(t.bound)
- // use the type bound position if we have one
- pos := nopos
- if n, _ := t.bound.(*Named); n != nil {
- pos = n.obj.pos
- }
- // TODO(gri) switch this to an unexported method on Checker.
- t.check.completeInterface(pos, iface)
- return iface
-}
-
// optype returns a type's operational type. Except for
// type parameters, the operational type is the same
// as the underlying type (as returned by under). For
-// Type parameters, the operational type is determined
-// by the corresponding type bound's type list. The
-// result may be the bottom or top type, but it is never
-// the incoming type parameter.
+// Type parameters, the operational type is the structural
+// type, if any; otherwise it's the top type.
+// The result is never the incoming type parameter.
func optype(typ Type) Type {
if t := asTypeParam(typ); t != nil {
+ // TODO(gri) review accuracy of this comment
// If the optype is typ, return the top type as we have
// no information. It also prevents infinite recursion
// via the asTypeParam converter function. This can happen
// for a type parameter list of the form:
// (type T interface { type T }).
// See also issue #39680.
- if u := t.Bound().allTypes; u != nil && u != typ {
- // u != typ and u is a type parameter => under(u) != typ, so this is ok
- return under(u)
+ if u := t.structuralType(); u != nil {
+ assert(u != typ) // "naked" type parameters cannot be embedded
+ return u
}
return theTop
}
return under(typ)
}
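
A user-level sketch of what the "structural type" mentioned above buys (illustrative only, assumes Go 1.18+ generics syntax; the names SliceOf, last, and IDs are invented): when every type in a constraint's type set shares the same underlying type, generic code may use that structure through the type parameter.

package main

import "fmt"

// Every type in this constraint's type set has underlying type []E,
// so []E is the structural type and indexing through S is permitted.
type SliceOf[E any] interface{ ~[]E }

func last[S SliceOf[E], E any](s S) E {
	return s[len(s)-1] // allowed via the structural type []E
}

type IDs []int

func main() {
	fmt.Println(last([]string{"a", "b"})) // b
	fmt.Println(last(IDs{1, 2, 3}))       // 3
}
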
-// An instance represents an instantiated generic type syntactically
-// (without expanding the instantiation). Type instances appear only
-// during type-checking and are replaced by their fully instantiated
-// (expanded) types before the end of type-checking.
-type instance struct {
- check *Checker // for lazy instantiation
- pos syntax.Pos // position of type instantiation; for error reporting only
- base *Named // parameterized type to be instantiated
- targs []Type // type arguments
- poslist []syntax.Pos // position of each targ; for error reporting only
- value Type // base(targs...) after instantiation or Typ[Invalid]; nil if not yet set
-}
-
-// expand returns the instantiated (= expanded) type of t.
-// The result is either an instantiated *Named type, or
-// Typ[Invalid] if there was an error.
-func (t *instance) expand() Type {
- v := t.value
- if v == nil {
- v = t.check.instantiate(t.pos, t.base, t.targs, t.poslist)
- if v == nil {
- v = Typ[Invalid]
- }
- t.value = v
- }
- // After instantiation we must have an invalid or a *Named type.
- if debug && v != Typ[Invalid] {
- _ = v.(*Named)
- }
- return v
-}
-
-// expand expands a type instance into its instantiated
-// type and leaves all other types alone. expand does
-// not recurse.
-func expand(typ Type) Type {
- if t, _ := typ.(*instance); t != nil {
- return t.expand()
- }
- return typ
-}
-
-// expandf is set to expand.
-// Call expandf when calling expand causes compile-time cycle error.
-var expandf func(Type) Type
-
-func init() { expandf = expand }
-
-// bottom represents the bottom of the type lattice.
-// It is the underlying type of a type parameter that
-// cannot be satisfied by any type, usually because
-// the intersection of type constraints left nothing).
-type bottom struct{}
-
-// theBottom is the singleton bottom type.
-var theBottom = &bottom{}
-
-// top represents the top of the type lattice.
-// It is the underlying type of a type parameter that
-// can be satisfied by any type (ignoring methods),
-// usually because the type constraint has no type
-// list.
-type top struct{}
-
-// theTop is the singleton top type.
-var theTop = &top{}
-
-// Type-specific implementations of Underlying.
-func (t *Basic) Underlying() Type { return t }
-func (t *Array) Underlying() Type { return t }
-func (t *Slice) Underlying() Type { return t }
-func (t *Struct) Underlying() Type { return t }
-func (t *Pointer) Underlying() Type { return t }
-func (t *Tuple) Underlying() Type { return t }
-func (t *Signature) Underlying() Type { return t }
-func (t *Sum) Underlying() Type { return t }
-func (t *Interface) Underlying() Type { return t }
-func (t *Map) Underlying() Type { return t }
-func (t *Chan) Underlying() Type { return t }
-func (t *Named) Underlying() Type { return t.underlying }
-func (t *TypeParam) Underlying() Type { return t }
-func (t *instance) Underlying() Type { return t }
-func (t *bottom) Underlying() Type { return t }
-func (t *top) Underlying() Type { return t }
-
-// Type-specific implementations of String.
-func (t *Basic) String() string { return TypeString(t, nil) }
-func (t *Array) String() string { return TypeString(t, nil) }
-func (t *Slice) String() string { return TypeString(t, nil) }
-func (t *Struct) String() string { return TypeString(t, nil) }
-func (t *Pointer) String() string { return TypeString(t, nil) }
-func (t *Tuple) String() string { return TypeString(t, nil) }
-func (t *Signature) String() string { return TypeString(t, nil) }
-func (t *Sum) String() string { return TypeString(t, nil) }
-func (t *Interface) String() string { return TypeString(t, nil) }
-func (t *Map) String() string { return TypeString(t, nil) }
-func (t *Chan) String() string { return TypeString(t, nil) }
-func (t *Named) String() string { return TypeString(t, nil) }
-func (t *TypeParam) String() string { return TypeString(t, nil) }
-func (t *instance) String() string { return TypeString(t, nil) }
-func (t *bottom) String() string { return TypeString(t, nil) }
-func (t *top) String() string { return TypeString(t, nil) }
-
-// under returns the true expanded underlying type.
-// If it doesn't exist, the result is Typ[Invalid].
-// under must only be called when a type is known
-// to be fully set up.
-func under(t Type) Type {
- // TODO(gri) is this correct for *Sum?
- if n := asNamed(t); n != nil {
- return n.under()
- }
- return t
-}
-
// Converters
//
// A converter must only be called when a type is
@@ -944,39 +98,26 @@ func asPointer(t Type) *Pointer {
return op
}
-// asTuple is not needed - not provided
-
func asSignature(t Type) *Signature {
op, _ := optype(t).(*Signature)
return op
}
-func asSum(t Type) *Sum {
- op, _ := optype(t).(*Sum)
- return op
-}
+// If the argument to asInterface, asNamed, or asTypeParam is of the respective type
+// (possibly after expanding an instance type), these methods return that type.
+// Otherwise the result is nil.
+// asInterface does not need to look at optype (type sets don't contain interfaces)
func asInterface(t Type) *Interface {
- op, _ := optype(t).(*Interface)
- return op
-}
-
-func asMap(t Type) *Map {
- op, _ := optype(t).(*Map)
- return op
-}
-
-func asChan(t Type) *Chan {
- op, _ := optype(t).(*Chan)
- return op
+ u, _ := under(t).(*Interface)
+ return u
}
-// If the argument to asNamed and asTypeParam is of the respective types
-// (possibly after expanding an instance type), these methods return that type.
-// Otherwise the result is nil.
-
func asNamed(t Type) *Named {
- e, _ := expand(t).(*Named)
+ e, _ := t.(*Named)
+ if e != nil {
+ e.expand()
+ }
return e
}
@@ -991,3 +132,4 @@ func AsPointer(t Type) *Pointer { return asPointer(t) }
func AsNamed(t Type) *Named { return asNamed(t) }
func AsSignature(t Type) *Signature { return asSignature(t) }
func AsInterface(t Type) *Interface { return asInterface(t) }
+func AsTypeParam(t Type) *TypeParam { return asTypeParam(t) }
diff --git a/src/cmd/compile/internal/types2/typeparam.go b/src/cmd/compile/internal/types2/typeparam.go
new file mode 100644
index 0000000000..4b4282efe0
--- /dev/null
+++ b/src/cmd/compile/internal/types2/typeparam.go
@@ -0,0 +1,143 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import "sync/atomic"
+
+// Note: This is a uint32 rather than a uint64 because the
+// respective 64 bit atomic instructions are not available
+// on all platforms.
+var lastID uint32
+
+// nextID returns a value increasing monotonically by 1 with
+// each call, starting with 1. It may be called concurrently.
+func nextID() uint64 { return uint64(atomic.AddUint32(&lastID, 1)) }
+
+// A TypeParam represents a type parameter type.
+type TypeParam struct {
+ check *Checker // for lazy type bound completion
+ id uint64 // unique id, for debugging only
+ obj *TypeName // corresponding type name
+ index int // type parameter index in source order, starting at 0
+ // TODO(rfindley): this could also be Typ[Invalid]. Verify that this is handled correctly.
+ bound Type // *Named or *Interface; underlying type is always *Interface
+}
+
+// Obj returns the type name for the type parameter t.
+func (t *TypeParam) Obj() *TypeName { return t.obj }
+
+// NewTypeParam returns a new TypeParam. Type parameters may be set on a Named
+// or Signature type by calling SetTParams. Setting a type parameter on more
+// than one type will result in a panic.
+//
+// The bound argument can be nil, and set later via SetConstraint.
+func (check *Checker) NewTypeParam(obj *TypeName, bound Type) *TypeParam {
+ // Always increment lastID, even if it is not used.
+ id := nextID()
+ if check != nil {
+ check.nextID++
+ id = check.nextID
+ }
+ typ := &TypeParam{check: check, id: id, obj: obj, index: -1, bound: bound}
+ if obj.typ == nil {
+ obj.typ = typ
+ }
+ return typ
+}
+
+// Index returns the index of the type param within its param list.
+func (t *TypeParam) Index() int {
+ return t.index
+}
+
+// SetId sets the unique id of a type param. Should only be used for type params
+// in imported generic types.
+func (t *TypeParam) SetId(id uint64) {
+ t.id = id
+}
+
+// Constraint returns the type constraint specified for t.
+func (t *TypeParam) Constraint() Type {
+ // compute the type set if possible (we may not have an interface)
+ if iface, _ := under(t.bound).(*Interface); iface != nil {
+ // use the type bound position if we have one
+ pos := nopos
+ if n, _ := t.bound.(*Named); n != nil {
+ pos = n.obj.pos
+ }
+ computeInterfaceTypeSet(t.check, pos, iface)
+ }
+ return t.bound
+}
+
+// SetConstraint sets the type constraint for t.
+func (t *TypeParam) SetConstraint(bound Type) {
+ if bound == nil {
+ panic("nil constraint")
+ }
+ t.bound = bound
+}
+
+func (t *TypeParam) Underlying() Type { return t }
+func (t *TypeParam) String() string { return TypeString(t, nil) }
+
+// TypeParams holds a list of type parameters bound to a type.
+type TypeParams struct{ tparams []*TypeName }
+
+// Len returns the number of type parameters in the list.
+// It is safe to call on a nil receiver.
+func (tps *TypeParams) Len() int {
+ return len(tps.list())
+}
+
+// At returns the i'th type parameter in the list.
+func (tps *TypeParams) At(i int) *TypeName {
+ return tps.list()[i]
+}
+
+func (tps *TypeParams) list() []*TypeName {
+ if tps == nil {
+ return nil
+ }
+ return tps.tparams
+}
+
+func bindTParams(list []*TypeName) *TypeParams {
+ if len(list) == 0 {
+ return nil
+ }
+ for i, tp := range list {
+ typ := tp.Type().(*TypeParam)
+ if typ.index >= 0 {
+ panic("type parameter bound more than once")
+ }
+ typ.index = i
+ }
+ return &TypeParams{tparams: list}
+}
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+// iface returns the constraint interface of t.
+func (t *TypeParam) iface() *Interface {
+ if iface, _ := under(t.Constraint()).(*Interface); iface != nil {
+ return iface
+ }
+ return &emptyInterface
+}
+
+// structuralType returns the structural type of the type parameter's constraint; or nil.
+func (t *TypeParam) structuralType() Type {
+ return t.iface().typeSet().structuralType()
+}
+
+func (t *TypeParam) is(f func(*term) bool) bool {
+ return t.iface().typeSet().is(f)
+}
+
+func (t *TypeParam) underIs(f func(Type) bool) bool {
+ return t.iface().typeSet().underIs(f)
+}
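
For readers who want to poke at type parameters without building the compiler, the exported go/types surface (Go 1.18+) parallels the internal TypeParam/TypeParams API added here. A sketch, not part of this change; the package p, Number constraint, and Sum function below are invented for illustration.

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p

type Number interface{ ~int | ~float64 }

func Sum[T Number](xs []T) T {
	var s T
	for _, x := range xs {
		s += x
	}
	return s
}`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	// Report each type parameter's index, name, and constraint.
	sig := pkg.Scope().Lookup("Sum").Type().(*types.Signature)
	tparams := sig.TypeParams()
	for i := 0; i < tparams.Len(); i++ {
		tp := tparams.At(i)
		fmt.Printf("index %d: %s constrained by %s\n",
			tp.Index(), tp.Obj().Name(), tp.Constraint()) // e.g. index 0: T constrained by p.Number
	}
}
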
diff --git a/src/cmd/compile/internal/types2/types_test.go b/src/cmd/compile/internal/types2/types_test.go
index 096402148d..1525844f2d 100644
--- a/src/cmd/compile/internal/types2/types_test.go
+++ b/src/cmd/compile/internal/types2/types_test.go
@@ -4,14 +4,9 @@
package types2
-import "sync/atomic"
-
func init() {
acceptMethodTypeParams = true
}
-// Upon calling ResetId, nextId starts with 1 again.
-// It may be called concurrently. This is only needed
-// for tests where we may want to have a consistent
-// numbering for each individual test case.
-func ResetId() { atomic.StoreUint32(&lastId, 0) }
+// Debug is set if types2 is built with debug mode enabled.
+const Debug = debug
diff --git a/src/cmd/compile/internal/types2/typeset.go b/src/cmd/compile/internal/types2/typeset.go
new file mode 100644
index 0000000000..5955bbe805
--- /dev/null
+++ b/src/cmd/compile/internal/types2/typeset.go
@@ -0,0 +1,392 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "bytes"
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "sort"
+)
+
+// ----------------------------------------------------------------------------
+// API
+
+// A TypeSet represents the type set of an interface.
+type TypeSet struct {
+ comparable bool // if set, the interface is or embeds comparable
+ // TODO(gri) consider using a set for the methods for faster lookup
+ methods []*Func // all methods of the interface; sorted by unique ID
+ terms termlist // type terms of the type set
+}
+
+// IsEmpty reports whether type set s is the empty set.
+func (s *TypeSet) IsEmpty() bool { return s.terms.isEmpty() }
+
+// IsAll reports whether type set s is the set of all types (corresponding to the empty interface).
+func (s *TypeSet) IsAll() bool {
+ return !s.comparable && len(s.methods) == 0 && s.terms.isAll()
+}
+
+// TODO(gri) IsMethodSet is not a great name for this predicate. Find a better one.
+
+// IsMethodSet reports whether the type set s is described by a single set of methods.
+func (s *TypeSet) IsMethodSet() bool { return !s.comparable && s.terms.isAll() }
+
+// IsComparable reports whether each type in the set is comparable.
+func (s *TypeSet) IsComparable() bool {
+ if s.terms.isAll() {
+ return s.comparable
+ }
+ return s.is(func(t *term) bool {
+ return Comparable(t.typ)
+ })
+}
+
+// TODO(gri) IsTypeSet is not a great name for this predicate. Find a better one.
+
+// IsTypeSet reports whether the type set s is represented by a finite set of underlying types.
+func (s *TypeSet) IsTypeSet() bool {
+ return !s.comparable && len(s.methods) == 0
+}
+
+// NumMethods returns the number of methods available.
+func (s *TypeSet) NumMethods() int { return len(s.methods) }
+
+// Method returns the i'th method of type set s for 0 <= i < s.NumMethods().
+// The methods are ordered by their unique ID.
+func (s *TypeSet) Method(i int) *Func { return s.methods[i] }
+
+// LookupMethod returns the index of and method with matching package and name, or (-1, nil).
+func (s *TypeSet) LookupMethod(pkg *Package, name string) (int, *Func) {
+ // TODO(gri) s.methods is sorted - consider binary search
+ return lookupMethod(s.methods, pkg, name)
+}
+
+func (s *TypeSet) String() string {
+ switch {
+ case s.IsEmpty():
+ return "∅"
+ case s.IsAll():
+ return "𝓤"
+ }
+
+ hasMethods := len(s.methods) > 0
+ hasTerms := s.hasTerms()
+
+ var buf bytes.Buffer
+ buf.WriteByte('{')
+ if s.comparable {
+ buf.WriteString(" comparable")
+ if hasMethods || hasTerms {
+ buf.WriteByte(';')
+ }
+ }
+ for i, m := range s.methods {
+ if i > 0 {
+ buf.WriteByte(';')
+ }
+ buf.WriteByte(' ')
+ buf.WriteString(m.String())
+ }
+ if hasMethods && hasTerms {
+ buf.WriteByte(';')
+ }
+ if hasTerms {
+ buf.WriteString(s.terms.String())
+ }
+ buf.WriteString(" }") // there was at least one method or term
+
+ return buf.String()
+}
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+func (s *TypeSet) hasTerms() bool { return !s.terms.isAll() }
+func (s *TypeSet) structuralType() Type { return s.terms.structuralType() }
+func (s *TypeSet) includes(t Type) bool { return s.terms.includes(t) }
+func (s1 *TypeSet) subsetOf(s2 *TypeSet) bool { return s1.terms.subsetOf(s2.terms) }
+
+// TODO(gri) TypeSet.is and TypeSet.underIs should probably also go into termlist.go
+
+var topTerm = term{false, theTop}
+
+func (s *TypeSet) is(f func(*term) bool) bool {
+ if len(s.terms) == 0 {
+ return false
+ }
+ for _, t := range s.terms {
+ // Terms represent the top term with a nil type.
+ // The rest of the type checker uses the top type
+ // instead. Convert.
+ // TODO(gri) investigate if we can do without this
+ if t.typ == nil {
+ t = &topTerm
+ }
+ if !f(t) {
+ return false
+ }
+ }
+ return true
+}
+
+func (s *TypeSet) underIs(f func(Type) bool) bool {
+ if len(s.terms) == 0 {
+ return false
+ }
+ for _, t := range s.terms {
+ // see corresponding comment in TypeSet.is
+ u := t.typ
+ if u == nil {
+ u = theTop
+ }
+ // t == under(t) for ~t terms
+ if !t.tilde {
+ u = under(u)
+ }
+ if debug {
+ assert(Identical(u, under(u)))
+ }
+ if !f(u) {
+ return false
+ }
+ }
+ return true
+}
+
+// topTypeSet may be used as type set for the empty interface.
+var topTypeSet = TypeSet{terms: allTermlist}
+
+// computeInterfaceTypeSet may be called with check == nil.
+func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *TypeSet {
+ if ityp.tset != nil {
+ return ityp.tset
+ }
+
+ // If the interface is not fully set up yet, the type set will
+ // not be complete, which may lead to errors when using the
+ // type set (e.g. missing method). Don't compute a partial type
+ // set (and don't store it!), so that we still compute the full
+ // type set eventually. Instead, return the top type set and
+ // let any follow-on errors play out.
+ if !ityp.complete {
+ return &topTypeSet
+ }
+
+ if check != nil && check.conf.Trace {
+ // Types don't generally have position information.
+ // If we don't have a valid pos provided, try to use
+ // one close enough.
+ if !pos.IsKnown() && len(ityp.methods) > 0 {
+ pos = ityp.methods[0].pos
+ }
+
+ check.trace(pos, "type set for %s", ityp)
+ check.indent++
+ defer func() {
+ check.indent--
+ check.trace(pos, "=> %s ", ityp.typeSet())
+ }()
+ }
+
+ // An infinitely expanding interface (due to a cycle) is detected
+ // elsewhere (Checker.validType), so here we simply assume we only
+ // have valid interfaces. Mark the interface as complete to avoid
+ // infinite recursion if the validType check occurs later for some
+ // reason.
+ ityp.tset = &TypeSet{terms: allTermlist} // TODO(gri) is this sufficient?
+
+ // Methods of embedded interfaces are collected unchanged; i.e., the identity
+ // of a method I.m's Func Object of an interface I is the same as that of
+ // the method m in an interface that embeds interface I. On the other hand,
+ // if a method is embedded via multiple overlapping embedded interfaces, we
+ // don't provide a guarantee which "original m" got chosen for the embedding
+ // interface. See also issue #34421.
+ //
+ // If we don't care to provide this identity guarantee anymore, instead of
+ // reusing the original method in embeddings, we can clone the method's Func
+ // Object and give it the position of a corresponding embedded interface. Then
+ // we can get rid of the mpos map below and simply use the cloned method's
+ // position.
+
+ var todo []*Func
+ var seen objset
+ var methods []*Func
+ mpos := make(map[*Func]syntax.Pos) // method specification or method embedding position, for good error messages
+ addMethod := func(pos syntax.Pos, m *Func, explicit bool) {
+ switch other := seen.insert(m); {
+ case other == nil:
+ methods = append(methods, m)
+ mpos[m] = pos
+ case explicit:
+ if check == nil {
+ panic(fmt.Sprintf("%s: duplicate method %s", m.pos, m.name))
+ }
+ // check != nil
+ var err error_
+ err.errorf(pos, "duplicate method %s", m.name)
+ err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
+ check.report(&err)
+ default:
+ // We have a duplicate method name in an embedded (not explicitly declared) method.
+ // Check method signatures after all types are computed (issue #33656).
+ // If we're pre-go1.14 (overlapping embeddings are not permitted), report that
+ // error here as well (even though we could do it eagerly) because it's the same
+ // error message.
+ if check == nil {
+ // check method signatures after all locally embedded interfaces are computed
+ todo = append(todo, m, other.(*Func))
+ break
+ }
+ // check != nil
+ check.later(func() {
+ if !check.allowVersion(m.pkg, 1, 14) || !Identical(m.typ, other.Type()) {
+ var err error_
+ err.errorf(pos, "duplicate method %s", m.name)
+ err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
+ check.report(&err)
+ }
+ })
+ }
+ }
+
+ for _, m := range ityp.methods {
+ addMethod(m.pos, m, true)
+ }
+
+ // collect embedded elements
+ var allTerms = allTermlist
+ for i, typ := range ityp.embeddeds {
+ // The embedding position is nil for imported interfaces
+ // and also for interface copies after substitution (but
+ // in that case we don't need to report errors again).
+ var pos syntax.Pos // embedding position
+ if ityp.embedPos != nil {
+ pos = (*ityp.embedPos)[i]
+ }
+ var terms termlist
+ switch u := under(typ).(type) {
+ case *Interface:
+ tset := computeInterfaceTypeSet(check, pos, u)
+ if tset.comparable {
+ ityp.tset.comparable = true
+ }
+ for _, m := range tset.methods {
+ addMethod(pos, m, false) // use embedding position pos rather than m.pos
+ }
+ terms = tset.terms
+ case *Union:
+ tset := computeUnionTypeSet(check, pos, u)
+ if tset == &invalidTypeSet {
+ continue // ignore invalid unions
+ }
+ terms = tset.terms
+ case *TypeParam:
+ // Embedding stand-alone type parameters is not permitted.
+ // This case is handled during union parsing.
+ unreachable()
+ default:
+ if typ == Typ[Invalid] {
+ continue
+ }
+ if check != nil && !check.allowVersion(check.pkg, 1, 18) {
+ check.errorf(pos, "%s is not an interface", typ)
+ continue
+ }
+ terms = termlist{{false, typ}}
+ }
+ // The type set of an interface is the intersection
+ // of the type sets of all its elements.
+ // Intersection cannot produce longer termlists and
+ // thus cannot overflow.
+ allTerms = allTerms.intersect(terms)
+ }
+ ityp.embedPos = nil // not needed anymore (errors have been reported)
+
+ // process todo's (this only happens if check == nil)
+ for i := 0; i < len(todo); i += 2 {
+ m := todo[i]
+ other := todo[i+1]
+ if !Identical(m.typ, other.typ) {
+ panic(fmt.Sprintf("%s: duplicate method %s", m.pos, m.name))
+ }
+ }
+
+ if methods != nil {
+ sortMethods(methods)
+ ityp.tset.methods = methods
+ }
+ ityp.tset.terms = allTerms
+
+ return ityp.tset
+}
+
+func sortMethods(list []*Func) {
+ sort.Sort(byUniqueMethodName(list))
+}
+
+func assertSortedMethods(list []*Func) {
+ if !debug {
+ panic("assertSortedMethods called outside debug mode")
+ }
+ if !sort.IsSorted(byUniqueMethodName(list)) {
+ panic("methods not sorted")
+ }
+}
+
+// byUniqueMethodName method lists can be sorted by their unique method names.
+type byUniqueMethodName []*Func
+
+func (a byUniqueMethodName) Len() int { return len(a) }
+func (a byUniqueMethodName) Less(i, j int) bool { return a[i].less(&a[j].object) }
+func (a byUniqueMethodName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// invalidTypeSet is a singleton type set to signal an invalid type set
+// due to an error. It's also a valid empty type set, so consumers of
+// type sets may choose to ignore it.
+var invalidTypeSet TypeSet
+
+// computeUnionTypeSet may be called with check == nil.
+// The result is &invalidTypeSet if the union overflows.
+func computeUnionTypeSet(check *Checker, pos syntax.Pos, utyp *Union) *TypeSet {
+ if utyp.tset != nil {
+ return utyp.tset
+ }
+
+ // avoid infinite recursion (see also computeInterfaceTypeSet)
+ utyp.tset = new(TypeSet)
+
+ var allTerms termlist
+ for _, t := range utyp.terms {
+ var terms termlist
+ switch u := under(t.typ).(type) {
+ case *Interface:
+ terms = computeInterfaceTypeSet(check, pos, u).terms
+ case *TypeParam:
+ // A stand-alone type parameter is not permitted as a union term.
+ // This case is handled during union parsing.
+ unreachable()
+ default:
+ if t.typ == Typ[Invalid] {
+ continue
+ }
+ terms = termlist{(*term)(t)}
+ }
+ // The type set of a union expression is the union
+ // of the type sets of each term.
+ allTerms = allTerms.union(terms)
+ if len(allTerms) > maxTermCount {
+ if check != nil {
+ check.errorf(pos, "cannot handle more than %d union terms (implementation limitation)", maxTermCount)
+ }
+ utyp.tset = &invalidTypeSet
+ return utyp.tset
+ }
+ }
+ utyp.tset.terms = allTerms
+
+ return utyp.tset
+}
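
The intersection and union rules implemented above are visible at the language level. A sketch (not from the patch; all names invented): an interface's type set is the intersection of its elements' type sets, and a union element contributes the union of its terms.

package main

import "fmt"

// The type set of Integer is the union of its terms' type sets.
type Integer interface{ ~int | ~int8 | ~int16 | ~int32 | ~int64 }

// A union term may itself be a (method-free) interface; its type set is unioned in.
type Ordered interface{ Integer | ~string }

// Embedding comparable intersects "all comparable types" with Ordered's type set.
type Key interface {
	comparable
	Ordered
}

func Max[T Ordered](a, b T) T {
	if a > b { // every type in Ordered's type set supports <, so > is allowed
		return a
	}
	return b
}

func Lookup[K Key, V any](m map[K]V, key K) V {
	return m[key] // K's type set contains only comparable types, so K may be a map key
}

type Celsius int

func main() {
	fmt.Println(Max(3, 5))                        // 5
	fmt.Println(Max("go", "gopher"))              // gopher
	fmt.Println(Max(Celsius(20), Celsius(25)))    // 25
	fmt.Println(Lookup(map[string]int{"a": 1}, "a")) // 1
}
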
diff --git a/src/cmd/compile/internal/types2/typeset_test.go b/src/cmd/compile/internal/types2/typeset_test.go
new file mode 100644
index 0000000000..0e14d523c8
--- /dev/null
+++ b/src/cmd/compile/internal/types2/typeset_test.go
@@ -0,0 +1,15 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import "testing"
+
+func TestInvalidTypeSet(t *testing.T) {
+ if !invalidTypeSet.IsEmpty() {
+ t.Error("invalidTypeSet is not empty")
+ }
+}
+
+// TODO(gri) add more tests
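
A natural companion test (a sketch only, not part of this change) would pin down the all-types set in the same file, next to TestInvalidTypeSet, reusing the package types2 declaration and testing import above.

func TestTopTypeSet(t *testing.T) {
	// topTypeSet is built from allTermlist, so it should report
	// the set of all types rather than the empty set.
	if !topTypeSet.IsAll() {
		t.Error("topTypeSet is not the set of all types")
	}
}
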
diff --git a/src/cmd/compile/internal/types2/typestring.go b/src/cmd/compile/internal/types2/typestring.go
index 40016697b7..cb7cf73a62 100644
--- a/src/cmd/compile/internal/types2/typestring.go
+++ b/src/cmd/compile/internal/types2/typestring.go
@@ -39,27 +39,6 @@ func RelativeTo(pkg *Package) Qualifier {
}
}
-// If gcCompatibilityMode is set, printing of types is modified
-// to match the representation of some types in the gc compiler:
-//
-// - byte and rune lose their alias name and simply stand for
-// uint8 and int32 respectively
-// - embedded interfaces get flattened (the embedding info is lost,
-// and certain recursive interface types cannot be printed anymore)
-//
-// This makes it easier to compare packages computed with the type-
-// checker vs packages imported from gc export data.
-//
-// Caution: This flag affects all uses of WriteType, globally.
-// It is only provided for testing in conjunction with
-// gc-generated data.
-//
-// This flag is exported in the x/tools/go/types package. We don't
-// need it at the moment in the std repo and so we don't export it
-// anymore. We should eventually try to remove it altogether.
-// TODO(gri) remove this
-var gcCompatibilityMode bool
-
// TypeString returns the string representation of typ.
// The Qualifier controls the printing of
// package-level objects, and may be nil.
@@ -106,16 +85,6 @@ func writeType(buf *bytes.Buffer, typ Type, qf Qualifier, visited []Type) {
break
}
}
-
- if gcCompatibilityMode {
- // forget the alias names
- switch t.kind {
- case Byte:
- t = Typ[Uint8]
- case Rune:
- t = Typ[Int32]
- }
- }
buf.WriteString(t.name)
case *Array:
@@ -157,80 +126,39 @@ func writeType(buf *bytes.Buffer, typ Type, qf Qualifier, visited []Type) {
buf.WriteString("func")
writeSignature(buf, t, qf, visited)
- case *Sum:
- for i, t := range t.types {
+ case *Union:
+ // Unions only appear as (syntactic) embedded elements
+ // in interfaces and syntactically cannot be empty.
+ if t.Len() == 0 {
+ panic("empty union")
+ }
+ for i, t := range t.terms {
if i > 0 {
- buf.WriteString(", ")
+ buf.WriteByte('|')
}
- writeType(buf, t, qf, visited)
+ if t.tilde {
+ buf.WriteByte('~')
+ }
+ writeType(buf, t.typ, qf, visited)
}
case *Interface:
- // We write the source-level methods and embedded types rather
- // than the actual method set since resolved method signatures
- // may have non-printable cycles if parameters have embedded
- // interface types that (directly or indirectly) embed the
- // current interface. For instance, consider the result type
- // of m:
- //
- // type T interface{
- // m() interface{ T }
- // }
- //
buf.WriteString("interface{")
- empty := true
- if gcCompatibilityMode {
- // print flattened interface
- // (useful to compare against gc-generated interfaces)
- for i, m := range t.allMethods {
- if i > 0 {
- buf.WriteString("; ")
- }
- buf.WriteString(m.name)
- writeSignature(buf, m.typ.(*Signature), qf, visited)
- empty = false
- }
- if !empty && t.allTypes != nil {
- buf.WriteString("; ")
- }
- if t.allTypes != nil {
- buf.WriteString("type ")
- writeType(buf, t.allTypes, qf, visited)
- }
- } else {
- // print explicit interface methods and embedded types
- for i, m := range t.methods {
- if i > 0 {
- buf.WriteString("; ")
- }
- buf.WriteString(m.name)
- writeSignature(buf, m.typ.(*Signature), qf, visited)
- empty = false
- }
- if !empty && t.types != nil {
+ first := true
+ for _, m := range t.methods {
+ if !first {
buf.WriteString("; ")
}
- if t.types != nil {
- buf.WriteString("type ")
- writeType(buf, t.types, qf, visited)
- empty = false
- }
- if !empty && len(t.embeddeds) > 0 {
- buf.WriteString("; ")
- }
- for i, typ := range t.embeddeds {
- if i > 0 {
- buf.WriteString("; ")
- }
- writeType(buf, typ, qf, visited)
- empty = false
- }
+ first = false
+ buf.WriteString(m.name)
+ writeSignature(buf, m.typ.(*Signature), qf, visited)
}
- if t.allMethods == nil || len(t.methods) > len(t.allMethods) {
- if !empty {
- buf.WriteByte(' ')
+ for _, typ := range t.embeddeds {
+ if !first {
+ buf.WriteString("; ")
}
- buf.WriteString("/* incomplete */")
+ first = false
+ writeType(buf, typ, qf, visited)
}
buf.WriteByte('}')
@@ -255,7 +183,7 @@ func writeType(buf *bytes.Buffer, typ Type, qf Qualifier, visited []Type) {
case RecvOnly:
s = "<-chan "
default:
- panic("unreachable")
+ unreachable()
}
buf.WriteString(s)
if parens {
@@ -267,39 +195,40 @@ func writeType(buf *bytes.Buffer, typ Type, qf Qualifier, visited []Type) {
}
case *Named:
+ if t.instance != nil {
+ buf.WriteByte(instanceMarker)
+ }
writeTypeName(buf, t.obj, qf)
if t.targs != nil {
// instantiated type
buf.WriteByte('[')
writeTypeList(buf, t.targs, qf, visited)
buf.WriteByte(']')
- } else if t.tparams != nil {
+ } else if t.TParams().Len() != 0 {
// parameterized type
- writeTParamList(buf, t.tparams, qf, visited)
+ writeTParamList(buf, t.TParams().list(), qf, visited)
}
case *TypeParam:
s := "?"
if t.obj != nil {
+ // Optionally write out package for typeparams (like Named).
+ // TODO(danscales): this is required for import/export, so
+ // we may need a separate function that won't be changed
+ // for debugging purposes.
+ if t.obj.pkg != nil {
+ writePackage(buf, t.obj.pkg, qf)
+ }
s = t.obj.name
}
buf.WriteString(s + subscript(t.id))
- case *instance:
- buf.WriteByte(instanceMarker) // indicate "non-evaluated" syntactic instance
- writeTypeName(buf, t.base.obj, qf)
- buf.WriteByte('[')
- writeTypeList(buf, t.targs, qf, visited)
- buf.WriteByte(']')
-
- case *bottom:
- buf.WriteString("⊥")
-
case *top:
buf.WriteString("⊤")
default:
// For externally defined implementations of Type.
+ // Note: In this case cycles won't be caught.
buf.WriteString(t.String())
}
}
@@ -317,23 +246,27 @@ func writeTParamList(buf *bytes.Buffer, list []*TypeName, qf Qualifier, visited
buf.WriteString("[")
var prev Type
for i, p := range list {
- // TODO(gri) support 'any' sugar here.
- var b Type = &emptyInterface
- if t, _ := p.typ.(*TypeParam); t != nil && t.bound != nil {
- b = t.bound
+ // Determine the type parameter and its constraint.
+ // list is expected to hold type parameter names,
+ // but don't crash if that's not the case.
+ tpar, _ := p.typ.(*TypeParam)
+ var bound Type
+ if tpar != nil {
+ bound = tpar.bound // should not be nil but we want to see it if it is
}
+
if i > 0 {
- if b != prev {
- // type bound changed - write previous one before advancing
+ if bound != prev {
+ // bound changed - write previous one before advancing
buf.WriteByte(' ')
writeType(buf, prev, qf, visited)
}
buf.WriteString(", ")
}
- prev = b
+ prev = bound
- if t, _ := p.typ.(*TypeParam); t != nil {
- writeType(buf, t, qf, visited)
+ if tpar != nil {
+ writeType(buf, tpar, qf, visited)
} else {
buf.WriteString(p.name)
}
@@ -346,17 +279,38 @@ func writeTParamList(buf *bytes.Buffer, list []*TypeName, qf Qualifier, visited
}
func writeTypeName(buf *bytes.Buffer, obj *TypeName, qf Qualifier) {
- s := "<Named w/o object>"
- if obj != nil {
- if obj.pkg != nil {
- writePackage(buf, obj.pkg, qf)
+ if obj == nil {
+ buf.WriteString("<Named w/o object>")
+ return
+ }
+ if obj.pkg != nil {
+ writePackage(buf, obj.pkg, qf)
+ }
+ buf.WriteString(obj.name)
+
+ if instanceHashing != 0 {
+ // For local defined types, use the (original!) TypeName's scope
+ // numbers to disambiguate.
+ typ := obj.typ.(*Named)
+ // TODO(gri) Figure out why typ.orig != typ.orig.orig sometimes
+ // and whether the loop can iterate more than twice.
+ // (It seems somehow connected to instance types.)
+ for typ.orig != typ {
+ typ = typ.orig
}
- // TODO(gri): function-local named types should be displayed
- // differently from named types at package level to avoid
- // ambiguity.
- s = obj.name
+ writeScopeNumbers(buf, typ.obj.parent)
+ }
+}
+
+// writeScopeNumbers writes the number sequence for this scope to buf
+// in the form ".i.j.k" where i, j, k, etc. stand for scope numbers.
+// If a scope is nil or has no parent (such as a package scope), nothing
+// is written.
+func writeScopeNumbers(buf *bytes.Buffer, s *Scope) {
+ if s != nil && s.number > 0 {
+ writeScopeNumbers(buf, s.parent)
+ fmt.Fprintf(buf, ".%d", s.number)
}
- buf.WriteString(s)
}
func writeTuple(buf *bytes.Buffer, tup *Tuple, variadic bool, qf Qualifier, visited []Type) {
@@ -379,7 +333,7 @@ func writeTuple(buf *bytes.Buffer, tup *Tuple, variadic bool, qf Qualifier, visi
// special case:
// append(s, "foo"...) leads to signature func([]byte, string...)
if t := asBasic(typ); t == nil || t.kind != String {
- panic("internal error: string type expected")
+ panic("expected string type")
}
writeType(buf, typ, qf, visited)
buf.WriteString("...")
@@ -401,8 +355,8 @@ func WriteSignature(buf *bytes.Buffer, sig *Signature, qf Qualifier) {
}
func writeSignature(buf *bytes.Buffer, sig *Signature, qf Qualifier, visited []Type) {
- if sig.tparams != nil {
- writeTParamList(buf, sig.tparams, qf, visited)
+ if sig.TParams().Len() != 0 {
+ writeTParamList(buf, sig.TParams().list(), qf, visited)
}
writeTuple(buf, sig.params, sig.variadic, qf, visited)
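The effect of the new scope-number suffix is easiest to see in isolation. The sketch below is a standalone Go program (its scope type and the example tree are illustration-only stand-ins, not the types2 Scope API) that mirrors the recursion in writeScopeNumbers: a type T declared in the 3rd child scope of the 2nd child scope of its package prints as T.2.3 when instanceHashing is enabled.

package main

import (
	"bytes"
	"fmt"
)

// scope is a simplified stand-in for types2.Scope: it only carries the
// parent link and the child index ("number") used by writeScopeNumbers.
type scope struct {
	parent *scope
	number int
}

// writeScopeNumbers emits ".i.j.k" for every enclosing scope with a
// non-zero number, outermost first, exactly like the function above.
func writeScopeNumbers(buf *bytes.Buffer, s *scope) {
	if s != nil && s.number > 0 {
		writeScopeNumbers(buf, s.parent)
		fmt.Fprintf(buf, ".%d", s.number)
	}
}

func main() {
	pkg := &scope{nil, 0} // package scope: number 0, never printed
	fn := &scope{pkg, 2}  // 2nd child scope of the package
	blk := &scope{fn, 3}  // 3rd child scope inside that function

	var buf bytes.Buffer
	buf.WriteString("T")
	writeScopeNumbers(&buf, blk)
	fmt.Println(buf.String()) // prints: T.2.3
}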
diff --git a/src/cmd/compile/internal/types2/typestring_test.go b/src/cmd/compile/internal/types2/typestring_test.go
index d98e9a5ade..0ed2934961 100644
--- a/src/cmd/compile/internal/types2/typestring_test.go
+++ b/src/cmd/compile/internal/types2/typestring_test.go
@@ -91,7 +91,8 @@ var independentTestTypes = []testEntry{
dup("interface{}"),
dup("interface{m()}"),
dup(`interface{String() string; m(int) float32}`),
- dup(`interface{type int, float32, complex128}`),
+ dup("interface{int|float32|complex128}"),
+ dup("interface{int|~float32|~complex128}"),
// maps
dup("map[string]int"),
@@ -135,60 +136,6 @@ func TestTypeString(t *testing.T) {
}
}
-var nopos syntax.Pos
-
-func TestIncompleteInterfaces(t *testing.T) {
- sig := NewSignature(nil, nil, nil, false)
- m := NewFunc(nopos, nil, "m", sig)
- for _, test := range []struct {
- typ *Interface
- want string
- }{
- {new(Interface), "interface{/* incomplete */}"},
- {new(Interface).Complete(), "interface{}"},
-
- {NewInterface(nil, nil), "interface{}"},
- {NewInterface(nil, nil).Complete(), "interface{}"},
- {NewInterface([]*Func{}, nil), "interface{}"},
- {NewInterface([]*Func{}, nil).Complete(), "interface{}"},
- {NewInterface(nil, []*Named{}), "interface{}"},
- {NewInterface(nil, []*Named{}).Complete(), "interface{}"},
- {NewInterface([]*Func{m}, nil), "interface{m() /* incomplete */}"},
- {NewInterface([]*Func{m}, nil).Complete(), "interface{m()}"},
- {NewInterface(nil, []*Named{newDefined(new(Interface).Complete())}), "interface{T /* incomplete */}"},
- {NewInterface(nil, []*Named{newDefined(new(Interface).Complete())}).Complete(), "interface{T}"},
- {NewInterface(nil, []*Named{newDefined(NewInterface([]*Func{m}, nil))}), "interface{T /* incomplete */}"},
- {NewInterface(nil, []*Named{newDefined(NewInterface([]*Func{m}, nil).Complete())}), "interface{T /* incomplete */}"},
- {NewInterface(nil, []*Named{newDefined(NewInterface([]*Func{m}, nil).Complete())}).Complete(), "interface{T}"},
-
- {NewInterfaceType(nil, nil), "interface{}"},
- {NewInterfaceType(nil, nil).Complete(), "interface{}"},
- {NewInterfaceType([]*Func{}, nil), "interface{}"},
- {NewInterfaceType([]*Func{}, nil).Complete(), "interface{}"},
- {NewInterfaceType(nil, []Type{}), "interface{}"},
- {NewInterfaceType(nil, []Type{}).Complete(), "interface{}"},
- {NewInterfaceType([]*Func{m}, nil), "interface{m() /* incomplete */}"},
- {NewInterfaceType([]*Func{m}, nil).Complete(), "interface{m()}"},
- {NewInterfaceType(nil, []Type{new(Interface).Complete()}), "interface{interface{} /* incomplete */}"},
- {NewInterfaceType(nil, []Type{new(Interface).Complete()}).Complete(), "interface{interface{}}"},
- {NewInterfaceType(nil, []Type{NewInterfaceType([]*Func{m}, nil)}), "interface{interface{m() /* incomplete */} /* incomplete */}"},
- {NewInterfaceType(nil, []Type{NewInterfaceType([]*Func{m}, nil).Complete()}), "interface{interface{m()} /* incomplete */}"},
- {NewInterfaceType(nil, []Type{NewInterfaceType([]*Func{m}, nil).Complete()}).Complete(), "interface{interface{m()}}"},
- } {
- got := test.typ.String()
- if got != test.want {
- t.Errorf("got: %s, want: %s", got, test.want)
- }
- }
-}
-
-// newDefined creates a new defined type named T with the given underlying type.
-// Helper function for use with TestIncompleteInterfaces only.
-func newDefined(underlying Type) *Named {
- tname := NewTypeName(nopos, nil, "T", nil)
- return NewNamed(tname, underlying, nil)
-}
-
func TestQualifiedTypeString(t *testing.T) {
p, _ := pkgFor("p.go", "package p; type T int", nil)
q, _ := pkgFor("q.go", "package q", nil)
diff --git a/src/cmd/compile/internal/types2/typeterm.go b/src/cmd/compile/internal/types2/typeterm.go
new file mode 100644
index 0000000000..1d7223f13c
--- /dev/null
+++ b/src/cmd/compile/internal/types2/typeterm.go
@@ -0,0 +1,166 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// A term describes elementary type sets:
+//
+// ∅: (*term)(nil) == ∅ // set of no types (empty set)
+// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse)
+// T: &term{false, T} == {T} // set of type T
+// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t
+//
+type term struct {
+ tilde bool // valid if typ != nil
+ typ Type
+}
+
+func (x *term) String() string {
+ switch {
+ case x == nil:
+ return "∅"
+ case x.typ == nil:
+ return "𝓤"
+ case x.tilde:
+ return "~" + x.typ.String()
+ default:
+ return x.typ.String()
+ }
+}
+
+// equal reports whether x and y represent the same type set.
+func (x *term) equal(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+ return x == y
+ case x.typ == nil || y.typ == nil:
+ return x.typ == y.typ
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ return x.tilde == y.tilde && Identical(x.typ, y.typ)
+}
+
+// union returns the union x ∪ y: zero, one, or two non-nil terms.
+func (x *term) union(y *term) (_, _ *term) {
+ // easy cases
+ switch {
+ case x == nil && y == nil:
+ return nil, nil // ∅ ∪ ∅ == ∅
+ case x == nil:
+ return y, nil // ∅ ∪ y == y
+ case y == nil:
+ return x, nil // x ∪ ∅ == x
+ case x.typ == nil:
+ return x, nil // 𝓤 ∪ y == 𝓤
+ case y.typ == nil:
+ return y, nil // x ∪ 𝓤 == 𝓤
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return x, y // x ∪ y == (x, y) if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ∪ ~t == ~t
+ // ~t ∪ T == ~t
+ // T ∪ ~t == ~t
+ // T ∪ T == T
+ if x.tilde || !y.tilde {
+ return x, nil
+ }
+ return y, nil
+}
+
+// intersect returns the intersection x ∩ y.
+func (x *term) intersect(y *term) *term {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+ return nil // ∅ ∩ y == ∅ and x ∩ ∅ == ∅
+ case x.typ == nil:
+ return y // 𝓤 ∩ y == y
+ case y.typ == nil:
+ return x // x ∩ 𝓤 == x
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return nil // x ∩ y == ∅ if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ∩ ~t == ~t
+ // ~t ∩ T == T
+ // T ∩ ~t == T
+ // T ∩ T == T
+ if !x.tilde || y.tilde {
+ return x
+ }
+ return y
+}
+
+// includes reports whether t ∈ x.
+func (x *term) includes(t Type) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return false // t ∈ ∅ == false
+ case x.typ == nil:
+ return true // t ∈ 𝓤 == true
+ }
+ // ∅ ⊂ x ⊂ 𝓤
+
+ u := t
+ if x.tilde {
+ u = under(u)
+ }
+ return Identical(x.typ, u)
+}
+
+// subsetOf reports whether x ⊆ y.
+func (x *term) subsetOf(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return true // ∅ ⊆ y == true
+ case y == nil:
+ return false // x ⊆ ∅ == false since x != ∅
+ case y.typ == nil:
+ return true // x ⊆ 𝓤 == true
+ case x.typ == nil:
+ return false // 𝓤 ⊆ y == false since y != 𝓤
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return false // x ⊆ y == false if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ⊆ ~t == true
+ // ~t ⊆ T == false
+ // T ⊆ ~t == true
+ // T ⊆ T == true
+ return !x.tilde || y.tilde
+}
+
+// disjoint reports whether x ∩ y == ∅.
+// x.typ and y.typ must not be nil.
+func (x *term) disjoint(y *term) bool {
+ if debug && (x.typ == nil || y.typ == nil) {
+ panic("invalid argument(s)")
+ }
+ ux := x.typ
+ if y.tilde {
+ ux = under(ux)
+ }
+ uy := y.typ
+ if x.tilde {
+ uy = under(uy)
+ }
+ return !Identical(ux, uy)
+}
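The term algebra above is small enough to exercise by hand. The following standalone sketch models it with plain strings in place of types2.Type (Identical becomes string equality and under is a lookup table; myInt is assumed to be a defined type with underlying type int, as in the test file below).

package main

import "fmt"

// Illustration-only model of the term algebra: a string names a type,
// tilde marks an underlying-type set.
type term struct {
	tilde bool
	typ   string
}

// underOf models under(): myInt is a defined type with underlying type int.
var underOf = map[string]string{"myInt": "int"}

func under(t string) string {
	if u, ok := underOf[t]; ok {
		return u
	}
	return t
}

// disjoint mirrors (*term).disjoint: tilde terms compare underlying types.
func (x *term) disjoint(y *term) bool {
	ux := x.typ
	if y.tilde {
		ux = under(ux)
	}
	uy := y.typ
	if x.tilde {
		uy = under(uy)
	}
	return ux != uy
}

// includes mirrors the non-trivial case of (*term).includes: t ∈ ~u iff under(t) == u.
func (x *term) includes(t string) bool {
	u := t
	if x.tilde {
		u = under(u)
	}
	return x.typ == u
}

func main() {
	tInt := &term{false, "int"}     // {int}
	tTildeInt := &term{true, "int"} // {t | under(t) == int}
	tMyInt := &term{false, "myInt"} // {myInt}

	fmt.Println(tTildeInt.includes("myInt")) // true:  myInt ∈ ~int
	fmt.Println(tInt.includes("myInt"))      // false: {int} contains only int
	fmt.Println(tInt.disjoint(tMyInt))       // true:  {int} ∩ {myInt} == ∅
	fmt.Println(tTildeInt.disjoint(tMyInt))  // false: ~int ∩ {myInt} == {myInt}
}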
diff --git a/src/cmd/compile/internal/types2/typeterm_test.go b/src/cmd/compile/internal/types2/typeterm_test.go
new file mode 100644
index 0000000000..5a5c1fa447
--- /dev/null
+++ b/src/cmd/compile/internal/types2/typeterm_test.go
@@ -0,0 +1,239 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "strings"
+ "testing"
+)
+
+var myInt = func() Type {
+ tname := NewTypeName(nopos, nil, "myInt", nil)
+ return NewNamed(tname, Typ[Int], nil)
+}()
+
+var testTerms = map[string]*term{
+ "∅": nil,
+ "𝓤": {},
+ "int": {false, Typ[Int]},
+ "~int": {true, Typ[Int]},
+ "string": {false, Typ[String]},
+ "~string": {true, Typ[String]},
+ "myInt": {false, myInt},
+}
+
+func TestTermString(t *testing.T) {
+ for want, x := range testTerms {
+ if got := x.String(); got != want {
+ t.Errorf("%v.String() == %v; want %v", x, got, want)
+ }
+ }
+}
+
+func split(s string, n int) []string {
+ r := strings.Split(s, " ")
+ if len(r) != n {
+ panic("invalid test case: " + s)
+ }
+ return r
+}
+
+func testTerm(name string) *term {
+ r, ok := testTerms[name]
+ if !ok {
+ panic("invalid test argument: " + name)
+ }
+ return r
+}
+
+func TestTermEqual(t *testing.T) {
+ for _, test := range []string{
+ "∅ ∅ T",
+ "𝓤 𝓤 T",
+ "int int T",
+ "~int ~int T",
+ "myInt myInt T",
+ "∅ 𝓤 F",
+ "∅ int F",
+ "∅ ~int F",
+ "𝓤 int F",
+ "𝓤 ~int F",
+ "𝓤 myInt F",
+ "int ~int F",
+ "int myInt F",
+ "~int myInt F",
+ } {
+ args := split(test, 3)
+ x := testTerm(args[0])
+ y := testTerm(args[1])
+ want := args[2] == "T"
+ if got := x.equal(y); got != want {
+ t.Errorf("%v.equal(%v) = %v; want %v", x, y, got, want)
+ }
+ // equal is symmetric
+ x, y = y, x
+ if got := x.equal(y); got != want {
+ t.Errorf("%v.equal(%v) = %v; want %v", x, y, got, want)
+ }
+ }
+}
+
+func TestTermUnion(t *testing.T) {
+ for _, test := range []string{
+ "∅ ∅ ∅ ∅",
+ "∅ 𝓤 𝓤 ∅",
+ "∅ int int ∅",
+ "∅ ~int ~int ∅",
+ "∅ myInt myInt ∅",
+ "𝓤 𝓤 𝓤 ∅",
+ "𝓤 int 𝓤 ∅",
+ "𝓤 ~int 𝓤 ∅",
+ "𝓤 myInt 𝓤 ∅",
+ "int int int ∅",
+ "int ~int ~int ∅",
+ "int string int string",
+ "int ~string int ~string",
+ "int myInt int myInt",
+ "~int ~string ~int ~string",
+ "~int myInt ~int ∅",
+
+ // union is symmetric, but the result order isn't - repeat symmetric cases explicitly
+ "𝓤 ∅ 𝓤 ∅",
+ "int ∅ int ∅",
+ "~int ∅ ~int ∅",
+ "myInt ∅ myInt ∅",
+ "int 𝓤 𝓤 ∅",
+ "~int 𝓤 𝓤 ∅",
+ "myInt 𝓤 𝓤 ∅",
+ "~int int ~int ∅",
+ "string int string int",
+ "~string int ~string int",
+ "myInt int myInt int",
+ "~string ~int ~string ~int",
+ "myInt ~int ~int ∅",
+ } {
+ args := split(test, 4)
+ x := testTerm(args[0])
+ y := testTerm(args[1])
+ want1 := testTerm(args[2])
+ want2 := testTerm(args[3])
+ if got1, got2 := x.union(y); !got1.equal(want1) || !got2.equal(want2) {
+ t.Errorf("%v.union(%v) = %v, %v; want %v, %v", x, y, got1, got2, want1, want2)
+ }
+ }
+}
+
+func TestTermIntersection(t *testing.T) {
+ for _, test := range []string{
+ "∅ ∅ ∅",
+ "∅ 𝓤 ∅",
+ "∅ int ∅",
+ "∅ ~int ∅",
+ "∅ myInt ∅",
+ "𝓤 𝓤 𝓤",
+ "𝓤 int int",
+ "𝓤 ~int ~int",
+ "𝓤 myInt myInt",
+ "int int int",
+ "int ~int int",
+ "int string ∅",
+ "int ~string ∅",
+ "int myInt ∅",
+ "~int ~string ∅",
+ "~int myInt myInt",
+ } {
+ args := split(test, 3)
+ x := testTerm(args[0])
+ y := testTerm(args[1])
+ want := testTerm(args[2])
+ if got := x.intersect(y); !got.equal(want) {
+ t.Errorf("%v.intersect(%v) = %v; want %v", x, y, got, want)
+ }
+ // intersect is symmetric
+ x, y = y, x
+ if got := x.intersect(y); !got.equal(want) {
+ t.Errorf("%v.intersect(%v) = %v; want %v", x, y, got, want)
+ }
+ }
+}
+
+func TestTermIncludes(t *testing.T) {
+ for _, test := range []string{
+ "∅ int F",
+ "𝓤 int T",
+ "int int T",
+ "~int int T",
+ "~int myInt T",
+ "string int F",
+ "~string int F",
+ "myInt int F",
+ } {
+ args := split(test, 3)
+ x := testTerm(args[0])
+ y := testTerm(args[1]).typ
+ want := args[2] == "T"
+ if got := x.includes(y); got != want {
+ t.Errorf("%v.includes(%v) = %v; want %v", x, y, got, want)
+ }
+ }
+}
+
+func TestTermSubsetOf(t *testing.T) {
+ for _, test := range []string{
+ "∅ ∅ T",
+ "𝓤 𝓤 T",
+ "int int T",
+ "~int ~int T",
+ "myInt myInt T",
+ "∅ 𝓤 T",
+ "∅ int T",
+ "∅ ~int T",
+ "∅ myInt T",
+ "𝓤 int F",
+ "𝓤 ~int F",
+ "𝓤 myInt F",
+ "int ~int T",
+ "int myInt F",
+ "~int myInt F",
+ "myInt int F",
+ "myInt ~int T",
+ } {
+ args := split(test, 3)
+ x := testTerm(args[0])
+ y := testTerm(args[1])
+ want := args[2] == "T"
+ if got := x.subsetOf(y); got != want {
+ t.Errorf("%v.subsetOf(%v) = %v; want %v", x, y, got, want)
+ }
+ }
+}
+
+func TestTermDisjoint(t *testing.T) {
+ for _, test := range []string{
+ "int int F",
+ "~int ~int F",
+ "int ~int F",
+ "int string T",
+ "int ~string T",
+ "int myInt T",
+ "~int ~string T",
+ "~int myInt F",
+ "string myInt T",
+ "~string myInt T",
+ } {
+ args := split(test, 3)
+ x := testTerm(args[0])
+ y := testTerm(args[1])
+ want := args[2] == "T"
+ if got := x.disjoint(y); got != want {
+ t.Errorf("%v.disjoint(%v) = %v; want %v", x, y, got, want)
+ }
+ // disjoint is symmetric
+ x, y = y, x
+ if got := x.disjoint(y); got != want {
+ t.Errorf("%v.disjoint(%v) = %v; want %v", x, y, got, want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types2/typexpr.go b/src/cmd/compile/internal/types2/typexpr.go
index e64d804c30..6a9eacd31d 100644
--- a/src/cmd/compile/internal/types2/typexpr.go
+++ b/src/cmd/compile/internal/types2/typexpr.go
@@ -10,14 +10,9 @@ import (
"cmd/compile/internal/syntax"
"fmt"
"go/constant"
- "sort"
- "strconv"
"strings"
)
-// Disabled by default, but enabled when running tests (via types_test.go).
-var acceptMethodTypeParams bool
-
// ident type-checks identifier e and initializes x with the value or type of e.
// If an error occurred, x.mode is set to invalid.
// For the meaning of def, see Checker.definedType, below.
@@ -30,7 +25,8 @@ func (check *Checker) ident(x *operand, e *syntax.Name, def *Named, wantType boo
// Note that we cannot use check.lookup here because the returned scope
// may be different from obj.Parent(). See also Scope.LookupParent doc.
scope, obj := check.scope.LookupParent(e.Value, check.pos)
- if obj == nil {
+ switch obj {
+ case nil:
if e.Value == "_" {
check.error(e, "cannot use _ as value or type")
} else {
@@ -41,6 +37,16 @@ func (check *Checker) ident(x *operand, e *syntax.Name, def *Named, wantType boo
}
}
return
+ case universeAny, universeComparable:
+ if !check.allowVersion(check.pkg, 1, 18) {
+ check.errorf(e, "undeclared name: %s (requires version go1.18 or later)", e.Value)
+ return
+ }
+ // If we allow "any" for general use, this if-statement can be removed (issue #33232).
+ if obj == universeAny {
+ check.error(e, "cannot use any outside constraint position")
+ return
+ }
}
check.recordUse(e, obj)
@@ -63,7 +69,7 @@ func (check *Checker) ident(x *operand, e *syntax.Name, def *Named, wantType boo
// If so, mark the respective package as used.
// (This code is only needed for dot-imports. Without them,
// we only have to mark variables, see *Var case below).
- if pkgName := check.dotImportMap[dotImportKey{scope, obj}]; pkgName != nil {
+ if pkgName := check.dotImportMap[dotImportKey{scope, obj.Name()}]; pkgName != nil {
pkgName.used = true
}
@@ -141,18 +147,18 @@ func (check *Checker) varType(e syntax.Expr) Type {
// ordinaryType reports an error if typ is an interface type containing
// type lists or is (or embeds) the predeclared type comparable.
func (check *Checker) ordinaryType(pos syntax.Pos, typ Type) {
- // We don't want to call under() (via Interface) or complete interfaces while we
+ // We don't want to call under() (via asInterface) or complete interfaces while we
// are in the middle of type-checking parameter declarations that might belong to
// interface methods. Delay this check to the end of type-checking.
check.later(func() {
if t := asInterface(typ); t != nil {
- check.completeInterface(pos, t) // TODO(gri) is this the correct position?
- if t.allTypes != nil {
- check.softErrorf(pos, "interface contains type constraints (%s)", t.allTypes)
- return
- }
- if t.IsComparable() {
- check.softErrorf(pos, "interface is (or embeds) comparable")
+ tset := computeInterfaceTypeSet(check, pos, t) // TODO(gri) is this the correct position?
+ if !tset.IsMethodSet() {
+ if tset.comparable {
+ check.softErrorf(pos, "interface is (or embeds) comparable")
+ } else {
+ check.softErrorf(pos, "interface contains type constraints")
+ }
}
}
})
@@ -198,238 +204,6 @@ func (check *Checker) genericType(e syntax.Expr, reportErr bool) Type {
return typ
}
-// isubst returns an x with identifiers substituted per the substitution map smap.
-// isubst only handles the case of (valid) method receiver type expressions correctly.
-func isubst(x syntax.Expr, smap map[*syntax.Name]*syntax.Name) syntax.Expr {
- switch n := x.(type) {
- case *syntax.Name:
- if alt := smap[n]; alt != nil {
- return alt
- }
- // case *syntax.StarExpr:
- // X := isubst(n.X, smap)
- // if X != n.X {
- // new := *n
- // new.X = X
- // return &new
- // }
- case *syntax.Operation:
- if n.Op == syntax.Mul && n.Y == nil {
- X := isubst(n.X, smap)
- if X != n.X {
- new := *n
- new.X = X
- return &new
- }
- }
- case *syntax.IndexExpr:
- Index := isubst(n.Index, smap)
- if Index != n.Index {
- new := *n
- new.Index = Index
- return &new
- }
- case *syntax.ListExpr:
- var elems []syntax.Expr
- for i, elem := range n.ElemList {
- new := isubst(elem, smap)
- if new != elem {
- if elems == nil {
- elems = make([]syntax.Expr, len(n.ElemList))
- copy(elems, n.ElemList)
- }
- elems[i] = new
- }
- }
- if elems != nil {
- new := *n
- new.ElemList = elems
- return &new
- }
- case *syntax.ParenExpr:
- return isubst(n.X, smap) // no need to keep parentheses
- default:
- // Other receiver type expressions are invalid.
- // It's fine to ignore those here as they will
- // be checked elsewhere.
- }
- return x
-}
-
-// funcType type-checks a function or method type.
-func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams []*syntax.Field, ftyp *syntax.FuncType) {
- check.openScope(ftyp, "function")
- check.scope.isFunc = true
- check.recordScope(ftyp, check.scope)
- sig.scope = check.scope
- defer check.closeScope()
-
- var recvTyp syntax.Expr // rewritten receiver type; valid if != nil
- if recvPar != nil {
- // collect generic receiver type parameters, if any
- // - a receiver type parameter is like any other type parameter, except that it is declared implicitly
- // - the receiver specification acts as local declaration for its type parameters, which may be blank
- _, rname, rparams := check.unpackRecv(recvPar.Type, true)
- if len(rparams) > 0 {
- // Blank identifiers don't get declared and regular type-checking of the instantiated
- // parameterized receiver type expression fails in Checker.collectParams of receiver.
- // Identify blank type parameters and substitute each with a unique new identifier named
- // "n_" (where n is the parameter index) and which cannot conflict with any user-defined
- // name.
- var smap map[*syntax.Name]*syntax.Name // substitution map from "_" to "!n" identifiers
- for i, p := range rparams {
- if p.Value == "_" {
- new := *p
- new.Value = fmt.Sprintf("%d_", i)
- rparams[i] = &new // use n_ identifier instead of _ so it can be looked up
- if smap == nil {
- smap = make(map[*syntax.Name]*syntax.Name)
- }
- smap[p] = &new
- }
- }
- if smap != nil {
- // blank identifiers were found => use rewritten receiver type
- recvTyp = isubst(recvPar.Type, smap)
- }
- // TODO(gri) rework declareTypeParams
- sig.rparams = nil
- for _, rparam := range rparams {
- sig.rparams = check.declareTypeParam(sig.rparams, rparam)
- }
- // determine receiver type to get its type parameters
- // and the respective type parameter bounds
- var recvTParams []*TypeName
- if rname != nil {
- // recv should be a Named type (otherwise an error is reported elsewhere)
- // Also: Don't report an error via genericType since it will be reported
- // again when we type-check the signature.
- // TODO(gri) maybe the receiver should be marked as invalid instead?
- if recv := asNamed(check.genericType(rname, false)); recv != nil {
- recvTParams = recv.tparams
- }
- }
- // provide type parameter bounds
- // - only do this if we have the right number (otherwise an error is reported elsewhere)
- if len(sig.rparams) == len(recvTParams) {
- // We have a list of *TypeNames but we need a list of Types.
- list := make([]Type, len(sig.rparams))
- for i, t := range sig.rparams {
- list[i] = t.typ
- }
- smap := makeSubstMap(recvTParams, list)
- for i, tname := range sig.rparams {
- bound := recvTParams[i].typ.(*TypeParam).bound
- // bound is (possibly) parameterized in the context of the
- // receiver type declaration. Substitute parameters for the
- // current context.
- // TODO(gri) should we assume now that bounds always exist?
- // (no bound == empty interface)
- if bound != nil {
- bound = check.subst(tname.pos, bound, smap)
- tname.typ.(*TypeParam).bound = bound
- }
- }
- }
- }
- }
-
- if tparams != nil {
- sig.tparams = check.collectTypeParams(tparams)
- // Always type-check method type parameters but complain if they are not enabled.
- // (A separate check is needed when type-checking interface method signatures because
- // they don't have a receiver specification.)
- if recvPar != nil && !acceptMethodTypeParams {
- check.error(ftyp, "methods cannot have type parameters")
- }
- }
-
- // Value (non-type) parameters' scope starts in the function body. Use a temporary scope for their
- // declarations and then squash that scope into the parent scope (and report any redeclarations at
- // that time).
- scope := NewScope(check.scope, nopos, nopos, "function body (temp. scope)")
- var recvList []*Var // TODO(gri) remove the need for making a list here
- if recvPar != nil {
- recvList, _ = check.collectParams(scope, []*syntax.Field{recvPar}, recvTyp, false) // use rewritten receiver type, if any
- }
- params, variadic := check.collectParams(scope, ftyp.ParamList, nil, true)
- results, _ := check.collectParams(scope, ftyp.ResultList, nil, false)
- scope.Squash(func(obj, alt Object) {
- var err error_
- err.errorf(obj, "%s redeclared in this block", obj.Name())
- err.recordAltDecl(alt)
- check.report(&err)
- })
-
- if recvPar != nil {
- // recv parameter list present (may be empty)
- // spec: "The receiver is specified via an extra parameter section preceding the
- // method name. That parameter section must declare a single parameter, the receiver."
- var recv *Var
- switch len(recvList) {
- case 0:
- // error reported by resolver
- recv = NewParam(nopos, nil, "", Typ[Invalid]) // ignore recv below
- default:
- // more than one receiver
- check.error(recvList[len(recvList)-1].Pos(), "method must have exactly one receiver")
- fallthrough // continue with first receiver
- case 1:
- recv = recvList[0]
- }
-
- // TODO(gri) We should delay rtyp expansion to when we actually need the
- // receiver; thus all checks here should be delayed to later.
- rtyp, _ := deref(recv.typ)
- rtyp = expand(rtyp)
-
- // spec: "The receiver type must be of the form T or *T where T is a type name."
- // (ignore invalid types - error was reported before)
- if t := rtyp; t != Typ[Invalid] {
- var err string
- if T := asNamed(t); T != nil {
- // spec: "The type denoted by T is called the receiver base type; it must not
- // be a pointer or interface type and it must be declared in the same package
- // as the method."
- if T.obj.pkg != check.pkg {
- err = "type not defined in this package"
- if check.conf.CompilerErrorMessages {
- check.errorf(recv.pos, "cannot define new methods on non-local type %s", recv.typ)
- err = ""
- }
- } else {
- switch u := optype(T).(type) {
- case *Basic:
- // unsafe.Pointer is treated like a regular pointer
- if u.kind == UnsafePointer {
- err = "unsafe.Pointer"
- }
- case *Pointer, *Interface:
- err = "pointer or interface type"
- }
- }
- } else if T := asBasic(t); T != nil {
- err = "basic or unnamed type"
- if check.conf.CompilerErrorMessages {
- check.errorf(recv.pos, "cannot define new methods on non-local type %s", recv.typ)
- err = ""
- }
- } else {
- check.errorf(recv.pos, "invalid receiver type %s", recv.typ)
- }
- if err != "" {
- check.errorf(recv.pos, "invalid receiver type %s (%s)", recv.typ, err)
- // ok to continue
- }
- }
- sig.recv = recv
- }
-
- sig.params = NewTuple(params...)
- sig.results = NewTuple(results...)
- sig.variadic = variadic
-}
-
// goTypeName returns the Go type name for typ and
// removes any occurrences of "types2." from that name.
func goTypeName(typ Type) string {
@@ -647,44 +421,32 @@ func (check *Checker) typOrNil(e syntax.Expr) Type {
return Typ[Invalid]
}
-func (check *Checker) instantiatedType(x syntax.Expr, targs []syntax.Expr, def *Named) Type {
- b := check.genericType(x, true) // TODO(gri) what about cycles?
- if b == Typ[Invalid] {
- return b // error already reported
+func (check *Checker) instantiatedType(x syntax.Expr, targsx []syntax.Expr, def *Named) Type {
+ base := check.genericType(x, true)
+ if base == Typ[Invalid] {
+ return base // error already reported
}
- base := asNamed(b)
- if base == nil {
- unreachable() // should have been caught by genericType
- }
-
- // create a new type instance rather than instantiate the type
- // TODO(gri) should do argument number check here rather than
- // when instantiating the type?
- typ := new(instance)
- def.setUnderlying(typ)
-
- typ.check = check
- typ.pos = x.Pos()
- typ.base = base
- // evaluate arguments (always)
- typ.targs = check.typeList(targs)
- if typ.targs == nil {
+ // evaluate arguments
+ targs := check.typeList(targsx)
+ if targs == nil {
def.setUnderlying(Typ[Invalid]) // avoid later errors due to lazy instantiation
return Typ[Invalid]
}
- // determine argument positions (for error reporting)
- typ.poslist = make([]syntax.Pos, len(targs))
- for i, arg := range targs {
- typ.poslist[i] = syntax.StartPos(arg)
+ // determine argument positions
+ posList := make([]syntax.Pos, len(targs))
+ for i, arg := range targsx {
+ posList[i] = syntax.StartPos(arg)
}
+ typ := check.InstantiateLazy(x.Pos(), base, targs, posList, true)
+ def.setUnderlying(typ)
+
// make sure we check instantiation works at least once
// and that the resulting type is valid
check.later(func() {
- t := typ.expand()
- check.validType(t, nil)
+ check.validType(typ, nil)
})
return typ
@@ -732,537 +494,3 @@ func (check *Checker) typeList(list []syntax.Expr) []Type {
}
return res
}
-
-// collectParams declares the parameters of list in scope and returns the corresponding
-// variable list. If type0 != nil, it is used instead of the first type in list.
-func (check *Checker) collectParams(scope *Scope, list []*syntax.Field, type0 syntax.Expr, variadicOk bool) (params []*Var, variadic bool) {
- if list == nil {
- return
- }
-
- var named, anonymous bool
-
- var typ Type
- var prev syntax.Expr
- for i, field := range list {
- ftype := field.Type
- // type-check type of grouped fields only once
- if ftype != prev {
- prev = ftype
- if i == 0 && type0 != nil {
- ftype = type0
- }
- if t, _ := ftype.(*syntax.DotsType); t != nil {
- ftype = t.Elem
- if variadicOk && i == len(list)-1 {
- variadic = true
- } else {
- check.softErrorf(t, "can only use ... with final parameter in list")
- // ignore ... and continue
- }
- }
- typ = check.varType(ftype)
- }
- // The parser ensures that f.Tag is nil and we don't
- // care if a constructed AST contains a non-nil tag.
- if field.Name != nil {
- // named parameter
- name := field.Name.Value
- if name == "" {
- check.error(field.Name, invalidAST+"anonymous parameter")
- // ok to continue
- }
- par := NewParam(field.Name.Pos(), check.pkg, name, typ)
- check.declare(scope, field.Name, par, scope.pos)
- params = append(params, par)
- named = true
- } else {
- // anonymous parameter
- par := NewParam(ftype.Pos(), check.pkg, "", typ)
- check.recordImplicit(field, par)
- params = append(params, par)
- anonymous = true
- }
- }
-
- if named && anonymous {
- check.error(list[0], invalidAST+"list contains both named and anonymous parameters")
- // ok to continue
- }
-
- // For a variadic function, change the last parameter's type from T to []T.
- // Since we type-checked T rather than ...T, we also need to retro-actively
- // record the type for ...T.
- if variadic {
- last := params[len(params)-1]
- last.typ = &Slice{elem: last.typ}
- check.recordTypeAndValue(list[len(list)-1].Type, typexpr, last.typ, nil)
- }
-
- return
-}
-
-func (check *Checker) declareInSet(oset *objset, pos syntax.Pos, obj Object) bool {
- if alt := oset.insert(obj); alt != nil {
- var err error_
- err.errorf(pos, "%s redeclared", obj.Name())
- err.recordAltDecl(alt)
- check.report(&err)
- return false
- }
- return true
-}
-
-func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType, def *Named) {
- var tname *syntax.Name // most recent "type" name
- var types []syntax.Expr
- for _, f := range iface.MethodList {
- if f.Name != nil {
- // We have a method with name f.Name, or a type
- // of a type list (f.Name.Value == "type").
- name := f.Name.Value
- if name == "_" {
- if check.conf.CompilerErrorMessages {
- check.error(f.Name, "methods must have a unique non-blank name")
- } else {
- check.error(f.Name, "invalid method name _")
- }
- continue // ignore
- }
-
- if name == "type" {
- // Always collect all type list entries, even from
- // different type lists, under the assumption that
- // the author intended to include all types.
- types = append(types, f.Type)
- if tname != nil && tname != f.Name {
- check.error(f.Name, "cannot have multiple type lists in an interface")
- }
- tname = f.Name
- continue
- }
-
- typ := check.typ(f.Type)
- sig, _ := typ.(*Signature)
- if sig == nil {
- if typ != Typ[Invalid] {
- check.errorf(f.Type, invalidAST+"%s is not a method signature", typ)
- }
- continue // ignore
- }
-
- // Always type-check method type parameters but complain if they are not enabled.
- // (This extra check is needed here because interface method signatures don't have
- // a receiver specification.)
- if sig.tparams != nil && !acceptMethodTypeParams {
- check.error(f.Type, "methods cannot have type parameters")
- }
-
- // use named receiver type if available (for better error messages)
- var recvTyp Type = ityp
- if def != nil {
- recvTyp = def
- }
- sig.recv = NewVar(f.Name.Pos(), check.pkg, "", recvTyp)
-
- m := NewFunc(f.Name.Pos(), check.pkg, name, sig)
- check.recordDef(f.Name, m)
- ityp.methods = append(ityp.methods, m)
- } else {
- // We have an embedded type. completeInterface will
- // eventually verify that we have an interface.
- ityp.embeddeds = append(ityp.embeddeds, check.typ(f.Type))
- check.posMap[ityp] = append(check.posMap[ityp], f.Type.Pos())
- }
- }
-
- // type constraints
- ityp.types = NewSum(check.collectTypeConstraints(iface.Pos(), types))
-
- if len(ityp.methods) == 0 && ityp.types == nil && len(ityp.embeddeds) == 0 {
- // empty interface
- ityp.allMethods = markComplete
- return
- }
-
- // sort for API stability
- sortMethods(ityp.methods)
- sortTypes(ityp.embeddeds)
-
- check.later(func() { check.completeInterface(iface.Pos(), ityp) })
-}
-
-func (check *Checker) completeInterface(pos syntax.Pos, ityp *Interface) {
- if ityp.allMethods != nil {
- return
- }
-
- // completeInterface may be called via the LookupFieldOrMethod,
- // MissingMethod, Identical, or IdenticalIgnoreTags external API
- // in which case check will be nil. In this case, type-checking
- // must be finished and all interfaces should have been completed.
- if check == nil {
- panic("internal error: incomplete interface")
- }
-
- if check.conf.Trace {
- // Types don't generally have position information.
- // If we don't have a valid pos provided, try to use
- // one close enough.
- if !pos.IsKnown() && len(ityp.methods) > 0 {
- pos = ityp.methods[0].pos
- }
-
- check.trace(pos, "complete %s", ityp)
- check.indent++
- defer func() {
- check.indent--
- check.trace(pos, "=> %s (methods = %v, types = %v)", ityp, ityp.allMethods, ityp.allTypes)
- }()
- }
-
- // An infinitely expanding interface (due to a cycle) is detected
- // elsewhere (Checker.validType), so here we simply assume we only
- // have valid interfaces. Mark the interface as complete to avoid
- // infinite recursion if the validType check occurs later for some
- // reason.
- ityp.allMethods = markComplete
-
- // Methods of embedded interfaces are collected unchanged; i.e., the identity
- // of a method I.m's Func Object of an interface I is the same as that of
- // the method m in an interface that embeds interface I. On the other hand,
- // if a method is embedded via multiple overlapping embedded interfaces, we
- // don't provide a guarantee which "original m" got chosen for the embedding
- // interface. See also issue #34421.
- //
- // If we don't care to provide this identity guarantee anymore, instead of
- // reusing the original method in embeddings, we can clone the method's Func
- // Object and give it the position of a corresponding embedded interface. Then
- // we can get rid of the mpos map below and simply use the cloned method's
- // position.
-
- var seen objset
- var methods []*Func
- mpos := make(map[*Func]syntax.Pos) // method specification or method embedding position, for good error messages
- addMethod := func(pos syntax.Pos, m *Func, explicit bool) {
- switch other := seen.insert(m); {
- case other == nil:
- methods = append(methods, m)
- mpos[m] = pos
- case explicit:
- var err error_
- err.errorf(pos, "duplicate method %s", m.name)
- err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
- check.report(&err)
- default:
- // We have a duplicate method name in an embedded (not explicitly declared) method.
- // Check method signatures after all types are computed (issue #33656).
- // If we're pre-go1.14 (overlapping embeddings are not permitted), report that
- // error here as well (even though we could do it eagerly) because it's the same
- // error message.
- check.later(func() {
- if !check.allowVersion(m.pkg, 1, 14) || !check.identical(m.typ, other.Type()) {
- var err error_
- err.errorf(pos, "duplicate method %s", m.name)
- err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
- check.report(&err)
- }
- })
- }
- }
-
- for _, m := range ityp.methods {
- addMethod(m.pos, m, true)
- }
-
- // collect types
- allTypes := ityp.types
-
- posList := check.posMap[ityp]
- for i, typ := range ityp.embeddeds {
- pos := posList[i] // embedding position
- utyp := under(typ)
- etyp := asInterface(utyp)
- if etyp == nil {
- if utyp != Typ[Invalid] {
- var format string
- if _, ok := utyp.(*TypeParam); ok {
- format = "%s is a type parameter, not an interface"
- } else {
- format = "%s is not an interface"
- }
- check.errorf(pos, format, typ)
- }
- continue
- }
- check.completeInterface(pos, etyp)
- for _, m := range etyp.allMethods {
- addMethod(pos, m, false) // use embedding position pos rather than m.pos
- }
- allTypes = intersect(allTypes, etyp.allTypes)
- }
-
- if methods != nil {
- sortMethods(methods)
- ityp.allMethods = methods
- }
- ityp.allTypes = allTypes
-}
-
-// intersect computes the intersection of the types x and y.
-// Note: A incomming nil type stands for the top type. A top
-// type result is returned as nil.
-func intersect(x, y Type) (r Type) {
- defer func() {
- if r == theTop {
- r = nil
- }
- }()
-
- switch {
- case x == theBottom || y == theBottom:
- return theBottom
- case x == nil || x == theTop:
- return y
- case y == nil || x == theTop:
- return x
- }
-
- xtypes := unpack(x)
- ytypes := unpack(y)
- // Compute the list rtypes which includes only
- // types that are in both xtypes and ytypes.
- // Quadratic algorithm, but good enough for now.
- // TODO(gri) fix this
- var rtypes []Type
- for _, x := range xtypes {
- if includes(ytypes, x) {
- rtypes = append(rtypes, x)
- }
- }
-
- if rtypes == nil {
- return theBottom
- }
- return NewSum(rtypes)
-}
-
-func sortTypes(list []Type) {
- sort.Stable(byUniqueTypeName(list))
-}
-
-// byUniqueTypeName named type lists can be sorted by their unique type names.
-type byUniqueTypeName []Type
-
-func (a byUniqueTypeName) Len() int { return len(a) }
-func (a byUniqueTypeName) Less(i, j int) bool { return sortName(a[i]) < sortName(a[j]) }
-func (a byUniqueTypeName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-func sortName(t Type) string {
- if named := asNamed(t); named != nil {
- return named.obj.Id()
- }
- return ""
-}
-
-func sortMethods(list []*Func) {
- sort.Sort(byUniqueMethodName(list))
-}
-
-func assertSortedMethods(list []*Func) {
- if !debug {
- panic("internal error: assertSortedMethods called outside debug mode")
- }
- if !sort.IsSorted(byUniqueMethodName(list)) {
- panic("internal error: methods not sorted")
- }
-}
-
-// byUniqueMethodName method lists can be sorted by their unique method names.
-type byUniqueMethodName []*Func
-
-func (a byUniqueMethodName) Len() int { return len(a) }
-func (a byUniqueMethodName) Less(i, j int) bool { return a[i].less(a[j]) }
-func (a byUniqueMethodName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-func (check *Checker) tag(t *syntax.BasicLit) string {
- // If t.Bad, an error was reported during parsing.
- if t != nil && !t.Bad {
- if t.Kind == syntax.StringLit {
- if val, err := strconv.Unquote(t.Value); err == nil {
- return val
- }
- }
- check.errorf(t, invalidAST+"incorrect tag syntax: %q", t.Value)
- }
- return ""
-}
-
-func (check *Checker) structType(styp *Struct, e *syntax.StructType) {
- if e.FieldList == nil {
- return
- }
-
- // struct fields and tags
- var fields []*Var
- var tags []string
-
- // for double-declaration checks
- var fset objset
-
- // current field typ and tag
- var typ Type
- var tag string
- add := func(ident *syntax.Name, embedded bool, pos syntax.Pos) {
- if tag != "" && tags == nil {
- tags = make([]string, len(fields))
- }
- if tags != nil {
- tags = append(tags, tag)
- }
-
- name := ident.Value
- fld := NewField(pos, check.pkg, name, typ, embedded)
- // spec: "Within a struct, non-blank field names must be unique."
- if name == "_" || check.declareInSet(&fset, pos, fld) {
- fields = append(fields, fld)
- check.recordDef(ident, fld)
- }
- }
-
- // addInvalid adds an embedded field of invalid type to the struct for
- // fields with errors; this keeps the number of struct fields in sync
- // with the source as long as the fields are _ or have different names
- // (issue #25627).
- addInvalid := func(ident *syntax.Name, pos syntax.Pos) {
- typ = Typ[Invalid]
- tag = ""
- add(ident, true, pos)
- }
-
- var prev syntax.Expr
- for i, f := range e.FieldList {
- // Fields declared syntactically with the same type (e.g.: a, b, c T)
- // share the same type expression. Only check type if it's a new type.
- if i == 0 || f.Type != prev {
- typ = check.varType(f.Type)
- prev = f.Type
- }
- tag = ""
- if i < len(e.TagList) {
- tag = check.tag(e.TagList[i])
- }
- if f.Name != nil {
- // named field
- add(f.Name, false, f.Name.Pos())
- } else {
- // embedded field
- // spec: "An embedded type must be specified as a type name T or as a
- // pointer to a non-interface type name *T, and T itself may not be a
- // pointer type."
- pos := syntax.StartPos(f.Type)
- name := embeddedFieldIdent(f.Type)
- if name == nil {
- check.errorf(pos, "invalid embedded field type %s", f.Type)
- name = &syntax.Name{Value: "_"} // TODO(gri) need to set position to pos
- addInvalid(name, pos)
- continue
- }
- add(name, true, pos)
-
- // Because we have a name, typ must be of the form T or *T, where T is the name
- // of a (named or alias) type, and t (= deref(typ)) must be the type of T.
- // We must delay this check to the end because we don't want to instantiate
- // (via under(t)) a possibly incomplete type.
- embeddedTyp := typ // for closure below
- embeddedPos := pos
- check.later(func() {
- t, isPtr := deref(embeddedTyp)
- switch t := optype(t).(type) {
- case *Basic:
- if t == Typ[Invalid] {
- // error was reported before
- return
- }
- // unsafe.Pointer is treated like a regular pointer
- if t.kind == UnsafePointer {
- check.error(embeddedPos, "embedded field type cannot be unsafe.Pointer")
- }
- case *Pointer:
- check.error(embeddedPos, "embedded field type cannot be a pointer")
- case *Interface:
- if isPtr {
- check.error(embeddedPos, "embedded field type cannot be a pointer to an interface")
- }
- }
- })
- }
- }
-
- styp.fields = fields
- styp.tags = tags
-}
-
-func embeddedFieldIdent(e syntax.Expr) *syntax.Name {
- switch e := e.(type) {
- case *syntax.Name:
- return e
- case *syntax.Operation:
- if base := ptrBase(e); base != nil {
- // *T is valid, but **T is not
- if op, _ := base.(*syntax.Operation); op == nil || ptrBase(op) == nil {
- return embeddedFieldIdent(e.X)
- }
- }
- case *syntax.SelectorExpr:
- return e.Sel
- case *syntax.IndexExpr:
- return embeddedFieldIdent(e.X)
- }
- return nil // invalid embedded field
-}
-
-func (check *Checker) collectTypeConstraints(pos syntax.Pos, types []syntax.Expr) []Type {
- list := make([]Type, 0, len(types)) // assume all types are correct
- for _, texpr := range types {
- if texpr == nil {
- check.error(pos, invalidAST+"missing type constraint")
- continue
- }
- list = append(list, check.varType(texpr))
- }
-
- // Ensure that each type is only present once in the type list. Types may be
- // interfaces, which may not be complete yet. It's ok to do this check at the
- // end because it's not a requirement for correctness of the code.
- // Note: This is a quadratic algorithm, but type lists tend to be short.
- check.later(func() {
- for i, t := range list {
- if t := asInterface(t); t != nil {
- check.completeInterface(types[i].Pos(), t)
- }
- if includes(list[:i], t) {
- check.softErrorf(types[i], "duplicate type %s in type list", t)
- }
- }
- })
-
- return list
-}
-
-// includes reports whether typ is in list
-func includes(list []Type, typ Type) bool {
- for _, e := range list {
- if Identical(typ, e) {
- return true
- }
- }
- return false
-}
-
-func ptrBase(x *syntax.Operation) syntax.Expr {
- if x.Op == syntax.Mul && x.Y == nil {
- return x.X
- }
- return nil
-}
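To illustrate the new universeAny/universeComparable handling in Checker.ident, here is a hypothetical source fragment showing what the checker now accepts and rejects (assuming a go1.18 language version where noted).

package p

// Accepted (with a go1.18 or later language version): any and comparable
// used as constraints in a type parameter list.
func Index[T comparable](s []T, x T) int {
	for i, v := range s {
		if v == x {
			return i
		}
	}
	return -1
}

func First[T any](s []T) T {
	return s[0]
}

// At this stage of the work the checker above still rejects any outside a
// constraint position, and both names report "undeclared name: ...
// (requires version go1.18 or later)" under earlier language versions:
//
//	var x any   // error: cannot use any outside constraint position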
diff --git a/src/cmd/compile/internal/types2/unify.go b/src/cmd/compile/internal/types2/unify.go
index e1832bbb2a..ae81382fb0 100644
--- a/src/cmd/compile/internal/types2/unify.go
+++ b/src/cmd/compile/internal/types2/unify.go
@@ -6,7 +6,10 @@
package types2
-import "bytes"
+import (
+ "bytes"
+ "fmt"
+)
// The unifier maintains two separate sets of type parameters x and y
// which are used to resolve type parameters in the x and y arguments
@@ -34,7 +37,6 @@ import "bytes"
// and the respective types inferred for each type parameter.
// A unifier is created by calling newUnifier.
type unifier struct {
- check *Checker
exact bool
x, y tparamsList // x and y must be initialized via tparamsList.init
types []Type // inferred types, shared by x and y
@@ -45,8 +47,8 @@ type unifier struct {
// exactly. If exact is not set, a named type's underlying type
// is considered if unification would fail otherwise, and the
// direction of channels is ignored.
-func newUnifier(check *Checker, exact bool) *unifier {
- u := &unifier{check: check, exact: exact}
+func newUnifier(exact bool) *unifier {
+ u := &unifier{exact: exact}
u.x.unifier = u
u.y.unifier = u
return u
@@ -148,10 +150,17 @@ func (u *unifier) join(i, j int) bool {
// If typ is a type parameter of d, index returns the type parameter index.
// Otherwise, the result is < 0.
func (d *tparamsList) index(typ Type) int {
- if t, ok := typ.(*TypeParam); ok {
- if i := t.index; i < len(d.tparams) && d.tparams[i].typ == t {
- return i
- }
+ if tpar, ok := typ.(*TypeParam); ok {
+ return tparamIndex(d.tparams, tpar)
+ }
+ return -1
+}
+
+// If tpar is a type parameter in list, tparamIndex returns the type parameter index.
+// Otherwise, the result is < 0. tpar must not be nil.
+func tparamIndex(list []*TypeName, tpar *TypeParam) int {
+ if i := tpar.index; i < len(list) && list[i].typ == tpar {
+ return i
}
return -1
}
@@ -220,10 +229,6 @@ func (u *unifier) nifyEq(x, y Type, p *ifacePair) bool {
// code the corresponding changes should be made here.
// Must not be called directly from outside the unifier.
func (u *unifier) nify(x, y Type, p *ifacePair) bool {
- // types must be expanded for comparison
- x = expand(x)
- y = expand(y)
-
if !u.exact {
// If exact unification is known to fail because we attempt to
// match a type name against an unnamed type literal, consider
@@ -352,25 +357,18 @@ func (u *unifier) nify(x, y Type, p *ifacePair) bool {
u.nify(x.results, y.results, p)
}
- case *Sum:
- // This should not happen with the current internal use of sum types.
- panic("type inference across sum types not implemented")
-
case *Interface:
// Two interface types are identical if they have the same set of methods with
// the same names and identical function types. Lower-case method names from
// different packages are always different. The order of the methods is irrelevant.
if y, ok := y.(*Interface); ok {
- // If identical0 is called (indirectly) via an external API entry point
- // (such as Identical, IdenticalIgnoreTags, etc.), check is nil. But in
- // that case, interfaces are expected to be complete and lazy completion
- // here is not needed.
- if u.check != nil {
- u.check.completeInterface(nopos, x)
- u.check.completeInterface(nopos, y)
+ xset := x.typeSet()
+ yset := y.typeSet()
+ if !xset.terms.equal(yset.terms) {
+ return false
}
- a := x.allMethods
- b := y.allMethods
+ a := xset.methods
+ b := yset.methods
if len(a) == len(b) {
// Interface types are the only types where cycles can occur
// that are not "terminated" via named types; and such cycles
@@ -434,6 +432,8 @@ func (u *unifier) nify(x, y Type, p *ifacePair) bool {
// return x.obj == y.obj
// }
if y, ok := y.(*Named); ok {
+ x.expand()
+ y.expand()
// TODO(gri) This is not always correct: two types may have the same names
// in the same package if one of them is nested in a function.
// Extremely unlikely but we need an always correct solution.
@@ -454,15 +454,11 @@ func (u *unifier) nify(x, y Type, p *ifacePair) bool {
// are identical if they originate in the same declaration.
return x == y
- // case *instance:
- // unreachable since types are expanded
-
case nil:
// avoid a crash in case of nil type
default:
- u.check.dump("### u.nify(%s, %s), u.x.tparams = %s", x, y, u.x.tparams)
- unreachable()
+ panic(fmt.Sprintf("### u.nify(%s, %s), u.x.tparams = %s", x, y, u.x.tparams))
}
return false
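tparamIndex relies on the fact that each TypeParam records the index at which it was declared, so membership in a type parameter list is a constant-time check rather than a scan. Below is a minimal standalone sketch of that lookup (typeParam and typeName are illustration-only stand-ins, not the types2 API).

package main

import "fmt"

// Illustration-only stand-ins: each type parameter records the index at
// which it was declared, so tparamIndex can check list[tpar.index]
// directly instead of scanning the list.
type typeParam struct {
	index int
	name  string
}

type typeName struct {
	typ *typeParam
}

func tparamIndex(list []*typeName, tpar *typeParam) int {
	if i := tpar.index; i < len(list) && list[i].typ == tpar {
		return i
	}
	return -1 // tpar is not a parameter of this list
}

func main() {
	s := &typeParam{index: 0, name: "S"}
	t := &typeParam{index: 1, name: "T"}
	list := []*typeName{{typ: s}, {typ: t}}

	fmt.Println(tparamIndex(list, t))                               // 1
	fmt.Println(tparamIndex(list, &typeParam{index: 1, name: "U"})) // -1: different declaration
}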
diff --git a/src/cmd/compile/internal/types2/union.go b/src/cmd/compile/internal/types2/union.go
new file mode 100644
index 0000000000..f61c37a6af
--- /dev/null
+++ b/src/cmd/compile/internal/types2/union.go
@@ -0,0 +1,150 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import "cmd/compile/internal/syntax"
+
+// ----------------------------------------------------------------------------
+// API
+
+// A Union represents a union of terms embedded in an interface.
+type Union struct {
+ terms []*Term // list of syntactical terms (not a canonicalized termlist)
+ tset *TypeSet // type set described by this union, computed lazily
+}
+
+// NewUnion returns a new Union type with the given terms.
+// It is an error to create an empty union; they are syntactically not possible.
+func NewUnion(terms []*Term) *Union {
+ if len(terms) == 0 {
+ panic("empty union")
+ }
+ return &Union{terms, nil}
+}
+
+func (u *Union) Len() int { return len(u.terms) }
+func (u *Union) Term(i int) *Term { return u.terms[i] }
+
+func (u *Union) Underlying() Type { return u }
+func (u *Union) String() string { return TypeString(u, nil) }
+
+// A Term represents a term in a Union.
+type Term term
+
+// NewTerm returns a new union term.
+func NewTerm(tilde bool, typ Type) *Term { return &Term{tilde, typ} }
+
+func (t *Term) Tilde() bool { return t.tilde }
+func (t *Term) Type() Type { return t.typ }
+func (t *Term) String() string { return (*term)(t).String() }
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+// Avoid excessive type-checking times due to quadratic termlist operations.
+const maxTermCount = 100
+
+// parseUnion parses the given list of type expressions tlist as a union of
+// those expressions. The result is a Union type, or Typ[Invalid] for some
+// errors.
+func parseUnion(check *Checker, tlist []syntax.Expr) Type {
+ var terms []*Term
+ for _, x := range tlist {
+ tilde, typ := parseTilde(check, x)
+ if len(tlist) == 1 && !tilde {
+ return typ // single type (optimization)
+ }
+ if len(terms) >= maxTermCount {
+ check.errorf(x, "cannot handle more than %d union terms (implementation limitation)", maxTermCount)
+ return Typ[Invalid]
+ }
+ terms = append(terms, NewTerm(tilde, typ))
+ }
+
+ // Check validity of terms.
+ // Do this check later because it requires types to be set up.
+ // Note: This is a quadratic algorithm, but unions tend to be short.
+ check.later(func() {
+ for i, t := range terms {
+ if t.typ == Typ[Invalid] {
+ continue
+ }
+
+ x := tlist[i]
+ pos := syntax.StartPos(x)
+ // We may not know the position of x if it was a typechecker-
+ // introduced ~T term for a type list entry T. Use the position
+ // of T instead.
+ // TODO(gri) remove this test once we don't support type lists anymore
+ if !pos.IsKnown() {
+ if op, _ := x.(*syntax.Operation); op != nil {
+ pos = syntax.StartPos(op.X)
+ }
+ }
+
+ u := under(t.typ)
+ f, _ := u.(*Interface)
+ if t.tilde {
+ if f != nil {
+ check.errorf(x, "invalid use of ~ (%s is an interface)", t.typ)
+ continue // don't report another error for t
+ }
+
+ if !Identical(u, t.typ) {
+ check.errorf(x, "invalid use of ~ (underlying type of %s is %s)", t.typ, u)
+ continue // don't report another error for t
+ }
+ }
+
+ // Stand-alone embedded interfaces are ok and are handled by the single-type case
+ // in the beginning. Embedded interfaces with tilde are excluded above. If we reach
+ // here, we must have at least two terms in the union.
+ if f != nil && !f.typeSet().IsTypeSet() {
+ check.errorf(pos, "cannot use %s in union (interface contains methods)", t)
+ continue // don't report another error for t
+ }
+
+ // Report overlapping (non-disjoint) terms such as
+ // a|a, a|~a, ~a|~a, and ~a|A (where under(A) == a).
+ if j := overlappingTerm(terms[:i], t); j >= 0 {
+ check.softErrorf(pos, "overlapping terms %s and %s", t, terms[j])
+ }
+ }
+ })
+
+ return &Union{terms, nil}
+}
+
+func parseTilde(check *Checker, x syntax.Expr) (tilde bool, typ Type) {
+ if op, _ := x.(*syntax.Operation); op != nil && op.Op == syntax.Tilde {
+ x = op.X
+ tilde = true
+ }
+ typ = check.anyType(x)
+ // embedding stand-alone type parameters is not permitted (issue #47127).
+ if _, ok := under(typ).(*TypeParam); ok {
+ check.error(x, "cannot embed a type parameter")
+ typ = Typ[Invalid]
+ }
+ return
+}
+
+// overlappingTerm reports the index of the term x in terms that is not
+// disjoint from y (i.e., that overlaps y). The result is < 0 if there is
+// no such term.
+func overlappingTerm(terms []*Term, y *Term) int {
+ for i, x := range terms {
+ // disjoint requires non-nil, non-top arguments
+ if debug {
+ if x == nil || x.typ == nil || y == nil || y.typ == nil {
+ panic("empty or top union term")
+ }
+ }
+ if !(*term)(x).disjoint((*term)(y)) {
+ return i
+ }
+ }
+ return -1
+}
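As a rough guide to the rules enforced by parseUnion and parseTilde above, the following hypothetical declarations show a union the checker accepts and, in comments, terms it would reject (the error texts paraphrase the diagnostics added in this file).

package p

type MyInt int

// Accepted: a union of disjoint terms; ~T is only legal when under(T) == T.
type Number interface {
	int | ~float32 | ~float64
}

// Each of the following union terms would be rejected by the checks above:
//
//	~MyInt       invalid use of ~ (underlying type of MyInt is int)
//	int | ~int   overlapping terms ~int and int (soft error)
//	int | error  cannot use error in union (interface contains methods)
//	int | T      cannot embed a type parameter (for a type parameter T)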
diff --git a/src/cmd/compile/internal/types2/universe.go b/src/cmd/compile/internal/types2/universe.go
index 76d4e55e84..f14c079222 100644
--- a/src/cmd/compile/internal/types2/universe.go
+++ b/src/cmd/compile/internal/types2/universe.go
@@ -20,11 +20,12 @@ var Universe *Scope
var Unsafe *Package
var (
- universeIota *Const
- universeByte *Basic // uint8 alias, but has name "byte"
- universeRune *Basic // int32 alias, but has name "rune"
- universeAny *Interface
- universeError *Named
+ universeIota Object
+ universeByte Type // uint8 alias, but has name "byte"
+ universeRune Type // int32 alias, but has name "rune"
+ universeAny Object
+ universeError Type
+ universeComparable Object
)
// Typ contains the predeclared *Basic types indexed by their
@@ -77,20 +78,30 @@ func defPredeclaredTypes() {
def(NewTypeName(nopos, nil, t.name, t))
}
- // any
- // (Predeclared and entered into universe scope so we do all the
- // usual checks; but removed again from scope later since it's
- // only visible as constraint in a type parameter list.)
+ // type any = interface{}
def(NewTypeName(nopos, nil, "any", &emptyInterface))
- // Error has a nil package in its qualified name since it is in no package
+ // type error interface{ Error() string }
{
+ obj := NewTypeName(nopos, nil, "error", nil)
+ obj.setColor(black)
res := NewVar(nopos, nil, "", Typ[String])
- sig := &Signature{results: NewTuple(res)}
+ sig := NewSignature(nil, nil, NewTuple(res), false)
err := NewFunc(nopos, nil, "Error", sig)
- typ := &Named{underlying: NewInterfaceType([]*Func{err}, nil).Complete()}
+ ityp := &Interface{obj, []*Func{err}, nil, nil, true, nil}
+ computeInterfaceTypeSet(nil, nopos, ityp) // prevent races due to lazy computation of tset
+ typ := NewNamed(obj, ityp, nil)
sig.recv = NewVar(nopos, nil, "", typ)
- def(NewTypeName(nopos, nil, "error", typ))
+ def(obj)
+ }
+
+ // type comparable interface{ /* type set marked comparable */ }
+ {
+ obj := NewTypeName(nopos, nil, "comparable", nil)
+ obj.setColor(black)
+ ityp := &Interface{obj, nil, nil, nil, true, &TypeSet{true, nil, allTermlist}}
+ NewNamed(obj, ityp, nil)
+ def(obj)
}
}
@@ -200,33 +211,6 @@ func DefPredeclaredTestFuncs() {
def(newBuiltin(_Trace))
}
-func defPredeclaredComparable() {
- // The "comparable" interface can be imagined as defined like
- //
- // type comparable interface {
- // == () untyped bool
- // != () untyped bool
- // }
- //
- // == and != cannot be user-declared but we can declare
- // a magic method == and check for its presence when needed.
-
- // Define interface { == () }. We don't care about the signature
- // for == so leave it empty except for the receiver, which is
- // set up later to match the usual interface method assumptions.
- sig := new(Signature)
- eql := NewFunc(nopos, nil, "==", sig)
- iface := NewInterfaceType([]*Func{eql}, nil).Complete()
-
- // set up the defined type for the interface
- obj := NewTypeName(nopos, nil, "comparable", nil)
- named := NewNamed(obj, iface, nil)
- obj.color_ = black
- sig.recv = NewVar(nopos, nil, "", named) // complete == signature
-
- def(obj)
-}
-
func init() {
Universe = NewScope(nil, nopos, nopos, "universe")
Unsafe = NewPackage("unsafe", "unsafe")
@@ -236,16 +220,13 @@ func init() {
defPredeclaredConsts()
defPredeclaredNil()
defPredeclaredFuncs()
- defPredeclaredComparable()
-
- universeIota = Universe.Lookup("iota").(*Const)
- universeByte = Universe.Lookup("byte").(*TypeName).typ.(*Basic)
- universeRune = Universe.Lookup("rune").(*TypeName).typ.(*Basic)
- universeAny = Universe.Lookup("any").(*TypeName).typ.(*Interface)
- universeError = Universe.Lookup("error").(*TypeName).typ.(*Named)
- // "any" is only visible as constraint in a type parameter list
- delete(Universe.elems, "any")
+ universeIota = Universe.Lookup("iota")
+ universeByte = Universe.Lookup("byte").Type()
+ universeRune = Universe.Lookup("rune").Type()
+ universeAny = Universe.Lookup("any")
+ universeError = Universe.Lookup("error").Type()
+ universeComparable = Universe.Lookup("comparable")
}
// Objects with names containing blanks are internal and not entered into
@@ -277,6 +258,6 @@ func def(obj Object) {
}
}
if scope.Insert(obj) != nil {
- panic("internal error: double declaration")
+ panic("double declaration of predeclared identifier")
}
}
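The reworked init above only caches what Universe.Lookup already provides. Since cmd/compile/internal/types2 is an internal package, the sketch below uses the exported go/types mirror purely to illustrate that lookup pattern.

package main

import (
	"fmt"
	"go/types" // exported mirror of types2, used here only for illustration
)

func main() {
	// The same information cached by the compiler's init is reachable
	// through Universe.Lookup on the predeclared scope:
	fmt.Println(types.Universe.Lookup("byte").Type())               // the uint8 alias named byte
	fmt.Println(types.Universe.Lookup("error").Type().Underlying()) // interface{ Error() string }
	fmt.Println(types.Universe.Lookup("iota"))                      // the predeclared iota constant
}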
diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go
index 6d697a53ae..d4c1aafdc1 100644
--- a/src/cmd/compile/internal/walk/assign.go
+++ b/src/cmd/compile/internal/walk/assign.go
@@ -429,6 +429,7 @@ func readsMemory(n ir.Node) bool {
ir.OBITNOT,
ir.OCONV,
ir.OCONVIFACE,
+ ir.OCONVIDATA,
ir.OCONVNOP,
ir.ODIV,
ir.ODOT,
diff --git a/src/cmd/compile/internal/walk/builtin.go b/src/cmd/compile/internal/walk/builtin.go
index 14efc05e32..af4f8f4822 100644
--- a/src/cmd/compile/internal/walk/builtin.go
+++ b/src/cmd/compile/internal/walk/builtin.go
@@ -641,16 +641,9 @@ func walkPrint(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
return walkStmt(typecheck.Stmt(r))
}
-// walkRecover walks an ORECOVER node.
-func walkRecover(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
- // Call gorecover with the FP of this frame.
- // FP is equal to caller's SP plus FixedFrameSize().
- var fp ir.Node = mkcall("getcallersp", types.Types[types.TUINTPTR], init)
- if off := base.Ctxt.FixedFrameSize(); off != 0 {
- fp = ir.NewBinaryExpr(fp.Pos(), ir.OADD, fp, ir.NewInt(off))
- }
- fp = ir.NewConvExpr(fp.Pos(), ir.OCONVNOP, types.NewPtr(types.Types[types.TINT32]), fp)
- return mkcall("gorecover", nn.Type(), init, fp)
+// walkRecoverFP walks an ORECOVERFP node.
+func walkRecoverFP(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
+ return mkcall("gorecover", nn.Type(), init, walkExpr(nn.Args[0], init))
}
func walkUnsafeSlice(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
diff --git a/src/cmd/compile/internal/walk/closure.go b/src/cmd/compile/internal/walk/closure.go
index 2194e1c5b0..902e01ef38 100644
--- a/src/cmd/compile/internal/walk/closure.go
+++ b/src/cmd/compile/internal/walk/closure.go
@@ -37,14 +37,6 @@ func directClosureCall(n *ir.CallExpr) {
return // leave for walkClosure to handle
}
- // If wrapGoDefer() in the order phase has flagged this call,
- // avoid eliminating the closure even if there is a direct call to
- // (the closure is needed to simplify the register ABI). See
- // wrapGoDefer for more details.
- if n.PreserveClosure {
- return
- }
-
// We are going to insert captured variables before input args.
var params []*types.Field
var decls []*ir.Name
@@ -122,6 +114,9 @@ func walkClosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node {
clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ), nil)
clos.SetEsc(clo.Esc())
clos.List = append([]ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, clofn.Nname)}, closureArgs(clo)...)
+ for i, value := range clos.List {
+ clos.List[i] = ir.NewStructKeyExpr(base.Pos, typ.Field(i), value)
+ }
addr := typecheck.NodAddr(clos)
addr.SetEsc(clo.Esc())
@@ -161,7 +156,7 @@ func closureArgs(clo *ir.ClosureExpr) []ir.Node {
return args
}
-func walkCallPart(n *ir.SelectorExpr, init *ir.Nodes) ir.Node {
+func walkMethodValue(n *ir.SelectorExpr, init *ir.Nodes) ir.Node {
// Create closure in the form of a composite literal.
// For x.M with receiver (x) type T, the generated code looks like:
//
@@ -175,18 +170,16 @@ func walkCallPart(n *ir.SelectorExpr, init *ir.Nodes) ir.Node {
n.X = cheapExpr(n.X, init)
n.X = walkExpr(n.X, nil)
- tab := typecheck.Expr(ir.NewUnaryExpr(base.Pos, ir.OITAB, n.X))
-
- c := ir.NewUnaryExpr(base.Pos, ir.OCHECKNIL, tab)
- c.SetTypecheck(1)
- init.Append(c)
+ tab := ir.NewUnaryExpr(base.Pos, ir.OITAB, n.X)
+ check := ir.NewUnaryExpr(base.Pos, ir.OCHECKNIL, tab)
+ init.Append(typecheck.Stmt(check))
}
- typ := typecheck.PartialCallType(n)
+ typ := typecheck.MethodValueType(n)
clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ), nil)
clos.SetEsc(n.Esc())
- clos.List = []ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, typecheck.MethodValueWrapper(n).Nname), n.X}
+ clos.List = []ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, methodValueWrapper(n)), n.X}
addr := typecheck.NodAddr(clos)
addr.SetEsc(n.Esc())
@@ -205,3 +198,74 @@ func walkCallPart(n *ir.SelectorExpr, init *ir.Nodes) ir.Node {
return walkExpr(cfn, init)
}
+
+// methodValueWrapper returns the ONAME node representing the
+// wrapper function (*-fm) needed for the given method value. If the
+// wrapper function hasn't already been created yet, it's created and
+// added to typecheck.Target.Decls.
+func methodValueWrapper(dot *ir.SelectorExpr) *ir.Name {
+ if dot.Op() != ir.OMETHVALUE {
+ base.Fatalf("methodValueWrapper: unexpected %v (%v)", dot, dot.Op())
+ }
+
+ t0 := dot.Type()
+ meth := dot.Sel
+ rcvrtype := dot.X.Type()
+ sym := ir.MethodSymSuffix(rcvrtype, meth, "-fm")
+
+ if sym.Uniq() {
+ return sym.Def.(*ir.Name)
+ }
+ sym.SetUniq(true)
+
+ savecurfn := ir.CurFunc
+ saveLineNo := base.Pos
+ ir.CurFunc = nil
+
+ // Set line number equal to the line number where the method is declared.
+ if pos := dot.Selection.Pos; pos.IsKnown() {
+ base.Pos = pos
+ }
+ // Note: !dot.Selection.Pos.IsKnown() happens for method expressions where
+ // the method is implicitly declared. The Error method of the
+ // built-in error type is one such method. We leave the line
+ // number at the use of the method expression in this
+ // case. See issue 29389.
+
+ tfn := ir.NewFuncType(base.Pos, nil,
+ typecheck.NewFuncParams(t0.Params(), true),
+ typecheck.NewFuncParams(t0.Results(), false))
+
+ fn := typecheck.DeclFunc(sym, tfn)
+ fn.SetDupok(true)
+ fn.SetWrapper(true)
+
+ // Declare and initialize variable holding receiver.
+ ptr := ir.NewHiddenParam(base.Pos, fn, typecheck.Lookup(".this"), rcvrtype)
+
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, ir.NewSelectorExpr(base.Pos, ir.OXDOT, ptr, meth), nil)
+ call.Args = ir.ParamNames(tfn.Type())
+ call.IsDDD = tfn.Type().IsVariadic()
+
+ var body ir.Node = call
+ if t0.NumResults() != 0 {
+ ret := ir.NewReturnStmt(base.Pos, nil)
+ ret.Results = []ir.Node{call}
+ body = ret
+ }
+
+ fn.Body = []ir.Node{body}
+ typecheck.FinishFuncBody()
+
+ typecheck.Func(fn)
+ // Need to typecheck the body of the just-generated wrapper.
+ // typecheckslice() requires that Curfn is set when processing an ORETURN.
+ ir.CurFunc = fn
+ typecheck.Stmts(fn.Body)
+ sym.Def = fn.Nname
+ typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
+ ir.CurFunc = savecurfn
+ base.Pos = saveLineNo
+
+ return fn.Nname
+}
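
As a source-level picture of what methodValueWrapper above generates: a method value such as c.Add is lowered to a closure whose code pointer is the hidden (*Counter).Add-fm wrapper and whose captured value is the receiver. A rough sketch of the equivalent hand-written code (illustrative only; the -fm name and exact closure layout are compiler internals):

    package main

    import "fmt"

    type Counter struct{ n int }

    func (c *Counter) Add(d int) int { c.n += d; return c.n }

    func main() {
        c := &Counter{}
        f := c.Add // method value: a closure pairing the hidden (*Counter).Add-fm wrapper with c

        // Conceptually, the generated wrapper behaves like this hand-written one,
        // forwarding to the method through the captured receiver:
        wrapper := func(d int) int { return c.Add(d) }

        fmt.Println(f(2), wrapper(3))
    }
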
diff --git a/src/cmd/compile/internal/walk/complit.go b/src/cmd/compile/internal/walk/complit.go
index abd920d646..6c6b4982a0 100644
--- a/src/cmd/compile/internal/walk/complit.go
+++ b/src/cmd/compile/internal/walk/complit.go
@@ -218,11 +218,11 @@ func fixedlit(ctxt initContext, kind initKind, n *ir.CompLitExpr, var_ ir.Node,
case ir.OSTRUCTLIT:
splitnode = func(rn ir.Node) (ir.Node, ir.Node) {
r := rn.(*ir.StructKeyExpr)
- if r.Field.IsBlank() || isBlank {
+ if r.Sym().IsBlank() || isBlank {
return ir.BlankNode, r.Value
}
ir.SetPos(r)
- return ir.NewSelectorExpr(base.Pos, ir.ODOT, var_, r.Field), r.Value
+ return ir.NewSelectorExpr(base.Pos, ir.ODOT, var_, r.Sym()), r.Value
}
default:
base.Fatalf("fixedlit bad op: %v", n.Op())
diff --git a/src/cmd/compile/internal/walk/convert.go b/src/cmd/compile/internal/walk/convert.go
index 26e17a126f..27a07ce4b6 100644
--- a/src/cmd/compile/internal/walk/convert.go
+++ b/src/cmd/compile/internal/walk/convert.go
@@ -41,46 +41,98 @@ func walkConv(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
// walkConvInterface walks an OCONVIFACE node.
func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+
n.X = walkExpr(n.X, init)
fromType := n.X.Type()
toType := n.Type()
-
- if !fromType.IsInterface() && !ir.IsBlank(ir.CurFunc.Nname) { // skip unnamed functions (func _())
+ if !fromType.IsInterface() && !ir.IsBlank(ir.CurFunc.Nname) {
+ // skip unnamed functions (func _())
reflectdata.MarkTypeUsedInInterface(fromType, ir.CurFunc.LSym)
}
- // typeword generates the type word of the interface value.
- typeword := func() ir.Node {
+ if !fromType.IsInterface() {
+ var typeWord ir.Node
if toType.IsEmptyInterface() {
- return reflectdata.TypePtr(fromType)
+ typeWord = reflectdata.TypePtr(fromType)
+ } else {
+ typeWord = reflectdata.ITabAddr(fromType, toType)
}
- return reflectdata.ITabAddr(fromType, toType)
- }
-
- // Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped.
- if types.IsDirectIface(fromType) {
- l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), n.X)
+ l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeWord, dataWord(n.X, init, n.Esc() != ir.EscNone))
l.SetType(toType)
l.SetTypecheck(n.Typecheck())
return l
}
+ if fromType.IsEmptyInterface() {
+ base.Fatalf("OCONVIFACE can't operate on an empty interface")
+ }
- // Optimize convT2{E,I} for many cases in which T is not pointer-shaped,
- // by using an existing addressable value identical to n.Left
- // or creating one on the stack.
+ // Evaluate the input interface.
+ c := typecheck.Temp(fromType)
+ init.Append(ir.NewAssignStmt(base.Pos, c, n.X))
+
+ // Grab its parts.
+ itab := ir.NewUnaryExpr(base.Pos, ir.OITAB, c)
+ itab.SetType(types.Types[types.TUINTPTR].PtrTo())
+ itab.SetTypecheck(1)
+ data := ir.NewUnaryExpr(base.Pos, ir.OIDATA, c)
+ data.SetType(types.Types[types.TUINT8].PtrTo()) // Type is generic pointer - we're just passing it through.
+ data.SetTypecheck(1)
+
+ var typeWord ir.Node
+ if toType.IsEmptyInterface() {
+ // Implement interface to empty interface conversion.
+ // res = itab
+ // if res != nil {
+ // res = res.type
+ // }
+ typeWord = typecheck.Temp(types.NewPtr(types.Types[types.TUINT8]))
+ init.Append(ir.NewAssignStmt(base.Pos, typeWord, itab))
+ nif := ir.NewIfStmt(base.Pos, typecheck.Expr(ir.NewBinaryExpr(base.Pos, ir.ONE, typeWord, typecheck.NodNil())), nil, nil)
+ nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, typeWord, itabType(typeWord))}
+ init.Append(nif)
+ } else {
+ // Must be converting I2I (more specific to less specific interface).
+ // res = convI2I(toType, itab)
+ fn := typecheck.LookupRuntime("convI2I")
+ types.CalcSize(fn.Type())
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
+ call.Args = []ir.Node{reflectdata.TypePtr(toType), itab}
+ typeWord = walkExpr(typecheck.Expr(call), init)
+ }
+
+ // Build the result.
+ // e = iface{typeWord, data}
+ e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeWord, data)
+ e.SetType(toType) // assign type manually, typecheck doesn't understand OEFACE.
+ e.SetTypecheck(1)
+ return e
+}
+
+// dataWord returns the data word (the second word) used to represent n in an interface.
+// n must not be of interface type.
+// escapes reports whether the result escapes.
+func dataWord(n ir.Node, init *ir.Nodes, escapes bool) ir.Node {
+ fromType := n.Type()
+
+ // If it's a pointer, it is its own representation.
+ if types.IsDirectIface(fromType) {
+ return n
+ }
+
+ // Try a bunch of cases to avoid an allocation.
var value ir.Node
switch {
case fromType.Size() == 0:
- // n.Left is zero-sized. Use zerobase.
- cheapExpr(n.X, init) // Evaluate n.Left for side-effects. See issue 19246.
+ // n is zero-sized. Use zerobase.
+ cheapExpr(n, init) // Evaluate n for side-effects. See issue 19246.
value = ir.NewLinksymExpr(base.Pos, ir.Syms.Zerobase, types.Types[types.TUINTPTR])
case fromType.IsBoolean() || (fromType.Size() == 1 && fromType.IsInteger()):
- // n.Left is a bool/byte. Use staticuint64s[n.Left * 8] on little-endian
- // and staticuint64s[n.Left * 8 + 7] on big-endian.
- n.X = cheapExpr(n.X, init)
- // byteindex widens n.Left so that the multiplication doesn't overflow.
- index := ir.NewBinaryExpr(base.Pos, ir.OLSH, byteindex(n.X), ir.NewInt(3))
+ // n is a bool/byte. Use staticuint64s[n * 8] on little-endian
+ // and staticuint64s[n * 8 + 7] on big-endian.
+ n = cheapExpr(n, init)
+ // byteindex widens n so that the multiplication doesn't overflow.
+ index := ir.NewBinaryExpr(base.Pos, ir.OLSH, byteindex(n), ir.NewInt(3))
if ssagen.Arch.LinkArch.ByteOrder == binary.BigEndian {
index = ir.NewBinaryExpr(base.Pos, ir.OADD, index, ir.NewInt(7))
}
@@ -90,118 +142,71 @@ func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
xe := ir.NewIndexExpr(base.Pos, staticuint64s, index)
xe.SetBounded(true)
value = xe
- case n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class == ir.PEXTERN && n.X.(*ir.Name).Readonly():
- // n.Left is a readonly global; use it directly.
- value = n.X
- case !fromType.IsInterface() && n.Esc() == ir.EscNone && fromType.Width <= 1024:
- // n.Left does not escape. Use a stack temporary initialized to n.Left.
+ case n.Op() == ir.ONAME && n.(*ir.Name).Class == ir.PEXTERN && n.(*ir.Name).Readonly():
+ // n is a readonly global; use it directly.
+ value = n
+ case !escapes && fromType.Width <= 1024:
+ // n does not escape. Use a stack temporary initialized to n.
value = typecheck.Temp(fromType)
- init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, value, n.X)))
+ init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, value, n)))
}
-
if value != nil {
- // Value is identical to n.Left.
- // Construct the interface directly: {type/itab, &value}.
- l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), typecheck.Expr(typecheck.NodAddr(value)))
- l.SetType(toType)
- l.SetTypecheck(n.Typecheck())
- return l
- }
-
- // Implement interface to empty interface conversion.
- // tmp = i.itab
- // if tmp != nil {
- // tmp = tmp.type
- // }
- // e = iface{tmp, i.data}
- if toType.IsEmptyInterface() && fromType.IsInterface() && !fromType.IsEmptyInterface() {
- // Evaluate the input interface.
- c := typecheck.Temp(fromType)
- init.Append(ir.NewAssignStmt(base.Pos, c, n.X))
-
- // Get the itab out of the interface.
- tmp := typecheck.Temp(types.NewPtr(types.Types[types.TUINT8]))
- init.Append(ir.NewAssignStmt(base.Pos, tmp, typecheck.Expr(ir.NewUnaryExpr(base.Pos, ir.OITAB, c))))
-
- // Get the type out of the itab.
- nif := ir.NewIfStmt(base.Pos, typecheck.Expr(ir.NewBinaryExpr(base.Pos, ir.ONE, tmp, typecheck.NodNil())), nil, nil)
- nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, tmp, itabType(tmp))}
- init.Append(nif)
-
- // Build the result.
- e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, tmp, ifaceData(n.Pos(), c, types.NewPtr(types.Types[types.TUINT8])))
- e.SetType(toType) // assign type manually, typecheck doesn't understand OEFACE.
- e.SetTypecheck(1)
- return e
+ // The interface data word is &value.
+ return typecheck.Expr(typecheck.NodAddr(value))
}
- fnname, argType, needsaddr := convFuncName(fromType, toType)
-
- if !needsaddr && !fromType.IsInterface() {
- // Use a specialized conversion routine that only returns a data pointer.
- // ptr = convT2X(val)
- // e = iface{typ/tab, ptr}
- fn := typecheck.LookupRuntime(fnname)
- types.CalcSize(fromType)
+ // Time to do an allocation. We'll call into the runtime for that.
+ fnname, argType, needsaddr := dataWordFuncName(fromType)
+ fn := typecheck.LookupRuntime(fnname)
- arg := n.X
+ var args []ir.Node
+ if needsaddr {
+ // Types of large or unknown size are passed by reference.
+ // Orderexpr arranged for n to be a temporary for all
+ // the conversions it could see. Comparison of an interface
+ // with a non-interface, especially in a switch on interface value
+ // with non-interface cases, is not visible to order.stmt, so we
+ // have to fall back on allocating a temp here.
+ if !ir.IsAddressable(n) {
+ n = copyExpr(n, fromType, init)
+ }
+ fn = typecheck.SubstArgTypes(fn, fromType)
+ args = []ir.Node{reflectdata.TypePtr(fromType), typecheck.NodAddr(n)}
+ } else {
+ // Use a specialized conversion routine that takes the value being
+ // converted by value, not by pointer.
+ var arg ir.Node
switch {
case fromType == argType:
// already in the right type, nothing to do
+ arg = n
case fromType.Kind() == argType.Kind(),
fromType.IsPtrShaped() && argType.IsPtrShaped():
// can directly convert (e.g. named type to underlying type, or one pointer to another)
- arg = ir.NewConvExpr(n.Pos(), ir.OCONVNOP, argType, arg)
+ // TODO: never happens because pointers are directIface?
+ arg = ir.NewConvExpr(n.Pos(), ir.OCONVNOP, argType, n)
case fromType.IsInteger() && argType.IsInteger():
// can directly convert (e.g. int32 to uint32)
- arg = ir.NewConvExpr(n.Pos(), ir.OCONV, argType, arg)
+ arg = ir.NewConvExpr(n.Pos(), ir.OCONV, argType, n)
default:
// unsafe cast through memory
- arg = copyExpr(arg, arg.Type(), init)
+ arg = copyExpr(n, fromType, init)
var addr ir.Node = typecheck.NodAddr(arg)
addr = ir.NewConvExpr(n.Pos(), ir.OCONVNOP, argType.PtrTo(), addr)
arg = ir.NewStarExpr(n.Pos(), addr)
arg.SetType(argType)
}
-
- call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
- call.Args = []ir.Node{arg}
- e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeword(), safeExpr(walkExpr(typecheck.Expr(call), init), init))
- e.SetType(toType)
- e.SetTypecheck(1)
- return e
- }
-
- var tab ir.Node
- if fromType.IsInterface() {
- // convI2I
- tab = reflectdata.TypePtr(toType)
- } else {
- // convT2x
- tab = typeword()
+ args = []ir.Node{arg}
}
-
- v := n.X
- if needsaddr {
- // Types of large or unknown size are passed by reference.
- // Orderexpr arranged for n.Left to be a temporary for all
- // the conversions it could see. Comparison of an interface
- // with a non-interface, especially in a switch on interface value
- // with non-interface cases, is not visible to order.stmt, so we
- // have to fall back on allocating a temp here.
- if !ir.IsAddressable(v) {
- v = copyExpr(v, v.Type(), init)
- }
- v = typecheck.NodAddr(v)
- }
-
- types.CalcSize(fromType)
- fn := typecheck.LookupRuntime(fnname)
- fn = typecheck.SubstArgTypes(fn, fromType, toType)
- types.CalcSize(fn.Type())
call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
- call.Args = []ir.Node{tab, v}
- return walkExpr(typecheck.Expr(call), init)
+ call.Args = args
+ return safeExpr(walkExpr(typecheck.Expr(call), init), init)
+}
+
+// walkConvIData walks an OCONVIDATA node.
+func walkConvIData(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+ n.X = walkExpr(n.X, init)
+ return dataWord(n.X, init, n.Esc() != ir.EscNone)
}
// walkBytesRunesToString walks an OBYTES2STR or ORUNES2STR node.
@@ -312,50 +317,35 @@ func walkStringToRunes(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
return mkcall("stringtoslicerune", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TSTRING]))
}
-// convFuncName builds the runtime function name for interface conversion.
-// It also returns the argument type that the runtime function takes, and
-// whether the function expects the data by address.
-// Not all names are possible. For example, we never generate convE2E or convE2I.
-func convFuncName(from, to *types.Type) (fnname string, argType *types.Type, needsaddr bool) {
- tkind := to.Tie()
- switch from.Tie() {
- case 'I':
- if tkind == 'I' {
- return "convI2I", types.Types[types.TINTER], false
- }
- case 'T':
+// dataWordFuncName returns the name of the function used to convert a value of type "from"
+// to the data word of an interface.
+// argType is the type the argument needs to be coerced to.
+// needsaddr reports whether the value should be passed by value (needsaddr==false) or by address (needsaddr==true).
+func dataWordFuncName(from *types.Type) (fnname string, argType *types.Type, needsaddr bool) {
+ if from.IsInterface() {
+ base.Fatalf("can only handle non-interfaces")
+ }
+ switch {
+ case from.Size() == 2 && from.Align == 2:
+ return "convT16", types.Types[types.TUINT16], false
+ case from.Size() == 4 && from.Align == 4 && !from.HasPointers():
+ return "convT32", types.Types[types.TUINT32], false
+ case from.Size() == 8 && from.Align == types.Types[types.TUINT64].Align && !from.HasPointers():
+ return "convT64", types.Types[types.TUINT64], false
+ }
+ if sc := from.SoleComponent(); sc != nil {
switch {
- case from.Size() == 2 && from.Align == 2:
- return "convT16", types.Types[types.TUINT16], false
- case from.Size() == 4 && from.Align == 4 && !from.HasPointers():
- return "convT32", types.Types[types.TUINT32], false
- case from.Size() == 8 && from.Align == types.Types[types.TUINT64].Align && !from.HasPointers():
- return "convT64", types.Types[types.TUINT64], false
- }
- if sc := from.SoleComponent(); sc != nil {
- switch {
- case sc.IsString():
- return "convTstring", types.Types[types.TSTRING], false
- case sc.IsSlice():
- return "convTslice", types.NewSlice(types.Types[types.TUINT8]), false // the element type doesn't matter
- }
+ case sc.IsString():
+ return "convTstring", types.Types[types.TSTRING], false
+ case sc.IsSlice():
+ return "convTslice", types.NewSlice(types.Types[types.TUINT8]), false // the element type doesn't matter
}
+ }
- switch tkind {
- case 'E':
- if !from.HasPointers() {
- return "convT2Enoptr", types.Types[types.TUNSAFEPTR], true
- }
- return "convT2E", types.Types[types.TUNSAFEPTR], true
- case 'I':
- if !from.HasPointers() {
- return "convT2Inoptr", types.Types[types.TUNSAFEPTR], true
- }
- return "convT2I", types.Types[types.TUNSAFEPTR], true
- }
+ if from.HasPointers() {
+ return "convT", types.Types[types.TUNSAFEPTR], true
}
- base.Fatalf("unknown conv func %c2%c", from.Tie(), to.Tie())
- panic("unreachable")
+ return "convTnoptr", types.Types[types.TUNSAFEPTR], true
}
// rtconvfn returns the parameter and result types that will be used by a
@@ -462,7 +452,9 @@ func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
// TODO(mdempsky): Make stricter. We only need to exempt
// reflect.Value.Pointer and reflect.Value.UnsafeAddr.
switch n.X.Op() {
- case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
+ case ir.OCALLMETH:
+ base.FatalfAt(n.X.Pos(), "OCALLMETH missed by typecheck")
+ case ir.OCALLFUNC, ir.OCALLINTER:
return n
}
@@ -499,7 +491,7 @@ func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
cheap := cheapExpr(n, init)
- slice := typecheck.MakeDotArgs(types.NewSlice(types.Types[types.TUNSAFEPTR]), originals)
+ slice := typecheck.MakeDotArgs(base.Pos, types.NewSlice(types.Types[types.TUNSAFEPTR]), originals)
slice.SetEsc(ir.EscNone)
init.Append(mkcall("checkptrArithmetic", nil, init, typecheck.ConvNop(cheap, types.Types[types.TUNSAFEPTR]), slice))
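
To make the dataWord cases above concrete, here is an ordinary program whose interface conversions exercise the different strategies: direct storage for pointer-shaped values, the staticuint64s table for bytes and bools, and the convT* helpers chosen by dataWordFuncName when an allocation is needed. Which helper actually fires also depends on alignment, pointer-ness, and escape analysis, so the comments are a best-effort reading of the table above, not a guarantee:

    package main

    import "fmt"

    func main() {
        var sink []interface{}

        p := new(int)
        sink = append(sink, p) // pointer-shaped: the pointer itself is the data word, no allocation

        var b byte = 7
        sink = append(sink, b) // byte/bool: data word points into the runtime's staticuint64s table

        s := "hello"
        sink = append(sink, s) // sole string component: candidate for convTstring

        var v int64 = 42
        sink = append(sink, v) // 8 bytes, 8-byte aligned, no pointers: candidate for convT64

        fmt.Println(sink...)
    }
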
diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go
index 2fb907710b..26e225440a 100644
--- a/src/cmd/compile/internal/walk/expr.go
+++ b/src/cmd/compile/internal/walk/expr.go
@@ -82,7 +82,7 @@ func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node {
base.Fatalf("walkExpr: switch 1 unknown op %+v", n.Op())
panic("unreachable")
- case ir.ONONAME, ir.OGETG:
+ case ir.OGETG, ir.OGETCALLERPC, ir.OGETCALLERSP:
return n
case ir.OTYPE, ir.ONAME, ir.OLITERAL, ir.ONIL, ir.OLINKSYMOFFSET:
@@ -136,6 +136,10 @@ func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node {
n := n.(*ir.TypeAssertExpr)
return walkDotType(n, init)
+ case ir.ODYNAMICDOTTYPE, ir.ODYNAMICDOTTYPE2:
+ n := n.(*ir.DynamicTypeAssertExpr)
+ return walkDynamicDotType(n, init)
+
case ir.OLEN, ir.OCAP:
n := n.(*ir.UnaryExpr)
return walkLenCap(n, init)
@@ -161,13 +165,13 @@ func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node {
n := n.(*ir.UnaryExpr)
return mkcall("gopanic", nil, init, n.X)
- case ir.ORECOVER:
- return walkRecover(n.(*ir.CallExpr), init)
+ case ir.ORECOVERFP:
+ return walkRecoverFP(n.(*ir.CallExpr), init)
case ir.OCFUNC:
return n
- case ir.OCALLINTER, ir.OCALLFUNC, ir.OCALLMETH:
+ case ir.OCALLINTER, ir.OCALLFUNC:
n := n.(*ir.CallExpr)
return walkCall(n, init)
@@ -206,6 +210,10 @@ func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node {
n := n.(*ir.ConvExpr)
return walkConvInterface(n, init)
+ case ir.OCONVIDATA:
+ n := n.(*ir.ConvExpr)
+ return walkConvIData(n, init)
+
case ir.OCONV, ir.OCONVNOP:
n := n.(*ir.ConvExpr)
return walkConv(n, init)
@@ -308,8 +316,8 @@ func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node {
case ir.OCLOSURE:
return walkClosure(n.(*ir.ClosureExpr), init)
- case ir.OCALLPART:
- return walkCallPart(n.(*ir.SelectorExpr), init)
+ case ir.OMETHVALUE:
+ return walkMethodValue(n.(*ir.SelectorExpr), init)
}
// No return! Each case must return (or panic),
@@ -487,9 +495,12 @@ func walkAddString(n *ir.AddStringExpr, init *ir.Nodes) ir.Node {
return r1
}
-// walkCall walks an OCALLFUNC, OCALLINTER, or OCALLMETH node.
+// walkCall walks an OCALLFUNC or OCALLINTER node.
func walkCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {
- if n.Op() == ir.OCALLINTER || n.Op() == ir.OCALLMETH {
+ if n.Op() == ir.OCALLMETH {
+ base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
+ }
+ if n.Op() == ir.OCALLINTER || n.X.Op() == ir.OMETHEXPR {
// We expect both interface call reflect.Type.Method and concrete
// call reflect.(*rtype).Method.
usemethod(n)
@@ -549,20 +560,8 @@ func walkCall1(n *ir.CallExpr, init *ir.Nodes) {
}
n.SetWalked(true)
- // If this is a method call t.M(...),
- // rewrite into a function call T.M(t, ...).
- // TODO(mdempsky): Do this right after type checking.
if n.Op() == ir.OCALLMETH {
- withRecv := make([]ir.Node, len(n.Args)+1)
- dot := n.X.(*ir.SelectorExpr)
- withRecv[0] = dot.X
- copy(withRecv[1:], n.Args)
- n.Args = withRecv
-
- dot = ir.NewSelectorExpr(dot.Pos(), ir.OXDOT, ir.TypeNode(dot.X.Type()), dot.Selection.Sym)
-
- n.SetOp(ir.OCALLFUNC)
- n.X = typecheck.Expr(dot)
+ base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
}
args := n.Args
@@ -671,6 +670,13 @@ func walkDotType(n *ir.TypeAssertExpr, init *ir.Nodes) ir.Node {
return n
}
+// walkDynamicDotType walks an ODYNAMICDOTTYPE or ODYNAMICDOTTYPE2 node.
+func walkDynamicDotType(n *ir.DynamicTypeAssertExpr, init *ir.Nodes) ir.Node {
+ n.X = walkExpr(n.X, init)
+ n.T = walkExpr(n.T, init)
+ return n
+}
+
// walkIndex walks an OINDEX node.
func walkIndex(n *ir.IndexExpr, init *ir.Nodes) ir.Node {
n.X = walkExpr(n.X, init)
@@ -931,56 +937,55 @@ func bounded(n ir.Node, max int64) bool {
return false
}
-// usemethod checks interface method calls for uses of reflect.Type.Method.
+// usemethod checks calls for uses of reflect.Type.{Method,MethodByName}.
func usemethod(n *ir.CallExpr) {
- t := n.X.Type()
+ // Don't mark reflect.(*rtype).Method, etc. themselves in the reflect package.
+ // Those functions may be alive via the itab, which should not cause all methods
+ // to be kept alive. We only want to mark their callers.
+ if base.Ctxt.Pkgpath == "reflect" {
+ switch ir.CurFunc.Nname.Sym().Name { // TODO: is there a better way than hardcoding the names?
+ case "(*rtype).Method", "(*rtype).MethodByName", "(*interfaceType).Method", "(*interfaceType).MethodByName":
+ return
+ }
+ }
- // Looking for either of:
- // Method(int) reflect.Method
- // MethodByName(string) (reflect.Method, bool)
- //
- // TODO(crawshaw): improve precision of match by working out
- // how to check the method name.
- if n := t.NumParams(); n != 1 {
+ dot, ok := n.X.(*ir.SelectorExpr)
+ if !ok {
return
}
- if n := t.NumResults(); n != 1 && n != 2 {
+
+ // Looking for either direct method calls or interface method calls of:
+ // reflect.Type.Method - func(int) reflect.Method
+ // reflect.Type.MethodByName - func(string) (reflect.Method, bool)
+ var pKind types.Kind
+
+ switch dot.Sel.Name {
+ case "Method":
+ pKind = types.TINT
+ case "MethodByName":
+ pKind = types.TSTRING
+ default:
return
}
- p0 := t.Params().Field(0)
- res0 := t.Results().Field(0)
- var res1 *types.Field
- if t.NumResults() == 2 {
- res1 = t.Results().Field(1)
- }
- if res1 == nil {
- if p0.Type.Kind() != types.TINT {
- return
- }
- } else {
- if !p0.Type.IsString() {
- return
- }
- if !res1.Type.IsBoolean() {
- return
- }
+ t := dot.Selection.Type
+ if t.NumParams() != 1 || t.Params().Field(0).Type.Kind() != pKind {
+ return
}
-
- // Don't mark reflect.(*rtype).Method, etc. themselves in the reflect package.
- // Those functions may be alive via the itab, which should not cause all methods
- // alive. We only want to mark their callers.
- if base.Ctxt.Pkgpath == "reflect" {
- switch ir.CurFunc.Nname.Sym().Name { // TODO: is there a better way than hardcoding the names?
- case "(*rtype).Method", "(*rtype).MethodByName", "(*interfaceType).Method", "(*interfaceType).MethodByName":
+ switch t.NumResults() {
+ case 1:
+ // ok
+ case 2:
+ if t.Results().Field(1).Type.Kind() != types.TBOOL {
return
}
+ default:
+ return
}
- // Note: Don't rely on res0.Type.String() since its formatting depends on multiple factors
- // (including global variables such as numImports - was issue #19028).
- // Also need to check for reflect package itself (see Issue #38515).
- if s := res0.Type.Sym(); s != nil && s.Name == "Method" && types.IsReflectPkg(s.Pkg) {
+ // Check that the first result type is "reflect.Method". Note that we have to check sym name and sym package
+ // separately, as we can't check for exact string "reflect.Method" reliably (e.g., see #19028 and #38515).
+ if s := t.Results().Field(0).Type.Sym(); s != nil && s.Name == "Method" && types.IsReflectPkg(s.Pkg) {
ir.CurFunc.SetReflectMethod(true)
// The LSym is initialized at this point. We need to set the attribute on the LSym.
ir.CurFunc.LSym.Set(obj.AttrReflectMethod, true)
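
The rewritten usemethod above pattern-matches calls whose selector is Method or MethodByName with the expected signature and marks the calling function ReflectMethod, so the linker keeps methods that are only reachable through reflection. A small program of the kind this detection is aimed at (plain reflect usage, shown only for context):

    package main

    import (
        "fmt"
        "reflect"
    )

    type T struct{}

    func (T) Hello() string { return "hi" }

    func main() {
        t := reflect.TypeOf(T{})

        // Selectors named Method/MethodByName with the matching signatures are
        // what usemethod looks for; the enclosing function gets the ReflectMethod
        // attribute so the linker cannot prune methods reachable only this way.
        m, ok := t.MethodByName("Hello")
        fmt.Println(ok, m.Name, t.Method(0).Name)
    }
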
diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go
index b733d3a29f..6e336f565c 100644
--- a/src/cmd/compile/internal/walk/order.go
+++ b/src/cmd/compile/internal/walk/order.go
@@ -7,10 +7,8 @@ package walk
import (
"fmt"
"go/constant"
- "internal/buildcfg"
"cmd/compile/internal/base"
- "cmd/compile/internal/escape"
"cmd/compile/internal/ir"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/staticinit"
@@ -53,7 +51,7 @@ import (
type orderState struct {
out []ir.Node // list of generated statements
temp []*ir.Name // stack of temporary variables
- free map[string][]*ir.Name // free list of unused temporaries, by type.LongString().
+ free map[string][]*ir.Name // free list of unused temporaries, by type.LinkString().
edit func(ir.Node) ir.Node // cached closure of o.exprNoLHS
}
@@ -78,20 +76,14 @@ func (o *orderState) append(stmt ir.Node) {
// If clear is true, newTemp emits code to zero the temporary.
func (o *orderState) newTemp(t *types.Type, clear bool) *ir.Name {
var v *ir.Name
- // Note: LongString is close to the type equality we want,
- // but not exactly. We still need to double-check with types.Identical.
- key := t.LongString()
- a := o.free[key]
- for i, n := range a {
- if types.Identical(t, n.Type()) {
- v = a[i]
- a[i] = a[len(a)-1]
- a = a[:len(a)-1]
- o.free[key] = a
- break
+ key := t.LinkString()
+ if a := o.free[key]; len(a) > 0 {
+ v = a[len(a)-1]
+ if !types.Identical(t, v.Type()) {
+ base.Fatalf("expected %L to have type %v", v, t)
}
- }
- if v == nil {
+ o.free[key] = a[:len(a)-1]
+ } else {
v = typecheck.Temp(t)
}
if clear {
@@ -372,7 +364,7 @@ func (o *orderState) markTemp() ordermarker {
// which must have been returned by markTemp.
func (o *orderState) popTemp(mark ordermarker) {
for _, n := range o.temp[mark:] {
- key := n.Type().LongString()
+ key := n.Type().LinkString()
o.free[key] = append(o.free[key], n)
}
o.temp = o.temp[:mark]
@@ -514,15 +506,18 @@ func (o *orderState) init(n ir.Node) {
}
// call orders the call expression n.
-// n.Op is OCALLMETH/OCALLFUNC/OCALLINTER or a builtin like OCOPY.
+// n.Op is OCALLFUNC/OCALLINTER or a builtin like OCOPY.
func (o *orderState) call(nn ir.Node) {
if len(nn.Init()) > 0 {
// Caller should have already called o.init(nn).
base.Fatalf("%v with unexpected ninit", nn.Op())
}
+ if nn.Op() == ir.OCALLMETH {
+ base.FatalfAt(nn.Pos(), "OCALLMETH missed by typecheck")
+ }
// Builtin functions.
- if nn.Op() != ir.OCALLFUNC && nn.Op() != ir.OCALLMETH && nn.Op() != ir.OCALLINTER {
+ if nn.Op() != ir.OCALLFUNC && nn.Op() != ir.OCALLINTER {
switch n := nn.(type) {
default:
base.Fatalf("unexpected call: %+v", n)
@@ -554,39 +549,6 @@ func (o *orderState) call(nn ir.Node) {
n.X = o.expr(n.X, nil)
o.exprList(n.Args)
-
- if n.Op() == ir.OCALLINTER {
- return
- }
- keepAlive := func(arg ir.Node) {
- // If the argument is really a pointer being converted to uintptr,
- // arrange for the pointer to be kept alive until the call returns,
- // by copying it into a temp and marking that temp
- // still alive when we pop the temp stack.
- if arg.Op() == ir.OCONVNOP {
- arg := arg.(*ir.ConvExpr)
- if arg.X.Type().IsUnsafePtr() {
- x := o.copyExpr(arg.X)
- arg.X = x
- x.SetAddrtaken(true) // ensure SSA keeps the x variable
- n.KeepAlive = append(n.KeepAlive, x)
- }
- }
- }
-
- // Check for "unsafe-uintptr" tag provided by escape analysis.
- for i, param := range n.X.Type().Params().FieldSlice() {
- if param.Note == escape.UnsafeUintptrNote || param.Note == escape.UintptrEscapesNote {
- if arg := n.Args[i]; arg.Op() == ir.OSLICELIT {
- arg := arg.(*ir.CompLitExpr)
- for _, elt := range arg.List {
- keepAlive(elt)
- }
- } else {
- keepAlive(arg)
- }
- }
- }
}
// mapAssign appends n to o.out.
@@ -693,9 +655,20 @@ func (o *orderState) stmt(n ir.Node) {
n := n.(*ir.AssignListStmt)
t := o.markTemp()
o.exprList(n.Lhs)
- o.init(n.Rhs[0])
- o.call(n.Rhs[0])
- o.as2func(n)
+ call := n.Rhs[0]
+ o.init(call)
+ if ic, ok := call.(*ir.InlinedCallExpr); ok {
+ o.stmtList(ic.Body)
+
+ n.SetOp(ir.OAS2)
+ n.Rhs = ic.ReturnVars
+
+ o.exprList(n.Rhs)
+ o.out = append(o.out, n)
+ } else {
+ o.call(call)
+ o.as2func(n)
+ }
o.cleanTemp(t)
// Special: use temporary variables to hold result,
@@ -713,6 +686,10 @@ func (o *orderState) stmt(n ir.Node) {
case ir.ODOTTYPE2:
r := r.(*ir.TypeAssertExpr)
r.X = o.expr(r.X, nil)
+ case ir.ODYNAMICDOTTYPE2:
+ r := r.(*ir.DynamicTypeAssertExpr)
+ r.X = o.expr(r.X, nil)
+ r.T = o.expr(r.T, nil)
case ir.ORECV:
r := r.(*ir.UnaryExpr)
r.X = o.expr(r.X, nil)
@@ -748,14 +725,25 @@ func (o *orderState) stmt(n ir.Node) {
o.out = append(o.out, n)
// Special: handle call arguments.
- case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH:
+ case ir.OCALLFUNC, ir.OCALLINTER:
n := n.(*ir.CallExpr)
t := o.markTemp()
o.call(n)
o.out = append(o.out, n)
o.cleanTemp(t)
- case ir.OCLOSE, ir.ORECV:
+ case ir.OINLCALL:
+ n := n.(*ir.InlinedCallExpr)
+ o.stmtList(n.Body)
+
+ // discard results; double-check for no side effects
+ for _, result := range n.ReturnVars {
+ if staticinit.AnySideEffects(result) {
+ base.FatalfAt(result.Pos(), "inlined call result has side effects: %v", result)
+ }
+ }
+
+ case ir.OCHECKNIL, ir.OCLOSE, ir.OPANIC, ir.ORECV:
n := n.(*ir.UnaryExpr)
t := o.markTemp()
n.X = o.expr(n.X, nil)
@@ -770,10 +758,10 @@ func (o *orderState) stmt(n ir.Node) {
o.out = append(o.out, n)
o.cleanTemp(t)
- case ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+ case ir.OPRINT, ir.OPRINTN, ir.ORECOVERFP:
n := n.(*ir.CallExpr)
t := o.markTemp()
- o.exprList(n.Args)
+ o.call(n)
o.out = append(o.out, n)
o.cleanTemp(t)
@@ -783,16 +771,6 @@ func (o *orderState) stmt(n ir.Node) {
t := o.markTemp()
o.init(n.Call)
o.call(n.Call)
- if n.Call.Op() == ir.ORECOVER {
- // Special handling of "defer recover()". We need to evaluate the FP
- // argument before wrapping.
- var init ir.Nodes
- n.Call = walkRecover(n.Call.(*ir.CallExpr), &init)
- o.stmtList(init)
- }
- if buildcfg.Experiment.RegabiDefer {
- o.wrapGoDefer(n)
- }
o.out = append(o.out, n)
o.cleanTemp(t)
@@ -830,16 +808,6 @@ func (o *orderState) stmt(n ir.Node) {
orderBlock(&n.Else, o.free)
o.out = append(o.out, n)
- case ir.OPANIC:
- n := n.(*ir.UnaryExpr)
- t := o.markTemp()
- n.X = o.expr(n.X, nil)
- if !n.X.Type().IsEmptyInterface() {
- base.FatalfAt(n.Pos(), "bad argument to panic: %L", n.X)
- }
- o.out = append(o.out, n)
- o.cleanTemp(t)
-
case ir.ORANGE:
// n.Right is the expression being ranged over.
// order it, and then make a copy if we need one.
@@ -1192,23 +1160,26 @@ func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
// concrete type (not interface) argument might need an addressable
// temporary to pass to the runtime conversion routine.
- case ir.OCONVIFACE:
+ case ir.OCONVIFACE, ir.OCONVIDATA:
n := n.(*ir.ConvExpr)
n.X = o.expr(n.X, nil)
if n.X.Type().IsInterface() {
return n
}
- if _, _, needsaddr := convFuncName(n.X.Type(), n.Type()); needsaddr || isStaticCompositeLiteral(n.X) {
+ if _, _, needsaddr := dataWordFuncName(n.X.Type()); needsaddr || isStaticCompositeLiteral(n.X) {
// Need a temp if we need to pass the address to the conversion function.
// We also process static composite literal node here, making a named static global
- // whose address we can put directly in an interface (see OCONVIFACE case in walk).
+ // whose address we can put directly in an interface (see OCONVIFACE/OCONVIDATA case in walk).
n.X = o.addrTemp(n.X)
}
return n
case ir.OCONVNOP:
n := n.(*ir.ConvExpr)
- if n.Type().IsKind(types.TUNSAFEPTR) && n.X.Type().IsKind(types.TUINTPTR) && (n.X.Op() == ir.OCALLFUNC || n.X.Op() == ir.OCALLINTER || n.X.Op() == ir.OCALLMETH) {
+ if n.X.Op() == ir.OCALLMETH {
+ base.FatalfAt(n.X.Pos(), "OCALLMETH missed by typecheck")
+ }
+ if n.Type().IsKind(types.TUNSAFEPTR) && n.X.Type().IsKind(types.TUINTPTR) && (n.X.Op() == ir.OCALLFUNC || n.X.Op() == ir.OCALLINTER) {
call := n.X.(*ir.CallExpr)
// When reordering unsafe.Pointer(f()) into a separate
// statement, the conversion and function call must stay
@@ -1261,9 +1232,12 @@ func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
o.out = append(o.out, nif)
return r
+ case ir.OCALLMETH:
+ base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
+ panic("unreachable")
+
case ir.OCALLFUNC,
ir.OCALLINTER,
- ir.OCALLMETH,
ir.OCAP,
ir.OCOMPLEX,
ir.OCOPY,
@@ -1275,7 +1249,7 @@ func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
ir.OMAKESLICECOPY,
ir.ONEW,
ir.OREAL,
- ir.ORECOVER,
+ ir.ORECOVERFP,
ir.OSTR2BYTES,
ir.OSTR2BYTESTMP,
ir.OSTR2RUNES:
@@ -1293,6 +1267,11 @@ func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
}
return n
+ case ir.OINLCALL:
+ n := n.(*ir.InlinedCallExpr)
+ o.stmtList(n.Body)
+ return n.SingleResult()
+
case ir.OAPPEND:
// Check for append(x, make([]T, y)...) .
n := n.(*ir.CallExpr)
@@ -1327,11 +1306,11 @@ func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
}
return n
- case ir.OCALLPART:
+ case ir.OMETHVALUE:
n := n.(*ir.SelectorExpr)
n.X = o.expr(n.X, nil)
if n.Transient() {
- t := typecheck.PartialCallType(n)
+ t := typecheck.MethodValueType(n)
n.Prealloc = o.newTemp(t, false)
}
return n
@@ -1498,313 +1477,6 @@ func (o *orderState) as2ok(n *ir.AssignListStmt) {
o.stmt(typecheck.Stmt(as))
}
-var wrapGoDefer_prgen int
-
-// wrapGoDefer wraps the target of a "go" or "defer" statement with a
-// new "function with no arguments" closure. Specifically, it converts
-//
-// defer f(x, y)
-//
-// to
-//
-// x1, y1 := x, y
-// defer func() { f(x1, y1) }()
-//
-// This is primarily to enable a quicker bringup of defers under the
-// new register ABI; by doing this conversion, we can simplify the
-// code in the runtime that invokes defers on the panic path.
-func (o *orderState) wrapGoDefer(n *ir.GoDeferStmt) {
- call := n.Call
-
- var callX ir.Node // thing being called
- var callArgs []ir.Node // call arguments
- var keepAlive []*ir.Name // KeepAlive list from call, if present
-
- // A helper to recreate the call within the closure.
- var mkNewCall func(pos src.XPos, op ir.Op, fun ir.Node, args []ir.Node) ir.Node
-
- // Defer calls come in many shapes and sizes; not all of them
- // are ir.CallExpr's. Examine the type to see what we're dealing with.
- switch x := call.(type) {
- case *ir.CallExpr:
- callX = x.X
- callArgs = x.Args
- keepAlive = x.KeepAlive
- mkNewCall = func(pos src.XPos, op ir.Op, fun ir.Node, args []ir.Node) ir.Node {
- newcall := ir.NewCallExpr(pos, op, fun, args)
- newcall.IsDDD = x.IsDDD
- return ir.Node(newcall)
- }
- case *ir.UnaryExpr: // ex: OCLOSE
- callArgs = []ir.Node{x.X}
- mkNewCall = func(pos src.XPos, op ir.Op, fun ir.Node, args []ir.Node) ir.Node {
- if len(args) != 1 {
- panic("internal error, expecting single arg")
- }
- return ir.Node(ir.NewUnaryExpr(pos, op, args[0]))
- }
- case *ir.BinaryExpr: // ex: OCOPY
- callArgs = []ir.Node{x.X, x.Y}
- mkNewCall = func(pos src.XPos, op ir.Op, fun ir.Node, args []ir.Node) ir.Node {
- if len(args) != 2 {
- panic("internal error, expecting two args")
- }
- return ir.Node(ir.NewBinaryExpr(pos, op, args[0], args[1]))
- }
- default:
- panic("unhandled op")
- }
-
- // No need to wrap if called func has no args, no receiver, and no results.
- // However in the case of "defer func() { ... }()" we need to
- // protect against the possibility of directClosureCall rewriting
- // things so that the call does have arguments.
- //
- // Do wrap method calls (OCALLMETH, OCALLINTER), because it has
- // a receiver.
- //
- // Also do wrap builtin functions, because they may be expanded to
- // calls with arguments (e.g. ORECOVER).
- //
- // TODO: maybe not wrap if the called function has no arguments and
- // only in-register results?
- if len(callArgs) == 0 && call.Op() == ir.OCALLFUNC && callX.Type().NumResults() == 0 {
- if c, ok := call.(*ir.CallExpr); ok && callX != nil && callX.Op() == ir.OCLOSURE {
- cloFunc := callX.(*ir.ClosureExpr).Func
- cloFunc.SetClosureCalled(false)
- c.PreserveClosure = true
- }
- return
- }
-
- if c, ok := call.(*ir.CallExpr); ok {
- // To simplify things, turn f(a, b, []T{c, d, e}...) back
- // into f(a, b, c, d, e) -- when the final call is run through the
- // type checker below, it will rebuild the proper slice literal.
- undoVariadic(c)
- callX = c.X
- callArgs = c.Args
- }
-
- // This is set to true if the closure we're generating escapes
- // (needs heap allocation).
- cloEscapes := func() bool {
- if n.Op() == ir.OGO {
- // For "go", assume that all closures escape.
- return true
- }
- // For defer, just use whatever result escape analysis
- // has determined for the defer.
- return n.Esc() != ir.EscNever
- }()
-
- // A helper for making a copy of an argument. Note that it is
- // not safe to use o.copyExpr(arg) if we're putting a
- // reference to the temp into the closure (as opposed to
- // copying it in by value), since in the by-reference case we
- // need a temporary whose lifetime extends to the end of the
- // function (as opposed to being local to the current block or
- // statement being ordered).
- mkArgCopy := func(arg ir.Node) *ir.Name {
- t := arg.Type()
- byval := t.Size() <= 128 || cloEscapes
- var argCopy *ir.Name
- if byval {
- argCopy = o.copyExpr(arg)
- } else {
- argCopy = typecheck.Temp(t)
- o.append(ir.NewAssignStmt(base.Pos, argCopy, arg))
- }
- // The value of 128 below is meant to be consistent with code
- // in escape analysis that picks byval/byaddr based on size.
- argCopy.SetByval(byval)
- return argCopy
- }
-
- // getUnsafeArg looks for an unsafe.Pointer arg that has been
- // previously captured into the call's keepalive list, returning
- // the name node for it if found.
- getUnsafeArg := func(arg ir.Node) *ir.Name {
- // Look for uintptr(unsafe.Pointer(name))
- if arg.Op() != ir.OCONVNOP {
- return nil
- }
- if !arg.Type().IsUintptr() {
- return nil
- }
- if !arg.(*ir.ConvExpr).X.Type().IsUnsafePtr() {
- return nil
- }
- arg = arg.(*ir.ConvExpr).X
- argname, ok := arg.(*ir.Name)
- if !ok {
- return nil
- }
- for i := range keepAlive {
- if argname == keepAlive[i] {
- return argname
- }
- }
- return nil
- }
-
- // Copy the arguments to the function into temps.
- //
- // For calls with uintptr(unsafe.Pointer(...)) args that are being
- // kept alive (see code in (*orderState).call that does this), use
- // the existing arg copy instead of creating a new copy.
- unsafeArgs := make([]*ir.Name, len(callArgs))
- origArgs := callArgs
- var newNames []*ir.Name
- for i := range callArgs {
- arg := callArgs[i]
- var argname *ir.Name
- unsafeArgName := getUnsafeArg(arg)
- if unsafeArgName != nil {
- // arg has been copied already, use keepalive copy
- argname = unsafeArgName
- unsafeArgs[i] = unsafeArgName
- } else {
- argname = mkArgCopy(arg)
- }
- newNames = append(newNames, argname)
- }
-
- // Deal with cases where the function expression (what we're
- // calling) is not a simple function symbol.
- var fnExpr *ir.Name
- var methSelectorExpr *ir.SelectorExpr
- if callX != nil {
- switch {
- case callX.Op() == ir.ODOTMETH || callX.Op() == ir.ODOTINTER:
- // Handle defer of a method call, e.g. "defer v.MyMethod(x, y)"
- n := callX.(*ir.SelectorExpr)
- n.X = mkArgCopy(n.X)
- methSelectorExpr = n
- if callX.Op() == ir.ODOTINTER {
- // Currently for "defer i.M()" if i is nil it panics at the
- // point of defer statement, not when deferred function is called.
- // (I think there is an issue discussing what is the intended
- // behavior but I cannot find it.)
- // We need to do the nil check outside of the wrapper.
- tab := typecheck.Expr(ir.NewUnaryExpr(base.Pos, ir.OITAB, n.X))
- c := ir.NewUnaryExpr(n.Pos(), ir.OCHECKNIL, tab)
- c.SetTypecheck(1)
- o.append(c)
- }
- case !(callX.Op() == ir.ONAME && callX.(*ir.Name).Class == ir.PFUNC):
- // Deal with "defer returnsafunc()(x, y)" (for
- // example) by copying the callee expression.
- fnExpr = mkArgCopy(callX)
- if callX.Op() == ir.OCLOSURE {
- // For "defer func(...)", in addition to copying the
- // closure into a temp, mark it as no longer directly
- // called.
- callX.(*ir.ClosureExpr).Func.SetClosureCalled(false)
- }
- }
- }
-
- // Create a new no-argument function that we'll hand off to defer.
- var noFuncArgs []*ir.Field
- noargst := ir.NewFuncType(base.Pos, nil, noFuncArgs, nil)
- wrapGoDefer_prgen++
- outerfn := ir.CurFunc
- wrapname := fmt.Sprintf("%v·dwrap·%d", outerfn, wrapGoDefer_prgen)
- sym := types.LocalPkg.Lookup(wrapname)
- fn := typecheck.DeclFunc(sym, noargst)
- fn.SetIsHiddenClosure(true)
- fn.SetWrapper(true)
-
- // helper for capturing reference to a var declared in an outer scope.
- capName := func(pos src.XPos, fn *ir.Func, n *ir.Name) *ir.Name {
- t := n.Type()
- cv := ir.CaptureName(pos, fn, n)
- cv.SetType(t)
- return typecheck.Expr(cv).(*ir.Name)
- }
-
- // Call args (x1, y1) need to be captured as part of the newly
- // created closure.
- newCallArgs := []ir.Node{}
- for i := range newNames {
- var arg ir.Node
- arg = capName(callArgs[i].Pos(), fn, newNames[i])
- if unsafeArgs[i] != nil {
- arg = ir.NewConvExpr(arg.Pos(), origArgs[i].Op(), origArgs[i].Type(), arg)
- }
- newCallArgs = append(newCallArgs, arg)
- }
- // Also capture the function or method expression (if needed) into
- // the closure.
- if fnExpr != nil {
- callX = capName(callX.Pos(), fn, fnExpr)
- }
- if methSelectorExpr != nil {
- methSelectorExpr.X = capName(callX.Pos(), fn, methSelectorExpr.X.(*ir.Name))
- }
- ir.FinishCaptureNames(n.Pos(), outerfn, fn)
-
- // This flags a builtin as opposed to a regular call.
- irregular := (call.Op() != ir.OCALLFUNC &&
- call.Op() != ir.OCALLMETH &&
- call.Op() != ir.OCALLINTER)
-
- // Construct new function body: f(x1, y1)
- op := ir.OCALL
- if irregular {
- op = call.Op()
- }
- newcall := mkNewCall(call.Pos(), op, callX, newCallArgs)
-
- // Type-check the result.
- if !irregular {
- typecheck.Call(newcall.(*ir.CallExpr))
- } else {
- typecheck.Stmt(newcall)
- }
-
- // Finalize body, register function on the main decls list.
- fn.Body = []ir.Node{newcall}
- typecheck.FinishFuncBody()
- typecheck.Func(fn)
- typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
-
- // Create closure expr
- clo := ir.NewClosureExpr(n.Pos(), fn)
- fn.OClosure = clo
- clo.SetType(fn.Type())
-
- // Set escape properties for closure.
- if n.Op() == ir.OGO {
- // For "go", assume that the closure is going to escape
- // (with an exception for the runtime, which doesn't
- // permit heap-allocated closures).
- if base.Ctxt.Pkgpath != "runtime" {
- clo.SetEsc(ir.EscHeap)
- }
- } else {
- // For defer, just use whatever result escape analysis
- // has determined for the defer.
- if n.Esc() == ir.EscNever {
- clo.SetTransient(true)
- clo.SetEsc(ir.EscNone)
- }
- }
-
- // Create new top level call to closure over argless function.
- topcall := ir.NewCallExpr(n.Pos(), ir.OCALL, clo, []ir.Node{})
- typecheck.Call(topcall)
-
- // Tag the call to insure that directClosureCall doesn't undo our work.
- topcall.PreserveClosure = true
-
- fn.SetClosureCalled(false)
-
- // Finally, point the defer statement at the newly generated call.
- n.Call = topcall
-}
-
// isFuncPCIntrinsic returns whether n is a direct call of internal/abi.FuncPCABIxxx functions.
func isFuncPCIntrinsic(n *ir.CallExpr) bool {
if n.Op() != ir.OCALLFUNC || n.X.Op() != ir.ONAME {
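
With wrapGoDefer deleted here, order no longer builds the argument-free closure itself; by the time order and walk run, go and defer calls are expected to already be in that wrapped form (see validGoDeferCall in walk/stmt.go below). The user-visible semantics are unchanged and match the transformation the removed comment described; a runnable sketch of that equivalence (illustrative only, not part of the patch):

    package main

    import "fmt"

    func report(x, y int) { fmt.Println("deferred with", x, y) }

    func main() {
        x, y := 1, 2

        // Written form:
        defer report(x, y)

        // The wrapping is semantically equivalent to evaluating the arguments
        // now and deferring an argument-free closure over the copies:
        x1, y1 := x, y
        defer func() { report(x1, y1) }()

        x, y = 100, 200 // affects neither deferred call; both print "deferred with 1 2"
    }
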
diff --git a/src/cmd/compile/internal/walk/stmt.go b/src/cmd/compile/internal/walk/stmt.go
index 0bf76680c4..4581bca3df 100644
--- a/src/cmd/compile/internal/walk/stmt.go
+++ b/src/cmd/compile/internal/walk/stmt.go
@@ -7,7 +7,6 @@ package walk
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
- "cmd/compile/internal/typecheck"
)
// The result of walkStmt MUST be assigned back to n, e.g.
@@ -41,7 +40,6 @@ func walkStmt(n ir.Node) ir.Node {
ir.OAS2MAPR,
ir.OCLOSE,
ir.OCOPY,
- ir.OCALLMETH,
ir.OCALLINTER,
ir.OCALL,
ir.OCALLFUNC,
@@ -50,7 +48,7 @@ func walkStmt(n ir.Node) ir.Node {
ir.OPRINT,
ir.OPRINTN,
ir.OPANIC,
- ir.ORECOVER,
+ ir.ORECOVERFP,
ir.OGETG:
if n.Typecheck() == 0 {
base.Fatalf("missing typecheck: %+v", n)
@@ -187,33 +185,28 @@ func walkFor(n *ir.ForStmt) ir.Node {
return n
}
+// validGoDeferCall reports whether call is a valid call to appear in
+// a go or defer statement; that is, whether it's a regular function
+// call without arguments or results.
+func validGoDeferCall(call ir.Node) bool {
+ if call, ok := call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC && len(call.KeepAlive) == 0 {
+ sig := call.X.Type()
+ return sig.NumParams()+sig.NumResults() == 0
+ }
+ return false
+}
+
// walkGoDefer walks an OGO or ODEFER node.
func walkGoDefer(n *ir.GoDeferStmt) ir.Node {
- var init ir.Nodes
- switch call := n.Call; call.Op() {
- case ir.OPRINT, ir.OPRINTN:
- call := call.(*ir.CallExpr)
- n.Call = wrapCall(call, &init)
-
- case ir.ODELETE:
- call := call.(*ir.CallExpr)
- n.Call = wrapCall(call, &init)
+ if !validGoDeferCall(n.Call) {
+ base.FatalfAt(n.Pos(), "invalid %v call: %v", n.Op(), n.Call)
+ }
- case ir.OCOPY:
- call := call.(*ir.BinaryExpr)
- n.Call = walkCopy(call, &init, true)
+ var init ir.Nodes
- case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
- call := call.(*ir.CallExpr)
- if len(call.KeepAlive) > 0 {
- n.Call = wrapCall(call, &init)
- } else {
- n.Call = walkExpr(call, &init)
- }
+ call := n.Call.(*ir.CallExpr)
+ call.X = walkExpr(call.X, &init)
- default:
- n.Call = walkExpr(call, &init)
- }
if len(init) > 0 {
init.Append(n)
return ir.NewBlockStmt(n.Pos(), init)
@@ -228,110 +221,3 @@ func walkIf(n *ir.IfStmt) ir.Node {
walkStmtList(n.Else)
return n
}
-
-// Rewrite
-// go builtin(x, y, z)
-// into
-// go func(a1, a2, a3) {
-// builtin(a1, a2, a3)
-// }(x, y, z)
-// for print, println, and delete.
-//
-// Rewrite
-// go f(x, y, uintptr(unsafe.Pointer(z)))
-// into
-// go func(a1, a2, a3) {
-// f(a1, a2, uintptr(a3))
-// }(x, y, unsafe.Pointer(z))
-// for function contains unsafe-uintptr arguments.
-
-var wrapCall_prgen int
-
-// The result of wrapCall MUST be assigned back to n, e.g.
-// n.Left = wrapCall(n.Left, init)
-func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {
- if len(n.Init()) != 0 {
- walkStmtList(n.Init())
- init.Append(ir.TakeInit(n)...)
- }
-
- isBuiltinCall := n.Op() != ir.OCALLFUNC && n.Op() != ir.OCALLMETH && n.Op() != ir.OCALLINTER
-
- // Turn f(a, b, []T{c, d, e}...) back into f(a, b, c, d, e).
- if !isBuiltinCall && n.IsDDD {
- undoVariadic(n)
- }
-
- wrapArgs := n.Args
- // If there's a receiver argument, it needs to be passed through the wrapper too.
- if n.Op() == ir.OCALLMETH || n.Op() == ir.OCALLINTER {
- recv := n.X.(*ir.SelectorExpr).X
- wrapArgs = append([]ir.Node{recv}, wrapArgs...)
- }
-
- // origArgs keeps track of what argument is uintptr-unsafe/unsafe-uintptr conversion.
- origArgs := make([]ir.Node, len(wrapArgs))
- var funcArgs []*ir.Field
- for i, arg := range wrapArgs {
- s := typecheck.LookupNum("a", i)
- if !isBuiltinCall && arg.Op() == ir.OCONVNOP && arg.Type().IsUintptr() && arg.(*ir.ConvExpr).X.Type().IsUnsafePtr() {
- origArgs[i] = arg
- arg = arg.(*ir.ConvExpr).X
- wrapArgs[i] = arg
- }
- funcArgs = append(funcArgs, ir.NewField(base.Pos, s, nil, arg.Type()))
- }
- t := ir.NewFuncType(base.Pos, nil, funcArgs, nil)
-
- wrapCall_prgen++
- sym := typecheck.LookupNum("wrap·", wrapCall_prgen)
- fn := typecheck.DeclFunc(sym, t)
-
- args := ir.ParamNames(t.Type())
- for i, origArg := range origArgs {
- if origArg == nil {
- continue
- }
- args[i] = ir.NewConvExpr(base.Pos, origArg.Op(), origArg.Type(), args[i])
- }
- if n.Op() == ir.OCALLMETH || n.Op() == ir.OCALLINTER {
- // Move wrapped receiver argument back to its appropriate place.
- recv := typecheck.Expr(args[0])
- n.X.(*ir.SelectorExpr).X = recv
- args = args[1:]
- }
- call := ir.NewCallExpr(base.Pos, n.Op(), n.X, args)
- if !isBuiltinCall {
- call.SetOp(ir.OCALL)
- call.IsDDD = n.IsDDD
- }
- fn.Body = []ir.Node{call}
-
- typecheck.FinishFuncBody()
-
- typecheck.Func(fn)
- typecheck.Stmts(fn.Body)
- typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
-
- call = ir.NewCallExpr(base.Pos, ir.OCALL, fn.Nname, wrapArgs)
- return walkExpr(typecheck.Stmt(call), init)
-}
-
-// undoVariadic turns a call to a variadic function of the form
-//
-// f(a, b, []T{c, d, e}...)
-//
-// back into
-//
-// f(a, b, c, d, e)
-//
-func undoVariadic(call *ir.CallExpr) {
- if call.IsDDD {
- last := len(call.Args) - 1
- if va := call.Args[last]; va.Op() == ir.OSLICELIT {
- va := va.(*ir.CompLitExpr)
- call.Args = append(call.Args[:last], va.List...)
- call.IsDDD = false
- }
- }
-}
diff --git a/src/cmd/compile/internal/walk/switch.go b/src/cmd/compile/internal/walk/switch.go
index 162de018f6..3705c5b192 100644
--- a/src/cmd/compile/internal/walk/switch.go
+++ b/src/cmd/compile/internal/walk/switch.go
@@ -360,10 +360,10 @@ func walkSwitchType(sw *ir.SwitchStmt) {
}
if singleType != nil && singleType.IsInterface() {
- s.Add(ncase.Pos(), n1.Type(), caseVar, jmp)
+ s.Add(ncase.Pos(), n1, caseVar, jmp)
caseVarInitialized = true
} else {
- s.Add(ncase.Pos(), n1.Type(), nil, jmp)
+ s.Add(ncase.Pos(), n1, nil, jmp)
}
}
@@ -377,6 +377,17 @@ func walkSwitchType(sw *ir.SwitchStmt) {
}
val = ifaceData(ncase.Pos(), s.facename, singleType)
}
+ if len(ncase.List) == 1 && ncase.List[0].Op() == ir.ODYNAMICTYPE {
+ dt := ncase.List[0].(*ir.DynamicType)
+ x := ir.NewDynamicTypeAssertExpr(ncase.Pos(), ir.ODYNAMICDOTTYPE, val, dt.X)
+ if dt.ITab != nil {
+ // TODO: make ITab a separate field in DynamicTypeAssertExpr?
+ x.T = dt.ITab
+ }
+ x.SetType(caseVar.Type())
+ x.SetTypecheck(1)
+ val = x
+ }
l := []ir.Node{
ir.NewDecl(ncase.Pos(), ir.ODCL, caseVar),
ir.NewAssignStmt(ncase.Pos(), caseVar, val),
@@ -446,7 +457,8 @@ type typeClause struct {
body ir.Nodes
}
-func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar *ir.Name, jmp ir.Node) {
+func (s *typeSwitch) Add(pos src.XPos, n1 ir.Node, caseVar *ir.Name, jmp ir.Node) {
+ typ := n1.Type()
var body ir.Nodes
if caseVar != nil {
l := []ir.Node{
@@ -462,9 +474,25 @@ func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar *ir.Name, jmp ir
// cv, ok = iface.(type)
as := ir.NewAssignListStmt(pos, ir.OAS2, nil, nil)
as.Lhs = []ir.Node{caseVar, s.okname} // cv, ok =
- dot := ir.NewTypeAssertExpr(pos, s.facename, nil)
- dot.SetType(typ) // iface.(type)
- as.Rhs = []ir.Node{dot}
+ switch n1.Op() {
+ case ir.OTYPE:
+ // Static type assertion (non-generic)
+ dot := ir.NewTypeAssertExpr(pos, s.facename, nil)
+ dot.SetType(typ) // iface.(type)
+ as.Rhs = []ir.Node{dot}
+ case ir.ODYNAMICTYPE:
+ // Dynamic type assertion (generic)
+ dt := n1.(*ir.DynamicType)
+ dot := ir.NewDynamicTypeAssertExpr(pos, ir.ODYNAMICDOTTYPE, s.facename, dt.X)
+ if dt.ITab != nil {
+ dot.T = dt.ITab
+ }
+ dot.SetType(typ)
+ dot.SetTypecheck(1)
+ as.Rhs = []ir.Node{dot}
+ default:
+ base.Fatalf("unhandled type case %s", n1.Op())
+ }
appendWalkStmt(&body, as)
// if ok { goto label }
@@ -473,9 +501,10 @@ func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar *ir.Name, jmp ir
nif.Body = []ir.Node{jmp}
body.Append(nif)
- if !typ.IsInterface() {
+ if n1.Op() == ir.OTYPE && !typ.IsInterface() {
+ // Defer static, noninterface cases so they can be binary searched by hash.
s.clauses = append(s.clauses, typeClause{
- hash: types.TypeHash(typ),
+ hash: types.TypeHash(n1.Type()),
body: body,
})
return
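
The ODYNAMICTYPE handling added above exists for type-switch cases whose case type involves a type parameter, where the type descriptor (or itab) is only available at run time via the generic dictionary. A minimal example of the source pattern that takes this path (the op names in the comments refer to the code above; the example itself is ordinary Go 1.18):

    package main

    import "fmt"

    // describe type-switches an interface value against a type parameter. The
    // "case T" clause has no fixed type at compile time, so it is handled by the
    // dynamic machinery above (ODYNAMICTYPE / ODYNAMICDOTTYPE), with T's
    // descriptor supplied through the dictionary.
    func describe[T any](x interface{}) string {
        switch x.(type) {
        case T:
            return "matches the type argument"
        case string:
            return "string (an ordinary static case)"
        default:
            return "something else"
        }
    }

    func main() {
        fmt.Println(describe[int](42))
        fmt.Println(describe[int]("hello"))
        fmt.Println(describe[int](3.5))
    }
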
diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go
index 26da6e3145..6551fe7a64 100644
--- a/src/cmd/compile/internal/walk/walk.go
+++ b/src/cmd/compile/internal/walk/walk.go
@@ -113,8 +113,7 @@ func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) *ir.CallEx
base.Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va))
}
- call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, va)
- typecheck.Call(call)
+ call := typecheck.Call(base.Pos, fn, va, false).(*ir.CallExpr)
call.SetType(t)
return walkExpr(call, init).(*ir.CallExpr)
}
@@ -308,7 +307,7 @@ func mayCall(n ir.Node) bool {
default:
base.FatalfAt(n.Pos(), "mayCall %+v", n)
- case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER,
+ case ir.OCALLFUNC, ir.OCALLINTER,
ir.OUNSAFEADD, ir.OUNSAFESLICE:
return true
@@ -343,7 +342,7 @@ func mayCall(n ir.Node) bool {
ir.OCAP, ir.OIMAG, ir.OLEN, ir.OREAL,
ir.OCONVNOP, ir.ODOT,
ir.OCFUNC, ir.OIDATA, ir.OITAB, ir.OSPTR,
- ir.OBYTES2STRTMP, ir.OGETG, ir.OSLICEHEADER:
+ ir.OBYTES2STRTMP, ir.OGETG, ir.OGETCALLERPC, ir.OGETCALLERSP, ir.OSLICEHEADER:
// ok: operations that don't require function calls.
// Expand as needed.
}
diff --git a/src/cmd/compile/internal/wasm/ssa.go b/src/cmd/compile/internal/wasm/ssa.go
index 31b09016eb..0b2ca3fdbb 100644
--- a/src/cmd/compile/internal/wasm/ssa.go
+++ b/src/cmd/compile/internal/wasm/ssa.go
@@ -24,7 +24,6 @@ func Init(arch *ssagen.ArchInfo) {
arch.ZeroRange = zeroRange
arch.Ginsnop = ginsnop
- arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = ssaMarkMoves
arch.SSAGenValue = ssaGenValue
@@ -126,7 +125,11 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
case ssa.OpWasmLoweredStaticCall, ssa.OpWasmLoweredClosureCall, ssa.OpWasmLoweredInterCall:
s.PrepareCall(v)
if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn == ir.Syms.Deferreturn {
- // add a resume point before call to deferreturn so it can be called again via jmpdefer
+ // The runtime needs to inject jumps to
+ // deferreturn calls using the address in
+ // _func.deferreturn. Hence, the call to
+ // deferreturn must itself be a resumption
+ // point so it gets a target PC.
s.Prog(wasm.ARESUMEPOINT)
}
if v.Op == ssa.OpWasmLoweredClosureCall {
diff --git a/src/cmd/compile/internal/x86/galign.go b/src/cmd/compile/internal/x86/galign.go
index 00a20e429f..5565bd32c7 100644
--- a/src/cmd/compile/internal/x86/galign.go
+++ b/src/cmd/compile/internal/x86/galign.go
@@ -34,7 +34,6 @@ func Init(arch *ssagen.ArchInfo) {
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
- arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = ssaMarkMoves
}
diff --git a/src/cmd/go.mod b/src/cmd/go.mod
index cd03968eed..ccfff09885 100644
--- a/src/cmd/go.mod
+++ b/src/cmd/go.mod
@@ -1,6 +1,6 @@
module cmd
-go 1.17
+go 1.18
require (
github.com/google/pprof v0.0.0-20210506205249-923b5ab0fc1a
@@ -10,6 +10,6 @@ require (
golang.org/x/mod v0.4.3-0.20210608190319-0f08993efd8a
golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744 // indirect
golang.org/x/term v0.0.0-20210503060354-a79de5458b56
- golang.org/x/tools v0.1.2-0.20210519160823-49064d2332f9
+ golang.org/x/tools v0.1.6-0.20210809225032-337cebd2c151
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
)
diff --git a/src/cmd/go.sum b/src/cmd/go.sum
index d728acaec9..f4d41f0d10 100644
--- a/src/cmd/go.sum
+++ b/src/cmd/go.sum
@@ -5,41 +5,18 @@ github.com/google/pprof v0.0.0-20210506205249-923b5ab0fc1a h1:jmAp/2PZAScNd62lTD
github.com/google/pprof v0.0.0-20210506205249-923b5ab0fc1a/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639 h1:mV02weKRL81bEnm8A0HT1/CAelMQDBuQIfLw8n+d6xI=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
golang.org/x/arch v0.0.0-20210502124803-cbf565b21d1e h1:pv3V0NlNSh5Q6AX/StwGLBjcLS7UN4m4Gq+V+uSecqM=
golang.org/x/arch v0.0.0-20210502124803-cbf565b21d1e/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20210503195802-e9a32991a82e h1:8foAy0aoO5GkqCvAEJ4VC4P3zksTg4X4aJCDpZzmgQI=
golang.org/x/crypto v0.0.0-20210503195802-e9a32991a82e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.3-0.20210608190319-0f08993efd8a h1:e8qnjKz4EE6OjRki9wTadWSIogINvq10sMcuBRORxMY=
golang.org/x/mod v0.4.3-0.20210608190319-0f08993efd8a/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744 h1:yhBbb4IRs2HS9PPlAg6DMC6mUOKexJBNsLf4Z+6En1Q=
golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210503060354-a79de5458b56 h1:b8jxX3zqjpqb2LklXPzKSGJhzyxCOZSz8ncv8Nv+y7w=
golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.1.2-0.20210519160823-49064d2332f9 h1:2XlR/j4I4xz5GQZI7zBjqTfezYyRIE2jD5IMousB2rg=
-golang.org/x/tools v0.1.2-0.20210519160823-49064d2332f9/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/tools v0.1.6-0.20210809225032-337cebd2c151 h1:jHjT6WuVKEMzjJgrS1+r1wk54oxwqumUnvtn0QZXyXE=
+golang.org/x/tools v0.1.6-0.20210809225032-337cebd2c151/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go
index 6ce276537b..b13191f678 100644
--- a/src/cmd/go/go_test.go
+++ b/src/cmd/go/go_test.go
@@ -806,7 +806,9 @@ func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) {
"src/internal/abi",
"src/internal/bytealg",
"src/internal/cpu",
+ "src/internal/goarch",
"src/internal/goexperiment",
+ "src/internal/goos",
"src/math/bits",
"src/unsafe",
filepath.Join("pkg", runtime.GOOS+"_"+runtime.GOARCH),
diff --git a/src/cmd/go/internal/work/gc.go b/src/cmd/go/internal/work/gc.go
index 85da4f89f9..2ae908bc8f 100644
--- a/src/cmd/go/internal/work/gc.go
+++ b/src/cmd/go/internal/work/gc.go
@@ -29,6 +29,18 @@ import (
// The 'path' used for GOROOT_FINAL when -trimpath is specified
const trimPathGoRootFinal = "go"
+var runtimePackages = map[string]struct{}{
+ "internal/abi": struct{}{},
+ "internal/bytealg": struct{}{},
+ "internal/cpu": struct{}{},
+ "internal/goarch": struct{}{},
+ "internal/goos": struct{}{},
+ "runtime": struct{}{},
+ "runtime/internal/atomic": struct{}{},
+ "runtime/internal/math": struct{}{},
+ "runtime/internal/sys": struct{}{},
+}
+
// The Go toolchain.
type gcToolchain struct{}
@@ -88,11 +100,8 @@ func (gcToolchain) gc(b *Builder, a *Action, archive string, importcfg, embedcfg
if p.Standard {
gcargs = append(gcargs, "-std")
}
- compilingRuntime := p.Standard && (p.ImportPath == "runtime" || strings.HasPrefix(p.ImportPath, "runtime/internal"))
- // The runtime package imports a couple of general internal packages.
- if p.Standard && (p.ImportPath == "internal/cpu" || p.ImportPath == "internal/bytealg" || p.ImportPath == "internal/abi") {
- compilingRuntime = true
- }
+ _, compilingRuntime := runtimePackages[p.ImportPath]
+ compilingRuntime = compilingRuntime && p.Standard
if compilingRuntime {
// runtime compiles with a special gc flag to check for
// memory allocations that are invalid in the runtime package,
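A self-contained sketch of the set-membership idiom the new runtimePackages table relies on (names here are illustrative, not cmd/go internals): a comma-ok map lookup replaces the old chain of string comparisons, with the Standard check still applied on top.

package main

import "fmt"

// illustrative subset of the table above
var runtimePackages = map[string]struct{}{
	"runtime":      {},
	"internal/cpu": {},
}

// isRuntimePackage mirrors the two-step check in gc(): membership in the
// table, then the p.Standard guard.
func isRuntimePackage(importPath string, standard bool) bool {
	_, ok := runtimePackages[importPath]
	return ok && standard
}

func main() {
	fmt.Println(isRuntimePackage("internal/cpu", true))  // true
	fmt.Println(isRuntimePackage("internal/cpu", false)) // false
}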
diff --git a/src/cmd/gofmt/gofmt_test.go b/src/cmd/gofmt/gofmt_test.go
index f0d3f8780f..9ef7676214 100644
--- a/src/cmd/gofmt/gofmt_test.go
+++ b/src/cmd/gofmt/gofmt_test.go
@@ -54,8 +54,6 @@ func gofmtFlags(filename string, maxLines int) string {
return ""
}
-var typeParamsEnabled = false
-
func runTest(t *testing.T, in, out string) {
// process flags
*simplifyAST = false
@@ -78,11 +76,6 @@ func runTest(t *testing.T, in, out string) {
case "-stdin":
// fake flag - pretend input is from stdin
stdin = true
- case "-G":
- // fake flag - test is for generic code
- if !typeParamsEnabled {
- return
- }
default:
t.Errorf("unrecognized flag name: %s", name)
}
diff --git a/src/cmd/gofmt/testdata/typeparams.golden b/src/cmd/gofmt/testdata/typeparams.golden
index 35f08d1379..f71bd130db 100644
--- a/src/cmd/gofmt/testdata/typeparams.golden
+++ b/src/cmd/gofmt/testdata/typeparams.golden
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//gofmt -G
+//gofmt
package typeparams
diff --git a/src/cmd/gofmt/testdata/typeparams.input b/src/cmd/gofmt/testdata/typeparams.input
index 7f3212c8e4..5d4c53d9f7 100644
--- a/src/cmd/gofmt/testdata/typeparams.input
+++ b/src/cmd/gofmt/testdata/typeparams.input
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//gofmt -G
+//gofmt
package typeparams
diff --git a/src/cmd/internal/dwarf/dwarf.go b/src/cmd/internal/dwarf/dwarf.go
index ec441c2bcb..860c7d6c0d 100644
--- a/src/cmd/internal/dwarf/dwarf.go
+++ b/src/cmd/internal/dwarf/dwarf.go
@@ -1266,7 +1266,7 @@ func PutAbstractFunc(ctxt Context, s *FnState) error {
// its corresponding 'abstract' DIE (containing location-independent
// attributes such as name, type, etc). Inlined subroutine DIEs can
// have other inlined subroutine DIEs as children.
-func putInlinedFunc(ctxt Context, s *FnState, callersym Sym, callIdx int) error {
+func putInlinedFunc(ctxt Context, s *FnState, callIdx int) error {
ic := s.InlCalls.Calls[callIdx]
callee := ic.AbsFunSym
@@ -1277,7 +1277,7 @@ func putInlinedFunc(ctxt Context, s *FnState, callersym Sym, callIdx int) error
Uleb128put(ctxt, s.Info, int64(abbrev))
if logDwarf {
- ctxt.Logf("putInlinedFunc(caller=%v,callee=%v,abbrev=%d)\n", callersym, callee, abbrev)
+ ctxt.Logf("putInlinedFunc(callee=%v,abbrev=%d)\n", callee, abbrev)
}
// Abstract origin.
@@ -1312,8 +1312,7 @@ func putInlinedFunc(ctxt Context, s *FnState, callersym Sym, callIdx int) error
// Children of this inline.
for _, sib := range inlChildren(callIdx, &s.InlCalls) {
- absfn := s.InlCalls.Calls[sib].AbsFunSym
- err := putInlinedFunc(ctxt, s, absfn, sib)
+ err := putInlinedFunc(ctxt, s, sib)
if err != nil {
return err
}
@@ -1354,8 +1353,7 @@ func PutConcreteFunc(ctxt Context, s *FnState) error {
// Inlined subroutines.
for _, sib := range inlChildren(-1, &s.InlCalls) {
- absfn := s.InlCalls.Calls[sib].AbsFunSym
- err := putInlinedFunc(ctxt, s, absfn, sib)
+ err := putInlinedFunc(ctxt, s, sib)
if err != nil {
return err
}
@@ -1402,8 +1400,7 @@ func PutDefaultFunc(ctxt Context, s *FnState) error {
// Inlined subroutines.
for _, sib := range inlChildren(-1, &s.InlCalls) {
- absfn := s.InlCalls.Calls[sib].AbsFunSym
- err := putInlinedFunc(ctxt, s, absfn, sib)
+ err := putInlinedFunc(ctxt, s, sib)
if err != nil {
return err
}
diff --git a/src/cmd/internal/goobj/builtinlist.go b/src/cmd/internal/goobj/builtinlist.go
index 9f248137da..608c0d7222 100644
--- a/src/cmd/internal/goobj/builtinlist.go
+++ b/src/cmd/internal/goobj/builtinlist.go
@@ -33,6 +33,7 @@ var builtins = [...]struct {
{"runtime.goPanicSlice3BU", 1},
{"runtime.goPanicSlice3C", 1},
{"runtime.goPanicSlice3CU", 1},
+ {"runtime.goPanicSliceConvert", 1},
{"runtime.printbool", 1},
{"runtime.printfloat", 1},
{"runtime.printint", 1},
@@ -129,6 +130,8 @@ var builtins = [...]struct {
{"runtime.makeslice64", 1},
{"runtime.makeslicecopy", 1},
{"runtime.growslice", 1},
+ {"runtime.unsafeslice", 1},
+ {"runtime.unsafeslice64", 1},
{"runtime.memmove", 1},
{"runtime.memclrNoHeapPointers", 1},
{"runtime.memclrHasPointers", 1},
@@ -203,7 +206,9 @@ var builtins = [...]struct {
{"runtime.newproc", 1},
{"runtime.panicoverflow", 1},
{"runtime.sigpanic", 1},
- {"runtime.gcWriteBarrier", 0},
+ {"runtime.gcWriteBarrier", 1},
+ {"runtime.duffzero", 1},
+ {"runtime.duffcopy", 1},
{"runtime.morestack", 0},
{"runtime.morestackc", 0},
{"runtime.morestack_noctxt", 0},
diff --git a/src/cmd/internal/goobj/mkbuiltin.go b/src/cmd/internal/goobj/mkbuiltin.go
index 18b969586c..c9995fcede 100644
--- a/src/cmd/internal/goobj/mkbuiltin.go
+++ b/src/cmd/internal/goobj/mkbuiltin.go
@@ -151,7 +151,9 @@ var fextras = [...]extra{
{"sigpanic", 1},
// compiler backend inserted calls
- {"gcWriteBarrier", 0}, // asm function, ABI0
+ {"gcWriteBarrier", 1},
+ {"duffzero", 1},
+ {"duffcopy", 1},
// assembler backend inserted calls
{"morestack", 0}, // asm function, ABI0
diff --git a/src/cmd/internal/obj/arm/asm5.go b/src/cmd/internal/obj/arm/asm5.go
index ccf5f9e7f8..7b1682776e 100644
--- a/src/cmd/internal/obj/arm/asm5.go
+++ b/src/cmd/internal/obj/arm/asm5.go
@@ -355,11 +355,10 @@ var oprange [ALAST & obj.AMask][]Optab
var xcmp [C_GOK + 1][C_GOK + 1]bool
var (
- deferreturn *obj.LSym
- symdiv *obj.LSym
- symdivu *obj.LSym
- symmod *obj.LSym
- symmodu *obj.LSym
+ symdiv *obj.LSym
+ symdivu *obj.LSym
+ symmod *obj.LSym
+ symmodu *obj.LSym
)
// Note about encoding: Prog.scond holds the condition encoding,
@@ -1219,8 +1218,6 @@ func buildop(ctxt *obj.Link) {
return
}
- deferreturn = ctxt.LookupABI("runtime.deferreturn", obj.ABIInternal)
-
symdiv = ctxt.Lookup("runtime._div")
symdivu = ctxt.Lookup("runtime._divu")
symmod = ctxt.Lookup("runtime._mod")
diff --git a/src/cmd/internal/obj/arm64/obj7.go b/src/cmd/internal/obj/arm64/obj7.go
index e41fb3bb75..31b7c43245 100644
--- a/src/cmd/internal/obj/arm64/obj7.go
+++ b/src/cmd/internal/obj/arm64/obj7.go
@@ -52,7 +52,7 @@ var complements = []obj.As{
}
func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
- // MOV g_stackguard(g), R1
+ // MOV g_stackguard(g), RT1
p = obj.Appendp(p, c.newprog)
p.As = AMOVD
@@ -63,7 +63,7 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p.From.Offset = 3 * int64(c.ctxt.Arch.PtrSize) // G.stackguard1
}
p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_R1
+ p.To.Reg = REGRT1
// Mark the stack bound check and morestack call async nonpreemptible.
// If we get preempted here, when resumed the preemption request is
@@ -74,25 +74,25 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
q := (*obj.Prog)(nil)
if framesize <= objabi.StackSmall {
// small stack: SP < stackguard
- // MOV SP, R2
- // CMP stackguard, R2
+ // MOV SP, RT2
+ // CMP stackguard, RT2
p = obj.Appendp(p, c.newprog)
p.As = AMOVD
p.From.Type = obj.TYPE_REG
p.From.Reg = REGSP
p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_R2
+ p.To.Reg = REGRT2
p = obj.Appendp(p, c.newprog)
p.As = ACMP
p.From.Type = obj.TYPE_REG
- p.From.Reg = REG_R1
- p.Reg = REG_R2
+ p.From.Reg = REGRT1
+ p.Reg = REGRT2
} else if framesize <= objabi.StackBig {
// large stack: SP-framesize < stackguard-StackSmall
- // SUB $(framesize-StackSmall), SP, R2
- // CMP stackguard, R2
+ // SUB $(framesize-StackSmall), SP, RT2
+ // CMP stackguard, RT2
p = obj.Appendp(p, c.newprog)
p.As = ASUB
@@ -100,13 +100,13 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p.From.Offset = int64(framesize) - objabi.StackSmall
p.Reg = REGSP
p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_R2
+ p.To.Reg = REGRT2
p = obj.Appendp(p, c.newprog)
p.As = ACMP
p.From.Type = obj.TYPE_REG
- p.From.Reg = REG_R1
- p.Reg = REG_R2
+ p.From.Reg = REGRT1
+ p.Reg = REGRT2
} else {
// Such a large stack we need to protect against underflow.
// The runtime guarantees SP > objabi.StackBig, but
@@ -115,10 +115,10 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
// stack guard to incorrectly succeed. We explicitly
// guard against underflow.
//
- // SUBS $(framesize-StackSmall), SP, R2
+ // SUBS $(framesize-StackSmall), SP, RT2
// // On underflow, jump to morestack
// BLO label_of_call_to_morestack
- // CMP stackguard, R2
+ // CMP stackguard, RT2
p = obj.Appendp(p, c.newprog)
p.As = ASUBS
@@ -126,7 +126,7 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p.From.Offset = int64(framesize) - objabi.StackSmall
p.Reg = REGSP
p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_R2
+ p.To.Reg = REGRT2
p = obj.Appendp(p, c.newprog)
q = p
@@ -136,8 +136,8 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p = obj.Appendp(p, c.newprog)
p.As = ACMP
p.From.Type = obj.TYPE_REG
- p.From.Reg = REG_R1
- p.Reg = REG_R2
+ p.From.Reg = REGRT1
+ p.Reg = REGRT2
}
// BLS do-morestack
@@ -161,17 +161,20 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
pcdata := c.ctxt.EmitEntryStackMap(c.cursym, spfix, c.newprog)
pcdata = c.ctxt.StartUnsafePoint(pcdata, c.newprog)
+ if q != nil {
+ q.To.SetTarget(pcdata)
+ }
+ bls.To.SetTarget(pcdata)
+
+ spill := c.cursym.Func().SpillRegisterArgs(pcdata, c.newprog)
+
// MOV LR, R3
- movlr := obj.Appendp(pcdata, c.newprog)
+ movlr := obj.Appendp(spill, c.newprog)
movlr.As = AMOVD
movlr.From.Type = obj.TYPE_REG
movlr.From.Reg = REGLINK
movlr.To.Type = obj.TYPE_REG
movlr.To.Reg = REG_R3
- if q != nil {
- q.To.SetTarget(movlr)
- }
- bls.To.SetTarget(movlr)
debug := movlr
if false {
@@ -196,7 +199,8 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
}
call.To.Sym = c.ctxt.Lookup(morestack)
- pcdata = c.ctxt.EndUnsafePoint(call, c.newprog, -1)
+ unspill := c.cursym.Func().UnspillRegisterArgs(call, c.newprog)
+ pcdata = c.ctxt.EndUnsafePoint(unspill, c.newprog, -1)
// B start
jmp := obj.Appendp(pcdata, c.newprog)
@@ -321,9 +325,9 @@ func (c *ctxt7) rewriteToUseGot(p *obj.Prog) {
// CALL REGTMP
var sym *obj.LSym
if p.As == obj.ADUFFZERO {
- sym = c.ctxt.Lookup("runtime.duffzero")
+ sym = c.ctxt.LookupABI("runtime.duffzero", obj.ABIInternal)
} else {
- sym = c.ctxt.Lookup("runtime.duffcopy")
+ sym = c.ctxt.LookupABI("runtime.duffcopy", obj.ABIInternal)
}
offset := p.To.Offset
p.As = AMOVD
@@ -631,38 +635,38 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
if c.cursym.Func().Text.From.Sym.Wrapper() {
// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
//
- // MOV g_panic(g), R1
+ // MOV g_panic(g), RT1
// CBNZ checkargp
// end:
// NOP
// ... function body ...
// checkargp:
- // MOV panic_argp(R1), R2
- // ADD $(autosize+8), RSP, R3
- // CMP R2, R3
+ // MOV panic_argp(RT1), RT2
+ // ADD $(autosize+8), RSP, R20
+ // CMP RT2, R20
// BNE end
- // ADD $8, RSP, R4
- // MOVD R4, panic_argp(R1)
+ // ADD $8, RSP, R20
+ // MOVD R20, panic_argp(RT1)
// B end
//
// The NOP is needed to give the jumps somewhere to land.
// It is a liblink NOP, not an ARM64 NOP: it encodes to 0 instruction bytes.
q = q1
- // MOV g_panic(g), R1
+ // MOV g_panic(g), RT1
q = obj.Appendp(q, c.newprog)
q.As = AMOVD
q.From.Type = obj.TYPE_MEM
q.From.Reg = REGG
q.From.Offset = 4 * int64(c.ctxt.Arch.PtrSize) // G.panic
q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R1
+ q.To.Reg = REGRT1
- // CBNZ R1, checkargp
+ // CBNZ RT1, checkargp
cbnz := obj.Appendp(q, c.newprog)
cbnz.As = ACBNZ
cbnz.From.Type = obj.TYPE_REG
- cbnz.From.Reg = REG_R1
+ cbnz.From.Reg = REGRT1
cbnz.To.Type = obj.TYPE_BRANCH
// Empty branch target at the top of the function body
@@ -674,33 +678,33 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
for last = end; last.Link != nil; last = last.Link {
}
- // MOV panic_argp(R1), R2
+ // MOV panic_argp(RT1), RT2
mov := obj.Appendp(last, c.newprog)
mov.As = AMOVD
mov.From.Type = obj.TYPE_MEM
- mov.From.Reg = REG_R1
+ mov.From.Reg = REGRT1
mov.From.Offset = 0 // Panic.argp
mov.To.Type = obj.TYPE_REG
- mov.To.Reg = REG_R2
+ mov.To.Reg = REGRT2
// CBNZ branches to the MOV above
cbnz.To.SetTarget(mov)
- // ADD $(autosize+8), SP, R3
+ // ADD $(autosize+8), SP, R20
q = obj.Appendp(mov, c.newprog)
q.As = AADD
q.From.Type = obj.TYPE_CONST
q.From.Offset = int64(c.autosize) + 8
q.Reg = REGSP
q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R3
+ q.To.Reg = REG_R20
- // CMP R2, R3
+ // CMP RT2, R20
q = obj.Appendp(q, c.newprog)
q.As = ACMP
q.From.Type = obj.TYPE_REG
- q.From.Reg = REG_R2
- q.Reg = REG_R3
+ q.From.Reg = REGRT2
+ q.Reg = REG_R20
// BNE end
q = obj.Appendp(q, c.newprog)
@@ -708,22 +712,22 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
q.To.Type = obj.TYPE_BRANCH
q.To.SetTarget(end)
- // ADD $8, SP, R4
+ // ADD $8, SP, R20
q = obj.Appendp(q, c.newprog)
q.As = AADD
q.From.Type = obj.TYPE_CONST
q.From.Offset = 8
q.Reg = REGSP
q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R4
+ q.To.Reg = REG_R20
- // MOV R4, panic_argp(R1)
+ // MOV R20, panic_argp(RT1)
q = obj.Appendp(q, c.newprog)
q.As = AMOVD
q.From.Type = obj.TYPE_REG
- q.From.Reg = REG_R4
+ q.From.Reg = REG_R20
q.To.Type = obj.TYPE_MEM
- q.To.Reg = REG_R1
+ q.To.Reg = REGRT1
q.To.Offset = 0 // Panic.argp
// B end
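For readers following the register renaming above, a rough Go rendering of the three stack-bound checks that stacksplit emits; the constants and helper are illustrative stand-ins for objabi.StackSmall and objabi.StackBig, not real runtime code.

package main

import "fmt"

const (
	stackSmall = 128  // stand-in for objabi.StackSmall
	stackBig   = 4096 // stand-in for objabi.StackBig
)

// needMorestack sketches the comparisons the assembler emits before the
// BLS branch to morestack; the real code operates on registers.
func needMorestack(sp, stackguard uintptr, framesize int64) bool {
	switch {
	case framesize <= stackSmall:
		// small stack: SP < stackguard
		return sp < stackguard
	case framesize <= stackBig:
		// large stack: SP-framesize < stackguard-StackSmall
		return sp-uintptr(framesize) < stackguard-stackSmall
	default:
		// huge stack: guard against underflow before comparing
		delta := uintptr(framesize) - stackSmall
		if sp < delta {
			return true
		}
		return sp-delta < stackguard
	}
}

func main() {
	fmt.Println(needMorestack(0x10000, 0x8000, 64)) // false
}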
diff --git a/src/cmd/internal/obj/objfile.go b/src/cmd/internal/obj/objfile.go
index 24fb5a19de..01466ea736 100644
--- a/src/cmd/internal/obj/objfile.go
+++ b/src/cmd/internal/obj/objfile.go
@@ -452,6 +452,11 @@ func (w *writer) contentHash(s *LSym) goobj.HashType {
binary.LittleEndian.PutUint64(tmp[6:14], uint64(r.Add))
h.Write(tmp[:])
rs := r.Sym
+ if rs == nil {
+ fmt.Printf("symbol: %s\n", s)
+ fmt.Printf("relocation: %#v\n", r)
+ panic("nil symbol target in relocation")
+ }
switch rs.PkgIdx {
case goobj.PkgIdxHashed64:
h.Write([]byte{0})
diff --git a/src/cmd/internal/obj/wasm/wasmobj.go b/src/cmd/internal/obj/wasm/wasmobj.go
index ceeae7a257..4d276db678 100644
--- a/src/cmd/internal/obj/wasm/wasmobj.go
+++ b/src/cmd/internal/obj/wasm/wasmobj.go
@@ -129,8 +129,6 @@ var (
morestackNoCtxt *obj.LSym
gcWriteBarrier *obj.LSym
sigpanic *obj.LSym
- deferreturn *obj.LSym
- jmpdefer *obj.LSym
)
const (
@@ -143,10 +141,6 @@ func instinit(ctxt *obj.Link) {
morestackNoCtxt = ctxt.Lookup("runtime.morestack_noctxt")
gcWriteBarrier = ctxt.LookupABI("runtime.gcWriteBarrier", obj.ABIInternal)
sigpanic = ctxt.LookupABI("runtime.sigpanic", obj.ABIInternal)
- deferreturn = ctxt.LookupABI("runtime.deferreturn", obj.ABIInternal)
- // jmpdefer is defined in assembly as ABI0. The compiler will
- // generate a direct ABI0 call from Go, so look for that.
- jmpdefer = ctxt.LookupABI(`"".jmpdefer`, obj.ABI0)
}
func preprocess(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
@@ -423,12 +417,6 @@ func preprocess(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
pcAfterCall-- // sigpanic expects to be called without advancing the pc
}
- // jmpdefer manipulates the return address on the stack so deferreturn gets called repeatedly.
- // Model this in WebAssembly with a loop.
- if call.To.Sym == deferreturn {
- p = appendp(p, ALoop)
- }
-
// SP -= 8
p = appendp(p, AGet, regAddr(REG_SP))
p = appendp(p, AI32Const, constAddr(8))
@@ -479,15 +467,6 @@ func preprocess(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
break
}
- // jmpdefer removes the frame of deferreturn from the Go stack.
- // However, its WebAssembly function still returns normally,
- // so we need to return from deferreturn without removing its
- // stack frame (no RET), because the frame is already gone.
- if call.To.Sym == jmpdefer {
- p = appendp(p, AReturn)
- break
- }
-
// return value of call is on the top of the stack, indicating whether to unwind the WebAssembly stack
if call.As == ACALLNORESUME && call.To.Sym != sigpanic { // sigpanic unwinds the stack, but it never resumes
// trying to unwind WebAssembly stack but call has no resume point, terminate with error
@@ -500,21 +479,6 @@ func preprocess(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
unwindExitBranches = append(unwindExitBranches, p)
}
- // jump to before the call if jmpdefer has reset the return address to the call's PC
- if call.To.Sym == deferreturn {
- // get PC_B from -8(SP)
- p = appendp(p, AGet, regAddr(REG_SP))
- p = appendp(p, AI32Const, constAddr(8))
- p = appendp(p, AI32Sub)
- p = appendp(p, AI32Load16U, constAddr(0))
- p = appendp(p, ATee, regAddr(REG_PC_B))
-
- p = appendp(p, AI32Const, constAddr(call.Pc))
- p = appendp(p, AI32Eq)
- p = appendp(p, ABrIf, constAddr(0))
- p = appendp(p, AEnd) // end of Loop
- }
-
case obj.ARET, ARETUNWIND:
ret := *p
p.As = obj.ANOP
diff --git a/src/cmd/internal/obj/x86/asm6.go b/src/cmd/internal/obj/x86/asm6.go
index 17fa76727e..331a98dfef 100644
--- a/src/cmd/internal/obj/x86/asm6.go
+++ b/src/cmd/internal/obj/x86/asm6.go
@@ -43,7 +43,6 @@ import (
var (
plan9privates *obj.LSym
- deferreturn *obj.LSym
)
// Instruction layout.
diff --git a/src/cmd/internal/obj/x86/obj6.go b/src/cmd/internal/obj/x86/obj6.go
index e2732d53e3..183ca2ebe9 100644
--- a/src/cmd/internal/obj/x86/obj6.go
+++ b/src/cmd/internal/obj/x86/obj6.go
@@ -35,7 +35,6 @@ import (
"cmd/internal/objabi"
"cmd/internal/src"
"cmd/internal/sys"
- "internal/buildcfg"
"log"
"math"
"path"
@@ -647,13 +646,12 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
var regg int16
if !p.From.Sym.NoSplit() || p.From.Sym.Wrapper() {
- if ctxt.Arch.Family == sys.AMD64 && buildcfg.Experiment.RegabiG && cursym.ABI() == obj.ABIInternal {
+ if ctxt.Arch.Family == sys.AMD64 && cursym.ABI() == obj.ABIInternal {
regg = REGG // use the g register directly in ABIInternal
} else {
p = obj.Appendp(p, newprog)
regg = REG_CX
if ctxt.Arch.Family == sys.AMD64 {
- // Using this register means that stacksplit works w/ //go:registerparams even when !buildcfg.Experiment.RegabiG
regg = REGG // == REG_R14
}
p = load_g(ctxt, p, newprog, regg) // load g into regg
diff --git a/src/cmd/internal/objabi/funcid.go b/src/cmd/internal/objabi/funcid.go
index 93ebd7be94..68f6a26a76 100644
--- a/src/cmd/internal/objabi/funcid.go
+++ b/src/cmd/internal/objabi/funcid.go
@@ -34,7 +34,6 @@ const (
FuncID_gogo
FuncID_gopanic
FuncID_handleAsyncEvent
- FuncID_jmpdefer
FuncID_mcall
FuncID_morestack
FuncID_mstart
@@ -60,7 +59,6 @@ var funcIDs = map[string]FuncID{
"gogo": FuncID_gogo,
"gopanic": FuncID_gopanic,
"handleAsyncEvent": FuncID_handleAsyncEvent,
- "jmpdefer": FuncID_jmpdefer,
"main": FuncID_runtime_main,
"mcall": FuncID_mcall,
"morestack": FuncID_morestack,
@@ -74,7 +72,6 @@ var funcIDs = map[string]FuncID{
// Don't show in call stack but otherwise not special.
"deferreturn": FuncID_wrapper,
"runOpenDeferFrame": FuncID_wrapper,
- "reflectcallSave": FuncID_wrapper,
"deferCallSave": FuncID_wrapper,
}
diff --git a/src/cmd/link/internal/ld/deadcode.go b/src/cmd/link/internal/ld/deadcode.go
index 416e5da398..dd5dafc21b 100644
--- a/src/cmd/link/internal/ld/deadcode.go
+++ b/src/cmd/link/internal/ld/deadcode.go
@@ -408,6 +408,9 @@ func (d *deadcodePass) decodeMethodSig(ldr *loader.Loader, arch *sys.Arch, symId
// Decode the method of interface type symbol symIdx at offset off.
func (d *deadcodePass) decodeIfaceMethod(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, off int64) methodsig {
p := ldr.Data(symIdx)
+ if p == nil {
+ panic(fmt.Sprintf("missing symbol %q", ldr.SymName(symIdx)))
+ }
if decodetypeKind(arch, p)&kindMask != kindInterface {
panic(fmt.Sprintf("symbol %q is not an interface", ldr.SymName(symIdx)))
}
diff --git a/src/cmd/link/internal/ld/dwarf_test.go b/src/cmd/link/internal/ld/dwarf_test.go
index 2f59c2fe0a..543dd5caac 100644
--- a/src/cmd/link/internal/ld/dwarf_test.go
+++ b/src/cmd/link/internal/ld/dwarf_test.go
@@ -101,8 +101,11 @@ func gobuild(t *testing.T, dir string, testfile string, gcflags string) *builtFi
}
cmd := exec.Command(testenv.GoToolPath(t), "build", gcflags, "-o", dst, src)
- if b, err := cmd.CombinedOutput(); err != nil {
- t.Logf("build: %s\n", b)
+ b, err := cmd.CombinedOutput()
+ if len(b) != 0 {
+ t.Logf("## build output:\n%s", b)
+ }
+ if err != nil {
t.Fatalf("build error: %v", err)
}
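The change above always logs the build output when there is any, rather than only on failure. A standalone sketch of the same pattern outside the test harness (command and messages are illustrative):

package main

import (
	"fmt"
	"log"
	"os/exec"
)

func main() {
	cmd := exec.Command("go", "version")
	b, err := cmd.CombinedOutput()
	if len(b) != 0 {
		fmt.Printf("## build output:\n%s", b)
	}
	if err != nil {
		log.Fatalf("build error: %v", err)
	}
}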
diff --git a/src/cmd/link/internal/ld/pcln.go b/src/cmd/link/internal/ld/pcln.go
index 05fd302369..70e3e1284b 100644
--- a/src/cmd/link/internal/ld/pcln.go
+++ b/src/cmd/link/internal/ld/pcln.go
@@ -129,11 +129,10 @@ func computeDeferReturn(ctxt *Link, deferReturnSym, s loader.Sym) uint32 {
for ri := 0; ri < relocs.Count(); ri++ {
r := relocs.At(ri)
if target.IsWasm() && r.Type() == objabi.R_ADDR {
- // Wasm does not have a live variable set at the deferreturn
- // call itself. Instead it has one identified by the
- // resumption point immediately preceding the deferreturn.
- // The wasm code has a R_ADDR relocation which is used to
- // set the resumption point to PC_B.
+ // wasm/ssa.go generates an ARESUMEPOINT just
+ // before the deferreturn call. The "PC" of
+ // the deferreturn call is stored in the
+ // R_ADDR relocation on the ARESUMEPOINT.
lastWasmAddr = uint32(r.Add())
}
if r.Type().IsDirectCall() && (r.Sym() == deferReturnSym || ldr.IsDeferReturnTramp(r.Sym())) {
diff --git a/src/cmd/link/internal/ld/symtab.go b/src/cmd/link/internal/ld/symtab.go
index 00f557875a..1f5e333cfd 100644
--- a/src/cmd/link/internal/ld/symtab.go
+++ b/src/cmd/link/internal/ld/symtab.go
@@ -300,6 +300,7 @@ func putplan9sym(ctxt *Link, ldr *loader.Loader, s loader.Sym, char SymbolType)
ctxt.Out.Write8(uint8(t + 0x80)) /* 0x80 is variable length */
name := ldr.SymName(s)
+ name = mangleABIName(ctxt, ldr, s, name)
ctxt.Out.WriteString(name)
ctxt.Out.Write8(0)
diff --git a/src/cmd/link/link_test.go b/src/cmd/link/link_test.go
index 7230054bed..fed9c7bc3f 100644
--- a/src/cmd/link/link_test.go
+++ b/src/cmd/link/link_test.go
@@ -545,14 +545,13 @@ const testFuncAlignSrc = `
package main
import (
"fmt"
- "reflect"
)
func alignPc()
+var alignPcFnAddr uintptr
func main() {
- addr := reflect.ValueOf(alignPc).Pointer()
- if (addr % 512) != 0 {
- fmt.Printf("expected 512 bytes alignment, got %v\n", addr)
+ if alignPcFnAddr % 512 != 0 {
+ fmt.Printf("expected 512 bytes alignment, got %v\n", alignPcFnAddr)
} else {
fmt.Printf("PASS")
}
@@ -567,6 +566,9 @@ TEXT ·alignPc(SB),NOSPLIT, $0-0
PCALIGN $512
MOVD $3, R1
RET
+
+GLOBL ·alignPcFnAddr(SB),RODATA,$8
+DATA ·alignPcFnAddr(SB)/8,$·alignPc(SB)
`
// TestFuncAlign verifies that the address of a function can be aligned
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go
index eb0016b18f..7b82d0b6dd 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go
@@ -51,6 +51,11 @@ type asmArch struct {
bigEndian bool
stack string
lr bool
+ // retRegs is a list of registers for return values in the register ABI (ABIInternal).
+ // For now, as we only check whether we write to any result, we only need to
+ // include the first integer register and the first floating-point register.
+ // Accessing any of them counts as writing to the result.
+ retRegs []string
// calculated during initialization
sizes types.Sizes
intSize int
@@ -79,8 +84,8 @@ type asmVar struct {
var (
asmArch386 = asmArch{name: "386", bigEndian: false, stack: "SP", lr: false}
asmArchArm = asmArch{name: "arm", bigEndian: false, stack: "R13", lr: true}
- asmArchArm64 = asmArch{name: "arm64", bigEndian: false, stack: "RSP", lr: true}
- asmArchAmd64 = asmArch{name: "amd64", bigEndian: false, stack: "SP", lr: false}
+ asmArchArm64 = asmArch{name: "arm64", bigEndian: false, stack: "RSP", lr: true, retRegs: []string{"R0", "F0"}}
+ asmArchAmd64 = asmArch{name: "amd64", bigEndian: false, stack: "SP", lr: false, retRegs: []string{"AX", "X0"}}
asmArchMips = asmArch{name: "mips", bigEndian: true, stack: "R29", lr: true}
asmArchMipsLE = asmArch{name: "mipsle", bigEndian: false, stack: "R29", lr: true}
asmArchMips64 = asmArch{name: "mips64", bigEndian: true, stack: "R29", lr: true}
@@ -137,7 +142,7 @@ var (
asmSP = re(`[^+\-0-9](([0-9]+)\(([A-Z0-9]+)\))`)
asmOpcode = re(`^\s*(?:[A-Z0-9a-z_]+:)?\s*([A-Z]+)\s*([^,]*)(?:,\s*(.*))?`)
ppc64Suff = re(`([BHWD])(ZU|Z|U|BR)?$`)
- abiSuff = re(`^(.+)<ABI.+>$`)
+ abiSuff = re(`^(.+)<(ABI.+)>$`)
)
func run(pass *analysis.Pass) (interface{}, error) {
@@ -185,6 +190,7 @@ Files:
var (
fn *asmFunc
fnName string
+ abi string
localSize, argSize int
wroteSP bool
noframe bool
@@ -195,18 +201,22 @@ Files:
flushRet := func() {
if fn != nil && fn.vars["ret"] != nil && !haveRetArg && len(retLine) > 0 {
v := fn.vars["ret"]
+ resultStr := fmt.Sprintf("%d-byte ret+%d(FP)", v.size, v.off)
+ if abi == "ABIInternal" {
+ resultStr = "result register"
+ }
for _, line := range retLine {
- pass.Reportf(analysisutil.LineStart(tf, line), "[%s] %s: RET without writing to %d-byte ret+%d(FP)", arch, fnName, v.size, v.off)
+ pass.Reportf(analysisutil.LineStart(tf, line), "[%s] %s: RET without writing to %s", arch, fnName, resultStr)
}
}
retLine = nil
}
- trimABI := func(fnName string) string {
+ trimABI := func(fnName string) (string, string) {
m := abiSuff.FindStringSubmatch(fnName)
if m != nil {
- return m[1]
+ return m[1], m[2]
}
- return fnName
+ return fnName, ""
}
for lineno, line := range lines {
lineno++
@@ -273,11 +283,12 @@ Files:
// log.Printf("%s:%d: [%s] cannot check cross-package assembly function: %s is in package %s", fname, lineno, arch, fnName, pkgPath)
fn = nil
fnName = ""
+ abi = ""
continue
}
}
// Trim off optional ABI selector.
- fnName := trimABI(fnName)
+ fnName, abi = trimABI(fnName)
flag := m[3]
fn = knownFunc[fnName][arch]
if fn != nil {
@@ -305,6 +316,7 @@ Files:
flushRet()
fn = nil
fnName = ""
+ abi = ""
continue
}
@@ -335,6 +347,15 @@ Files:
haveRetArg = true
}
+ if abi == "ABIInternal" && !haveRetArg {
+ for _, reg := range archDef.retRegs {
+ if strings.Contains(line, reg) {
+ haveRetArg = true
+ break
+ }
+ }
+ }
+
for _, m := range asmSP.FindAllStringSubmatch(line, -1) {
if m[3] != archDef.stack || wroteSP || noframe {
continue
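A small sketch of the ABI-suffix handling introduced above: the widened abiSuff regexp now captures the selector as well as the bare name, so trimABI can report which ABI a TEXT symbol was declared with (the example symbol is invented).

package main

import (
	"fmt"
	"regexp"
)

var abiSuff = regexp.MustCompile(`^(.+)<(ABI.+)>$`)

// trimABI mirrors the updated helper: it returns the bare name plus the
// ABI selector, or an empty selector when there is none.
func trimABI(fnName string) (string, string) {
	if m := abiSuff.FindStringSubmatch(fnName); m != nil {
		return m[1], m[2]
	}
	return fnName, ""
}

func main() {
	name, abi := trimABI("runtime·memequal<ABIInternal>")
	fmt.Println(name, abi) // runtime·memequal ABIInternal
}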
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go
index 822820f06e..6589478af0 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go
@@ -555,7 +555,7 @@ func checkPrintf(pass *analysis.Pass, kind Kind, call *ast.CallExpr, fn *types.F
format, idx := formatString(pass, call)
if idx < 0 {
if false {
- pass.Reportf(call.Lparen, "can't check non-constant format in call to %s", fn.Name())
+ pass.Reportf(call.Lparen, "can't check non-constant format in call to %s", fn.FullName())
}
return
}
@@ -563,7 +563,7 @@ func checkPrintf(pass *analysis.Pass, kind Kind, call *ast.CallExpr, fn *types.F
firstArg := idx + 1 // Arguments are immediately after format string.
if !strings.Contains(format, "%") {
if len(call.Args) > firstArg {
- pass.Reportf(call.Lparen, "%s call has arguments but no formatting directives", fn.Name())
+ pass.Reportf(call.Lparen, "%s call has arguments but no formatting directives", fn.FullName())
}
return
}
@@ -577,7 +577,7 @@ func checkPrintf(pass *analysis.Pass, kind Kind, call *ast.CallExpr, fn *types.F
if format[i] != '%' {
continue
}
- state := parsePrintfVerb(pass, call, fn.Name(), format[i:], firstArg, argNum)
+ state := parsePrintfVerb(pass, call, fn.FullName(), format[i:], firstArg, argNum)
if state == nil {
return
}
@@ -589,8 +589,12 @@ func checkPrintf(pass *analysis.Pass, kind Kind, call *ast.CallExpr, fn *types.F
anyIndex = true
}
if state.verb == 'w' {
- if kind != KindErrorf {
- pass.Reportf(call.Pos(), "%s call has error-wrapping directive %%w, which is only supported by Errorf", state.name)
+ switch kind {
+ case KindNone, KindPrint:
+ pass.Reportf(call.Pos(), "%s does not support error-wrapping directive %%w", state.name)
+ return
+ case KindPrintf:
+ pass.Reportf(call.Pos(), "%s call has error-wrapping directive %%w, which is only supported for functions backed by fmt.Errorf", state.name)
return
}
if anyW {
@@ -621,7 +625,7 @@ func checkPrintf(pass *analysis.Pass, kind Kind, call *ast.CallExpr, fn *types.F
if maxArgNum != len(call.Args) {
expect := maxArgNum - firstArg
numArgs := len(call.Args) - firstArg
- pass.ReportRangef(call, "%s call needs %v but has %v", fn.Name(), count(expect, "arg"), count(numArgs, "arg"))
+ pass.ReportRangef(call, "%s call needs %v but has %v", fn.FullName(), count(expect, "arg"), count(numArgs, "arg"))
}
}
@@ -949,7 +953,7 @@ func recursiveStringer(pass *analysis.Pass, e ast.Expr) (string, bool) {
}
if id, ok := e.(*ast.Ident); ok {
if pass.TypesInfo.Uses[id] == sig.Recv() {
- return method.Name(), true
+ return method.FullName(), true
}
}
return "", false
@@ -1044,7 +1048,7 @@ func checkPrint(pass *analysis.Pass, call *ast.CallExpr, fn *types.Func) {
if sel, ok := call.Args[0].(*ast.SelectorExpr); ok {
if x, ok := sel.X.(*ast.Ident); ok {
if x.Name == "os" && strings.HasPrefix(sel.Sel.Name, "Std") {
- pass.ReportRangef(call, "%s does not take io.Writer but has first arg %s", fn.Name(), analysisutil.Format(pass.Fset, call.Args[0]))
+ pass.ReportRangef(call, "%s does not take io.Writer but has first arg %s", fn.FullName(), analysisutil.Format(pass.Fset, call.Args[0]))
}
}
}
@@ -1058,7 +1062,7 @@ func checkPrint(pass *analysis.Pass, call *ast.CallExpr, fn *types.Func) {
if strings.Contains(s, "%") {
m := printFormatRE.FindStringSubmatch(s)
if m != nil {
- pass.ReportRangef(call, "%s call has possible formatting directive %s", fn.Name(), m[0])
+ pass.ReportRangef(call, "%s call has possible formatting directive %s", fn.FullName(), m[0])
}
}
}
@@ -1068,16 +1072,16 @@ func checkPrint(pass *analysis.Pass, call *ast.CallExpr, fn *types.Func) {
if lit, ok := arg.(*ast.BasicLit); ok && lit.Kind == token.STRING {
str, _ := strconv.Unquote(lit.Value)
if strings.HasSuffix(str, "\n") {
- pass.ReportRangef(call, "%s arg list ends with redundant newline", fn.Name())
+ pass.ReportRangef(call, "%s arg list ends with redundant newline", fn.FullName())
}
}
}
for _, arg := range args {
if isFunctionValue(pass, arg) {
- pass.ReportRangef(call, "%s arg %s is a func value, not called", fn.Name(), analysisutil.Format(pass.Fset, arg))
+ pass.ReportRangef(call, "%s arg %s is a func value, not called", fn.FullName(), analysisutil.Format(pass.Fset, arg))
}
if methodName, ok := recursiveStringer(pass, arg); ok {
- pass.ReportRangef(call, "%s arg %s causes recursive call to %s method", fn.Name(), analysisutil.Format(pass.Fset, arg), methodName)
+ pass.ReportRangef(call, "%s arg %s causes recursive call to %s method", fn.FullName(), analysisutil.Format(pass.Fset, arg), methodName)
}
}
}
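As an illustration (not from this patch) of what the reworked %w handling distinguishes: wrapping with fmt.Errorf remains accepted, while a %w verb passed to a plain Printf-like function now gets the more specific diagnostic added above.

package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("boom")
	_ = fmt.Errorf("wrap: %w", err) // fine: Errorf supports %w
	// vet would report something like:
	//   fmt.Printf call has error-wrapping directive %w, which is only
	//   supported for functions backed by fmt.Errorf
	fmt.Printf("wrap: %w\n", err)
}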
diff --git a/src/cmd/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/src/cmd/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
index cf72ea990b..5fe75b14c7 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
@@ -9,6 +9,8 @@ import (
"go/ast"
"reflect"
"sort"
+
+ "golang.org/x/tools/internal/typeparams"
)
// An ApplyFunc is invoked by Apply for each node n, even if n is nil,
@@ -437,7 +439,13 @@ func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.
}
default:
- panic(fmt.Sprintf("Apply: unexpected node type %T", n))
+ if ix := typeparams.GetIndexExprData(n); ix != nil {
+ a.apply(n, "X", nil, ix.X)
+ // *ast.IndexExpr was handled above, so n must be an *ast.MultiIndexExpr.
+ a.applyList(n, "Indices")
+ } else {
+ panic(fmt.Sprintf("Apply: unexpected node type %T", n))
+ }
}
if a.post != nil && !a.post(&a.cursor) {
diff --git a/src/cmd/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/src/cmd/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
index cffd7acbee..81e8fdcf0c 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
@@ -58,7 +58,7 @@ type Path string
// - The only OT operator is Object.Type,
// which we encode as '.' because dot cannot appear in an identifier.
// - The TT operators are encoded as [EKPRU].
-// - The OT operators are encoded as [AFMO];
+// - The TO operators are encoded as [AFMO];
// three of these (At,Field,Method) require an integer operand,
// which is encoded as a string of decimal digits.
// These indices are stable across different representations
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/lsp/fuzzy/input.go b/src/cmd/vendor/golang.org/x/tools/internal/lsp/fuzzy/input.go
index ac377035ec..c1038163f1 100644
--- a/src/cmd/vendor/golang.org/x/tools/internal/lsp/fuzzy/input.go
+++ b/src/cmd/vendor/golang.org/x/tools/internal/lsp/fuzzy/input.go
@@ -27,23 +27,23 @@ const (
// RuneRoles detects the roles of each byte rune in an input string and stores it in the output
// slice. The rune role depends on the input type. Stops when it parsed all the runes in the string
// or when it filled the output. If output is nil, then it gets created.
-func RuneRoles(str string, reuse []RuneRole) []RuneRole {
+func RuneRoles(candidate []byte, reuse []RuneRole) []RuneRole {
var output []RuneRole
- if cap(reuse) < len(str) {
- output = make([]RuneRole, 0, len(str))
+ if cap(reuse) < len(candidate) {
+ output = make([]RuneRole, 0, len(candidate))
} else {
output = reuse[:0]
}
prev, prev2 := rtNone, rtNone
- for i := 0; i < len(str); i++ {
- r := rune(str[i])
+ for i := 0; i < len(candidate); i++ {
+ r := rune(candidate[i])
role := RNone
curr := rtLower
- if str[i] <= unicode.MaxASCII {
- curr = runeType(rt[str[i]] - '0')
+ if candidate[i] <= unicode.MaxASCII {
+ curr = runeType(rt[candidate[i]] - '0')
}
if curr == rtLower {
@@ -58,7 +58,7 @@ func RuneRoles(str string, reuse []RuneRole) []RuneRole {
if prev == rtUpper {
// This and previous characters are both upper case.
- if i+1 == len(str) {
+ if i+1 == len(candidate) {
// This is last character, previous was also uppercase -> this is UCTail
// i.e., (current char is C): aBC / BC / ABC
role = RUCTail
@@ -118,11 +118,26 @@ func LastSegment(input string, roles []RuneRole) string {
return input[start+1 : end+1]
}
-// ToLower transforms the input string to lower case, which is stored in the output byte slice.
+// fromChunks copies string chunks into the given buffer.
+func fromChunks(chunks []string, buffer []byte) []byte {
+ ii := 0
+ for _, chunk := range chunks {
+ for i := 0; i < len(chunk); i++ {
+ if ii >= cap(buffer) {
+ break
+ }
+ buffer[ii] = chunk[i]
+ ii++
+ }
+ }
+ return buffer[:ii]
+}
+
+// toLower transforms the input string to lower case, which is stored in the output byte slice.
// The lower casing considers only ASCII values - non ASCII values are left unmodified.
// Stops when parsed all input or when it filled the output slice. If output is nil, then it gets
// created.
-func ToLower(input string, reuse []byte) []byte {
+func toLower(input []byte, reuse []byte) []byte {
output := reuse
if cap(reuse) < len(input) {
output = make([]byte, len(input))
@@ -130,7 +145,7 @@ func ToLower(input string, reuse []byte) []byte {
for i := 0; i < len(input); i++ {
r := rune(input[i])
- if r <= unicode.MaxASCII {
+ if input[i] <= unicode.MaxASCII {
if 'A' <= r && r <= 'Z' {
r += 'a' - 'A'
}
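A standalone copy of the chunk-flattening idea behind fromChunks, for illustration only (the real helper is unexported inside the fuzzy package): chunks are copied into a caller-supplied buffer so no intermediate string is allocated.

package main

import "fmt"

func fromChunks(chunks []string, buffer []byte) []byte {
	ii := 0
	for _, chunk := range chunks {
		for i := 0; i < len(chunk) && ii < cap(buffer); i++ {
			buffer[ii] = chunk[i]
			ii++
		}
	}
	return buffer[:ii]
}

func main() {
	var buf [64]byte
	fmt.Println(string(fromChunks([]string{"fmt.", "Println"}, buf[:]))) // fmt.Println
}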
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/lsp/fuzzy/matcher.go b/src/cmd/vendor/golang.org/x/tools/internal/lsp/fuzzy/matcher.go
index 16a643097d..265cdcf160 100644
--- a/src/cmd/vendor/golang.org/x/tools/internal/lsp/fuzzy/matcher.go
+++ b/src/cmd/vendor/golang.org/x/tools/internal/lsp/fuzzy/matcher.go
@@ -51,8 +51,12 @@ type Matcher struct {
lastCandidateLen int // in bytes
lastCandidateMatched bool
- // Here we save the last candidate in lower-case. This is basically a byte slice we reuse for
- // performance reasons, so the slice is not reallocated for every candidate.
+ // Reusable buffers to avoid allocating for every candidate.
+ // - inputBuf stores the concatenated input chunks
+ // - lowerBuf stores the last candidate in lower-case
+ // - rolesBuf stores the calculated roles for each rune in the last
+ // candidate.
+ inputBuf [MaxInputSize]byte
lowerBuf [MaxInputSize]byte
rolesBuf [MaxInputSize]RuneRole
}
@@ -72,7 +76,7 @@ func NewMatcher(pattern string) *Matcher {
m := &Matcher{
pattern: pattern,
- patternLower: ToLower(pattern, nil),
+ patternLower: toLower([]byte(pattern), nil),
}
for i, c := range m.patternLower {
@@ -88,7 +92,7 @@ func NewMatcher(pattern string) *Matcher {
m.patternShort = m.patternLower
}
- m.patternRoles = RuneRoles(pattern, nil)
+ m.patternRoles = RuneRoles([]byte(pattern), nil)
if len(pattern) > 0 {
maxCharScore := 4
@@ -102,10 +106,15 @@ func NewMatcher(pattern string) *Matcher {
// This is not designed for parallel use. Multiple candidates must be scored sequentially.
// Returns a score between 0 and 1 (0 - no match, 1 - perfect match).
func (m *Matcher) Score(candidate string) float32 {
+ return m.ScoreChunks([]string{candidate})
+}
+
+func (m *Matcher) ScoreChunks(chunks []string) float32 {
+ candidate := fromChunks(chunks, m.inputBuf[:])
if len(candidate) > MaxInputSize {
candidate = candidate[:MaxInputSize]
}
- lower := ToLower(candidate, m.lowerBuf[:])
+ lower := toLower(candidate, m.lowerBuf[:])
m.lastCandidateLen = len(candidate)
if len(m.pattern) == 0 {
@@ -174,7 +183,7 @@ func (m *Matcher) MatchedRanges() []int {
return ret
}
-func (m *Matcher) match(candidate string, candidateLower []byte) bool {
+func (m *Matcher) match(candidate []byte, candidateLower []byte) bool {
i, j := 0, 0
for ; i < len(candidateLower) && j < len(m.patternLower); i++ {
if candidateLower[i] == m.patternLower[j] {
@@ -192,7 +201,7 @@ func (m *Matcher) match(candidate string, candidateLower []byte) bool {
return true
}
-func (m *Matcher) computeScore(candidate string, candidateLower []byte) int {
+func (m *Matcher) computeScore(candidate []byte, candidateLower []byte) int {
pattLen, candLen := len(m.pattern), len(candidate)
for j := 0; j <= len(m.pattern); j++ {
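A usage sketch of the new chunked entry point (the fuzzy package is internal to x/tools, so this only compiles from inside that module; shown purely as illustration): a candidate's package qualifier and identifier can be scored without first joining them into one string.

package main

import (
	"fmt"

	"golang.org/x/tools/internal/lsp/fuzzy"
)

func main() {
	m := fuzzy.NewMatcher("prln")
	// Equivalent to m.Score("fmt.Println"), but without allocating the
	// concatenated candidate string.
	fmt.Println(m.ScoreChunks([]string{"fmt.", "Println"}))
}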
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/lsp/fuzzy/symbol.go b/src/cmd/vendor/golang.org/x/tools/internal/lsp/fuzzy/symbol.go
new file mode 100644
index 0000000000..062f491fb5
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/internal/lsp/fuzzy/symbol.go
@@ -0,0 +1,224 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzzy
+
+import (
+ "unicode"
+)
+
+// SymbolMatcher implements a fuzzy matching algorithm optimized for Go symbols
+// of the form:
+// example.com/path/to/package.object.field
+//
+// Knowing that we are matching symbols like this allows us to make the
+// following optimizations:
+// - We can incorporate right-to-left relevance directly into the score
+// calculation.
+// - We can match from right to left, discarding leading bytes if the input is
+// too long.
+// - We just take the right-most match without losing too much precision. This
+// allows us to use an O(n) algorithm.
+// - We can operate directly on chunked strings; in many cases we will
+// be storing the package path and/or package name separately from the
+// symbol or identifiers, so doing this avoids allocating strings.
+// - We can return the index of the right-most match, allowing us to trim
+// irrelevant qualification.
+//
+// This implementation is experimental, serving as a reference fast algorithm
+// to compare to the fuzzy algorithm implemented by Matcher.
+type SymbolMatcher struct {
+ // Buffers of length 256 are both a reasonable size for most qualified
+ // symbols and make it easy to avoid bounds checks by using uint8 indexes.
+ pattern [256]rune
+ patternLen uint8
+ inputBuffer [256]rune // avoid allocating when considering chunks
+ roles [256]uint32 // which roles does a rune play (word start, etc.)
+ segments [256]uint8 // how many segments from the right is each rune
+}
+
+const (
+ segmentStart uint32 = 1 << iota
+ wordStart
+ separator
+)
+
+// NewSymbolMatcher creates a SymbolMatcher that may be used to match the given
+// search pattern.
+//
+// Currently this matcher only accepts case-insensitive fuzzy patterns.
+//
+// TODO(rfindley):
+// - implement smart-casing
+// - implement space-separated groups
+// - implement ', ^, and $ modifiers
+//
+// An empty pattern matches no input.
+func NewSymbolMatcher(pattern string) *SymbolMatcher {
+ m := &SymbolMatcher{}
+ for _, p := range pattern {
+ m.pattern[m.patternLen] = unicode.ToLower(p)
+ m.patternLen++
+ if m.patternLen == 255 || int(m.patternLen) == len(pattern) {
+ // break at 255 so that we can represent patternLen with a uint8.
+ break
+ }
+ }
+ return m
+}
+
+// Match looks for the right-most match of the search pattern within the symbol
+// represented by concatenating the given chunks, returning its offset and
+// score.
+//
+// If a match is found, the first return value will hold the absolute byte
+// offset within all chunks for the start of the symbol. In other words, the
+// index of the match within strings.Join(chunks, ""). If no match is found,
+// the first return value will be -1.
+//
+// The second return value will be the score of the match, which is always
+// between 0 and 1, inclusive. A score of 0 indicates no match.
+func (m *SymbolMatcher) Match(chunks []string) (int, float64) {
+ // Explicit behavior for an empty pattern.
+ //
+ // As a minor optimization, this also avoids nilness checks later on, since
+ // the compiler can prove that m != nil.
+ if m.patternLen == 0 {
+ return -1, 0
+ }
+
+ // First phase: populate the input buffer with lower-cased runes.
+ //
+ // We could also check for a forward match here, but since we'd have to write
+ // the entire input anyway this has negligible impact on performance.
+
+ var (
+ inputLen = uint8(0)
+ modifiers = wordStart | segmentStart
+ )
+
+input:
+ for _, chunk := range chunks {
+ for _, r := range chunk {
+ if r == '.' || r == '/' {
+ modifiers |= separator
+ }
+ // optimization: avoid calls to unicode.ToLower, which can't be inlined.
+ l := r
+ if r <= unicode.MaxASCII {
+ if 'A' <= r && r <= 'Z' {
+ l = r + 'a' - 'A'
+ }
+ } else {
+ l = unicode.ToLower(r)
+ }
+ if l != r {
+ modifiers |= wordStart
+ }
+ m.inputBuffer[inputLen] = l
+ m.roles[inputLen] = modifiers
+ inputLen++
+ if m.roles[inputLen-1]&separator != 0 {
+ modifiers = wordStart | segmentStart
+ } else {
+ modifiers = 0
+ }
+ // TODO: we should prefer the right-most input if it overflows, rather
+ // than the left-most as we're doing here.
+ if inputLen == 255 {
+ break input
+ }
+ }
+ }
+
+ // Second phase: find the right-most match, and count segments from the
+ // right.
+
+ var (
+ pi = uint8(m.patternLen - 1) // pattern index
+ p = m.pattern[pi] // pattern rune
+ start = -1 // start offset of match
+ rseg = uint8(0)
+ )
+ const maxSeg = 3 // maximum number of segments from the right to count, for scoring purposes.
+
+ for ii := inputLen - 1; ; ii-- {
+ r := m.inputBuffer[ii]
+ if rseg < maxSeg && m.roles[ii]&separator != 0 {
+ rseg++
+ }
+ m.segments[ii] = rseg
+ if p == r {
+ if pi == 0 {
+ start = int(ii)
+ break
+ }
+ pi--
+ p = m.pattern[pi]
+ }
+ // Don't check ii >= 0 in the loop condition: ii is a uint8.
+ if ii == 0 {
+ break
+ }
+ }
+
+ if start < 0 {
+ // no match: skip scoring
+ return -1, 0
+ }
+
+ // Third phase: find the shortest match, and compute the score.
+
+ // Score is the average score for each character.
+ //
+ // A character score is the multiple of:
+ // 1. 1.0 if the character starts a segment, .8 if the character starts a
+ // mid-segment word, otherwise 0.6. This carries over to immediately
+ // following characters.
+ // 2. 1.0 if the character is part of the last segment, otherwise
+ // 1.0-.2*<segments from the right>, with a max segment count of 3.
+ //
+ // This is a very naive algorithm, but it is fast. There's lots of prior art
+ // here, and we should leverage it. For example, we could explicitly consider
+ // character distance, and exact matches of words or segments.
+ //
+ // Also note that this might not actually find the highest scoring match, as
+ // doing so could require a non-linear algorithm, depending on how the score
+ // is calculated.
+
+ pi = 0
+ p = m.pattern[pi]
+
+ const (
+ segStreak = 1.0
+ wordStreak = 0.8
+ noStreak = 0.6
+ perSegment = 0.2 // we count at most 3 segments above
+ )
+
+ streakBonus := noStreak
+ totScore := 0.0
+ for ii := uint8(start); ii < inputLen; ii++ {
+ r := m.inputBuffer[ii]
+ if r == p {
+ pi++
+ p = m.pattern[pi]
+ // Note: this could be optimized with some bit operations.
+ switch {
+ case m.roles[ii]&segmentStart != 0 && segStreak > streakBonus:
+ streakBonus = segStreak
+ case m.roles[ii]&wordStart != 0 && wordStreak > streakBonus:
+ streakBonus = wordStreak
+ }
+ totScore += streakBonus * (1.0 - float64(m.segments[ii])*perSegment)
+ if pi >= m.patternLen {
+ break
+ }
+ } else {
+ streakBonus = noStreak
+ }
+ }
+
+ return start, totScore / float64(m.patternLen)
+}
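A usage sketch for the new matcher (again an internal package, so only compilable from within x/tools; the symbol is invented): Match reports the byte offset of the right-most match within the joined chunks plus a score in [0, 1].

package main

import (
	"fmt"

	"golang.org/x/tools/internal/lsp/fuzzy"
)

func main() {
	m := fuzzy.NewSymbolMatcher("fb")
	offset, score := m.Match([]string{"example.com/foo/bar.", "FooBar"})
	fmt.Println(offset, score) // offset indexes into strings.Join(chunks, "")
}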
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typeparams/common.go b/src/cmd/vendor/golang.org/x/tools/internal/typeparams/common.go
new file mode 100644
index 0000000000..9fc6b4beb8
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/internal/typeparams/common.go
@@ -0,0 +1,25 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package typeparams provides functions to work indirectly with type parameter
+// data stored in go/ast and go/types objects, while these APIs are guarded by a
+// build constraint.
+//
+// This package exists to make it easier for tools to work with generic code,
+// while also compiling against older Go versions.
+package typeparams
+
+import (
+ "go/ast"
+ "go/token"
+)
+
+// An IndexExprData holds data from both ast.IndexExpr and the new
+// ast.MultiIndexExpr, which was introduced in Go 1.18.
+type IndexExprData struct {
+ X ast.Expr // expression
+ Lbrack token.Pos // position of "["
+ Indices []ast.Expr // index expressions
+ Rbrack token.Pos // position of "]"
+}
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typeparams/notypeparams.go b/src/cmd/vendor/golang.org/x/tools/internal/typeparams/notypeparams.go
new file mode 100644
index 0000000000..e975e476f6
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/internal/typeparams/notypeparams.go
@@ -0,0 +1,93 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !typeparams || !go1.18
+// +build !typeparams !go1.18
+
+package typeparams
+
+import (
+ "go/ast"
+ "go/types"
+)
+
+// NOTE: doc comments must be kept in sync with typeparams.go.
+
+// Enabled reports whether type parameters are enabled in the current build
+// environment.
+const Enabled = false
+
+// GetIndexExprData extracts data from AST nodes that represent index
+// expressions.
+//
+// For an ast.IndexExpr, the resulting IndexExprData will have exactly one
+// index expression. For an ast.MultiIndexExpr (go1.18+), it may have a
+// variable number of index expressions.
+//
+// For nodes that don't represent index expressions, GetIndexExprData returns
+// nil.
+func GetIndexExprData(n ast.Node) *IndexExprData {
+ if e, _ := n.(*ast.IndexExpr); e != nil {
+ return &IndexExprData{
+ X: e.X,
+ Lbrack: e.Lbrack,
+ Indices: []ast.Expr{e.Index},
+ Rbrack: e.Rbrack,
+ }
+ }
+ return nil
+}
+
+// ForTypeDecl extracts the (possibly nil) type parameter node list from n.
+func ForTypeDecl(*ast.TypeSpec) *ast.FieldList {
+ return nil
+}
+
+// ForFuncDecl extracts the (possibly nil) type parameter node list from n.
+func ForFuncDecl(*ast.FuncDecl) *ast.FieldList {
+ return nil
+}
+
+// ForSignature extracts the (possibly empty) type parameter object list from
+// sig.
+func ForSignature(*types.Signature) []*types.TypeName {
+ return nil
+}
+
+// IsComparable reports if iface is the comparable interface.
+func IsComparable(*types.Interface) bool {
+ return false
+}
+
+// IsConstraint reports whether iface may only be used as a type parameter
+// constraint (i.e. has a type set or is the comparable interface).
+func IsConstraint(*types.Interface) bool {
+ return false
+}
+
+// ForNamed extracts the (possibly empty) type parameter object list from
+// named.
+func ForNamed(*types.Named) []*types.TypeName {
+ return nil
+}
+
+// NamedTArgs extracts the (possibly empty) type argument list from named.
+func NamedTArgs(*types.Named) []types.Type {
+ return nil
+}
+
+// InitInferred initializes info to record inferred type information.
+func InitInferred(*types.Info) {
+}
+
+// GetInferred extracts inferred type information from info for e.
+//
+// The expression e may have an inferred type if it is an *ast.IndexExpr
+// representing partial instantiation of a generic function type for which type
+// arguments have been inferred using constraint type inference, or if it is an
+// *ast.CallExpr for which type arguments have been inferred using both
+// constraint type inference and function argument inference.
+func GetInferred(*types.Info, ast.Expr) ([]types.Type, *types.Signature) {
+ return nil, nil
+}
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typeparams/typeparams.go b/src/cmd/vendor/golang.org/x/tools/internal/typeparams/typeparams.go
new file mode 100644
index 0000000000..be6b0525f6
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/internal/typeparams/typeparams.go
@@ -0,0 +1,115 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build typeparams && go1.18
+// +build typeparams,go1.18
+
+package typeparams
+
+import (
+ "go/ast"
+ "go/types"
+)
+
+// NOTE: doc comments must be kept in sync with notypeparams.go.
+
+// Enabled reports whether type parameters are enabled in the current build
+// environment.
+const Enabled = true
+
+// GetIndexExprData extracts data from AST nodes that represent index
+// expressions.
+//
+// For an ast.IndexExpr, the resulting IndexExprData will have exactly one
+// index expression. For an ast.MultiIndexExpr (go1.18+), it may have a
+// variable number of index expressions.
+//
+// For nodes that don't represent index expressions, GetIndexExprData returns
+// nil.
+func GetIndexExprData(n ast.Node) *IndexExprData {
+ switch e := n.(type) {
+ case *ast.IndexExpr:
+ return &IndexExprData{
+ X: e.X,
+ Lbrack: e.Lbrack,
+ Indices: []ast.Expr{e.Index},
+ Rbrack: e.Rbrack,
+ }
+ case *ast.MultiIndexExpr:
+ return (*IndexExprData)(e)
+ }
+ return nil
+}
+
+// ForTypeDecl extracts the (possibly nil) type parameter node list from n.
+func ForTypeDecl(n *ast.TypeSpec) *ast.FieldList {
+ return n.TParams
+}
+
+// ForFuncDecl extracts the (possibly nil) type parameter node list from n.
+func ForFuncDecl(n *ast.FuncDecl) *ast.FieldList {
+ if n.Type != nil {
+ return n.Type.TParams
+ }
+ return nil
+}
+
+// ForSignature extracts the (possibly empty) type parameter object list from
+// sig.
+func ForSignature(sig *types.Signature) []*types.TypeName {
+ return tparamsSlice(sig.TParams())
+}
+
+// IsComparable reports if iface is the comparable interface.
+func IsComparable(iface *types.Interface) bool {
+ return iface.IsComparable()
+}
+
+// IsConstraint reports whether iface may only be used as a type parameter
+// constraint (i.e. has a type set or is the comparable interface).
+func IsConstraint(iface *types.Interface) bool {
+ return iface.IsConstraint()
+}
+
+// ForNamed extracts the (possibly empty) type parameter object list from
+// named.
+func ForNamed(named *types.Named) []*types.TypeName {
+ return tparamsSlice(named.TParams())
+}
+
+func tparamsSlice(tparams *types.TypeParams) []*types.TypeName {
+ if tparams.Len() == 0 {
+ return nil
+ }
+ result := make([]*types.TypeName, tparams.Len())
+ for i := 0; i < tparams.Len(); i++ {
+ result[i] = tparams.At(i)
+ }
+ return result
+}
+
+// NamedTArgs extracts the (possibly empty) type argument list from named.
+func NamedTArgs(named *types.Named) []types.Type {
+ return named.TArgs()
+}
+
+// InitInferred initializes info to record inferred type information.
+func InitInferred(info *types.Info) {
+ info.Inferred = make(map[ast.Expr]types.Inferred)
+}
+
+// GetInferred extracts inferred type information from info for e.
+//
+// The expression e may have an inferred type if it is an *ast.IndexExpr
+// representing partial instantiation of a generic function type for which type
+// arguments have been inferred using constraint type inference, or if it is an
+// *ast.CallExpr for which type arguments have been inferred using both
+// constraint type inference and function argument inference.
+func GetInferred(info *types.Info, e ast.Expr) ([]types.Type, *types.Signature) {
+ if info.Inferred == nil {
+ return nil, nil
+ }
+ inf := info.Inferred[e]
+ return inf.TArgs, inf.Sig
+}
diff --git a/src/cmd/vendor/modules.txt b/src/cmd/vendor/modules.txt
index 34dbdaf5dd..c98bdcd344 100644
--- a/src/cmd/vendor/modules.txt
+++ b/src/cmd/vendor/modules.txt
@@ -48,7 +48,7 @@ golang.org/x/sys/windows
# golang.org/x/term v0.0.0-20210503060354-a79de5458b56
## explicit; go 1.17
golang.org/x/term
-# golang.org/x/tools v0.1.2-0.20210519160823-49064d2332f9
+# golang.org/x/tools v0.1.6-0.20210809225032-337cebd2c151
## explicit; go 1.17
golang.org/x/tools/cover
golang.org/x/tools/go/analysis
@@ -92,6 +92,7 @@ golang.org/x/tools/go/types/objectpath
golang.org/x/tools/go/types/typeutil
golang.org/x/tools/internal/analysisinternal
golang.org/x/tools/internal/lsp/fuzzy
+golang.org/x/tools/internal/typeparams
# golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
## explicit; go 1.11
golang.org/x/xerrors
diff --git a/src/cmd/vet/testdata/print/print.go b/src/cmd/vet/testdata/print/print.go
index fca594925f..46240e87bf 100644
--- a/src/cmd/vet/testdata/print/print.go
+++ b/src/cmd/vet/testdata/print/print.go
@@ -491,10 +491,10 @@ type recursiveStringer int
func (s recursiveStringer) String() string {
_ = fmt.Sprintf("%d", s)
_ = fmt.Sprintf("%#v", s)
- _ = fmt.Sprintf("%v", s) // ERROR "Sprintf format %v with arg s causes recursive String method call"
- _ = fmt.Sprintf("%v", &s) // ERROR "Sprintf format %v with arg &s causes recursive String method call"
+ _ = fmt.Sprintf("%v", s) // ERROR "Sprintf format %v with arg s causes recursive .*String method call"
+ _ = fmt.Sprintf("%v", &s) // ERROR "Sprintf format %v with arg &s causes recursive .*String method call"
_ = fmt.Sprintf("%T", s) // ok; does not recursively call String
- return fmt.Sprintln(s) // ERROR "Sprintln arg s causes recursive call to String method"
+ return fmt.Sprintln(s) // ERROR "Sprintln arg s causes recursive call to .*String method"
}
type recursivePtrStringer int
@@ -502,7 +502,7 @@ type recursivePtrStringer int
func (p *recursivePtrStringer) String() string {
_ = fmt.Sprintf("%v", *p)
_ = fmt.Sprint(&p) // ok; prints address
- return fmt.Sprintln(p) // ERROR "Sprintln arg p causes recursive call to String method"
+ return fmt.Sprintln(p) // ERROR "Sprintln arg p causes recursive call to .*String method"
}
type BoolFormatter bool
diff --git a/src/go.mod b/src/go.mod
index 379dcf504e..1fb8cbfcbe 100644
--- a/src/go.mod
+++ b/src/go.mod
@@ -1,6 +1,6 @@
module std
-go 1.17
+go 1.18
require (
golang.org/x/crypto v0.0.0-20210503195802-e9a32991a82e
diff --git a/src/go.sum b/src/go.sum
index 6e869b96f7..b3de6c526c 100644
--- a/src/go.sum
+++ b/src/go.sum
@@ -1,15 +1,8 @@
golang.org/x/crypto v0.0.0-20210503195802-e9a32991a82e h1:8foAy0aoO5GkqCvAEJ4VC4P3zksTg4X4aJCDpZzmgQI=
golang.org/x/crypto v0.0.0-20210503195802-e9a32991a82e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210510120150-4163338589ed h1:p9UgmWI9wKpfYmgaV/IZKGdXc5qEK45tDwwwDyjS26I=
golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744 h1:yhBbb4IRs2HS9PPlAg6DMC6mUOKexJBNsLf4Z+6En1Q=
golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7-0.20210503195748-5c7c50ebbd4f h1:yQJrRE0hDxDFmZLlRaw+3vusO4fwNHgHIjUOMO7bHYI=
golang.org/x/text v0.3.7-0.20210503195748-5c7c50ebbd4f/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
diff --git a/src/go/ast/ast.go b/src/go/ast/ast.go
index 337c87fd79..b0f1330564 100644
--- a/src/go/ast/ast.go
+++ b/src/go/ast/ast.go
@@ -344,6 +344,15 @@ type (
Rbrack token.Pos // position of "]"
}
+ // A MultiIndexExpr node represents an expression followed by multiple
+ // indices.
+ MultiIndexExpr struct {
+ X Expr // expression
+ Lbrack token.Pos // position of "["
+ Indices []Expr // index expressions
+ Rbrack token.Pos // position of "]"
+ }
+
// A SliceExpr node represents an expression followed by slice indices.
SliceExpr struct {
X Expr // expression
@@ -440,6 +449,14 @@ type (
// Pointer types are represented via StarExpr nodes.
+ // A FuncType node represents a function type.
+ FuncType struct {
+ Func token.Pos // position of "func" keyword (token.NoPos if there is no "func")
+ TParams *FieldList // type parameters; or nil
+ Params *FieldList // (incoming) parameters; non-nil
+ Results *FieldList // (outgoing) results; or nil
+ }
+
// An InterfaceType node represents an interface type.
InterfaceType struct {
Interface token.Pos // position of "interface" keyword
@@ -479,6 +496,7 @@ func (x *CompositeLit) Pos() token.Pos {
func (x *ParenExpr) Pos() token.Pos { return x.Lparen }
func (x *SelectorExpr) Pos() token.Pos { return x.X.Pos() }
func (x *IndexExpr) Pos() token.Pos { return x.X.Pos() }
+func (x *MultiIndexExpr) Pos() token.Pos { return x.X.Pos() }
func (x *SliceExpr) Pos() token.Pos { return x.X.Pos() }
func (x *TypeAssertExpr) Pos() token.Pos { return x.X.Pos() }
func (x *CallExpr) Pos() token.Pos { return x.Fun.Pos() }
@@ -512,6 +530,7 @@ func (x *CompositeLit) End() token.Pos { return x.Rbrace + 1 }
func (x *ParenExpr) End() token.Pos { return x.Rparen + 1 }
func (x *SelectorExpr) End() token.Pos { return x.Sel.End() }
func (x *IndexExpr) End() token.Pos { return x.Rbrack + 1 }
+func (x *MultiIndexExpr) End() token.Pos { return x.Rbrack + 1 }
func (x *SliceExpr) End() token.Pos { return x.Rbrack + 1 }
func (x *TypeAssertExpr) End() token.Pos { return x.Rparen + 1 }
func (x *CallExpr) End() token.Pos { return x.Rparen + 1 }
@@ -543,6 +562,7 @@ func (*CompositeLit) exprNode() {}
func (*ParenExpr) exprNode() {}
func (*SelectorExpr) exprNode() {}
func (*IndexExpr) exprNode() {}
+func (*MultiIndexExpr) exprNode() {}
func (*SliceExpr) exprNode() {}
func (*TypeAssertExpr) exprNode() {}
func (*CallExpr) exprNode() {}
@@ -892,6 +912,16 @@ type (
Values []Expr // initial values; or nil
Comment *CommentGroup // line comments; or nil
}
+
+ // A TypeSpec node represents a type declaration (TypeSpec production).
+ TypeSpec struct {
+ Doc *CommentGroup // associated documentation; or nil
+ Name *Ident // type name
+ TParams *FieldList // type parameters; or nil
+ Assign token.Pos // position of '=', if any
+ Type Expr // *Ident, *ParenExpr, *SelectorExpr, *StarExpr, or any of the *XxxTypes
+ Comment *CommentGroup // line comments; or nil
+ }
)
// Pos and End implementations for spec nodes.
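
A minimal sketch of the new node in use (this assumes a toolchain built from this tree, since ast.MultiIndexExpr is not part of a released Go version): f[int, string] can be represented and traversed like any other expression.

package main

import (
	"fmt"
	"go/ast"
)

func main() {
	// Hand-built representation of f[int, string] using the new node.
	// The positions are arbitrary byte offsets chosen for the example.
	x := &ast.MultiIndexExpr{
		X:      &ast.Ident{NamePos: 1, Name: "f"},
		Lbrack: 2,
		Indices: []ast.Expr{
			&ast.Ident{NamePos: 3, Name: "int"},
			&ast.Ident{NamePos: 8, Name: "string"},
		},
		Rbrack: 14,
	}
	fmt.Println(x.Pos(), x.End()) // 1 15

	// ast.Inspect descends into Indices thanks to the walk.go change below.
	ast.Inspect(x, func(n ast.Node) bool {
		if id, ok := n.(*ast.Ident); ok {
			fmt.Println(id.Name) // f, int, string
		}
		return true
	})
}
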
diff --git a/src/go/ast/ast_notypeparams.go b/src/go/ast/ast_notypeparams.go
deleted file mode 100644
index fa132fba85..0000000000
--- a/src/go/ast/ast_notypeparams.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !typeparams
-// +build !typeparams
-
-package ast
-
-import "go/token"
-
-type (
- // A FuncType node represents a function type.
- FuncType struct {
- Func token.Pos // position of "func" keyword (token.NoPos if there is no "func")
- Params *FieldList // (incoming) parameters; non-nil
- Results *FieldList // (outgoing) results; or nil
- }
-
- // A TypeSpec node represents a type declaration (TypeSpec production).
- TypeSpec struct {
- Doc *CommentGroup // associated documentation; or nil
- Name *Ident // type name
- Assign token.Pos // position of '=', if any
- Type Expr // *Ident, *ParenExpr, *SelectorExpr, *StarExpr, or any of the *XxxTypes
- Comment *CommentGroup // line comments; or nil
- }
-)
diff --git a/src/go/ast/ast_typeparams.go b/src/go/ast/ast_typeparams.go
deleted file mode 100644
index 24fdc5f131..0000000000
--- a/src/go/ast/ast_typeparams.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build typeparams
-// +build typeparams
-
-package ast
-
-import "go/token"
-
-type (
- // A FuncType node represents a function type.
- FuncType struct {
- Func token.Pos // position of "func" keyword (token.NoPos if there is no "func")
- TParams *FieldList // type parameters; or nil
- Params *FieldList // (incoming) parameters; non-nil
- Results *FieldList // (outgoing) results; or nil
- }
-
- // A TypeSpec node represents a type declaration (TypeSpec production).
- TypeSpec struct {
- Doc *CommentGroup // associated documentation; or nil
- Name *Ident // type name
- TParams *FieldList // type parameters; or nil
- Assign token.Pos // position of '=', if any
- Type Expr // *Ident, *ParenExpr, *SelectorExpr, *StarExpr, or any of the *XxxTypes
- Comment *CommentGroup // line comments; or nil
- }
-
- // A ListExpr node represents a list of expressions separated by commas.
- // ListExpr nodes are used as index in IndexExpr nodes representing type
- // or function instantiations with more than one type argument.
- ListExpr struct {
- ElemList []Expr
- }
-)
-
-func (*ListExpr) exprNode() {}
-func (x *ListExpr) Pos() token.Pos {
- if len(x.ElemList) > 0 {
- return x.ElemList[0].Pos()
- }
- return token.NoPos
-}
-func (x *ListExpr) End() token.Pos {
- if len(x.ElemList) > 0 {
- return x.ElemList[len(x.ElemList)-1].End()
- }
- return token.NoPos
-}
diff --git a/src/go/ast/walk.go b/src/go/ast/walk.go
index 9224264e29..c8abc40972 100644
--- a/src/go/ast/walk.go
+++ b/src/go/ast/walk.go
@@ -4,6 +4,8 @@
package ast
+import "fmt"
+
// A Visitor's Visit method is invoked for each node encountered by Walk.
// If the result visitor w is not nil, Walk visits each of the children
// of node with the visitor w, followed by a call of w.Visit(nil).
@@ -114,6 +116,12 @@ func Walk(v Visitor, node Node) {
Walk(v, n.X)
Walk(v, n.Index)
+ case *MultiIndexExpr:
+ Walk(v, n.X)
+ for _, index := range n.Indices {
+ Walk(v, index)
+ }
+
case *SliceExpr:
Walk(v, n.X)
if n.Low != nil {
@@ -161,7 +169,9 @@ func Walk(v Visitor, node Node) {
Walk(v, n.Fields)
case *FuncType:
- walkFuncTypeParams(v, n)
+ if n.TParams != nil {
+ Walk(v, n.TParams)
+ }
if n.Params != nil {
Walk(v, n.Params)
}
@@ -316,7 +326,9 @@ func Walk(v Visitor, node Node) {
Walk(v, n.Doc)
}
Walk(v, n.Name)
- walkTypeSpecParams(v, n)
+ if n.TParams != nil {
+ Walk(v, n.TParams)
+ }
Walk(v, n.Type)
if n.Comment != nil {
Walk(v, n.Comment)
@@ -363,7 +375,7 @@ func Walk(v Visitor, node Node) {
}
default:
- walkOtherNodes(v, n)
+ panic(fmt.Sprintf("ast.Walk: unexpected node type %T", n))
}
v.Visit(nil)
diff --git a/src/go/ast/walk_notypeparams.go b/src/go/ast/walk_notypeparams.go
deleted file mode 100644
index d43e13dd11..0000000000
--- a/src/go/ast/walk_notypeparams.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !typeparams
-// +build !typeparams
-
-package ast
-
-import "fmt"
-
-func walkFuncTypeParams(v Visitor, n *FuncType) {}
-func walkTypeSpecParams(v Visitor, n *TypeSpec) {}
-
-func walkOtherNodes(v Visitor, n Node) {
- panic(fmt.Sprintf("ast.Walk: unexpected node type %T", n))
-}
diff --git a/src/go/ast/walk_typeparams.go b/src/go/ast/walk_typeparams.go
deleted file mode 100644
index b6621335b8..0000000000
--- a/src/go/ast/walk_typeparams.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build typeparams
-// +build typeparams
-
-package ast
-
-import (
- "fmt"
-)
-
-func walkFuncTypeParams(v Visitor, n *FuncType) {
- if n.TParams != nil {
- Walk(v, n.TParams)
- }
-}
-
-func walkTypeSpecParams(v Visitor, n *TypeSpec) {
- if n.TParams != nil {
- Walk(v, n.TParams)
- }
-}
-
-func walkOtherNodes(v Visitor, n Node) {
- if e, ok := n.(*ListExpr); ok {
- if e != nil {
- for _, elem := range e.ElemList {
- Walk(v, elem)
- }
- }
- } else {
- panic(fmt.Sprintf("ast.Walk: unexpected node type %T", n))
- }
-}
diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go
index 45e2f25df7..80f8e1a00d 100644
--- a/src/go/build/deps_test.go
+++ b/src/go/build/deps_test.go
@@ -71,17 +71,19 @@ var depsRules = `
# No dependencies allowed for any of these packages.
NONE
< container/list, container/ring,
- internal/cfg, internal/cpu, internal/goexperiment,
+ internal/cfg, internal/cpu, internal/goarch,
+ internal/goexperiment, internal/goos,
internal/goversion, internal/nettrace,
unicode/utf8, unicode/utf16, unicode,
unsafe;
- # These packages depend only on unsafe.
- unsafe
+ # These packages depend only on internal/goarch and unsafe.
+ internal/goarch, unsafe
< internal/abi;
# RUNTIME is the core runtime group of packages, all of them very light-weight.
- internal/abi, internal/cpu, internal/goexperiment, unsafe
+ internal/abi, internal/cpu, internal/goarch,
+ internal/goexperiment, internal/goos, unsafe
< internal/bytealg
< internal/itoa
< internal/unsafeheader
diff --git a/src/go/constant/kind_string.go b/src/go/constant/kind_string.go
new file mode 100644
index 0000000000..700332511d
--- /dev/null
+++ b/src/go/constant/kind_string.go
@@ -0,0 +1,28 @@
+// Code generated by "stringer -type Kind"; DO NOT EDIT.
+
+package constant
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[Unknown-0]
+ _ = x[Bool-1]
+ _ = x[String-2]
+ _ = x[Int-3]
+ _ = x[Float-4]
+ _ = x[Complex-5]
+}
+
+const _Kind_name = "UnknownBoolStringIntFloatComplex"
+
+var _Kind_index = [...]uint8{0, 7, 11, 17, 20, 25, 32}
+
+func (i Kind) String() string {
+ if i < 0 || i >= Kind(len(_Kind_index)-1) {
+ return "Kind(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Kind_name[_Kind_index[i]:_Kind_index[i+1]]
+}
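
With this generated String method, Kind values print by name rather than by number; a small sketch:

package main

import (
	"fmt"
	"go/constant"
)

func main() {
	v := constant.MakeInt64(42)
	fmt.Println(v.Kind())                       // Int
	fmt.Println(constant.MakeBool(true).Kind()) // Bool
	fmt.Println(constant.Kind(42))              // Kind(42) for out-of-range values
}
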
diff --git a/src/go/constant/value.go b/src/go/constant/value.go
index 78cb3f896f..014e873100 100644
--- a/src/go/constant/value.go
+++ b/src/go/constant/value.go
@@ -24,6 +24,8 @@ import (
"unicode/utf8"
)
+//go:generate stringer -type Kind
+
// Kind specifies the kind of value represented by a Value.
type Kind int
diff --git a/src/go/internal/gcimporter/gcimporter_test.go b/src/go/internal/gcimporter/gcimporter_test.go
index 3c76aafde3..286b8a6347 100644
--- a/src/go/internal/gcimporter/gcimporter_test.go
+++ b/src/go/internal/gcimporter/gcimporter_test.go
@@ -138,6 +138,7 @@ func TestVersionHandling(t *testing.T) {
skipSpecialPlatforms(t)
// This package only handles gc export data.
+ // Disable test until we put in the new export version.
if runtime.Compiler != "gc" {
t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
}
diff --git a/src/go/internal/gcimporter/iimport.go b/src/go/internal/gcimporter/iimport.go
index 76d47d08f1..dbc9b3a83e 100644
--- a/src/go/internal/gcimporter/iimport.go
+++ b/src/go/internal/gcimporter/iimport.go
@@ -41,6 +41,16 @@ func (r *intReader) uint64() uint64 {
return i
}
+// Keep this in sync with constants in iexport.go.
+const (
+ iexportVersionGo1_11 = 0
+ iexportVersionPosCol = 1
+ // TODO: before release, change this back to 2.
+ iexportVersionGenerics = iexportVersionPosCol
+
+ iexportVersionCurrent = iexportVersionGenerics
+)
+
const predeclReserved = 32
type itag uint64
@@ -56,6 +66,8 @@ const (
signatureType
structType
interfaceType
+ typeParamType
+ instType
)
// iImportData imports a package from the serialized package data
@@ -63,7 +75,7 @@ const (
// If the export data version is not recognized or the format is otherwise
// compromised, an error is returned.
func iImportData(fset *token.FileSet, imports map[string]*types.Package, dataReader *bufio.Reader, path string) (pkg *types.Package, err error) {
- const currentVersion = 1
+ const currentVersion = iexportVersionCurrent
version := int64(-1)
defer func() {
if e := recover(); e != nil {
@@ -79,9 +91,13 @@ func iImportData(fset *token.FileSet, imports map[string]*types.Package, dataRea
version = int64(r.uint64())
switch version {
- case currentVersion, 0:
+ case /* iexportVersionGenerics, */ iexportVersionPosCol, iexportVersionGo1_11:
default:
- errorf("unknown iexport format version %d", version)
+ if version > iexportVersionGenerics {
+ errorf("unstable iexport format version %d, just rebuild compiler and std library", version)
+ } else {
+ errorf("unknown iexport format version %d", version)
+ }
}
sLen := int64(r.uint64())
@@ -95,8 +111,9 @@ func iImportData(fset *token.FileSet, imports map[string]*types.Package, dataRea
declData := data[sLen:]
p := iimporter{
- ipath: path,
- version: int(version),
+ exportVersion: version,
+ ipath: path,
+ version: int(version),
stringData: stringData,
stringCache: make(map[uint64]string),
@@ -172,8 +189,9 @@ func iImportData(fset *token.FileSet, imports map[string]*types.Package, dataRea
}
type iimporter struct {
- ipath string
- version int
+ exportVersion int64
+ ipath string
+ version int
stringData []byte
stringCache map[uint64]string
@@ -276,6 +294,9 @@ func (r *importReader) obj(name string) {
r.declare(types.NewFunc(pos, r.currPkg, name, sig))
+ case 'G':
+ errorf("unexpected parameterized function/method")
+
case 'T':
// Types can be recursive. We need to setup a stub
// declaration before recursing.
@@ -297,6 +318,9 @@ func (r *importReader) obj(name string) {
}
}
+ case 'U':
+ errorf("unexpected parameterized type")
+
case 'V':
typ := r.typ()
@@ -549,6 +573,14 @@ func (r *importReader) doType(base *types.Named) types.Type {
typ := types.NewInterfaceType(methods, embeddeds)
r.p.interfaceList = append(r.p.interfaceList, typ)
return typ
+
+ case typeParamType:
+ errorf("do not handle type param types yet")
+ return nil
+
+ case instType:
+ errorf("do not handle instantiated types yet")
+ return nil
}
}
diff --git a/src/go/internal/gcimporter/support.go b/src/go/internal/gcimporter/support.go
index b8bb14dc49..09810dd85b 100644
--- a/src/go/internal/gcimporter/support.go
+++ b/src/go/internal/gcimporter/support.go
@@ -122,6 +122,9 @@ var predeclared = []types.Type{
// used internally by gc; never used by this package or in .a files
anyType{},
+
+ // comparable
+ types.Universe.Lookup("comparable").Type(),
}
type anyType struct{}
diff --git a/src/go/internal/typeparams/notypeparams.go b/src/go/internal/typeparams/notypeparams.go
deleted file mode 100644
index 2ceafaac1c..0000000000
--- a/src/go/internal/typeparams/notypeparams.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !typeparams
-// +build !typeparams
-
-package typeparams
-
-import (
- "go/ast"
-)
-
-const Enabled = false
-
-func PackExpr(list []ast.Expr) ast.Expr {
- switch len(list) {
- case 1:
- return list[0]
- default:
- // The parser should not attempt to pack multiple expressions into an
- // IndexExpr if type params are disabled.
- panic("multiple index expressions are unsupported without type params")
- }
-}
-
-func UnpackExpr(expr ast.Expr) []ast.Expr {
- return []ast.Expr{expr}
-}
-
-func IsListExpr(n ast.Node) bool {
- return false
-}
-
-func Get(ast.Node) *ast.FieldList {
- return nil
-}
-
-func Set(node ast.Node, params *ast.FieldList) {
-}
diff --git a/src/go/internal/typeparams/typeparams.go b/src/go/internal/typeparams/typeparams.go
index 871e95d998..3191654d4f 100644
--- a/src/go/internal/typeparams/typeparams.go
+++ b/src/go/internal/typeparams/typeparams.go
@@ -2,48 +2,57 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build typeparams
-// +build typeparams
-
package typeparams
import (
"fmt"
"go/ast"
+ "go/token"
)
-const Enabled = true
-
-func PackExpr(list []ast.Expr) ast.Expr {
- switch len(list) {
+func PackIndexExpr(x ast.Expr, lbrack token.Pos, exprs []ast.Expr, rbrack token.Pos) ast.Expr {
+ switch len(exprs) {
case 0:
- // Return an empty ListExpr here, rather than nil, as IndexExpr.Index must
- // never be nil.
- // TODO(rFindley) would a BadExpr be more appropriate here?
- return &ast.ListExpr{}
+ panic("internal error: PackIndexExpr with empty expr slice")
case 1:
- return list[0]
+ return &ast.IndexExpr{
+ X: x,
+ Lbrack: lbrack,
+ Index: exprs[0],
+ Rbrack: rbrack,
+ }
default:
- return &ast.ListExpr{ElemList: list}
+ return &ast.MultiIndexExpr{
+ X: x,
+ Lbrack: lbrack,
+ Indices: exprs,
+ Rbrack: rbrack,
+ }
}
}
-// TODO(gri) Should find a more efficient solution that doesn't
-// require introduction of a new slice for simple
-// expressions.
-func UnpackExpr(x ast.Expr) []ast.Expr {
- if x, _ := x.(*ast.ListExpr); x != nil {
- return x.ElemList
- }
- if x != nil {
- return []ast.Expr{x}
- }
- return nil
+// IndexExpr wraps an ast.IndexExpr or ast.MultiIndexExpr so that both
+// forms can be handled uniformly.
+//
+// Orig holds the original ast.Expr from which this IndexExpr was derived.
+type IndexExpr struct {
+ Orig ast.Expr // the wrapped expr, which may be distinct from MultiIndexExpr below.
+ *ast.MultiIndexExpr
}
-func IsListExpr(n ast.Node) bool {
- _, ok := n.(*ast.ListExpr)
- return ok
+func UnpackIndexExpr(n ast.Node) *IndexExpr {
+ switch e := n.(type) {
+ case *ast.IndexExpr:
+ return &IndexExpr{e, &ast.MultiIndexExpr{
+ X: e.X,
+ Lbrack: e.Lbrack,
+ Indices: []ast.Expr{e.Index},
+ Rbrack: e.Rbrack,
+ }}
+ case *ast.MultiIndexExpr:
+ return &IndexExpr{e, e}
+ }
+ return nil
}
func Get(n ast.Node) *ast.FieldList {
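
Because go/internal/typeparams cannot be imported from outside the standard library, here is a hedged sketch that mirrors the PackIndexExpr dispatch above (packIndexExpr is a local stand-in, and the example again assumes ast.MultiIndexExpr from this tree):

package main

import (
	"fmt"
	"go/ast"
	"go/token"
)

// packIndexExpr mirrors PackIndexExpr above: one index yields an
// *ast.IndexExpr, two or more yield an *ast.MultiIndexExpr.
func packIndexExpr(x ast.Expr, lbrack token.Pos, exprs []ast.Expr, rbrack token.Pos) ast.Expr {
	switch len(exprs) {
	case 0:
		panic("internal error: empty expr slice")
	case 1:
		return &ast.IndexExpr{X: x, Lbrack: lbrack, Index: exprs[0], Rbrack: rbrack}
	default:
		return &ast.MultiIndexExpr{X: x, Lbrack: lbrack, Indices: exprs, Rbrack: rbrack}
	}
}

func main() {
	f := &ast.Ident{Name: "f"}
	one := packIndexExpr(f, 1, []ast.Expr{&ast.Ident{Name: "T"}}, 5)
	two := packIndexExpr(f, 1, []ast.Expr{&ast.Ident{Name: "T"}, &ast.Ident{Name: "U"}}, 8)
	fmt.Printf("%T %T\n", one, two) // *ast.IndexExpr *ast.MultiIndexExpr
}
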
diff --git a/src/go/parser/error_test.go b/src/go/parser/error_test.go
index f4f0a5240a..f35ba0b501 100644
--- a/src/go/parser/error_test.go
+++ b/src/go/parser/error_test.go
@@ -186,16 +186,14 @@ func TestErrors(t *testing.T) {
}
for _, d := range list {
name := d.Name()
- if !d.IsDir() && !strings.HasPrefix(name, ".") && (strings.HasSuffix(name, ".src") || strings.HasSuffix(name, ".go2")) {
- mode := DeclarationErrors | AllErrors
- if strings.HasSuffix(name, ".go2") {
- if !typeparams.Enabled {
- continue
+ t.Run(name, func(t *testing.T) {
+ if !d.IsDir() && !strings.HasPrefix(name, ".") && (strings.HasSuffix(name, ".src") || strings.HasSuffix(name, ".go2")) {
+ mode := DeclarationErrors | AllErrors
+ if !strings.HasSuffix(name, ".go2") {
+ mode |= typeparams.DisallowParsing
}
- } else {
- mode |= typeparams.DisallowParsing
+ checkErrors(t, filepath.Join(testdata, name), nil, mode, true)
}
- checkErrors(t, filepath.Join(testdata, name), nil, mode, true)
- }
+ })
}
}
diff --git a/src/go/parser/parser.go b/src/go/parser/parser.go
index f10c8650af..bdc2ad308c 100644
--- a/src/go/parser/parser.go
+++ b/src/go/parser/parser.go
@@ -77,7 +77,7 @@ func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mod
}
func (p *parser) parseTypeParams() bool {
- return typeparams.Enabled && p.mode&typeparams.DisallowParsing == 0
+ return p.mode&typeparams.DisallowParsing == 0
}
// ----------------------------------------------------------------------------
@@ -600,7 +600,7 @@ func (p *parser) parseArrayFieldOrTypeInstance(x *ast.Ident) (*ast.Ident, ast.Ex
}
// x[P], x[P1, P2], ...
- return nil, &ast.IndexExpr{X: x, Lbrack: lbrack, Index: typeparams.PackExpr(args), Rbrack: rbrack}
+ return nil, typeparams.PackIndexExpr(x, lbrack, args, rbrack)
}
func (p *parser) parseFieldDecl() *ast.Field {
@@ -980,6 +980,7 @@ func (p *parser) parseMethodSpec() *ast.Field {
list := []ast.Expr{x}
if p.atComma("type argument list", token.RBRACK) {
p.exprLev++
+ p.next()
for p.tok != token.RBRACK && p.tok != token.EOF {
list = append(list, p.parseType())
if !p.atComma("type argument list", token.RBRACK) {
@@ -990,7 +991,7 @@ func (p *parser) parseMethodSpec() *ast.Field {
p.exprLev--
}
rbrack := p.expectClosing(token.RBRACK, "type argument list")
- typ = &ast.IndexExpr{X: ident, Lbrack: lbrack, Index: typeparams.PackExpr(list), Rbrack: rbrack}
+ typ = typeparams.PackIndexExpr(ident, lbrack, list, rbrack)
}
case p.tok == token.LPAREN:
// ordinary method
@@ -1011,11 +1012,56 @@ func (p *parser) parseMethodSpec() *ast.Field {
typ = p.parseTypeInstance(typ)
}
}
- p.expectSemi() // call before accessing p.linecomment
- spec := &ast.Field{Doc: doc, Names: idents, Type: typ, Comment: p.lineComment}
+ // Comment is added at the call site: the field below may be joined with
+ // additional type specs using '|'.
+ // TODO(rfindley) this should be refactored.
+ // TODO(rfindley) add more tests for comment handling.
+ return &ast.Field{Doc: doc, Names: idents, Type: typ}
+}
- return spec
+func (p *parser) embeddedElem(f *ast.Field) *ast.Field {
+ if p.trace {
+ defer un(trace(p, "EmbeddedElem"))
+ }
+ if f == nil {
+ f = new(ast.Field)
+ f.Type = p.embeddedTerm()
+ }
+ for p.tok == token.OR {
+ t := new(ast.BinaryExpr)
+ t.OpPos = p.pos
+ t.Op = token.OR
+ p.next()
+ t.X = f.Type
+ t.Y = p.embeddedTerm()
+ f.Type = t
+ }
+ return f
+}
+
+func (p *parser) embeddedTerm() ast.Expr {
+ if p.trace {
+ defer un(trace(p, "EmbeddedTerm"))
+ }
+ if p.tok == token.TILDE {
+ t := new(ast.UnaryExpr)
+ t.OpPos = p.pos
+ t.Op = token.TILDE
+ p.next()
+ t.X = p.parseType()
+ return t
+ }
+
+ t := p.tryIdentOrType()
+ if t == nil {
+ pos := p.pos
+ p.errorExpected(pos, "~ term or type")
+ p.advance(exprEnd)
+ return &ast.BadExpr{From: pos, To: p.pos}
+ }
+
+ return t
}
func (p *parser) parseInterfaceType() *ast.InterfaceType {
@@ -1025,11 +1071,28 @@ func (p *parser) parseInterfaceType() *ast.InterfaceType {
pos := p.expect(token.INTERFACE)
lbrace := p.expect(token.LBRACE)
+
var list []*ast.Field
- for p.tok == token.IDENT || p.parseTypeParams() && p.tok == token.TYPE {
- if p.tok == token.IDENT {
- list = append(list, p.parseMethodSpec())
- } else {
+
+parseElements:
+ for {
+ switch {
+ case p.tok == token.IDENT:
+ f := p.parseMethodSpec()
+ if f.Names == nil && p.parseTypeParams() {
+ f = p.embeddedElem(f)
+ }
+ p.expectSemi()
+ f.Comment = p.lineComment
+ list = append(list, f)
+ case p.tok == token.TILDE && p.parseTypeParams():
+ f := p.embeddedElem(nil)
+ p.expectSemi()
+ f.Comment = p.lineComment
+ list = append(list, f)
+ case p.tok == token.TYPE && p.parseTypeParams():
+ // TODO(rfindley): remove TypeList syntax and refactor the clauses above.
+
// all types in a type list share the same field name "type"
// (since type is a keyword, a Go program cannot have that field name)
name := []*ast.Ident{{NamePos: p.pos, Name: "type"}}
@@ -1039,8 +1102,22 @@ func (p *parser) parseInterfaceType() *ast.InterfaceType {
list = append(list, &ast.Field{Names: name, Type: typ})
}
p.expectSemi()
+ case p.parseTypeParams():
+ if t := p.tryIdentOrType(); t != nil {
+ f := new(ast.Field)
+ f.Type = t
+ f = p.embeddedElem(f)
+ p.expectSemi()
+ f.Comment = p.lineComment
+ list = append(list, f)
+ } else {
+ break parseElements
+ }
+ default:
+ break parseElements
}
}
+
// TODO(rfindley): the error produced here could be improved, since we could
+ // accept an identifier, 'type', or a '}' at this point.
rbrace := p.expect(token.RBRACE)
@@ -1101,7 +1178,6 @@ func (p *parser) parseTypeInstance(typ ast.Expr) ast.Expr {
}
opening := p.expect(token.LBRACK)
-
p.exprLev++
var list []ast.Expr
for p.tok != token.RBRACK && p.tok != token.EOF {
@@ -1115,7 +1191,17 @@ func (p *parser) parseTypeInstance(typ ast.Expr) ast.Expr {
closing := p.expectClosing(token.RBRACK, "type argument list")
- return &ast.IndexExpr{X: typ, Lbrack: opening, Index: typeparams.PackExpr(list), Rbrack: closing}
+ if len(list) == 0 {
+ p.errorExpected(closing, "type argument list")
+ return &ast.IndexExpr{
+ X: typ,
+ Lbrack: opening,
+ Index: &ast.BadExpr{From: opening + 1, To: closing},
+ Rbrack: closing,
+ }
+ }
+
+ return typeparams.PackIndexExpr(typ, opening, list, closing)
}
func (p *parser) tryIdentOrType() ast.Expr {
@@ -1378,7 +1464,7 @@ func (p *parser) parseIndexOrSliceOrInstance(x ast.Expr) ast.Expr {
}
// instance expression
- return &ast.IndexExpr{X: x, Lbrack: lbrack, Index: typeparams.PackExpr(args), Rbrack: rbrack}
+ return typeparams.PackIndexExpr(x, lbrack, args, rbrack)
}
func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
@@ -1480,6 +1566,7 @@ func (p *parser) checkExpr(x ast.Expr) ast.Expr {
panic("unreachable")
case *ast.SelectorExpr:
case *ast.IndexExpr:
+ case *ast.MultiIndexExpr:
case *ast.SliceExpr:
case *ast.TypeAssertExpr:
// If t.Type == nil we have a type assertion of the form
@@ -1569,7 +1656,7 @@ func (p *parser) parsePrimaryExpr() (x ast.Expr) {
return
}
// x is possibly a composite literal type
- case *ast.IndexExpr:
+ case *ast.IndexExpr, *ast.MultiIndexExpr:
if p.exprLev < 0 {
return
}
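
A short sketch of the effect of these parser changes (again assuming a toolchain built from this tree): interfaces containing union and tilde constraint elements now parse into ordinary BinaryExpr and UnaryExpr nodes.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

func main() {
	src := `package p

type C interface {
	~int | string
}
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "c.go", src, 0)
	if err != nil {
		panic(err)
	}
	ast.Inspect(f, func(n ast.Node) bool {
		switch e := n.(type) {
		case *ast.BinaryExpr:
			fmt.Println("union:", e.Op) // |
		case *ast.UnaryExpr:
			fmt.Println("approx:", e.Op) // ~
		}
		return true
	})
}
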
diff --git a/src/go/parser/resolver_test.go b/src/go/parser/resolver_test.go
index 625c009c91..0c06c592d5 100644
--- a/src/go/parser/resolver_test.go
+++ b/src/go/parser/resolver_test.go
@@ -41,11 +41,7 @@ func TestResolution(t *testing.T) {
path := filepath.Join(dir, fi.Name())
src := readFile(path) // panics on failure
var mode Mode
- if strings.HasSuffix(path, ".go2") {
- if !typeparams.Enabled {
- t.Skip("type params are not enabled")
- }
- } else {
+ if !strings.HasSuffix(path, ".go2") {
mode |= typeparams.DisallowParsing
}
file, err := ParseFile(fset, path, src, mode)
diff --git a/src/go/parser/short_test.go b/src/go/parser/short_test.go
index 67fef15665..bfc6f6714b 100644
--- a/src/go/parser/short_test.go
+++ b/src/go/parser/short_test.go
@@ -133,9 +133,6 @@ func TestValid(t *testing.T) {
}
})
t.Run("tparams", func(t *testing.T) {
- if !typeparams.Enabled {
- t.Skip("type params are not enabled")
- }
for _, src := range valids {
checkErrors(t, src, src, DeclarationErrors|AllErrors, false)
}
@@ -200,10 +197,12 @@ var invalids = []string{
`package p; func (type /* ERROR "found 'type'" */ T)(T) _()`,
`package p; type _[A+B, /* ERROR "expected ']'" */ ] int`,
- // TODO: this error should be positioned on the ':'
+ // TODO(rfindley): this error should be positioned on the ':'
`package p; var a = a[[]int:[ /* ERROR "expected expression" */ ]int];`,
- // TODO: the compiler error is better here: "cannot parenthesize embedded type"
- `package p; type I1 interface{}; type I2 interface{ (/* ERROR "expected '}', found '\('" */ I1) }`,
+
+ // TODO(rfindley): the compiler error is better here: "cannot parenthesize embedded type"
+ // TODO(rfindley): confirm that parenthesized types should now be accepted.
+ // `package p; type I1 interface{}; type I2 interface{ (/* ERROR "expected '}', found '\('" */ I1) }`,
// issue 8656
`package p; func f() (a b string /* ERROR "missing ','" */ , ok bool)`,
@@ -266,9 +265,6 @@ func TestInvalid(t *testing.T) {
}
})
t.Run("tparams", func(t *testing.T) {
- if !typeparams.Enabled {
- t.Skip("type params are not enabled")
- }
for _, src := range invalids {
checkErrors(t, src, src, DeclarationErrors|AllErrors, true)
}
diff --git a/src/go/parser/testdata/interface.go2 b/src/go/parser/testdata/interface.go2
new file mode 100644
index 0000000000..b399d75148
--- /dev/null
+++ b/src/go/parser/testdata/interface.go2
@@ -0,0 +1,80 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains test cases for interfaces containing
+// constraint elements.
+//
+// For now, we accept both ordinary type lists and the
+// more complex constraint elements.
+
+package p
+
+type _ interface {
+ m()
+ type int
+ type int, string
+ E
+}
+
+type _ interface {
+ m()
+ ~int
+ int | string
+ int | ~string
+ ~int | ~string
+}
+
+type _ interface {
+ m()
+ ~int
+ T[int, string] | string
+ int | ~T[string, struct{}]
+ ~int | ~string
+ type bool, int, float64
+}
+
+type _ interface {
+ int
+ []byte
+ [10]int
+ struct{}
+ *int
+ func()
+ interface{}
+ map[string]int
+ chan T
+ chan<- T
+ <-chan T
+ T[int]
+}
+
+type _ interface {
+ int | string
+ []byte | string
+ [10]int | string
+ struct{} | string
+ *int | string
+ func() | string
+ interface{} | string
+ map[string]int | string
+ chan T | string
+ chan<- T | string
+ <-chan T | string
+ T[int] | string
+}
+
+type _ interface {
+ ~int | string
+ ~[]byte | string
+ ~[10]int | string
+ ~struct{} | string
+ ~*int | string
+ ~func() | string
+ ~interface{} | string
+ ~map[string]int | string
+ ~chan T | string
+ ~chan<- T | string
+ ~<-chan T | string
+ ~T[int] | string
+}
diff --git a/src/go/printer/nodes.go b/src/go/printer/nodes.go
index 913281ea6c..239fcbde1c 100644
--- a/src/go/printer/nodes.go
+++ b/src/go/printer/nodes.go
@@ -871,17 +871,15 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int) {
// TODO(gri): should treat[] like parentheses and undo one level of depth
p.expr1(x.X, token.HighestPrec, 1)
p.print(x.Lbrack, token.LBRACK)
- // Note: we're a bit defensive here to handle the case of a ListExpr of
- // length 1.
- if list := typeparams.UnpackExpr(x.Index); len(list) > 0 {
- if len(list) > 1 {
- p.exprList(x.Lbrack, list, depth+1, commaTerm, x.Rbrack, false)
- } else {
- p.expr0(list[0], depth+1)
- }
- } else {
- p.expr0(x.Index, depth+1)
- }
+ p.expr0(x.Index, depth+1)
+ p.print(x.Rbrack, token.RBRACK)
+
+ case *ast.MultiIndexExpr:
+ // TODO(gri): as for IndexExpr, should treat [] like parentheses and undo
+ // one level of depth
+ p.expr1(x.X, token.HighestPrec, 1)
+ p.print(x.Lbrack, token.LBRACK)
+ p.exprList(x.Lbrack, x.Indices, depth+1, commaTerm, x.Rbrack, false)
p.print(x.Rbrack, token.RBRACK)
case *ast.SliceExpr:
diff --git a/src/go/printer/printer_test.go b/src/go/printer/printer_test.go
index 20c97b8c08..ff8be4ae97 100644
--- a/src/go/printer/printer_test.go
+++ b/src/go/printer/printer_test.go
@@ -10,7 +10,6 @@ import (
"flag"
"fmt"
"go/ast"
- "go/internal/typeparams"
"go/parser"
"go/token"
"io"
@@ -222,9 +221,6 @@ var data = []entry{
func TestFiles(t *testing.T) {
t.Parallel()
for _, e := range data {
- if !typeparams.Enabled && e.mode&allowTypeParams != 0 {
- continue
- }
source := filepath.Join(dataDir, e.source)
golden := filepath.Join(dataDir, e.golden)
mode := e.mode
diff --git a/src/go/scanner/scanner.go b/src/go/scanner/scanner.go
index f08e28cdd6..ca4b5264cf 100644
--- a/src/go/scanner/scanner.go
+++ b/src/go/scanner/scanner.go
@@ -969,6 +969,8 @@ scanAgain:
}
case '|':
tok = s.switch3(token.OR, token.OR_ASSIGN, '|', token.LOR)
+ case '~':
+ tok = token.TILDE
default:
// next reports unexpected BOMs - don't repeat
if ch != bom {
diff --git a/src/go/scanner/scanner_test.go b/src/go/scanner/scanner_test.go
index db123c32e0..de45e16606 100644
--- a/src/go/scanner/scanner_test.go
+++ b/src/go/scanner/scanner_test.go
@@ -40,7 +40,7 @@ type elt struct {
class int
}
-var tokens = [...]elt{
+var tokens = []elt{
// Special tokens
{token.COMMENT, "/* a comment */", special},
{token.COMMENT, "// a comment \n", special},
@@ -149,6 +149,7 @@ var tokens = [...]elt{
{token.RBRACE, "}", operator},
{token.SEMICOLON, ";", operator},
{token.COLON, ":", operator},
+ {token.TILDE, "~", operator},
// Keywords
{token.BREAK, "break", keyword},
diff --git a/src/go/token/token.go b/src/go/token/token.go
index 96a1079ec3..d22e575661 100644
--- a/src/go/token/token.go
+++ b/src/go/token/token.go
@@ -125,6 +125,11 @@ const (
TYPE
VAR
keyword_end
+
+ additional_beg
+ // additional tokens, handled in an ad-hoc manner
+ TILDE
+ additional_end
)
var tokens = [...]string{
@@ -225,6 +230,8 @@ var tokens = [...]string{
SWITCH: "switch",
TYPE: "type",
VAR: "var",
+
+ TILDE: "~",
}
// String returns the string corresponding to the token tok.
@@ -304,7 +311,9 @@ func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_en
// IsOperator returns true for tokens corresponding to operators and
// delimiters; it returns false otherwise.
//
-func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end }
+func (tok Token) IsOperator() bool {
+ return (operator_beg < tok && tok < operator_end) || tok == TILDE
+}
// IsKeyword returns true for tokens corresponding to keywords;
// it returns false otherwise.
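
A sketch of the new token in action: the scanner now produces TILDE for "~", and IsOperator reports it as an operator (this assumes a toolchain that includes these scanner and token changes).

package main

import (
	"fmt"
	"go/scanner"
	"go/token"
)

func main() {
	src := []byte("~int")
	fset := token.NewFileSet()
	file := fset.AddFile("x.go", fset.Base(), len(src))

	var s scanner.Scanner
	s.Init(file, src, nil, 0)
	for {
		_, tok, lit := s.Scan()
		if tok == token.EOF {
			break
		}
		// Prints "~", then "IDENT int", then the auto-inserted semicolon.
		fmt.Println(tok, lit)
	}
	fmt.Println(token.TILDE.IsOperator()) // true
}
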
diff --git a/src/go/types/api.go b/src/go/types/api.go
index 8c0d9d22bf..315f77f362 100644
--- a/src/go/types/api.go
+++ b/src/go/types/api.go
@@ -34,6 +34,8 @@ import (
"go/token"
)
+const allowTypeLists = false
+
// An Error describes a type-checking error; it implements the error interface.
// A "soft" error is an error that still permits a valid interpretation of a
// package (such as "unused variable"); "hard" errors may lead to unpredictable
@@ -101,12 +103,12 @@ type ImporterFrom interface {
// A Config specifies the configuration for type checking.
// The zero value for Config is a ready-to-use default configuration.
type Config struct {
- // goVersion describes the accepted Go language version. The string
+ // GoVersion describes the accepted Go language version. The string
// must follow the format "go%d.%d" (e.g. "go1.12") or it must be
// empty; an empty string indicates the latest language version.
// If the format is invalid, invoking the type checker will cause a
// panic.
- goVersion string
+ GoVersion string
// If IgnoreFuncBodies is set, function bodies are not
// type-checked.
@@ -160,7 +162,101 @@ func srcimporter_setUsesCgo(conf *Config) {
conf.go115UsesCgo = true
}
-// The Info struct is found in api_notypeparams.go and api_typeparams.go.
+// Info holds result type information for a type-checked package.
+// Only the information for which a map is provided is collected.
+// If the package has type errors, the collected information may
+// be incomplete.
+type Info struct {
+ // Types maps expressions to their types, and for constant
+ // expressions, also their values. Invalid expressions are
+ // omitted.
+ //
+ // For (possibly parenthesized) identifiers denoting built-in
+ // functions, the recorded signatures are call-site specific:
+ // if the call result is not a constant, the recorded type is
+ // an argument-specific signature. Otherwise, the recorded type
+ // is invalid.
+ //
+ // The Types map does not record the type of every identifier,
+ // only those that appear where an arbitrary expression is
+ // permitted. For instance, the identifier f in a selector
+ // expression x.f is found only in the Selections map, the
+ // identifier z in a variable declaration 'var z int' is found
+ // only in the Defs map, and identifiers denoting packages in
+ // qualified identifiers are collected in the Uses map.
+ Types map[ast.Expr]TypeAndValue
+
+ // Inferred maps calls of parameterized functions that use
+ // type inference to the inferred type arguments and signature
+ // of the function called. The recorded "call" expression may be
+ // an *ast.CallExpr (as in f(x)), or an *ast.IndexExpr (as in f[T]).
+ Inferred map[ast.Expr]Inferred
+
+ // Defs maps identifiers to the objects they define (including
+ // package names, dots "." of dot-imports, and blank "_" identifiers).
+ // For identifiers that do not denote objects (e.g., the package name
+ // in package clauses, or symbolic variables t in t := x.(type) of
+ // type switch headers), the corresponding objects are nil.
+ //
+ // For an embedded field, Defs returns the field *Var it defines.
+ //
+ // Invariant: Defs[id] == nil || Defs[id].Pos() == id.Pos()
+ Defs map[*ast.Ident]Object
+
+ // Uses maps identifiers to the objects they denote.
+ //
+ // For an embedded field, Uses returns the *TypeName it denotes.
+ //
+ // Invariant: Uses[id].Pos() != id.Pos()
+ Uses map[*ast.Ident]Object
+
+ // Implicits maps nodes to their implicitly declared objects, if any.
+ // The following node and object types may appear:
+ //
+ // node declared object
+ //
+ // *ast.ImportSpec *PkgName for imports without renames
+ // *ast.CaseClause type-specific *Var for each type switch case clause (incl. default)
+ // *ast.Field anonymous parameter *Var (incl. unnamed results)
+ //
+ Implicits map[ast.Node]Object
+
+ // Selections maps selector expressions (excluding qualified identifiers)
+ // to their corresponding selections.
+ Selections map[*ast.SelectorExpr]*Selection
+
+ // Scopes maps ast.Nodes to the scopes they define. Package scopes are not
+ // associated with a specific node but with all files belonging to a package.
+ // Thus, the package scope can be found in the type-checked Package object.
+ // Scopes nest, with the Universe scope being the outermost scope, enclosing
+ // the package scope, which contains (one or more) file scopes, which enclose
+ // function scopes which in turn enclose statement and function literal scopes.
+ // Note that even though package-level functions are declared in the package
+ // scope, the function scopes are embedded in the file scope of the file
+ // containing the function declaration.
+ //
+ // The following node types may appear in Scopes:
+ //
+ // *ast.File
+ // *ast.FuncType
+ // *ast.BlockStmt
+ // *ast.IfStmt
+ // *ast.SwitchStmt
+ // *ast.TypeSwitchStmt
+ // *ast.CaseClause
+ // *ast.CommClause
+ // *ast.ForStmt
+ // *ast.RangeStmt
+ //
+ Scopes map[ast.Node]*Scope
+
+ // InitOrder is the list of package-level initializers in the order in which
+ // they must be executed. Initializers referring to variables related by an
+ // initialization dependency appear in topological order, the others appear
+ // in source order. Variables without an initialization expression do not
+ // appear in this list.
+ InitOrder []*Initializer
+}
// TypeOf returns the type of expression e, or nil if not found.
// Precondition: the Types, Uses and Defs maps are populated.
@@ -252,10 +348,10 @@ func (tv TypeAndValue) HasOk() bool {
return tv.mode == commaok || tv.mode == mapindex
}
-// _Inferred reports the _Inferred type arguments and signature
+// Inferred reports the inferred type arguments and signature
// for a parameterized function call that uses type inference.
-type _Inferred struct {
- Targs []Type
+type Inferred struct {
+ TArgs []Type
Sig *Signature
}
@@ -324,11 +420,11 @@ func Implements(V Type, T *Interface) bool {
// Identical reports whether x and y are identical types.
// Receivers of Signature types are ignored.
func Identical(x, y Type) bool {
- return (*Checker)(nil).identical(x, y)
+ return identical(x, y, true, nil)
}
// IdenticalIgnoreTags reports whether x and y are identical types if tags are ignored.
// Receivers of Signature types are ignored.
func IdenticalIgnoreTags(x, y Type) bool {
- return (*Checker)(nil).identicalIgnoreTags(x, y)
+ return identical(x, y, false, nil)
}
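
Since GoVersion is now an exported field of Config, callers can pin the accepted language version when type checking. A minimal sketch, assuming a toolchain in which this field exists:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	const src = `package p

func Hello() {}
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{
		GoVersion: "go1.17", // reject language features newer than Go 1.17
	}
	info := &types.Info{Defs: make(map[*ast.Ident]types.Object)}
	pkg, err := conf.Check("p", fset, []*ast.File{f}, info)
	if err != nil {
		panic(err)
	}
	fmt.Println(pkg.Name()) // p
	for id, obj := range info.Defs {
		if id.Name == "Hello" {
			fmt.Println(obj.Type()) // func()
		}
	}
}
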
diff --git a/src/go/types/api_notypeparams.go b/src/go/types/api_notypeparams.go
deleted file mode 100644
index 9f7cb7eccf..0000000000
--- a/src/go/types/api_notypeparams.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !typeparams
-// +build !typeparams
-
-package types
-
-import "go/ast"
-
-// Info holds result type information for a type-checked package.
-// Only the information for which a map is provided is collected.
-// If the package has type errors, the collected information may
-// be incomplete.
-type Info struct {
- // Types maps expressions to their types, and for constant
- // expressions, also their values. Invalid expressions are
- // omitted.
- //
- // For (possibly parenthesized) identifiers denoting built-in
- // functions, the recorded signatures are call-site specific:
- // if the call result is not a constant, the recorded type is
- // an argument-specific signature. Otherwise, the recorded type
- // is invalid.
- //
- // The Types map does not record the type of every identifier,
- // only those that appear where an arbitrary expression is
- // permitted. For instance, the identifier f in a selector
- // expression x.f is found only in the Selections map, the
- // identifier z in a variable declaration 'var z int' is found
- // only in the Defs map, and identifiers denoting packages in
- // qualified identifiers are collected in the Uses map.
- Types map[ast.Expr]TypeAndValue
-
- // Defs maps identifiers to the objects they define (including
- // package names, dots "." of dot-imports, and blank "_" identifiers).
- // For identifiers that do not denote objects (e.g., the package name
- // in package clauses, or symbolic variables t in t := x.(type) of
- // type switch headers), the corresponding objects are nil.
- //
- // For an embedded field, Defs returns the field *Var it defines.
- //
- // Invariant: Defs[id] == nil || Defs[id].Pos() == id.Pos()
- Defs map[*ast.Ident]Object
-
- // Uses maps identifiers to the objects they denote.
- //
- // For an embedded field, Uses returns the *TypeName it denotes.
- //
- // Invariant: Uses[id].Pos() != id.Pos()
- Uses map[*ast.Ident]Object
-
- // Implicits maps nodes to their implicitly declared objects, if any.
- // The following node and object types may appear:
- //
- // node declared object
- //
- // *ast.ImportSpec *PkgName for imports without renames
- // *ast.CaseClause type-specific *Var for each type switch case clause (incl. default)
- // *ast.Field anonymous parameter *Var (incl. unnamed results)
- //
- Implicits map[ast.Node]Object
-
- // Selections maps selector expressions (excluding qualified identifiers)
- // to their corresponding selections.
- Selections map[*ast.SelectorExpr]*Selection
-
- // Scopes maps ast.Nodes to the scopes they define. Package scopes are not
- // associated with a specific node but with all files belonging to a package.
- // Thus, the package scope can be found in the type-checked Package object.
- // Scopes nest, with the Universe scope being the outermost scope, enclosing
- // the package scope, which contains (one or more) files scopes, which enclose
- // function scopes which in turn enclose statement and function literal scopes.
- // Note that even though package-level functions are declared in the package
- // scope, the function scopes are embedded in the file scope of the file
- // containing the function declaration.
- //
- // The following node types may appear in Scopes:
- //
- // *ast.File
- // *ast.FuncType
- // *ast.BlockStmt
- // *ast.IfStmt
- // *ast.SwitchStmt
- // *ast.TypeSwitchStmt
- // *ast.CaseClause
- // *ast.CommClause
- // *ast.ForStmt
- // *ast.RangeStmt
- //
- Scopes map[ast.Node]*Scope
-
- // InitOrder is the list of package-level initializers in the order in which
- // they must be executed. Initializers referring to variables related by an
- // initialization dependency appear in topological order, the others appear
- // in source order. Variables without an initialization expression do not
- // appear in this list.
- InitOrder []*Initializer
-}
-
-func getInferred(info *Info) map[ast.Expr]_Inferred {
- return nil
-}
diff --git a/src/go/types/api_test.go b/src/go/types/api_test.go
index f964c656f9..f138af5fbf 100644
--- a/src/go/types/api_test.go
+++ b/src/go/types/api_test.go
@@ -343,16 +343,16 @@ func TestTypesInfo(t *testing.T) {
{broken + `x5; func _() { var x map[string][...]int; x = map[string][...]int{"": {1,2,3}} }`, `x`, `map[string][-1]int`},
// parameterized functions
- {genericPkg + `p0; func f[T any](T); var _ = f[int]`, `f`, `func[T₁ interface{}](T₁)`},
+ {genericPkg + `p0; func f[T any](T); var _ = f[int]`, `f`, `func[generic_p0.T₁ interface{}](generic_p0.T₁)`},
{genericPkg + `p1; func f[T any](T); var _ = f[int]`, `f[int]`, `func(int)`},
- {genericPkg + `p2; func f[T any](T); func _() { f(42) }`, `f`, `func[T₁ interface{}](T₁)`},
+ {genericPkg + `p2; func f[T any](T); func _() { f(42) }`, `f`, `func[generic_p2.T₁ interface{}](generic_p2.T₁)`},
{genericPkg + `p3; func f[T any](T); func _() { f(42) }`, `f(42)`, `()`},
// type parameters
{genericPkg + `t0; type t[] int; var _ t`, `t`, `generic_t0.t`}, // t[] is a syntax error that is ignored in this test in favor of t
- {genericPkg + `t1; type t[P any] int; var _ t[int]`, `t`, `generic_t1.t[P₁ interface{}]`},
- {genericPkg + `t2; type t[P interface{}] int; var _ t[int]`, `t`, `generic_t2.t[P₁ interface{}]`},
- {genericPkg + `t3; type t[P, Q interface{}] int; var _ t[int, int]`, `t`, `generic_t3.t[P₁, Q₂ interface{}]`},
+ {genericPkg + `t1; type t[P any] int; var _ t[int]`, `t`, `generic_t1.t[generic_t1.P₁ interface{}]`},
+ {genericPkg + `t2; type t[P interface{}] int; var _ t[int]`, `t`, `generic_t2.t[generic_t2.P₁ interface{}]`},
+ {genericPkg + `t3; type t[P, Q interface{}] int; var _ t[int, int]`, `t`, `generic_t3.t[generic_t3.P₁, generic_t3.Q₂ interface{}]`},
// TODO (rFindley): compare with types2, which resolves the type broken_t4.t[P₁, Q₂ interface{m()}] here
{broken + `t4; type t[P, Q interface{ m() }] int; var _ t[int, int]`, `t`, `broken_t4.t`},
@@ -361,14 +361,10 @@ func TestTypesInfo(t *testing.T) {
{genericPkg + `g0; type t[P any] int; var x struct{ f t[int] }; var _ = x.f`, `x.f`, `generic_g0.t[int]`},
// issue 45096
- {genericPkg + `issue45096; func _[T interface{ type int8, int16, int32 }](x T) { _ = x < 0 }`, `0`, `T₁`},
+ {genericPkg + `issue45096; func _[T interface{ ~int8 | ~int16 | ~int32 }](x T) { _ = x < 0 }`, `0`, `generic_issue45096.T₁`},
}
for _, test := range tests {
- ResetId() // avoid renumbering of type parameter ids when adding tests
- if strings.HasPrefix(test.src, genericPkg) && !typeparams.Enabled {
- continue
- }
info := Info{Types: make(map[ast.Expr]TypeAndValue)}
var name string
if strings.HasPrefix(test.src, broken) {
@@ -402,6 +398,129 @@ func TestTypesInfo(t *testing.T) {
}
}
+func TestInferredInfo(t *testing.T) {
+ var tests = []struct {
+ src string
+ fun string
+ targs []string
+ sig string
+ }{
+ {genericPkg + `p0; func f[T any](T); func _() { f(42) }`,
+ `f`,
+ []string{`int`},
+ `func(int)`,
+ },
+ {genericPkg + `p1; func f[T any](T) T; func _() { f('@') }`,
+ `f`,
+ []string{`rune`},
+ `func(rune) rune`,
+ },
+ {genericPkg + `p2; func f[T any](...T) T; func _() { f(0i) }`,
+ `f`,
+ []string{`complex128`},
+ `func(...complex128) complex128`,
+ },
+ {genericPkg + `p3; func f[A, B, C any](A, *B, []C); func _() { f(1.2, new(string), []byte{}) }`,
+ `f`,
+ []string{`float64`, `string`, `byte`},
+ `func(float64, *string, []byte)`,
+ },
+ {genericPkg + `p4; func f[A, B any](A, *B, ...[]B); func _() { f(1.2, new(byte)) }`,
+ `f`,
+ []string{`float64`, `byte`},
+ `func(float64, *byte, ...[]byte)`,
+ },
+
+ {genericPkg + `s1; func f[T any, P interface{~*T}](x T); func _(x string) { f(x) }`,
+ `f`,
+ []string{`string`, `*string`},
+ `func(x string)`,
+ },
+ {genericPkg + `s2; func f[T any, P interface{~*T}](x []T); func _(x []int) { f(x) }`,
+ `f`,
+ []string{`int`, `*int`},
+ `func(x []int)`,
+ },
+ {genericPkg + `s3; type C[T any] interface{~chan<- T}; func f[T any, P C[T]](x []T); func _(x []int) { f(x) }`,
+ `f`,
+ []string{`int`, `chan<- int`},
+ `func(x []int)`,
+ },
+ {genericPkg + `s4; type C[T any] interface{~chan<- T}; func f[T any, P C[T], Q C[[]*P]](x []T); func _(x []int) { f(x) }`,
+ `f`,
+ []string{`int`, `chan<- int`, `chan<- []*chan<- int`},
+ `func(x []int)`,
+ },
+
+ {genericPkg + `t1; func f[T any, P interface{~*T}]() T; func _() { _ = f[string] }`,
+ `f`,
+ []string{`string`, `*string`},
+ `func() string`,
+ },
+ {genericPkg + `t2; type C[T any] interface{~chan<- T}; func f[T any, P C[T]]() []T; func _() { _ = f[int] }`,
+ `f`,
+ []string{`int`, `chan<- int`},
+ `func() []int`,
+ },
+ {genericPkg + `t3; type C[T any] interface{~chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T; func _() { _ = f[int] }`,
+ `f`,
+ []string{`int`, `chan<- int`, `chan<- []*chan<- int`},
+ `func() []int`,
+ },
+ }
+
+ for _, test := range tests {
+ info := Info{}
+ info.Inferred = make(map[ast.Expr]Inferred)
+ name, err := mayTypecheck(t, "InferredInfo", test.src, &info)
+ if err != nil {
+ t.Errorf("package %s: %v", name, err)
+ continue
+ }
+
+ // look for inferred type arguments and signature
+ var targs []Type
+ var sig *Signature
+ for call, inf := range info.Inferred {
+ var fun ast.Expr
+ switch x := call.(type) {
+ case *ast.CallExpr:
+ fun = x.Fun
+ case *ast.IndexExpr:
+ fun = x.X
+ default:
+ panic(fmt.Sprintf("unexpected call expression type %T", call))
+ }
+ if ExprString(fun) == test.fun {
+ targs = inf.TArgs
+ sig = inf.Sig
+ break
+ }
+ }
+ if targs == nil {
+ t.Errorf("package %s: no inferred information found for %s", name, test.fun)
+ continue
+ }
+
+ // check that type arguments are correct
+ if len(targs) != len(test.targs) {
+ t.Errorf("package %s: got %d type arguments; want %d", name, len(targs), len(test.targs))
+ continue
+ }
+ for i, targ := range targs {
+ if got := targ.String(); got != test.targs[i] {
+ t.Errorf("package %s, %d. type argument: got %s; want %s", name, i, got, test.targs[i])
+ continue
+ }
+ }
+
+ // check that signature is correct
+ if got := sig.String(); got != test.sig {
+ t.Errorf("package %s: got %s; want %s", name, got, test.sig)
+ }
+ }
+}
+
func TestDefsInfo(t *testing.T) {
var tests = []struct {
src string
@@ -424,9 +543,6 @@ func TestDefsInfo(t *testing.T) {
}
for _, test := range tests {
- if strings.HasPrefix(test.src, genericPkg) && !typeparams.Enabled {
- continue
- }
info := Info{
Defs: make(map[*ast.Ident]Object),
}
@@ -472,9 +588,6 @@ func TestUsesInfo(t *testing.T) {
}
for _, test := range tests {
- if strings.HasPrefix(test.src, genericPkg) && !typeparams.Enabled {
- continue
- }
info := Info{
Uses: make(map[*ast.Ident]Object),
}
@@ -1716,3 +1829,28 @@ func f(x T) T { return foo.F(x) }
}
}
}
+
+func TestInstantiate(t *testing.T) {
+ // eventually we'd like more tests but this is a start
+ const src = genericPkg + "p; type T[P any] *T[P]"
+ pkg, err := pkgFor(".", src, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // type T should have one type parameter
+ T := pkg.Scope().Lookup("T").Type().(*Named)
+ if n := T.TParams().Len(); n != 1 {
+ t.Fatalf("expected 1 type parameter; found %d", n)
+ }
+
+ // instantiation should succeed (no endless recursion)
+ // even with a nil *Checker
+ var check *Checker
+ res := check.Instantiate(token.NoPos, T, []Type{Typ[Int]}, nil, false)
+
+ // instantiated type should point to itself
+ if res.Underlying().(*Pointer).Elem() != res {
+ t.Fatalf("unexpected result type: %s", res)
+ }
+}
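For orientation (not part of the CL), the shape that TestInstantiate guards against can be written out as ordinary source: a generic named type defined in terms of its own instantiation, which the checker must be able to instantiate without endless recursion. The names below are illustrative only.

	package p

	// T's definition refers to its own instantiation; per the test above,
	// instantiating T[int] must terminate, and the result's underlying
	// pointer type must point back at the instantiated type itself.
	type T[P any] *T[P]

	var _ T[int]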
diff --git a/src/go/types/api_typeparams.go b/src/go/types/api_typeparams.go
deleted file mode 100644
index ed744c4dba..0000000000
--- a/src/go/types/api_typeparams.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build typeparams
-// +build typeparams
-
-package types
-
-import (
- "go/ast"
-)
-
-type (
- Inferred = _Inferred
- Sum = _Sum
- TypeParam = _TypeParam
-)
-
-func NewSum(types []Type) Type { return _NewSum(types) }
-
-func (s *Signature) TParams() []*TypeName { return s._TParams() }
-func (s *Signature) SetTParams(tparams []*TypeName) { s._SetTParams(tparams) }
-
-func (t *Interface) HasTypeList() bool { return t._HasTypeList() }
-func (t *Interface) IsComparable() bool { return t._IsComparable() }
-func (t *Interface) IsConstraint() bool { return t._IsConstraint() }
-
-func (t *Named) TParams() []*TypeName { return t._TParams() }
-func (t *Named) TArgs() []Type { return t._TArgs() }
-func (t *Named) SetTArgs(args []Type) { t._SetTArgs(args) }
-
-// Info is documented in api_notypeparams.go.
-type Info struct {
- Types map[ast.Expr]TypeAndValue
-
- // Inferred maps calls of parameterized functions that use type inference to
- // the Inferred type arguments and signature of the function called. The
- // recorded "call" expression may be an *ast.CallExpr (as in f(x)), or an
-// *ast.IndexExpr (as in f[T]).
- Inferred map[ast.Expr]_Inferred
-
- Defs map[*ast.Ident]Object
- Uses map[*ast.Ident]Object
- Implicits map[ast.Node]Object
- Selections map[*ast.SelectorExpr]*Selection
- Scopes map[ast.Node]*Scope
- InitOrder []*Initializer
-}
-
-func getInferred(info *Info) map[ast.Expr]_Inferred {
- return info.Inferred
-}
diff --git a/src/go/types/api_typeparams_test.go b/src/go/types/api_typeparams_test.go
deleted file mode 100644
index 15c9bf09f9..0000000000
--- a/src/go/types/api_typeparams_test.go
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build typeparams
-// +build typeparams
-
-package types_test
-
-import (
- "fmt"
- "go/ast"
- "testing"
-
- . "go/types"
-)
-
-func TestInferredInfo(t *testing.T) {
- var tests = []struct {
- src string
- fun string
- targs []string
- sig string
- }{
- {genericPkg + `p0; func f[T any](T); func _() { f(42) }`,
- `f`,
- []string{`int`},
- `func(int)`,
- },
- {genericPkg + `p1; func f[T any](T) T; func _() { f('@') }`,
- `f`,
- []string{`rune`},
- `func(rune) rune`,
- },
- {genericPkg + `p2; func f[T any](...T) T; func _() { f(0i) }`,
- `f`,
- []string{`complex128`},
- `func(...complex128) complex128`,
- },
- {genericPkg + `p3; func f[A, B, C any](A, *B, []C); func _() { f(1.2, new(string), []byte{}) }`,
- `f`,
- []string{`float64`, `string`, `byte`},
- `func(float64, *string, []byte)`,
- },
- {genericPkg + `p4; func f[A, B any](A, *B, ...[]B); func _() { f(1.2, new(byte)) }`,
- `f`,
- []string{`float64`, `byte`},
- `func(float64, *byte, ...[]byte)`,
- },
-
- {genericPkg + `s1; func f[T any, P interface{type *T}](x T); func _(x string) { f(x) }`,
- `f`,
- []string{`string`, `*string`},
- `func(x string)`,
- },
- {genericPkg + `s2; func f[T any, P interface{type *T}](x []T); func _(x []int) { f(x) }`,
- `f`,
- []string{`int`, `*int`},
- `func(x []int)`,
- },
- {genericPkg + `s3; type C[T any] interface{type chan<- T}; func f[T any, P C[T]](x []T); func _(x []int) { f(x) }`,
- `f`,
- []string{`int`, `chan<- int`},
- `func(x []int)`,
- },
- {genericPkg + `s4; type C[T any] interface{type chan<- T}; func f[T any, P C[T], Q C[[]*P]](x []T); func _(x []int) { f(x) }`,
- `f`,
- []string{`int`, `chan<- int`, `chan<- []*chan<- int`},
- `func(x []int)`,
- },
-
- {genericPkg + `t1; func f[T any, P interface{type *T}]() T; func _() { _ = f[string] }`,
- `f`,
- []string{`string`, `*string`},
- `func() string`,
- },
- {genericPkg + `t2; type C[T any] interface{type chan<- T}; func f[T any, P C[T]]() []T; func _() { _ = f[int] }`,
- `f`,
- []string{`int`, `chan<- int`},
- `func() []int`,
- },
- {genericPkg + `t3; type C[T any] interface{type chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T; func _() { _ = f[int] }`,
- `f`,
- []string{`int`, `chan<- int`, `chan<- []*chan<- int`},
- `func() []int`,
- },
- }
-
- for _, test := range tests {
- info := Info{}
- info.Inferred = make(map[ast.Expr]Inferred)
- name, err := mayTypecheck(t, "InferredInfo", test.src, &info)
- if err != nil {
- t.Errorf("package %s: %v", name, err)
- continue
- }
-
- // look for inferred type arguments and signature
- var targs []Type
- var sig *Signature
- for call, inf := range info.Inferred {
- var fun ast.Expr
- switch x := call.(type) {
- case *ast.CallExpr:
- fun = x.Fun
- case *ast.IndexExpr:
- fun = x.X
- default:
- panic(fmt.Sprintf("unexpected call expression type %T", call))
- }
- if ExprString(fun) == test.fun {
- targs = inf.Targs
- sig = inf.Sig
- break
- }
- }
- if targs == nil {
- t.Errorf("package %s: no inferred information found for %s", name, test.fun)
- continue
- }
-
- // check that type arguments are correct
- if len(targs) != len(test.targs) {
- t.Errorf("package %s: got %d type arguments; want %d", name, len(targs), len(test.targs))
- continue
- }
- for i, targ := range targs {
- if got := targ.String(); got != test.targs[i] {
- t.Errorf("package %s, %d. type argument: got %s; want %s", name, i, got, test.targs[i])
- continue
- }
- }
-
- // check that signature is correct
- if got := sig.String(); got != test.sig {
- t.Errorf("package %s: got %s; want %s", name, got, test.sig)
- }
- }
-}
diff --git a/src/go/types/array.go b/src/go/types/array.go
new file mode 100644
index 0000000000..5b28474bb3
--- /dev/null
+++ b/src/go/types/array.go
@@ -0,0 +1,25 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+// An Array represents an array type.
+type Array struct {
+ len int64
+ elem Type
+}
+
+// NewArray returns a new array type for the given element type and length.
+// A negative length indicates an unknown length.
+func NewArray(elem Type, len int64) *Array { return &Array{len: len, elem: elem} }
+
+// Len returns the length of array a.
+// A negative result indicates an unknown length.
+func (a *Array) Len() int64 { return a.len }
+
+// Elem returns the element type of array a.
+func (a *Array) Elem() Type { return a.elem }
+
+func (t *Array) Underlying() Type { return t }
+func (t *Array) String() string { return TypeString(t, nil) }
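As a side note (not from the CL), the small API carved out into this new file is exercised most simply through the exported constructors; a minimal sketch using only the public go/types API:

	package main

	import (
		"fmt"
		"go/types"
	)

	func main() {
		// Build the array type [10]int and read it back.
		arr := types.NewArray(types.Typ[types.Int], 10)
		fmt.Println(arr)        // [10]int (via TypeString)
		fmt.Println(arr.Len())  // 10
		fmt.Println(arr.Elem()) // int
	}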
diff --git a/src/go/types/assignments.go b/src/go/types/assignments.go
index 18eae62184..595f426e10 100644
--- a/src/go/types/assignments.go
+++ b/src/go/types/assignments.go
@@ -71,7 +71,7 @@ func (check *Checker) assignment(x *operand, T Type, context string) {
}
// A generic (non-instantiated) function value cannot be assigned to a variable.
- if sig := asSignature(x.typ); sig != nil && len(sig.tparams) > 0 {
+ if sig := asSignature(x.typ); sig != nil && sig.TParams().Len() > 0 {
check.errorf(x, _Todo, "cannot use generic function %s without instantiation in %s", x, context)
}
diff --git a/src/go/types/basic.go b/src/go/types/basic.go
new file mode 100644
index 0000000000..215923f657
--- /dev/null
+++ b/src/go/types/basic.go
@@ -0,0 +1,82 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+// BasicKind describes the kind of basic type.
+type BasicKind int
+
+const (
+ Invalid BasicKind = iota // type is invalid
+
+ // predeclared types
+ Bool
+ Int
+ Int8
+ Int16
+ Int32
+ Int64
+ Uint
+ Uint8
+ Uint16
+ Uint32
+ Uint64
+ Uintptr
+ Float32
+ Float64
+ Complex64
+ Complex128
+ String
+ UnsafePointer
+
+ // types for untyped values
+ UntypedBool
+ UntypedInt
+ UntypedRune
+ UntypedFloat
+ UntypedComplex
+ UntypedString
+ UntypedNil
+
+ // aliases
+ Byte = Uint8
+ Rune = Int32
+)
+
+// BasicInfo is a set of flags describing properties of a basic type.
+type BasicInfo int
+
+// Properties of basic types.
+const (
+ IsBoolean BasicInfo = 1 << iota
+ IsInteger
+ IsUnsigned
+ IsFloat
+ IsComplex
+ IsString
+ IsUntyped
+
+ IsOrdered = IsInteger | IsFloat | IsString
+ IsNumeric = IsInteger | IsFloat | IsComplex
+ IsConstType = IsBoolean | IsNumeric | IsString
+)
+
+// A Basic represents a basic type.
+type Basic struct {
+ kind BasicKind
+ info BasicInfo
+ name string
+}
+
+// Kind returns the kind of basic type b.
+func (b *Basic) Kind() BasicKind { return b.kind }
+
+// Info returns information about properties of basic type b.
+func (b *Basic) Info() BasicInfo { return b.info }
+
+// Name returns the name of basic type b.
+func (b *Basic) Name() string { return b.name }
+
+func (t *Basic) Underlying() Type { return t }
+func (t *Basic) String() string { return TypeString(t, nil) }
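Similarly, a minimal sketch (not from the CL) of the BasicKind/BasicInfo API declared above, again using only the public go/types surface:

	package main

	import (
		"fmt"
		"go/types"
	)

	func main() {
		b := types.Typ[types.UntypedRune]
		fmt.Println(b.Name())                      // untyped rune
		fmt.Println(b.Kind() == types.UntypedRune) // true
		fmt.Println(b.Info()&types.IsUntyped != 0) // true
		fmt.Println(b.Info()&types.IsInteger != 0) // true: rune constants are integer-valued
	}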
diff --git a/src/go/types/builtins.go b/src/go/types/builtins.go
index 2a2d54da88..c73d94658a 100644
--- a/src/go/types/builtins.go
+++ b/src/go/types/builtins.go
@@ -179,9 +179,9 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b
mode = value
}
- case *_Sum:
- if t.is(func(t Type) bool {
- switch t := under(t).(type) {
+ case *Union:
+ if t.underIs(func(t Type) bool {
+ switch t := t.(type) {
case *Basic:
if isString(t) && id == _Len {
return true
@@ -217,19 +217,23 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b
case _Close:
// close(c)
- c := asChan(x.typ)
- if c == nil {
- check.invalidArg(x, _InvalidClose, "%s is not a channel", x)
- return
- }
- if c.dir == RecvOnly {
- check.invalidArg(x, _InvalidClose, "%s must not be a receive-only channel", x)
+ if !underIs(x.typ, func(u Type) bool {
+ uch, _ := u.(*Chan)
+ if uch == nil {
+ check.invalidOp(x, _InvalidClose, "cannot close non-channel %s", x)
+ return false
+ }
+ if uch.dir == RecvOnly {
+ check.invalidOp(x, _InvalidClose, "cannot close receive-only channel %s", x)
+ return false
+ }
+ return true
+ }) {
return
}
-
x.mode = novalue
if check.Types != nil {
- check.recordBuiltinType(call.Fun, makeSig(nil, c))
+ check.recordBuiltinType(call.Fun, makeSig(nil, x.typ))
}
case _Complex:
@@ -286,7 +290,7 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b
}
// both argument types must be identical
- if !check.identical(x.typ, y.typ) {
+ if !Identical(x.typ, y.typ) {
check.invalidArg(x, _InvalidComplex, "mismatched types %s and %s", x.typ, y.typ)
return
}
@@ -337,13 +341,15 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b
return
}
var src Type
- switch t := optype(y.typ).(type) {
+ switch t := under(y.typ).(type) {
case *Basic:
if isString(y.typ) {
src = universeByte
}
case *Slice:
src = t.elem
+ case *TypeParam:
+ check.error(x, _Todo, "copy on generic operands not yet implemented")
}
if dst == nil || src == nil {
@@ -351,7 +357,7 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b
return
}
- if !check.identical(dst, src) {
+ if !Identical(dst, src) {
check.invalidArg(x, _InvalidCopy, "arguments to copy %s and %s have different element types %s and %s", x, &y, dst, src)
return
}
@@ -363,25 +369,40 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b
x.typ = Typ[Int]
case _Delete:
- // delete(m, k)
- m := asMap(x.typ)
- if m == nil {
- check.invalidArg(x, _InvalidDelete, "%s is not a map", x)
+ // delete(map_, key)
+ // map_ must be a map type or a type parameter describing map types.
+ // The key cannot be a type parameter for now.
+ map_ := x.typ
+ var key Type
+ if !underIs(map_, func(u Type) bool {
+ map_, _ := u.(*Map)
+ if map_ == nil {
+ check.invalidArg(x, _InvalidDelete, "%s is not a map", x)
+ return false
+ }
+ if key != nil && !Identical(map_.key, key) {
+ check.invalidArg(x, _Todo, "maps of %s must have identical key types", x)
+ return false
+ }
+ key = map_.key
+ return true
+ }) {
return
}
+
arg(x, 1) // k
if x.mode == invalid {
return
}
- check.assignment(x, m.key, "argument to delete")
+ check.assignment(x, key, "argument to delete")
if x.mode == invalid {
return
}
x.mode = novalue
if check.Types != nil {
- check.recordBuiltinType(call.Fun, makeSig(nil, m, m.key))
+ check.recordBuiltinType(call.Fun, makeSig(nil, map_, key))
}
case _Imag, _Real:
@@ -464,13 +485,13 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b
var valid func(t Type) bool
valid = func(t Type) bool {
var m int
- switch t := optype(t).(type) {
+ switch t := under(t).(type) {
case *Slice:
m = 2
case *Map, *Chan:
m = 1
- case *_Sum:
- return t.is(valid)
+ case *TypeParam:
+ return t.underIs(valid)
default:
return false
}
@@ -612,19 +633,22 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b
case _Alignof:
// unsafe.Alignof(x T) uintptr
- if asTypeParam(x.typ) != nil {
- check.invalidOp(call, _Todo, "unsafe.Alignof undefined for %s", x)
- return
- }
check.assignment(x, nil, "argument to unsafe.Alignof")
if x.mode == invalid {
return
}
- x.mode = constant_
- x.val = constant.MakeInt64(check.conf.alignof(x.typ))
+ if hasVarSize(x.typ) {
+ x.mode = value
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(Typ[Uintptr], x.typ))
+ }
+ } else {
+ x.mode = constant_
+ x.val = constant.MakeInt64(check.conf.alignof(x.typ))
+ // result is constant - no need to record signature
+ }
x.typ = Typ[Uintptr]
- // result is constant - no need to record signature
case _Offsetof:
// unsafe.Offsetof(x T) uintptr, where x must be a selector
@@ -644,7 +668,7 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b
base := derefStructPtr(x.typ)
sel := selx.Sel.Name
- obj, index, indirect := check.lookupFieldOrMethod(base, false, check.pkg, sel)
+ obj, index, indirect := LookupFieldOrMethod(base, false, check.pkg, sel)
switch obj.(type) {
case nil:
check.invalidArg(x, _MissingFieldOrMethod, "%s has no single field %s", base, sel)
@@ -662,30 +686,43 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b
return
}
- // TODO(gri) Should we pass x.typ instead of base (and indirect report if derefStructPtr indirected)?
+ // TODO(gri) Should we pass x.typ instead of base (and have indirect report if derefStructPtr indirected)?
check.recordSelection(selx, FieldVal, base, obj, index, false)
- offs := check.conf.offsetof(base, index)
- x.mode = constant_
- x.val = constant.MakeInt64(offs)
+ // The field offset is considered a variable even if the field is declared before
+ // the part of the struct which is variable-sized. This makes both the rules
+ // simpler and also permits (or at least doesn't prevent) a compiler from re-
+ // arranging struct fields if it wanted to.
+ if hasVarSize(base) {
+ x.mode = value
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(Typ[Uintptr], obj.Type()))
+ }
+ } else {
+ x.mode = constant_
+ x.val = constant.MakeInt64(check.conf.offsetof(base, index))
+ // result is constant - no need to record signature
+ }
x.typ = Typ[Uintptr]
- // result is constant - no need to record signature
case _Sizeof:
// unsafe.Sizeof(x T) uintptr
- if asTypeParam(x.typ) != nil {
- check.invalidOp(call, _Todo, "unsafe.Sizeof undefined for %s", x)
- return
- }
check.assignment(x, nil, "argument to unsafe.Sizeof")
if x.mode == invalid {
return
}
- x.mode = constant_
- x.val = constant.MakeInt64(check.conf.sizeof(x.typ))
+ if hasVarSize(x.typ) {
+ x.mode = value
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(Typ[Uintptr], x.typ))
+ }
+ } else {
+ x.mode = constant_
+ x.val = constant.MakeInt64(check.conf.sizeof(x.typ))
+ // result is constant - no need to record signature
+ }
x.typ = Typ[Uintptr]
- // result is constant - no need to record signature
case _Slice:
// unsafe.Slice(ptr *T, len IntegerType) []T
@@ -757,6 +794,25 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b
return true
}
+// hasVarSize reports if the size of type t is variable due to type parameters.
+func hasVarSize(t Type) bool {
+ switch t := under(t).(type) {
+ case *Array:
+ return hasVarSize(t.elem)
+ case *Struct:
+ for _, f := range t.fields {
+ if hasVarSize(f.typ) {
+ return true
+ }
+ }
+ case *TypeParam:
+ return true
+ case *Named, *Union, *top:
+ unreachable()
+ }
+ return false
+}
+
// applyTypeFunc applies f to x. If x is a type parameter,
// the result is a type parameter constrained by a new
// interface bound. The type bounds for that interface
@@ -769,9 +825,11 @@ func (check *Checker) applyTypeFunc(f func(Type) Type, x Type) Type {
// Test if t satisfies the requirements for the argument
// type and collect possible result types at the same time.
var rtypes []Type
- if !tp.Bound().is(func(x Type) bool {
- if r := f(x); r != nil {
+ var tildes []bool
+ if !tp.iface().is(func(typ Type, tilde bool) bool {
+ if r := f(typ); r != nil {
rtypes = append(rtypes, r)
+ tildes = append(tildes, tilde)
return true
}
return false
@@ -779,11 +837,14 @@ func (check *Checker) applyTypeFunc(f func(Type) Type, x Type) Type {
return nil
}
- // construct a suitable new type parameter
- tpar := NewTypeName(token.NoPos, nil /* = Universe pkg */, "<type parameter>", nil)
- ptyp := check.newTypeParam(tpar, 0, &emptyInterface) // assigns type to tpar as a side-effect
- tsum := _NewSum(rtypes)
- ptyp.bound = &Interface{types: tsum, allMethods: markComplete, allTypes: tsum}
+ // Construct a suitable new type parameter for the sum type. The
+ // type param is placed in the current package so export/import
+ // works as expected.
+ tpar := NewTypeName(token.NoPos, check.pkg, "<type parameter>", nil)
+ ptyp := check.NewTypeParam(tpar, &emptyInterface) // assigns type to tpar as a side-effect
+ ptyp.index = tp.index
+ tsum := newUnion(rtypes, tildes)
+ ptyp.bound = &Interface{complete: true, tset: &_TypeSet{types: tsum}}
return ptyp
}
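Sketching the user-visible effect of the Alignof/Offsetof/Sizeof changes above (illustrative source only; the function and parameter names are not taken from the CL): when the operand's size depends on a type parameter, the builtin now yields an ordinary, non-constant uintptr value, while ordinary operands keep producing constants.

	package p

	import "unsafe"

	func size[P any](x P) uintptr {
		// Not a constant: the size of P varies with the instantiation,
		// so the checker records a value of type uintptr instead.
		return unsafe.Sizeof(x)
	}

	const _ = unsafe.Sizeof(0) // still a constant for ordinary operands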
diff --git a/src/go/types/builtins_test.go b/src/go/types/builtins_test.go
index 11de9a1ac1..cee3d315e5 100644
--- a/src/go/types/builtins_test.go
+++ b/src/go/types/builtins_test.go
@@ -113,12 +113,15 @@ var builtinCalls = []struct {
{"Alignof", `_ = unsafe.Alignof(0)`, `invalid type`}, // constant
{"Alignof", `var x struct{}; _ = unsafe.Alignof(x)`, `invalid type`}, // constant
+ {"Alignof", `var x P; _ = unsafe.Alignof(x)`, `func(p.P₁) uintptr`},
{"Offsetof", `var x struct{f bool}; _ = unsafe.Offsetof(x.f)`, `invalid type`}, // constant
{"Offsetof", `var x struct{_ int; f bool}; _ = unsafe.Offsetof((&x).f)`, `invalid type`}, // constant
+ {"Offsetof", `var x struct{_ int; f P}; _ = unsafe.Offsetof((&x).f)`, `func(p.P₁) uintptr`},
{"Sizeof", `_ = unsafe.Sizeof(0)`, `invalid type`}, // constant
{"Sizeof", `var x struct{}; _ = unsafe.Sizeof(x)`, `invalid type`}, // constant
+ {"Sizeof", `var x P; _ = unsafe.Sizeof(x)`, `func(p.P₁) uintptr`},
{"Slice", `var p *int; _ = unsafe.Slice(p, 1)`, `func(*int, int) []int`},
{"Slice", `var p *byte; var n uintptr; _ = unsafe.Slice(p, n)`, `func(*byte, uintptr) []byte`},
@@ -151,8 +154,10 @@ func TestBuiltinSignatures(t *testing.T) {
}
}
+// parseGenericSrc in types2 is not necessary. We can just parse in testBuiltinSignature below.
+
func testBuiltinSignature(t *testing.T, name, src0, want string) {
- src := fmt.Sprintf(`package p; import "unsafe"; type _ unsafe.Pointer /* use unsafe */; func _() { %s }`, src0)
+ src := fmt.Sprintf(`package p; import "unsafe"; type _ unsafe.Pointer /* use unsafe */; func _[P any]() { %s }`, src0)
f, err := parser.ParseFile(fset, "", src, 0)
if err != nil {
t.Errorf("%s: %s", src0, err)
diff --git a/src/go/types/call.go b/src/go/types/call.go
index 631ea426c6..da2f319a4a 100644
--- a/src/go/types/call.go
+++ b/src/go/types/call.go
@@ -16,23 +16,22 @@ import (
// funcInst type-checks a function instantiation inst and returns the result in x.
// The operand x must be the evaluation of inst.X and its type must be a signature.
-func (check *Checker) funcInst(x *operand, inst *ast.IndexExpr) {
- xlist := typeparams.UnpackExpr(inst.Index)
- targs := check.typeList(xlist)
+func (check *Checker) funcInst(x *operand, ix *typeparams.IndexExpr) {
+ targs := check.typeList(ix.Indices)
if targs == nil {
x.mode = invalid
- x.expr = inst
+ x.expr = ix.Orig
return
}
- assert(len(targs) == len(xlist))
+ assert(len(targs) == len(ix.Indices))
// check number of type arguments (got) vs number of type parameters (want)
sig := x.typ.(*Signature)
- got, want := len(targs), len(sig.tparams)
+ got, want := len(targs), sig.TParams().Len()
if got > want {
- check.errorf(xlist[got-1], _Todo, "got %d type arguments but want %d", got, want)
+ check.errorf(ix.Indices[got-1], _Todo, "got %d type arguments but want %d", got, want)
x.mode = invalid
- x.expr = inst
+ x.expr = ix.Orig
return
}
@@ -40,11 +39,11 @@ func (check *Checker) funcInst(x *operand, inst *ast.IndexExpr) {
inferred := false
if got < want {
- targs = check.infer(inst, sig.tparams, targs, nil, nil, true)
+ targs = check.infer(ix.Orig, sig.TParams().list(), targs, nil, nil, true)
if targs == nil {
// error was already reported
x.mode = invalid
- x.expr = inst
+ x.expr = ix.Orig
return
}
got = len(targs)
@@ -55,34 +54,36 @@ func (check *Checker) funcInst(x *operand, inst *ast.IndexExpr) {
// determine argument positions (for error reporting)
// TODO(rFindley) use a positioner here? instantiate would need to be
// updated accordingly.
- poslist := make([]token.Pos, len(xlist))
- for i, x := range xlist {
+ poslist := make([]token.Pos, len(ix.Indices))
+ for i, x := range ix.Indices {
poslist[i] = x.Pos()
}
// instantiate function signature
- res := check.instantiate(x.Pos(), sig, targs, poslist).(*Signature)
- assert(res.tparams == nil) // signature is not generic anymore
+ res := check.Instantiate(x.Pos(), sig, targs, poslist, true).(*Signature)
+ assert(res.TParams().Len() == 0) // signature is not generic anymore
if inferred {
- check.recordInferred(inst, targs, res)
+ check.recordInferred(ix.Orig, targs, res)
}
x.typ = res
x.mode = value
- x.expr = inst
+ x.expr = ix.Orig
}
func (check *Checker) callExpr(x *operand, call *ast.CallExpr) exprKind {
- var inst *ast.IndexExpr
- if iexpr, _ := call.Fun.(*ast.IndexExpr); iexpr != nil {
- if check.indexExpr(x, iexpr) {
+ ix := typeparams.UnpackIndexExpr(call.Fun)
+ if ix != nil {
+ if check.indexExpr(x, ix) {
// Delay function instantiation to argument checking,
// where we combine type and value arguments for type
// inference.
assert(x.mode == value)
- inst = iexpr
+ } else {
+ ix = nil
}
- x.expr = iexpr
+ x.expr = call.Fun
check.record(x)
+
} else {
check.exprOrType(x, call.Fun)
}
@@ -108,8 +109,7 @@ func (check *Checker) callExpr(x *operand, call *ast.CallExpr) exprKind {
break
}
if t := asInterface(T); t != nil {
- check.completeInterface(token.NoPos, t)
- if t._IsConstraint() {
+ if t.IsConstraint() {
check.errorf(call, _Todo, "cannot use interface %s in conversion (contains type list or is comparable)", T)
break
}
@@ -149,21 +149,20 @@ func (check *Checker) callExpr(x *operand, call *ast.CallExpr) exprKind {
// evaluate type arguments, if any
var targs []Type
- if inst != nil {
- xlist := typeparams.UnpackExpr(inst.Index)
- targs = check.typeList(xlist)
+ if ix != nil {
+ targs = check.typeList(ix.Indices)
if targs == nil {
check.use(call.Args...)
x.mode = invalid
x.expr = call
return statement
}
- assert(len(targs) == len(xlist))
+ assert(len(targs) == len(ix.Indices))
// check number of type arguments (got) vs number of type parameters (want)
- got, want := len(targs), len(sig.tparams)
+ got, want := len(targs), sig.TParams().Len()
if got > want {
- check.errorf(xlist[want], _Todo, "got %d type arguments but want %d", got, want)
+ check.errorf(ix.Indices[want], _Todo, "got %d type arguments but want %d", got, want)
check.use(call.Args...)
x.mode = invalid
x.expr = call
@@ -195,7 +194,7 @@ func (check *Checker) callExpr(x *operand, call *ast.CallExpr) exprKind {
// if type inference failed, a parametrized result must be invalidated
// (operands cannot have a parametrized type)
- if x.mode == value && len(sig.tparams) > 0 && isParameterized(sig.tparams, x.typ) {
+ if x.mode == value && sig.TParams().Len() > 0 && isParameterized(sig.TParams().list(), x.typ) {
x.mode = invalid
}
@@ -325,24 +324,24 @@ func (check *Checker) arguments(call *ast.CallExpr, sig *Signature, targs []Type
}
// infer type arguments and instantiate signature if necessary
- if len(sig.tparams) > 0 {
+ if sig.TParams().Len() > 0 {
// TODO(gri) provide position information for targs so we can feed
// it to the instantiate call for better error reporting
- targs := check.infer(call, sig.tparams, targs, sigParams, args, true)
+ targs := check.infer(call, sig.TParams().list(), targs, sigParams, args, true)
if targs == nil {
return // error already reported
}
// compute result signature
- rsig = check.instantiate(call.Pos(), sig, targs, nil).(*Signature)
- assert(rsig.tparams == nil) // signature is not generic anymore
+ rsig = check.Instantiate(call.Pos(), sig, targs, nil, true).(*Signature)
+ assert(rsig.TParams().Len() == 0) // signature is not generic anymore
check.recordInferred(call, targs, rsig)
// Optimization: Only if the parameter list was adjusted do we
// need to compute it from the adjusted list; otherwise we can
// simply use the result signature's parameter list.
if adjusted {
- sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(sig.tparams, targs)).(*Tuple)
+ sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(sig.TParams().list(), targs)).(*Tuple)
} else {
sigParams = rsig.params
}
@@ -471,7 +470,7 @@ func (check *Checker) selector(x *operand, e *ast.SelectorExpr) {
check.instantiatedOperand(x)
- obj, index, indirect = check.lookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel)
+ obj, index, indirect = LookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel)
if obj == nil {
switch {
case index != nil:
@@ -483,11 +482,10 @@ func (check *Checker) selector(x *operand, e *ast.SelectorExpr) {
var why string
if tpar := asTypeParam(x.typ); tpar != nil {
// Type parameter bounds don't specify fields, so don't mention "field".
- switch obj := tpar.Bound().obj.(type) {
- case nil:
+ if tname := tpar.iface().obj; tname != nil {
+ why = check.sprintf("interface %s has no method %s", tname.name, sel)
+ } else {
why = check.sprintf("type bound for %s has no method %s", x.typ, sel)
- case *TypeName:
- why = check.sprintf("interface %s has no method %s", obj.name, sel)
}
} else {
why = check.sprintf("type %s has no field or method %s", x.typ, sel)
@@ -501,7 +499,7 @@ func (check *Checker) selector(x *operand, e *ast.SelectorExpr) {
} else {
changeCase = string(unicode.ToUpper(r)) + sel[1:]
}
- if obj, _, _ = check.lookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, changeCase); obj != nil {
+ if obj, _, _ = LookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, changeCase); obj != nil {
why += ", but does have " + changeCase
}
}
@@ -519,7 +517,7 @@ func (check *Checker) selector(x *operand, e *ast.SelectorExpr) {
// the signature accordingly.
// TODO(gri) factor this code out
sig := m.typ.(*Signature)
- if len(sig.rparams) > 0 {
+ if sig.RParams().Len() > 0 {
// For inference to work, we must use the receiver type
// matching the receiver in the actual method declaration.
// If the method is embedded, the matching receiver is the
@@ -547,7 +545,7 @@ func (check *Checker) selector(x *operand, e *ast.SelectorExpr) {
// the receiver type arguments here, the receiver must be otherwise invalid
// and an error has been reported elsewhere.
arg := operand{mode: variable, expr: x.expr, typ: recv}
- targs := check.infer(m, sig.rparams, nil, NewTuple(sig.recv), []*operand{&arg}, false /* no error reporting */)
+ targs := check.infer(m, sig.RParams().list(), nil, NewTuple(sig.recv), []*operand{&arg}, false /* no error reporting */)
if targs == nil {
// We may reach here if there were other errors (see issue #40056).
goto Error
@@ -556,7 +554,7 @@ func (check *Checker) selector(x *operand, e *ast.SelectorExpr) {
// (If we modify m, some tests will fail; possibly because the m is in use.)
// TODO(gri) investigate and provide a correct explanation here
copy := *m
- copy.typ = check.subst(e.Pos(), m.typ, makeSubstMap(sig.rparams, targs))
+ copy.typ = check.subst(e.Pos(), m.typ, makeSubstMap(sig.RParams().list(), targs))
obj = &copy
}
// TODO(gri) we also need to do substitution for parameterized interface methods
@@ -575,17 +573,37 @@ func (check *Checker) selector(x *operand, e *ast.SelectorExpr) {
check.recordSelection(e, MethodExpr, x.typ, m, index, indirect)
+ sig := m.typ.(*Signature)
+ if sig.recv == nil {
+ check.error(e, _InvalidDeclCycle, "illegal cycle in method declaration")
+ goto Error
+ }
+
// the receiver type becomes the type of the first function
// argument of the method expression's function type
var params []*Var
- sig := m.typ.(*Signature)
if sig.params != nil {
params = sig.params.vars
}
+ // Be consistent about named/unnamed parameters. This is not needed
+ // for type-checking, but the newly constructed signature may appear
+ // in an error message and then have mixed named/unnamed parameters.
+ // (An alternative would be to not print parameter names in errors,
+ // but it's useful to see them; this is cheap and method expressions
+ // are rare.)
+ name := ""
+ if len(params) > 0 && params[0].name != "" {
+ // name needed
+ name = sig.recv.name
+ if name == "" {
+ name = "_"
+ }
+ }
+ params = append([]*Var{NewVar(sig.recv.pos, sig.recv.pkg, name, x.typ)}, params...)
x.mode = value
x.typ = &Signature{
tparams: sig.tparams,
- params: NewTuple(append([]*Var{NewVar(token.NoPos, check.pkg, "_", x.typ)}, params...)...),
+ params: NewTuple(params...),
results: sig.results,
variadic: sig.variadic,
}
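A short sketch (illustrative only; the names T, M, and recv are made up) of the method-expression change above: the synthesized first parameter of the method expression's signature now reuses the declared receiver name whenever the remaining parameters are named, instead of always being the blank "_".

	package p

	type T struct{}

	func (recv T) M(x int) {}

	// The type of T.M is now reported as func(recv T, x int)
	// rather than func(_ T, x int) with a mismatched blank receiver name.
	var _ func(T, int) = T.M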
diff --git a/src/go/types/chan.go b/src/go/types/chan.go
new file mode 100644
index 0000000000..1f7b72be30
--- /dev/null
+++ b/src/go/types/chan.go
@@ -0,0 +1,35 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+// A Chan represents a channel type.
+type Chan struct {
+ dir ChanDir
+ elem Type
+}
+
+// A ChanDir value indicates a channel direction.
+type ChanDir int
+
+// The direction of a channel is indicated by one of these constants.
+const (
+ SendRecv ChanDir = iota
+ SendOnly
+ RecvOnly
+)
+
+// NewChan returns a new channel type for the given direction and element type.
+func NewChan(dir ChanDir, elem Type) *Chan {
+ return &Chan{dir: dir, elem: elem}
+}
+
+// Dir returns the direction of channel c.
+func (c *Chan) Dir() ChanDir { return c.dir }
+
+// Elem returns the element type of channel c.
+func (c *Chan) Elem() Type { return c.elem }
+
+func (t *Chan) Underlying() Type { return t }
+func (t *Chan) String() string { return TypeString(t, nil) }
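And a minimal sketch (not from the CL) of the channel constructors split out above:

	package main

	import (
		"fmt"
		"go/types"
	)

	func main() {
		// Build chan<- string with the constructor above.
		ch := types.NewChan(types.SendOnly, types.Typ[types.String])
		fmt.Println(ch)                         // chan<- string
		fmt.Println(ch.Dir() == types.SendOnly) // true
		fmt.Println(ch.Elem())                  // string
	}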
diff --git a/src/go/types/check.go b/src/go/types/check.go
index a923c3c612..b2d076dc68 100644
--- a/src/go/types/check.go
+++ b/src/go/types/check.go
@@ -73,7 +73,7 @@ type importKey struct {
// A dotImportKey describes a dot-imported object in the given scope.
type dotImportKey struct {
scope *Scope
- obj Object
+ name string
}
// A Checker maintains the state of the type checker.
@@ -85,11 +85,11 @@ type Checker struct {
fset *token.FileSet
pkg *Package
*Info
- version version // accepted language version
- objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info
- impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package
- posMap map[*Interface][]token.Pos // maps interface types to lists of embedded interface positions
- typMap map[string]*Named // maps an instantiated named type hash to a *Named type
+ version version // accepted language version
+ nextID uint64 // unique Id for type parameters (first valid Id is 1)
+ objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info
+ impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package
+ typMap map[string]*Named // maps an instantiated named type hash to a *Named type
// pkgPathMap maps package names to the set of distinct import paths we've
// seen for that name, anywhere in the import graph. It is used for
@@ -179,9 +179,9 @@ func NewChecker(conf *Config, fset *token.FileSet, pkg *Package, info *Info) *Ch
info = new(Info)
}
- version, err := parseGoVersion(conf.goVersion)
+ version, err := parseGoVersion(conf.GoVersion)
if err != nil {
- panic(fmt.Sprintf("invalid Go version %q (%v)", conf.goVersion, err))
+ panic(fmt.Sprintf("invalid Go version %q (%v)", conf.GoVersion, err))
}
return &Checker{
@@ -192,7 +192,6 @@ func NewChecker(conf *Config, fset *token.FileSet, pkg *Package, info *Info) *Ch
version: version,
objMap: make(map[Object]*declInfo),
impMap: make(map[importKey]*Package),
- posMap: make(map[*Interface][]token.Pos),
typMap: make(map[string]*Named),
}
}
@@ -274,10 +273,6 @@ func (check *Checker) checkFiles(files []*ast.File) (err error) {
check.recordUntyped()
- if check.Info != nil {
- sanitizeInfo(check.Info)
- }
-
check.pkg.complete = true
// no longer needed - release memory
@@ -411,8 +406,8 @@ func (check *Checker) recordCommaOkTypes(x ast.Expr, a [2]Type) {
func (check *Checker) recordInferred(call ast.Expr, targs []Type, sig *Signature) {
assert(call != nil)
assert(sig != nil)
- if m := getInferred(check.Info); m != nil {
- m[call] = _Inferred{targs, sig}
+ if m := check.Info.Inferred; m != nil {
+ m[call] = Inferred{targs, sig}
}
}
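Since the checker now reads the exported Config.GoVersion field rather than an unexported hook, clients can pin the accepted language version directly; a minimal sketch assuming the field as introduced in this change:

	package main

	import (
		"fmt"
		"go/types"
	)

	func main() {
		// The accepted language version is now part of the public Config.
		conf := types.Config{GoVersion: "go1.17"}
		fmt.Println(conf.GoVersion)
	}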
diff --git a/src/go/types/check_test.go b/src/go/types/check_test.go
index f83abf11ce..8c8452c9c6 100644
--- a/src/go/types/check_test.go
+++ b/src/go/types/check_test.go
@@ -202,15 +202,13 @@ func asGoVersion(s string) string {
return ""
}
-func checkFiles(t *testing.T, sizes Sizes, goVersion string, filenames []string, srcs [][]byte, manual bool, imp Importer) {
+func testFiles(t *testing.T, sizes Sizes, filenames []string, srcs [][]byte, manual bool, imp Importer) {
if len(filenames) == 0 {
t.Fatal("no source files")
}
- if strings.HasSuffix(filenames[0], ".go2") && !typeparams.Enabled {
- t.Skip("type params are not enabled")
- }
- if strings.HasSuffix(filenames[0], ".go1") && typeparams.Enabled {
+ if strings.HasSuffix(filenames[0], ".go1") {
+ // TODO(rfindley): re-enable this test by using GoVersion.
t.Skip("type params are enabled")
}
@@ -228,6 +226,7 @@ func checkFiles(t *testing.T, sizes Sizes, goVersion string, filenames []string,
}
// if no Go version is given, consider the package name
+ goVersion := *goVersion
if goVersion == "" {
goVersion = asGoVersion(pkgName)
}
@@ -243,7 +242,7 @@ func checkFiles(t *testing.T, sizes Sizes, goVersion string, filenames []string,
// typecheck and collect typechecker errors
var conf Config
conf.Sizes = sizes
- SetGoVersion(&conf, goVersion)
+ conf.GoVersion = goVersion
// special case for importC.src
if len(filenames) == 1 {
@@ -303,29 +302,48 @@ func checkFiles(t *testing.T, sizes Sizes, goVersion string, filenames []string,
}
}
-// TestManual is for manual testing of input files, provided as a list
-// of arguments after the test arguments (and a separating "--"). For
-// instance, to check the files foo.go and bar.go, use:
+// TestManual is for manual testing of a package - either provided
+// as a list of filenames belonging to the package, or a directory
+// name containing the package files - after the test arguments
+// (and a separating "--"). For instance, to test the package made
+// of the files foo.go and bar.go, use:
//
// go test -run Manual -- foo.go bar.go
//
-// Provide the -verify flag to verify errors against ERROR comments in
-// the input files rather than having a list of errors reported.
-// The accepted Go language version can be controlled with the -lang flag.
+// If no source arguments are provided, the file testdata/manual.go2
+// is used instead.
+// Provide the -verify flag to verify errors against ERROR comments
+// in the input files rather than having a list of errors reported.
+// The accepted Go language version can be controlled with the -lang
+// flag.
func TestManual(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
filenames := flag.Args()
if len(filenames) == 0 {
- return
+ filenames = []string{filepath.FromSlash("testdata/manual.go2")}
}
- testenv.MustHaveGoBuild(t)
+
+ info, err := os.Stat(filenames[0])
+ if err != nil {
+ t.Fatalf("TestManual: %v", err)
+ }
+
DefPredeclaredTestFuncs()
- testPkg(t, filenames, *goVersion, true)
+ if info.IsDir() {
+ if len(filenames) > 1 {
+ t.Fatal("TestManual: must have only one directory argument")
+ }
+ testDir(t, filenames[0], true)
+ } else {
+ testPkg(t, filenames, true)
+ }
}
func TestLongConstants(t *testing.T) {
format := "package longconst\n\nconst _ = %s\nconst _ = %s // ERROR excessively long constant"
src := fmt.Sprintf(format, strings.Repeat("1", 9999), strings.Repeat("1", 10001))
- checkFiles(t, nil, "", []string{"longconst.go"}, [][]byte{[]byte(src)}, false, nil)
+ testFiles(t, nil, []string{"longconst.go"}, [][]byte{[]byte(src)}, false, nil)
}
// TestIndexRepresentability tests that constant index operands must
@@ -333,32 +351,24 @@ func TestLongConstants(t *testing.T) {
// represent larger values.
func TestIndexRepresentability(t *testing.T) {
const src = "package index\n\nvar s []byte\nvar _ = s[int64 /* ERROR \"int64\\(1\\) << 40 \\(.*\\) overflows int\" */ (1) << 40]"
- checkFiles(t, &StdSizes{4, 4}, "", []string{"index.go"}, [][]byte{[]byte(src)}, false, nil)
-}
-
-func TestIssue46453(t *testing.T) {
- if typeparams.Enabled {
- t.Skip("type params are enabled")
- }
- const src = "package p\ntype _ comparable // ERROR \"undeclared name: comparable\""
- checkFiles(t, nil, "", []string{"issue46453.go"}, [][]byte{[]byte(src)}, false, nil)
+ testFiles(t, &StdSizes{4, 4}, []string{"index.go"}, [][]byte{[]byte(src)}, false, nil)
}
func TestIssue47243_TypedRHS(t *testing.T) {
// The RHS of the shift expression below overflows uint on 32bit platforms,
// but this is OK as it is explicitly typed.
const src = "package issue47243\n\nvar a uint64; var _ = a << uint64(4294967296)" // uint64(1<<32)
- checkFiles(t, &StdSizes{4, 4}, "", []string{"p.go"}, [][]byte{[]byte(src)}, false, nil)
+ testFiles(t, &StdSizes{4, 4}, []string{"p.go"}, [][]byte{[]byte(src)}, false, nil)
}
-func TestCheck(t *testing.T) { DefPredeclaredTestFuncs(); testDir(t, "check") }
-func TestExamples(t *testing.T) { testDir(t, "examples") }
-func TestFixedbugs(t *testing.T) { testDir(t, "fixedbugs") }
+func TestCheck(t *testing.T) { DefPredeclaredTestFuncs(); testDirFiles(t, "testdata/check", false) }
+func TestExamples(t *testing.T) { testDirFiles(t, "testdata/examples", false) }
+func TestFixedbugs(t *testing.T) { testDirFiles(t, "testdata/fixedbugs", false) }
-func testDir(t *testing.T, dir string) {
+func testDirFiles(t *testing.T, dir string, manual bool) {
testenv.MustHaveGoBuild(t)
+ dir = filepath.FromSlash(dir)
- dir = filepath.Join("testdata", dir)
fis, err := os.ReadDir(dir)
if err != nil {
t.Error(err)
@@ -368,28 +378,38 @@ func testDir(t *testing.T, dir string) {
for _, fi := range fis {
path := filepath.Join(dir, fi.Name())
- // if fi is a directory, its files make up a single package
- var filenames []string
+ // If fi is a directory, its files make up a single package.
if fi.IsDir() {
- fis, err := os.ReadDir(path)
- if err != nil {
- t.Error(err)
- continue
- }
- for _, fi := range fis {
- filenames = append(filenames, filepath.Join(path, fi.Name()))
- }
+ testDir(t, path, manual)
} else {
- filenames = []string{path}
+ t.Run(filepath.Base(path), func(t *testing.T) {
+ testPkg(t, []string{path}, manual)
+ })
}
- t.Run(filepath.Base(path), func(t *testing.T) {
- testPkg(t, filenames, "", false)
- })
}
}
+func testDir(t *testing.T, dir string, manual bool) {
+ testenv.MustHaveGoBuild(t)
+
+ fis, err := os.ReadDir(dir)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ var filenames []string
+ for _, fi := range fis {
+ filenames = append(filenames, filepath.Join(dir, fi.Name()))
+ }
+
+ t.Run(filepath.Base(dir), func(t *testing.T) {
+ testPkg(t, filenames, manual)
+ })
+}
+
// TODO(rFindley) reconcile the different test setup in go/types with types2.
-func testPkg(t *testing.T, filenames []string, goVersion string, manual bool) {
+func testPkg(t *testing.T, filenames []string, manual bool) {
srcs := make([][]byte, len(filenames))
for i, filename := range filenames {
src, err := os.ReadFile(filename)
@@ -398,5 +418,5 @@ func testPkg(t *testing.T, filenames []string, goVersion string, manual bool) {
}
srcs[i] = src
}
- checkFiles(t, nil, goVersion, filenames, srcs, manual, nil)
+ testFiles(t, nil, filenames, srcs, manual, nil)
}
diff --git a/src/go/types/conversions.go b/src/go/types/conversions.go
index ad6d3eef10..a1fcdd4fd8 100644
--- a/src/go/types/conversions.go
+++ b/src/go/types/conversions.go
@@ -94,7 +94,7 @@ func (x *operand) convertibleTo(check *Checker, T Type, reason *string) bool {
V := x.typ
Vu := under(V)
Tu := under(T)
- if check.identicalIgnoreTags(Vu, Tu) {
+ if IdenticalIgnoreTags(Vu, Tu) {
return true
}
@@ -102,7 +102,7 @@ func (x *operand) convertibleTo(check *Checker, T Type, reason *string) bool {
// have identical underlying types if tags are ignored"
if V, ok := V.(*Pointer); ok {
if T, ok := T.(*Pointer); ok {
- if check.identicalIgnoreTags(under(V.base), under(T.base)) {
+ if IdenticalIgnoreTags(under(V.base), under(T.base)) {
return true
}
}
@@ -143,7 +143,7 @@ func (x *operand) convertibleTo(check *Checker, T Type, reason *string) bool {
if s := asSlice(V); s != nil {
if p := asPointer(T); p != nil {
if a := asArray(p.Elem()); a != nil {
- if check.identical(s.Elem(), a.Elem()) {
+ if Identical(s.Elem(), a.Elem()) {
if check == nil || check.allowVersion(check.pkg, 1, 17) {
return true
}
diff --git a/src/go/types/decl.go b/src/go/types/decl.go
index 9211febc6d..831b1da589 100644
--- a/src/go/types/decl.go
+++ b/src/go/types/decl.go
@@ -317,6 +317,7 @@ func (check *Checker) validType(typ Type, path []Object) typeInfo {
}
case *Named:
+ t.expand()
// don't touch the type if it is from a different package or the Universe scope
// (doing so would lead to a race condition - was issue #35049)
if t.obj.pkg != check.pkg {
@@ -333,7 +334,7 @@ func (check *Checker) validType(typ Type, path []Object) typeInfo {
switch t.info {
case unknown:
t.info = marked
- t.info = check.validType(t.orig, append(path, t.obj)) // only types of current package added to path
+ t.info = check.validType(t.fromRHS, append(path, t.obj)) // only types of current package added to path
case marked:
// cycle detected
for i, tn := range path {
@@ -349,9 +350,6 @@ func (check *Checker) validType(typ Type, path []Object) typeInfo {
panic("internal error: cycle start not found")
}
return t.info
-
- case *instance:
- return check.validType(t.expand(), path)
}
return valid
@@ -569,102 +567,6 @@ func (check *Checker) varDecl(obj *Var, lhs []*Var, typ, init ast.Expr) {
check.initVars(lhs, []ast.Expr{init}, token.NoPos)
}
-// under returns the expanded underlying type of n0; possibly by following
-// forward chains of named types. If an underlying type is found, resolve
-// the chain by setting the underlying type for each defined type in the
-// chain before returning it. If no underlying type is found or a cycle
-// is detected, the result is Typ[Invalid]. If a cycle is detected and
-// n0.check != nil, the cycle is reported.
-func (n0 *Named) under() Type {
- u := n0.underlying
-
- if u == Typ[Invalid] {
- return u
- }
-
- // If the underlying type of a defined type is not a defined
- // (incl. instance) type, then that is the desired underlying
- // type.
- switch u.(type) {
- case nil:
- return Typ[Invalid]
- default:
- // common case
- return u
- case *Named, *instance:
- // handled below
- }
-
- if n0.check == nil {
- panic("internal error: Named.check == nil but type is incomplete")
- }
-
- // Invariant: after this point n0 as well as any named types in its
- // underlying chain should be set up when this function exits.
- check := n0.check
-
- // If we can't expand u at this point, it is invalid.
- n := asNamed(u)
- if n == nil {
- n0.underlying = Typ[Invalid]
- return n0.underlying
- }
-
- // Otherwise, follow the forward chain.
- seen := map[*Named]int{n0: 0}
- path := []Object{n0.obj}
- for {
- u = n.underlying
- if u == nil {
- u = Typ[Invalid]
- break
- }
- var n1 *Named
- switch u1 := u.(type) {
- case *Named:
- n1 = u1
- case *instance:
- n1, _ = u1.expand().(*Named)
- if n1 == nil {
- u = Typ[Invalid]
- }
- }
- if n1 == nil {
- break // end of chain
- }
-
- seen[n] = len(seen)
- path = append(path, n.obj)
- n = n1
-
- if i, ok := seen[n]; ok {
- // cycle
- check.cycleError(path[i:])
- u = Typ[Invalid]
- break
- }
- }
-
- for n := range seen {
- // We should never have to update the underlying type of an imported type;
- // those underlying types should have been resolved during the import.
- // Also, doing so would lead to a race condition (was issue #31749).
- // Do this check always, not just in debug mode (it's cheap).
- if n.obj.pkg != check.pkg {
- panic("internal error: imported type with unresolved underlying type")
- }
- n.underlying = u
- }
-
- return u
-}
-
-func (n *Named) setUnderlying(typ Type) {
- if n != nil {
- n.underlying = typ
- }
-}
-
func (check *Checker) typeDecl(obj *TypeName, tdecl *ast.TypeSpec, def *Named) {
assert(obj.typ == nil)
@@ -680,58 +582,57 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *ast.TypeSpec, def *Named) {
alias = false
}
+ // alias declaration
if alias {
- // type alias declaration
if !check.allowVersion(check.pkg, 1, 9) {
check.errorf(atPos(tdecl.Assign), _BadDecl, "type aliases requires go1.9 or later")
}
obj.typ = Typ[Invalid]
obj.typ = check.anyType(tdecl.Type)
+ return
+ }
- } else {
- // defined type declaration
-
- named := check.newNamed(obj, nil, nil)
- def.setUnderlying(named)
- obj.typ = named // make sure recursive type declarations terminate
-
- if tparams := typeparams.Get(tdecl); tparams != nil {
- check.openScope(tdecl, "type parameters")
- defer check.closeScope()
- named.tparams = check.collectTypeParams(tparams)
- }
-
- // determine underlying type of named
- named.orig = check.definedType(tdecl.Type, named)
+ // type definition or generic type declaration
+ named := check.newNamed(obj, nil, nil, nil, nil)
+ def.setUnderlying(named)
- // The underlying type of named may be itself a named type that is
- // incomplete:
- //
- // type (
- // A B
- // B *C
- // C A
- // )
- //
- // The type of C is the (named) type of A which is incomplete,
- // and which has as its underlying type the named type B.
- // Determine the (final, unnamed) underlying type by resolving
- // any forward chain.
- // TODO(gri) Investigate if we can just use named.origin here
- // and rely on lazy computation of the underlying type.
- named.underlying = under(named)
+ if tparams := typeparams.Get(tdecl); tparams != nil {
+ check.openScope(tdecl, "type parameters")
+ defer check.closeScope()
+ named.tparams = check.collectTypeParams(tparams)
}
-}
+ // determine underlying type of named
+ named.fromRHS = check.definedType(tdecl.Type, named)
+ assert(named.fromRHS != nil)
-func (check *Checker) collectTypeParams(list *ast.FieldList) (tparams []*TypeName) {
- // Type parameter lists should not be empty. The parser will
- // complain but we still may get an incorrect AST: ignore it.
- if list.NumFields() == 0 {
- return
+ // The underlying type of named may be itself a named type that is
+ // incomplete:
+ //
+ // type (
+ // A B
+ // B *C
+ // C A
+ // )
+ //
+ // The type of C is the (named) type of A which is incomplete,
+ // and which has as its underlying type the named type B.
+ // Determine the (final, unnamed) underlying type by resolving
+ // any forward chain.
+ // TODO(gri) Investigate if we can just use named.fromRHS here
+ // and rely on lazy computation of the underlying type.
+ named.underlying = under(named)
+
+ // If the RHS is a type parameter, it must be from this type declaration.
+ if tpar, _ := named.underlying.(*TypeParam); tpar != nil && tparamIndex(named.TParams().list(), tpar) < 0 {
+ check.errorf(tdecl.Type, _Todo, "cannot use function type parameter %s as RHS in type declaration", tpar)
+ named.underlying = Typ[Invalid]
}
+}
+func (check *Checker) collectTypeParams(list *ast.FieldList) *TypeParams {
+ var tparams []*TypeName
// Declare type parameters up-front, with empty interface as type bound.
// The scope of type parameters starts at the beginning of the type parameter
// list (so we can have mutually recursive parameterized interfaces).
@@ -739,52 +640,28 @@ func (check *Checker) collectTypeParams(list *ast.FieldList) (tparams []*TypeNam
tparams = check.declareTypeParams(tparams, f.Names)
}
- setBoundAt := func(at int, bound Type) {
- assert(IsInterface(bound))
- tparams[at].typ.(*_TypeParam).bound = bound
- }
-
index := 0
var bound Type
for _, f := range list.List {
if f.Type == nil {
goto next
}
-
- // The predeclared identifier "any" is visible only as a constraint
- // in a type parameter list. Look for it before general constraint
- // resolution.
- if tident, _ := unparen(f.Type).(*ast.Ident); tident != nil && tident.Name == "any" && check.lookup("any") == nil {
- bound = universeAny
- } else {
- bound = check.typ(f.Type)
- }
-
- // type bound must be an interface
- // TODO(gri) We should delay the interface check because
- // we may not have a complete interface yet:
- // type C(type T C) interface {}
- // (issue #39724).
- if _, ok := under(bound).(*Interface); ok {
- // Otherwise, set the bound for each type parameter.
- for i := range f.Names {
- setBoundAt(index+i, bound)
- }
- } else if bound != Typ[Invalid] {
- check.errorf(f.Type, _Todo, "%s is not an interface", bound)
+ bound = check.boundType(f.Type)
+ for i := range f.Names {
+ tparams[index+i].typ.(*TypeParam).bound = bound
}
next:
index += len(f.Names)
}
- return
+ return bindTParams(tparams)
}
func (check *Checker) declareTypeParams(tparams []*TypeName, names []*ast.Ident) []*TypeName {
for _, name := range names {
tpar := NewTypeName(name.Pos(), check.pkg, name.Name, nil)
- check.newTypeParam(tpar, len(tparams), &emptyInterface) // assigns type to tpar as a side-effect
+ check.NewTypeParam(tpar, &emptyInterface) // assigns type to tpar as a side-effect
check.declare(check.scope, name, tpar, check.scope.pos) // TODO(gri) check scope position
tparams = append(tparams, tpar)
}
@@ -796,6 +673,25 @@ func (check *Checker) declareTypeParams(tparams []*TypeName, names []*ast.Ident)
return tparams
}
+// boundType type-checks the type expression e and returns its type, or Typ[Invalid].
+// The type must be an interface, including the predeclared type "any".
+func (check *Checker) boundType(e ast.Expr) Type {
+ // The predeclared identifier "any" is visible only as a type bound in a type parameter list.
+ // If we allow "any" for general use, this if-statement can be removed (issue #33232).
+ if name, _ := unparen(e).(*ast.Ident); name != nil && name.Name == "any" && check.lookup("any") == universeAny {
+ return universeAny.Type()
+ }
+
+ bound := check.typ(e)
+ check.later(func() {
+ u := under(bound)
+ if _, ok := u.(*Interface); !ok && u != Typ[Invalid] {
+ check.errorf(e, _Todo, "%s is not an interface", bound)
+ }
+ })
+ return bound
+}
+
func (check *Checker) collectMethods(obj *TypeName) {
// get associated methods
// (Checker.collectObjects only collects methods with non-blank names;
@@ -815,7 +711,7 @@ func (check *Checker) collectMethods(obj *TypeName) {
// and field names must be distinct."
base := asNamed(obj.typ) // shouldn't fail but be conservative
if base != nil {
- if t, _ := base.underlying.(*Struct); t != nil {
+ if t, _ := base.Underlying().(*Struct); t != nil {
for _, fld := range t.fields {
if fld.name != "_" {
assert(mset.insert(fld) == nil)
@@ -851,6 +747,7 @@ func (check *Checker) collectMethods(obj *TypeName) {
}
if base != nil {
+ base.load() // TODO(mdempsky): Probably unnecessary.
base.methods = append(base.methods, m)
}
}
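To illustrate the new restriction enforced at the end of typeDecl above (sketch only; the names f, T, and local are made up and the error wording is paraphrased from the message in the hunk), a declared type may not use an enclosing function's type parameter as its right-hand side:

	package p

	func f[T any]() {
		type local T // rejected: cannot use function type parameter T as RHS in type declaration
	}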
diff --git a/src/go/types/errorcodes.go b/src/go/types/errorcodes.go
index 3d24da7b53..bcc850f753 100644
--- a/src/go/types/errorcodes.go
+++ b/src/go/types/errorcodes.go
@@ -281,16 +281,7 @@ const (
_IncomparableMapKey
// _InvalidIfaceEmbed occurs when a non-interface type is embedded in an
- // interface.
- //
- // Example:
- // type T struct {}
- //
- // func (T) m()
- //
- // type I interface {
- // T
- // }
+ // interface (for go 1.17 or earlier).
_InvalidIfaceEmbed
// _InvalidPtrEmbed occurs when an embedded field is of the pointer form *T,
@@ -884,7 +875,7 @@ const (
// context in which it is used.
//
// Example:
- // var _ = 1 + ""
+ // var _ = 1 + nil
_InvalidUntypedConversion
// _BadOffsetofSyntax occurs when unsafe.Offsetof is called with an argument
diff --git a/src/go/types/expr.go b/src/go/types/expr.go
index 58962e777b..c9a55aa871 100644
--- a/src/go/types/expr.go
+++ b/src/go/types/expr.go
@@ -144,6 +144,14 @@ var op2str2 = [...]string{
token.SHL: "shift",
}
+func underIs(typ Type, f func(Type) bool) bool {
+ u := under(typ)
+ if tpar, _ := u.(*TypeParam); tpar != nil {
+ return tpar.underIs(f)
+ }
+ return f(u)
+}
+
// The unary expression e may be nil. It's passed in for better error messages only.
func (check *Checker) unary(x *operand, e *ast.UnaryExpr) {
check.expr(x, e.X)
@@ -164,19 +172,29 @@ func (check *Checker) unary(x *operand, e *ast.UnaryExpr) {
return
case token.ARROW:
- typ := asChan(x.typ)
- if typ == nil {
- check.invalidOp(x, _InvalidReceive, "cannot receive from non-channel %s", x)
- x.mode = invalid
- return
- }
- if typ.dir == SendOnly {
- check.invalidOp(x, _InvalidReceive, "cannot receive from send-only channel %s", x)
+ var elem Type
+ if !underIs(x.typ, func(u Type) bool {
+ ch, _ := u.(*Chan)
+ if ch == nil {
+ check.invalidOp(x, _InvalidReceive, "cannot receive from non-channel %s", x)
+ return false
+ }
+ if ch.dir == SendOnly {
+ check.invalidOp(x, _InvalidReceive, "cannot receive from send-only channel %s", x)
+ return false
+ }
+ if elem != nil && !Identical(ch.elem, elem) {
+ check.invalidOp(x, _Todo, "channels of %s must have the same element type", x)
+ return false
+ }
+ elem = ch.elem
+ return true
+ }) {
x.mode = invalid
return
}
x.mode = commaok
- x.typ = typ.elem
+ x.typ = elem
check.hasCallOrRecv = true
return
}
@@ -622,7 +640,7 @@ func (check *Checker) implicitTypeAndValue(x *operand, target Type) (Type, const
return x.typ, nil, 0
}
- switch t := optype(target).(type) {
+ switch t := under(target).(type) {
case *Basic:
if x.mode == constant_ {
v, code := check.representation(x, t)
@@ -661,8 +679,8 @@ func (check *Checker) implicitTypeAndValue(x *operand, target Type) (Type, const
default:
return nil, nil, _InvalidUntypedConversion
}
- case *_Sum:
- ok := t.is(func(t Type) bool {
+ case *TypeParam:
+ ok := t.underIs(func(t Type) bool {
target, _, _ := check.implicitTypeAndValue(x, t)
return target != nil
})
@@ -682,7 +700,6 @@ func (check *Checker) implicitTypeAndValue(x *operand, target Type) (Type, const
return Typ[UntypedNil], nil, 0
}
// cannot assign untyped values to non-empty interfaces
- check.completeInterface(token.NoPos, t)
if !t.Empty() {
return nil, nil, _InvalidUntypedConversion
}
@@ -943,14 +960,28 @@ func (check *Checker) binary(x *operand, e ast.Expr, lhs, rhs ast.Expr, op token
return
}
- check.convertUntyped(x, y.typ)
- if x.mode == invalid {
- return
+ canMix := func(x, y *operand) bool {
+ if IsInterface(x.typ) || IsInterface(y.typ) {
+ return true
+ }
+ if isBoolean(x.typ) != isBoolean(y.typ) {
+ return false
+ }
+ if isString(x.typ) != isString(y.typ) {
+ return false
+ }
+ return true
}
- check.convertUntyped(&y, x.typ)
- if y.mode == invalid {
- x.mode = invalid
- return
+ if canMix(x, &y) {
+ check.convertUntyped(x, y.typ)
+ if x.mode == invalid {
+ return
+ }
+ check.convertUntyped(&y, x.typ)
+ if y.mode == invalid {
+ x.mode = invalid
+ return
+ }
}
if isComparison(op) {
@@ -958,7 +989,7 @@ func (check *Checker) binary(x *operand, e ast.Expr, lhs, rhs ast.Expr, op token
return
}
- if !check.identical(x.typ, y.typ) {
+ if !Identical(x.typ, y.typ) {
// only report an error if we have valid types
// (otherwise we had an error reported elsewhere already)
if x.typ != Typ[Invalid] && y.typ != Typ[Invalid] {
@@ -1154,7 +1185,7 @@ func (check *Checker) exprInternal(x *operand, e ast.Expr, hint Type) exprKind {
goto Error
}
- switch utyp := optype(base).(type) {
+ switch utyp := under(base).(type) {
case *Struct:
if len(e.Elts) == 0 {
break
@@ -1284,7 +1315,7 @@ func (check *Checker) exprInternal(x *operand, e ast.Expr, hint Type) exprKind {
xkey := keyVal(x.val)
if asInterface(utyp.key) != nil {
for _, vtyp := range visited[xkey] {
- if check.identical(vtyp, x.typ) {
+ if Identical(vtyp, x.typ) {
duplicate = true
break
}
@@ -1333,9 +1364,10 @@ func (check *Checker) exprInternal(x *operand, e ast.Expr, hint Type) exprKind {
case *ast.SelectorExpr:
check.selector(x, e)
- case *ast.IndexExpr:
- if check.indexExpr(x, e) {
- check.funcInst(x, e)
+ case *ast.IndexExpr, *ast.MultiIndexExpr:
+ ix := typeparams.UnpackIndexExpr(e)
+ if check.indexExpr(x, ix) {
+ check.funcInst(x, ix)
}
if x.mode == invalid {
goto Error
@@ -1384,13 +1416,24 @@ func (check *Checker) exprInternal(x *operand, e ast.Expr, hint Type) exprKind {
case typexpr:
x.typ = &Pointer{base: x.typ}
default:
- if typ := asPointer(x.typ); typ != nil {
- x.mode = variable
- x.typ = typ.base
- } else {
- check.invalidOp(x, _InvalidIndirection, "cannot indirect %s", x)
+ var base Type
+ if !underIs(x.typ, func(u Type) bool {
+ p, _ := u.(*Pointer)
+ if p == nil {
+ check.invalidOp(x, _InvalidIndirection, "cannot indirect %s", x)
+ return false
+ }
+ if base != nil && !Identical(p.base, base) {
+ check.invalidOp(x, _Todo, "pointers of %s must have identical base types", x)
+ return false
+ }
+ base = p.base
+ return true
+ }) {
goto Error
}
+ x.mode = variable
+ x.typ = base
}
case *ast.UnaryExpr:
@@ -1425,12 +1468,7 @@ func (check *Checker) exprInternal(x *operand, e ast.Expr, hint Type) exprKind {
// types, which are comparatively rare.
default:
- if typeparams.IsListExpr(e) {
- // catch-all for unexpected expression lists
- check.errorf(e, _Todo, "unexpected list of expressions")
- } else {
- panic(fmt.Sprintf("%s: unknown expression type %T", check.fset.Position(e.Pos()), e))
- }
+ panic(fmt.Sprintf("%s: unknown expression type %T", check.fset.Position(e.Pos()), e))
}
// everything went well
@@ -1475,7 +1513,7 @@ func (check *Checker) typeAssertion(at positioner, x *operand, xtyp *Interface,
}
var msg string
if wrongType != nil {
- if check.identical(method.typ, wrongType.typ) {
+ if Identical(method.typ, wrongType.typ) {
msg = fmt.Sprintf("missing method %s (%s has pointer receiver)", method.name, method.name)
} else {
msg = fmt.Sprintf("wrong type for method %s (have %s, want %s)", method.name, wrongType.typ, method.typ)
diff --git a/src/go/types/exprstring.go b/src/go/types/exprstring.go
index f05e6424d4..aee8a5ba5f 100644
--- a/src/go/types/exprstring.go
+++ b/src/go/types/exprstring.go
@@ -67,11 +67,11 @@ func WriteExpr(buf *bytes.Buffer, x ast.Expr) {
buf.WriteByte('.')
buf.WriteString(x.Sel.Name)
- case *ast.IndexExpr:
- WriteExpr(buf, x.X)
+ case *ast.IndexExpr, *ast.MultiIndexExpr:
+ ix := typeparams.UnpackIndexExpr(x)
+ WriteExpr(buf, ix.X)
buf.WriteByte('[')
- exprs := typeparams.UnpackExpr(x.Index)
- for i, e := range exprs {
+ for i, e := range ix.Indices {
if i > 0 {
buf.WriteString(", ")
}
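Not part of the patch: a small sketch of the exported expression printer that WriteExpr above backs, using only stable go/parser and go/types API.

    package main

    import (
        "fmt"
        "go/parser"
        "go/types"
    )

    func main() {
        // Parse an index expression and print it back through go/types.
        e, err := parser.ParseExpr("m[key]")
        if err != nil {
            panic(err)
        }
        fmt.Println(types.ExprString(e)) // m[key]
    }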
diff --git a/src/go/types/exprstring_test.go b/src/go/types/exprstring_test.go
index 51102881c9..a67f6a978a 100644
--- a/src/go/types/exprstring_test.go
+++ b/src/go/types/exprstring_test.go
@@ -27,6 +27,40 @@ var testExprs = []testEntry{
{"func(x int) complex128 {}", "(func(x int) complex128 literal)"},
{"[]int{1, 2, 3}", "([]int literal)"},
+ // type expressions
+ dup("[1 << 10]byte"),
+ dup("[]int"),
+ dup("*int"),
+ dup("struct{x int}"),
+ dup("func()"),
+ dup("func(int, float32) string"),
+ dup("interface{m()}"),
+ dup("interface{m() string; n(x int)}"),
+ dup("interface{type int}"),
+
+ // The following exprs do not get formatted correctly: each element in the
+ // type list is printed on a separate line. This is left as a placeholder
+ // until type lists are removed.
+ // TODO(rfindley): remove this once type lists are gone.
+ // dup("interface{type int, float64, string}"),
+ // dup("interface{type int; m()}"),
+ // dup("interface{type int, float64, string; m() string; n(x int)}"),
+ dup("map[string]int"),
+ dup("chan E"),
+ dup("<-chan E"),
+ dup("chan<- E"),
+
+ // new interfaces
+ dup("interface{int}"),
+ dup("interface{~int}"),
+ dup("interface{~int}"),
+ dup("interface{int | string}"),
+ dup("interface{~int | ~string; float64; m()}"),
+
+ // See above.
+ // dup("interface{type a, b, c; ~int | ~string; float64; m()}"),
+ dup("interface{~T[int, string] | string}"),
+
// non-type expressions
dup("(x)"),
dup("x.f"),
diff --git a/src/go/types/index.go b/src/go/types/index.go
index 2ba3475f89..a49bc5519c 100644
--- a/src/go/types/index.go
+++ b/src/go/types/index.go
@@ -15,25 +15,25 @@ import (
// If e is a valid function instantiation, indexExpr returns true.
// In that case x represents the uninstantiated function value and
// it is the caller's responsibility to instantiate the function.
-func (check *Checker) indexExpr(x *operand, e *ast.IndexExpr) (isFuncInst bool) {
+func (check *Checker) indexExpr(x *operand, e *typeparams.IndexExpr) (isFuncInst bool) {
check.exprOrType(x, e.X)
switch x.mode {
case invalid:
- check.use(typeparams.UnpackExpr(e.Index)...)
+ check.use(e.Indices...)
return false
case typexpr:
// type instantiation
x.mode = invalid
- x.typ = check.varType(e)
+ x.typ = check.varType(e.Orig)
if x.typ != Typ[Invalid] {
x.mode = typexpr
}
return false
case value:
- if sig := asSignature(x.typ); sig != nil && len(sig.tparams) > 0 {
+ if sig := asSignature(x.typ); sig != nil && sig.TParams().Len() > 0 {
// function instantiation
return true
}
@@ -41,7 +41,7 @@ func (check *Checker) indexExpr(x *operand, e *ast.IndexExpr) (isFuncInst bool)
valid := false
length := int64(-1) // valid if >= 0
- switch typ := optype(x.typ).(type) {
+ switch typ := under(x.typ).(type) {
case *Basic:
if isString(typ) {
valid = true
@@ -80,7 +80,7 @@ func (check *Checker) indexExpr(x *operand, e *ast.IndexExpr) (isFuncInst bool)
index := check.singleIndex(e)
if index == nil {
x.mode = invalid
- return
+ return false
}
var key operand
check.expr(&key, index)
@@ -88,88 +88,81 @@ func (check *Checker) indexExpr(x *operand, e *ast.IndexExpr) (isFuncInst bool)
// ok to continue even if indexing failed - map element type is known
x.mode = mapindex
x.typ = typ.elem
- x.expr = e
- return
+ x.expr = e.Orig
+ return false
- case *_Sum:
- // A sum type can be indexed if all of the sum's types
- // support indexing and have the same index and element
- // type. Special rules apply for maps in the sum type.
- var tkey, telem Type // key is for map types only
- nmaps := 0 // number of map types in sum type
- if typ.is(func(t Type) bool {
- var e Type
- switch t := under(t).(type) {
+ case *TypeParam:
+ // TODO(gri) report detailed failure cause for better error messages
+ var tkey, telem Type // tkey != nil if we have maps
+ if typ.underIs(func(u Type) bool {
+ var key, elem Type
+ alen := int64(-1) // valid if >= 0
+ switch t := u.(type) {
case *Basic:
- if isString(t) {
- e = universeByte
+ if !isString(t) {
+ return false
}
+ elem = universeByte
case *Array:
- e = t.elem
+ elem = t.elem
+ alen = t.len
case *Pointer:
- if t := asArray(t.base); t != nil {
- e = t.elem
+ a, _ := under(t.base).(*Array)
+ if a == nil {
+ return false
}
+ elem = a.elem
+ alen = a.len
case *Slice:
- e = t.elem
+ elem = t.elem
case *Map:
- // If there are multiple maps in the sum type,
- // they must have identical key types.
- // TODO(gri) We may be able to relax this rule
- // but it becomes complicated very quickly.
- if tkey != nil && !Identical(t.key, tkey) {
+ key = t.key
+ elem = t.elem
+ default:
+ return false
+ }
+ assert(elem != nil)
+ if telem == nil {
+ // first type
+ tkey, telem = key, elem
+ length = alen
+ } else {
+ // all map keys must be identical (incl. all nil)
+ if !Identical(key, tkey) {
return false
}
- tkey = t.key
- e = t.elem
- nmaps++
- case *_TypeParam:
- check.errorf(x, 0, "type of %s contains a type parameter - cannot index (implementation restriction)", x)
- case *instance:
- panic("unimplemented")
- }
- if e == nil || telem != nil && !Identical(e, telem) {
- return false
+ // all element types must be identical
+ if !Identical(elem, telem) {
+ return false
+ }
+ tkey, telem = key, elem
+ // track the minimal length for arrays
+ if alen >= 0 && alen < length {
+ length = alen
+ }
}
- telem = e
return true
}) {
- // If there are maps, the index expression must be assignable
- // to the map key type (as for simple map index expressions).
- if nmaps > 0 {
+ // For maps, the index expression must be assignable to the map key type.
+ if tkey != nil {
index := check.singleIndex(e)
if index == nil {
x.mode = invalid
- return
+ return false
}
var key operand
check.expr(&key, index)
check.assignment(&key, tkey, "map index")
// ok to continue even if indexing failed - map element type is known
-
- // If there are only maps, we are done.
- if nmaps == len(typ.types) {
- x.mode = mapindex
- x.typ = telem
- x.expr = e
- return
- }
-
- // Otherwise we have mix of maps and other types. For
- // now we require that the map key be an integer type.
- // TODO(gri) This is probably not good enough.
- valid = isInteger(tkey)
- // avoid 2nd indexing error if indexing failed above
- if !valid && key.mode == invalid {
- x.mode = invalid
- return
- }
- x.mode = value // map index expressions are not addressable
- } else {
- // no maps
- valid = true
- x.mode = variable
+ x.mode = mapindex
+ x.typ = telem
+ x.expr = e.Orig
+ return false
}
+
+ // no maps
+ valid = true
+ x.mode = variable
x.typ = telem
}
}
@@ -177,13 +170,13 @@ func (check *Checker) indexExpr(x *operand, e *ast.IndexExpr) (isFuncInst bool)
if !valid {
check.invalidOp(x, _NonIndexableOperand, "cannot index %s", x)
x.mode = invalid
- return
+ return false
}
index := check.singleIndex(e)
if index == nil {
x.mode = invalid
- return
+ return false
}
// In pathological (invalid) cases (e.g.: type T1 [][[]T1{}[0][0]]T0)
@@ -206,7 +199,7 @@ func (check *Checker) sliceExpr(x *operand, e *ast.SliceExpr) {
valid := false
length := int64(-1) // valid if >= 0
- switch typ := optype(x.typ).(type) {
+ switch typ := under(x.typ).(type) {
case *Basic:
if isString(typ) {
if e.Slice3 {
@@ -246,8 +239,8 @@ func (check *Checker) sliceExpr(x *operand, e *ast.SliceExpr) {
valid = true
// x.typ doesn't change
- case *_Sum, *_TypeParam:
- check.errorf(x, 0, "generic slice expressions not yet implemented")
+ case *TypeParam:
+ check.errorf(x, _Todo, "generic slice expressions not yet implemented")
x.mode = invalid
return
}
@@ -311,23 +304,16 @@ L:
// singleIndex returns the (single) index from the index expression e.
// If the index is missing, or if there are multiple indices, an error
// is reported and the result is nil.
-func (check *Checker) singleIndex(e *ast.IndexExpr) ast.Expr {
- index := e.Index
- if index == nil {
- check.invalidAST(e, "missing index for %s", e)
- return nil
- }
-
- indexes := typeparams.UnpackExpr(index)
- if len(indexes) == 0 {
- check.invalidAST(index, "index expression %v with 0 indices", index)
+func (check *Checker) singleIndex(expr *typeparams.IndexExpr) ast.Expr {
+ if len(expr.Indices) == 0 {
+ check.invalidAST(expr.Orig, "index expression %v with 0 indices", expr)
return nil
}
- if len(indexes) > 1 {
+ if len(expr.Indices) > 1 {
// TODO(rFindley) should this get a distinct error code?
- check.invalidOp(indexes[1], _InvalidIndex, "more than one index")
+ check.invalidOp(expr.Indices[1], _InvalidIndex, "more than one index")
}
- return indexes[0]
+ return expr.Indices[0]
}
// index checks an index expression for validity.
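Not part of the patch: a sketch (Go 1.18+ syntax) of the type-parameter indexing that the rewritten indexExpr above accepts: every type in the type set must be indexable and the element types must be identical (a string contributes element type byte). The name at is illustrative.

    package main

    import "fmt"

    // at indexes an operand whose type set contains only []byte and string
    // (or types with those underlying types); both have element type byte.
    func at[B ~[]byte | ~string](b B, i int) byte {
        return b[i]
    }

    func main() {
        fmt.Println(at("hello", 1))         // 101 ('e')
        fmt.Println(at([]byte{1, 2, 3}, 2)) // 3
    }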
diff --git a/src/go/types/infer.go b/src/go/types/infer.go
index 5d49351e1f..6e70a103e7 100644
--- a/src/go/types/infer.go
+++ b/src/go/types/infer.go
@@ -93,7 +93,7 @@ func (check *Checker) infer(posn positioner, tparams []*TypeName, targs []Type,
// Unify parameter and argument types for generic parameters with typed arguments
// and collect the indices of generic parameters with untyped arguments.
// Terminology: generic parameter = function parameter with a type-parameterized type
- u := newUnifier(check, false)
+ u := newUnifier(false)
u.x.init(tparams)
// Set the type arguments which we know already.
@@ -189,7 +189,7 @@ func (check *Checker) infer(posn positioner, tparams []*TypeName, targs []Type,
// only parameter type it can possibly match against is a *TypeParam.
// Thus, only consider untyped arguments for generic parameters that
// are not of composite types and which don't have a type inferred yet.
- if tpar, _ := par.typ.(*_TypeParam); tpar != nil && targs[tpar.index] == nil {
+ if tpar, _ := par.typ.(*TypeParam); tpar != nil && targs[tpar.index] == nil {
arg := args[i]
targ := Default(arg.typ)
// The default type for an untyped nil is untyped nil. We must not
@@ -302,8 +302,8 @@ func (w *tpWalker) isParameterized(typ Type) (res bool) {
}
}
- case *_Sum:
- return w.isParameterizedList(t.types)
+ case *Union:
+ return w.isParameterizedTermList(t.terms)
case *Signature:
// t.tparams may not be nil if we are looking at a signature
@@ -316,24 +316,13 @@ func (w *tpWalker) isParameterized(typ Type) (res bool) {
return w.isParameterized(t.params) || w.isParameterized(t.results)
case *Interface:
- if t.allMethods != nil {
- // TODO(rFindley) at some point we should enforce completeness here
- for _, m := range t.allMethods {
- if w.isParameterized(m.typ) {
- return true
- }
+ tset := t.typeSet()
+ for _, m := range tset.methods {
+ if w.isParameterized(m.typ) {
+ return true
}
- return w.isParameterizedList(unpackType(t.allTypes))
}
-
- return t.iterate(func(t *Interface) bool {
- for _, m := range t.methods {
- if w.isParameterized(m.typ) {
- return true
- }
- }
- return w.isParameterizedList(unpackType(t.types))
- }, nil)
+ return w.isParameterized(tset.types)
case *Map:
return w.isParameterized(t.key) || w.isParameterized(t.elem)
@@ -342,15 +331,12 @@ func (w *tpWalker) isParameterized(typ Type) (res bool) {
return w.isParameterized(t.elem)
case *Named:
- return w.isParameterizedList(t.targs)
+ return w.isParameterizedTypeList(t.targs)
- case *_TypeParam:
+ case *TypeParam:
// t must be one of w.tparams
return t.index < len(w.tparams) && w.tparams[t.index].typ == t
- case *instance:
- return w.isParameterizedList(t.targs)
-
default:
unreachable()
}
@@ -358,7 +344,7 @@ func (w *tpWalker) isParameterized(typ Type) (res bool) {
return false
}
-func (w *tpWalker) isParameterizedList(list []Type) bool {
+func (w *tpWalker) isParameterizedTypeList(list []Type) bool {
for _, t := range list {
if w.isParameterized(t) {
return true
@@ -367,6 +353,15 @@ func (w *tpWalker) isParameterizedList(list []Type) bool {
return false
}
+func (w *tpWalker) isParameterizedTermList(list []*term) bool {
+ for _, t := range list {
+ if w.isParameterized(t.typ) {
+ return true
+ }
+ }
+ return false
+}
+
// inferB returns the list of actual type arguments inferred from the type parameters'
// bounds and an initial set of type arguments. If type inference is impossible because
// unification fails, an error is reported if report is set to true, the resulting types
@@ -380,7 +375,7 @@ func (check *Checker) inferB(tparams []*TypeName, targs []Type, report bool) (ty
// Setup bidirectional unification between those structural bounds
// and the corresponding type arguments (which may be nil!).
- u := newUnifier(check, false)
+ u := newUnifier(false)
u.x.init(tparams)
u.y = u.x // type parameters between LHS and RHS of unification are identical
@@ -393,7 +388,7 @@ func (check *Checker) inferB(tparams []*TypeName, targs []Type, report bool) (ty
// Unify type parameters with their structural constraints, if any.
for _, tpar := range tparams {
- typ := tpar.typ.(*_TypeParam)
+ typ := tpar.typ.(*TypeParam)
sbound := check.structuralType(typ.bound)
if sbound != nil {
if !u.unify(typ, sbound) {
@@ -407,8 +402,8 @@ func (check *Checker) inferB(tparams []*TypeName, targs []Type, report bool) (ty
// u.x.types() now contains the incoming type arguments plus any additional type
// arguments for which there were structural constraints. The newly inferred non-
- // nil entries may still contain references to other type parameters. For instance,
- // for [A any, B interface{type []C}, C interface{type *A}], if A == int
+ // nil entries may still contain references to other type parameters.
+ // For instance, for [A any, B interface{ []C }, C interface{ *A }], if A == int
// was given, unification produced the type list [int, []C, *A]. We eliminate the
// remaining type parameters by substituting the type parameters in this type list
// until nothing changes anymore.
@@ -471,12 +466,16 @@ func (check *Checker) inferB(tparams []*TypeName, targs []Type, report bool) (ty
// structuralType returns the structural type of a constraint, if any.
func (check *Checker) structuralType(constraint Type) Type {
if iface, _ := under(constraint).(*Interface); iface != nil {
- check.completeInterface(token.NoPos, iface)
- types := unpackType(iface.allTypes)
- if len(types) == 1 {
- return types[0]
+ types := iface.typeSet().types
+ if u, _ := types.(*Union); u != nil {
+ if u.NumTerms() == 1 {
+ // TODO(gri) do we need to respect tilde?
+ t, _ := u.Term(0)
+ return t
+ }
+ return nil
}
- return nil
+ return types
}
- return constraint
+ return nil
}
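Not part of the patch: a sketch (Go 1.18+ syntax) of inference driven by a single-type structural constraint, the case structuralType above extracts: S's constraint has the sole type []E, so E is inferred once S is known. The name fill is illustrative.

    package main

    import "fmt"

    // fill sets every element of s to v. At the call site both type arguments
    // are inferred: S from the slice argument, E from S's constraint ~[]E.
    func fill[S ~[]E, E any](s S, v E) {
        for i := range s {
            s[i] = v
        }
    }

    func main() {
        xs := make([]int, 3)
        fill(xs, 7)
        fmt.Println(xs) // [7 7 7]
    }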
diff --git a/src/go/types/instantiate.go b/src/go/types/instantiate.go
new file mode 100644
index 0000000000..6d56eb7ea2
--- /dev/null
+++ b/src/go/types/instantiate.go
@@ -0,0 +1,248 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements instantiation of generic types
+// through substitution of type parameters by type arguments.
+
+package types
+
+import (
+ "fmt"
+ "go/token"
+)
+
+// Instantiate instantiates the type typ with the given type arguments
+// targs. To check type constraint satisfaction, verify must be set.
+// pos and posList correspond to the instantiation and type argument
+// positions respectively; posList may be nil or shorter than the number
+// of type arguments provided.
+// typ must be a *Named or a *Signature type, and its number of type
+// parameters must match the number of provided type arguments.
+// The receiver (check) may be nil if and only if verify is not set.
+// The result is a new, instantiated (not generic) type of the same kind
+// (either a *Named or a *Signature).
+// Any methods attached to a *Named are simply copied; they are not
+// instantiated.
+func (check *Checker) Instantiate(pos token.Pos, typ Type, targs []Type, posList []token.Pos, verify bool) (res Type) {
+ var tparams []*TypeName
+ switch t := typ.(type) {
+ case *Named:
+ tparams = t.TParams().list()
+ case *Signature:
+ tparams = t.TParams().list()
+ defer func() {
+ // If we had an unexpected failure somewhere don't panic below when
+ // asserting res.(*Signature). Check for *Signature in case Typ[Invalid]
+ // is returned.
+ if _, ok := res.(*Signature); !ok {
+ return
+ }
+ // If the signature doesn't use its type parameters, subst
+ // will not make a copy. In that case, make a copy now (so
+ // we can set tparams to nil w/o causing side-effects).
+ if t == res {
+ copy := *t
+ res = &copy
+ }
+ // After instantiating a generic signature, it is not generic
+ // anymore; we need to set tparams to nil.
+ res.(*Signature).tparams = nil
+ }()
+ default:
+ // only types and functions can be generic
+ panic(fmt.Sprintf("%v: cannot instantiate %v", pos, typ))
+ }
+
+ inst := check.instantiate(pos, typ, tparams, targs, posList)
+ if verify && len(tparams) == len(targs) {
+ check.verify(pos, tparams, targs, posList)
+ }
+ return inst
+}
+
+func (check *Checker) instantiate(pos token.Pos, typ Type, tparams []*TypeName, targs []Type, posList []token.Pos) (res Type) {
+ // the number of supplied types must match the number of type parameters
+ if len(targs) != len(tparams) {
+ // TODO(gri) provide better error message
+ if check != nil {
+ check.errorf(atPos(pos), _Todo, "got %d arguments but %d type parameters", len(targs), len(tparams))
+ return Typ[Invalid]
+ }
+ panic(fmt.Sprintf("%v: got %d arguments but %d type parameters", pos, len(targs), len(tparams)))
+ }
+
+ if check != nil && trace {
+ check.trace(pos, "-- instantiating %s with %s", typ, typeListString(targs))
+ check.indent++
+ defer func() {
+ check.indent--
+ var under Type
+ if res != nil {
+ // Calling under() here may lead to endless instantiations.
+ // Test case: type T[P any] T[P]
+ // TODO(gri) investigate if that's a bug or to be expected.
+ under = res.Underlying()
+ }
+ check.trace(pos, "=> %s (under = %s)", res, under)
+ }()
+ }
+
+ assert(len(posList) <= len(targs))
+
+ // TODO(gri) What is better here: work with TypeParams, or work with TypeNames?
+
+ if len(tparams) == 0 {
+ return typ // nothing to do (minor optimization)
+ }
+
+ smap := makeSubstMap(tparams, targs)
+
+ return check.subst(pos, typ, smap)
+}
+
+// InstantiateLazy is like Instantiate, but avoids actually
+// instantiating the type until needed. typ must be a *Named
+// type.
+func (check *Checker) InstantiateLazy(pos token.Pos, typ Type, targs []Type, posList []token.Pos, verify bool) Type {
+ // Don't use asNamed here: we don't want to expand the base during lazy
+ // instantiation.
+ base := typ.(*Named)
+ if base == nil {
+ panic(fmt.Sprintf("%v: cannot instantiate %v", pos, typ))
+ }
+ if verify && base.TParams().Len() == len(targs) {
+ check.later(func() {
+ check.verify(pos, base.tparams.list(), targs, posList)
+ })
+ }
+ h := instantiatedHash(base, targs)
+ if check != nil {
+ // typ may already have been instantiated with identical type arguments. In
+ // that case, re-use the existing instance.
+ if named := check.typMap[h]; named != nil {
+ return named
+ }
+ }
+
+ tname := NewTypeName(pos, base.obj.pkg, base.obj.name, nil)
+ named := check.newNamed(tname, base, nil, nil, nil) // methods and tparams are set when named is loaded.
+ named.targs = targs
+ named.instance = &instance{pos, posList}
+
+ if check != nil {
+ check.typMap[h] = named
+ }
+
+ return named
+}
+
+func (check *Checker) verify(pos token.Pos, tparams []*TypeName, targs []Type, posList []token.Pos) {
+ if check == nil {
+ panic("cannot have nil Checker if verifying constraints")
+ }
+
+ smap := makeSubstMap(tparams, targs)
+ for i, tname := range tparams {
+ // best position for error reporting
+ pos := pos
+ if i < len(posList) {
+ pos = posList[i]
+ }
+
+ // stop checking bounds after the first failure
+ if !check.satisfies(pos, targs[i], tname.typ.(*TypeParam), smap) {
+ break
+ }
+ }
+}
+
+// satisfies reports whether the type argument targ satisfies the constraint of type
+// parameter tpar (after any of its type parameters have been substituted through smap).
+// A suitable error is reported if the result is false.
+// TODO(gri) This should be a method of interfaces or type sets.
+func (check *Checker) satisfies(pos token.Pos, targ Type, tpar *TypeParam, smap *substMap) bool {
+ iface := tpar.iface()
+ if iface.Empty() {
+ return true // no type bound
+ }
+
+ // The type parameter bound is parameterized with the same type parameters
+ // as the instantiated type; before we can use it for bounds checking we
+ // need to instantiate it with the type arguments with which we instantiate
+ // the parameterized type.
+ iface = check.subst(pos, iface, smap).(*Interface)
+
+ // if iface is comparable, targ must be comparable
+ // TODO(gri) the error messages need to be better here
+ if iface.IsComparable() && !Comparable(targ) {
+ if tpar := asTypeParam(targ); tpar != nil && tpar.iface().typeSet().IsTop() {
+ check.softErrorf(atPos(pos), _Todo, "%s has no constraints", targ)
+ return false
+ }
+ check.softErrorf(atPos(pos), _Todo, "%s does not satisfy comparable", targ)
+ return false
+ }
+
+ // targ must implement iface (methods)
+ // - check only if we have methods
+ if iface.NumMethods() > 0 {
+ // If the type argument is a pointer to a type parameter, the type argument's
+ // method set is empty.
+ // TODO(gri) is this what we want? (spec question)
+ if base, isPtr := deref(targ); isPtr && asTypeParam(base) != nil {
+ check.errorf(atPos(pos), 0, "%s has no methods", targ)
+ return false
+ }
+ if m, wrong := check.missingMethod(targ, iface, true); m != nil {
+ // TODO(gri) needs to print updated name to avoid major confusion in error message!
+ // (print warning for now)
+ // Old warning:
+ // check.softErrorf(pos, "%s does not satisfy %s (warning: name not updated) = %s (missing method %s)", targ, tpar.bound, iface, m)
+ if wrong != nil {
+ // TODO(gri) This can still report uninstantiated types which makes the error message
+ // more difficult to read than necessary.
+ // TODO(rFindley) should this use parentheses rather than ':' for qualification?
+ check.softErrorf(atPos(pos), _Todo,
+ "%s does not satisfy %s: wrong method signature\n\tgot %s\n\twant %s",
+ targ, tpar.bound, wrong, m,
+ )
+ } else {
+ check.softErrorf(atPos(pos), 0, "%s does not satisfy %s (missing method %s)", targ, tpar.bound, m.name)
+ }
+ return false
+ }
+ }
+
+ // targ's underlying type must also be one of the interface types listed, if any
+ if iface.typeSet().types == nil {
+ return true // nothing to do
+ }
+
+ // If targ is itself a type parameter, each of its possible types, but at least one, must be in the
+ // list of iface types (i.e., the targ type list must be a non-empty subset of the iface types).
+ if targ := asTypeParam(targ); targ != nil {
+ targBound := targ.iface()
+ if targBound.typeSet().types == nil {
+ check.softErrorf(atPos(pos), _Todo, "%s does not satisfy %s (%s has no type constraints)", targ, tpar.bound, targ)
+ return false
+ }
+ return iface.is(func(typ Type, tilde bool) bool {
+ // TODO(gri) incorporate tilde information!
+ if !iface.isSatisfiedBy(typ) {
+ // TODO(gri) match this error message with the one below (or vice versa)
+ check.softErrorf(atPos(pos), 0, "%s does not satisfy %s (%s type constraint %s not found in %s)", targ, tpar.bound, targ, typ, iface.typeSet().types)
+ return false
+ }
+ return true
+ })
+ }
+
+ // Otherwise, targ's type or underlying type must also be one of the interface types listed, if any.
+ if !iface.isSatisfiedBy(targ) {
+ check.softErrorf(atPos(pos), _Todo, "%s does not satisfy %s (%s not found in %s)", targ, tpar.bound, targ, iface.typeSet().types)
+ return false
+ }
+
+ return true
+}
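Not part of the patch: a source-level sketch (Go 1.18+ syntax) of the explicit instantiation plus constraint verification that Instantiate and verify above implement inside the checker. The type Pair is illustrative.

    package main

    import "fmt"

    // Pair is generic; instantiating it substitutes K and V for the type
    // parameters and verifies that the arguments satisfy their constraints.
    type Pair[K comparable, V any] struct {
        Key K
        Val V
    }

    func main() {
        p := Pair[string, int]{Key: "answer", Val: 42} // Pair instantiated with [string, int]
        fmt.Println(p.Key, p.Val)

        // Pair[[]byte, int] would be rejected by the constraint check:
        // []byte does not satisfy comparable.
    }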
diff --git a/src/go/types/interface.go b/src/go/types/interface.go
new file mode 100644
index 0000000000..d8f9671857
--- /dev/null
+++ b/src/go/types/interface.go
@@ -0,0 +1,280 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "go/ast"
+ "go/internal/typeparams"
+ "go/token"
+)
+
+// ----------------------------------------------------------------------------
+// API
+
+// An Interface represents an interface type.
+type Interface struct {
+ obj *TypeName // type name object defining this interface; or nil (for better error messages)
+ methods []*Func // ordered list of explicitly declared methods
+ embeddeds []Type // ordered list of explicitly embedded elements
+ embedPos *[]token.Pos // positions of embedded elements; or nil (for error messages) - use pointer to save space
+ complete bool // indicates that obj, methods, and embeddeds are set and type set can be computed
+
+ tset *_TypeSet // type set described by this interface, computed lazily
+}
+
+// typeSet returns the type set for interface t.
+func (t *Interface) typeSet() *_TypeSet { return computeTypeSet(nil, token.NoPos, t) }
+
+// is reports whether interface t represents types that all satisfy f.
+func (t *Interface) is(f func(Type, bool) bool) bool {
+ switch t := t.typeSet().types.(type) {
+ case nil, *top:
+ // TODO(gri) should settle on top or nil to represent this case
+ return false // we must have at least one type! (was bug)
+ case *Union:
+ return t.is(func(t *term) bool { return f(t.typ, t.tilde) })
+ default:
+ return f(t, false)
+ }
+}
+
+// emptyInterface represents the empty (completed) interface
+var emptyInterface = Interface{complete: true, tset: &topTypeSet}
+
+// NewInterface returns a new interface for the given methods and embedded types.
+// NewInterface takes ownership of the provided methods and may modify their types
+// by setting missing receivers.
+//
+// Deprecated: Use NewInterfaceType instead which allows arbitrary embedded types.
+func NewInterface(methods []*Func, embeddeds []*Named) *Interface {
+ tnames := make([]Type, len(embeddeds))
+ for i, t := range embeddeds {
+ tnames[i] = t
+ }
+ return NewInterfaceType(methods, tnames)
+}
+
+// NewInterfaceType returns a new interface for the given methods and embedded types.
+// NewInterfaceType takes ownership of the provided methods and may modify their types
+// by setting missing receivers.
+func NewInterfaceType(methods []*Func, embeddeds []Type) *Interface {
+ if len(methods) == 0 && len(embeddeds) == 0 {
+ return &emptyInterface
+ }
+
+ // set method receivers if necessary
+ typ := new(Interface)
+ for _, m := range methods {
+ if sig := m.typ.(*Signature); sig.recv == nil {
+ sig.recv = NewVar(m.pos, m.pkg, "", typ)
+ }
+ }
+
+ // sort for API stability
+ sortMethods(methods)
+
+ typ.methods = methods
+ typ.embeddeds = embeddeds
+ typ.complete = true
+
+ return typ
+}
+
+// NumExplicitMethods returns the number of explicitly declared methods of interface t.
+func (t *Interface) NumExplicitMethods() int { return len(t.methods) }
+
+// ExplicitMethod returns the i'th explicitly declared method of interface t for 0 <= i < t.NumExplicitMethods().
+// The methods are ordered by their unique Id.
+func (t *Interface) ExplicitMethod(i int) *Func { return t.methods[i] }
+
+// NumEmbeddeds returns the number of embedded types in interface t.
+func (t *Interface) NumEmbeddeds() int { return len(t.embeddeds) }
+
+// Embedded returns the i'th embedded defined (*Named) type of interface t for 0 <= i < t.NumEmbeddeds().
+// The result is nil if the i'th embedded type is not a defined type.
+//
+// Deprecated: Use EmbeddedType which is not restricted to defined (*Named) types.
+func (t *Interface) Embedded(i int) *Named { tname, _ := t.embeddeds[i].(*Named); return tname }
+
+// EmbeddedType returns the i'th embedded type of interface t for 0 <= i < t.NumEmbeddeds().
+func (t *Interface) EmbeddedType(i int) Type { return t.embeddeds[i] }
+
+// NumMethods returns the total number of methods of interface t.
+func (t *Interface) NumMethods() int { return t.typeSet().NumMethods() }
+
+// Method returns the i'th method of interface t for 0 <= i < t.NumMethods().
+// The methods are ordered by their unique Id.
+func (t *Interface) Method(i int) *Func { return t.typeSet().Method(i) }
+
+// Empty reports whether t is the empty interface.
+func (t *Interface) Empty() bool { return t.typeSet().IsTop() }
+
+// IsComparable reports whether each type in interface t's type set is comparable.
+func (t *Interface) IsComparable() bool { return t.typeSet().IsComparable() }
+
+// IsConstraint reports whether interface t is not just a method set.
+func (t *Interface) IsConstraint() bool { return !t.typeSet().IsMethodSet() }
+
+// isSatisfiedBy reports whether interface t's type list is satisfied by the type typ.
+// If the type list is empty (absent), typ trivially satisfies the interface.
+// TODO(gri) This is not a great name. Eventually, we should have a more comprehensive
+// "implements" predicate.
+func (t *Interface) isSatisfiedBy(typ Type) bool {
+ t.Complete()
+ switch t := t.typeSet().types.(type) {
+ case nil:
+ return true // no type restrictions
+ case *Union:
+ r, _ := t.intersect(typ, false)
+ return r != nil
+ default:
+ return Identical(t, typ)
+ }
+}
+
+// Complete computes the interface's type set. It must be called by users of
+// NewInterfaceType and NewInterface after the interface's embedded types are
+// fully defined and before using the interface type in any way other than to
+// form other types. The interface must not contain duplicate methods or a
+// panic occurs. Complete returns the receiver.
+//
+// Deprecated: Type sets are now computed lazily, on demand; this function
+// is only here for backward-compatibility. It does not have to
+// be called explicitly anymore.
+func (t *Interface) Complete() *Interface {
+ // Some tests are still depending on the state change
+ // (string representation of an Interface not containing an
+ // /* incomplete */ marker) caused by the explicit Complete
+ // call, so we compute the type set eagerly here.
+ t.complete = true
+ t.typeSet()
+ return t
+}
+
+func (t *Interface) Underlying() Type { return t }
+func (t *Interface) String() string { return TypeString(t, nil) }
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+func (check *Checker) interfaceType(ityp *Interface, iface *ast.InterfaceType, def *Named) {
+ var tlist []ast.Expr
+ var tname *ast.Ident // "type" name of first entry in a type list declaration
+
+ addEmbedded := func(pos token.Pos, typ Type) {
+ ityp.embeddeds = append(ityp.embeddeds, typ)
+ if ityp.embedPos == nil {
+ ityp.embedPos = new([]token.Pos)
+ }
+ *ityp.embedPos = append(*ityp.embedPos, pos)
+ }
+
+ for _, f := range iface.Methods.List {
+ if len(f.Names) == 0 {
+ // We have an embedded type; possibly a union of types.
+ addEmbedded(f.Type.Pos(), parseUnion(check, flattenUnion(nil, f.Type)))
+ continue
+ }
+
+ // We have a method with name f.Names[0], or a type
+ // of a type list (name.Name == "type").
+ // (The parser ensures that there's only one method
+ // and we don't care if a constructed AST has more.)
+ name := f.Names[0]
+ if name.Name == "_" {
+ check.errorf(name, _BlankIfaceMethod, "invalid method name _")
+ continue // ignore
+ }
+
+ // TODO(rfindley) Remove type list handling once the parser doesn't accept type lists anymore.
+ if name.Name == "type" {
+ // Report an error for the first type list per interface
+ // if we don't allow type lists, but continue.
+ if !allowTypeLists && tlist == nil {
+ check.softErrorf(name, _Todo, "use generalized embedding syntax instead of a type list")
+ }
+ // For now, collect all type list entries as if it
+ // were a single union, where each union element is
+ // of the form ~T.
+ // TODO(rfindley) remove once we disallow type lists
+ op := new(ast.UnaryExpr)
+ op.Op = token.TILDE
+ op.X = f.Type
+ tlist = append(tlist, op)
+ // Report an error if we have multiple type lists in an
+ // interface, but only if they are permitted in the first place.
+ if allowTypeLists && tname != nil && tname != name {
+ check.errorf(name, _Todo, "cannot have multiple type lists in an interface")
+ }
+ tname = name
+ continue
+ }
+
+ typ := check.typ(f.Type)
+ sig, _ := typ.(*Signature)
+ if sig == nil {
+ if typ != Typ[Invalid] {
+ check.invalidAST(f.Type, "%s is not a method signature", typ)
+ }
+ continue // ignore
+ }
+
+ // Always type-check method type parameters but complain if they are not enabled.
+ // (This extra check is needed here because interface method signatures don't have
+ // a receiver specification.)
+ if sig.tparams != nil {
+ var at positioner = f.Type
+ if tparams := typeparams.Get(f.Type); tparams != nil {
+ at = tparams
+ }
+ check.errorf(at, _Todo, "methods cannot have type parameters")
+ }
+
+ // use named receiver type if available (for better error messages)
+ var recvTyp Type = ityp
+ if def != nil {
+ recvTyp = def
+ }
+ sig.recv = NewVar(name.Pos(), check.pkg, "", recvTyp)
+
+ m := NewFunc(name.Pos(), check.pkg, name.Name, sig)
+ check.recordDef(name, m)
+ ityp.methods = append(ityp.methods, m)
+ }
+
+ // type constraints
+ if tlist != nil {
+ // TODO(rfindley): this differs from types2 due to the use of Pos() below,
+ // which should actually be on the ~. Confirm that this position is correct.
+ addEmbedded(tlist[0].Pos(), parseUnion(check, tlist))
+ }
+
+ // All methods and embedded elements for this interface are collected;
+ // i.e., this interface may be used in a type set computation.
+ ityp.complete = true
+
+ if len(ityp.methods) == 0 && len(ityp.embeddeds) == 0 {
+ // empty interface
+ ityp.tset = &topTypeSet
+ return
+ }
+
+ // sort for API stability
+ sortMethods(ityp.methods)
+ // (don't sort embeddeds: they must correspond to *embedPos entries)
+
+ // Compute type set with a non-nil *Checker as soon as possible
+ // to report any errors. Subsequent uses of type sets will use
+ // this computed type set and won't need to pass in a *Checker.
+ check.later(func() { computeTypeSet(check, iface.Pos(), ityp) })
+}
+
+func flattenUnion(list []ast.Expr, x ast.Expr) []ast.Expr {
+ if o, _ := x.(*ast.BinaryExpr); o != nil && o.Op == token.OR {
+ list = flattenUnion(list, o.X)
+ x = o.Y
+ }
+ return append(list, x)
+}
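Not part of the patch: a sketch that drives the exported constructors declared above (NewInterfaceType, Complete) to build interface{ Error() string } programmatically. With the lazily computed type set, the Complete call is optional and kept only for illustration.

    package main

    import (
        "fmt"
        "go/token"
        "go/types"
    )

    func main() {
        // func() string; the receiver is left nil and is filled in by NewInterfaceType.
        results := types.NewTuple(types.NewVar(token.NoPos, nil, "", types.Typ[types.String]))
        sig := types.NewSignature(nil, nil, results, false)
        m := types.NewFunc(token.NoPos, nil, "Error", sig)

        iface := types.NewInterfaceType([]*types.Func{m}, nil).Complete()
        fmt.Println(iface)              // interface{Error() string}
        fmt.Println(iface.NumMethods()) // 1
        fmt.Println(iface.Empty())      // false
    }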
diff --git a/src/go/types/issues_test.go b/src/go/types/issues_test.go
index 519e199536..51995af30a 100644
--- a/src/go/types/issues_test.go
+++ b/src/go/types/issues_test.go
@@ -634,7 +634,7 @@ var _ T = template /* ERROR cannot use.*text/template.* as T value */.Template{}
}
imp := importHelper{pkg: a, fallback: importer.Default()}
- checkFiles(t, nil, "", []string{"b.go"}, [][]byte{[]byte(bsrc)}, false, imp)
- checkFiles(t, nil, "", []string{"c.go"}, [][]byte{[]byte(csrc)}, false, imp)
- checkFiles(t, nil, "", []string{"t.go"}, [][]byte{[]byte(tsrc)}, false, imp)
+ testFiles(t, nil, []string{"b.go"}, [][]byte{[]byte(bsrc)}, false, imp)
+ testFiles(t, nil, []string{"c.go"}, [][]byte{[]byte(csrc)}, false, imp)
+ testFiles(t, nil, []string{"t.go"}, [][]byte{[]byte(tsrc)}, false, imp)
}
diff --git a/src/go/types/labels.go b/src/go/types/labels.go
index 8cf6e63645..f3b7f211f3 100644
--- a/src/go/types/labels.go
+++ b/src/go/types/labels.go
@@ -36,7 +36,8 @@ func (check *Checker) labels(body *ast.BlockStmt) {
}
// spec: "It is illegal to define a label that is never used."
- for _, obj := range all.elems {
+ for name, obj := range all.elems {
+ obj = resolve(name, obj)
if lbl := obj.(*Label); !lbl.used {
check.softErrorf(lbl, _UnusedLabel, "label %s declared but not used", lbl.name)
}
diff --git a/src/go/types/lookup.go b/src/go/types/lookup.go
index 9c7bfd4bb9..7cab336dbe 100644
--- a/src/go/types/lookup.go
+++ b/src/go/types/lookup.go
@@ -8,6 +8,11 @@ package types
import "go/token"
+// Internal use of LookupFieldOrMethod: If the obj result is a method
+// associated with a concrete (non-interface) type, the method's signature
+// may not be fully set up. Call Checker.objDecl(obj, nil) before accessing
+// the method's type.
+
// LookupFieldOrMethod looks up a field or method with given package and name
// in T and returns the corresponding *Var or *Func, an index sequence, and a
// bool indicating if there were any pointer indirections on the path to the
@@ -35,19 +40,6 @@ import "go/token"
// the method's formal receiver base type, nor was the receiver addressable.
//
func LookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
- return (*Checker)(nil).lookupFieldOrMethod(T, addressable, pkg, name)
-}
-
-// Internal use of Checker.lookupFieldOrMethod: If the obj result is a method
-// associated with a concrete (non-interface) type, the method's signature
-// may not be fully set up. Call Checker.objDecl(obj, nil) before accessing
-// the method's type.
-// TODO(gri) Now that we provide the *Checker, we can probably remove this
-// caveat by calling Checker.objDecl from lookupFieldOrMethod. Investigate.
-
-// lookupFieldOrMethod is like the external version but completes interfaces
-// as necessary.
-func (check *Checker) lookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
// Methods cannot be associated to a named pointer type
// (spec: "The type denoted by T is called the receiver base type;
// it must not be a pointer or interface type and it must be declared
@@ -56,8 +48,8 @@ func (check *Checker) lookupFieldOrMethod(T Type, addressable bool, pkg *Package
// pointer type but discard the result if it is a method since we would
// not have found it for T (see also issue 8590).
if t := asNamed(T); t != nil {
- if p, _ := t.underlying.(*Pointer); p != nil {
- obj, index, indirect = check.rawLookupFieldOrMethod(p, false, pkg, name)
+ if p, _ := t.Underlying().(*Pointer); p != nil {
+ obj, index, indirect = lookupFieldOrMethod(p, false, pkg, name)
if _, ok := obj.(*Func); ok {
return nil, nil, false
}
@@ -65,7 +57,7 @@ func (check *Checker) lookupFieldOrMethod(T Type, addressable bool, pkg *Package
}
}
- return check.rawLookupFieldOrMethod(T, addressable, pkg, name)
+ return lookupFieldOrMethod(T, addressable, pkg, name)
}
// TODO(gri) The named type consolidation and seen maps below must be
@@ -73,10 +65,9 @@ func (check *Checker) lookupFieldOrMethod(T Type, addressable bool, pkg *Package
// types always have only one representation (even when imported
// indirectly via different packages.)
-// rawLookupFieldOrMethod should only be called by lookupFieldOrMethod and missingMethod.
-func (check *Checker) rawLookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
+// lookupFieldOrMethod should only be called by LookupFieldOrMethod and missingMethod.
+func lookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
// WARNING: The code in this function is extremely subtle - do not modify casually!
- // This function and NewMethodSet should be kept in sync.
if name == "_" {
return // blank fields/methods are never found
@@ -84,10 +75,12 @@ func (check *Checker) rawLookupFieldOrMethod(T Type, addressable bool, pkg *Pack
typ, isPtr := deref(T)
- // *typ where typ is an interface has no methods.
- // Be cautious: typ may be nil (issue 39634, crash #3).
- if typ == nil || isPtr && IsInterface(typ) {
- return
+ // *typ where typ is an interface or type parameter has no methods.
+ switch under(typ).(type) {
+ case *Interface, *TypeParam:
+ if isPtr {
+ return
+ }
}
// Start with typ as single entry at shallowest depth.
@@ -107,7 +100,7 @@ func (check *Checker) rawLookupFieldOrMethod(T Type, addressable bool, pkg *Pack
var next []embeddedType // embedded types found at current depth
// look for (pkg, name) in all types at current depth
- var tpar *_TypeParam // set if obj receiver is a type parameter
+ var tpar *TypeParam // set if obj receiver is a type parameter
for _, e := range current {
typ := e.typ
@@ -128,6 +121,7 @@ func (check *Checker) rawLookupFieldOrMethod(T Type, addressable bool, pkg *Pack
seen[named] = true
// look for a matching attached method
+ named.load()
if i, m := lookupMethod(named.methods, pkg, name); m != nil {
// potential match
// caution: method may not have a proper signature yet
@@ -185,9 +179,7 @@ func (check *Checker) rawLookupFieldOrMethod(T Type, addressable bool, pkg *Pack
case *Interface:
// look for a matching method
- // TODO(gri) t.allMethods is sorted - use binary search
- check.completeInterface(token.NoPos, t)
- if i, m := lookupMethod(t.allMethods, pkg, name); m != nil {
+ if i, m := t.typeSet().LookupMethod(pkg, name); m != nil {
assert(m.typ != nil)
index = concat(e.index, i)
if obj != nil || e.multiples {
@@ -197,10 +189,8 @@ func (check *Checker) rawLookupFieldOrMethod(T Type, addressable bool, pkg *Pack
indirect = e.indirect
}
- case *_TypeParam:
- // only consider explicit methods in the type parameter bound, not
- // methods that may be common to all types in the type list.
- if i, m := lookupMethod(t.Bound().allMethods, pkg, name); m != nil {
+ case *TypeParam:
+ if i, m := t.iface().typeSet().LookupMethod(pkg, name); m != nil {
assert(m.typ != nil)
index = concat(e.index, i)
if obj != nil || e.multiples {
@@ -229,7 +219,7 @@ func (check *Checker) rawLookupFieldOrMethod(T Type, addressable bool, pkg *Pack
return
}
- current = check.consolidateMultiples(next)
+ current = consolidateMultiples(next)
}
return nil, nil, false // not found
@@ -246,7 +236,7 @@ type embeddedType struct {
// consolidateMultiples collects multiple list entries with the same type
// into a single entry marked as containing multiples. The result is the
// consolidated list.
-func (check *Checker) consolidateMultiples(list []embeddedType) []embeddedType {
+func consolidateMultiples(list []embeddedType) []embeddedType {
if len(list) <= 1 {
return list // at most one entry - nothing to do
}
@@ -254,7 +244,7 @@ func (check *Checker) consolidateMultiples(list []embeddedType) []embeddedType {
n := 0 // number of entries w/ unique type
prev := make(map[Type]int) // index at which type was previously seen
for _, e := range list {
- if i, found := check.lookupType(prev, e.typ); found {
+ if i, found := lookupType(prev, e.typ); found {
list[i].multiples = true
// ignore this entry
} else {
@@ -266,14 +256,14 @@ func (check *Checker) consolidateMultiples(list []embeddedType) []embeddedType {
return list[:n]
}
-func (check *Checker) lookupType(m map[Type]int, typ Type) (int, bool) {
+func lookupType(m map[Type]int, typ Type) (int, bool) {
// fast path: maybe the types are equal
if i, found := m[typ]; found {
return i, true
}
for t, i := range m {
- if check.identical(t, typ) {
+ if Identical(t, typ) {
return i, true
}
}
@@ -306,40 +296,40 @@ func MissingMethod(V Type, T *Interface, static bool) (method *Func, wrongType b
// To improve error messages, also report the wrong signature
// when the method exists on *V instead of V.
func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method, wrongType *Func) {
- check.completeInterface(token.NoPos, T)
-
// fast path for common case
if T.Empty() {
return
}
if ityp := asInterface(V); ityp != nil {
- check.completeInterface(token.NoPos, ityp)
- // TODO(gri) allMethods is sorted - can do this more efficiently
- for _, m := range T.allMethods {
- _, f := lookupMethod(ityp.allMethods, m.pkg, m.name)
+ // TODO(gri) the methods are sorted - could do this more efficiently
+ for _, m := range T.typeSet().methods {
+ _, f := ityp.typeSet().LookupMethod(m.pkg, m.name)
if f == nil {
- // if m is the magic method == we're ok (interfaces are comparable)
- if m.name == "==" || !static {
+ if !static {
continue
}
return m, f
}
+ // both methods must have the same number of type parameters
ftyp := f.typ.(*Signature)
mtyp := m.typ.(*Signature)
- if len(ftyp.tparams) != len(mtyp.tparams) {
+ if ftyp.TParams().Len() != mtyp.TParams().Len() {
return m, f
}
+ if ftyp.TParams().Len() > 0 {
+ panic("internal error: method with type parameters")
+ }
// If the methods have type parameters we don't care whether they
// are the same or not, as long as they match up. Use unification
// to see if they can be made to match.
// TODO(gri) is this always correct? what about type bounds?
// (Alternative is to rename/subst type parameters and compare.)
- u := newUnifier(check, true)
- u.x.init(ftyp.tparams)
+ u := newUnifier(true)
+ u.x.init(ftyp.TParams().list())
if !u.unify(ftyp, mtyp) {
return m, f
}
@@ -351,14 +341,14 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
// A concrete type implements T if it implements all methods of T.
Vd, _ := deref(V)
Vn := asNamed(Vd)
- for _, m := range T.allMethods {
+ for _, m := range T.typeSet().methods {
// TODO(gri) should this be calling lookupFieldOrMethod instead (and why not)?
- obj, _, _ := check.rawLookupFieldOrMethod(V, false, m.pkg, m.name)
+ obj, _, _ := lookupFieldOrMethod(V, false, m.pkg, m.name)
// Check if *V implements this method of T.
if obj == nil {
ptr := NewPointer(V)
- obj, _, _ = check.rawLookupFieldOrMethod(ptr, false, m.pkg, m.name)
+ obj, _, _ = lookupFieldOrMethod(ptr, false, m.pkg, m.name)
if obj != nil {
return m, obj.(*Func)
}
@@ -367,10 +357,6 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
// we must have a method (not a field of matching function type)
f, _ := obj.(*Func)
if f == nil {
- // if m is the magic method == and V is comparable, we're ok
- if m.name == "==" && Comparable(V) {
- continue
- }
return m, nil
}
@@ -382,9 +368,12 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
// both methods must have the same number of type parameters
ftyp := f.typ.(*Signature)
mtyp := m.typ.(*Signature)
- if len(ftyp.tparams) != len(mtyp.tparams) {
+ if ftyp.TParams().Len() != mtyp.TParams().Len() {
return m, f
}
+ if ftyp.TParams().Len() > 0 {
+ panic("internal error: method with type parameters")
+ }
// If V is a (instantiated) generic type, its methods are still
// parameterized using the original (declaration) receiver type
@@ -393,17 +382,17 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
// In order to compare the signatures, substitute the receiver
// type parameters of ftyp with V's instantiation type arguments.
// This lazily instantiates the signature of method f.
- if Vn != nil && len(Vn.tparams) > 0 {
+ if Vn != nil && Vn.TParams().Len() > 0 {
// Be careful: The number of type arguments may not match
// the number of receiver parameters. If so, an error was
// reported earlier but the length discrepancy is still
// here. Exit early in this case to prevent an assertion
// failure in makeSubstMap.
// TODO(gri) Can we avoid this check by fixing the lengths?
- if len(ftyp.rparams) != len(Vn.targs) {
+ if len(ftyp.RParams().list()) != len(Vn.targs) {
return
}
- ftyp = check.subst(token.NoPos, ftyp, makeSubstMap(ftyp.rparams, Vn.targs)).(*Signature)
+ ftyp = check.subst(token.NoPos, ftyp, makeSubstMap(ftyp.RParams().list(), Vn.targs)).(*Signature)
}
// If the methods have type parameters we don't care whether they
@@ -411,8 +400,8 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
// to see if they can be made to match.
// TODO(gri) is this always correct? what about type bounds?
// (Alternative is to rename/subst type parameters and compare.)
- u := newUnifier(check, true)
- u.x.init(ftyp.tparams)
+ u := newUnifier(true)
+ u.x.init(ftyp.RParams().list())
if !u.unify(ftyp, mtyp) {
return m, f
}
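Not part of the patch: a sketch of the exported LookupFieldOrMethod entry point that the checker-free lookup above now serves, resolving a method through an embedded field. The package name p and the type names are illustrative.

    package main

    import (
        "fmt"
        "go/ast"
        "go/parser"
        "go/token"
        "go/types"
    )

    const src = `package p

    type Base struct{}

    func (Base) Hello() string { return "hi" }

    type Wrapper struct{ Base }
    `

    func main() {
        fset := token.NewFileSet()
        f, err := parser.ParseFile(fset, "p.go", src, 0)
        if err != nil {
            panic(err)
        }
        pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, nil)
        if err != nil {
            panic(err)
        }

        // Hello is found on Wrapper via the embedded Base field.
        W := pkg.Scope().Lookup("Wrapper").Type()
        obj, index, indirect := types.LookupFieldOrMethod(W, true, pkg, "Hello")
        fmt.Println(obj, index, indirect)
    }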
diff --git a/src/go/types/map.go b/src/go/types/map.go
new file mode 100644
index 0000000000..01e13b214e
--- /dev/null
+++ b/src/go/types/map.go
@@ -0,0 +1,24 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+// A Map represents a map type.
+type Map struct {
+ key, elem Type
+}
+
+// NewMap returns a new map for the given key and element types.
+func NewMap(key, elem Type) *Map {
+ return &Map{key: key, elem: elem}
+}
+
+// Key returns the key type of map m.
+func (m *Map) Key() Type { return m.key }
+
+// Elem returns the element type of map m.
+func (m *Map) Elem() Type { return m.elem }
+
+func (t *Map) Underlying() Type { return t }
+func (t *Map) String() string { return TypeString(t, nil) }
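Not part of the patch: a minimal sketch of the constructor and accessors defined in the new file above.

    package main

    import (
        "fmt"
        "go/types"
    )

    func main() {
        // map[string]int built with the exported constructor.
        m := types.NewMap(types.Typ[types.String], types.Typ[types.Int])
        fmt.Println(m)        // map[string]int
        fmt.Println(m.Key())  // string
        fmt.Println(m.Elem()) // int
    }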
diff --git a/src/go/types/methodset.go b/src/go/types/methodset.go
index ae8011a2ee..1462601d58 100644
--- a/src/go/types/methodset.go
+++ b/src/go/types/methodset.go
@@ -130,7 +130,7 @@ func NewMethodSet(T Type) *MethodSet {
// continue with underlying type, but only if it's not a type parameter
// TODO(rFindley): should this use named.under()? Can there be a difference?
typ = named.underlying
- if _, ok := typ.(*_TypeParam); ok {
+ if _, ok := typ.(*TypeParam); ok {
continue
}
}
@@ -157,10 +157,10 @@ func NewMethodSet(T Type) *MethodSet {
}
case *Interface:
- mset = mset.add(t.allMethods, e.index, true, e.multiples)
+ mset = mset.add(t.typeSet().methods, e.index, true, e.multiples)
- case *_TypeParam:
- mset = mset.add(t.Bound().allMethods, e.index, true, e.multiples)
+ case *TypeParam:
+ mset = mset.add(t.iface().typeSet().methods, e.index, true, e.multiples)
}
}
@@ -190,12 +190,7 @@ func NewMethodSet(T Type) *MethodSet {
}
}
- // It's ok to call consolidateMultiples with a nil *Checker because
- // MethodSets are not used internally (outside debug mode). When used
- // externally, interfaces are expected to be completed and then we do
- // not need a *Checker to complete them when (indirectly) calling
- // Checker.identical via consolidateMultiples.
- current = (*Checker)(nil).consolidateMultiples(next)
+ current = consolidateMultiples(next)
}
if len(base) == 0 {
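Not part of the patch: a sketch of the exported method-set API whose internals the hunk above switches over to type sets. The package name p and type T are illustrative.

    package main

    import (
        "fmt"
        "go/ast"
        "go/parser"
        "go/token"
        "go/types"
    )

    const src = `package p

    type T struct{}

    func (T) Value() int    { return 0 }
    func (*T) SetValue(int) {}
    `

    func main() {
        fset := token.NewFileSet()
        f, err := parser.ParseFile(fset, "p.go", src, 0)
        if err != nil {
            panic(err)
        }
        pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, nil)
        if err != nil {
            panic(err)
        }

        T := pkg.Scope().Lookup("T").Type()
        fmt.Println(types.NewMethodSet(T).Len())                   // 1: Value only
        fmt.Println(types.NewMethodSet(types.NewPointer(T)).Len()) // 2: Value and SetValue
    }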
diff --git a/src/go/types/methodset_test.go b/src/go/types/methodset_test.go
index 4a373fa2c4..73a8442f21 100644
--- a/src/go/types/methodset_test.go
+++ b/src/go/types/methodset_test.go
@@ -7,7 +7,6 @@ package types_test
import (
"testing"
- "go/internal/typeparams"
. "go/types"
)
@@ -47,12 +46,15 @@ func TestNewMethodSet(t *testing.T) {
genericTests := map[string][]method{
// By convention, look up a in the scope of "g"
- "type C interface{ f() }; func g[T C](a T){}": {{"f", []int{0}, true}},
- "type C interface{ f() }; func g[T C]() { var a T; _ = a }": {{"f", []int{0}, true}},
- "type C interface{ f() }; func g[T C]() { var a struct{T}; _ = a }": {{"f", []int{0, 0}, true}},
+ "type C interface{ f() }; func g[T C](a T){}": {{"f", []int{0}, true}},
+ "type C interface{ f() }; func g[T C]() { var a T; _ = a }": {{"f", []int{0}, true}},
- // Issue #45639.
- "type C interface{ f() }; func g[T C]() { type Y T; var a Y; _ = a }": {},
+ // Issue #43621: We don't allow this anymore. Keep this code in case we
+ // decide to revisit this decision.
+ // "type C interface{ f() }; func g[T C]() { var a struct{T}; _ = a }": {{"f", []int{0, 0}, true}},
+
+ // Issue #45639: We also don't allow this anymore.
+ // "type C interface{ f() }; func g[T C]() { type Y T; var a Y; _ = a }": {},
}
check := func(src string, methods []method, generic bool) {
@@ -101,9 +103,7 @@ func TestNewMethodSet(t *testing.T) {
check(src, methods, false)
}
- if typeparams.Enabled {
- for src, methods := range genericTests {
- check(src, methods, true)
- }
+ for src, methods := range genericTests {
+ check(src, methods, true)
}
}
diff --git a/src/go/types/named.go b/src/go/types/named.go
new file mode 100644
index 0000000000..f26b50aa81
--- /dev/null
+++ b/src/go/types/named.go
@@ -0,0 +1,296 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "go/token"
+ "sync"
+)
+
+// TODO(rfindley) Clean up Named struct below; specifically the fromRHS field (can we use underlying?).
+
+// A Named represents a named (defined) type.
+type Named struct {
+ check *Checker
+ info typeInfo // for cycle detection
+ obj *TypeName // corresponding declared object
+ orig *Named // original, uninstantiated type
+ fromRHS Type // type (on RHS of declaration) this *Named type is derived from (for cycle reporting)
+ underlying Type // possibly a *Named during setup; never a *Named once set up completely
+ instance *instance // syntactic information for lazy instantiation
+ tparams *TypeParams // type parameters, or nil
+ targs []Type // type arguments (after instantiation), or nil
+ methods []*Func // methods declared for this type (not the method set of this type); signatures are type-checked lazily
+
+ resolve func(*Named) ([]*TypeName, Type, []*Func)
+ once sync.Once
+}
+
+// NewNamed returns a new named type for the given type name, underlying type, and associated methods.
+// If the given type name obj doesn't have a type yet, its type is set to the returned named type.
+// The underlying type must not be a *Named.
+func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
+ if _, ok := underlying.(*Named); ok {
+ panic("types.NewNamed: underlying type must not be *Named")
+ }
+ return (*Checker)(nil).newNamed(obj, nil, underlying, nil, methods)
+}
+
+func (t *Named) load() *Named {
+ // If t is an instantiated type, it derives its methods and tparams from its
+ // base type. Since we expect type parameters and methods to be set after a
+ // call to load, we must load the base and copy here.
+ //
+ // underlying is set when t is expanded.
+ //
+ // By convention, a type instance is loaded iff its tparams are set.
+ if len(t.targs) > 0 && t.tparams == nil {
+ t.orig.load()
+ t.tparams = t.orig.tparams
+ t.methods = t.orig.methods
+ }
+ if t.resolve == nil {
+ return t
+ }
+
+ t.once.Do(func() {
+ // TODO(mdempsky): Since we're passing t to resolve anyway
+ // (necessary because types2 expects the receiver type for methods
+ // on defined interface types to be the Named rather than the
+ // underlying Interface), maybe it should just handle calling
+ // SetTParams, SetUnderlying, and AddMethod instead? Those
+ // methods would need to support reentrant calls though. It would
+ // also make the API more future-proof towards further extensions
+ // (like SetTParams).
+
+ tparams, underlying, methods := t.resolve(t)
+
+ switch underlying.(type) {
+ case nil, *Named:
+ panic("invalid underlying type")
+ }
+
+ t.tparams = bindTParams(tparams)
+ t.underlying = underlying
+ t.methods = methods
+ })
+ return t
+}
+
+// newNamed is like NewNamed but with a *Checker receiver and additional orig argument.
+func (check *Checker) newNamed(obj *TypeName, orig *Named, underlying Type, tparams *TypeParams, methods []*Func) *Named {
+ typ := &Named{check: check, obj: obj, orig: orig, fromRHS: underlying, underlying: underlying, tparams: tparams, methods: methods}
+ if typ.orig == nil {
+ typ.orig = typ
+ }
+ if obj.typ == nil {
+ obj.typ = typ
+ }
+ // Ensure that typ is always expanded, at which point the check field can be
+ // nilled out.
+ //
+ // Note that currently we cannot nil out check inside typ.under(), because
+ // it's possible that typ is expanded multiple times.
+ //
+ // TODO(rFindley): clean this up so that under is the only function mutating
+ // named types.
+ if check != nil {
+ check.later(func() {
+ switch typ.under().(type) {
+ case *Named:
+ panic("internal error: unexpanded underlying type")
+ }
+ typ.check = nil
+ })
+ }
+ return typ
+}
+
+// Obj returns the type name for the named type t.
+func (t *Named) Obj() *TypeName { return t.obj }
+
+// _Orig returns the original generic type an instantiated type is derived from.
+// If t is not an instantiated type, the result is t.
+func (t *Named) _Orig() *Named { return t.orig }
+
+// TODO(gri) Come up with a better representation and API to distinguish
+// between parameterized instantiated and non-instantiated types.
+
+// TParams returns the type parameters of the named type t, or nil.
+// The result is non-nil for an (originally) parameterized type even if it is instantiated.
+func (t *Named) TParams() *TypeParams { return t.load().tparams }
+
+// SetTParams sets the type parameters of the named type t.
+func (t *Named) SetTParams(tparams []*TypeName) { t.load().tparams = bindTParams(tparams) }
+
+// NumTArgs returns the number of type arguments used to instantiate the named
+// type t, or 0 if t is not an instantiated type.
+func (t *Named) NumTArgs() int { return len(t.targs) }
+
+// TArg returns the i'th type argument of the named type t for 0 <= i < t.NumTArgs().
+func (t *Named) TArg(i int) Type { return t.targs[i] }
+
+// SetTArgs sets the type arguments of the named type t.
+func (t *Named) SetTArgs(args []Type) { t.targs = args }
+
+// NumMethods returns the number of explicit methods whose receiver is named type t.
+func (t *Named) NumMethods() int { return len(t.load().methods) }
+
+// Method returns the i'th method of named type t for 0 <= i < t.NumMethods().
+func (t *Named) Method(i int) *Func { return t.load().methods[i] }
+
+// SetUnderlying sets the underlying type and marks t as complete.
+func (t *Named) SetUnderlying(underlying Type) {
+ if underlying == nil {
+ panic("types.Named.SetUnderlying: underlying type must not be nil")
+ }
+ if _, ok := underlying.(*Named); ok {
+ panic("types.Named.SetUnderlying: underlying type must not be *Named")
+ }
+ t.load().underlying = underlying
+}
+
+// AddMethod adds method m unless it is already in the method list.
+func (t *Named) AddMethod(m *Func) {
+ t.load()
+ if i, _ := lookupMethod(t.methods, m.pkg, m.name); i < 0 {
+ t.methods = append(t.methods, m)
+ }
+}
+
+func (t *Named) Underlying() Type { return t.load().underlying }
+func (t *Named) String() string { return TypeString(t, nil) }
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+// under returns the expanded underlying type of n0; possibly by following
+// forward chains of named types. If an underlying type is found, resolve
+// the chain by setting the underlying type for each defined type in the
+// chain before returning it. If no underlying type is found or a cycle
+// is detected, the result is Typ[Invalid]. If a cycle is detected and
+// n0.check != nil, the cycle is reported.
+func (n0 *Named) under() Type {
+ n0.expand()
+
+ u := n0.Underlying()
+
+ if u == Typ[Invalid] {
+ return u
+ }
+
+ // If the underlying type of a defined type is not a defined
+ // (incl. instance) type, then that is the desired underlying
+ // type.
+ switch u.(type) {
+ case nil:
+ return Typ[Invalid]
+ default:
+ // common case
+ return u
+ case *Named:
+ // handled below
+ }
+
+ if n0.check == nil {
+ panic("internal error: Named.check == nil but type is incomplete")
+ }
+
+ // Invariant: after this point n0 as well as any named types in its
+ // underlying chain should be set up when this function exits.
+ check := n0.check
+
+ // If we can't expand u at this point, it is invalid.
+ n := asNamed(u)
+ if n == nil {
+ n0.underlying = Typ[Invalid]
+ return n0.underlying
+ }
+
+ // Otherwise, follow the forward chain.
+ seen := map[*Named]int{n0: 0}
+ path := []Object{n0.obj}
+ for {
+ u = n.Underlying()
+ if u == nil {
+ u = Typ[Invalid]
+ break
+ }
+ var n1 *Named
+ switch u1 := u.(type) {
+ case *Named:
+ u1.expand()
+ n1 = u1
+ }
+ if n1 == nil {
+ break // end of chain
+ }
+
+ seen[n] = len(seen)
+ path = append(path, n.obj)
+ n = n1
+
+ if i, ok := seen[n]; ok {
+ // cycle
+ check.cycleError(path[i:])
+ u = Typ[Invalid]
+ break
+ }
+ }
+
+ for n := range seen {
+ // We should never have to update the underlying type of an imported type;
+ // those underlying types should have been resolved during the import.
+ // Also, doing so would lead to a race condition (was issue #31749).
+ // Do this check always, not just in debug mode (it's cheap).
+ if n.obj.pkg != check.pkg {
+ panic("internal error: imported type with unresolved underlying type")
+ }
+ n.underlying = u
+ }
+
+ return u
+}
+
+func (n *Named) setUnderlying(typ Type) {
+ if n != nil {
+ n.underlying = typ
+ }
+}
+
+// instance holds position information for use in lazy instantiation.
+//
+// TODO(rfindley): come up with a better name for this type, now that its usage
+// has changed.
+type instance struct {
+ pos token.Pos // position of type instantiation; for error reporting only
+ posList []token.Pos // position of each targ; for error reporting only
+}
+
+// expand ensures that the underlying type of n is instantiated.
+// The underlying type will be Typ[Invalid] if there was an error.
+// TODO(rfindley): expand would be a better name for this method, but conflicts
+// with the existing concept of lazy expansion. Need to reconcile this.
+func (n *Named) expand() {
+ if n.instance != nil {
+ // n must be loaded before instantiation, in order to have accurate
+ // tparams. This is done implicitly by the call to n.TParams, but making it
+ // explicit is harmless: load is idempotent.
+ n.load()
+ inst := n.check.instantiate(n.instance.pos, n.orig.underlying, n.TParams().list(), n.targs, n.instance.posList)
+ n.underlying = inst
+ n.fromRHS = inst
+ n.instance = nil
+ }
+}
+
+// expand expands uninstantiated named types and leaves all other types alone.
+// expand does not recurse.
+func expand(typ Type) Type {
+ if t, _ := typ.(*Named); t != nil {
+ t.expand()
+ }
+ return typ
+}
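
For illustration only (not part of this change): the exported surface of the new named.go works without a Checker, and NewNamed sets the object's type when it is still nil, as documented above. A minimal sketch; the package path and the name Celsius are invented.

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	pkg := types.NewPackage("example.com/p", "p")
	obj := types.NewTypeName(token.NoPos, pkg, "Celsius", nil)
	// obj has no type yet, so NewNamed also sets obj's type to the returned *Named.
	named := types.NewNamed(obj, types.Typ[types.Float64], nil)

	fmt.Println(named)              // example.com/p.Celsius
	fmt.Println(named.Underlying()) // float64
	fmt.Println(named.Obj() == obj) // true
}
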
diff --git a/src/go/types/object.go b/src/go/types/object.go
index 50346ec691..7266623fbe 100644
--- a/src/go/types/object.go
+++ b/src/go/types/object.go
@@ -230,6 +230,14 @@ func NewTypeName(pos token.Pos, pkg *Package, name string, typ Type) *TypeName {
return &TypeName{object{nil, pos, pkg, name, typ, 0, colorFor(typ), token.NoPos}}
}
+// _NewTypeNameLazy returns a new defined type like NewTypeName, but it
+// lazily calls resolve to finish constructing the Named object.
+func _NewTypeNameLazy(pos token.Pos, pkg *Package, name string, resolve func(named *Named) (tparams []*TypeName, underlying Type, methods []*Func)) *TypeName {
+ obj := NewTypeName(pos, pkg, name, nil)
+ NewNamed(obj, nil, nil).resolve = resolve
+ return obj
+}
+
// IsAlias reports whether obj is an alias name for a type.
func (obj *TypeName) IsAlias() bool {
switch t := obj.typ.(type) {
@@ -421,6 +429,9 @@ func writeObject(buf *bytes.Buffer, obj Object, qf Qualifier) {
if _, ok := typ.(*Basic); ok {
return
}
+ if named, _ := typ.(*Named); named != nil && named.TParams().Len() > 0 {
+ writeTParamList(buf, named.TParams().list(), qf, nil)
+ }
if tname.IsAlias() {
buf.WriteString(" =")
} else {
diff --git a/src/go/types/object_test.go b/src/go/types/object_test.go
index 2b6057bd93..0ff8fdd6fa 100644
--- a/src/go/types/object_test.go
+++ b/src/go/types/object_test.go
@@ -22,7 +22,7 @@ func TestIsAlias(t *testing.T) {
check(Unsafe.Scope().Lookup("Pointer").(*TypeName), false)
for _, name := range Universe.Names() {
if obj, _ := Universe.Lookup(name).(*TypeName); obj != nil {
- check(obj, name == "byte" || name == "rune")
+ check(obj, name == "any" || name == "byte" || name == "rune")
}
}
diff --git a/src/go/types/operand.go b/src/go/types/operand.go
index 6463728cec..aea8bf5e7a 100644
--- a/src/go/types/operand.go
+++ b/src/go/types/operand.go
@@ -159,16 +159,20 @@ func operandString(x *operand, qf Qualifier) string {
if hasType {
if x.typ != Typ[Invalid] {
var intro string
- switch {
- case isGeneric(x.typ):
- intro = " of generic type "
- case asTypeParam(x.typ) != nil:
- intro = " of type parameter type "
- default:
+ var tpar *TypeParam
+ if isGeneric(x.typ) {
+ intro = " of parameterized type "
+ } else if tpar = asTypeParam(x.typ); tpar != nil {
+ intro = " of type parameter "
+ } else {
intro = " of type "
}
buf.WriteString(intro)
WriteType(&buf, x.typ, qf)
+ if tpar != nil {
+ buf.WriteString(" constrained by ")
+ WriteType(&buf, tpar.bound, qf) // do not compute interface type sets here
+ }
} else {
buf.WriteString(" with invalid type")
}
@@ -233,20 +237,35 @@ func (x *operand) assignableTo(check *Checker, T Type, reason *string) (bool, er
V := x.typ
+ const debugAssignableTo = false
+ if debugAssignableTo && check != nil {
+ check.dump("V = %s", V)
+ check.dump("T = %s", T)
+ }
+
// x's type is identical to T
- if check.identical(V, T) {
+ if Identical(V, T) {
return true, 0
}
Vu := optype(V)
Tu := optype(T)
+ if debugAssignableTo && check != nil {
+ check.dump("Vu = %s", Vu)
+ check.dump("Tu = %s", Tu)
+ }
+
// x is an untyped value representable by a value of type T.
if isUntyped(Vu) {
- if t, ok := Tu.(*_Sum); ok {
- return t.is(func(t Type) bool {
+ if t, ok := Tu.(*Union); ok {
+ return t.is(func(t *term) bool {
// TODO(gri) this could probably be more efficient
- ok, _ := x.assignableTo(check, t, reason)
+ if t.tilde {
+ // TODO(gri) We need to check assignability
+ // for the underlying type of x.
+ }
+ ok, _ := x.assignableTo(check, t.typ, reason)
return ok
}), _IncompatibleAssign
}
@@ -257,7 +276,7 @@ func (x *operand) assignableTo(check *Checker, T Type, reason *string) (bool, er
// x's type V and T have identical underlying types
// and at least one of V or T is not a named type
- if check.identical(Vu, Tu) && (!isNamed(V) || !isNamed(T)) {
+ if Identical(Vu, Tu) && (!isNamed(V) || !isNamed(T)) {
return true, 0
}
@@ -266,7 +285,7 @@ func (x *operand) assignableTo(check *Checker, T Type, reason *string) (bool, er
if m, wrongType := check.missingMethod(V, Ti, true); m != nil /* Implements(V, Ti) */ {
if reason != nil {
if wrongType != nil {
- if check.identical(m.typ, wrongType.typ) {
+ if Identical(m.typ, wrongType.typ) {
*reason = fmt.Sprintf("missing method %s (%s has pointer receiver)", m.name, m.name)
} else {
*reason = fmt.Sprintf("wrong type for method %s (have %s, want %s)", m.Name(), wrongType.typ, m.typ)
@@ -285,7 +304,7 @@ func (x *operand) assignableTo(check *Checker, T Type, reason *string) (bool, er
// type, x's type V and T have identical element types,
// and at least one of V or T is not a named type
if Vc, ok := Vu.(*Chan); ok && Vc.dir == SendRecv {
- if Tc, ok := Tu.(*Chan); ok && check.identical(Vc.elem, Tc.elem) {
+ if Tc, ok := Tu.(*Chan); ok && Identical(Vc.elem, Tc.elem) {
return !isNamed(V) || !isNamed(T), _InvalidChanAssign
}
}
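
For illustration only (not part of this change): assignableTo now defers to the exported, Checker-free Identical, and the channel case above is observable through the public API. A minimal sketch:

package main

import (
	"fmt"
	"go/types"
)

func main() {
	bidi := types.NewChan(types.SendRecv, types.Typ[types.Int])
	sendOnly := types.NewChan(types.SendOnly, types.Typ[types.Int])

	fmt.Println(types.Identical(bidi, sendOnly))    // false: channel directions differ
	fmt.Println(types.AssignableTo(bidi, sendOnly)) // true: bidirectional is assignable to send-only
	fmt.Println(types.AssignableTo(sendOnly, bidi)) // false
}
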
diff --git a/src/go/types/pointer.go b/src/go/types/pointer.go
new file mode 100644
index 0000000000..6352ee57e2
--- /dev/null
+++ b/src/go/types/pointer.go
@@ -0,0 +1,19 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+// A Pointer represents a pointer type.
+type Pointer struct {
+ base Type // element type
+}
+
+// NewPointer returns a new pointer type for the given element (base) type.
+func NewPointer(elem Type) *Pointer { return &Pointer{base: elem} }
+
+// Elem returns the element type for the given pointer p.
+func (p *Pointer) Elem() Type { return p.base }
+
+func (t *Pointer) Underlying() Type { return t }
+func (t *Pointer) String() string { return TypeString(t, nil) }
diff --git a/src/go/types/predicates.go b/src/go/types/predicates.go
index 7bb026414f..23924693fd 100644
--- a/src/go/types/predicates.go
+++ b/src/go/types/predicates.go
@@ -6,15 +6,11 @@
package types
-import (
- "go/token"
-)
-
// isNamed reports whether typ has a name.
// isNamed may be called with types that are not fully set up.
func isNamed(typ Type) bool {
switch typ.(type) {
- case *Basic, *Named, *_TypeParam, *instance:
+ case *Basic, *Named, *TypeParam:
return true
}
return false
@@ -25,15 +21,15 @@ func isNamed(typ Type) bool {
func isGeneric(typ Type) bool {
// A parameterized type is only generic if it doesn't have an instantiation already.
named, _ := typ.(*Named)
- return named != nil && named.obj != nil && named.tparams != nil && named.targs == nil
+ return named != nil && named.obj != nil && named.targs == nil && named.TParams() != nil
}
func is(typ Type, what BasicInfo) bool {
- switch t := optype(typ).(type) {
+ switch t := under(typ).(type) {
case *Basic:
return t.info&what != 0
- case *_Sum:
- return t.is(func(typ Type) bool { return is(typ, what) })
+ case *TypeParam:
+ return t.underIs(func(typ Type) bool { return is(typ, what) })
}
return false
}
@@ -61,9 +57,6 @@ func isNumericOrString(typ Type) bool { return is(typ, IsNumeric|IsString) }
func isTyped(typ Type) bool {
// isTyped is called with types that are not fully
// set up. Must not call asBasic()!
- // A *Named or *instance type is always typed, so
- // we only need to check if we have a true *Basic
- // type.
t, _ := typ.(*Basic)
return t == nil || t.info&IsUntyped == 0
}
@@ -100,19 +93,7 @@ func comparable(T Type, seen map[Type]bool) bool {
}
seen[T] = true
- // If T is a type parameter not constrained by any type
- // list (i.e., it's underlying type is the top type),
- // T is comparable if it has the == method. Otherwise,
- // the underlying type "wins". For instance
- //
- // interface{ comparable; type []byte }
- //
- // is not comparable because []byte is not comparable.
- if t := asTypeParam(T); t != nil && optype(t) == theTop {
- return t.Bound()._IsComparable()
- }
-
- switch t := optype(T).(type) {
+ switch t := under(T).(type) {
case *Basic:
// assume invalid types to be comparable
// to avoid follow-up errors
@@ -128,42 +109,25 @@ func comparable(T Type, seen map[Type]bool) bool {
return true
case *Array:
return comparable(t.elem, seen)
- case *_Sum:
- pred := func(t Type) bool {
- return comparable(t, seen)
- }
- return t.is(pred)
- case *_TypeParam:
- return t.Bound()._IsComparable()
+ case *TypeParam:
+ return t.iface().IsComparable()
}
return false
}
// hasNil reports whether a type includes the nil value.
func hasNil(typ Type) bool {
- switch t := optype(typ).(type) {
+ switch t := under(typ).(type) {
case *Basic:
return t.kind == UnsafePointer
case *Slice, *Pointer, *Signature, *Interface, *Map, *Chan:
return true
- case *_Sum:
- return t.is(hasNil)
+ case *TypeParam:
+ return t.underIs(hasNil)
}
return false
}
-// identical reports whether x and y are identical types.
-// Receivers of Signature types are ignored.
-func (check *Checker) identical(x, y Type) bool {
- return check.identical0(x, y, true, nil)
-}
-
-// identicalIgnoreTags reports whether x and y are identical types if tags are ignored.
-// Receivers of Signature types are ignored.
-func (check *Checker) identicalIgnoreTags(x, y Type) bool {
- return check.identical0(x, y, false, nil)
-}
-
// An ifacePair is a node in a stack of interface type pairs compared for identity.
type ifacePair struct {
x, y *Interface
@@ -175,10 +139,10 @@ func (p *ifacePair) identical(q *ifacePair) bool {
}
// For changes to this code the corresponding changes should be made to unifier.nify.
-func (check *Checker) identical0(x, y Type, cmpTags bool, p *ifacePair) bool {
+func identical(x, y Type, cmpTags bool, p *ifacePair) bool {
// types must be expanded for comparison
- x = expandf(x)
- y = expandf(y)
+ x = expand(x)
+ y = expand(y)
if x == y {
return true
@@ -199,13 +163,13 @@ func (check *Checker) identical0(x, y Type, cmpTags bool, p *ifacePair) bool {
if y, ok := y.(*Array); ok {
// If one or both array lengths are unknown (< 0) due to some error,
// assume they are the same to avoid spurious follow-on errors.
- return (x.len < 0 || y.len < 0 || x.len == y.len) && check.identical0(x.elem, y.elem, cmpTags, p)
+ return (x.len < 0 || y.len < 0 || x.len == y.len) && identical(x.elem, y.elem, cmpTags, p)
}
case *Slice:
// Two slice types are identical if they have identical element types.
if y, ok := y.(*Slice); ok {
- return check.identical0(x.elem, y.elem, cmpTags, p)
+ return identical(x.elem, y.elem, cmpTags, p)
}
case *Struct:
@@ -220,7 +184,7 @@ func (check *Checker) identical0(x, y Type, cmpTags bool, p *ifacePair) bool {
if f.embedded != g.embedded ||
cmpTags && x.Tag(i) != y.Tag(i) ||
!f.sameId(g.pkg, g.name) ||
- !check.identical0(f.typ, g.typ, cmpTags, p) {
+ !identical(f.typ, g.typ, cmpTags, p) {
return false
}
}
@@ -231,7 +195,7 @@ func (check *Checker) identical0(x, y Type, cmpTags bool, p *ifacePair) bool {
case *Pointer:
// Two pointer types are identical if they have identical base types.
if y, ok := y.(*Pointer); ok {
- return check.identical0(x.base, y.base, cmpTags, p)
+ return identical(x.base, y.base, cmpTags, p)
}
case *Tuple:
@@ -242,7 +206,7 @@ func (check *Checker) identical0(x, y Type, cmpTags bool, p *ifacePair) bool {
if x != nil {
for i, v := range x.vars {
w := y.vars[i]
- if !check.identical0(v.typ, w.typ, cmpTags, p) {
+ if !identical(v.typ, w.typ, cmpTags, p) {
return false
}
}
@@ -260,49 +224,37 @@ func (check *Checker) identical0(x, y Type, cmpTags bool, p *ifacePair) bool {
// parameter names.
if y, ok := y.(*Signature); ok {
return x.variadic == y.variadic &&
- check.identicalTParams(x.tparams, y.tparams, cmpTags, p) &&
- check.identical0(x.params, y.params, cmpTags, p) &&
- check.identical0(x.results, y.results, cmpTags, p)
+ identicalTParams(x.TParams().list(), y.TParams().list(), cmpTags, p) &&
+ identical(x.params, y.params, cmpTags, p) &&
+ identical(x.results, y.results, cmpTags, p)
}
- case *_Sum:
- // Two sum types are identical if they contain the same types.
- // (Sum types always consist of at least two types. Also, the
- // the set (list) of types in a sum type consists of unique
- // types - each type appears exactly once. Thus, two sum types
+ case *Union:
+ // Two union types are identical if they contain the same terms.
+ // The set (list) of types in a union type consists of unique
+ // types - each type appears exactly once. Thus, two union types
// must contain the same number of types to have a chance of
// being equal.
- if y, ok := y.(*_Sum); ok && len(x.types) == len(y.types) {
- // Every type in x.types must be in y.types.
- // Quadratic algorithm, but probably good enough for now.
- // TODO(gri) we need a fast quick type ID/hash for all types.
- L:
- for _, x := range x.types {
- for _, y := range y.types {
- if Identical(x, y) {
- continue L // x is in y.types
- }
- }
- return false // x is not in y.types
- }
- return true
+ if y, ok := y.(*Union); ok {
+ return identicalTerms(x.terms, y.terms)
}
case *Interface:
+ // Two interface types are identical if they describe the same type sets.
+ // With the existing implementation restriction, this simplifies to:
+ //
// Two interface types are identical if they have the same set of methods with
- // the same names and identical function types. Lower-case method names from
- // different packages are always different. The order of the methods is irrelevant.
+ // the same names and identical function types, and if any type restrictions
+ // are the same. Lower-case method names from different packages are always
+ // different. The order of the methods is irrelevant.
if y, ok := y.(*Interface); ok {
- // If identical0 is called (indirectly) via an external API entry point
- // (such as Identical, IdenticalIgnoreTags, etc.), check is nil. But in
- // that case, interfaces are expected to be complete and lazy completion
- // here is not needed.
- if check != nil {
- check.completeInterface(token.NoPos, x)
- check.completeInterface(token.NoPos, y)
+ xset := x.typeSet()
+ yset := y.typeSet()
+ if !Identical(xset.types, yset.types) {
+ return false
}
- a := x.allMethods
- b := y.allMethods
+ a := xset.methods
+ b := yset.methods
if len(a) == len(b) {
// Interface types are the only types where cycles can occur
// that are not "terminated" via named types; and such cycles
@@ -339,7 +291,7 @@ func (check *Checker) identical0(x, y Type, cmpTags bool, p *ifacePair) bool {
}
for i, f := range a {
g := b[i]
- if f.Id() != g.Id() || !check.identical0(f.typ, g.typ, cmpTags, q) {
+ if f.Id() != g.Id() || !identical(f.typ, g.typ, cmpTags, q) {
return false
}
}
@@ -350,14 +302,14 @@ func (check *Checker) identical0(x, y Type, cmpTags bool, p *ifacePair) bool {
case *Map:
// Two map types are identical if they have identical key and value types.
if y, ok := y.(*Map); ok {
- return check.identical0(x.key, y.key, cmpTags, p) && check.identical0(x.elem, y.elem, cmpTags, p)
+ return identical(x.key, y.key, cmpTags, p) && identical(x.elem, y.elem, cmpTags, p)
}
case *Chan:
// Two channel types are identical if they have identical value types
// and the same direction.
if y, ok := y.(*Chan); ok {
- return x.dir == y.dir && check.identical0(x.elem, y.elem, cmpTags, p)
+ return x.dir == y.dir && identical(x.elem, y.elem, cmpTags, p)
}
case *Named:
@@ -370,16 +322,12 @@ func (check *Checker) identical0(x, y Type, cmpTags bool, p *ifacePair) bool {
return x.obj == y.obj
}
- case *_TypeParam:
+ case *TypeParam:
// nothing to do (x and y being equal is caught in the very beginning of this function)
- // case *instance:
- // unreachable since types are expanded
-
- case *bottom, *top:
- // Either both types are theBottom, or both are theTop in which
- // case the initial x == y check will have caught them. Otherwise
- // they are not identical.
+ case *top:
+ // Either both types are theTop in which case the initial x == y check
+ // will have caught them. Otherwise they are not identical.
case nil:
// avoid a crash in case of nil type
@@ -391,13 +339,13 @@ func (check *Checker) identical0(x, y Type, cmpTags bool, p *ifacePair) bool {
return false
}
-func (check *Checker) identicalTParams(x, y []*TypeName, cmpTags bool, p *ifacePair) bool {
+func identicalTParams(x, y []*TypeName, cmpTags bool, p *ifacePair) bool {
if len(x) != len(y) {
return false
}
for i, x := range x {
y := y[i]
- if !check.identical0(x.typ.(*_TypeParam).bound, y.typ.(*_TypeParam).bound, cmpTags, p) {
+ if !identical(x.typ.(*TypeParam).bound, y.typ.(*TypeParam).bound, cmpTags, p) {
return false
}
}
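
For illustration only (not part of this change): since identical no longer needs a *Checker, the exported Identical and IdenticalIgnoreTags expose the cmpTags distinction shown above. A minimal sketch comparing two struct types that differ only in a field tag; all names are invented.

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	mkStruct := func(tag string) *types.Struct {
		f := types.NewField(token.NoPos, nil, "Name", types.Typ[types.String], false)
		var tags []string
		if tag != "" {
			tags = []string{tag}
		}
		return types.NewStruct([]*types.Var{f}, tags)
	}
	a := mkStruct(`json:"name"`)
	b := mkStruct("")

	fmt.Println(types.Identical(a, b))           // false: field tags differ
	fmt.Println(types.IdenticalIgnoreTags(a, b)) // true: tags are ignored
}
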
diff --git a/src/go/types/resolver.go b/src/go/types/resolver.go
index 114647a2ff..5e58c3dcfd 100644
--- a/src/go/types/resolver.go
+++ b/src/go/types/resolver.go
@@ -276,7 +276,7 @@ func (check *Checker) collectObjects() {
}
if name == "init" {
- check.errorf(d.spec.Name, _InvalidInitDecl, "cannot import package as init - init must be a func")
+ check.errorf(d.spec, _InvalidInitDecl, "cannot import package as init - init must be a func")
return
}
@@ -309,20 +309,24 @@ func (check *Checker) collectObjects() {
check.dotImportMap = make(map[dotImportKey]*PkgName)
}
// merge imported scope with file scope
- for _, obj := range imp.scope.elems {
+ for name, obj := range imp.scope.elems {
+ // Note: Avoid eager resolve(name, obj) here, so we only
+ // resolve dot-imported objects as needed.
+
// A package scope may contain non-exported objects,
// do not import them!
- if obj.Exported() {
+ if token.IsExported(name) {
// declare dot-imported object
// (Do not use check.declare because it modifies the object
// via Object.setScopePos, which leads to a race condition;
// the object may be imported into more than one file scope
// concurrently. See issue #32154.)
- if alt := fileScope.Insert(obj); alt != nil {
- check.errorf(d.spec.Name, _DuplicateDecl, "%s redeclared in this block", obj.Name())
+ if alt := fileScope.Lookup(name); alt != nil {
+ check.errorf(d.spec.Name, _DuplicateDecl, "%s redeclared in this block", alt.Name())
check.reportAltDecl(alt)
} else {
- check.dotImportMap[dotImportKey{fileScope, obj}] = pkgName
+ fileScope.insert(name, obj)
+ check.dotImportMap[dotImportKey{fileScope, name}] = pkgName
}
}
}
@@ -443,8 +447,9 @@ func (check *Checker) collectObjects() {
// verify that objects in package and file scopes have different names
for _, scope := range fileScopes {
- for _, obj := range scope.elems {
- if alt := pkg.scope.Lookup(obj.Name()); alt != nil {
+ for name, obj := range scope.elems {
+ if alt := pkg.scope.Lookup(name); alt != nil {
+ obj = resolve(name, obj)
if pkg, ok := obj.(*PkgName); ok {
check.errorf(alt, _DuplicateDecl, "%s already declared through import of %s", alt.Name(), pkg.Imported())
check.reportAltDecl(pkg)
@@ -499,10 +504,12 @@ L: // unpack receiver type
}
// unpack type parameters, if any
- if ptyp, _ := rtyp.(*ast.IndexExpr); ptyp != nil {
- rtyp = ptyp.X
+ switch rtyp.(type) {
+ case *ast.IndexExpr, *ast.MultiIndexExpr:
+ ix := typeparams.UnpackIndexExpr(rtyp)
+ rtyp = ix.X
if unpackParams {
- for _, arg := range typeparams.UnpackExpr(ptyp.Index) {
+ for _, arg := range ix.Indices {
var par *ast.Ident
switch arg := arg.(type) {
case *ast.Ident:
@@ -510,7 +517,7 @@ L: // unpack receiver type
case *ast.BadExpr:
// ignore - error already reported by parser
case nil:
- check.invalidAST(ptyp, "parameterized receiver contains nil parameters")
+ check.invalidAST(ix.Orig, "parameterized receiver contains nil parameters")
default:
check.errorf(arg, _Todo, "receiver type parameter %s must be an identifier", arg)
}
diff --git a/src/go/types/sanitize.go b/src/go/types/sanitize.go
deleted file mode 100644
index 727ec173ea..0000000000
--- a/src/go/types/sanitize.go
+++ /dev/null
@@ -1,206 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package types
-
-// sanitizeInfo walks the types contained in info to ensure that all instances
-// are expanded.
-//
-// This includes some objects that may be shared across concurrent
-// type-checking passes (such as those in the universe scope), so we are
-// careful here not to write types that are already sanitized. This avoids a
-// data race as any shared types should already be sanitized.
-func sanitizeInfo(info *Info) {
- var s sanitizer = make(map[Type]Type)
-
- // Note: Some map entries are not references.
- // If modified, they must be assigned back.
-
- for e, tv := range info.Types {
- if typ := s.typ(tv.Type); typ != tv.Type {
- tv.Type = typ
- info.Types[e] = tv
- }
- }
-
- inferred := getInferred(info)
- for e, inf := range inferred {
- changed := false
- for i, targ := range inf.Targs {
- if typ := s.typ(targ); typ != targ {
- inf.Targs[i] = typ
- changed = true
- }
- }
- if typ := s.typ(inf.Sig); typ != inf.Sig {
- inf.Sig = typ.(*Signature)
- changed = true
- }
- if changed {
- inferred[e] = inf
- }
- }
-
- for _, obj := range info.Defs {
- if obj != nil {
- if typ := s.typ(obj.Type()); typ != obj.Type() {
- obj.setType(typ)
- }
- }
- }
-
- for _, obj := range info.Uses {
- if obj != nil {
- if typ := s.typ(obj.Type()); typ != obj.Type() {
- obj.setType(typ)
- }
- }
- }
-
- // TODO(gri) sanitize as needed
- // - info.Implicits
- // - info.Selections
- // - info.Scopes
- // - info.InitOrder
-}
-
-type sanitizer map[Type]Type
-
-func (s sanitizer) typ(typ Type) Type {
- if typ == nil {
- return nil
- }
-
- if t, found := s[typ]; found {
- return t
- }
- s[typ] = typ
-
- switch t := typ.(type) {
- case *Basic, *bottom, *top:
- // nothing to do
-
- case *Array:
- if elem := s.typ(t.elem); elem != t.elem {
- t.elem = elem
- }
-
- case *Slice:
- if elem := s.typ(t.elem); elem != t.elem {
- t.elem = elem
- }
-
- case *Struct:
- s.varList(t.fields)
-
- case *Pointer:
- if base := s.typ(t.base); base != t.base {
- t.base = base
- }
-
- case *Tuple:
- s.tuple(t)
-
- case *Signature:
- s.var_(t.recv)
- s.tuple(t.params)
- s.tuple(t.results)
-
- case *_Sum:
- s.typeList(t.types)
-
- case *Interface:
- s.funcList(t.methods)
- if types := s.typ(t.types); types != t.types {
- t.types = types
- }
- s.typeList(t.embeddeds)
- s.funcList(t.allMethods)
- if allTypes := s.typ(t.allTypes); allTypes != t.allTypes {
- t.allTypes = allTypes
- }
-
- case *Map:
- if key := s.typ(t.key); key != t.key {
- t.key = key
- }
- if elem := s.typ(t.elem); elem != t.elem {
- t.elem = elem
- }
-
- case *Chan:
- if elem := s.typ(t.elem); elem != t.elem {
- t.elem = elem
- }
-
- case *Named:
- if debug && t.check != nil {
- panic("internal error: Named.check != nil")
- }
- if orig := s.typ(t.orig); orig != t.orig {
- t.orig = orig
- }
- if under := s.typ(t.underlying); under != t.underlying {
- t.underlying = under
- }
- s.typeList(t.targs)
- s.funcList(t.methods)
-
- case *_TypeParam:
- if bound := s.typ(t.bound); bound != t.bound {
- t.bound = bound
- }
-
- case *instance:
- typ = t.expand()
- s[t] = typ
-
- default:
- panic("unimplemented")
- }
-
- return typ
-}
-
-func (s sanitizer) var_(v *Var) {
- if v != nil {
- if typ := s.typ(v.typ); typ != v.typ {
- v.typ = typ
- }
- }
-}
-
-func (s sanitizer) varList(list []*Var) {
- for _, v := range list {
- s.var_(v)
- }
-}
-
-func (s sanitizer) tuple(t *Tuple) {
- if t != nil {
- s.varList(t.vars)
- }
-}
-
-func (s sanitizer) func_(f *Func) {
- if f != nil {
- if typ := s.typ(f.typ); typ != f.typ {
- f.typ = typ
- }
- }
-}
-
-func (s sanitizer) funcList(list []*Func) {
- for _, f := range list {
- s.func_(f)
- }
-}
-
-func (s sanitizer) typeList(list []Type) {
- for i, t := range list {
- if typ := s.typ(t); typ != t {
- list[i] = typ
- }
- }
-}
diff --git a/src/go/types/scope.go b/src/go/types/scope.go
index 26c28d1c4e..010727eb72 100644
--- a/src/go/types/scope.go
+++ b/src/go/types/scope.go
@@ -13,6 +13,7 @@ import (
"io"
"sort"
"strings"
+ "sync"
)
// A Scope maintains a set of objects and links to its containing
@@ -22,6 +23,7 @@ import (
type Scope struct {
parent *Scope
children []*Scope
+ number int // parent.children[number-1] is this scope; 0 if there is no parent
elems map[string]Object // lazily allocated
pos, end token.Pos // scope extent; may be invalid
comment string // for debugging only
@@ -31,10 +33,11 @@ type Scope struct {
// NewScope returns a new, empty scope contained in the given parent
// scope, if any. The comment is for debugging only.
func NewScope(parent *Scope, pos, end token.Pos, comment string) *Scope {
- s := &Scope{parent, nil, nil, pos, end, comment, false}
+ s := &Scope{parent, nil, 0, nil, pos, end, comment, false}
// don't add children to Universe scope!
if parent != nil && parent != Universe {
parent.children = append(parent.children, s)
+ s.number = len(parent.children)
}
return s
}
@@ -66,7 +69,7 @@ func (s *Scope) Child(i int) *Scope { return s.children[i] }
// Lookup returns the object in scope s with the given name if such an
// object exists; otherwise the result is nil.
func (s *Scope) Lookup(name string) Object {
- return s.elems[name]
+ return resolve(name, s.elems[name])
}
// LookupParent follows the parent chain of scopes starting with s until
@@ -81,7 +84,7 @@ func (s *Scope) Lookup(name string) Object {
// whose scope is the scope of the package that exported them.
func (s *Scope) LookupParent(name string, pos token.Pos) (*Scope, Object) {
for ; s != nil; s = s.parent {
- if obj := s.elems[name]; obj != nil && (!pos.IsValid() || obj.scopePos() <= pos) {
+ if obj := s.Lookup(name); obj != nil && (!pos.IsValid() || obj.scopePos() <= pos) {
return s, obj
}
}
@@ -95,19 +98,38 @@ func (s *Scope) LookupParent(name string, pos token.Pos) (*Scope, Object) {
// if not already set, and returns nil.
func (s *Scope) Insert(obj Object) Object {
name := obj.Name()
- if alt := s.elems[name]; alt != nil {
+ if alt := s.Lookup(name); alt != nil {
return alt
}
- if s.elems == nil {
- s.elems = make(map[string]Object)
- }
- s.elems[name] = obj
+ s.insert(name, obj)
if obj.Parent() == nil {
obj.setParent(s)
}
return nil
}
+// _InsertLazy is like Insert, but allows deferring construction of the
+// inserted object until it's accessed with Lookup. The Object
+// returned by resolve must have the same name as given to _InsertLazy.
+// If s already contains an alternative object with the same name,
+// _InsertLazy leaves s unchanged and returns false. Otherwise it
+// records the binding and returns true. The object's parent scope
+// will be set to s after resolve is called.
+func (s *Scope) _InsertLazy(name string, resolve func() Object) bool {
+ if s.elems[name] != nil {
+ return false
+ }
+ s.insert(name, &lazyObject{parent: s, resolve: resolve})
+ return true
+}
+
+func (s *Scope) insert(name string, obj Object) {
+ if s.elems == nil {
+ s.elems = make(map[string]Object)
+ }
+ s.elems[name] = obj
+}
+
// squash merges s with its parent scope p by adding all
// objects of s to p, adding all children of s to the
// children of p, and removing s from p's children.
@@ -117,7 +139,8 @@ func (s *Scope) Insert(obj Object) Object {
func (s *Scope) squash(err func(obj, alt Object)) {
p := s.parent
assert(p != nil)
- for _, obj := range s.elems {
+ for name, obj := range s.elems {
+ obj = resolve(name, obj)
obj.setParent(nil)
if alt := p.Insert(obj); alt != nil {
err(obj, alt)
@@ -196,7 +219,7 @@ func (s *Scope) WriteTo(w io.Writer, n int, recurse bool) {
indn1 := indn + ind
for _, name := range s.Names() {
- fmt.Fprintf(w, "%s%s\n", indn1, s.elems[name])
+ fmt.Fprintf(w, "%s%s\n", indn1, s.Lookup(name))
}
if recurse {
@@ -214,3 +237,57 @@ func (s *Scope) String() string {
s.WriteTo(&buf, 0, false)
return buf.String()
}
+
+// A lazyObject represents an imported Object that has not been fully
+// resolved yet by its importer.
+type lazyObject struct {
+ parent *Scope
+ resolve func() Object
+ obj Object
+ once sync.Once
+}
+
+// resolve returns the Object represented by obj, resolving lazy
+// objects as appropriate.
+func resolve(name string, obj Object) Object {
+ if lazy, ok := obj.(*lazyObject); ok {
+ lazy.once.Do(func() {
+ obj := lazy.resolve()
+
+ if _, ok := obj.(*lazyObject); ok {
+ panic("recursive lazy object")
+ }
+ if obj.Name() != name {
+ panic("lazy object has unexpected name")
+ }
+
+ if obj.Parent() == nil {
+ obj.setParent(lazy.parent)
+ }
+ lazy.obj = obj
+ })
+
+ obj = lazy.obj
+ }
+ return obj
+}
+
+// stub implementations so *lazyObject implements Object and we can
+// store them directly into Scope.elems.
+func (*lazyObject) Parent() *Scope { panic("unreachable") }
+func (*lazyObject) Pos() token.Pos { panic("unreachable") }
+func (*lazyObject) Pkg() *Package { panic("unreachable") }
+func (*lazyObject) Name() string { panic("unreachable") }
+func (*lazyObject) Type() Type { panic("unreachable") }
+func (*lazyObject) Exported() bool { panic("unreachable") }
+func (*lazyObject) Id() string { panic("unreachable") }
+func (*lazyObject) String() string { panic("unreachable") }
+func (*lazyObject) order() uint32 { panic("unreachable") }
+func (*lazyObject) color() color { panic("unreachable") }
+func (*lazyObject) setType(Type) { panic("unreachable") }
+func (*lazyObject) setOrder(uint32) { panic("unreachable") }
+func (*lazyObject) setColor(color color) { panic("unreachable") }
+func (*lazyObject) setParent(*Scope) { panic("unreachable") }
+func (*lazyObject) sameId(pkg *Package, name string) bool { panic("unreachable") }
+func (*lazyObject) scopePos() token.Pos { panic("unreachable") }
+func (*lazyObject) setScopePos(pos token.Pos) { panic("unreachable") }
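
For illustration only (not part of this change): lazyObject and _InsertLazy are unexported, so the sketch below only mirrors the pattern they implement: a scope-like map whose entries are constructed on first lookup, with sync.Once making concurrent resolution safe, analogous to Scope.Lookup calling resolve. All names here (lazyEntry, scope, insertLazy, lookup) are invented.

package main

import (
	"fmt"
	"sync"
)

// lazyEntry defers construction of its value until the first lookup.
type lazyEntry struct {
	once    sync.Once
	resolve func() string
	val     string
}

type scope struct{ elems map[string]*lazyEntry }

// insertLazy records a binding without resolving it; it reports whether the
// name was previously unbound, similar to _InsertLazy above.
func (s *scope) insertLazy(name string, resolve func() string) bool {
	if _, ok := s.elems[name]; ok {
		return false
	}
	if s.elems == nil {
		s.elems = make(map[string]*lazyEntry)
	}
	s.elems[name] = &lazyEntry{resolve: resolve}
	return true
}

// lookup resolves the entry at most once, even under concurrent calls.
func (s *scope) lookup(name string) string {
	e, ok := s.elems[name]
	if !ok {
		return ""
	}
	e.once.Do(func() { e.val = e.resolve() })
	return e.val
}

func main() {
	var s scope
	s.insertLazy("T", func() string {
		fmt.Println("resolving T") // printed exactly once
		return "type T"
	})
	fmt.Println(s.lookup("T"))
	fmt.Println(s.lookup("T")) // cached, no second resolve
}
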
diff --git a/src/go/types/signature.go b/src/go/types/signature.go
new file mode 100644
index 0000000000..5a69bb17b5
--- /dev/null
+++ b/src/go/types/signature.go
@@ -0,0 +1,352 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "fmt"
+ "go/ast"
+ "go/internal/typeparams"
+ "go/token"
+)
+
+// ----------------------------------------------------------------------------
+// API
+
+// A Signature represents a (non-builtin) function or method type.
+// The receiver is ignored when comparing signatures for identity.
+type Signature struct {
+ // We need to keep the scope in Signature (rather than passing it around
+ // and storing it in the Func Object) because when type-checking a function
+ // literal we call the general type checker which returns a general Type.
+ // We then unpack the *Signature and use the scope for the literal body.
+ rparams *TypeParams // receiver type parameters from left to right, or nil
+ tparams *TypeParams // type parameters from left to right, or nil
+ scope *Scope // function scope, present for package-local signatures
+ recv *Var // nil if not a method
+ params *Tuple // (incoming) parameters from left to right; or nil
+ results *Tuple // (outgoing) results from left to right; or nil
+ variadic bool // true if the last parameter's type is of the form ...T (or string, for append built-in only)
+}
+
+// NewSignature returns a new function type for the given receiver, parameters,
+// and results, any of which may be nil. If variadic is set, the function
+// is variadic, it must have at least one parameter, and the last parameter
+// must be of unnamed slice type.
+func NewSignature(recv *Var, params, results *Tuple, variadic bool) *Signature {
+ if variadic {
+ n := params.Len()
+ if n == 0 {
+ panic("types.NewSignature: variadic function must have at least one parameter")
+ }
+ if _, ok := params.At(n - 1).typ.(*Slice); !ok {
+ panic("types.NewSignature: variadic parameter must be of unnamed slice type")
+ }
+ }
+ return &Signature{recv: recv, params: params, results: results, variadic: variadic}
+}
+
+// Recv returns the receiver of signature s (if a method), or nil if a
+// function. It is ignored when comparing signatures for identity.
+//
+// For an abstract method, Recv returns the enclosing interface either
+// as a *Named or an *Interface. Due to embedding, an interface may
+// contain methods whose receiver type is a different interface.
+func (s *Signature) Recv() *Var { return s.recv }
+
+// TParams returns the type parameters of signature s, or nil.
+func (s *Signature) TParams() *TypeParams { return s.tparams }
+
+// SetTParams sets the type parameters of signature s.
+func (s *Signature) SetTParams(tparams []*TypeName) { s.tparams = bindTParams(tparams) }
+
+// RParams returns the receiver type parameters of signature s, or nil.
+func (s *Signature) RParams() *TypeParams { return s.rparams }
+
+// SetRParams sets the receiver type params of signature s.
+func (s *Signature) SetRParams(rparams []*TypeName) { s.rparams = bindTParams(rparams) }
+
+// Params returns the parameters of signature s, or nil.
+func (s *Signature) Params() *Tuple { return s.params }
+
+// Results returns the results of signature s, or nil.
+func (s *Signature) Results() *Tuple { return s.results }
+
+// Variadic reports whether the signature s is variadic.
+func (s *Signature) Variadic() bool { return s.variadic }
+
+func (t *Signature) Underlying() Type { return t }
+func (t *Signature) String() string { return TypeString(t, nil) }
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+// funcType type-checks a function or method type.
+func (check *Checker) funcType(sig *Signature, recvPar *ast.FieldList, ftyp *ast.FuncType) {
+ check.openScope(ftyp, "function")
+ check.scope.isFunc = true
+ check.recordScope(ftyp, check.scope)
+ sig.scope = check.scope
+ defer check.closeScope()
+
+ var recvTyp ast.Expr // rewritten receiver type; valid if != nil
+ if recvPar != nil && len(recvPar.List) > 0 {
+ // collect generic receiver type parameters, if any
+ // - a receiver type parameter is like any other type parameter, except that it is declared implicitly
+ // - the receiver specification acts as local declaration for its type parameters, which may be blank
+ _, rname, rparams := check.unpackRecv(recvPar.List[0].Type, true)
+ if len(rparams) > 0 {
+ // Blank identifiers don't get declared and regular type-checking of the instantiated
+ // parameterized receiver type expression fails in Checker.collectParams of receiver.
+ // Identify blank type parameters and substitute each with a unique new identifier named
+ // "n_" (where n is the parameter index) and which cannot conflict with any user-defined
+ // name.
+ var smap map[*ast.Ident]*ast.Ident // substitution map from "_" to "n_" identifiers
+ for i, p := range rparams {
+ if p.Name == "_" {
+ new := *p
+ new.Name = fmt.Sprintf("%d_", i)
+ rparams[i] = &new // use n_ identifier instead of _ so it can be looked up
+ if smap == nil {
+ smap = make(map[*ast.Ident]*ast.Ident)
+ }
+ smap[p] = &new
+ }
+ }
+ if smap != nil {
+ // blank identifiers were found => use rewritten receiver type
+ recvTyp = isubst(recvPar.List[0].Type, smap)
+ }
+ sig.rparams = bindTParams(check.declareTypeParams(nil, rparams))
+ // determine receiver type to get its type parameters
+ // and the respective type parameter bounds
+ var recvTParams []*TypeName
+ if rname != nil {
+ // recv should be a Named type (otherwise an error is reported elsewhere)
+ // Also: Don't report an error via genericType since it will be reported
+ // again when we type-check the signature.
+ // TODO(gri) maybe the receiver should be marked as invalid instead?
+ if recv := asNamed(check.genericType(rname, false)); recv != nil {
+ recvTParams = recv.TParams().list()
+ }
+ }
+ // provide type parameter bounds
+ // - only do this if we have the right number (otherwise an error is reported elsewhere)
+ if sig.RParams().Len() == len(recvTParams) {
+ // We have a list of *TypeNames but we need a list of Types.
+ list := make([]Type, sig.RParams().Len())
+ for i, t := range sig.RParams().list() {
+ list[i] = t.typ
+ }
+ smap := makeSubstMap(recvTParams, list)
+ for i, tname := range sig.RParams().list() {
+ bound := recvTParams[i].typ.(*TypeParam).bound
+ // bound is (possibly) parameterized in the context of the
+ // receiver type declaration. Substitute parameters for the
+ // current context.
+ // TODO(gri) should we assume now that bounds always exist?
+ // (no bound == empty interface)
+ if bound != nil {
+ bound = check.subst(tname.pos, bound, smap)
+ tname.typ.(*TypeParam).bound = bound
+ }
+ }
+ }
+ }
+ }
+
+ if tparams := typeparams.Get(ftyp); tparams != nil {
+ sig.tparams = check.collectTypeParams(tparams)
+ // Always type-check method type parameters but complain that they are not allowed.
+ // (A separate check is needed when type-checking interface method signatures because
+ // they don't have a receiver specification.)
+ if recvPar != nil {
+ check.errorf(tparams, _Todo, "methods cannot have type parameters")
+ }
+ }
+
+ // Value (non-type) parameters' scope starts in the function body. Use a temporary scope for their
+ // declarations and then squash that scope into the parent scope (and report any redeclarations at
+ // that time).
+ scope := NewScope(check.scope, token.NoPos, token.NoPos, "function body (temp. scope)")
+ recvList, _ := check.collectParams(scope, recvPar, recvTyp, false) // use rewritten receiver type, if any
+ params, variadic := check.collectParams(scope, ftyp.Params, nil, true)
+ results, _ := check.collectParams(scope, ftyp.Results, nil, false)
+ scope.squash(func(obj, alt Object) {
+ check.errorf(obj, _DuplicateDecl, "%s redeclared in this block", obj.Name())
+ check.reportAltDecl(alt)
+ })
+
+ if recvPar != nil {
+ // recv parameter list present (may be empty)
+ // spec: "The receiver is specified via an extra parameter section preceding the
+ // method name. That parameter section must declare a single parameter, the receiver."
+ var recv *Var
+ switch len(recvList) {
+ case 0:
+ // error reported by resolver
+ recv = NewParam(0, nil, "", Typ[Invalid]) // ignore recv below
+ default:
+ // more than one receiver
+ check.error(recvList[len(recvList)-1], _BadRecv, "method must have exactly one receiver")
+ fallthrough // continue with first receiver
+ case 1:
+ recv = recvList[0]
+ }
+
+ // TODO(gri) We should delay rtyp expansion to when we actually need the
+ // receiver; thus all checks here should be delayed to later.
+ rtyp, _ := deref(recv.typ)
+ rtyp = expand(rtyp)
+
+ // spec: "The receiver type must be of the form T or *T where T is a type name."
+ // (ignore invalid types - error was reported before)
+ if rtyp != Typ[Invalid] {
+ var err string
+ switch T := rtyp.(type) {
+ case *Named:
+ // spec: "The type denoted by T is called the receiver base type; it must not
+ // be a pointer or interface type and it must be declared in the same package
+ // as the method."
+ if T.obj.pkg != check.pkg {
+ err = "type not defined in this package"
+ } else {
+ // The underlying type of a receiver base type can be a type parameter;
+ // e.g. for methods with a generic receiver T[P] with type T[P any] P.
+ underIs(T, func(u Type) bool {
+ switch u := u.(type) {
+ case *Basic:
+ // unsafe.Pointer is treated like a regular pointer
+ if u.kind == UnsafePointer {
+ err = "unsafe.Pointer"
+ return false
+ }
+ case *Pointer, *Interface:
+ err = "pointer or interface type"
+ return false
+ }
+ return true
+ })
+ }
+ case *Basic:
+ err = "basic or unnamed type"
+ default:
+ check.errorf(recv, _InvalidRecv, "invalid receiver type %s", recv.typ)
+ }
+ if err != "" {
+ check.errorf(recv, _InvalidRecv, "invalid receiver type %s (%s)", recv.typ, err)
+ // ok to continue
+ }
+ }
+ sig.recv = recv
+ }
+
+ sig.params = NewTuple(params...)
+ sig.results = NewTuple(results...)
+ sig.variadic = variadic
+}
+
+// collectParams declares the parameters of list in scope and returns the corresponding
+// variable list. If type0 != nil, it is used instead of the first type in list.
+func (check *Checker) collectParams(scope *Scope, list *ast.FieldList, type0 ast.Expr, variadicOk bool) (params []*Var, variadic bool) {
+ if list == nil {
+ return
+ }
+
+ var named, anonymous bool
+ for i, field := range list.List {
+ ftype := field.Type
+ if i == 0 && type0 != nil {
+ ftype = type0
+ }
+ if t, _ := ftype.(*ast.Ellipsis); t != nil {
+ ftype = t.Elt
+ if variadicOk && i == len(list.List)-1 && len(field.Names) <= 1 {
+ variadic = true
+ } else {
+ check.softErrorf(t, _MisplacedDotDotDot, "can only use ... with final parameter in list")
+ // ignore ... and continue
+ }
+ }
+ typ := check.varType(ftype)
+ // The parser ensures that f.Tag is nil and we don't
+ // care if a constructed AST contains a non-nil tag.
+ if len(field.Names) > 0 {
+ // named parameter
+ for _, name := range field.Names {
+ if name.Name == "" {
+ check.invalidAST(name, "anonymous parameter")
+ // ok to continue
+ }
+ par := NewParam(name.Pos(), check.pkg, name.Name, typ)
+ check.declare(scope, name, par, scope.pos)
+ params = append(params, par)
+ }
+ named = true
+ } else {
+ // anonymous parameter
+ par := NewParam(ftype.Pos(), check.pkg, "", typ)
+ check.recordImplicit(field, par)
+ params = append(params, par)
+ anonymous = true
+ }
+ }
+
+ if named && anonymous {
+ check.invalidAST(list, "list contains both named and anonymous parameters")
+ // ok to continue
+ }
+
+ // For a variadic function, change the last parameter's type from T to []T.
+ // Since we type-checked T rather than ...T, we also need to retro-actively
+ // record the type for ...T.
+ if variadic {
+ last := params[len(params)-1]
+ last.typ = &Slice{elem: last.typ}
+ check.recordTypeAndValue(list.List[len(list.List)-1].Type, typexpr, last.typ, nil)
+ }
+
+ return
+}
+
+// isubst returns an x with identifiers substituted per the substitution map smap.
+// isubst only handles the case of (valid) method receiver type expressions correctly.
+func isubst(x ast.Expr, smap map[*ast.Ident]*ast.Ident) ast.Expr {
+ switch n := x.(type) {
+ case *ast.Ident:
+ if alt := smap[n]; alt != nil {
+ return alt
+ }
+ case *ast.StarExpr:
+ X := isubst(n.X, smap)
+ if X != n.X {
+ new := *n
+ new.X = X
+ return &new
+ }
+ case *ast.IndexExpr, *ast.MultiIndexExpr:
+ ix := typeparams.UnpackIndexExpr(x)
+ var newIndexes []ast.Expr
+ for i, index := range ix.Indices {
+ new := isubst(index, smap)
+ if new != index {
+ if newIndexes == nil {
+ newIndexes = make([]ast.Expr, len(ix.Indices))
+ copy(newIndexes, ix.Indices)
+ }
+ newIndexes[i] = new
+ }
+ }
+ if newIndexes != nil {
+ return typeparams.PackIndexExpr(ix.X, ix.Lbrack, newIndexes, ix.Rbrack)
+ }
+ case *ast.ParenExpr:
+ return isubst(n.X, smap) // no need to keep parentheses
+ default:
+ // Other receiver type expressions are invalid.
+ // It's fine to ignore those here as they will
+ // be checked elsewhere.
+ }
+ return x
+}
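
For illustration only (not part of this change): the variadic invariant enforced by the panics in NewSignature above can be seen from the exported API. A minimal sketch building func(args ...string) int:

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	// The last parameter of a variadic signature must have an (unnamed) slice type.
	args := types.NewParam(token.NoPos, nil, "args", types.NewSlice(types.Typ[types.String]))
	res := types.NewParam(token.NoPos, nil, "", types.Typ[types.Int])
	sig := types.NewSignature(nil, types.NewTuple(args), types.NewTuple(res), true)

	fmt.Println(sig)               // func(args ...string) int
	fmt.Println(sig.Variadic())    // true
	fmt.Println(sig.Recv() == nil) // true: a plain function, not a method
}
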
diff --git a/src/go/types/sizeof_test.go b/src/go/types/sizeof_test.go
index 5a9d07ca41..67a9b39558 100644
--- a/src/go/types/sizeof_test.go
+++ b/src/go/types/sizeof_test.go
@@ -25,15 +25,14 @@ func TestSizeof(t *testing.T) {
{Struct{}, 24, 48},
{Pointer{}, 8, 16},
{Tuple{}, 12, 24},
- {Signature{}, 44, 88},
- {_Sum{}, 12, 24},
- {Interface{}, 60, 120},
+ {Signature{}, 28, 56},
+ {Union{}, 12, 24},
+ {Interface{}, 40, 80},
{Map{}, 16, 32},
{Chan{}, 12, 24},
- {Named{}, 64, 128},
- {_TypeParam{}, 28, 48},
- {instance{}, 44, 88},
- {bottom{}, 0, 0},
+ {Named{}, 80, 152},
+ {TypeParam{}, 28, 48},
+ {term{}, 12, 24},
{top{}, 0, 0},
// Objects
@@ -47,8 +46,9 @@ func TestSizeof(t *testing.T) {
{Nil{}, 40, 72},
// Misc
- {Scope{}, 40, 80},
+ {Scope{}, 44, 88},
{Package{}, 40, 80},
+ {_TypeSet{}, 24, 48},
}
for _, test := range tests {
got := reflect.TypeOf(test.val).Size()
diff --git a/src/go/types/sizes.go b/src/go/types/sizes.go
index 67052bb816..4c85bfe057 100644
--- a/src/go/types/sizes.go
+++ b/src/go/types/sizes.go
@@ -48,7 +48,7 @@ type StdSizes struct {
func (s *StdSizes) Alignof(T Type) int64 {
// For arrays and structs, alignment is defined in terms
// of alignment of the elements and fields, respectively.
- switch t := optype(T).(type) {
+ switch t := under(T).(type) {
case *Array:
// spec: "For a variable x of array type: unsafe.Alignof(x)
// is the same as unsafe.Alignof(x[0]), but at least 1."
@@ -73,6 +73,8 @@ func (s *StdSizes) Alignof(T Type) int64 {
if t.Info()&IsString != 0 {
return s.WordSize
}
+ case *TypeParam, *Union:
+ unreachable()
}
a := s.Sizeof(T) // may be 0
// spec: "For a variable x of any type: unsafe.Alignof(x) is at least 1."
@@ -118,7 +120,7 @@ var basicSizes = [...]byte{
}
func (s *StdSizes) Sizeof(T Type) int64 {
- switch t := optype(T).(type) {
+ switch t := under(T).(type) {
case *Basic:
assert(isTyped(T))
k := t.kind
@@ -148,10 +150,10 @@ func (s *StdSizes) Sizeof(T Type) int64 {
}
offsets := s.Offsetsof(t.fields)
return offsets[n-1] + s.Sizeof(t.fields[n-1].typ)
- case *_Sum:
- panic("Sizeof unimplemented for type sum")
case *Interface:
return s.WordSize * 2
+ case *TypeParam, *Union:
+ unreachable()
}
return s.WordSize // catch-all
}
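
For illustration only (not part of this change): the Sizeof/Alignof switches above are reachable through the exported Sizes interface. A minimal sketch using the standard gc/amd64 sizes (WordSize 8, MaxAlign 8):

package main

import (
	"fmt"
	"go/types"
)

func main() {
	sizes := types.SizesFor("gc", "amd64")

	fmt.Println(sizes.Sizeof(types.Typ[types.Complex128]))            // 16
	fmt.Println(sizes.Alignof(types.Typ[types.Complex128]))           // 8 (capped at MaxAlign)
	fmt.Println(sizes.Sizeof(types.NewPointer(types.Typ[types.Int]))) // 8: one word
}
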
diff --git a/src/go/types/slice.go b/src/go/types/slice.go
new file mode 100644
index 0000000000..debdd81586
--- /dev/null
+++ b/src/go/types/slice.go
@@ -0,0 +1,19 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+// A Slice represents a slice type.
+type Slice struct {
+ elem Type
+}
+
+// NewSlice returns a new slice type for the given element type.
+func NewSlice(elem Type) *Slice { return &Slice{elem: elem} }
+
+// Elem returns the element type of slice s.
+func (s *Slice) Elem() Type { return s.elem }
+
+func (t *Slice) Underlying() Type { return t }
+func (t *Slice) String() string { return TypeString(t, nil) }
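
For illustration only (not part of this change): the new pointer.go and slice.go files hold the basic Pointer and Slice constructors and accessors. A minimal sketch of that exported API:

package main

import (
	"fmt"
	"go/types"
)

func main() {
	p := types.NewPointer(types.Typ[types.Int]) // *int
	s := types.NewSlice(p)                      // []*int

	fmt.Println(s)                   // []*int
	fmt.Println(s.Elem())            // *int
	fmt.Println(p.Elem())            // int
	fmt.Println(s.Underlying() == s) // true: composite types are their own underlying type
}
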
diff --git a/src/go/types/stdlib_test.go b/src/go/types/stdlib_test.go
index d86a77a110..3eb7519a91 100644
--- a/src/go/types/stdlib_test.go
+++ b/src/go/types/stdlib_test.go
@@ -140,8 +140,7 @@ func testTestDir(t *testing.T, path string, ignore ...string) {
// parse and type-check file
file, err := parser.ParseFile(fset, filename, nil, 0)
if err == nil {
- conf := Config{Importer: stdLibImporter}
- SetGoVersion(&conf, goVersion)
+ conf := Config{GoVersion: goVersion, Importer: stdLibImporter}
_, err = conf.Check(filename, fset, []*ast.File{file}, nil)
}
diff --git a/src/go/types/stmt.go b/src/go/types/stmt.go
index 47f6dcfbd1..0f0a2e4d9f 100644
--- a/src/go/types/stmt.go
+++ b/src/go/types/stmt.go
@@ -65,7 +65,8 @@ func (check *Checker) funcBody(decl *declInfo, name string, sig *Signature, body
func (check *Checker) usage(scope *Scope) {
var unused []*Var
- for _, elem := range scope.elems {
+ for name, elem := range scope.elems {
+ elem = resolve(name, elem)
if v, _ := elem.(*Var); v != nil && !v.used {
unused = append(unused, v)
}
@@ -264,7 +265,7 @@ L:
// look for duplicate types for a given value
// (quadratic algorithm, but these lists tend to be very short)
for _, vt := range seen[val] {
- if check.identical(v.typ, vt.typ) {
+ if Identical(v.typ, vt.typ) {
check.errorf(&v, _DuplicateCase, "duplicate case %s in expression switch", &v)
check.error(atPos(vt.pos), _DuplicateCase, "\tprevious case") // secondary error, \t indented
continue L
@@ -288,7 +289,7 @@ L:
// look for duplicate types
// (quadratic algorithm, but type switches tend to be reasonably small)
for t, other := range seen {
- if T == nil && t == nil || T != nil && t != nil && check.identical(T, t) {
+ if T == nil && t == nil || T != nil && t != nil && Identical(T, t) {
// talk about "case" rather than "type" because of nil case
Ts := "nil"
if T != nil {
@@ -360,25 +361,33 @@ func (check *Checker) stmt(ctxt stmtContext, s ast.Stmt) {
check.errorf(&x, code, "%s %s", &x, msg)
case *ast.SendStmt:
- var ch, x operand
+ var ch, val operand
check.expr(&ch, s.Chan)
- check.expr(&x, s.Value)
- if ch.mode == invalid || x.mode == invalid {
+ check.expr(&val, s.Value)
+ if ch.mode == invalid || val.mode == invalid {
return
}
-
- tch := asChan(ch.typ)
- if tch == nil {
- check.invalidOp(inNode(s, s.Arrow), _InvalidSend, "cannot send to non-chan type %s", ch.typ)
- return
- }
-
- if tch.dir == RecvOnly {
- check.invalidOp(inNode(s, s.Arrow), _InvalidSend, "cannot send to receive-only type %s", tch)
+ var elem Type
+ if !underIs(ch.typ, func(u Type) bool {
+ uch, _ := u.(*Chan)
+ if uch == nil {
+ check.invalidOp(inNode(s, s.Arrow), _InvalidSend, "cannot send to non-channel %s", &ch)
+ return false
+ }
+ if uch.dir == RecvOnly {
+ check.invalidOp(inNode(s, s.Arrow), _InvalidSend, "cannot send to receive-only channel %s", &ch)
+ return false
+ }
+ if elem != nil && !Identical(uch.elem, elem) {
+ check.invalidOp(inNode(s, s.Arrow), _Todo, "channels of %s must have the same element type", &ch)
+ return false
+ }
+ elem = uch.elem
+ return true
+ }) {
return
}
-
- check.assignment(&x, tch.elem, "send")
+ check.assignment(&val, elem, "send")
case *ast.IncDecStmt:
var op token.Token
@@ -911,12 +920,12 @@ func rangeKeyVal(typ Type, wantKey, wantVal bool) (Type, Type, string) {
msg = "send-only channel"
}
return typ.elem, Typ[Invalid], msg
- case *_Sum:
+ case *Union:
first := true
var key, val Type
var msg string
- typ.is(func(t Type) bool {
- k, v, m := rangeKeyVal(under(t), wantKey, wantVal)
+ typ.underIs(func(t Type) bool {
+ k, v, m := rangeKeyVal(t, wantKey, wantVal)
if k == nil || m != "" {
key, val, msg = k, v, m
return false
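Editor's note: the stmt.go changes above rewrite the send check to walk the type set of the channel operand: every type must be a channel that permits sends, and all channels must agree on the element type. A hedged, user-level illustration in the generics syntax the new tests use (not part of the patch):

package p

// Every type in the set is a channel with element type int that permits
// sends, so the send is accepted.
func send[C interface{ chan int | chan<- int }](ch C, v int) {
	ch <- v // ok
}

// The channels disagree on the element type (int vs. string), so the
// checker rejects a send on a value of type parameter C.
func sendMixed[C interface{ chan int | chan string }](ch C, v int) {
	ch <- v // error: channels in C's type set must have the same element type
}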
diff --git a/src/go/types/struct.go b/src/go/types/struct.go
new file mode 100644
index 0000000000..48b07346dc
--- /dev/null
+++ b/src/go/types/struct.go
@@ -0,0 +1,202 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "go/ast"
+ "go/token"
+ "strconv"
+)
+
+// ----------------------------------------------------------------------------
+// API
+
+// A Struct represents a struct type.
+type Struct struct {
+ fields []*Var
+ tags []string // field tags; nil if there are no tags
+}
+
+// NewStruct returns a new struct with the given fields and corresponding field tags.
+// If a field with index i has a tag, tags[i] must be that tag, but len(tags) may be
+// only as long as required to hold the tag with the largest index i. Consequently,
+// if no field has a tag, tags may be nil.
+func NewStruct(fields []*Var, tags []string) *Struct {
+ var fset objset
+ for _, f := range fields {
+ if f.name != "_" && fset.insert(f) != nil {
+ panic("multiple fields with the same name")
+ }
+ }
+ if len(tags) > len(fields) {
+ panic("more tags than fields")
+ }
+ return &Struct{fields: fields, tags: tags}
+}
+
+// NumFields returns the number of fields in the struct (including blank and embedded fields).
+func (s *Struct) NumFields() int { return len(s.fields) }
+
+// Field returns the i'th field for 0 <= i < NumFields().
+func (s *Struct) Field(i int) *Var { return s.fields[i] }
+
+// Tag returns the i'th field tag for 0 <= i < NumFields().
+func (s *Struct) Tag(i int) string {
+ if i < len(s.tags) {
+ return s.tags[i]
+ }
+ return ""
+}
+
+func (t *Struct) Underlying() Type { return t }
+func (t *Struct) String() string { return TypeString(t, nil) }
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+func (check *Checker) structType(styp *Struct, e *ast.StructType) {
+ list := e.Fields
+ if list == nil {
+ return
+ }
+
+ // struct fields and tags
+ var fields []*Var
+ var tags []string
+
+ // for double-declaration checks
+ var fset objset
+
+ // current field typ and tag
+ var typ Type
+ var tag string
+ add := func(ident *ast.Ident, embedded bool, pos token.Pos) {
+ if tag != "" && tags == nil {
+ tags = make([]string, len(fields))
+ }
+ if tags != nil {
+ tags = append(tags, tag)
+ }
+
+ name := ident.Name
+ fld := NewField(pos, check.pkg, name, typ, embedded)
+ // spec: "Within a struct, non-blank field names must be unique."
+ if name == "_" || check.declareInSet(&fset, pos, fld) {
+ fields = append(fields, fld)
+ check.recordDef(ident, fld)
+ }
+ }
+
+ // addInvalid adds an embedded field of invalid type to the struct for
+ // fields with errors; this keeps the number of struct fields in sync
+ // with the source as long as the fields are _ or have different names
+ // (issue #25627).
+ addInvalid := func(ident *ast.Ident, pos token.Pos) {
+ typ = Typ[Invalid]
+ tag = ""
+ add(ident, true, pos)
+ }
+
+ for _, f := range list.List {
+ typ = check.varType(f.Type)
+ tag = check.tag(f.Tag)
+ if len(f.Names) > 0 {
+ // named fields
+ for _, name := range f.Names {
+ add(name, false, name.Pos())
+ }
+ } else {
+ // embedded field
+ // spec: "An embedded type must be specified as a type name T or as a
+ // pointer to a non-interface type name *T, and T itself may not be a
+ // pointer type."
+ pos := f.Type.Pos()
+ name := embeddedFieldIdent(f.Type)
+ if name == nil {
+ // TODO(rFindley): using invalidAST here causes test failures (all
+ // errors should have codes). Clean this up.
+ check.errorf(f.Type, _Todo, "invalid AST: embedded field type %s has no name", f.Type)
+ name = ast.NewIdent("_")
+ name.NamePos = pos
+ addInvalid(name, pos)
+ continue
+ }
+ add(name, true, pos)
+
+ // Because we have a name, typ must be of the form T or *T, where T is the name
+ // of a (named or alias) type, and t (= deref(typ)) must be the type of T.
+ // We must delay this check to the end because we don't want to instantiate
+ // (via under(t)) a possibly incomplete type.
+
+ // for use in the closure below
+ embeddedTyp := typ
+ embeddedPos := f.Type
+
+ check.later(func() {
+ t, isPtr := deref(embeddedTyp)
+ switch t := under(t).(type) {
+ case *Basic:
+ if t == Typ[Invalid] {
+ // error was reported before
+ return
+ }
+ // unsafe.Pointer is treated like a regular pointer
+ if t.kind == UnsafePointer {
+ check.error(embeddedPos, _InvalidPtrEmbed, "embedded field type cannot be unsafe.Pointer")
+ }
+ case *Pointer:
+ check.error(embeddedPos, _InvalidPtrEmbed, "embedded field type cannot be a pointer")
+ case *TypeParam:
+ check.error(embeddedPos, _InvalidPtrEmbed, "embedded field type cannot be a (pointer to a) type parameter")
+ case *Interface:
+ if isPtr {
+ check.error(embeddedPos, _InvalidPtrEmbed, "embedded field type cannot be a pointer to an interface")
+ }
+ }
+ })
+ }
+ }
+
+ styp.fields = fields
+ styp.tags = tags
+}
+
+func embeddedFieldIdent(e ast.Expr) *ast.Ident {
+ switch e := e.(type) {
+ case *ast.Ident:
+ return e
+ case *ast.StarExpr:
+ // *T is valid, but **T is not
+ if _, ok := e.X.(*ast.StarExpr); !ok {
+ return embeddedFieldIdent(e.X)
+ }
+ case *ast.SelectorExpr:
+ return e.Sel
+ case *ast.IndexExpr:
+ return embeddedFieldIdent(e.X)
+ }
+ return nil // invalid embedded field
+}
+
+func (check *Checker) declareInSet(oset *objset, pos token.Pos, obj Object) bool {
+ if alt := oset.insert(obj); alt != nil {
+ check.errorf(atPos(pos), _DuplicateDecl, "%s redeclared", obj.Name())
+ check.reportAltDecl(alt)
+ return false
+ }
+ return true
+}
+
+func (check *Checker) tag(t *ast.BasicLit) string {
+ if t != nil {
+ if t.Kind == token.STRING {
+ if val, err := strconv.Unquote(t.Value); err == nil {
+ return val
+ }
+ }
+ check.invalidAST(t, "incorrect tag syntax: %q", t.Value)
+ }
+ return ""
+}
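Editor's note: a short, hedged sketch (separate from the patch) of the exported Struct API defined above, built with the existing go/types constructors; the field names and tag are made up for illustration.

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	// struct { Name string "json:\"name\""; Age int }
	fields := []*types.Var{
		types.NewField(token.NoPos, nil, "Name", types.Typ[types.String], false),
		types.NewField(token.NoPos, nil, "Age", types.Typ[types.Int], false),
	}
	// tags may be shorter than fields; missing entries mean "no tag" (see NewStruct doc).
	tags := []string{`json:"name"`}

	s := types.NewStruct(fields, tags)
	fmt.Println(s.NumFields()) // 2
	fmt.Println(s.Tag(0))      // json:"name"
	fmt.Println(s.Tag(1))      // "" (Age has no tag)
}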
diff --git a/src/go/types/subst.go b/src/go/types/subst.go
index 931375f1f2..322e30d357 100644
--- a/src/go/types/subst.go
+++ b/src/go/types/subst.go
@@ -2,9 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// This file implements instantiation of generic types
-// through substitution of type parameters by actual
-// types.
+// This file implements type parameter substitution.
package types
@@ -22,21 +20,16 @@ type substMap struct {
// TODO(gri) rewrite that code, get rid of this field, and make this
// struct just the map (proj)
targs []Type
- proj map[*_TypeParam]Type
+ proj map[*TypeParam]Type
}
// makeSubstMap creates a new substitution map mapping tpars[i] to targs[i].
// If targs[i] is nil, tpars[i] is not substituted.
func makeSubstMap(tpars []*TypeName, targs []Type) *substMap {
assert(len(tpars) == len(targs))
- proj := make(map[*_TypeParam]Type, len(tpars))
+ proj := make(map[*TypeParam]Type, len(tpars))
for i, tpar := range tpars {
- // We must expand type arguments otherwise *instance
- // types end up as components in composite types.
- // TODO(gri) explain why this causes problems, if it does
- targ := expand(targs[i]) // possibly nil
- targs[i] = targ
- proj[tpar.typ.(*_TypeParam)] = targ
+ proj[tpar.typ.(*TypeParam)] = targs[i]
}
return &substMap{targs, proj}
}
@@ -49,165 +42,13 @@ func (m *substMap) empty() bool {
return len(m.proj) == 0
}
-func (m *substMap) lookup(tpar *_TypeParam) Type {
+func (m *substMap) lookup(tpar *TypeParam) Type {
if t := m.proj[tpar]; t != nil {
return t
}
return tpar
}
-func (check *Checker) instantiate(pos token.Pos, typ Type, targs []Type, poslist []token.Pos) (res Type) {
- if trace {
- check.trace(pos, "-- instantiating %s with %s", typ, typeListString(targs))
- check.indent++
- defer func() {
- check.indent--
- var under Type
- if res != nil {
- // Calling under() here may lead to endless instantiations.
- // Test case: type T[P any] T[P]
- // TODO(gri) investigate if that's a bug or to be expected.
- under = res.Underlying()
- }
- check.trace(pos, "=> %s (under = %s)", res, under)
- }()
- }
-
- assert(len(poslist) <= len(targs))
-
- // TODO(gri) What is better here: work with TypeParams, or work with TypeNames?
- var tparams []*TypeName
- switch t := typ.(type) {
- case *Named:
- tparams = t.tparams
- case *Signature:
- tparams = t.tparams
- defer func() {
- // If we had an unexpected failure somewhere don't panic below when
- // asserting res.(*Signature). Check for *Signature in case Typ[Invalid]
- // is returned.
- if _, ok := res.(*Signature); !ok {
- return
- }
- // If the signature doesn't use its type parameters, subst
- // will not make a copy. In that case, make a copy now (so
- // we can set tparams to nil w/o causing side-effects).
- if t == res {
- copy := *t
- res = &copy
- }
- // After instantiating a generic signature, it is not generic
- // anymore; we need to set tparams to nil.
- res.(*Signature).tparams = nil
- }()
-
- default:
- check.dump("%v: cannot instantiate %v", pos, typ)
- unreachable() // only defined types and (defined) functions can be generic
- }
-
- // the number of supplied types must match the number of type parameters
- if len(targs) != len(tparams) {
- // TODO(gri) provide better error message
- check.errorf(atPos(pos), _Todo, "got %d arguments but %d type parameters", len(targs), len(tparams))
- return Typ[Invalid]
- }
-
- if len(tparams) == 0 {
- return typ // nothing to do (minor optimization)
- }
-
- smap := makeSubstMap(tparams, targs)
-
- // check bounds
- for i, tname := range tparams {
- tpar := tname.typ.(*_TypeParam)
- iface := tpar.Bound()
- if iface.Empty() {
- continue // no type bound
- }
-
- targ := targs[i]
-
- // best position for error reporting
- pos := pos
- if i < len(poslist) {
- pos = poslist[i]
- }
-
- // The type parameter bound is parameterized with the same type parameters
- // as the instantiated type; before we can use it for bounds checking we
- // need to instantiate it with the type arguments with which we instantiate
- // the parameterized type.
- iface = check.subst(pos, iface, smap).(*Interface)
-
- // targ must implement iface (methods)
- // - check only if we have methods
- check.completeInterface(token.NoPos, iface)
- if len(iface.allMethods) > 0 {
- // If the type argument is a pointer to a type parameter, the type argument's
- // method set is empty.
- // TODO(gri) is this what we want? (spec question)
- if base, isPtr := deref(targ); isPtr && asTypeParam(base) != nil {
- check.errorf(atPos(pos), 0, "%s has no methods", targ)
- break
- }
- if m, wrong := check.missingMethod(targ, iface, true); m != nil {
- // TODO(gri) needs to print updated name to avoid major confusion in error message!
- // (print warning for now)
- // Old warning:
- // check.softErrorf(pos, "%s does not satisfy %s (warning: name not updated) = %s (missing method %s)", targ, tpar.bound, iface, m)
- if m.name == "==" {
- // We don't want to report "missing method ==".
- check.softErrorf(atPos(pos), 0, "%s does not satisfy comparable", targ)
- } else if wrong != nil {
- // TODO(gri) This can still report uninstantiated types which makes the error message
- // more difficult to read then necessary.
- // TODO(rFindley) should this use parentheses rather than ':' for qualification?
- check.softErrorf(atPos(pos), _Todo,
- "%s does not satisfy %s: wrong method signature\n\tgot %s\n\twant %s",
- targ, tpar.bound, wrong, m,
- )
- } else {
- check.softErrorf(atPos(pos), 0, "%s does not satisfy %s (missing method %s)", targ, tpar.bound, m.name)
- }
- break
- }
- }
-
- // targ's underlying type must also be one of the interface types listed, if any
- if iface.allTypes == nil {
- continue // nothing to do
- }
-
- // If targ is itself a type parameter, each of its possible types, but at least one, must be in the
- // list of iface types (i.e., the targ type list must be a non-empty subset of the iface types).
- if targ := asTypeParam(targ); targ != nil {
- targBound := targ.Bound()
- if targBound.allTypes == nil {
- check.softErrorf(atPos(pos), _Todo, "%s does not satisfy %s (%s has no type constraints)", targ, tpar.bound, targ)
- break
- }
- for _, t := range unpackType(targBound.allTypes) {
- if !iface.isSatisfiedBy(t) {
- // TODO(gri) match this error message with the one below (or vice versa)
- check.softErrorf(atPos(pos), 0, "%s does not satisfy %s (%s type constraint %s not found in %s)", targ, tpar.bound, targ, t, iface.allTypes)
- break
- }
- }
- break
- }
-
- // Otherwise, targ's type or underlying type must also be one of the interface types listed, if any.
- if !iface.isSatisfiedBy(targ) {
- check.softErrorf(atPos(pos), _Todo, "%s does not satisfy %s (%s or %s not found in %s)", targ, tpar.bound, targ, under(targ), iface.allTypes)
- break
- }
- }
-
- return check.subst(pos, typ, smap)
-}
-
// subst returns the type typ with its type parameters tpars replaced by
// the corresponding type arguments targs, recursively.
// subst is functional in the sense that it doesn't modify the incoming
@@ -222,20 +63,33 @@ func (check *Checker) subst(pos token.Pos, typ Type, smap *substMap) Type {
switch t := typ.(type) {
case *Basic:
return typ // nothing to do
- case *_TypeParam:
+ case *TypeParam:
return smap.lookup(t)
}
// general case
- subst := subster{check, pos, make(map[Type]Type), smap}
+ var subst subster
+ subst.pos = pos
+ subst.smap = smap
+ if check != nil {
+ subst.check = check
+ subst.typMap = check.typMap
+ } else {
+ // If we don't have a *Checker and its global type map,
+ // use a local version. Besides avoiding duplicate work,
+ // the type map prevents infinite recursive substitution
+ // for recursive types (example: type T[P any] *T[P]).
+ subst.typMap = make(map[string]*Named)
+ }
+
return subst.typ(typ)
}
type subster struct {
- check *Checker
- pos token.Pos
- cache map[Type]Type
- smap *substMap
+ pos token.Pos
+ smap *substMap
+ check *Checker // nil if called via Instantiate
+ typMap map[string]*Named
}
func (subst *subster) typ(typ Type) Type {
@@ -244,7 +98,7 @@ func (subst *subster) typ(typ Type) Type {
// Call typOrNil if it's possible that typ is nil.
panic("nil typ")
- case *Basic, *bottom, *top:
+ case *Basic, *top:
// nothing to do
case *Array:
@@ -293,26 +147,23 @@ func (subst *subster) typ(typ Type) Type {
}
}
- case *_Sum:
- types, copied := subst.typeList(t.types)
+ case *Union:
+ terms, copied := subst.termList(t.terms)
if copied {
- // Don't do it manually, with a Sum literal: the new
- // types list may not be unique and NewSum may remove
- // duplicates.
- return _NewSum(types)
+ // TODO(gri) Remove duplicates that may have crept in after substitution
+ // (unlikely but possible). This matters for the Identical
+ // predicate on unions.
+ return &Union{terms}
}
case *Interface:
methods, mcopied := subst.funcList(t.methods)
- types := t.types
- if t.types != nil {
- types = subst.typ(t.types)
- }
embeddeds, ecopied := subst.typeList(t.embeddeds)
- if mcopied || types != t.types || ecopied {
- iface := &Interface{methods: methods, types: types, embeddeds: embeddeds}
- subst.check.posMap[iface] = subst.check.posMap[t] // satisfy completeInterface requirement
- subst.check.completeInterface(token.NoPos, iface)
+ if mcopied || ecopied {
+ iface := &Interface{methods: methods, embeddeds: embeddeds, complete: t.complete}
+ if subst.check == nil {
+ panic("internal error: cannot instantiate interfaces yet")
+ }
return iface
}
@@ -330,17 +181,19 @@ func (subst *subster) typ(typ Type) Type {
}
case *Named:
- subst.check.indent++
- defer func() {
- subst.check.indent--
- }()
- dump := func(format string, args ...interface{}) {
- if trace {
+ // dump is for debugging
+ dump := func(string, ...interface{}) {}
+ if subst.check != nil && trace {
+ subst.check.indent++
+ defer func() {
+ subst.check.indent--
+ }()
+ dump = func(format string, args ...interface{}) {
subst.check.trace(subst.pos, format, args...)
}
}
- if t.tparams == nil {
+ if t.TParams().Len() == 0 {
dump(">>> %s is not parameterized", t)
return t // type is not parameterized
}
@@ -350,7 +203,7 @@ func (subst *subster) typ(typ Type) Type {
if len(t.targs) > 0 {
// already instantiated
dump(">>> %s already instantiated", t)
- assert(len(t.targs) == len(t.tparams))
+ assert(len(t.targs) == t.TParams().Len())
// For each (existing) type argument targ, determine if it needs
// to be substituted; i.e., if it is or contains a type parameter
// that has a type argument for it.
@@ -360,7 +213,7 @@ func (subst *subster) typ(typ Type) Type {
if newTarg != targ {
dump(">>> substituted %d targ %s => %s", i, targ, newTarg)
if newTargs == nil {
- newTargs = make([]Type, len(t.tparams))
+ newTargs = make([]Type, t.TParams().Len())
copy(newTargs, t.targs)
}
newTargs[i] = newTarg
@@ -381,34 +234,30 @@ func (subst *subster) typ(typ Type) Type {
// before creating a new named type, check if we have this one already
h := instantiatedHash(t, newTargs)
dump(">>> new type hash: %s", h)
- if named, found := subst.check.typMap[h]; found {
+ if named, found := subst.typMap[h]; found {
dump(">>> found %s", named)
- subst.cache[t] = named
return named
}
- // create a new named type and populate caches to avoid endless recursion
+ // create a new named type and populate typMap to avoid endless recursion
tname := NewTypeName(subst.pos, t.obj.pkg, t.obj.name, nil)
- named := subst.check.newNamed(tname, t.underlying, t.methods) // method signatures are updated lazily
- named.tparams = t.tparams // new type is still parameterized
+ named := subst.check.newNamed(tname, t, t.Underlying(), t.TParams(), t.methods) // method signatures are updated lazily
named.targs = newTargs
- subst.check.typMap[h] = named
- subst.cache[t] = named
+ subst.typMap[h] = named
+ t.expand() // must happen after typMap update to avoid infinite recursion
// do the substitution
dump(">>> subst %s with %s (new: %s)", t.underlying, subst.smap, newTargs)
- named.underlying = subst.typOrNil(t.underlying)
- named.orig = named.underlying // for cycle detection (Checker.validType)
+ named.underlying = subst.typOrNil(t.Underlying())
+ dump(">>> underlying: %v", named.underlying)
+ assert(named.underlying != nil)
+ named.fromRHS = named.underlying // for cycle detection (Checker.validType)
return named
- case *_TypeParam:
+ case *TypeParam:
return subst.smap.lookup(t)
- case *instance:
- // TODO(gri) can we avoid the expansion here and just substitute the type parameters?
- return subst.typ(t.expand())
-
default:
panic("unimplemented")
}
@@ -416,14 +265,17 @@ func (subst *subster) typ(typ Type) Type {
return typ
}
-// TODO(gri) Eventually, this should be more sophisticated.
-// It won't work correctly for locally declared types.
+var instanceHashing = 0
+
func instantiatedHash(typ *Named, targs []Type) string {
+ assert(instanceHashing == 0)
+ instanceHashing++
var buf bytes.Buffer
writeTypeName(&buf, typ.obj, nil)
buf.WriteByte('[')
writeTypeList(&buf, targs, nil, nil)
buf.WriteByte(']')
+ instanceHashing--
// With respect to the represented type, whether a
// type is fully expanded or stored as instance
@@ -541,3 +393,21 @@ func (subst *subster) typeList(in []Type) (out []Type, copied bool) {
}
return
}
+
+func (subst *subster) termList(in []*term) (out []*term, copied bool) {
+ out = in
+ for i, t := range in {
+ if u := subst.typ(t.typ); u != t.typ {
+ if !copied {
+				// first term that got substituted => allocate new out slice
+				// and copy all terms
+ new := make([]*term, len(in))
+ copy(new, out)
+ out = new
+ copied = true
+ }
+ out[i] = &term{t.tilde, u}
+ }
+ }
+ return
+}
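Editor's note: the substitution code above is internal to go/types, but its central step -- look a type parameter up in the map, fall back to the parameter itself, and rebuild composite types around the result -- can be sketched with a tiny standalone model. The toy types below are hypothetical, and the typMap cache that breaks recursion for types like "type T[P any] *T[P]" is omitted for brevity.

package main

import "fmt"

// typ is a toy stand-in for go/types' Type interface.
type typ interface{ String() string }

type basic struct{ name string }

func (b basic) String() string { return b.name }

type typeParam struct{ name string }

func (p typeParam) String() string { return p.name }

type sliceType struct{ elem typ }

func (s sliceType) String() string { return "[]" + s.elem.String() }

// substMap mirrors substMap.lookup above: an unmapped type parameter
// substitutes to itself.
type substMap map[typeParam]typ

func (m substMap) lookup(p typeParam) typ {
	if t, ok := m[p]; ok {
		return t
	}
	return p
}

// subst walks the structure and replaces type parameters, like subster.typ.
func subst(t typ, m substMap) typ {
	switch t := t.(type) {
	case typeParam:
		return m.lookup(t)
	case sliceType:
		return sliceType{elem: subst(t.elem, m)}
	default:
		return t // basic types: nothing to do
	}
}

func main() {
	P := typeParam{"P"}
	m := substMap{P: basic{"int"}}
	fmt.Println(subst(sliceType{elem: P}, m)) // []int
	fmt.Println(subst(P, substMap{}))         // P (identity fall-back)
}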
diff --git a/src/go/types/testdata/check/builtins.go2 b/src/go/types/testdata/check/builtins.go2
index 3918d836b5..3881090603 100644
--- a/src/go/types/testdata/check/builtins.go2
+++ b/src/go/types/testdata/check/builtins.go2
@@ -6,48 +6,231 @@
package builtins
+import "unsafe"
+
+// close
+
+type C0 interface{ int }
+type C1 interface{ chan int }
+type C2 interface{ chan int | <-chan int }
+type C3 interface{ chan int | chan float32 }
+type C4 interface{ chan int | chan<- int }
+type C5[T any] interface{ ~chan T | chan<- T }
+
+func _[T any](ch T) {
+ close(ch /* ERROR cannot close non-channel */)
+}
+
+func _[T C0](ch T) {
+ close(ch /* ERROR cannot close non-channel */)
+}
+
+func _[T C1](ch T) {
+ close(ch)
+}
+
+func _[T C2](ch T) {
+ close(ch /* ERROR cannot close receive-only channel */)
+}
+
+func _[T C3](ch T) {
+ close(ch)
+}
+
+func _[T C4](ch T) {
+ close(ch)
+}
+
+func _[T C5[X], X any](ch T) {
+ close(ch)
+}
+
+// delete
+
+type M0 interface{ int }
+type M1 interface{ map[string]int }
+type M2 interface { map[string]int | map[string]float64 }
+type M3 interface{ map[string]int | map[rune]int }
+type M4[K comparable, V any] interface{ map[K]V | map[rune]V }
+
+func _[T any](m T) {
+ delete(m /* ERROR not a map */, "foo")
+}
+
+func _[T M0](m T) {
+ delete(m /* ERROR not a map */, "foo")
+}
+
+func _[T M1](m T) {
+ delete(m, "foo")
+}
+
+func _[T M2](m T) {
+ delete(m, "foo")
+ delete(m, 0 /* ERROR cannot use .* as string */)
+}
+
+func _[T M3](m T) {
+ delete(m /* ERROR must have identical key types */, "foo")
+}
+
+func _[T M4[rune, V], V any](m T) {
+ delete(m, 'k')
+}
+
+func _[T M4[K, V], K comparable, V any](m T) {
+ delete(m /* ERROR must have identical key types */, "foo")
+}
+
+// make
+
type Bmc interface {
- type map[rune]string, chan int
+ ~map[rune]string | ~chan int
}
type Bms interface {
- type map[string]int, []int
+ ~map[string]int | ~[]int
}
type Bcs interface {
- type chan bool, []float64
+ ~chan bool | ~[]float64
}
type Bss interface {
- type []int, []string
+ ~[]int | ~[]string
}
-func _[T any] () {
- _ = make(T /* ERROR invalid argument */ )
- _ = make(T /* ERROR invalid argument */ , 10)
- _ = make(T /* ERROR invalid argument */ , 10, 20)
+func _[T any]() {
+ _ = make(T /* ERROR invalid argument */)
+ _ = make(T /* ERROR invalid argument */, 10)
+ _ = make(T /* ERROR invalid argument */, 10, 20)
}
-func _[T Bmc] () {
+func _[T Bmc]() {
_ = make(T)
_ = make(T, 10)
_ = make /* ERROR expects 1 or 2 arguments */ (T, 10, 20)
}
-func _[T Bms] () {
+func _[T Bms]() {
_ = make /* ERROR expects 2 arguments */ (T)
_ = make(T, 10)
_ = make /* ERROR expects 2 arguments */ (T, 10, 20)
}
-func _[T Bcs] () {
+func _[T Bcs]() {
_ = make /* ERROR expects 2 arguments */ (T)
_ = make(T, 10)
_ = make /* ERROR expects 2 arguments */ (T, 10, 20)
}
-func _[T Bss] () {
+func _[T Bss]() {
_ = make /* ERROR expects 2 or 3 arguments */ (T)
_ = make(T, 10)
_ = make(T, 10, 20)
}
+
+// unsafe.Alignof
+
+func _[T comparable]() {
+ var (
+ b int64
+ a [10]T
+ s struct{ f T }
+ p *T
+ l []T
+ f func(T)
+ i interface{ m() T }
+ c chan T
+ m map[T]T
+ t T
+ )
+
+ const bb = unsafe.Alignof(b)
+ assert(bb == 8)
+ const _ = unsafe /* ERROR not constant */ .Alignof(a)
+ const _ = unsafe /* ERROR not constant */ .Alignof(s)
+ const pp = unsafe.Alignof(p)
+ assert(pp == 8)
+ const ll = unsafe.Alignof(l)
+ assert(ll == 8)
+ const ff = unsafe.Alignof(f)
+ assert(ff == 8)
+ const ii = unsafe.Alignof(i)
+ assert(ii == 8)
+ const cc = unsafe.Alignof(c)
+ assert(cc == 8)
+ const mm = unsafe.Alignof(m)
+ assert(mm == 8)
+ const _ = unsafe /* ERROR not constant */ .Alignof(t)
+}
+
+// unsafe.Offsetof
+
+func _[T comparable]() {
+ var (
+ b struct{ _, f int64 }
+ a struct{ _, f [10]T }
+ s struct{ _, f struct{ f T } }
+ p struct{ _, f *T }
+ l struct{ _, f []T }
+ f struct{ _, f func(T) }
+ i struct{ _, f interface{ m() T } }
+ c struct{ _, f chan T }
+ m struct{ _, f map[T]T }
+ t struct{ _, f T }
+ )
+
+ const bb = unsafe.Offsetof(b.f)
+ assert(bb == 8)
+ const _ = unsafe /* ERROR not constant */ .Alignof(a)
+ const _ = unsafe /* ERROR not constant */ .Alignof(s)
+ const pp = unsafe.Offsetof(p.f)
+ assert(pp == 8)
+ const ll = unsafe.Offsetof(l.f)
+ assert(ll == 24)
+ const ff = unsafe.Offsetof(f.f)
+ assert(ff == 8)
+ const ii = unsafe.Offsetof(i.f)
+ assert(ii == 16)
+ const cc = unsafe.Offsetof(c.f)
+ assert(cc == 8)
+ const mm = unsafe.Offsetof(m.f)
+ assert(mm == 8)
+ const _ = unsafe /* ERROR not constant */ .Alignof(t)
+}
+
+// unsafe.Sizeof
+
+func _[T comparable]() {
+ var (
+ b int64
+ a [10]T
+ s struct{ f T }
+ p *T
+ l []T
+ f func(T)
+ i interface{ m() T }
+ c chan T
+ m map[T]T
+ t T
+ )
+
+ const bb = unsafe.Sizeof(b)
+ assert(bb == 8)
+ const _ = unsafe /* ERROR not constant */ .Alignof(a)
+ const _ = unsafe /* ERROR not constant */ .Alignof(s)
+ const pp = unsafe.Sizeof(p)
+ assert(pp == 8)
+ const ll = unsafe.Sizeof(l)
+ assert(ll == 24)
+ const ff = unsafe.Sizeof(f)
+ assert(ff == 8)
+ const ii = unsafe.Sizeof(i)
+ assert(ii == 16)
+ const cc = unsafe.Sizeof(c)
+ assert(cc == 8)
+ const mm = unsafe.Sizeof(m)
+ assert(mm == 8)
+ const _ = unsafe /* ERROR not constant */ .Alignof(t)
+}
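Editor's note: the unsafe tests added above encode one rule worth spelling out: Alignof, Offsetof, and Sizeof remain constant expressions only when the result cannot depend on a type argument; once the operand's layout involves a type parameter, the call is still valid but no longer constant. A hedged illustration:

package p

import "unsafe"

func sizes[T any](x T) {
	const _ = unsafe.Sizeof(int64(0)) // constant: does not depend on T
	s := unsafe.Sizeof(x)             // valid, but not a constant expression
	_ = s
	// const _ = unsafe.Sizeof(x)     // would be rejected: "not constant", as in the tests above
}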
diff --git a/src/go/types/testdata/check/builtins.src b/src/go/types/testdata/check/builtins.src
index 3707528914..7fd6a4b032 100644
--- a/src/go/types/testdata/check/builtins.src
+++ b/src/go/types/testdata/check/builtins.src
@@ -144,7 +144,7 @@ func close1() {
var r <-chan int
close() // ERROR not enough arguments
close(1, 2) // ERROR too many arguments
- close(42 /* ERROR not a channel */)
+ close(42 /* ERROR cannot close non-channel */)
close(r /* ERROR receive-only channel */)
close(c)
_ = close /* ERROR used as value */ (c)
diff --git a/src/go/types/testdata/check/const0.src b/src/go/types/testdata/check/const0.src
index 5608b1549b..3cffdf904c 100644
--- a/src/go/types/testdata/check/const0.src
+++ b/src/go/types/testdata/check/const0.src
@@ -27,7 +27,7 @@ const (
ub1 = true
ub2 = 2 < 1
ub3 = ui1 == uf1
- ub4 = true /* ERROR "cannot convert" */ == 0
+ ub4 = true /* ERROR "mismatched types untyped bool and untyped int" */ == 0
// integer values
ui0 = 0
diff --git a/src/go/types/testdata/check/cycles4.src b/src/go/types/testdata/check/cycles4.src
index 445babca68..924aabf475 100644
--- a/src/go/types/testdata/check/cycles4.src
+++ b/src/go/types/testdata/check/cycles4.src
@@ -4,6 +4,8 @@
package p
+import "unsafe"
+
// Check that all methods of T are collected before
// determining the result type of m (which embeds
// all methods of T).
@@ -13,7 +15,7 @@ type T interface {
E
}
-var _ = T.m(nil).m().e()
+var _ int = T.m(nil).m().e()
type E interface {
e() int
@@ -22,7 +24,7 @@ type E interface {
// Check that unresolved forward chains are followed
// (see also comment in resolver.go, checker.typeDecl).
-var _ = C.m(nil).m().e()
+var _ int = C.m(nil).m().e()
type A B
@@ -108,3 +110,12 @@ type Element interface {
type Event interface {
Target() Element
}
+
+// Check that accessing an interface method too early doesn't lead
+// to follow-on errors due to an incorrectly computed type set.
+
+type T8 interface {
+ m() [unsafe.Sizeof(T8.m /* ERROR undefined */ )]int
+}
+
+var _ = T8.m // no error expected here
diff --git a/src/go/types/testdata/check/decls0.src b/src/go/types/testdata/check/decls0.src
index 5ad8f53f65..1224e46377 100644
--- a/src/go/types/testdata/check/decls0.src
+++ b/src/go/types/testdata/check/decls0.src
@@ -4,7 +4,7 @@
// type declarations
-package decls0
+package go1_17 // don't permit non-interface elements in interfaces
import "unsafe"
@@ -187,10 +187,10 @@ func f4() (x *f4 /* ERROR "not a type" */ ) { return }
// TODO(#43215) this should be detected as a cycle error
func f5([unsafe.Sizeof(f5)]int) {}
-func (S0) m1 (x S0 /* ERROR value .* is not a type */ .m1) {}
-func (S0) m2 (x *S0 /* ERROR value .* is not a type */ .m2) {}
-func (S0) m3 () (x S0 /* ERROR value .* is not a type */ .m3) { return }
-func (S0) m4 () (x *S0 /* ERROR value .* is not a type */ .m4) { return }
+func (S0) m1 (x S0 /* ERROR illegal cycle in method declaration */ .m1) {}
+func (S0) m2 (x *S0 /* ERROR illegal cycle in method declaration */ .m2) {}
+func (S0) m3 () (x S0 /* ERROR illegal cycle in method declaration */ .m3) { return }
+func (S0) m4 () (x *S0 /* ERROR illegal cycle in method declaration */ .m4) { return }
// interfaces may not have any blank methods
type BlankI interface {
diff --git a/src/go/types/testdata/check/decls1.src b/src/go/types/testdata/check/decls1.src
index f4d2eaba91..6fe349b0b2 100644
--- a/src/go/types/testdata/check/decls1.src
+++ b/src/go/types/testdata/check/decls1.src
@@ -83,7 +83,7 @@ var (
// Constant expression initializations
var (
- v1 = 1 /* ERROR "cannot convert" */ + "foo"
+ v1 = 1 /* ERROR "mismatched types untyped int and untyped string" */ + "foo"
v2 = c + 255
v3 = c + 256 /* ERROR "overflows" */
v4 = r + 2147483647
diff --git a/src/go/types/testdata/check/expr1.src b/src/go/types/testdata/check/expr1.src
index 4ead815158..42b95fbb37 100644
--- a/src/go/types/testdata/check/expr1.src
+++ b/src/go/types/testdata/check/expr1.src
@@ -111,10 +111,10 @@ type mystring string
func _(x, y string, z mystring) {
x = x + "foo"
x = x /* ERROR not defined */ - "foo"
- x = x + 1 // ERROR cannot convert
+ x = x /* ERROR mismatched types string and untyped int */ + 1
x = x + y
x = x /* ERROR not defined */ - y
- x = x * 10 // ERROR cannot convert
+ x = x /* ERROR mismatched types string and untyped int */* 10
}
func f() (a, b int) { return }
diff --git a/src/go/types/testdata/check/expr2.src b/src/go/types/testdata/check/expr2.src
index 0c959e8011..f9726b5de5 100644
--- a/src/go/types/testdata/check/expr2.src
+++ b/src/go/types/testdata/check/expr2.src
@@ -10,7 +10,7 @@ func _bool() {
const t = true == true
const f = true == false
_ = t /* ERROR "cannot compare" */ < f
- _ = 0 /* ERROR "cannot convert" */ == t
+ _ = 0 /* ERROR "mismatched types untyped int and untyped bool" */ == t
var b bool
var x, y float32
b = x < y
diff --git a/src/go/types/testdata/check/expr3.src b/src/go/types/testdata/check/expr3.src
index 0525a5a33a..3ab367810f 100644
--- a/src/go/types/testdata/check/expr3.src
+++ b/src/go/types/testdata/check/expr3.src
@@ -103,7 +103,7 @@ func indexes() {
var ok mybool
_, ok = m["bar"]
_ = ok
- _ = m[0 /* ERROR "cannot use 0" */ ] + "foo" // ERROR "cannot convert"
+ _ = m/* ERROR "mismatched types int and untyped string" */[0 /* ERROR "cannot use 0" */ ] + "foo"
var t string
_ = t[- /* ERROR "negative" */ 1]
diff --git a/src/go/types/testdata/check/issues.go2 b/src/go/types/testdata/check/issues.go2
index 8994164eac..6a1a10ad49 100644
--- a/src/go/types/testdata/check/issues.go2
+++ b/src/go/types/testdata/check/issues.go2
@@ -24,22 +24,20 @@ func _() {
eql[io.Reader](nil, nil)
}
-// If we have a receiver of pointer type (below: *T) we must ignore
-// the pointer in the implementation of the method lookup because
-// the type bound of T is an interface and pointer to interface types
-// have no methods and then the lookup would fail.
+// If we have a receiver of pointer-to-type-parameter type (below: *T),
+// there are no methods to look up, just as for a pointer to an interface.
type C[T any] interface {
m()
}
// using type bound C
func _[T C[T]](x *T) {
- x.m()
+ x.m /* ERROR x\.m undefined */ ()
}
// using an interface literal as bound
func _[T interface{ m() }](x *T) {
- x.m()
+ x.m /* ERROR x\.m undefined */ ()
}
// In a generic function body all method calls will be pointer method calls.
@@ -64,15 +62,15 @@ func _() {
// type with a type list constraint, all of the type argument's types in its
// bound, but at least one (!), must be in the type list of the bound of the
// corresponding parameterized type's type parameter.
-type T1[P interface{type uint}] struct{}
+type T1[P interface{~uint}] struct{}
func _[P any]() {
- _ = T1[P /* ERROR P has no type constraints */ ]{}
+ _ = T1[P /* ERROR P has no constraints */ ]{}
}
// This is the original (simplified) program causing the same issue.
type Unsigned interface {
- type uint
+ ~uint
}
type T2[U Unsigned] struct {
@@ -83,8 +81,8 @@ func (u T2[U]) Add1() U {
return u.s + 1
}
-func NewT2[U any]() T2[U /* ERROR U has no type constraints */ ] {
- return T2[U /* ERROR U has no type constraints */ ]{}
+func NewT2[U any]() T2[U /* ERROR U has no constraints */ ] {
+ return T2[U /* ERROR U has no constraints */ ]{}
}
func _() {
@@ -163,7 +161,7 @@ type inf2[T any] struct{ inf2 /* ERROR illegal cycle */ [T] }
// predicate disjunction in the implementation was wrong because if a type list
// contains both an integer and a floating-point type, the type parameter is
// neither an integer nor a floating-point number.
-func convert[T1, T2 interface{type int, uint, float32}](v T1) T2 {
+func convert[T1, T2 interface{~int | ~uint | ~float32}](v T1) T2 {
return T2(v)
}
@@ -175,12 +173,12 @@ func _() {
// both numeric, or both strings. The implementation had the same problem
// with this check as the conversion issue above (issue #39623).
-func issue39623[T interface{type int, string}](x, y T) T {
+func issue39623[T interface{~int | ~string}](x, y T) T {
return x + y
}
// Simplified, from https://go2goplay.golang.org/p/efS6x6s-9NI:
-func Sum[T interface{type int, string}](s []T) (sum T) {
+func Sum[T interface{~int | ~string}](s []T) (sum T) {
for _, v := range s {
sum += v
}
@@ -189,19 +187,19 @@ func Sum[T interface{type int, string}](s []T) (sum T) {
// Assignability of an unnamed pointer type to a type parameter that
// has a matching underlying type.
-func _[T interface{}, PT interface{type *T}] (x T) PT {
+func _[T interface{}, PT interface{~*T}] (x T) PT {
return &x
}
// Indexing of generic types containing type parameters in their type list:
-func at[T interface{ type []E }, E interface{}](x T, i int) E {
+func at[T interface{ ~[]E }, E interface{}](x T, i int) E {
return x[i]
}
// A generic type inside a function acts like a named type. Its underlying
// type is itself, its "operational type" is defined by the type list in
// the type bound, if any.
-func _[T interface{type int}](x T) {
+func _[T interface{~int}](x T) {
type myint int
var _ int = int(x)
var _ T = 42
@@ -210,24 +208,24 @@ func _[T interface{type int}](x T) {
// Indexing a generic type with an array type bound checks length.
// (Example by mdempsky@.)
-func _[T interface { type [10]int }](x T) {
+func _[T interface { ~[10]int }](x T) {
_ = x[9] // ok
_ = x[20 /* ERROR out of bounds */ ]
}
// Pointer indirection of a generic type.
-func _[T interface{ type *int }](p T) int {
+func _[T interface{ ~*int }](p T) int {
return *p
}
// Channel sends and receives on generic types.
-func _[T interface{ type chan int }](ch T) int {
+func _[T interface{ ~chan int }](ch T) int {
ch <- 0
return <- ch
}
// Calling of a generic variable.
-func _[T interface{ type func() }](f T) {
+func _[T interface{ ~func() }](f T) {
f()
go f()
}
@@ -239,9 +237,9 @@ func _[T interface{ type func() }](f T) {
// type parameter that was substituted with a defined type.
// Test case from an (originally) failing example.
-type sliceOf[E any] interface{ type []E }
+type sliceOf[E any] interface{ ~[]E }
-func append[T interface{}, S sliceOf[T], T2 interface{ type T }](s S, t ...T2) S
+func append[T interface{}, S sliceOf[T], T2 interface{}](s S, t ...T2) S
var f func()
var cancelSlice []context.CancelFunc
diff --git a/src/go/types/testdata/check/issues.src b/src/go/types/testdata/check/issues.src
index 55fe220337..ef1737331d 100644
--- a/src/go/types/testdata/check/issues.src
+++ b/src/go/types/testdata/check/issues.src
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package issues
+package go1_17 // don't permit non-interface elements in interfaces
import (
"fmt"
diff --git a/src/go/types/testdata/check/linalg.go2 b/src/go/types/testdata/check/linalg.go2
index 0d27603a58..efc090a1d1 100644
--- a/src/go/types/testdata/check/linalg.go2
+++ b/src/go/types/testdata/check/linalg.go2
@@ -9,10 +9,10 @@ import "math"
// Numeric is type bound that matches any numeric type.
// It would likely be in a constraints package in the standard library.
type Numeric interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64,
- complex64, complex128
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~complex64 | ~complex128
}
func DotProduct[T Numeric](s1, s2 []T) T {
@@ -42,14 +42,14 @@ func AbsDifference[T NumericAbs[T]](a, b T) T {
// OrderedNumeric is a type bound that matches numeric types that support the < operator.
type OrderedNumeric interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64
}
// Complex is a type bound that matches the two complex types, which do not have a < operator.
type Complex interface {
- type complex64, complex128
+ ~complex64 | ~complex128
}
// OrderedAbs is a helper type that defines an Abs method for
diff --git a/src/go/types/testdata/check/stmt0.src b/src/go/types/testdata/check/stmt0.src
index 76b6e70d63..15df37703c 100644
--- a/src/go/types/testdata/check/stmt0.src
+++ b/src/go/types/testdata/check/stmt0.src
@@ -49,18 +49,18 @@ func assignments1() {
b = true
i += 1
- i += "foo" /* ERROR "cannot convert.*int" */
+ i /* ERROR "mismatched types int and untyped string" */+= "foo"
f -= 1
f /= 0
f = float32(0)/0 /* ERROR "division by zero" */
- f -= "foo" /* ERROR "cannot convert.*float64" */
+ f /* ERROR "mismatched types float64 and untyped string" */-= "foo"
c *= 1
c /= 0
s += "bar"
- s += 1 /* ERROR "cannot convert.*string" */
+ s /* ERROR "mismatched types string and untyped int" */+= 1
var u64 uint64
u64 += 1<<u64
@@ -937,13 +937,13 @@ func issue6766b() {
// errors reported).
func issue10148() {
for y /* ERROR declared but not used */ := range "" {
- _ = "" /* ERROR cannot convert */ + 1
+ _ = "" /* ERROR mismatched types untyped string and untyped int */ + 1
}
for range 1 /* ERROR cannot range over 1 */ {
- _ = "" /* ERROR cannot convert */ + 1
+ _ = "" /* ERROR mismatched types untyped string and untyped int */ + 1
}
for y := range 1 /* ERROR cannot range over 1 */ {
- _ = "" /* ERROR cannot convert */ + 1
+ _ = "" /* ERROR mismatched types untyped string and untyped int */ + 1
}
}
diff --git a/src/go/types/testdata/check/tinference.go2 b/src/go/types/testdata/check/tinference.go2
index 31338b33ad..44e8dc0059 100644
--- a/src/go/types/testdata/check/tinference.go2
+++ b/src/go/types/testdata/check/tinference.go2
@@ -11,36 +11,38 @@ type any interface{}
// TODO(rFindley) the below partially applied function types should probably
// not be permitted (spec question).
-func f0[A any, B interface{type C}, C interface{type D}, D interface{type A}](A, B, C, D)
-func _() {
- f := f0[string]
- f("a", "b", "c", "d")
- f0("a", "b", "c", "d")
-}
-
-func f1[A any, B interface{type A}](A, B)
-func _() {
- f := f1[int]
- f(int(0), int(0))
- f1(int(0), int(0))
-}
-
-func f2[A any, B interface{type []A}](A, B)
+// Embedding stand-alone type parameters is not permitted for now. Disabled.
+// func f0[A any, B interface{~C}, C interface{~D}, D interface{~A}](A, B, C, D)
+// func _() {
+// f := f0[string]
+// f("a", "b", "c", "d")
+// f0("a", "b", "c", "d")
+// }
+//
+// func f1[A any, B interface{~A}](A, B)
+// func _() {
+// f := f1[int]
+// f(int(0), int(0))
+// f1(int(0), int(0))
+// }
+
+func f2[A any, B interface{~[]A}](A, B)
func _() {
f := f2[byte]
f(byte(0), []byte{})
f2(byte(0), []byte{})
}
-func f3[A any, B interface{type C}, C interface{type *A}](A, B, C)
-func _() {
- f := f3[int]
- var x int
- f(x, &x, &x)
- f3(x, &x, &x)
-}
+// Embedding stand-alone type parameters is not permitted for now. Disabled.
+// func f3[A any, B interface{~C}, C interface{~*A}](A, B, C)
+// func _() {
+// f := f3[int]
+// var x int
+// f(x, &x, &x)
+// f3(x, &x, &x)
+// }
-func f4[A any, B interface{type []C}, C interface{type *A}](A, B, C)
+func f4[A any, B interface{~[]C}, C interface{~*A}](A, B, C)
func _() {
f := f4[int]
var x int
@@ -48,14 +50,14 @@ func _() {
f4(x, []*int{}, &x)
}
-func f5[A interface{type struct{b B; c C}}, B any, C interface{type *B}](x B) A
+func f5[A interface{~struct{b B; c C}}, B any, C interface{~*B}](x B) A
func _() {
x := f5(1.2)
var _ float64 = x.b
var _ float64 = *x.c
}
-func f6[A any, B interface{type struct{f []A}}](B) A
+func f6[A any, B interface{~struct{f []A}}](B) A
func _() {
x := f6(struct{f []string}{})
var _ string = x
@@ -63,11 +65,11 @@ func _() {
// TODO(gri) Need to flag invalid recursive constraints. At the
// moment these cause infinite recursions and stack overflow.
-// func f7[A interface{type B}, B interface{type A}]()
+// func f7[A interface{type B}, B interface{~A}]()
// More realistic examples
-func Double[S interface{ type []E }, E interface{ type int, int8, int16, int32, int64 }](s S) S {
+func Double[S interface{ ~[]E }, E interface{ ~int | ~int8 | ~int16 | ~int32 | ~int64 }](s S) S {
r := make(S, len(s))
for i, v := range s {
r[i] = v + v
@@ -83,7 +85,7 @@ var _ = Double(MySlice{1})
type Setter[B any] interface {
Set(string)
- type *B
+ ~*B
}
func FromStrings[T interface{}, PT Setter[T]](s []string) []T {
diff --git a/src/go/types/testdata/check/tmp.go2 b/src/go/types/testdata/check/tmp.go2
deleted file mode 100644
index dae78caff8..0000000000
--- a/src/go/types/testdata/check/tmp.go2
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file is meant as "dumping ground" for debugging code.
-
-package p
-
-// fun test case
-type C[P interface{m()}] P
-
-func (r C[P]) m() { r.m() }
-
-func f[T interface{m(); n()}](x T) {
- y := C[T](x)
- y.m()
-}
diff --git a/src/go/types/testdata/check/typeinst.go2 b/src/go/types/testdata/check/typeinst.go2
index 3184a4b5b1..069bd3bc16 100644
--- a/src/go/types/testdata/check/typeinst.go2
+++ b/src/go/types/testdata/check/typeinst.go2
@@ -33,11 +33,11 @@ var _ A3
var x int
type _ x /* ERROR not a type */ [int]
-type _ int /* ERROR not a generic type */ []
-type _ myInt /* ERROR not a generic type */ []
+type _ int /* ERROR not a generic type */ [] // ERROR expected type argument list
+type _ myInt /* ERROR not a generic type */ [] // ERROR expected type argument list
// TODO(gri) better error messages
-type _ T1 /* ERROR got 0 arguments but 1 type parameters */ []
+type _ T1[] // ERROR expected type argument list
type _ T1[x /* ERROR not a type */ ]
type _ T1 /* ERROR got 2 arguments but 1 type parameters */ [int, float32]
diff --git a/src/go/types/testdata/check/typeinst2.go2 b/src/go/types/testdata/check/typeinst2.go2
index 6e2104a515..ab56ccafc9 100644
--- a/src/go/types/testdata/check/typeinst2.go2
+++ b/src/go/types/testdata/check/typeinst2.go2
@@ -148,15 +148,15 @@ func _[T any](r R2[T, int], p *R2[string, T]) {
p.pm()
}
-// An interface can (explicitly) declare at most one type list.
+// It is ok to have multiple embedded unions.
type _ interface {
m0()
- type int, string, bool
- type /* ERROR multiple type lists */ float32, float64
+ ~int | ~string | ~bool
+ ~float32 | ~float64
m1()
m2()
- type /* ERROR multiple type lists */ complex64, complex128
- type /* ERROR multiple type lists */ rune
+ ~complex64 | ~complex128
+ ~rune
}
// Interface type lists may contain each type at most once.
@@ -164,23 +164,24 @@ type _ interface {
// for them to be all in a single list, and we report the error
// as well.)
type _ interface {
- type int, int /* ERROR duplicate type int */
- type /* ERROR multiple type lists */ int /* ERROR duplicate type int */
+ ~int|~ /* ERROR duplicate term int */ int
+ ~int|int /* ERROR duplicate term int */
+ int|int /* ERROR duplicate term int */
}
type _ interface {
- type struct{f int}, struct{g int}, struct /* ERROR duplicate type */ {f int}
+ ~struct{f int} | ~struct{g int} | ~ /* ERROR duplicate term */ struct{f int}
}
// Interface type lists can contain any type, incl. *Named types.
// Verify that we use the underlying type to compute the operational type.
type MyInt int
-func add1[T interface{type MyInt}](x T) T {
+func add1[T interface{MyInt}](x T) T {
return x + 1
}
type MyString string
-func double[T interface{type MyInt, MyString}](x T) T {
+func double[T interface{MyInt|MyString}](x T) T {
return x + x
}
@@ -189,15 +190,15 @@ func double[T interface{type MyInt, MyString}](x T) T {
// type lists.
type E0 interface {
- type int, bool, string
+ ~int | ~bool | ~string
}
type E1 interface {
- type int, float64, string
+ ~int | ~float64 | ~string
}
type E2 interface {
- type float64
+ ~float64
}
type I0 interface {
@@ -246,7 +247,7 @@ var _ = f12[float64]
type I0_ interface {
E0
- type int
+ ~int
}
func f0_[T I0_]()
diff --git a/src/go/types/testdata/check/typeparams.go2 b/src/go/types/testdata/check/typeparams.go2
index d95e02e443..77cd65d19a 100644
--- a/src/go/types/testdata/check/typeparams.go2
+++ b/src/go/types/testdata/check/typeparams.go2
@@ -6,11 +6,11 @@ package p
// import "io" // for type assertion tests
-// The predeclared identifier "any" is only visible as a constraint
+// The predeclared identifier "any" can only be used as a constraint
// in a type parameter list.
-var _ any // ERROR undeclared
-func _[_ any /* ok here */ , _ interface{any /* ERROR undeclared */ }](any /* ERROR undeclared */ ) {
- var _ any /* ERROR undeclared */
+var _ any // ERROR cannot use any outside constraint position
+func _[_ any /* ok here */ , _ interface{any /* ERROR constraint */ }](any /* ERROR constraint */ ) {
+ var _ any /* ERROR constraint */
}
func identity[T any](x T) T { return x }
@@ -52,22 +52,22 @@ func swapswap[A, B any](a A, b B) (A, B) {
type F[A, B any] func(A, B) (B, A)
-func min[T interface{ type int }](x, y T) T {
+func min[T interface{ ~int }](x, y T) T {
if x < y {
return x
}
return y
}
-func _[T interface{type int, float32}](x, y T) bool { return x < y }
+func _[T interface{~int | ~float32}](x, y T) bool { return x < y }
func _[T any](x, y T) bool { return x /* ERROR cannot compare */ < y }
-func _[T interface{type int, float32, bool}](x, y T) bool { return x /* ERROR cannot compare */ < y }
+func _[T interface{~int | ~float32 | ~bool}](x, y T) bool { return x /* ERROR cannot compare */ < y }
func _[T C1[T]](x, y T) bool { return x /* ERROR cannot compare */ < y }
func _[T C2[T]](x, y T) bool { return x < y }
type C1[T any] interface{}
-type C2[T any] interface{ type int, float32 }
+type C2[T any] interface{ ~int | ~float32 }
func new[T any]() *T {
var x T
@@ -79,11 +79,11 @@ var _ *int = new[int]()
func _[T any](map[T /* ERROR incomparable map key type T \(missing comparable constraint\) */]int) // w/o constraint we don't know if T is comparable
-func f1[T1 any](struct{T1}) int
+func f1[T1 any](struct{T1 /* ERROR cannot be a .* type parameter */ }) int
var _ = f1[int](struct{T1}{})
type T1 = int
-func f2[t1 any](struct{t1; x float32}) int
+func f2[t1 any](struct{t1 /* ERROR cannot be a .* type parameter */ ; x float32}) int
var _ = f2[t1](struct{t1; x float32}{})
type t1 = int
@@ -95,48 +95,53 @@ var _ = f3[int, rune, bool](1, struct{x rune}{}, nil)
// indexing
func _[T any] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
-func _[T interface{ type int }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
-func _[T interface{ type string }] (x T, i int) { _ = x[i] }
-func _[T interface{ type []int }] (x T, i int) { _ = x[i] }
-func _[T interface{ type [10]int, *[20]int, map[int]int }] (x T, i int) { _ = x[i] }
-func _[T interface{ type string, []byte }] (x T, i int) { _ = x[i] }
-func _[T interface{ type []int, [1]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
-func _[T interface{ type string, []rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
-
-// indexing with various combinations of map types in type lists (see issue #42616)
-func _[T interface{ type []E, map[int]E }, E any](x T, i int) { _ = x[i] }
-func _[T interface{ type []E }, E any](x T, i int) { _ = &x[i] }
-func _[T interface{ type map[int]E }, E any](x T, i int) { _, _ = x[i] } // comma-ok permitted
-func _[T interface{ type []E, map[int]E }, E any](x T, i int) { _ = &x /* ERROR cannot take address */ [i] }
-func _[T interface{ type []E, map[int]E, map[uint]E }, E any](x T, i int) { _ = x /* ERROR cannot index */ [i] } // different map element types
-func _[T interface{ type []E, map[string]E }, E any](x T, i int) { _ = x[i /* ERROR cannot use i */ ] }
+func _[T interface{ ~int }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~string }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[]int }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[10]int | ~*[20]int | ~map[int]int }] (x T, i int) { _ = x /* ERROR cannot index */ [i] } // map and non-map types
+func _[T interface{ ~string | ~[]byte }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[]int | ~[1]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~string | ~[]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+
+// indexing with various combinations of map types in type sets (see issue #42616)
+func _[T interface{ ~[]E | ~map[int]E }, E any](x T, i int) { _ = x /* ERROR cannot index */ [i] } // map and non-map types
+func _[T interface{ ~[]E }, E any](x T, i int) { _ = &x[i] }
+func _[T interface{ ~map[int]E }, E any](x T, i int) { _, _ = x[i] } // comma-ok permitted
+func _[T interface{ ~map[int]E }, E any](x T, i int) { _ = &x /* ERROR cannot take address */ [i] }
+func _[T interface{ ~map[int]E | ~map[uint]E }, E any](x T, i int) { _ = x /* ERROR cannot index */ [i] } // different map element types
+func _[T interface{ ~[]E | ~map[string]E }, E any](x T, i int) { _ = x /* ERROR cannot index */ [i] } // map and non-map types
+
+// indexing with various combinations of array and other types in type sets
+func _[T interface{ [10]int }](x T, i int) { _ = x[i]; _ = x[9]; _ = x[10 /* ERROR out of bounds */ ] }
+func _[T interface{ [10]byte | string }](x T, i int) { _ = x[i]; _ = x[9]; _ = x[10 /* ERROR out of bounds */ ] }
+func _[T interface{ [10]int | *[20]int | []int }](x T, i int) { _ = x[i]; _ = x[9]; _ = x[10 /* ERROR out of bounds */ ] }
// slicing
// TODO(gri) implement this
-func _[T interface{ type string }] (x T, i, j, k int) { _ = x /* ERROR invalid operation */ [i:j:k] }
+func _[T interface{ ~string }] (x T, i, j, k int) { _ = x /* ERROR generic slice expressions not yet implemented */ [i:j:k] }
// len/cap built-ins
func _[T any](x T) { _ = len(x /* ERROR invalid argument */ ) }
-func _[T interface{ type int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string, []byte, int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string }](x T) { _ = len(x) }
-func _[T interface{ type [10]int }](x T) { _ = len(x) }
-func _[T interface{ type []byte }](x T) { _ = len(x) }
-func _[T interface{ type map[int]int }](x T) { _ = len(x) }
-func _[T interface{ type chan int }](x T) { _ = len(x) }
-func _[T interface{ type string, []byte, chan int }](x T) { _ = len(x) }
+func _[T interface{ ~int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string | ~[]byte | ~int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string }](x T) { _ = len(x) }
+func _[T interface{ ~[10]int }](x T) { _ = len(x) }
+func _[T interface{ ~[]byte }](x T) { _ = len(x) }
+func _[T interface{ ~map[int]int }](x T) { _ = len(x) }
+func _[T interface{ ~chan int }](x T) { _ = len(x) }
+func _[T interface{ ~string | ~[]byte | ~chan int }](x T) { _ = len(x) }
func _[T any](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string, []byte, int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type [10]int }](x T) { _ = cap(x) }
-func _[T interface{ type []byte }](x T) { _ = cap(x) }
-func _[T interface{ type map[int]int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type chan int }](x T) { _ = cap(x) }
-func _[T interface{ type []byte, chan int }](x T) { _ = cap(x) }
+func _[T interface{ ~int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string | ~[]byte | ~int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~[10]int }](x T) { _ = cap(x) }
+func _[T interface{ ~[]byte }](x T) { _ = cap(x) }
+func _[T interface{ ~map[int]int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~chan int }](x T) { _ = cap(x) }
+func _[T interface{ ~[]byte | ~chan int }](x T) { _ = cap(x) }
// range iteration
@@ -144,7 +149,7 @@ func _[T interface{}](x T) {
for range x /* ERROR cannot range */ {}
}
-func _[T interface{ type string, []string }](x T) {
+func _[T interface{ ~string | ~[]string }](x T) {
for range x {}
for i := range x { _ = i }
for i, _ := range x { _ = i }
@@ -156,23 +161,23 @@ func _[T interface{ type string, []string }](x T) {
}
-func _[T interface{ type string, []rune, map[int]rune }](x T) {
+func _[T interface{ ~string | ~[]rune | ~map[int]rune }](x T) {
for _, e := range x { _ = e }
for i, e := range x { _ = i; _ = e }
}
-func _[T interface{ type string, []rune, map[string]rune }](x T) {
+func _[T interface{ ~string | ~[]rune | ~map[string]rune }](x T) {
for _, e := range x { _ = e }
for i, e := range x /* ERROR must have the same key type */ { _ = e }
}
-func _[T interface{ type string, chan int }](x T) {
+func _[T interface{ ~string | ~chan int }](x T) {
for range x {}
for i := range x { _ = i }
for i, _ := range x { _ = i } // TODO(gri) should get an error here: channels only return one value
}
-func _[T interface{ type string, chan<-int }](x T) {
+func _[T interface{ ~string | ~chan<-int }](x T) {
for i := range x /* ERROR send-only channel */ { _ = i }
}
@@ -399,7 +404,7 @@ func _[T any](x T) {
}
}
-func _[T interface{type int}](x T) {
+func _[T interface{~int}](x T) {
_ = x /* ERROR not an interface */ .(int)
switch x /* ERROR not an interface */ .(type) {
}
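Editor's note: the indexing and len/cap rewrites above follow one pattern: an operation on a type-parameter operand is allowed only if every type in its type set supports it consistently. A hedged standalone example mirroring the ~string | ~[]byte cases (not part of the patch):

package p

// string and []byte both index to a byte, so indexing is permitted.
func byteAt[T interface{ ~string | ~[]byte }](x T, i int) byte {
	return x[i]
}

// len is likewise defined for every type in the set.
func length[T interface{ ~string | ~[]byte | ~chan int }](x T) int {
	return len(x)
}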
diff --git a/src/go/types/testdata/examples/constraints.go2 b/src/go/types/testdata/examples/constraints.go2
new file mode 100644
index 0000000000..28aa19bb12
--- /dev/null
+++ b/src/go/types/testdata/examples/constraints.go2
@@ -0,0 +1,60 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file shows some examples of generic constraint interfaces.
+
+package p
+
+type (
+ // Type lists are processed as unions but an error is reported.
+ // TODO(gri) remove this once the parser doesn't accept type lists anymore.
+ _ interface{
+ type /* ERROR use generalized embedding syntax instead of a type list */ int
+ }
+ _ interface{
+ type /* ERROR use generalized embedding syntax instead of a type list */ int
+ type float32
+ }
+)
+
+type (
+ // Arbitrary types may be embedded like interfaces.
+ _ interface{int}
+ _ interface{~int}
+
+ // Types may be combined into a union.
+ _ interface{int|~string}
+
+ // Union terms must be unique independent of whether they are ~ or not.
+ _ interface{int|int /* ERROR duplicate term int */ }
+ _ interface{int|~ /* ERROR duplicate term int */ int }
+ _ interface{~int|~ /* ERROR duplicate term int */ int }
+
+ // For now we do not permit interfaces with methods in unions.
+ _ interface{~ /* ERROR invalid use of ~ */ interface{}}
+ _ interface{int|interface /* ERROR cannot use .* in union */ { m() }}
+)
+
+type (
+ // Tilde is not permitted on defined types or interfaces.
+ foo int
+ bar interface{}
+ _ interface{foo}
+ _ interface{~ /* ERROR invalid use of ~ */ foo }
+ _ interface{~ /* ERROR invalid use of ~ */ bar }
+)
+
+// Multiple embedded union elements are intersected. The order in which they
+// appear in the interface doesn't matter since intersection is a symmetric
+// operation.
+
+type myInt1 int
+type myInt2 int
+
+func _[T interface{ myInt1|myInt2; ~int }]() T { return T(0) }
+func _[T interface{ ~int; myInt1|myInt2 }]() T { return T(0) }
+
+// Here the intersections are empty - there's no type that's in the type set of T.
+func _[T interface{ myInt1|myInt2; int }]() T { return T(0 /* ERROR cannot convert */ ) }
+func _[T interface{ int; myInt1|myInt2 }]() T { return T(0 /* ERROR cannot convert */ ) }
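A minimal usage sketch of the non-empty intersection case above (assuming a Go 1.18+ toolchain; the zero helper is a hypothetical name): the type set of interface{ myInt1|myInt2; ~int } is {myInt1, myInt2}, so either defined type instantiates it while plain int does not.

package p

type myInt1 int
type myInt2 int

// The type set of this constraint is the intersection {myInt1, myInt2}.
func zero[T interface{ myInt1 | myInt2; ~int }]() T { return T(0) }

var (
	_ myInt1 = zero[myInt1]() // ok: myInt1 is in the type set
	_ myInt2 = zero[myInt2]() // ok: myInt2 is in the type set
	// _ = zero[int]()        // rejected: int is not in myInt1|myInt2
)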
diff --git a/src/go/types/testdata/examples/functions.go2 b/src/go/types/testdata/examples/functions.go2
index a053471202..81b9d3456c 100644
--- a/src/go/types/testdata/examples/functions.go2
+++ b/src/go/types/testdata/examples/functions.go2
@@ -98,7 +98,7 @@ func g2b[P, Q any](x P, y Q) {
// Here's an example of a recursive function call with variadic
// arguments and type inference inferring the type parameter of
// the caller (i.e., itself).
-func max[T interface{ type int }](x ...T) T {
+func max[T interface{ ~int }](x ...T) T {
var x0 T
if len(x) > 0 {
x0 = x[0]
diff --git a/src/go/types/testdata/examples/inference.go2 b/src/go/types/testdata/examples/inference.go2
index b4f3369aa0..1142e569b4 100644
--- a/src/go/types/testdata/examples/inference.go2
+++ b/src/go/types/testdata/examples/inference.go2
@@ -7,7 +7,7 @@
package p
type Ordered interface {
- type int, float64, string
+ ~int|~float64|~string
}
func min[T Ordered](x, y T) T
@@ -54,7 +54,7 @@ func _() {
mixed[int, string](1.1 /* ERROR cannot use 1.1 */ , "", false)
}
-func related1[Slice interface{type []Elem}, Elem any](s Slice, e Elem)
+func related1[Slice interface{~[]Elem}, Elem any](s Slice, e Elem)
func _() {
// related1 can be called with explicit instantiation.
@@ -78,7 +78,7 @@ func _() {
related1(si, "foo" /* ERROR cannot use "foo" */ )
}
-func related2[Elem any, Slice interface{type []Elem}](e Elem, s Slice)
+func related2[Elem any, Slice interface{~[]Elem}](e Elem, s Slice)
func _() {
// related2 can be called with explicit instantiation.
diff --git a/src/go/types/testdata/examples/methods.go2 b/src/go/types/testdata/examples/methods.go2
index 76c6539e1b..4e87041e54 100644
--- a/src/go/types/testdata/examples/methods.go2
+++ b/src/go/types/testdata/examples/methods.go2
@@ -6,6 +6,8 @@
package p
+import "unsafe"
+
// Parameterized types may have methods.
type T1[A any] struct{ a A }
@@ -94,3 +96,18 @@ func (_ T2[_, _, _]) _() int { return 42 }
type T0 struct{}
func (T0) _() {}
func (T1[A]) _() {}
+
+// A generic receiver type may constrain its type parameter such
+// that it must be a pointer type. Such receiver types are not
+// permitted.
+type T3a[P interface{ ~int | ~string | ~float64 }] P
+
+func (T3a[_]) m() {} // this is ok
+
+type T3b[P interface{ ~unsafe.Pointer }] P
+
+func (T3b /* ERROR invalid receiver */ [_]) m() {}
+
+type T3c[P interface{ *int | *string }] P
+
+func (T3c /* ERROR invalid receiver */ [_]) m() {}
diff --git a/src/go/types/testdata/examples/operations.go2 b/src/go/types/testdata/examples/operations.go2
new file mode 100644
index 0000000000..18e4d6080c
--- /dev/null
+++ b/src/go/types/testdata/examples/operations.go2
@@ -0,0 +1,29 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// indirection
+
+func _[P any](p P) {
+ _ = *p // ERROR cannot indirect p
+}
+
+func _[P interface{ int }](p P) {
+ _ = *p // ERROR cannot indirect p
+}
+
+func _[P interface{ *int }](p P) {
+ _ = *p
+}
+
+func _[P interface{ *int | *string }](p P) {
+ _ = *p // ERROR must have identical base types
+}
+
+type intPtr *int
+
+func _[P interface{ *int | intPtr } ](p P) {
+ var _ int = *p
+}
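A companion sketch (hypothetical deref helper; Go 1.18+ toolchain assumed): indirection through a type parameter is permitted once every type in the constraint's type set shares the same base type, as with *int and intPtr above.

package p

type intPtr *int

// Both *int and intPtr have base type int, so *p is well defined.
func deref[P interface{ *int | intPtr }](p P) int { return *p }

func _() {
	x := 42
	_ = deref(&x)         // P inferred as *int
	_ = deref(intPtr(&x)) // P inferred as intPtr
}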
diff --git a/src/go/types/testdata/examples/types.go2 b/src/go/types/testdata/examples/types.go2
index 59c8804ad2..a7544f79ea 100644
--- a/src/go/types/testdata/examples/types.go2
+++ b/src/go/types/testdata/examples/types.go2
@@ -161,30 +161,40 @@ type _ struct {
* /* ERROR List redeclared */ List[int]
}
+// Issue #45639: We don't allow this anymore. Keep this code
+// in case we decide to revisit this decision.
+//
// It's possible to declare local types whose underlying types
// are type parameters. As with ordinary type definitions, the
// types underlying properties are "inherited" but the methods
// are not.
-func _[T interface{ m(); type int }]() {
- type L T
- var x L
-
- // m is not defined on L (it is not "inherited" from
- // its underlying type).
- x.m /* ERROR x.m undefined */ ()
-
- // But the properties of T, such as that it supports
- // the operations of the types given by its type bound,
- // are also the properties of L.
- x++
- _ = x - x
-
- // On the other hand, if we define a local alias for T,
- // that alias stands for T as expected.
- type A = T
- var y A
- y.m()
- _ = y < 0
+//func _[T interface{ m(); ~int }]() {
+// type L T
+// var x L
+//
+// // m is not defined on L (it is not "inherited" from
+// // its underlying type).
+// x.m /* ERROR x.m undefined */ ()
+//
+// // But the properties of T, such as that it supports
+// // the operations of the types given by its type bound,
+// // are also the properties of L.
+// x++
+// _ = x - x
+//
+// // On the other hand, if we define a local alias for T,
+// // that alias stands for T as expected.
+// type A = T
+// var y A
+// y.m()
+// _ = y < 0
+//}
+
+// It is not permitted to declare a local type whose underlying
+// type is a type parameter not declared by that type declaration.
+func _[T any]() {
+ type _ T // ERROR cannot use function type parameter T as RHS in type declaration
+ type _ [_ any] T // ERROR cannot use function type parameter T as RHS in type declaration
}
// As a special case, an explicit type argument may be omitted
@@ -238,11 +248,11 @@ func _[A Adder[A], B Adder[B], C Adder[A]]() {
// The type of variables (incl. parameters and return values) cannot
// be an interface with type constraints or be/embed comparable.
type I interface {
- type int
+ ~int
}
var (
- _ interface /* ERROR contains type constraints */ {type int}
+ _ interface /* ERROR contains type constraints */ {~int}
_ I /* ERROR contains type constraints */
)
@@ -273,7 +283,7 @@ func _() {
// (If a type list contains just a single const type, we could
// allow it, but such type lists don't make much sense in the
// first place.)
-func _[T interface { type int, float64 }]() {
+func _[T interface {~int|~float64}]() {
// not valid
const _ = T /* ERROR not constant */ (0)
const _ T /* ERROR invalid constant type T */ = 1
diff --git a/src/go/types/testdata/fixedbugs/issue28251.src b/src/go/types/testdata/fixedbugs/issue28251.src
index cd79e0e8b5..ef5e61df47 100644
--- a/src/go/types/testdata/fixedbugs/issue28251.src
+++ b/src/go/types/testdata/fixedbugs/issue28251.src
@@ -60,6 +60,6 @@ type (
T11 = T
)
-func (T9 /* ERROR invalid receiver \*\*T */ ) m9() {}
+func (T9 /* ERROR invalid receiver type \*\*T */ ) m9() {}
func _() { (T{}).m9 /* ERROR has no field or method m9 */ () }
func _() { (&T{}).m9 /* ERROR has no field or method m9 */ () }
diff --git a/src/go/types/testdata/fixedbugs/issue39634.go2 b/src/go/types/testdata/fixedbugs/issue39634.go2
index a13ed13ce5..aec404e294 100644
--- a/src/go/types/testdata/fixedbugs/issue39634.go2
+++ b/src/go/types/testdata/fixedbugs/issue39634.go2
@@ -31,13 +31,14 @@ type x7[A any] struct{ foo7 }
func main7() { var _ foo7 = x7[int]{} }
// crash 8
-type foo8[A any] interface { type A }
-func bar8[A foo8[A]](a A) {}
-func main8() {}
+// Embedding stand-alone type parameters is not permitted for now. Disabled.
+// type foo8[A any] interface { ~A }
+// func bar8[A foo8[A]](a A) {}
+// func main8() {}
// crash 9
-type foo9[A any] interface { type foo9 /* ERROR interface contains type constraints */ [A] }
-func _() { var _ = new(foo9 /* ERROR interface contains type constraints */ [int]) }
+type foo9[A any] interface { foo9 /* ERROR illegal cycle */ [A] }
+func _() { var _ = new(foo9 /* ERROR illegal cycle */ [int]) }
// crash 12
var u /* ERROR cycle */ , i [func /* ERROR used as value */ /* ERROR used as value */ (u, c /* ERROR undeclared */ /* ERROR undeclared */ ) {}(0, len /* ERROR must be called */ /* ERROR must be called */ )]c /* ERROR undeclared */ /* ERROR undeclared */
diff --git a/src/go/types/testdata/fixedbugs/issue39680.go2 b/src/go/types/testdata/fixedbugs/issue39680.go2
index 9bc26f3546..e56bc35475 100644
--- a/src/go/types/testdata/fixedbugs/issue39680.go2
+++ b/src/go/types/testdata/fixedbugs/issue39680.go2
@@ -4,16 +4,19 @@
package p
+// Embedding stand-alone type parameters is not permitted for now. Disabled.
+
+/*
import "fmt"
// Minimal test case.
-func _[T interface{type T}](x T) T{
+func _[T interface{~T}](x T) T{
return x
}
// Test case from issue.
type constr[T any] interface {
- type T
+ ~T
}
func Print[T constr[T]](s []T) {
@@ -25,3 +28,4 @@ func Print[T constr[T]](s []T) {
func f() {
Print([]string{"Hello, ", "playground\n"})
}
+*/
diff --git a/src/go/types/testdata/fixedbugs/issue39693.go2 b/src/go/types/testdata/fixedbugs/issue39693.go2
index 316ab1982e..ec7641902a 100644
--- a/src/go/types/testdata/fixedbugs/issue39693.go2
+++ b/src/go/types/testdata/fixedbugs/issue39693.go2
@@ -4,11 +4,20 @@
package p
-type Number interface {
- int /* ERROR int is not an interface */
- float64 /* ERROR float64 is not an interface */
+type Number1 interface {
+ // embedding non-interface types is permitted
+ int
+ float64
}
-func Add[T Number](a, b T) T {
+func Add[T Number1](a, b T) T {
return a /* ERROR not defined */ + b
}
+
+type Number2 interface {
+ int|float64
+}
+
+func Add2[T Number2](a, b T) T {
+ return a + b
+}
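A brief usage sketch for a union constraint like Number2 (declarations repeated here so the sketch is self-contained): the + operator is available because it is defined for every type in the union.

package p

type Number2 interface{ int | float64 }

func Add2[T Number2](a, b T) T { return a + b }

var (
	_ = Add2[int](1, 2) // explicit instantiation
	_ = Add2(1.5, 2.5)  // T inferred as float64
)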
diff --git a/src/go/types/testdata/fixedbugs/issue39699.go2 b/src/go/types/testdata/fixedbugs/issue39699.go2
index 75491e7e26..72f83997c2 100644
--- a/src/go/types/testdata/fixedbugs/issue39699.go2
+++ b/src/go/types/testdata/fixedbugs/issue39699.go2
@@ -8,7 +8,7 @@ type T0 interface{
}
type T1 interface{
- type int
+ ~int
}
type T2 interface{
diff --git a/src/go/types/testdata/fixedbugs/issue39711.go2 b/src/go/types/testdata/fixedbugs/issue39711.go2
index df621a4c17..cf1f90545f 100644
--- a/src/go/types/testdata/fixedbugs/issue39711.go2
+++ b/src/go/types/testdata/fixedbugs/issue39711.go2
@@ -7,5 +7,7 @@ package p
// Do not report a duplicate type error for this type list.
// (Check types after interfaces have been completed.)
type _ interface {
- type interface{ Error() string }, interface{ String() string }
+ // TODO(rfindley) Once we have full type sets we can enable this again.
+ // For now we don't permit interfaces in type lists.
+ // type interface{ Error() string }, interface{ String() string }
}
diff --git a/src/go/types/testdata/fixedbugs/issue39723.go2 b/src/go/types/testdata/fixedbugs/issue39723.go2
index 55464e6b77..367b3f1360 100644
--- a/src/go/types/testdata/fixedbugs/issue39723.go2
+++ b/src/go/types/testdata/fixedbugs/issue39723.go2
@@ -6,4 +6,4 @@ package p
// A constraint must be an interface; it cannot
// be a type parameter, for instance.
-func _[A interface{ type interface{} }, B A /* ERROR not an interface */ ]()
+func _[A interface{ ~int }, B A /* ERROR not an interface */ ]()
diff --git a/src/go/types/testdata/fixedbugs/issue39755.go2 b/src/go/types/testdata/fixedbugs/issue39755.go2
index b7ab68818e..257b73a2fb 100644
--- a/src/go/types/testdata/fixedbugs/issue39755.go2
+++ b/src/go/types/testdata/fixedbugs/issue39755.go2
@@ -4,14 +4,14 @@
package p
-func _[T interface{type map[string]int}](x T) {
+func _[T interface{~map[string]int}](x T) {
_ = x == nil
}
// simplified test case from issue
type PathParamsConstraint interface {
- type map[string]string, []struct{key, value string}
+ ~map[string]string | ~[]struct{key, value string}
}
type PathParams[T PathParamsConstraint] struct {
diff --git a/src/go/types/testdata/fixedbugs/issue39938.go2 b/src/go/types/testdata/fixedbugs/issue39938.go2
index 76e7e369ca..0da6e103fd 100644
--- a/src/go/types/testdata/fixedbugs/issue39938.go2
+++ b/src/go/types/testdata/fixedbugs/issue39938.go2
@@ -8,8 +8,8 @@ package p
type E0[P any] P
type E1[P any] *P
-type E2[P any] struct{ P }
-type E3[P any] struct{ *P }
+type E2[P any] struct{ _ P }
+type E3[P any] struct{ _ *P }
type T0 /* ERROR illegal cycle */ struct {
_ E0[T0]
diff --git a/src/go/types/testdata/fixedbugs/issue39948.go2 b/src/go/types/testdata/fixedbugs/issue39948.go2
index c2b460902c..e38e57268d 100644
--- a/src/go/types/testdata/fixedbugs/issue39948.go2
+++ b/src/go/types/testdata/fixedbugs/issue39948.go2
@@ -5,5 +5,5 @@
package p
type T[P any] interface{
- P // ERROR P is a type parameter, not an interface
+ P // ERROR cannot embed a type parameter
}
diff --git a/src/go/types/testdata/fixedbugs/issue40301.go2 b/src/go/types/testdata/fixedbugs/issue40301.go2
index 5d97855f8a..c78f9a1fa0 100644
--- a/src/go/types/testdata/fixedbugs/issue40301.go2
+++ b/src/go/types/testdata/fixedbugs/issue40301.go2
@@ -7,6 +7,6 @@ package p
import "unsafe"
func _[T any](x T) {
- _ = unsafe /* ERROR undefined */ .Alignof(x)
- _ = unsafe /* ERROR undefined */ .Sizeof(x)
+ _ = unsafe.Alignof(x)
+ _ = unsafe.Sizeof(x)
}
diff --git a/src/go/types/testdata/fixedbugs/issue40789.go2 b/src/go/types/testdata/fixedbugs/issue40789.go2
new file mode 100644
index 0000000000..9eea4ad60a
--- /dev/null
+++ b/src/go/types/testdata/fixedbugs/issue40789.go2
@@ -0,0 +1,37 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "fmt"
+
+func main() {
+ m := map[string]int{
+ "a": 6,
+ "b": 7,
+ }
+ fmt.Println(copyMap[map[string]int, string, int](m))
+}
+
+type Map[K comparable, V any] interface {
+ map[K] V
+}
+
+func copyMap[M Map[K, V], K comparable, V any](m M) M {
+ m1 := make(M)
+ for k, v := range m {
+ m1[k] = v
+ }
+ return m1
+}
+
+// simpler test case from the same issue
+
+type A[X comparable] interface {
+ []X
+}
+
+func f[B A[X], X comparable]() B {
+ return nil
+}
diff --git a/src/go/types/testdata/fixedbugs/issue41124.go2 b/src/go/types/testdata/fixedbugs/issue41124.go2
index 61f766bcbd..ab535049dd 100644
--- a/src/go/types/testdata/fixedbugs/issue41124.go2
+++ b/src/go/types/testdata/fixedbugs/issue41124.go2
@@ -7,7 +7,7 @@ package p
// Test case from issue.
type Nat interface {
- type Zero, Succ
+ Zero|Succ
}
type Zero struct{}
@@ -22,7 +22,7 @@ type I1 interface {
}
type I2 interface {
- type int
+ ~int
}
type I3 interface {
diff --git a/src/go/types/testdata/fixedbugs/issue42758.go2 b/src/go/types/testdata/fixedbugs/issue42758.go2
index 698cb8a16b..bf0031f5d2 100644
--- a/src/go/types/testdata/fixedbugs/issue42758.go2
+++ b/src/go/types/testdata/fixedbugs/issue42758.go2
@@ -17,7 +17,7 @@ func _[T any](x interface{}){
}
type constraint interface {
- type int
+ ~int
}
func _[T constraint](x interface{}){
diff --git a/src/go/types/testdata/fixedbugs/issue43671.go2 b/src/go/types/testdata/fixedbugs/issue43671.go2
new file mode 100644
index 0000000000..6cc3801cc9
--- /dev/null
+++ b/src/go/types/testdata/fixedbugs/issue43671.go2
@@ -0,0 +1,58 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type C0 interface{ int }
+type C1 interface{ chan int }
+type C2 interface{ chan int | <-chan int }
+type C3 interface{ chan int | chan float32 }
+type C4 interface{ chan int | chan<- int }
+type C5[T any] interface{ ~chan T | <-chan T }
+
+func _[T any](ch T) {
+ <-ch // ERROR cannot receive from non-channel
+}
+
+func _[T C0](ch T) {
+ <-ch // ERROR cannot receive from non-channel
+}
+
+func _[T C1](ch T) {
+ <-ch
+}
+
+func _[T C2](ch T) {
+ <-ch
+}
+
+func _[T C3](ch T) {
+ <-ch // ERROR channels of ch .* must have the same element type
+}
+
+func _[T C4](ch T) {
+ <-ch // ERROR cannot receive from send-only channel
+}
+
+func _[T C5[X], X any](ch T, x X) {
+ x = <-ch
+}
+
+// test case from issue, slightly modified
+type RecvChan[T any] interface {
+ ~chan T | ~<-chan T
+}
+
+func _[T any, C RecvChan[T]](ch C) T {
+ return <-ch
+}
+
+func f[T any, C interface{ chan T }](ch C) T {
+ return <-ch
+}
+
+func _(ch chan int) {
+ var x int = f(ch) // test constraint type inference for this case
+ _ = x
+}
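A usage sketch for a receive-only constraint like RecvChan above (recv is a hypothetical name): both bidirectional and receive-only channels are in its type set, so either kind may be passed with explicit instantiation.

package p

type RecvChan[T any] interface{ ~chan T | ~<-chan T }

func recv[T any, C RecvChan[T]](ch C) T { return <-ch }

func _() {
	bidi := make(chan int, 2)
	bidi <- 1
	bidi <- 2
	_ = recv[int, chan int](bidi) // bidirectional channel

	var ro <-chan int = bidi
	_ = recv[int, <-chan int](ro) // receive-only channel
}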
diff --git a/src/go/types/testdata/fixedbugs/issue45548.go2 b/src/go/types/testdata/fixedbugs/issue45548.go2
index b1e42497e8..b8ba0ad4a7 100644
--- a/src/go/types/testdata/fixedbugs/issue45548.go2
+++ b/src/go/types/testdata/fixedbugs/issue45548.go2
@@ -4,7 +4,7 @@
package p
-func f[F interface{type *Q}, G interface{type *R}, Q, R any](q Q, r R) {}
+func f[F interface{~*Q}, G interface{~*R}, Q, R any](q Q, r R) {}
func _() {
f[*float64, *int](1, 2)
diff --git a/src/go/types/testdata/fixedbugs/issue45635.go2 b/src/go/types/testdata/fixedbugs/issue45635.go2
index 3e2cceca2d..fc50797b17 100644
--- a/src/go/types/testdata/fixedbugs/issue45635.go2
+++ b/src/go/types/testdata/fixedbugs/issue45635.go2
@@ -10,10 +10,10 @@ func main() {
type N[T any] struct{}
-var _ N /* ERROR "0 arguments but 1 type parameters" */ []
+var _ N [] // ERROR expected type argument list
type I interface {
- type map[int]int, []int
+ ~[]int
}
func _[T I](i, j int) {
@@ -27,6 +27,5 @@ func _[T I](i, j int) {
_ = s[i, j /* ERROR "more than one index" */ ]
var t T
- // TODO(rFindley) Fix the duplicate error below.
- _ = t[i, j /* ERROR "more than one index" */ /* ERROR "more than one index" */ ]
+ _ = t[i, j /* ERROR "more than one index" */ ]
}
diff --git a/src/go/types/testdata/fixedbugs/issue45639.go2 b/src/go/types/testdata/fixedbugs/issue45639.go2
new file mode 100644
index 0000000000..441fb4cb34
--- /dev/null
+++ b/src/go/types/testdata/fixedbugs/issue45639.go2
@@ -0,0 +1,12 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package P
+
+// It is not permitted to declare a local type whose underlying
+// type is a type parameter not declared by that type declaration.
+func _[T any]() {
+ type _ T // ERROR cannot use function type parameter T as RHS in type declaration
+ type _ [_ any] T // ERROR cannot use function type parameter T as RHS in type declaration
+}
diff --git a/src/go/types/testdata/fixedbugs/issue45985.go2 b/src/go/types/testdata/fixedbugs/issue45985.go2
index 550b9c6712..6e42dbb633 100644
--- a/src/go/types/testdata/fixedbugs/issue45985.go2
+++ b/src/go/types/testdata/fixedbugs/issue45985.go2
@@ -5,7 +5,7 @@
package issue45985
// TODO(rFindley): this error should be on app[int] below.
-func app[S /* ERROR "type S = S does not match" */ interface{ type []T }, T any](s S, e T) S {
+func app[S /* ERROR "type S = S does not match" */ interface{ ~[]T }, T any](s S, e T) S {
return append(s, e)
}
diff --git a/src/go/types/testdata/fixedbugs/issue46090.go2 b/src/go/types/testdata/fixedbugs/issue46090.go2
new file mode 100644
index 0000000000..81b31974c8
--- /dev/null
+++ b/src/go/types/testdata/fixedbugs/issue46090.go2
@@ -0,0 +1,9 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The predeclared type comparable is not visible before Go 1.18.
+
+package go1_17
+
+type _ comparable // ERROR undeclared
diff --git a/src/go/types/testdata/fixedbugs/issue46275.go2 b/src/go/types/testdata/fixedbugs/issue46275.go2
new file mode 100644
index 0000000000..0ebde31c8e
--- /dev/null
+++ b/src/go/types/testdata/fixedbugs/issue46275.go2
@@ -0,0 +1,27 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue46275
+
+type N[T any] struct {
+ *N[T]
+ t T
+}
+
+func (n *N[T]) Elem() T {
+ return n.t
+}
+
+type I interface {
+ Elem() string
+}
+
+func _() {
+ var n1 *N[string]
+ var _ I = n1
+ type NS N[string]
+ var n2 *NS
+ var _ I = n2
+}
+
diff --git a/src/go/types/testdata/fixedbugs/issue47031.go2 b/src/go/types/testdata/fixedbugs/issue47031.go2
new file mode 100644
index 0000000000..b184f9b5b7
--- /dev/null
+++ b/src/go/types/testdata/fixedbugs/issue47031.go2
@@ -0,0 +1,20 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Mer interface { M() }
+
+func F[T Mer](p *T) {
+ p.M /* ERROR p\.M undefined */ ()
+}
+
+type MyMer int
+
+func (MyMer) M() {}
+
+func _() {
+ F(new(MyMer))
+ F[Mer](nil)
+}
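A sketch of how such a call can be written so it type-checks (declarations repeated so the sketch stands alone): the constraint's method is in the method set of T, not of *T, so the call must go through the dereferenced value.

package p

type Mer interface{ M() }

// M is in the method set of T itself, so call it on *p rather than p.
func F[T Mer](p *T) {
	(*p).M()
}

type MyMer int

func (MyMer) M() {}

func _() {
	F(new(MyMer)) // T inferred as MyMer
}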
diff --git a/src/go/types/testdata/fixedbugs/issue47115.go2 b/src/go/types/testdata/fixedbugs/issue47115.go2
new file mode 100644
index 0000000000..6694219b54
--- /dev/null
+++ b/src/go/types/testdata/fixedbugs/issue47115.go2
@@ -0,0 +1,40 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type C0 interface{ int }
+type C1 interface{ chan int }
+type C2 interface{ chan int | <-chan int }
+type C3 interface{ chan int | chan float32 }
+type C4 interface{ chan int | chan<- int }
+type C5[T any] interface{ ~chan T | chan<- T }
+
+func _[T any](ch T) {
+ ch <- /* ERROR cannot send to non-channel */ 0
+}
+
+func _[T C0](ch T) {
+ ch <- /* ERROR cannot send to non-channel */ 0
+}
+
+func _[T C1](ch T) {
+ ch <- 0
+}
+
+func _[T C2](ch T) {
+ ch <-/* ERROR cannot send to receive-only channel */ 0
+}
+
+func _[T C3](ch T) {
+ ch <- /* ERROR channels of ch .* must have the same element type */ 0
+}
+
+func _[T C4](ch T) {
+ ch <- 0
+}
+
+func _[T C5[X], X any](ch T, x X) {
+ ch <- x
+}
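By symmetry with C5 above, a send-side sketch (SendChan and send are hypothetical names): a union of ~chan T and ~chan<- T still permits the send statement because every type in the set supports sending.

package p

type SendChan[T any] interface{ ~chan T | ~chan<- T }

func send[T any, C SendChan[T]](ch C, x T) { ch <- x }

func _() {
	ch := make(chan int, 2)
	send[int, chan int](ch, 1) // bidirectional channel

	var wo chan<- int = ch
	send[int, chan<- int](wo, 2) // send-only channel
}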
diff --git a/src/go/types/testdata/fixedbugs/issue47127.go2 b/src/go/types/testdata/fixedbugs/issue47127.go2
new file mode 100644
index 0000000000..387c946957
--- /dev/null
+++ b/src/go/types/testdata/fixedbugs/issue47127.go2
@@ -0,0 +1,37 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Embedding of stand-alone type parameters is not permitted.
+
+package p
+
+type (
+ _[P any] interface{ *P | []P | chan P | map[string]P }
+ _[P any] interface{ P /* ERROR "cannot embed a type parameter" */ }
+ _[P any] interface{ ~P /* ERROR "cannot embed a type parameter" */ }
+ _[P any] interface{ int | P /* ERROR "cannot embed a type parameter" */ }
+ _[P any] interface{ int | ~P /* ERROR "cannot embed a type parameter" */ }
+)
+
+func _[P any]() {
+ type (
+ _[P any] interface{ *P | []P | chan P | map[string]P }
+ _[P any] interface{ P /* ERROR "cannot embed a type parameter" */ }
+ _[P any] interface{ ~P /* ERROR "cannot embed a type parameter" */ }
+ _[P any] interface{ int | P /* ERROR "cannot embed a type parameter" */ }
+ _[P any] interface{ int | ~P /* ERROR "cannot embed a type parameter" */ }
+
+ _ interface{ *P | []P | chan P | map[string]P }
+ _ interface{ P /* ERROR "cannot embed a type parameter" */ }
+ _ interface{ ~P /* ERROR "cannot embed a type parameter" */ }
+ _ interface{ int | P /* ERROR "cannot embed a type parameter" */ }
+ _ interface{ int | ~P /* ERROR "cannot embed a type parameter" */ }
+ )
+}
+
+func _[P any, Q interface{ *P | []P | chan P | map[string]P }]()
+func _[P any, Q interface{ P /* ERROR "cannot embed a type parameter" */ }]()
+func _[P any, Q interface{ ~P /* ERROR "cannot embed a type parameter" */ }]()
+func _[P any, Q interface{ int | P /* ERROR "cannot embed a type parameter" */ }]()
+func _[P any, Q interface{ int | ~P /* ERROR "cannot embed a type parameter" */ }]()
diff --git a/src/go/types/testdata/fixedbugs/issue47411.go2 b/src/go/types/testdata/fixedbugs/issue47411.go2
new file mode 100644
index 0000000000..7326205863
--- /dev/null
+++ b/src/go/types/testdata/fixedbugs/issue47411.go2
@@ -0,0 +1,26 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f[_ comparable]()
+func g[_ interface{interface{comparable; ~int|~string}}]()
+
+func _[P comparable,
+ Q interface{ comparable; ~int|~string },
+ R any, // not comparable
+ S interface{ comparable; ~func() }, // not comparable
+]() {
+ _ = f[int]
+ _ = f[P]
+ _ = f[Q]
+ _ = f[func /* ERROR does not satisfy comparable */ ()]
+ _ = f[R /* ERROR R has no constraints */ ]
+
+ _ = g[int]
+ _ = g[P /* ERROR P has no type constraints */ ]
+ _ = g[Q]
+ _ = g[func /* ERROR does not satisfy comparable */()]
+ _ = g[R /* ERROR R has no constraints */ ]
+}
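As a reminder of what comparable buys, a small sketch (index is a hypothetical name): the == comparison is the only reason the constraint is needed.

package p

// index returns the position of v in s, or -1 if v is not present.
func index[T comparable](s []T, v T) int {
	for i, x := range s {
		if x == v {
			return i
		}
	}
	return -1
}

var _ = index([]string{"a", "b"}, "b") // 1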
diff --git a/src/go/types/testdata/manual.go2 b/src/go/types/testdata/manual.go2
new file mode 100644
index 0000000000..25e6f22f94
--- /dev/null
+++ b/src/go/types/testdata/manual.go2
@@ -0,0 +1,9 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is tested when running "go test -run Manual"
+// without source arguments. Use for one-off debugging.
+
+package p
+
diff --git a/src/go/types/tuple.go b/src/go/types/tuple.go
new file mode 100644
index 0000000000..16d28bc9a6
--- /dev/null
+++ b/src/go/types/tuple.go
@@ -0,0 +1,36 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+// A Tuple represents an ordered list of variables; a nil *Tuple is a valid (empty) tuple.
+// Tuples are used as components of signatures and to represent the type of multiple
+// assignments; they are not first class types of Go.
+type Tuple struct {
+ vars []*Var
+}
+
+// NewTuple returns a new tuple for the given variables.
+func NewTuple(x ...*Var) *Tuple {
+ if len(x) > 0 {
+ return &Tuple{vars: x}
+ }
+ // TODO(gri) Don't represent empty tuples with a (*Tuple)(nil) pointer;
+ // it's too subtle and causes problems.
+ return nil
+}
+
+// Len returns the number of variables of tuple t.
+func (t *Tuple) Len() int {
+ if t != nil {
+ return len(t.vars)
+ }
+ return 0
+}
+
+// At returns the i'th variable of tuple t.
+func (t *Tuple) At(i int) *Var { return t.vars[i] }
+
+func (t *Tuple) Underlying() Type { return t }
+func (t *Tuple) String() string { return TypeString(t, nil) }
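A sketch of how the exported Tuple API above behaves from client code (output comments follow the documented semantics):

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	// Build a (int, string) tuple from two variables.
	x := types.NewVar(token.NoPos, nil, "x", types.Typ[types.Int])
	y := types.NewVar(token.NoPos, nil, "y", types.Typ[types.String])
	t := types.NewTuple(x, y)
	fmt.Println(t.Len())        // 2
	fmt.Println(t.At(1).Type()) // string

	// A nil *Tuple is a valid empty tuple; Len handles the nil receiver.
	var empty *types.Tuple
	fmt.Println(empty.Len()) // 0
}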
diff --git a/src/go/types/type.go b/src/go/types/type.go
index 2660ce4408..5819dd290c 100644
--- a/src/go/types/type.go
+++ b/src/go/types/type.go
@@ -4,12 +4,6 @@
package types
-import (
- "fmt"
- "go/token"
- "sync/atomic"
-)
-
// A Type represents a type of Go.
// All types implement the Type interface.
type Type interface {
@@ -22,753 +16,31 @@ type Type interface {
String() string
}
-// BasicKind describes the kind of basic type.
-type BasicKind int
-
-const (
- Invalid BasicKind = iota // type is invalid
-
- // predeclared types
- Bool
- Int
- Int8
- Int16
- Int32
- Int64
- Uint
- Uint8
- Uint16
- Uint32
- Uint64
- Uintptr
- Float32
- Float64
- Complex64
- Complex128
- String
- UnsafePointer
-
- // types for untyped values
- UntypedBool
- UntypedInt
- UntypedRune
- UntypedFloat
- UntypedComplex
- UntypedString
- UntypedNil
-
- // aliases
- Byte = Uint8
- Rune = Int32
-)
-
-// BasicInfo is a set of flags describing properties of a basic type.
-type BasicInfo int
-
-// Properties of basic types.
-const (
- IsBoolean BasicInfo = 1 << iota
- IsInteger
- IsUnsigned
- IsFloat
- IsComplex
- IsString
- IsUntyped
-
- IsOrdered = IsInteger | IsFloat | IsString
- IsNumeric = IsInteger | IsFloat | IsComplex
- IsConstType = IsBoolean | IsNumeric | IsString
-)
-
-// A Basic represents a basic type.
-type Basic struct {
- kind BasicKind
- info BasicInfo
- name string
-}
-
-// Kind returns the kind of basic type b.
-func (b *Basic) Kind() BasicKind { return b.kind }
-
-// Info returns information about properties of basic type b.
-func (b *Basic) Info() BasicInfo { return b.info }
-
-// Name returns the name of basic type b.
-func (b *Basic) Name() string { return b.name }
-
-// An Array represents an array type.
-type Array struct {
- len int64
- elem Type
-}
-
-// NewArray returns a new array type for the given element type and length.
-// A negative length indicates an unknown length.
-func NewArray(elem Type, len int64) *Array { return &Array{len: len, elem: elem} }
-
-// Len returns the length of array a.
-// A negative result indicates an unknown length.
-func (a *Array) Len() int64 { return a.len }
-
-// Elem returns element type of array a.
-func (a *Array) Elem() Type { return a.elem }
-
-// A Slice represents a slice type.
-type Slice struct {
- elem Type
-}
-
-// NewSlice returns a new slice type for the given element type.
-func NewSlice(elem Type) *Slice { return &Slice{elem: elem} }
-
-// Elem returns the element type of slice s.
-func (s *Slice) Elem() Type { return s.elem }
-
-// A Struct represents a struct type.
-type Struct struct {
- fields []*Var
- tags []string // field tags; nil if there are no tags
-}
-
-// NewStruct returns a new struct with the given fields and corresponding field tags.
-// If a field with index i has a tag, tags[i] must be that tag, but len(tags) may be
-// only as long as required to hold the tag with the largest index i. Consequently,
-// if no field has a tag, tags may be nil.
-func NewStruct(fields []*Var, tags []string) *Struct {
- var fset objset
- for _, f := range fields {
- if f.name != "_" && fset.insert(f) != nil {
- panic("multiple fields with the same name")
- }
- }
- if len(tags) > len(fields) {
- panic("more tags than fields")
- }
- return &Struct{fields: fields, tags: tags}
-}
-
-// NumFields returns the number of fields in the struct (including blank and embedded fields).
-func (s *Struct) NumFields() int { return len(s.fields) }
-
-// Field returns the i'th field for 0 <= i < NumFields().
-func (s *Struct) Field(i int) *Var { return s.fields[i] }
-
-// Tag returns the i'th field tag for 0 <= i < NumFields().
-func (s *Struct) Tag(i int) string {
- if i < len(s.tags) {
- return s.tags[i]
- }
- return ""
-}
-
-// A Pointer represents a pointer type.
-type Pointer struct {
- base Type // element type
-}
-
-// NewPointer returns a new pointer type for the given element (base) type.
-func NewPointer(elem Type) *Pointer { return &Pointer{base: elem} }
-
-// Elem returns the element type for the given pointer p.
-func (p *Pointer) Elem() Type { return p.base }
-
-// A Tuple represents an ordered list of variables; a nil *Tuple is a valid (empty) tuple.
-// Tuples are used as components of signatures and to represent the type of multiple
-// assignments; they are not first class types of Go.
-type Tuple struct {
- vars []*Var
-}
-
-// NewTuple returns a new tuple for the given variables.
-func NewTuple(x ...*Var) *Tuple {
- if len(x) > 0 {
- return &Tuple{vars: x}
- }
- // TODO(gri) Don't represent empty tuples with a (*Tuple)(nil) pointer;
- // it's too subtle and causes problems.
- return nil
-}
-
-// Len returns the number variables of tuple t.
-func (t *Tuple) Len() int {
- if t != nil {
- return len(t.vars)
- }
- return 0
-}
-
-// At returns the i'th variable of tuple t.
-func (t *Tuple) At(i int) *Var { return t.vars[i] }
-
-// A Signature represents a (non-builtin) function or method type.
-// The receiver is ignored when comparing signatures for identity.
-type Signature struct {
- // We need to keep the scope in Signature (rather than passing it around
- // and store it in the Func Object) because when type-checking a function
- // literal we call the general type checker which returns a general Type.
- // We then unpack the *Signature and use the scope for the literal body.
- rparams []*TypeName // receiver type parameters from left to right, or nil
- tparams []*TypeName // type parameters from left to right, or nil
- scope *Scope // function scope, present for package-local signatures
- recv *Var // nil if not a method
- params *Tuple // (incoming) parameters from left to right; or nil
- results *Tuple // (outgoing) results from left to right; or nil
- variadic bool // true if the last parameter's type is of the form ...T (or string, for append built-in only)
-}
-
-// NewSignature returns a new function type for the given receiver, parameters,
-// and results, either of which may be nil. If variadic is set, the function
-// is variadic, it must have at least one parameter, and the last parameter
-// must be of unnamed slice type.
-func NewSignature(recv *Var, params, results *Tuple, variadic bool) *Signature {
- if variadic {
- n := params.Len()
- if n == 0 {
- panic("types.NewSignature: variadic function must have at least one parameter")
- }
- if _, ok := params.At(n - 1).typ.(*Slice); !ok {
- panic("types.NewSignature: variadic parameter must be of unnamed slice type")
- }
- }
- return &Signature{recv: recv, params: params, results: results, variadic: variadic}
-}
-
-// Recv returns the receiver of signature s (if a method), or nil if a
-// function. It is ignored when comparing signatures for identity.
-//
-// For an abstract method, Recv returns the enclosing interface either
-// as a *Named or an *Interface. Due to embedding, an interface may
-// contain methods whose receiver type is a different interface.
-func (s *Signature) Recv() *Var { return s.recv }
-
-// _TParams returns the type parameters of signature s, or nil.
-func (s *Signature) _TParams() []*TypeName { return s.tparams }
-
-// _SetTParams sets the type parameters of signature s.
-func (s *Signature) _SetTParams(tparams []*TypeName) { s.tparams = tparams }
-
-// Params returns the parameters of signature s, or nil.
-func (s *Signature) Params() *Tuple { return s.params }
-
-// Results returns the results of signature s, or nil.
-func (s *Signature) Results() *Tuple { return s.results }
-
-// Variadic reports whether the signature s is variadic.
-func (s *Signature) Variadic() bool { return s.variadic }
-
-// A _Sum represents a set of possible types.
-// Sums are currently used to represent type lists of interfaces
-// and thus the underlying types of type parameters; they are not
-// first class types of Go.
-type _Sum struct {
- types []Type // types are unique
-}
-
-// _NewSum returns a new Sum type consisting of the provided
-// types if there are more than one. If there is exactly one
-// type, it returns that type. If the list of types is empty
-// the result is nil.
-func _NewSum(types []Type) Type {
- if len(types) == 0 {
- return nil
- }
-
- // What should happen if types contains a sum type?
- // Do we flatten the types list? For now we check
- // and panic. This should not be possible for the
- // current use case of type lists.
- // TODO(gri) Come up with the rules for sum types.
- for _, t := range types {
- if _, ok := t.(*_Sum); ok {
- panic("sum type contains sum type - unimplemented")
- }
- }
-
- if len(types) == 1 {
- return types[0]
- }
- return &_Sum{types: types}
-}
-
-// is reports whether all types in t satisfy pred.
-func (s *_Sum) is(pred func(Type) bool) bool {
- if s == nil {
- return false
- }
- for _, t := range s.types {
- if !pred(t) {
- return false
- }
- }
- return true
-}
-
-// An Interface represents an interface type.
-type Interface struct {
- methods []*Func // ordered list of explicitly declared methods
- types Type // (possibly a Sum) type declared with a type list (TODO(gri) need better field name)
- embeddeds []Type // ordered list of explicitly embedded types
-
- allMethods []*Func // ordered list of methods declared with or embedded in this interface (TODO(gri): replace with mset)
- allTypes Type // intersection of all embedded and locally declared types (TODO(gri) need better field name)
-
- obj Object // type declaration defining this interface; or nil (for better error messages)
-}
-
-// unpack unpacks a type into a list of types.
-// TODO(gri) Try to eliminate the need for this function.
-func unpackType(typ Type) []Type {
- if typ == nil {
- return nil
- }
- if sum := asSum(typ); sum != nil {
- return sum.types
- }
- return []Type{typ}
-}
-
-// is reports whether interface t represents types that all satisfy pred.
-func (t *Interface) is(pred func(Type) bool) bool {
- if t.allTypes == nil {
- return false // we must have at least one type! (was bug)
- }
- for _, t := range unpackType(t.allTypes) {
- if !pred(t) {
- return false
- }
- }
- return true
-}
-
-// emptyInterface represents the empty (completed) interface
-var emptyInterface = Interface{allMethods: markComplete}
-
-// markComplete is used to mark an empty interface as completely
-// set up by setting the allMethods field to a non-nil empty slice.
-var markComplete = make([]*Func, 0)
-
-// NewInterface returns a new (incomplete) interface for the given methods and embedded types.
-// Each embedded type must have an underlying type of interface type.
-// NewInterface takes ownership of the provided methods and may modify their types by setting
-// missing receivers. To compute the method set of the interface, Complete must be called.
-//
-// Deprecated: Use NewInterfaceType instead which allows any (even non-defined) interface types
-// to be embedded. This is necessary for interfaces that embed alias type names referring to
-// non-defined (literal) interface types.
-func NewInterface(methods []*Func, embeddeds []*Named) *Interface {
- tnames := make([]Type, len(embeddeds))
- for i, t := range embeddeds {
- tnames[i] = t
- }
- return NewInterfaceType(methods, tnames)
-}
-
-// NewInterfaceType returns a new (incomplete) interface for the given methods and embedded types.
-// Each embedded type must have an underlying type of interface type (this property is not
-// verified for defined types, which may be in the process of being set up and which don't
-// have a valid underlying type yet).
-// NewInterfaceType takes ownership of the provided methods and may modify their types by setting
-// missing receivers. To compute the method set of the interface, Complete must be called.
-func NewInterfaceType(methods []*Func, embeddeds []Type) *Interface {
- if len(methods) == 0 && len(embeddeds) == 0 {
- return &emptyInterface
- }
-
- // set method receivers if necessary
- typ := new(Interface)
- for _, m := range methods {
- if sig := m.typ.(*Signature); sig.recv == nil {
- sig.recv = NewVar(m.pos, m.pkg, "", typ)
- }
- }
-
- // All embedded types should be interfaces; however, defined types
- // may not yet be fully resolved. Only verify that non-defined types
- // are interfaces. This matches the behavior of the code before the
- // fix for #25301 (issue #25596).
- for _, t := range embeddeds {
- if _, ok := t.(*Named); !ok && !IsInterface(t) {
- panic("embedded type is not an interface")
- }
- }
-
- // sort for API stability
- sortMethods(methods)
- sortTypes(embeddeds)
-
- typ.methods = methods
- typ.embeddeds = embeddeds
- return typ
-}
-
-// NumExplicitMethods returns the number of explicitly declared methods of interface t.
-func (t *Interface) NumExplicitMethods() int { return len(t.methods) }
-
-// ExplicitMethod returns the i'th explicitly declared method of interface t for 0 <= i < t.NumExplicitMethods().
-// The methods are ordered by their unique Id.
-func (t *Interface) ExplicitMethod(i int) *Func { return t.methods[i] }
-
-// NumEmbeddeds returns the number of embedded types in interface t.
-func (t *Interface) NumEmbeddeds() int { return len(t.embeddeds) }
-
-// Embedded returns the i'th embedded defined (*Named) type of interface t for 0 <= i < t.NumEmbeddeds().
-// The result is nil if the i'th embedded type is not a defined type.
-//
-// Deprecated: Use EmbeddedType which is not restricted to defined (*Named) types.
-func (t *Interface) Embedded(i int) *Named { tname, _ := t.embeddeds[i].(*Named); return tname }
-
-// EmbeddedType returns the i'th embedded type of interface t for 0 <= i < t.NumEmbeddeds().
-func (t *Interface) EmbeddedType(i int) Type { return t.embeddeds[i] }
-
-// NumMethods returns the total number of methods of interface t.
-// The interface must have been completed.
-func (t *Interface) NumMethods() int { t.assertCompleteness(); return len(t.allMethods) }
-
-func (t *Interface) assertCompleteness() {
- if t.allMethods == nil {
- panic("interface is incomplete")
- }
-}
-
-// Method returns the i'th method of interface t for 0 <= i < t.NumMethods().
-// The methods are ordered by their unique Id.
-// The interface must have been completed.
-func (t *Interface) Method(i int) *Func { t.assertCompleteness(); return t.allMethods[i] }
-
-// Empty reports whether t is the empty interface.
-func (t *Interface) Empty() bool {
- if t.allMethods != nil {
- // interface is complete - quick test
- // A non-nil allTypes may still be empty and represents the bottom type.
- return len(t.allMethods) == 0 && t.allTypes == nil
- }
- return !t.iterate(func(t *Interface) bool {
- return len(t.methods) > 0 || t.types != nil
- }, nil)
-}
-
-// _HasTypeList reports whether interface t has a type list, possibly from an embedded type.
-func (t *Interface) _HasTypeList() bool {
- if t.allMethods != nil {
- // interface is complete - quick test
- return t.allTypes != nil
- }
-
- return t.iterate(func(t *Interface) bool {
- return t.types != nil
- }, nil)
-}
-
-// _IsComparable reports whether interface t is or embeds the predeclared interface "comparable".
-func (t *Interface) _IsComparable() bool {
- if t.allMethods != nil {
- // interface is complete - quick test
- _, m := lookupMethod(t.allMethods, nil, "==")
- return m != nil
- }
-
- return t.iterate(func(t *Interface) bool {
- _, m := lookupMethod(t.methods, nil, "==")
- return m != nil
- }, nil)
-}
-
-// _IsConstraint reports t.HasTypeList() || t.IsComparable().
-func (t *Interface) _IsConstraint() bool {
- if t.allMethods != nil {
- // interface is complete - quick test
- if t.allTypes != nil {
- return true
- }
- _, m := lookupMethod(t.allMethods, nil, "==")
- return m != nil
- }
-
- return t.iterate(func(t *Interface) bool {
- if t.types != nil {
- return true
- }
- _, m := lookupMethod(t.methods, nil, "==")
- return m != nil
- }, nil)
-}
-
-// iterate calls f with t and then with any embedded interface of t, recursively, until f returns true.
-// iterate reports whether any call to f returned true.
-func (t *Interface) iterate(f func(*Interface) bool, seen map[*Interface]bool) bool {
- if f(t) {
- return true
- }
- for _, e := range t.embeddeds {
- // e should be an interface but be careful (it may be invalid)
- if e := asInterface(e); e != nil {
- // Cyclic interfaces such as "type E interface { E }" are not permitted
- // but they are still constructed and we need to detect such cycles.
- if seen[e] {
- continue
- }
- if seen == nil {
- seen = make(map[*Interface]bool)
- }
- seen[e] = true
- if e.iterate(f, seen) {
- return true
- }
- }
- }
- return false
-}
-
-// isSatisfiedBy reports whether interface t's type list is satisfied by the type typ.
-// If the type list is empty (absent), typ trivially satisfies the interface.
-// TODO(gri) This is not a great name. Eventually, we should have a more comprehensive
-// "implements" predicate.
-func (t *Interface) isSatisfiedBy(typ Type) bool {
- t.Complete()
- if t.allTypes == nil {
- return true
- }
- types := unpackType(t.allTypes)
- return includes(types, typ) || includes(types, under(typ))
-}
-
-// Complete computes the interface's method set. It must be called by users of
-// NewInterfaceType and NewInterface after the interface's embedded types are
-// fully defined and before using the interface type in any way other than to
-// form other types. The interface must not contain duplicate methods or a
-// panic occurs. Complete returns the receiver.
-func (t *Interface) Complete() *Interface {
- // TODO(gri) consolidate this method with Checker.completeInterface
- if t.allMethods != nil {
- return t
- }
-
- t.allMethods = markComplete // avoid infinite recursion
-
- var todo []*Func
- var methods []*Func
- var seen objset
- addMethod := func(m *Func, explicit bool) {
- switch other := seen.insert(m); {
- case other == nil:
- methods = append(methods, m)
- case explicit:
- panic("duplicate method " + m.name)
- default:
- // check method signatures after all locally embedded interfaces are computed
- todo = append(todo, m, other.(*Func))
- }
- }
-
- for _, m := range t.methods {
- addMethod(m, true)
- }
-
- allTypes := t.types
+// top represents the top of the type lattice.
+// It is the underlying type of a type parameter that
+// can be satisfied by any type (ignoring methods),
+// because its type constraint contains no restrictions
+// besides methods.
+type top struct{}
- for _, typ := range t.embeddeds {
- utyp := under(typ)
- etyp := asInterface(utyp)
- if etyp == nil {
- if utyp != Typ[Invalid] {
- panic(fmt.Sprintf("%s is not an interface", typ))
- }
- continue
- }
- etyp.Complete()
- for _, m := range etyp.allMethods {
- addMethod(m, false)
- }
- allTypes = intersect(allTypes, etyp.allTypes)
- }
+// theTop is the singleton top type.
+var theTop = &top{}
- for i := 0; i < len(todo); i += 2 {
- m := todo[i]
- other := todo[i+1]
- if !Identical(m.typ, other.typ) {
- panic("duplicate method " + m.name)
- }
- }
+func (t *top) Underlying() Type { return t }
+func (t *top) String() string { return TypeString(t, nil) }
- if methods != nil {
- sortMethods(methods)
- t.allMethods = methods
+// under returns the true expanded underlying type.
+// If it doesn't exist, the result is Typ[Invalid].
+// under must only be called when a type is known
+// to be fully set up.
+func under(t Type) Type {
+ // TODO(gri) is this correct for *Union?
+ if n := asNamed(t); n != nil {
+ return n.under()
}
- t.allTypes = allTypes
-
return t
}
-// A Map represents a map type.
-type Map struct {
- key, elem Type
-}
-
-// NewMap returns a new map for the given key and element types.
-func NewMap(key, elem Type) *Map {
- return &Map{key: key, elem: elem}
-}
-
-// Key returns the key type of map m.
-func (m *Map) Key() Type { return m.key }
-
-// Elem returns the element type of map m.
-func (m *Map) Elem() Type { return m.elem }
-
-// A Chan represents a channel type.
-type Chan struct {
- dir ChanDir
- elem Type
-}
-
-// A ChanDir value indicates a channel direction.
-type ChanDir int
-
-// The direction of a channel is indicated by one of these constants.
-const (
- SendRecv ChanDir = iota
- SendOnly
- RecvOnly
-)
-
-// NewChan returns a new channel type for the given direction and element type.
-func NewChan(dir ChanDir, elem Type) *Chan {
- return &Chan{dir: dir, elem: elem}
-}
-
-// Dir returns the direction of channel c.
-func (c *Chan) Dir() ChanDir { return c.dir }
-
-// Elem returns the element type of channel c.
-func (c *Chan) Elem() Type { return c.elem }
-
-// A Named represents a named (defined) type.
-type Named struct {
- check *Checker // for Named.under implementation; nilled once under has been called
- info typeInfo // for cycle detection
- obj *TypeName // corresponding declared object
- orig Type // type (on RHS of declaration) this *Named type is derived of (for cycle reporting)
- underlying Type // possibly a *Named during setup; never a *Named once set up completely
- tparams []*TypeName // type parameters, or nil
- targs []Type // type arguments (after instantiation), or nil
- methods []*Func // methods declared for this type (not the method set of this type); signatures are type-checked lazily
-}
-
-// NewNamed returns a new named type for the given type name, underlying type, and associated methods.
-// If the given type name obj doesn't have a type yet, its type is set to the returned named type.
-// The underlying type must not be a *Named.
-func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
- if _, ok := underlying.(*Named); ok {
- panic("types.NewNamed: underlying type must not be *Named")
- }
- return (*Checker)(nil).newNamed(obj, underlying, methods)
-}
-
-func (check *Checker) newNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
- typ := &Named{check: check, obj: obj, orig: underlying, underlying: underlying, methods: methods}
- if obj.typ == nil {
- obj.typ = typ
- }
- // Ensure that typ is always expanded, at which point the check field can be
- // nilled out.
- //
- // Note that currently we cannot nil out check inside typ.under(), because
- // it's possible that typ is expanded multiple times.
- //
- // TODO(rFindley): clean this up so that under is the only function mutating
- // named types.
- if check != nil {
- check.later(func() {
- switch typ.under().(type) {
- case *Named, *instance:
- panic("internal error: unexpanded underlying type")
- }
- typ.check = nil
- })
- }
- return typ
-}
-
-// Obj returns the type name for the named type t.
-func (t *Named) Obj() *TypeName { return t.obj }
-
-// TODO(gri) Come up with a better representation and API to distinguish
-// between parameterized instantiated and non-instantiated types.
-
-// _TParams returns the type parameters of the named type t, or nil.
-// The result is non-nil for an (originally) parameterized type even if it is instantiated.
-func (t *Named) _TParams() []*TypeName { return t.tparams }
-
-// _TArgs returns the type arguments after instantiation of the named type t, or nil if not instantiated.
-func (t *Named) _TArgs() []Type { return t.targs }
-
-// _SetTArgs sets the type arguments of Named.
-func (t *Named) _SetTArgs(args []Type) { t.targs = args }
-
-// NumMethods returns the number of explicit methods whose receiver is named type t.
-func (t *Named) NumMethods() int { return len(t.methods) }
-
-// Method returns the i'th method of named type t for 0 <= i < t.NumMethods().
-func (t *Named) Method(i int) *Func { return t.methods[i] }
-
-// SetUnderlying sets the underlying type and marks t as complete.
-func (t *Named) SetUnderlying(underlying Type) {
- if underlying == nil {
- panic("types.Named.SetUnderlying: underlying type must not be nil")
- }
- if _, ok := underlying.(*Named); ok {
- panic("types.Named.SetUnderlying: underlying type must not be *Named")
- }
- t.underlying = underlying
-}
-
-// AddMethod adds method m unless it is already in the method list.
-func (t *Named) AddMethod(m *Func) {
- if i, _ := lookupMethod(t.methods, m.pkg, m.name); i < 0 {
- t.methods = append(t.methods, m)
- }
-}
-
-// Note: This is a uint32 rather than a uint64 because the
-// respective 64 bit atomic instructions are not available
-// on all platforms.
-var lastId uint32
-
-// nextId returns a value increasing monotonically by 1 with
-// each call, starting with 1. It may be called concurrently.
-func nextId() uint64 { return uint64(atomic.AddUint32(&lastId, 1)) }
-
-// A _TypeParam represents a type parameter type.
-type _TypeParam struct {
- check *Checker // for lazy type bound completion
- id uint64 // unique id
- obj *TypeName // corresponding type name
- index int // parameter index
- bound Type // *Named or *Interface; underlying type is always *Interface
-}
-
-// newTypeParam returns a new TypeParam.
-func (check *Checker) newTypeParam(obj *TypeName, index int, bound Type) *_TypeParam {
- assert(bound != nil)
- typ := &_TypeParam{check: check, id: nextId(), obj: obj, index: index, bound: bound}
- if obj.typ == nil {
- obj.typ = typ
- }
- return typ
-}
-
-func (t *_TypeParam) Bound() *Interface {
- iface := asInterface(t.bound)
- // use the type bound position if we have one
- pos := token.NoPos
- if n, _ := t.bound.(*Named); n != nil {
- pos = n.obj.pos
- }
- // TODO(rFindley) switch this to an unexported method on Checker.
- t.check.completeInterface(pos, iface)
- return iface
-}
-
// optype returns a type's operational type. Except for
// type parameters, the operational type is the same
// as the underlying type (as returned by under). For
@@ -784,130 +56,22 @@ func optype(typ Type) Type {
// for a type parameter list of the form:
// (type T interface { type T }).
// See also issue #39680.
- if u := t.Bound().allTypes; u != nil && u != typ {
- // u != typ and u is a type parameter => under(u) != typ, so this is ok
- return under(u)
+ if a := t.iface().typeSet().types; a != nil && a != typ {
+ // If we have a union with a single entry, ignore
+ // any tilde because under(~t) == under(t).
+ if u, _ := a.(*Union); u != nil && u.NumTerms() == 1 {
+ a, _ = u.Term(0)
+ }
+ if a != typ {
+ // a != typ and a is a type parameter => under(a) != typ, so this is ok
+ return under(a)
+ }
}
return theTop
}
return under(typ)
}
-// An instance represents an instantiated generic type syntactically
-// (without expanding the instantiation). Type instances appear only
-// during type-checking and are replaced by their fully instantiated
-// (expanded) types before the end of type-checking.
-type instance struct {
- check *Checker // for lazy instantiation
- pos token.Pos // position of type instantiation; for error reporting only
- base *Named // parameterized type to be instantiated
- targs []Type // type arguments
- poslist []token.Pos // position of each targ; for error reporting only
- value Type // base(targs...) after instantiation or Typ[Invalid]; nil if not yet set
-}
-
-// expand returns the instantiated (= expanded) type of t.
-// The result is either an instantiated *Named type, or
-// Typ[Invalid] if there was an error.
-func (t *instance) expand() Type {
- v := t.value
- if v == nil {
- v = t.check.instantiate(t.pos, t.base, t.targs, t.poslist)
- if v == nil {
- v = Typ[Invalid]
- }
- t.value = v
- }
- // After instantiation we must have an invalid or a *Named type.
- if debug && v != Typ[Invalid] {
- _ = v.(*Named)
- }
- return v
-}
-
-// expand expands a type instance into its instantiated
-// type and leaves all other types alone. expand does
-// not recurse.
-func expand(typ Type) Type {
- if t, _ := typ.(*instance); t != nil {
- return t.expand()
- }
- return typ
-}
-
-// expandf is set to expand.
-// Call expandf when calling expand causes compile-time cycle error.
-var expandf func(Type) Type
-
-func init() { expandf = expand }
-
-// bottom represents the bottom of the type lattice.
-// It is the underlying type of a type parameter that
-// cannot be satisfied by any type, usually because
-// the intersection of type constraints left nothing).
-type bottom struct{}
-
-// theBottom is the singleton bottom type.
-var theBottom = &bottom{}
-
-// top represents the top of the type lattice.
-// It is the underlying type of a type parameter that
-// can be satisfied by any type (ignoring methods),
-// usually because the type constraint has no type
-// list.
-type top struct{}
-
-// theTop is the singleton top type.
-var theTop = &top{}
-
-// Type-specific implementations of Underlying.
-func (t *Basic) Underlying() Type { return t }
-func (t *Array) Underlying() Type { return t }
-func (t *Slice) Underlying() Type { return t }
-func (t *Struct) Underlying() Type { return t }
-func (t *Pointer) Underlying() Type { return t }
-func (t *Tuple) Underlying() Type { return t }
-func (t *Signature) Underlying() Type { return t }
-func (t *_Sum) Underlying() Type { return t }
-func (t *Interface) Underlying() Type { return t }
-func (t *Map) Underlying() Type { return t }
-func (t *Chan) Underlying() Type { return t }
-func (t *Named) Underlying() Type { return t.underlying }
-func (t *_TypeParam) Underlying() Type { return t }
-func (t *instance) Underlying() Type { return t }
-func (t *bottom) Underlying() Type { return t }
-func (t *top) Underlying() Type { return t }
-
-// Type-specific implementations of String.
-func (t *Basic) String() string { return TypeString(t, nil) }
-func (t *Array) String() string { return TypeString(t, nil) }
-func (t *Slice) String() string { return TypeString(t, nil) }
-func (t *Struct) String() string { return TypeString(t, nil) }
-func (t *Pointer) String() string { return TypeString(t, nil) }
-func (t *Tuple) String() string { return TypeString(t, nil) }
-func (t *Signature) String() string { return TypeString(t, nil) }
-func (t *_Sum) String() string { return TypeString(t, nil) }
-func (t *Interface) String() string { return TypeString(t, nil) }
-func (t *Map) String() string { return TypeString(t, nil) }
-func (t *Chan) String() string { return TypeString(t, nil) }
-func (t *Named) String() string { return TypeString(t, nil) }
-func (t *_TypeParam) String() string { return TypeString(t, nil) }
-func (t *instance) String() string { return TypeString(t, nil) }
-func (t *bottom) String() string { return TypeString(t, nil) }
-func (t *top) String() string { return TypeString(t, nil) }
-
-// under returns the true expanded underlying type.
-// If it doesn't exist, the result is Typ[Invalid].
-// under must only be called when a type is known
-// to be fully set up.
-func under(t Type) Type {
- // TODO(gri) is this correct for *Sum?
- if n := asNamed(t); n != nil {
- return n.under()
- }
- return t
-}
-
// Converters
//
// A converter must only be called when a type is
@@ -941,46 +105,26 @@ func asPointer(t Type) *Pointer {
return op
}
-func asTuple(t Type) *Tuple {
- op, _ := optype(t).(*Tuple)
- return op
-}
-
func asSignature(t Type) *Signature {
op, _ := optype(t).(*Signature)
return op
}
-func asSum(t Type) *_Sum {
- op, _ := optype(t).(*_Sum)
- return op
-}
+// If the argument to asInterface, asNamed, or asTypeParam is of the respective type
+// (possibly after expanding an instance type), these functions return that type.
+// Otherwise the result is nil.
func asInterface(t Type) *Interface {
op, _ := optype(t).(*Interface)
return op
}
-func asMap(t Type) *Map {
- op, _ := optype(t).(*Map)
- return op
-}
-
-func asChan(t Type) *Chan {
- op, _ := optype(t).(*Chan)
- return op
-}
-
-// If the argument to asNamed and asTypeParam is of the respective types
-// (possibly after expanding an instance type), these methods return that type.
-// Otherwise the result is nil.
-
func asNamed(t Type) *Named {
e, _ := expand(t).(*Named)
return e
}
-func asTypeParam(t Type) *_TypeParam {
- u, _ := under(t).(*_TypeParam)
+func asTypeParam(t Type) *TypeParam {
+ u, _ := under(t).(*TypeParam)
return u
}
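The converters above normalize a type before the type assertion: asInterface and asSignature go through optype, while asNamed expands a (possibly lazy) instance and asTypeParam looks at the underlying type. As a hedged illustration of the pattern, an in-package helper built on them might look like this (isGenericNamed is hypothetical, not part of this change):

    // isGenericNamed reports whether t is a parameterized named type that
    // has not been instantiated yet; asNamed takes care of expansion.
    func isGenericNamed(t Type) bool {
        if n := asNamed(t); n != nil {
            return n.targs == nil && n.TParams().Len() > 0
        }
        return false
    }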
diff --git a/src/go/types/typeparam.go b/src/go/types/typeparam.go
new file mode 100644
index 0000000000..33a516c209
--- /dev/null
+++ b/src/go/types/typeparam.go
@@ -0,0 +1,136 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "go/token"
+ "sync/atomic"
+)
+
+// Note: This is a uint32 rather than a uint64 because the
+// respective 64 bit atomic instructions are not available
+// on all platforms.
+var lastID uint32
+
+// nextID returns a value increasing monotonically by 1 with
+// each call, starting with 1. It may be called concurrently.
+func nextID() uint64 { return uint64(atomic.AddUint32(&lastID, 1)) }
+
+// A TypeParam represents a type parameter type.
+type TypeParam struct {
+ check *Checker // for lazy type bound completion
+ id uint64 // unique id, for debugging only
+ obj *TypeName // corresponding type name
+ index int // type parameter index in source order, starting at 0
+ // TODO(rfindley): this could also be Typ[Invalid]. Verify that this is handled correctly.
+ bound Type // *Named or *Interface; underlying type is always *Interface
+}
+
+// NewTypeParam returns a new TypeParam. Type parameters may be set on a Named
+// or Signature type by calling SetTParams. Setting a type parameter on more
+// than one type will result in a panic.
+//
+// The bound argument can be nil, and set later via SetConstraint.
+func (check *Checker) NewTypeParam(obj *TypeName, bound Type) *TypeParam {
+ // Always increment lastID, even if it is not used.
+ id := nextID()
+ if check != nil {
+ check.nextID++
+ id = check.nextID
+ }
+ typ := &TypeParam{check: check, id: id, obj: obj, index: -1, bound: bound}
+ if obj.typ == nil {
+ obj.typ = typ
+ }
+ return typ
+}
+
+// TODO(rfindley): remove or export these placeholder APIs.
+
+// Index returns the index of the type param within its param list.
+func (t *TypeParam) _Index() int {
+ return t.index
+}
+
+// SetId sets the unique id of a type param. Should only be used for type params
+// in imported generic types.
+func (t *TypeParam) _SetId(id uint64) {
+ t.id = id
+}
+
+// Constraint returns the type constraint specified for t.
+func (t *TypeParam) Constraint() Type {
+ // compute the type set if possible (we may not have an interface)
+ if iface, _ := under(t.bound).(*Interface); iface != nil {
+ // use the type bound position if we have one
+ pos := token.NoPos
+ if n, _ := t.bound.(*Named); n != nil {
+ pos = n.obj.pos
+ }
+ computeTypeSet(t.check, pos, iface)
+ }
+ return t.bound
+}
+
+// SetConstraint sets the type constraint for t.
+func (t *TypeParam) SetConstraint(bound Type) {
+ if bound == nil {
+ panic("types2.TypeParam.SetConstraint: bound must not be nil")
+ }
+ t.bound = bound
+}
+
+// iface returns the constraint interface of t.
+func (t *TypeParam) iface() *Interface {
+ if iface, _ := under(t.Constraint()).(*Interface); iface != nil {
+ return iface
+ }
+ return &emptyInterface
+}
+
+func (t *TypeParam) Underlying() Type { return t }
+func (t *TypeParam) String() string { return TypeString(t, nil) }
+
+// TypeParams holds a list of type parameters bound to a type.
+type TypeParams struct{ tparams []*TypeName }
+
+// Len returns the number of type parameters in the list.
+// It is safe to call on a nil receiver.
+func (tps *TypeParams) Len() int {
+ return len(tps.list())
+}
+
+// At returns the i'th type parameter in the list.
+func (tps *TypeParams) At(i int) *TypeName {
+ return tps.list()[i]
+}
+
+func (tps *TypeParams) list() []*TypeName {
+ if tps == nil {
+ return nil
+ }
+ return tps.tparams
+}
+
+func bindTParams(list []*TypeName) *TypeParams {
+ if len(list) == 0 {
+ return nil
+ }
+ for i, tp := range list {
+ typ := tp.Type().(*TypeParam)
+ if typ.index >= 0 {
+ panic("internal error: type parameter bound more than once")
+ }
+ typ.index = i
+ }
+ return &TypeParams{tparams: list}
+}
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+func (t *TypeParam) underIs(f func(Type) bool) bool {
+ return t.iface().typeSet().underIs(f)
+}
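typeparam.go above couples every type parameter to a debugging id drawn from an atomic counter and to the index assigned when bindTParams attaches the parameter list to a declaration. A minimal standalone sketch of those two mechanics, using stand-in types rather than go/types itself (all names below are illustrative):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // lastID mirrors the package-level counter above: a uint32 so the atomic
    // increment also works on platforms without 64-bit atomics.
    var lastID uint32

    func nextID() uint64 { return uint64(atomic.AddUint32(&lastID, 1)) }

    // tparam stands in for *TypeParam: a debugging id plus the index the
    // parameter receives when it is bound.
    type tparam struct {
        id    uint64
        name  string
        index int // -1 until bound
    }

    // bind assigns list positions exactly once, like bindTParams above.
    func bind(list []*tparam) {
        for i, tp := range list {
            if tp.index >= 0 {
                panic("type parameter bound more than once")
            }
            tp.index = i
        }
    }

    func main() {
        k := &tparam{id: nextID(), name: "K", index: -1}
        v := &tparam{id: nextID(), name: "V", index: -1}
        bind([]*tparam{k, v})
        fmt.Println(k.name, k.id, k.index) // K 1 0
        fmt.Println(v.name, v.id, v.index) // V 2 1
    }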
diff --git a/src/go/types/types_test.go b/src/go/types/types_test.go
index 25cd996628..f2358c6e19 100644
--- a/src/go/types/types_test.go
+++ b/src/go/types/types_test.go
@@ -4,16 +4,5 @@
package types
-import "sync/atomic"
-
-// Upon calling ResetId, nextId starts with 1 again.
-// It may be called concurrently. This is only needed
-// for tests where we may want to have a consistent
-// numbering for each individual test case.
-func ResetId() { atomic.StoreUint32(&lastId, 0) }
-
-// SetGoVersion sets the unexported goVersion field on config, so that tests
-// which assert on behavior for older Go versions can set it.
-func SetGoVersion(config *Config, goVersion string) {
- config.goVersion = goVersion
-}
+// Debug is set if go/types is built with debug mode enabled.
+const Debug = debug
diff --git a/src/go/types/typeset.go b/src/go/types/typeset.go
new file mode 100644
index 0000000000..836f93047a
--- /dev/null
+++ b/src/go/types/typeset.go
@@ -0,0 +1,298 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "bytes"
+ "fmt"
+ "go/token"
+ "sort"
+)
+
+// ----------------------------------------------------------------------------
+// API
+
+// A _TypeSet represents the type set of an interface.
+type _TypeSet struct {
+ comparable bool // if set, the interface is or embeds comparable
+ // TODO(gri) consider using a set for the methods for faster lookup
+ methods []*Func // all methods of the interface; sorted by unique ID
+ types Type // typically a *Union; nil means no type restrictions
+}
+
+// IsTop reports whether type set s is the top type set (corresponding to the empty interface).
+func (s *_TypeSet) IsTop() bool { return !s.comparable && len(s.methods) == 0 && s.types == nil }
+
+// IsMethodSet reports whether the type set s is described by a single set of methods.
+func (s *_TypeSet) IsMethodSet() bool { return !s.comparable && s.types == nil }
+
+// IsComparable reports whether each type in the set is comparable.
+// TODO(gri) this is not correct - there may be s.types values containing non-comparable types
+func (s *_TypeSet) IsComparable() bool {
+ if s.types == nil {
+ return s.comparable
+ }
+ tcomparable := s.underIs(func(u Type) bool {
+ return Comparable(u)
+ })
+ if !s.comparable {
+ return tcomparable
+ }
+ return s.comparable && tcomparable
+}
+
+// TODO(gri) IsTypeSet is not a great name. Find a better one.
+
+// IsTypeSet reports whether the type set s is represented by a finite set of underlying types.
+func (s *_TypeSet) IsTypeSet() bool {
+ return !s.comparable && len(s.methods) == 0
+}
+
+// NumMethods returns the number of methods available.
+func (s *_TypeSet) NumMethods() int { return len(s.methods) }
+
+// Method returns the i'th method of type set s for 0 <= i < s.NumMethods().
+// The methods are ordered by their unique ID.
+func (s *_TypeSet) Method(i int) *Func { return s.methods[i] }
+
+// LookupMethod returns the index of, and the method with, a matching package and name, or (-1, nil).
+func (s *_TypeSet) LookupMethod(pkg *Package, name string) (int, *Func) {
+ // TODO(gri) s.methods is sorted - consider binary search
+ return lookupMethod(s.methods, pkg, name)
+}
+
+func (s *_TypeSet) String() string {
+ if s.IsTop() {
+ return "⊤"
+ }
+
+ var buf bytes.Buffer
+ buf.WriteByte('{')
+ if s.comparable {
+ buf.WriteString(" comparable")
+ if len(s.methods) > 0 || s.types != nil {
+ buf.WriteByte(';')
+ }
+ }
+ for i, m := range s.methods {
+ if i > 0 {
+ buf.WriteByte(';')
+ }
+ buf.WriteByte(' ')
+ buf.WriteString(m.String())
+ }
+ if len(s.methods) > 0 && s.types != nil {
+ buf.WriteByte(';')
+ }
+ if s.types != nil {
+ buf.WriteByte(' ')
+ writeType(&buf, s.types, nil, nil)
+ }
+
+ buf.WriteString(" }") // there was a least one method or type
+ return buf.String()
+}
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+// underIs reports whether f returned true for the underlying types of the
+// enumerable types in the type set s. If the type set comprises all types,
+// f is called once with the top type; if the type set is empty, the result
+// is false.
+func (s *_TypeSet) underIs(f func(Type) bool) bool {
+ switch t := s.types.(type) {
+ case nil:
+ return f(theTop)
+ case *Union:
+ return t.underIs(f)
+ default:
+ return f(t)
+ }
+}
+
+// topTypeSet may be used as type set for the empty interface.
+var topTypeSet _TypeSet
+
+// computeTypeSet may be called with check == nil.
+func computeTypeSet(check *Checker, pos token.Pos, ityp *Interface) *_TypeSet {
+ if ityp.tset != nil {
+ return ityp.tset
+ }
+
+ // If the interface is not fully set up yet, the type set will
+ // not be complete, which may lead to errors when using the
+ // type set (e.g. missing method). Don't compute a partial type
+ // set (and don't store it!), so that we still compute the full
+ // type set eventually. Instead, return the top type set and
+ // let any follow-on errors play out.
+ //
+ // TODO(gri) Consider recording when this happens and reporting
+ // it as an error (but only if there were no other errors, so as
+ // not to have unnecessary follow-on errors).
+ if !ityp.complete {
+ return &topTypeSet
+ }
+
+ if check != nil && trace {
+ // Types don't generally have position information.
+ // If we don't have a valid pos provided, try to use
+ // one close enough.
+ if !pos.IsValid() && len(ityp.methods) > 0 {
+ pos = ityp.methods[0].pos
+ }
+
+ check.trace(pos, "type set for %s", ityp)
+ check.indent++
+ defer func() {
+ check.indent--
+ check.trace(pos, "=> %s ", ityp.typeSet())
+ }()
+ }
+
+ // An infinitely expanding interface (due to a cycle) is detected
+ // elsewhere (Checker.validType), so here we simply assume we only
+ // have valid interfaces. Mark the interface as complete to avoid
+ // infinite recursion if the validType check occurs later for some
+ // reason.
+ ityp.tset = new(_TypeSet) // TODO(gri) is this sufficient?
+
+ // Methods of embedded interfaces are collected unchanged; i.e., the identity
+ // of a method I.m's Func Object of an interface I is the same as that of
+ // the method m in an interface that embeds interface I. On the other hand,
+ // if a method is embedded via multiple overlapping embedded interfaces, we
+ // don't provide a guarantee which "original m" got chosen for the embedding
+ // interface. See also issue #34421.
+ //
+ // If we don't care to provide this identity guarantee anymore, instead of
+ // reusing the original method in embeddings, we can clone the method's Func
+ // Object and give it the position of a corresponding embedded interface. Then
+ // we can get rid of the mpos map below and simply use the cloned method's
+ // position.
+
+ var todo []*Func
+ var seen objset
+ var methods []*Func
+ mpos := make(map[*Func]token.Pos) // method specification or method embedding position, for good error messages
+ addMethod := func(pos token.Pos, m *Func, explicit bool) {
+ switch other := seen.insert(m); {
+ case other == nil:
+ methods = append(methods, m)
+ mpos[m] = pos
+ case explicit:
+ if check == nil {
+ panic(fmt.Sprintf("%v: duplicate method %s", m.pos, m.name))
+ }
+ // check != nil
+ check.errorf(atPos(pos), _DuplicateDecl, "duplicate method %s", m.name)
+ check.errorf(atPos(mpos[other.(*Func)]), _DuplicateDecl, "\tother declaration of %s", m.name) // secondary error, \t indented
+ default:
+ // We have a duplicate method name in an embedded (not explicitly declared) method.
+ // Check method signatures after all types are computed (issue #33656).
+ // If we're pre-go1.14 (overlapping embeddings are not permitted), report that
+ // error here as well (even though we could do it eagerly) because it's the same
+ // error message.
+ if check == nil {
+ // check method signatures after all locally embedded interfaces are computed
+ todo = append(todo, m, other.(*Func))
+ break
+ }
+ // check != nil
+ check.later(func() {
+ if !check.allowVersion(m.pkg, 1, 14) || !Identical(m.typ, other.Type()) {
+ check.errorf(atPos(pos), _DuplicateDecl, "duplicate method %s", m.name)
+ check.errorf(atPos(mpos[other.(*Func)]), _DuplicateDecl, "\tother declaration of %s", m.name) // secondary error, \t indented
+ }
+ })
+ }
+ }
+
+ for _, m := range ityp.methods {
+ addMethod(m.pos, m, true)
+ }
+
+ // collect embedded elements
+ var allTypes Type
+ for i, typ := range ityp.embeddeds {
+ // The embedding position is nil for imported interfaces
+ // and also for interface copies after substitution (but
+ // in that case we don't need to report errors again).
+ var pos token.Pos // embedding position
+ if ityp.embedPos != nil {
+ pos = (*ityp.embedPos)[i]
+ }
+ var types Type
+ switch t := under(typ).(type) {
+ case *Interface:
+ tset := computeTypeSet(check, pos, t)
+ if tset.comparable {
+ ityp.tset.comparable = true
+ }
+ for _, m := range tset.methods {
+ addMethod(pos, m, false) // use embedding position pos rather than m.pos
+ }
+ types = tset.types
+ case *Union:
+ // TODO(gri) combine with default case once we have
+ // converted all tests to new notation and we
+ // can report an error when we don't have an
+ // interface before go1.18.
+ types = typ
+ case *TypeParam:
+ // Embedding stand-alone type parameters is not permitted for now.
+ // This case is handled during union parsing.
+ unreachable()
+ default:
+ if typ == Typ[Invalid] {
+ continue
+ }
+ if check != nil && !check.allowVersion(check.pkg, 1, 18) {
+ check.errorf(atPos(pos), _InvalidIfaceEmbed, "%s is not an interface", typ)
+ continue
+ }
+ types = typ
+ }
+ allTypes = intersect(allTypes, types)
+ }
+ ityp.embedPos = nil // not needed anymore (errors have been reported)
+
+ // process todo's (this only happens if check == nil)
+ for i := 0; i < len(todo); i += 2 {
+ m := todo[i]
+ other := todo[i+1]
+ if !Identical(m.typ, other.typ) {
+ panic(fmt.Sprintf("%v: duplicate method %s", m.pos, m.name))
+ }
+ }
+
+ if methods != nil {
+ sort.Sort(byUniqueMethodName(methods))
+ ityp.tset.methods = methods
+ }
+ ityp.tset.types = allTypes
+
+ return ityp.tset
+}
+
+func sortMethods(list []*Func) {
+ sort.Sort(byUniqueMethodName(list))
+}
+
+func assertSortedMethods(list []*Func) {
+ if !debug {
+ panic("internal error: assertSortedMethods called outside debug mode")
+ }
+ if !sort.IsSorted(byUniqueMethodName(list)) {
+ panic("internal error: methods not sorted")
+ }
+}
+
+// byUniqueMethodName method lists can be sorted by their unique method names.
+type byUniqueMethodName []*Func
+
+func (a byUniqueMethodName) Len() int { return len(a) }
+func (a byUniqueMethodName) Less(i, j int) bool { return a[i].Id() < a[j].Id() }
+func (a byUniqueMethodName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
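computeTypeSet above folds an interface's explicit methods and embedded elements into a single _TypeSet, intersecting the type restrictions contributed by each embedded element. In ordinary source (go1.18 or later) that intersection determines which type arguments a constraint accepts; a small illustrative program, with every type name invented for the example:

    package main

    import "fmt"

    type Integerish interface{ ~int | ~int64 }
    type IntOrString interface{ ~int | ~string }

    // Both embeds two constraints; its type set is the intersection of the
    // embedded type sets, which leaves only ~int.
    type Both interface {
        Integerish
        IntOrString
    }

    func onlyInt[T Both](v T) T { return v }

    type myInt int

    func main() {
        fmt.Println(onlyInt(42))       // int is in the intersection
        fmt.Println(onlyInt(myInt(7))) // so is any type whose underlying type is int
        // onlyInt("x") would not compile: string is only in IntOrString.
    }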
diff --git a/src/go/types/typestring.go b/src/go/types/typestring.go
index fe27f0f276..c0c69624ec 100644
--- a/src/go/types/typestring.go
+++ b/src/go/types/typestring.go
@@ -158,12 +158,19 @@ func writeType(buf *bytes.Buffer, typ Type, qf Qualifier, visited []Type) {
buf.WriteString("func")
writeSignature(buf, t, qf, visited)
- case *_Sum:
- for i, t := range t.types {
+ case *Union:
+ if t.IsEmpty() {
+ buf.WriteString("⊥")
+ break
+ }
+ for i, t := range t.terms {
if i > 0 {
- buf.WriteString(", ")
+ buf.WriteByte('|')
}
- writeType(buf, t, qf, visited)
+ if t.tilde {
+ buf.WriteByte('~')
+ }
+ writeType(buf, t.typ, qf, visited)
}
case *Interface:
@@ -183,7 +190,8 @@ func writeType(buf *bytes.Buffer, typ Type, qf Qualifier, visited []Type) {
if gcCompatibilityMode {
// print flattened interface
// (useful to compare against gc-generated interfaces)
- for i, m := range t.allMethods {
+ tset := t.typeSet()
+ for i, m := range tset.methods {
if i > 0 {
buf.WriteString("; ")
}
@@ -191,12 +199,12 @@ func writeType(buf *bytes.Buffer, typ Type, qf Qualifier, visited []Type) {
writeSignature(buf, m.typ.(*Signature), qf, visited)
empty = false
}
- if !empty && t.allTypes != nil {
+ if !empty && tset.types != nil {
buf.WriteString("; ")
}
- if t.allTypes != nil {
+ if tset.types != nil {
buf.WriteString("type ")
- writeType(buf, t.allTypes, qf, visited)
+ writeType(buf, tset.types, qf, visited)
}
} else {
// print explicit interface methods and embedded types
@@ -208,14 +216,6 @@ func writeType(buf *bytes.Buffer, typ Type, qf Qualifier, visited []Type) {
writeSignature(buf, m.typ.(*Signature), qf, visited)
empty = false
}
- if !empty && t.types != nil {
- buf.WriteString("; ")
- }
- if t.types != nil {
- buf.WriteString("type ")
- writeType(buf, t.types, qf, visited)
- empty = false
- }
if !empty && len(t.embeddeds) > 0 {
buf.WriteString("; ")
}
@@ -227,7 +227,9 @@ func writeType(buf *bytes.Buffer, typ Type, qf Qualifier, visited []Type) {
empty = false
}
}
- if t.allMethods == nil || len(t.methods) > len(t.allMethods) {
+ // print /* incomplete */ if needed to satisfy existing tests
+ // TODO(gri) get rid of this eventually
+ if debug && t.tset == nil {
if !empty {
buf.WriteByte(' ')
}
@@ -268,39 +270,40 @@ func writeType(buf *bytes.Buffer, typ Type, qf Qualifier, visited []Type) {
}
case *Named:
+ if t.instance != nil {
+ buf.WriteByte(instanceMarker)
+ }
writeTypeName(buf, t.obj, qf)
if t.targs != nil {
// instantiated type
buf.WriteByte('[')
writeTypeList(buf, t.targs, qf, visited)
buf.WriteByte(']')
- } else if t.tparams != nil {
+ } else if t.TParams().Len() != 0 {
// parameterized type
- writeTParamList(buf, t.tparams, qf, visited)
+ writeTParamList(buf, t.TParams().list(), qf, visited)
}
- case *_TypeParam:
+ case *TypeParam:
s := "?"
if t.obj != nil {
+ // Optionally write out package for typeparams (like Named).
+ // TODO(rfindley): this is required for import/export, so
+ // we may need a separate function that won't be changed
+ // for debugging purposes.
+ if t.obj.pkg != nil {
+ writePackage(buf, t.obj.pkg, qf)
+ }
s = t.obj.name
}
buf.WriteString(s + subscript(t.id))
- case *instance:
- buf.WriteByte(instanceMarker) // indicate "non-evaluated" syntactic instance
- writeTypeName(buf, t.base.obj, qf)
- buf.WriteByte('[')
- writeTypeList(buf, t.targs, qf, visited)
- buf.WriteByte(']')
-
- case *bottom:
- buf.WriteString("⊥")
-
case *top:
buf.WriteString("⊤")
default:
// For externally defined implementations of Type.
+ // Note: In this case cycles won't be caught.
buf.WriteString(t.String())
}
}
@@ -321,7 +324,7 @@ func writeTParamList(buf *bytes.Buffer, list []*TypeName, qf Qualifier, visited
for i, p := range list {
// TODO(rFindley) support 'any' sugar here.
var b Type = &emptyInterface
- if t, _ := p.typ.(*_TypeParam); t != nil && t.bound != nil {
+ if t, _ := p.typ.(*TypeParam); t != nil && t.bound != nil {
b = t.bound
}
if i > 0 {
@@ -334,7 +337,7 @@ func writeTParamList(buf *bytes.Buffer, list []*TypeName, qf Qualifier, visited
}
prev = b
- if t, _ := p.typ.(*_TypeParam); t != nil {
+ if t, _ := p.typ.(*TypeParam); t != nil {
writeType(buf, t, qf, visited)
} else {
buf.WriteString(p.name)
@@ -348,17 +351,38 @@ func writeTParamList(buf *bytes.Buffer, list []*TypeName, qf Qualifier, visited
}
func writeTypeName(buf *bytes.Buffer, obj *TypeName, qf Qualifier) {
- s := "<Named w/o object>"
- if obj != nil {
- if obj.pkg != nil {
- writePackage(buf, obj.pkg, qf)
+ if obj == nil {
+ buf.WriteString("<Named w/o object>")
+ return
+ }
+ if obj.pkg != nil {
+ writePackage(buf, obj.pkg, qf)
+ }
+ buf.WriteString(obj.name)
+
+ if instanceHashing != 0 {
+ // For locally defined types, use the (original!) TypeName's scope
+ // numbers to disambiguate.
+ typ := obj.typ.(*Named)
+ // TODO(gri) Figure out why typ.orig != typ.orig.orig sometimes
+ // and whether the loop can iterate more than twice.
+ // (It seems somehow connected to instance types.)
+ for typ.orig != typ {
+ typ = typ.orig
}
- // TODO(gri): function-local named types should be displayed
- // differently from named types at package level to avoid
- // ambiguity.
- s = obj.name
+ writeScopeNumbers(buf, typ.obj.parent)
+ }
+}
+
+// writeScopeNumbers writes the number sequence for this scope to buf
+// in the form ".i.j.k" where i, j, k, etc. stand for scope numbers.
+// If a scope is nil or has no parent (such as a package scope), nothing
+// is written.
+func writeScopeNumbers(buf *bytes.Buffer, s *Scope) {
+ if s != nil && s.number > 0 {
+ writeScopeNumbers(buf, s.parent)
+ fmt.Fprintf(buf, ".%d", s.number)
}
- buf.WriteString(s)
}
func writeTuple(buf *bytes.Buffer, tup *Tuple, variadic bool, qf Qualifier, visited []Type) {
@@ -403,8 +427,8 @@ func WriteSignature(buf *bytes.Buffer, sig *Signature, qf Qualifier) {
}
func writeSignature(buf *bytes.Buffer, sig *Signature, qf Qualifier, visited []Type) {
- if sig.tparams != nil {
- writeTParamList(buf, sig.tparams, qf, visited)
+ if sig.TParams().Len() != 0 {
+ writeTParamList(buf, sig.TParams().list(), qf, visited)
}
writeTuple(buf, sig.params, sig.variadic, qf, visited)
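The new writeTypeName above disambiguates locally defined types by appending their scope numbers via writeScopeNumbers, producing suffixes of the form ".i.j.k". A self-contained sketch of that recursion over a simplified scope type (the fields below are stand-ins, not the go/types Scope):

    package main

    import (
        "bytes"
        "fmt"
    )

    // scope keeps only the fields the recursion needs.
    type scope struct {
        parent *scope
        number int // 0 for package (and universe) scopes
    }

    // writeScopeNumbers mirrors the helper above: it recurses to the root
    // first, then appends ".number" on the way back, skipping number == 0.
    func writeScopeNumbers(buf *bytes.Buffer, s *scope) {
        if s != nil && s.number > 0 {
            writeScopeNumbers(buf, s.parent)
            fmt.Fprintf(buf, ".%d", s.number)
        }
    }

    func main() {
        pkg := &scope{}                      // package scope, number 0
        fn := &scope{parent: pkg, number: 2} // 2nd child scope of the package
        blk := &scope{parent: fn, number: 1} // 1st child scope of the function

        var buf bytes.Buffer
        writeScopeNumbers(&buf, blk)
        fmt.Println(buf.String()) // .2.1
    }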
diff --git a/src/go/types/typestring_test.go b/src/go/types/typestring_test.go
index b16529dc64..f02c0d9c18 100644
--- a/src/go/types/typestring_test.go
+++ b/src/go/types/typestring_test.go
@@ -95,6 +95,8 @@ var independentTestTypes = []testEntry{
dup("interface{}"),
dup("interface{m()}"),
dup(`interface{String() string; m(int) float32}`),
+ dup("interface{int|float32|complex128}"),
+ dup("interface{int|~float32|~complex128}"),
// TODO(rFindley) uncomment this once this AST is accepted, and add more test
// cases.
@@ -143,6 +145,10 @@ func TestTypeString(t *testing.T) {
}
func TestIncompleteInterfaces(t *testing.T) {
+ if !Debug {
+ t.Skip("requires type checker to be compiled with debug = true")
+ }
+
sig := NewSignature(nil, nil, nil, false)
m := NewFunc(token.NoPos, nil, "m", sig)
for _, test := range []struct {
diff --git a/src/go/types/typeterm.go b/src/go/types/typeterm.go
new file mode 100644
index 0000000000..dbd055a580
--- /dev/null
+++ b/src/go/types/typeterm.go
@@ -0,0 +1,166 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+// TODO(gri) use a different symbol instead of ⊤ for the set of all types
+// (⊤ is hard to distinguish from T in some fonts)
+
+// A term describes elementary type sets:
+//
+// ∅: (*term)(nil) == ∅ // set of no types (empty set)
+// ⊤: &term{} == ⊤ // set of all types
+// T: &term{false, T} == {T} // set of type T
+// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t
+//
+type term struct {
+ tilde bool // valid if typ != nil
+ typ Type
+}
+
+func (x *term) String() string {
+ switch {
+ case x == nil:
+ return "∅"
+ case x.typ == nil:
+ return "⊤"
+ case x.tilde:
+ return "~" + x.typ.String()
+ default:
+ return x.typ.String()
+ }
+}
+
+// equal reports whether x and y represent the same type set.
+func (x *term) equal(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+ return x == y
+ case x.typ == nil || y.typ == nil:
+ return x.typ == y.typ
+ }
+ // ∅ ⊂ x, y ⊂ ⊤
+
+ return x.tilde == y.tilde && Identical(x.typ, y.typ)
+}
+
+// union returns the union x ∪ y: zero, one, or two non-nil terms.
+func (x *term) union(y *term) (_, _ *term) {
+ // easy cases
+ switch {
+ case x == nil && y == nil:
+ return nil, nil // ∅ ∪ ∅ == ∅
+ case x == nil:
+ return y, nil // ∅ ∪ y == y
+ case y == nil:
+ return x, nil // x ∪ ∅ == x
+ case x.typ == nil:
+ return x, nil // ⊤ ∪ y == ⊤
+ case y.typ == nil:
+ return y, nil // x ∪ ⊤ == ⊤
+ }
+ // ∅ ⊂ x, y ⊂ ⊤
+
+ if x.disjoint(y) {
+ return x, y // x ∪ y == (x, y) if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ∪ ~t == ~t
+ // ~t ∪ T == ~t
+ // T ∪ ~t == ~t
+ // T ∪ T == T
+ if x.tilde || !y.tilde {
+ return x, nil
+ }
+ return y, nil
+}
+
+// intersect returns the intersection x ∩ y.
+func (x *term) intersect(y *term) *term {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+ return nil // ∅ ∩ y == ∅ and x ∩ ∅ == ∅
+ case x.typ == nil:
+ return y // ⊤ ∩ y == y
+ case y.typ == nil:
+ return x // x ∩ ⊤ == x
+ }
+ // ∅ ⊂ x, y ⊂ ⊤
+
+ if x.disjoint(y) {
+ return nil // x ∩ y == ∅ if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ∩ ~t == ~t
+ // ~t ∩ T == T
+ // T ∩ ~t == T
+ // T ∩ T == T
+ if !x.tilde || y.tilde {
+ return x
+ }
+ return y
+}
+
+// includes reports whether t ∈ x.
+func (x *term) includes(t Type) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return false // t ∈ ∅ == false
+ case x.typ == nil:
+ return true // t ∈ ⊤ == true
+ }
+ // ∅ ⊂ x ⊂ ⊤
+
+ u := t
+ if x.tilde {
+ u = under(u)
+ }
+ return Identical(x.typ, u)
+}
+
+// subsetOf reports whether x ⊆ y.
+func (x *term) subsetOf(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return true // ∅ ⊆ y == true
+ case y == nil:
+ return false // x ⊆ ∅ == false since x != ∅
+ case y.typ == nil:
+ return true // x ⊆ ⊤ == true
+ case x.typ == nil:
+ return false // ⊤ ⊆ y == false since y != ⊤
+ }
+ // ∅ ⊂ x, y ⊂ ⊤
+
+ if x.disjoint(y) {
+ return false // x ⊆ y == false if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ⊆ ~t == true
+ // ~t ⊆ T == false
+ // T ⊆ ~t == true
+ // T ⊆ T == true
+ return !x.tilde || y.tilde
+}
+
+// disjoint reports whether x ∩ y == ∅.
+// x.typ and y.typ must not be nil.
+func (x *term) disjoint(y *term) bool {
+ ux := x.typ
+ if y.tilde {
+ ux = under(ux)
+ }
+ uy := y.typ
+ if x.tilde {
+ uy = under(uy)
+ }
+ return !Identical(ux, uy)
+}
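The term operations above form a small algebra over elementary type sets (∅, ⊤, T, ~t) that the union and type-set code builds on. Because term is unexported, the following sketch would have to live inside package types (for instance in a _test.go file there); the values merely illustrate the case analysis:

    package types

    import (
        "fmt"
        "testing"
    )

    // TestTermAlgebraDemo exercises union, intersect, subsetOf, and includes
    // on the terms int and ~int (illustrative only).
    func TestTermAlgebraDemo(t *testing.T) {
        x := &term{false, Typ[Int]} // the set {int}
        y := &term{true, Typ[Int]}  // all types with underlying type int

        u1, u2 := x.union(y)
        fmt.Println(u1, u2)               // ~int ∅  (T ∪ ~t == ~t)
        fmt.Println(x.intersect(y))       // int     (T ∩ ~t == T)
        fmt.Println(x.subsetOf(y))        // true    (T ⊆ ~t)
        fmt.Println(y.includes(Typ[Int])) // true    (int ∈ ~int)
    }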
diff --git a/src/go/types/typeterm_test.go b/src/go/types/typeterm_test.go
new file mode 100644
index 0000000000..391ff3e05f
--- /dev/null
+++ b/src/go/types/typeterm_test.go
@@ -0,0 +1,205 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "strings"
+ "testing"
+)
+
+var testTerms = map[string]*term{
+ "∅": nil,
+ "⊤": {},
+ "int": {false, Typ[Int]},
+ "~int": {true, Typ[Int]},
+ "string": {false, Typ[String]},
+ "~string": {true, Typ[String]},
+ // TODO(gri) add a defined type
+}
+
+func TestTermString(t *testing.T) {
+ for want, x := range testTerms {
+ if got := x.String(); got != want {
+ t.Errorf("%v.String() == %v; want %v", x, got, want)
+ }
+ }
+}
+
+func split(s string, n int) []string {
+ r := strings.Split(s, " ")
+ if len(r) != n {
+ panic("invalid test case: " + s)
+ }
+ return r
+}
+
+func testTerm(name string) *term {
+ r, ok := testTerms[name]
+ if !ok {
+ panic("invalid test argument: " + name)
+ }
+ return r
+}
+
+func TestTermEqual(t *testing.T) {
+ for _, test := range []string{
+ "∅ ∅ T",
+ "⊤ ⊤ T",
+ "int int T",
+ "~int ~int T",
+ "∅ ⊤ F",
+ "∅ int F",
+ "∅ ~int F",
+ "⊤ int F",
+ "⊤ ~int F",
+ "int ~int F",
+ } {
+ args := split(test, 3)
+ x := testTerm(args[0])
+ y := testTerm(args[1])
+ want := args[2] == "T"
+ if got := x.equal(y); got != want {
+ t.Errorf("%v.equal(%v) = %v; want %v", x, y, got, want)
+ }
+ // equal is symmetric
+ x, y = y, x
+ if got := x.equal(y); got != want {
+ t.Errorf("%v.equal(%v) = %v; want %v", x, y, got, want)
+ }
+ }
+}
+
+func TestTermUnion(t *testing.T) {
+ for _, test := range []string{
+ "∅ ∅ ∅ ∅",
+ "∅ ⊤ ⊤ ∅",
+ "∅ int int ∅",
+ "∅ ~int ~int ∅",
+ "⊤ ⊤ ⊤ ∅",
+ "⊤ int ⊤ ∅",
+ "⊤ ~int ⊤ ∅",
+ "int int int ∅",
+ "int ~int ~int ∅",
+ "int string int string",
+ "int ~string int ~string",
+ "~int ~string ~int ~string",
+
+ // union is symmetric, but the result order isn't - repeat symmetric cases explicitly
+ "⊤ ∅ ⊤ ∅",
+ "int ∅ int ∅",
+ "~int ∅ ~int ∅",
+ "int ⊤ ⊤ ∅",
+ "~int ⊤ ⊤ ∅",
+ "~int int ~int ∅",
+ "string int string int",
+ "~string int ~string int",
+ "~string ~int ~string ~int",
+ } {
+ args := split(test, 4)
+ x := testTerm(args[0])
+ y := testTerm(args[1])
+ want1 := testTerm(args[2])
+ want2 := testTerm(args[3])
+ if got1, got2 := x.union(y); !got1.equal(want1) || !got2.equal(want2) {
+ t.Errorf("%v.union(%v) = %v, %v; want %v, %v", x, y, got1, got2, want1, want2)
+ }
+ }
+}
+
+func TestTermIntersection(t *testing.T) {
+ for _, test := range []string{
+ "∅ ∅ ∅",
+ "∅ ⊤ ∅",
+ "∅ int ∅",
+ "∅ ~int ∅",
+ "⊤ ⊤ ⊤",
+ "⊤ int int",
+ "⊤ ~int ~int",
+ "int int int",
+ "int ~int int",
+ "int string ∅",
+ "int ~string ∅",
+ "~int ~string ∅",
+ } {
+ args := split(test, 3)
+ x := testTerm(args[0])
+ y := testTerm(args[1])
+ want := testTerm(args[2])
+ if got := x.intersect(y); !got.equal(want) {
+ t.Errorf("%v.intersect(%v) = %v; want %v", x, y, got, want)
+ }
+ // intersect is symmetric
+ x, y = y, x
+ if got := x.intersect(y); !got.equal(want) {
+ t.Errorf("%v.intersect(%v) = %v; want %v", x, y, got, want)
+ }
+ }
+}
+
+func TestTermIncludes(t *testing.T) {
+ for _, test := range []string{
+ "∅ int F",
+ "⊤ int T",
+ "int int T",
+ "~int int T",
+ "string int F",
+ "~string int F",
+ } {
+ args := split(test, 3)
+ x := testTerm(args[0])
+ y := testTerm(args[1]).typ
+ want := args[2] == "T"
+ if got := x.includes(y); got != want {
+ t.Errorf("%v.includes(%v) = %v; want %v", x, y, got, want)
+ }
+ }
+}
+
+func TestTermSubsetOf(t *testing.T) {
+ for _, test := range []string{
+ "∅ ∅ T",
+ "⊤ ⊤ T",
+ "int int T",
+ "~int ~int T",
+ "∅ ⊤ T",
+ "∅ int T",
+ "∅ ~int T",
+ "⊤ int F",
+ "⊤ ~int F",
+ "int ~int T",
+ } {
+ args := split(test, 3)
+ x := testTerm(args[0])
+ y := testTerm(args[1])
+ want := args[2] == "T"
+ if got := x.subsetOf(y); got != want {
+ t.Errorf("%v.subsetOf(%v) = %v; want %v", x, y, got, want)
+ }
+ }
+}
+
+func TestTermDisjoint(t *testing.T) {
+ for _, test := range []string{
+ "int int F",
+ "~int ~int F",
+ "int ~int F",
+ "int string T",
+ "int ~string T",
+ "~int ~string T",
+ } {
+ args := split(test, 3)
+ x := testTerm(args[0])
+ y := testTerm(args[1])
+ want := args[2] == "T"
+ if got := x.disjoint(y); got != want {
+ t.Errorf("%v.disjoint(%v) = %v; want %v", x, y, got, want)
+ }
+ // disjoint is symmetric
+ x, y = y, x
+ if got := x.disjoint(y); got != want {
+ t.Errorf("%v.disjoint(%v) = %v; want %v", x, y, got, want)
+ }
+ }
+}
diff --git a/src/go/types/typexpr.go b/src/go/types/typexpr.go
index 1738d864a6..a812ba6519 100644
--- a/src/go/types/typexpr.go
+++ b/src/go/types/typexpr.go
@@ -12,8 +12,6 @@ import (
"go/constant"
"go/internal/typeparams"
"go/token"
- "sort"
- "strconv"
"strings"
)
@@ -29,13 +27,24 @@ func (check *Checker) ident(x *operand, e *ast.Ident, def *Named, wantType bool)
// Note that we cannot use check.lookup here because the returned scope
// may be different from obj.Parent(). See also Scope.LookupParent doc.
scope, obj := check.scope.LookupParent(e.Name, check.pos)
- if obj == nil {
+ switch obj {
+ case nil:
if e.Name == "_" {
- check.errorf(e, _InvalidBlank, "cannot use _ as value or type")
+ check.error(e, _InvalidBlank, "cannot use _ as value or type")
} else {
check.errorf(e, _UndeclaredName, "undeclared name: %s", e.Name)
}
return
+ case universeAny, universeComparable:
+ if !check.allowVersion(check.pkg, 1, 18) {
+ check.errorf(e, _UndeclaredName, "undeclared name: %s (requires version go1.18 or later)", e.Name)
+ return
+ }
+ // If we allow "any" for general use, this if-statement can be removed (issue #33232).
+ if obj == universeAny {
+ check.error(e, _Todo, "cannot use any outside constraint position")
+ return
+ }
}
check.recordUse(e, obj)
@@ -58,7 +67,7 @@ func (check *Checker) ident(x *operand, e *ast.Ident, def *Named, wantType bool)
// If so, mark the respective package as used.
// (This code is only needed for dot-imports. Without them,
// we only have to mark variables, see *Var case below).
- if pkgName := check.dotImportMap[dotImportKey{scope, obj}]; pkgName != nil {
+ if pkgName := check.dotImportMap[dotImportKey{scope, obj.Name()}]; pkgName != nil {
pkgName.used = true
}
@@ -142,12 +151,12 @@ func (check *Checker) ordinaryType(pos positioner, typ Type) {
// type-checking.
check.later(func() {
if t := asInterface(typ); t != nil {
- check.completeInterface(pos.Pos(), t) // TODO(gri) is this the correct position?
- if t.allTypes != nil {
- check.softErrorf(pos, _Todo, "interface contains type constraints (%s)", t.allTypes)
+ tset := computeTypeSet(check, pos.Pos(), t) // TODO(gri) is this the correct position?
+ if tset.types != nil {
+ check.softErrorf(pos, _Todo, "interface contains type constraints (%s)", tset.types)
return
}
- if t._IsComparable() {
+ if tset.IsComparable() {
check.softErrorf(pos, _Todo, "interface is (or embeds) comparable")
}
}
@@ -194,205 +203,6 @@ func (check *Checker) genericType(e ast.Expr, reportErr bool) Type {
return typ
}
-// isubst returns an x with identifiers substituted per the substitution map smap.
-// isubst only handles the case of (valid) method receiver type expressions correctly.
-func isubst(x ast.Expr, smap map[*ast.Ident]*ast.Ident) ast.Expr {
- switch n := x.(type) {
- case *ast.Ident:
- if alt := smap[n]; alt != nil {
- return alt
- }
- case *ast.StarExpr:
- X := isubst(n.X, smap)
- if X != n.X {
- new := *n
- new.X = X
- return &new
- }
- case *ast.IndexExpr:
- elems := typeparams.UnpackExpr(n.Index)
- var newElems []ast.Expr
- for i, elem := range elems {
- new := isubst(elem, smap)
- if new != elem {
- if newElems == nil {
- newElems = make([]ast.Expr, len(elems))
- copy(newElems, elems)
- }
- newElems[i] = new
- }
- }
- if newElems != nil {
- index := typeparams.PackExpr(newElems)
- new := *n
- new.Index = index
- return &new
- }
- case *ast.ParenExpr:
- return isubst(n.X, smap) // no need to keep parentheses
- default:
- // Other receiver type expressions are invalid.
- // It's fine to ignore those here as they will
- // be checked elsewhere.
- }
- return x
-}
-
-// funcType type-checks a function or method type.
-func (check *Checker) funcType(sig *Signature, recvPar *ast.FieldList, ftyp *ast.FuncType) {
- check.openScope(ftyp, "function")
- check.scope.isFunc = true
- check.recordScope(ftyp, check.scope)
- sig.scope = check.scope
- defer check.closeScope()
-
- var recvTyp ast.Expr // rewritten receiver type; valid if != nil
- if recvPar != nil && len(recvPar.List) > 0 {
- // collect generic receiver type parameters, if any
- // - a receiver type parameter is like any other type parameter, except that it is declared implicitly
- // - the receiver specification acts as local declaration for its type parameters, which may be blank
- _, rname, rparams := check.unpackRecv(recvPar.List[0].Type, true)
- if len(rparams) > 0 {
- // Blank identifiers don't get declared and regular type-checking of the instantiated
- // parameterized receiver type expression fails in Checker.collectParams of receiver.
- // Identify blank type parameters and substitute each with a unique new identifier named
- // "n_" (where n is the parameter index) and which cannot conflict with any user-defined
- // name.
- var smap map[*ast.Ident]*ast.Ident // substitution map from "_" to "n_" identifiers
- for i, p := range rparams {
- if p.Name == "_" {
- new := *p
- new.Name = fmt.Sprintf("%d_", i)
- rparams[i] = &new // use n_ identifier instead of _ so it can be looked up
- if smap == nil {
- smap = make(map[*ast.Ident]*ast.Ident)
- }
- smap[p] = &new
- }
- }
- if smap != nil {
- // blank identifiers were found => use rewritten receiver type
- recvTyp = isubst(recvPar.List[0].Type, smap)
- }
- sig.rparams = check.declareTypeParams(nil, rparams)
- // determine receiver type to get its type parameters
- // and the respective type parameter bounds
- var recvTParams []*TypeName
- if rname != nil {
- // recv should be a Named type (otherwise an error is reported elsewhere)
- // Also: Don't report an error via genericType since it will be reported
- // again when we type-check the signature.
- // TODO(gri) maybe the receiver should be marked as invalid instead?
- if recv := asNamed(check.genericType(rname, false)); recv != nil {
- recvTParams = recv.tparams
- }
- }
- // provide type parameter bounds
- // - only do this if we have the right number (otherwise an error is reported elsewhere)
- if len(sig.rparams) == len(recvTParams) {
- // We have a list of *TypeNames but we need a list of Types.
- list := make([]Type, len(sig.rparams))
- for i, t := range sig.rparams {
- list[i] = t.typ
- }
- smap := makeSubstMap(recvTParams, list)
- for i, tname := range sig.rparams {
- bound := recvTParams[i].typ.(*_TypeParam).bound
- // bound is (possibly) parameterized in the context of the
- // receiver type declaration. Substitute parameters for the
- // current context.
- // TODO(gri) should we assume now that bounds always exist?
- // (no bound == empty interface)
- if bound != nil {
- bound = check.subst(tname.pos, bound, smap)
- tname.typ.(*_TypeParam).bound = bound
- }
- }
- }
- }
- }
-
- if tparams := typeparams.Get(ftyp); tparams != nil {
- sig.tparams = check.collectTypeParams(tparams)
- // Always type-check method type parameters but complain that they are not allowed.
- // (A separate check is needed when type-checking interface method signatures because
- // they don't have a receiver specification.)
- if recvPar != nil {
- check.errorf(tparams, _Todo, "methods cannot have type parameters")
- }
- }
-
- // Value (non-type) parameters' scope starts in the function body. Use a temporary scope for their
- // declarations and then squash that scope into the parent scope (and report any redeclarations at
- // that time).
- scope := NewScope(check.scope, token.NoPos, token.NoPos, "function body (temp. scope)")
- recvList, _ := check.collectParams(scope, recvPar, recvTyp, false) // use rewritten receiver type, if any
- params, variadic := check.collectParams(scope, ftyp.Params, nil, true)
- results, _ := check.collectParams(scope, ftyp.Results, nil, false)
- scope.squash(func(obj, alt Object) {
- check.errorf(obj, _DuplicateDecl, "%s redeclared in this block", obj.Name())
- check.reportAltDecl(alt)
- })
-
- if recvPar != nil {
- // recv parameter list present (may be empty)
- // spec: "The receiver is specified via an extra parameter section preceding the
- // method name. That parameter section must declare a single parameter, the receiver."
- var recv *Var
- switch len(recvList) {
- case 0:
- // error reported by resolver
- recv = NewParam(0, nil, "", Typ[Invalid]) // ignore recv below
- default:
- // more than one receiver
- check.error(recvList[len(recvList)-1], _BadRecv, "method must have exactly one receiver")
- fallthrough // continue with first receiver
- case 1:
- recv = recvList[0]
- }
-
- // TODO(gri) We should delay rtyp expansion to when we actually need the
- // receiver; thus all checks here should be delayed to later.
- rtyp, _ := deref(recv.typ)
- rtyp = expand(rtyp)
-
- // spec: "The receiver type must be of the form T or *T where T is a type name."
- // (ignore invalid types - error was reported before)
- if t := rtyp; t != Typ[Invalid] {
- var err string
- if T := asNamed(t); T != nil {
- // spec: "The type denoted by T is called the receiver base type; it must not
- // be a pointer or interface type and it must be declared in the same package
- // as the method."
- if T.obj.pkg != check.pkg {
- err = "type not defined in this package"
- } else {
- switch u := optype(T).(type) {
- case *Basic:
- // unsafe.Pointer is treated like a regular pointer
- if u.kind == UnsafePointer {
- err = "unsafe.Pointer"
- }
- case *Pointer, *Interface:
- err = "pointer or interface type"
- }
- }
- } else {
- err = "basic or unnamed type"
- }
- if err != "" {
- check.errorf(recv, _InvalidRecv, "invalid receiver %s (%s)", recv.typ, err)
- // ok to continue
- }
- }
- sig.recv = recv
- }
-
- sig.params = NewTuple(params...)
- sig.results = NewTuple(results...)
- sig.variadic = variadic
-}
-
// goTypeName returns the Go type name for typ and
// removes any occurrences of "types." from that name.
func goTypeName(typ Type) string {
@@ -462,13 +272,10 @@ func (check *Checker) typInternal(e0 ast.Expr, def *Named) (T Type) {
check.errorf(&x, _NotAType, "%s is not a type", &x)
}
- case *ast.IndexExpr:
- if typeparams.Enabled {
- exprs := typeparams.UnpackExpr(e.Index)
- return check.instantiatedType(e.X, exprs, def)
- }
- check.errorf(e0, _NotAType, "%s is not a type", e0)
- check.use(e.X)
+ case *ast.IndexExpr, *ast.MultiIndexExpr:
+ ix := typeparams.UnpackIndexExpr(e)
+ // TODO(rfindley): type instantiation should require go1.18
+ return check.instantiatedType(ix.X, ix.Indices, def)
case *ast.ParenExpr:
// Generic types must be instantiated before they can be used in any form.
@@ -604,43 +411,32 @@ func (check *Checker) typeOrNil(e ast.Expr) Type {
return Typ[Invalid]
}
-func (check *Checker) instantiatedType(x ast.Expr, targs []ast.Expr, def *Named) Type {
- b := check.genericType(x, true) // TODO(gri) what about cycles?
- if b == Typ[Invalid] {
- return b // error already reported
- }
- base := asNamed(b)
- if base == nil {
- unreachable() // should have been caught by genericType
+func (check *Checker) instantiatedType(x ast.Expr, targsx []ast.Expr, def *Named) Type {
+ base := check.genericType(x, true)
+ if base == Typ[Invalid] {
+ return base // error already reported
}
- // create a new type instance rather than instantiate the type
- // TODO(gri) should do argument number check here rather than
- // when instantiating the type?
- typ := new(instance)
- def.setUnderlying(typ)
-
- typ.check = check
- typ.pos = x.Pos()
- typ.base = base
-
- // evaluate arguments (always)
- typ.targs = check.typeList(targs)
- if typ.targs == nil {
+ // evaluate arguments
+ targs := check.typeList(targsx)
+ if targs == nil {
def.setUnderlying(Typ[Invalid]) // avoid later errors due to lazy instantiation
return Typ[Invalid]
}
- // determine argument positions (for error reporting)
- typ.poslist = make([]token.Pos, len(targs))
- for i, arg := range targs {
- typ.poslist[i] = arg.Pos()
+ // determine argument positions
+ posList := make([]token.Pos, len(targs))
+ for i, arg := range targsx {
+ posList[i] = arg.Pos()
}
+ typ := check.InstantiateLazy(x.Pos(), base, targs, posList, true)
+ def.setUnderlying(typ)
+
// make sure we check instantiation works at least once
// and that the resulting type is valid
check.later(func() {
- t := typ.expand()
+ t := expand(typ)
check.validType(t, nil)
})
@@ -689,518 +485,3 @@ func (check *Checker) typeList(list []ast.Expr) []Type {
}
return res
}
-
-// collectParams declares the parameters of list in scope and returns the corresponding
-// variable list. If type0 != nil, it is used instead of the first type in list.
-func (check *Checker) collectParams(scope *Scope, list *ast.FieldList, type0 ast.Expr, variadicOk bool) (params []*Var, variadic bool) {
- if list == nil {
- return
- }
-
- var named, anonymous bool
- for i, field := range list.List {
- ftype := field.Type
- if i == 0 && type0 != nil {
- ftype = type0
- }
- if t, _ := ftype.(*ast.Ellipsis); t != nil {
- ftype = t.Elt
- if variadicOk && i == len(list.List)-1 && len(field.Names) <= 1 {
- variadic = true
- } else {
- check.softErrorf(t, _MisplacedDotDotDot, "can only use ... with final parameter in list")
- // ignore ... and continue
- }
- }
- typ := check.varType(ftype)
- // The parser ensures that f.Tag is nil and we don't
- // care if a constructed AST contains a non-nil tag.
- if len(field.Names) > 0 {
- // named parameter
- for _, name := range field.Names {
- if name.Name == "" {
- check.invalidAST(name, "anonymous parameter")
- // ok to continue
- }
- par := NewParam(name.Pos(), check.pkg, name.Name, typ)
- check.declare(scope, name, par, scope.pos)
- params = append(params, par)
- }
- named = true
- } else {
- // anonymous parameter
- par := NewParam(ftype.Pos(), check.pkg, "", typ)
- check.recordImplicit(field, par)
- params = append(params, par)
- anonymous = true
- }
- }
-
- if named && anonymous {
- check.invalidAST(list, "list contains both named and anonymous parameters")
- // ok to continue
- }
-
- // For a variadic function, change the last parameter's type from T to []T.
- // Since we type-checked T rather than ...T, we also need to retro-actively
- // record the type for ...T.
- if variadic {
- last := params[len(params)-1]
- last.typ = &Slice{elem: last.typ}
- check.recordTypeAndValue(list.List[len(list.List)-1].Type, typexpr, last.typ, nil)
- }
-
- return
-}
-
-func (check *Checker) declareInSet(oset *objset, pos token.Pos, obj Object) bool {
- if alt := oset.insert(obj); alt != nil {
- check.errorf(atPos(pos), _DuplicateDecl, "%s redeclared", obj.Name())
- check.reportAltDecl(alt)
- return false
- }
- return true
-}
-
-func (check *Checker) interfaceType(ityp *Interface, iface *ast.InterfaceType, def *Named) {
- var tlist *ast.Ident // "type" name of first entry in a type list declaration
- var types []ast.Expr
- for _, f := range iface.Methods.List {
- if len(f.Names) > 0 {
- // We have a method with name f.Names[0], or a type
- // of a type list (name.Name == "type").
- // (The parser ensures that there's only one method
- // and we don't care if a constructed AST has more.)
- name := f.Names[0]
- if name.Name == "_" {
- check.errorf(name, _BlankIfaceMethod, "invalid method name _")
- continue // ignore
- }
-
- if name.Name == "type" {
- // Always collect all type list entries, even from
- // different type lists, under the assumption that
- // the author intended to include all types.
- types = append(types, f.Type)
- if tlist != nil && tlist != name {
- check.errorf(name, _Todo, "cannot have multiple type lists in an interface")
- }
- tlist = name
- continue
- }
-
- typ := check.typ(f.Type)
- sig, _ := typ.(*Signature)
- if sig == nil {
- if typ != Typ[Invalid] {
- check.invalidAST(f.Type, "%s is not a method signature", typ)
- }
- continue // ignore
- }
-
- // Always type-check method type parameters but complain if they are not enabled.
- // (This extra check is needed here because interface method signatures don't have
- // a receiver specification.)
- if sig.tparams != nil {
- var at positioner = f.Type
- if tparams := typeparams.Get(f.Type); tparams != nil {
- at = tparams
- }
- check.errorf(at, _Todo, "methods cannot have type parameters")
- }
-
- // use named receiver type if available (for better error messages)
- var recvTyp Type = ityp
- if def != nil {
- recvTyp = def
- }
- sig.recv = NewVar(name.Pos(), check.pkg, "", recvTyp)
-
- m := NewFunc(name.Pos(), check.pkg, name.Name, sig)
- check.recordDef(name, m)
- ityp.methods = append(ityp.methods, m)
- } else {
- // We have an embedded type. completeInterface will
- // eventually verify that we have an interface.
- ityp.embeddeds = append(ityp.embeddeds, check.typ(f.Type))
- check.posMap[ityp] = append(check.posMap[ityp], f.Type.Pos())
- }
- }
-
- // type constraints
- ityp.types = _NewSum(check.collectTypeConstraints(iface.Pos(), types))
-
- if len(ityp.methods) == 0 && ityp.types == nil && len(ityp.embeddeds) == 0 {
- // empty interface
- ityp.allMethods = markComplete
- return
- }
-
- // sort for API stability
- sortMethods(ityp.methods)
- sortTypes(ityp.embeddeds)
-
- check.later(func() { check.completeInterface(iface.Pos(), ityp) })
-}
-
-func (check *Checker) completeInterface(pos token.Pos, ityp *Interface) {
- if ityp.allMethods != nil {
- return
- }
-
- // completeInterface may be called via the LookupFieldOrMethod,
- // MissingMethod, Identical, or IdenticalIgnoreTags external API
- // in which case check will be nil. In this case, type-checking
- // must be finished and all interfaces should have been completed.
- if check == nil {
- panic("internal error: incomplete interface")
- }
-
- if trace {
- // Types don't generally have position information.
- // If we don't have a valid pos provided, try to use
- // one close enough.
- if !pos.IsValid() && len(ityp.methods) > 0 {
- pos = ityp.methods[0].pos
- }
-
- check.trace(pos, "complete %s", ityp)
- check.indent++
- defer func() {
- check.indent--
- check.trace(pos, "=> %s (methods = %v, types = %v)", ityp, ityp.allMethods, ityp.allTypes)
- }()
- }
-
- // An infinitely expanding interface (due to a cycle) is detected
- // elsewhere (Checker.validType), so here we simply assume we only
- // have valid interfaces. Mark the interface as complete to avoid
- // infinite recursion if the validType check occurs later for some
- // reason.
- ityp.allMethods = markComplete
-
- // Methods of embedded interfaces are collected unchanged; i.e., the identity
- // of a method I.m's Func Object of an interface I is the same as that of
- // the method m in an interface that embeds interface I. On the other hand,
- // if a method is embedded via multiple overlapping embedded interfaces, we
- // don't provide a guarantee which "original m" got chosen for the embedding
- // interface. See also issue #34421.
- //
- // If we don't care to provide this identity guarantee anymore, instead of
- // reusing the original method in embeddings, we can clone the method's Func
- // Object and give it the position of a corresponding embedded interface. Then
- // we can get rid of the mpos map below and simply use the cloned method's
- // position.
-
- var seen objset
- var methods []*Func
- mpos := make(map[*Func]token.Pos) // method specification or method embedding position, for good error messages
- addMethod := func(pos token.Pos, m *Func, explicit bool) {
- switch other := seen.insert(m); {
- case other == nil:
- methods = append(methods, m)
- mpos[m] = pos
- case explicit:
- check.errorf(atPos(pos), _DuplicateDecl, "duplicate method %s", m.name)
- check.errorf(atPos(mpos[other.(*Func)]), _DuplicateDecl, "\tother declaration of %s", m.name) // secondary error, \t indented
- default:
- // We have a duplicate method name in an embedded (not explicitly declared) method.
- // Check method signatures after all types are computed (issue #33656).
- // If we're pre-go1.14 (overlapping embeddings are not permitted), report that
- // error here as well (even though we could do it eagerly) because it's the same
- // error message.
- check.later(func() {
- if !check.allowVersion(m.pkg, 1, 14) || !check.identical(m.typ, other.Type()) {
- check.errorf(atPos(pos), _DuplicateDecl, "duplicate method %s", m.name)
- check.errorf(atPos(mpos[other.(*Func)]), _DuplicateDecl, "\tother declaration of %s", m.name) // secondary error, \t indented
- }
- })
- }
- }
-
- for _, m := range ityp.methods {
- addMethod(m.pos, m, true)
- }
-
- // collect types
- allTypes := ityp.types
-
- posList := check.posMap[ityp]
- for i, typ := range ityp.embeddeds {
- pos := posList[i] // embedding position
- utyp := under(typ)
- etyp := asInterface(utyp)
- if etyp == nil {
- if utyp != Typ[Invalid] {
- var format string
- if _, ok := utyp.(*_TypeParam); ok {
- format = "%s is a type parameter, not an interface"
- } else {
- format = "%s is not an interface"
- }
- // TODO: correct error code.
- check.errorf(atPos(pos), _InvalidIfaceEmbed, format, typ)
- }
- continue
- }
- check.completeInterface(pos, etyp)
- for _, m := range etyp.allMethods {
- addMethod(pos, m, false) // use embedding position pos rather than m.pos
- }
- allTypes = intersect(allTypes, etyp.allTypes)
- }
-
- if methods != nil {
- sort.Sort(byUniqueMethodName(methods))
- ityp.allMethods = methods
- }
- ityp.allTypes = allTypes
-}
-
-// intersect computes the intersection of the types x and y.
-// Note: A incomming nil type stands for the top type. A top
-// type result is returned as nil.
-func intersect(x, y Type) (r Type) {
- defer func() {
- if r == theTop {
- r = nil
- }
- }()
-
- switch {
- case x == theBottom || y == theBottom:
- return theBottom
- case x == nil || x == theTop:
- return y
- case y == nil || x == theTop:
- return x
- }
-
- xtypes := unpackType(x)
- ytypes := unpackType(y)
- // Compute the list rtypes which includes only
- // types that are in both xtypes and ytypes.
- // Quadratic algorithm, but good enough for now.
- // TODO(gri) fix this
- var rtypes []Type
- for _, x := range xtypes {
- if includes(ytypes, x) {
- rtypes = append(rtypes, x)
- }
- }
-
- if rtypes == nil {
- return theBottom
- }
- return _NewSum(rtypes)
-}
-
-func sortTypes(list []Type) {
- sort.Stable(byUniqueTypeName(list))
-}
-
-// byUniqueTypeName named type lists can be sorted by their unique type names.
-type byUniqueTypeName []Type
-
-func (a byUniqueTypeName) Len() int { return len(a) }
-func (a byUniqueTypeName) Less(i, j int) bool { return sortName(a[i]) < sortName(a[j]) }
-func (a byUniqueTypeName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-func sortName(t Type) string {
- if named := asNamed(t); named != nil {
- return named.obj.Id()
- }
- return ""
-}
-
-func sortMethods(list []*Func) {
- sort.Sort(byUniqueMethodName(list))
-}
-
-func assertSortedMethods(list []*Func) {
- if !debug {
- panic("internal error: assertSortedMethods called outside debug mode")
- }
- if !sort.IsSorted(byUniqueMethodName(list)) {
- panic("internal error: methods not sorted")
- }
-}
-
-// byUniqueMethodName method lists can be sorted by their unique method names.
-type byUniqueMethodName []*Func
-
-func (a byUniqueMethodName) Len() int { return len(a) }
-func (a byUniqueMethodName) Less(i, j int) bool { return a[i].Id() < a[j].Id() }
-func (a byUniqueMethodName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-func (check *Checker) tag(t *ast.BasicLit) string {
- if t != nil {
- if t.Kind == token.STRING {
- if val, err := strconv.Unquote(t.Value); err == nil {
- return val
- }
- }
- check.invalidAST(t, "incorrect tag syntax: %q", t.Value)
- }
- return ""
-}
-
-func (check *Checker) structType(styp *Struct, e *ast.StructType) {
- list := e.Fields
- if list == nil {
- return
- }
-
- // struct fields and tags
- var fields []*Var
- var tags []string
-
- // for double-declaration checks
- var fset objset
-
- // current field typ and tag
- var typ Type
- var tag string
- add := func(ident *ast.Ident, embedded bool, pos token.Pos) {
- if tag != "" && tags == nil {
- tags = make([]string, len(fields))
- }
- if tags != nil {
- tags = append(tags, tag)
- }
-
- name := ident.Name
- fld := NewField(pos, check.pkg, name, typ, embedded)
- // spec: "Within a struct, non-blank field names must be unique."
- if name == "_" || check.declareInSet(&fset, pos, fld) {
- fields = append(fields, fld)
- check.recordDef(ident, fld)
- }
- }
-
- // addInvalid adds an embedded field of invalid type to the struct for
- // fields with errors; this keeps the number of struct fields in sync
- // with the source as long as the fields are _ or have different names
- // (issue #25627).
- addInvalid := func(ident *ast.Ident, pos token.Pos) {
- typ = Typ[Invalid]
- tag = ""
- add(ident, true, pos)
- }
-
- for _, f := range list.List {
- typ = check.varType(f.Type)
- tag = check.tag(f.Tag)
- if len(f.Names) > 0 {
- // named fields
- for _, name := range f.Names {
- add(name, false, name.Pos())
- }
- } else {
- // embedded field
- // spec: "An embedded type must be specified as a type name T or as a
- // pointer to a non-interface type name *T, and T itself may not be a
- // pointer type."
- pos := f.Type.Pos()
- name := embeddedFieldIdent(f.Type)
- if name == nil {
- // TODO(rFindley): using invalidAST here causes test failures (all
- // errors should have codes). Clean this up.
- check.errorf(f.Type, _Todo, "invalid AST: embedded field type %s has no name", f.Type)
- name = ast.NewIdent("_")
- name.NamePos = pos
- addInvalid(name, pos)
- continue
- }
- add(name, true, pos)
-
- // Because we have a name, typ must be of the form T or *T, where T is the name
- // of a (named or alias) type, and t (= deref(typ)) must be the type of T.
- // We must delay this check to the end because we don't want to instantiate
- // (via under(t)) a possibly incomplete type.
-
- // for use in the closure below
- embeddedTyp := typ
- embeddedPos := f.Type
-
- check.later(func() {
- t, isPtr := deref(embeddedTyp)
- switch t := optype(t).(type) {
- case *Basic:
- if t == Typ[Invalid] {
- // error was reported before
- return
- }
- // unsafe.Pointer is treated like a regular pointer
- if t.kind == UnsafePointer {
- check.errorf(embeddedPos, _InvalidPtrEmbed, "embedded field type cannot be unsafe.Pointer")
- }
- case *Pointer:
- check.errorf(embeddedPos, _InvalidPtrEmbed, "embedded field type cannot be a pointer")
- case *Interface:
- if isPtr {
- check.errorf(embeddedPos, _InvalidPtrEmbed, "embedded field type cannot be a pointer to an interface")
- }
- }
- })
- }
- }
-
- styp.fields = fields
- styp.tags = tags
-}
-
-func embeddedFieldIdent(e ast.Expr) *ast.Ident {
- switch e := e.(type) {
- case *ast.Ident:
- return e
- case *ast.StarExpr:
- // *T is valid, but **T is not
- if _, ok := e.X.(*ast.StarExpr); !ok {
- return embeddedFieldIdent(e.X)
- }
- case *ast.SelectorExpr:
- return e.Sel
- case *ast.IndexExpr:
- return embeddedFieldIdent(e.X)
- }
- return nil // invalid embedded field
-}
-
-func (check *Checker) collectTypeConstraints(pos token.Pos, types []ast.Expr) []Type {
- list := make([]Type, 0, len(types)) // assume all types are correct
- for _, texpr := range types {
- if texpr == nil {
- check.invalidAST(atPos(pos), "missing type constraint")
- continue
- }
- list = append(list, check.varType(texpr))
- }
-
- // Ensure that each type is only present once in the type list. Types may be
- // interfaces, which may not be complete yet. It's ok to do this check at the
- // end because it's not a requirement for correctness of the code.
- // Note: This is a quadratic algorithm, but type lists tend to be short.
- check.later(func() {
- for i, t := range list {
- if t := asInterface(t); t != nil {
- check.completeInterface(types[i].Pos(), t)
- }
- if includes(list[:i], t) {
- check.softErrorf(types[i], _Todo, "duplicate type %s in type list", t)
- }
- }
- })
-
- return list
-}
-
-// includes reports whether typ is in list.
-func includes(list []Type, typ Type) bool {
- for _, e := range list {
- if Identical(typ, e) {
- return true
- }
- }
- return false
-}
diff --git a/src/go/types/unify.go b/src/go/types/unify.go
index db06e21cf7..90a5cf7c72 100644
--- a/src/go/types/unify.go
+++ b/src/go/types/unify.go
@@ -8,7 +8,7 @@ package types
import (
"bytes"
- "go/token"
+ "fmt"
"sort"
)
@@ -38,7 +38,6 @@ import (
// and the respective types inferred for each type parameter.
// A unifier is created by calling newUnifier.
type unifier struct {
- check *Checker
exact bool
x, y tparamsList // x and y must initialized via tparamsList.init
types []Type // inferred types, shared by x and y
@@ -49,8 +48,8 @@ type unifier struct {
// exactly. If exact is not set, a named type's underlying type
// is considered if unification would fail otherwise, and the
// direction of channels is ignored.
-func newUnifier(check *Checker, exact bool) *unifier {
- u := &unifier{check: check, exact: exact}
+func newUnifier(exact bool) *unifier {
+ u := &unifier{exact: exact}
u.x.unifier = u
u.y.unifier = u
return u
@@ -100,7 +99,7 @@ func (d *tparamsList) init(tparams []*TypeName) {
}
if debug {
for i, tpar := range tparams {
- assert(i == tpar.typ.(*_TypeParam).index)
+ assert(i == tpar.typ.(*TypeParam).index)
}
}
d.tparams = tparams
@@ -148,10 +147,17 @@ func (u *unifier) join(i, j int) bool {
// If typ is a type parameter of d, index returns the type parameter index.
// Otherwise, the result is < 0.
func (d *tparamsList) index(typ Type) int {
- if t, ok := typ.(*_TypeParam); ok {
- if i := t.index; i < len(d.tparams) && d.tparams[i].typ == t {
- return i
- }
+ if tpar, ok := typ.(*TypeParam); ok {
+ return tparamIndex(d.tparams, tpar)
+ }
+ return -1
+}
+
+// If tpar is a type parameter in list, tparamIndex returns the type parameter index.
+// Otherwise, the result is < 0. tpar must not be nil.
+func tparamIndex(list []*TypeName, tpar *TypeParam) int {
+ if i := tpar.index; i < len(list) && list[i].typ == tpar {
+ return i
}
return -1
}
@@ -352,25 +358,21 @@ func (u *unifier) nify(x, y Type, p *ifacePair) bool {
u.nify(x.results, y.results, p)
}
- case *_Sum:
- // This should not happen with the current internal use of sum types.
- panic("type inference across sum types not implemented")
+ case *Union:
+ panic("unimplemented: unification with type sets described by types")
case *Interface:
// Two interface types are identical if they have the same set of methods with
// the same names and identical function types. Lower-case method names from
// different packages are always different. The order of the methods is irrelevant.
if y, ok := y.(*Interface); ok {
- // If identical0 is called (indirectly) via an external API entry point
- // (such as Identical, IdenticalIgnoreTags, etc.), check is nil. But in
- // that case, interfaces are expected to be complete and lazy completion
- // here is not needed.
- if u.check != nil {
- u.check.completeInterface(token.NoPos, x)
- u.check.completeInterface(token.NoPos, y)
+ xset := x.typeSet()
+ yset := y.typeSet()
+ if !Identical(xset.types, yset.types) {
+ return false
}
- a := x.allMethods
- b := y.allMethods
+ a := xset.methods
+ b := yset.methods
if len(a) == len(b) {
// Interface types are the only types where cycles can occur
// that are not "terminated" via named types; and such cycles
@@ -448,21 +450,17 @@ func (u *unifier) nify(x, y Type, p *ifacePair) bool {
}
}
- case *_TypeParam:
+ case *TypeParam:
// Two type parameters (which are not part of the type parameters of the
// enclosing type as those are handled in the beginning of this function)
// are identical if they originate in the same declaration.
return x == y
- // case *instance:
- // unreachable since types are expanded
-
case nil:
// avoid a crash in case of nil type
default:
- u.check.dump("### u.nify(%s, %s), u.x.tparams = %s", x, y, u.x.tparams)
- unreachable()
+ panic(fmt.Sprintf("### u.nify(%s, %s), u.x.tparams = %s", x, y, u.x.tparams))
}
return false
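The tparamIndex helper factored out above relies on an invariant rather than a search: a type parameter belongs to a list only if the entry at its own recorded index is that very parameter. A minimal standalone sketch of the same check, using a hypothetical param type instead of the go/types internals:

package main

import "fmt"

// param is a hypothetical stand-in for a type parameter that
// remembers the index at which it was declared.
type param struct {
	name  string
	index int
}

// paramIndex mirrors the idea behind tparamIndex: p is a member of
// list only if list[p.index] is p itself; otherwise report -1.
func paramIndex(list []*param, p *param) int {
	if i := p.index; i < len(list) && list[i] == p {
		return i
	}
	return -1
}

func main() {
	a := &param{"A", 0}
	b := &param{"B", 1}
	own := []*param{a, b}
	other := []*param{{"X", 0}, {"Y", 1}}

	fmt.Println(paramIndex(own, b))   // 1: b is declared in this list
	fmt.Println(paramIndex(other, b)) // -1: same index, different declaration
}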
diff --git a/src/go/types/union.go b/src/go/types/union.go
new file mode 100644
index 0000000000..a56f9d29f3
--- /dev/null
+++ b/src/go/types/union.go
@@ -0,0 +1,271 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "go/ast"
+ "go/token"
+)
+
+// ----------------------------------------------------------------------------
+// API
+
+// A Union represents a union of terms.
+type Union struct {
+ terms []*term
+}
+
+// NewUnion returns a new Union type with the given terms (types[i], tilde[i]).
+// The lengths of both arguments must match. An empty union represents the set
+// of no types.
+func NewUnion(types []Type, tilde []bool) *Union { return newUnion(types, tilde) }
+
+func (u *Union) IsEmpty() bool { return len(u.terms) == 0 }
+func (u *Union) NumTerms() int { return len(u.terms) }
+func (u *Union) Term(i int) (Type, bool) { t := u.terms[i]; return t.typ, t.tilde }
+
+func (u *Union) Underlying() Type { return u }
+func (u *Union) String() string { return TypeString(u, nil) }
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+var emptyUnion = new(Union)
+
+func newUnion(types []Type, tilde []bool) *Union {
+ assert(len(types) == len(tilde))
+ if len(types) == 0 {
+ return emptyUnion
+ }
+ t := new(Union)
+ t.terms = make([]*term, len(types))
+ for i, typ := range types {
+ t.terms[i] = &term{tilde[i], typ}
+ }
+ return t
+}
+
+// is reports whether f returns true for all terms of u.
+func (u *Union) is(f func(*term) bool) bool {
+ if u.IsEmpty() {
+ return false
+ }
+ for _, t := range u.terms {
+ if !f(t) {
+ return false
+ }
+ }
+ return true
+}
+
+// underIs reports whether f returns true for the underlying types of all terms of u.
+func (u *Union) underIs(f func(Type) bool) bool {
+ if u.IsEmpty() {
+ return false
+ }
+ for _, t := range u.terms {
+ if !f(under(t.typ)) {
+ return false
+ }
+ }
+ return true
+}
+
+func parseUnion(check *Checker, tlist []ast.Expr) Type {
+ var types []Type
+ var tilde []bool
+ for _, x := range tlist {
+ t, d := parseTilde(check, x)
+ if len(tlist) == 1 && !d {
+ return t // single type
+ }
+ types = append(types, t)
+ tilde = append(tilde, d)
+ }
+
+ // Ensure that each type is only present once in the type list.
+ // It's ok to do this check later because it's not a requirement
+ // for correctness of the code.
+ // Note: This is a quadratic algorithm, but unions tend to be short.
+ check.later(func() {
+ for i, t := range types {
+ t := expand(t)
+ if t == Typ[Invalid] {
+ continue
+ }
+
+ x := tlist[i]
+ pos := x.Pos()
+ // We may not know the position of x if it was a typechecker-
+ // introduced ~T term for a type list entry T. Use the position
+ // of T instead.
+ // TODO(rfindley) remove this test once we don't support type lists anymore
+ if !pos.IsValid() {
+ if op, _ := x.(*ast.UnaryExpr); op != nil {
+ pos = op.X.Pos()
+ }
+ }
+
+ u := under(t)
+ f, _ := u.(*Interface)
+ if tilde[i] {
+ if f != nil {
+ check.errorf(x, _Todo, "invalid use of ~ (%s is an interface)", t)
+ continue // don't report another error for t
+ }
+
+ if !Identical(u, t) {
+ check.errorf(x, _Todo, "invalid use of ~ (underlying type of %s is %s)", t, u)
+ continue // don't report another error for t
+ }
+ }
+
+ // Stand-alone embedded interfaces are ok and are handled by the single-type case
+ // in the beginning. Embedded interfaces with tilde are excluded above. If we reach
+ // here, we must have at least two terms in the union.
+ if f != nil && !f.typeSet().IsTypeSet() {
+ check.errorf(atPos(pos), _Todo, "cannot use %s in union (interface contains methods)", t)
+ continue // don't report another error for t
+ }
+
+ // Complain about duplicate entries a|a, but also a|~a, and ~a|~a.
+ // TODO(gri) We should also exclude myint|~int since myint is included in ~int.
+ if includes(types[:i], t) {
+ // TODO(rfindley) this currently doesn't print the ~ if present
+ check.softErrorf(atPos(pos), _Todo, "duplicate term %s in union element", t)
+ }
+ }
+ })
+
+ return newUnion(types, tilde)
+}
+
+func parseTilde(check *Checker, x ast.Expr) (typ Type, tilde bool) {
+ if op, _ := x.(*ast.UnaryExpr); op != nil && op.Op == token.TILDE {
+ x = op.X
+ tilde = true
+ }
+ typ = check.anyType(x)
+ // embedding stand-alone type parameters is not permitted (issue #47127).
+ if _, ok := under(typ).(*TypeParam); ok {
+ check.error(x, _Todo, "cannot embed a type parameter")
+ typ = Typ[Invalid]
+ }
+ return
+}
+
+// intersect computes the intersection of the types x and y.
+// A nil type stands for the set of all types; an empty union
+// stands for the set of no types.
+func intersect(x, y Type) (r Type) {
+ // If one of the types is nil (no restrictions)
+ // the result is the other type.
+ switch {
+ case x == nil:
+ return y
+ case y == nil:
+ return x
+ }
+
+ // Compute the terms which are in both x and y.
+ // TODO(gri) This is not correct as it may not always compute
+ // the "largest" intersection. For instance, for
+ // x = myInt|~int, y = ~int
+ // we get the result myInt but we should get ~int.
+ xu, _ := x.(*Union)
+ yu, _ := y.(*Union)
+ switch {
+ case xu != nil && yu != nil:
+ return &Union{intersectTerms(xu.terms, yu.terms)}
+
+ case xu != nil:
+ if r, _ := xu.intersect(y, false); r != nil {
+ return y
+ }
+
+ case yu != nil:
+ if r, _ := yu.intersect(x, false); r != nil {
+ return x
+ }
+
+ default: // xu == nil && yu == nil
+ if Identical(x, y) {
+ return x
+ }
+ }
+
+ return emptyUnion
+}
+
+// includes reports whether typ is in list.
+func includes(list []Type, typ Type) bool {
+ for _, e := range list {
+ if Identical(typ, e) {
+ return true
+ }
+ }
+ return false
+}
+
+// intersect computes the intersection of the union u and term (y, yt)
+// and returns the intersection term, if any. Otherwise the result is
+// (nil, false).
+// TODO(gri) this needs to be cleaned up/removed once we switch to lazy
+// union type set computation.
+func (u *Union) intersect(y Type, yt bool) (Type, bool) {
+ under_y := under(y)
+ for _, x := range u.terms {
+ xt := x.tilde
+ // determine which types xx, yy to compare
+ xx := x.typ
+ if yt {
+ xx = under(xx)
+ }
+ yy := y
+ if xt {
+ yy = under_y
+ }
+ if Identical(xx, yy) {
+ // T ∩ T = T
+ // T ∩ ~t = T
+ // ~t ∩ T = T
+ // ~t ∩ ~t = ~t
+ return xx, xt && yt
+ }
+ }
+ return nil, false
+}
+
+func identicalTerms(list1, list2 []*term) bool {
+ if len(list1) != len(list2) {
+ return false
+ }
+ // Every term in list1 must be in list2.
+ // Quadratic algorithm, but probably good enough for now.
+ // TODO(gri) we need a fast quick type ID/hash for all types.
+L:
+ for _, x := range list1 {
+ for _, y := range list2 {
+ if x.equal(y) {
+ continue L // x is in list2
+ }
+ }
+ return false
+ }
+ return true
+}
+
+func intersectTerms(list1, list2 []*term) (list []*term) {
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ for _, x := range list1 {
+ for _, y := range list2 {
+ if r := x.intersect(y); r != nil {
+ list = append(list, r)
+ }
+ }
+ }
+ return
+}
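The comment table inside Union.intersect above (T ∩ T = T, T ∩ ~t = T, ~t ∩ ~t = ~t) boils down to this: two terms intersect when their types match once a ~ term is compared by underlying type, and the ~ survives only if both sides carried it. A hedged standalone sketch of just that rule, with an illustrative uterm type and a toy underlying-type table, not the go/types implementation:

package main

import "fmt"

// uterm is an illustrative union term: a type name plus a tilde flag
// meaning "any type whose underlying type is this one".
type uterm struct {
	typ   string
	tilde bool
}

// underlying is a toy lookup standing in for under(t).
var underlying = map[string]string{"myInt": "int", "int": "int"}

// intersectTerm applies the rule from the comment table: compare a
// concrete type against the underlying type of a ~ term, and keep the
// tilde only if both inputs had it.
func intersectTerm(x, y uterm) (uterm, bool) {
	xx, yy := x.typ, y.typ
	if y.tilde {
		xx = underlying[xx]
	}
	if x.tilde {
		yy = underlying[yy]
	}
	if xx != yy {
		return uterm{}, false
	}
	switch {
	case !x.tilde: // T ∩ T = T, T ∩ ~t = T
		return uterm{x.typ, false}, true
	case !y.tilde:
		return uterm{y.typ, false}, true
	default: // ~t ∩ ~t = ~t
		return uterm{x.typ, true}, true
	}
}

func main() {
	fmt.Println(intersectTerm(uterm{"myInt", false}, uterm{"int", true}))  // {myInt false} true
	fmt.Println(intersectTerm(uterm{"int", true}, uterm{"int", true}))     // {int true} true
	fmt.Println(intersectTerm(uterm{"myInt", false}, uterm{"int", false})) // no intersection
}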
diff --git a/src/go/types/universe.go b/src/go/types/universe.go
index d7feb2c609..83c54c8cd3 100644
--- a/src/go/types/universe.go
+++ b/src/go/types/universe.go
@@ -8,7 +8,6 @@ package types
import (
"go/constant"
- "go/internal/typeparams"
"go/token"
"strings"
)
@@ -22,11 +21,12 @@ var Universe *Scope
var Unsafe *Package
var (
- universeIota *Const
- universeByte *Basic // uint8 alias, but has name "byte"
- universeRune *Basic // int32 alias, but has name "rune"
- universeAny *Interface
- universeError *Named
+ universeIota Object
+ universeByte Type // uint8 alias, but has name "byte"
+ universeRune Type // int32 alias, but has name "rune"
+ universeAny Object
+ universeError Type
+ universeComparable Object
)
// Typ contains the predeclared *Basic types indexed by their
@@ -79,20 +79,30 @@ func defPredeclaredTypes() {
def(NewTypeName(token.NoPos, nil, t.name, t))
}
- // any
- // (Predeclared and entered into universe scope so we do all the
- // usual checks; but removed again from scope later since it's
- // only visible as constraint in a type parameter list.)
+ // type any = interface{}
def(NewTypeName(token.NoPos, nil, "any", &emptyInterface))
- // Error has a nil package in its qualified name since it is in no package
+ // type error interface{ Error() string }
{
+ obj := NewTypeName(token.NoPos, nil, "error", nil)
+ obj.setColor(black)
res := NewVar(token.NoPos, nil, "", Typ[String])
- sig := &Signature{results: NewTuple(res)}
+ sig := NewSignature(nil, nil, NewTuple(res), false)
err := NewFunc(token.NoPos, nil, "Error", sig)
- typ := &Named{underlying: NewInterfaceType([]*Func{err}, nil).Complete()}
+ ityp := &Interface{obj, []*Func{err}, nil, nil, true, nil}
+ computeTypeSet(nil, token.NoPos, ityp) // prevent races due to lazy computation of tset
+ typ := NewNamed(obj, ityp, nil)
sig.recv = NewVar(token.NoPos, nil, "", typ)
- def(NewTypeName(token.NoPos, nil, "error", typ))
+ def(obj)
+ }
+
+ // type comparable interface{ /* type set marked comparable */ }
+ {
+ obj := NewTypeName(token.NoPos, nil, "comparable", nil)
+ obj.setColor(black)
+ ityp := &Interface{obj, nil, nil, nil, true, &_TypeSet{true, nil, nil}}
+ NewNamed(obj, ityp, nil)
+ def(obj)
}
}
@@ -202,33 +212,6 @@ func DefPredeclaredTestFuncs() {
def(newBuiltin(_Trace))
}
-func defPredeclaredComparable() {
- // The "comparable" interface can be imagined as defined like
- //
- // type comparable interface {
- // == () untyped bool
- // != () untyped bool
- // }
- //
- // == and != cannot be user-declared but we can declare
- // a magic method == and check for its presence when needed.
-
- // Define interface { == () }. We don't care about the signature
- // for == so leave it empty except for the receiver, which is
- // set up later to match the usual interface method assumptions.
- sig := new(Signature)
- eql := NewFunc(token.NoPos, nil, "==", sig)
- iface := NewInterfaceType([]*Func{eql}, nil).Complete()
-
- // set up the defined type for the interface
- obj := NewTypeName(token.NoPos, nil, "comparable", nil)
- named := NewNamed(obj, iface, nil)
- obj.color_ = black
- sig.recv = NewVar(token.NoPos, nil, "", named) // complete == signature
-
- def(obj)
-}
-
func init() {
Universe = NewScope(nil, token.NoPos, token.NoPos, "universe")
Unsafe = NewPackage("unsafe", "unsafe")
@@ -238,18 +221,13 @@ func init() {
defPredeclaredConsts()
defPredeclaredNil()
defPredeclaredFuncs()
- if typeparams.Enabled {
- defPredeclaredComparable()
- }
-
- universeIota = Universe.Lookup("iota").(*Const)
- universeByte = Universe.Lookup("byte").(*TypeName).typ.(*Basic)
- universeRune = Universe.Lookup("rune").(*TypeName).typ.(*Basic)
- universeAny = Universe.Lookup("any").(*TypeName).typ.(*Interface)
- universeError = Universe.Lookup("error").(*TypeName).typ.(*Named)
- // "any" is only visible as constraint in a type parameter list
- delete(Universe.elems, "any")
+ universeIota = Universe.Lookup("iota")
+ universeByte = Universe.Lookup("byte").Type()
+ universeRune = Universe.Lookup("rune").Type()
+ universeAny = Universe.Lookup("any")
+ universeError = Universe.Lookup("error").Type()
+ universeComparable = Universe.Lookup("comparable")
}
// Objects with names containing blanks are internal and not entered into
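The error block above has to reach into unexported fields because it runs inside the package before the universe scope exists. Purely as an illustration of the same shape from the outside, here is a hedged sketch that builds an unrelated throwaway type of the form type myError interface{ Error() string } using only exported go/types constructors; it is not how universe.go itself works:

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	// Defined type first, underlying filled in below, mirroring the
	// obj -> named -> method -> interface wiring in the diff.
	obj := types.NewTypeName(token.NoPos, nil, "myError", nil)
	named := types.NewNamed(obj, nil, nil)

	recv := types.NewVar(token.NoPos, nil, "", named)
	res := types.NewVar(token.NoPos, nil, "", types.Typ[types.String])
	sig := types.NewSignature(recv, nil, types.NewTuple(res), false)
	meth := types.NewFunc(token.NoPos, nil, "Error", sig)

	iface := types.NewInterfaceType([]*types.Func{meth}, nil)
	iface.Complete()
	named.SetUnderlying(iface)

	fmt.Println(obj.Type())         // myError
	fmt.Println(named.Underlying()) // interface{Error() string}
}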
diff --git a/src/internal/abi/abi.go b/src/internal/abi/abi.go
index aaff9cece3..eadff248d9 100644
--- a/src/internal/abi/abi.go
+++ b/src/internal/abi/abi.go
@@ -4,7 +4,10 @@
package abi
-import "unsafe"
+import (
+ "internal/goarch"
+ "unsafe"
+)
// RegArgs is a struct that has space for each argument
// and return value register on the current architecture.
@@ -33,6 +36,46 @@ type RegArgs struct {
ReturnIsPtr IntArgRegBitmap
}
+// IntRegArgAddr returns a pointer inside of r.Ints[reg] that is appropriately
+// offset for an argument of size argSize.
+//
+// argSize must be non-zero, fit in a register, and be a power of two.
+//
+// This method is a helper for dealing with the endianness of different CPU
+// architectures, since sub-word-sized arguments in big endian architectures
+// need to be "aligned" to the upper edge of the register to be interpreted
+// by the CPU correctly.
+func (r *RegArgs) IntRegArgAddr(reg int, argSize uintptr) unsafe.Pointer {
+ if argSize > goarch.PtrSize || argSize == 0 || argSize&(argSize-1) != 0 {
+ panic("invalid argSize")
+ }
+ offset := uintptr(0)
+ if goarch.BigEndian {
+ offset = goarch.PtrSize - argSize
+ }
+ return unsafe.Pointer(uintptr(unsafe.Pointer(&r.Ints[reg])) + offset)
+}
+
+// FloatRegArgAddr returns a pointer inside of r.Floats[reg] that is appropriately
+// offset for an argument of size argSize.
+//
+// argSize must be non-zero, fit in a register, and be a power of two.
+//
+// This method is a helper for dealing with the endianness of different CPU
+// architectures, since sub-word-sized arguments in big endian architectures
+// need to be "aligned" to the upper edge of the register to be interpreted
+// by the CPU correctly.
+func (r *RegArgs) FloatRegArgAddr(reg int, argSize uintptr) unsafe.Pointer {
+ if argSize > EffectiveFloatRegSize || argSize == 0 || argSize&(argSize-1) != 0 {
+ panic("invalid argSize")
+ }
+ offset := uintptr(0)
+ if goarch.BigEndian {
+ offset = EffectiveFloatRegSize - argSize
+ }
+ return unsafe.Pointer(uintptr(unsafe.Pointer(&r.Floats[reg])) + offset)
+}
+
// IntArgRegBitmap is a bitmap large enough to hold one bit per
// integer argument/return register.
type IntArgRegBitmap [(IntArgRegs + 7) / 8]uint8
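Both *RegArgAddr helpers encode a single rule: a value narrower than the register slot sits at offset 0 on little-endian machines and at regSize-argSize on big-endian ones, so the CPU reads it from the high-order end of the register. A tiny self-contained sketch of just that offset computation (the function name is illustrative, not part of internal/abi):

package main

import "fmt"

// subWordOffset returns the byte offset of an argSize-byte value
// inside a regSize-byte register slot: 0 on little-endian,
// regSize-argSize on big-endian, with argSize required to be a
// power of two that fits in the slot, as in the helpers above.
func subWordOffset(regSize, argSize uintptr, bigEndian bool) uintptr {
	if argSize == 0 || argSize > regSize || argSize&(argSize-1) != 0 {
		panic("invalid argSize")
	}
	if bigEndian {
		return regSize - argSize
	}
	return 0
}

func main() {
	fmt.Println(subWordOffset(8, 4, false)) // 0: little-endian value starts at the low byte
	fmt.Println(subWordOffset(8, 4, true))  // 4: big-endian 4-byte value sits in the high half
	fmt.Println(subWordOffset(8, 1, true))  // 7: a single byte is the last byte of the slot
}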
diff --git a/src/internal/abi/abi_amd64.go b/src/internal/abi/abi_amd64.go
index aff71f6a58..d3c5678223 100644
--- a/src/internal/abi/abi_amd64.go
+++ b/src/internal/abi/abi_amd64.go
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build goexperiment.regabireflect
-// +build goexperiment.regabireflect
-
package abi
const (
diff --git a/src/internal/abi/abi_arm64.go b/src/internal/abi/abi_arm64.go
new file mode 100644
index 0000000000..7544d7506e
--- /dev/null
+++ b/src/internal/abi/abi_arm64.go
@@ -0,0 +1,20 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.regabireflect
+// +build goexperiment.regabireflect
+
+package abi
+
+const (
+ // See abi_generic.go.
+
+ // R0 - R15.
+ IntArgRegs = 16
+
+ // F0 - F15.
+ FloatArgRegs = 16
+
+ EffectiveFloatRegSize = 8
+)
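With IntArgRegs = 16, the IntArgRegBitmap declared at the end of abi.go above, [(IntArgRegs + 7) / 8]uint8, rounds up to two bytes, one bit per integer register. A one-line check of that arithmetic:

package main

import "fmt"

func main() {
	const intArgRegs = 16 // arm64: R0 - R15, per the constants above
	// Same rounding as IntArgRegBitmap [(IntArgRegs + 7) / 8]uint8.
	fmt.Println((intArgRegs + 7) / 8) // 2 bytes of bitmap
}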
diff --git a/src/internal/abi/abi_generic.go b/src/internal/abi/abi_generic.go
index 69400f930f..e8f94f805f 100644
--- a/src/internal/abi/abi_generic.go
+++ b/src/internal/abi/abi_generic.go
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !goexperiment.regabireflect
-// +build !goexperiment.regabireflect
+//go:build !goexperiment.regabireflect && !amd64
+// +build !goexperiment.regabireflect,!amd64
package abi
diff --git a/src/internal/buildcfg/exp.go b/src/internal/buildcfg/exp.go
index 9a60253aab..384f2f96af 100644
--- a/src/internal/buildcfg/exp.go
+++ b/src/internal/buildcfg/exp.go
@@ -46,13 +46,11 @@ var FramePointerEnabled = GOARCH == "amd64" || GOARCH == "arm64"
//
// TODO(mdempsky): Move to internal/goexperiment.
func ParseGOEXPERIMENT(goos, goarch, goexp string) (flags, baseline goexperiment.Flags, err error) {
- regabiSupported := goarch == "amd64" && (goos == "android" || goos == "linux" || goos == "darwin" || goos == "windows")
+ regabiSupported := goarch == "amd64" || goarch == "arm64"
baseline = goexperiment.Flags{
RegabiWrappers: regabiSupported,
- RegabiG: regabiSupported,
RegabiReflect: regabiSupported,
- RegabiDefer: regabiSupported,
RegabiArgs: regabiSupported,
}
@@ -78,9 +76,7 @@ func ParseGOEXPERIMENT(goos, goarch, goexp string) (flags, baseline goexperiment
// do the right thing.
names["regabi"] = func(v bool) {
flags.RegabiWrappers = v
- flags.RegabiG = v
flags.RegabiReflect = v
- flags.RegabiDefer = v
flags.RegabiArgs = v
}
@@ -109,20 +105,20 @@ func ParseGOEXPERIMENT(goos, goarch, goexp string) (flags, baseline goexperiment
}
}
- // regabi is only supported on amd64.
- if goarch != "amd64" {
- flags.RegabiWrappers = false
- flags.RegabiG = false
+ // regabi is always enabled on amd64.
+ if goarch == "amd64" {
+ flags.RegabiWrappers = true
+ flags.RegabiReflect = true
+ flags.RegabiArgs = true
+ }
+ // regabi is only supported on amd64 and arm64.
+ if goarch != "amd64" && goarch != "arm64" {
flags.RegabiReflect = false
- flags.RegabiDefer = false
flags.RegabiArgs = false
}
// Check regabi dependencies.
- if flags.RegabiG && !flags.RegabiWrappers {
- err = fmt.Errorf("GOEXPERIMENT regabig requires regabiwrappers")
- }
- if flags.RegabiArgs && !(flags.RegabiWrappers && flags.RegabiG && flags.RegabiReflect && flags.RegabiDefer) {
- err = fmt.Errorf("GOEXPERIMENT regabiargs requires regabiwrappers,regabig,regabireflect,regabidefer")
+ if flags.RegabiArgs && !(flags.RegabiWrappers && flags.RegabiReflect) {
+ err = fmt.Errorf("GOEXPERIMENT regabiargs requires regabiwrappers,regabireflect")
}
return
}
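After this change the only cross-flag constraint left in ParseGOEXPERIMENT is that regabiargs needs regabiwrappers and regabireflect. A hedged standalone sketch of that grouping and validation over a comma-separated GOEXPERIMENT-style string; the flag names follow the diff, but the parsing helper is illustrative and not the buildcfg API:

package main

import (
	"fmt"
	"strings"
)

type regabiFlags struct {
	Wrappers, Reflect, Args bool
}

// parseRegabi flips the regabi sub-flags named in a comma-separated
// string: "regabi" toggles the whole group and a "no" prefix clears a
// flag, then the regabiargs dependency is checked as in the diff.
func parseRegabi(goexp string) (regabiFlags, error) {
	var f regabiFlags
	for _, name := range strings.Split(goexp, ",") {
		if name == "" {
			continue
		}
		val := true
		if strings.HasPrefix(name, "no") {
			name, val = name[2:], false
		}
		switch name {
		case "regabi":
			f.Wrappers, f.Reflect, f.Args = val, val, val
		case "regabiwrappers":
			f.Wrappers = val
		case "regabireflect":
			f.Reflect = val
		case "regabiargs":
			f.Args = val
		default:
			return f, fmt.Errorf("unknown GOEXPERIMENT %s", name)
		}
	}
	if f.Args && !(f.Wrappers && f.Reflect) {
		return f, fmt.Errorf("GOEXPERIMENT regabiargs requires regabiwrappers,regabireflect")
	}
	return f, nil
}

func main() {
	fmt.Println(parseRegabi("regabi"))                 // all three flags on
	fmt.Println(parseRegabi("regabiargs"))             // error: prerequisites missing
	fmt.Println(parseRegabi("regabi,noregabireflect")) // error: args on, reflect cleared
}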
diff --git a/src/internal/bytealg/compare_amd64.s b/src/internal/bytealg/compare_amd64.s
index 8295acb03a..4ccaca5e87 100644
--- a/src/internal/bytealg/compare_amd64.s
+++ b/src/internal/bytealg/compare_amd64.s
@@ -6,7 +6,6 @@
#include "textflag.h"
TEXT ·Compare<ABIInternal>(SB),NOSPLIT,$0-56
-#ifdef GOEXPERIMENT_regabiargs
// AX = a_base (want in SI)
// BX = a_len (want in BX)
// CX = a_cap (unused)
@@ -15,17 +14,9 @@ TEXT ·Compare<ABIInternal>(SB),NOSPLIT,$0-56
// R8 = b_cap (unused)
MOVQ SI, DX
MOVQ AX, SI
-#else
- MOVQ a_base+0(FP), SI
- MOVQ a_len+8(FP), BX
- MOVQ b_base+24(FP), DI
- MOVQ b_len+32(FP), DX
- LEAQ ret+48(FP), R9
-#endif
JMP cmpbody<>(SB)
TEXT runtime·cmpstring<ABIInternal>(SB),NOSPLIT,$0-40
-#ifdef GOEXPERIMENT_regabiargs
// AX = a_base (want in SI)
// BX = a_len (want in BX)
// CX = b_base (want in DI)
@@ -33,13 +24,6 @@ TEXT runtime·cmpstring<ABIInternal>(SB),NOSPLIT,$0-40
MOVQ AX, SI
MOVQ DI, DX
MOVQ CX, DI
-#else
- MOVQ a_base+0(FP), SI
- MOVQ a_len+8(FP), BX
- MOVQ b_base+16(FP), DI
- MOVQ b_len+24(FP), DX
- LEAQ ret+32(FP), R9
-#endif
JMP cmpbody<>(SB)
// input:
@@ -47,12 +31,8 @@ TEXT runtime·cmpstring<ABIInternal>(SB),NOSPLIT,$0-40
// DI = b
// BX = alen
// DX = blen
-#ifndef GOEXPERIMENT_regabiargs
-// R9 = address of output word (stores -1/0/1 here)
-#else
// output:
// AX = output (-1/0/1)
-#endif
TEXT cmpbody<>(SB),NOSPLIT,$0-0
CMPQ SI, DI
JEQ allsame
@@ -100,9 +80,6 @@ diff16:
CMPB CX, (DI)(BX*1)
SETHI AX
LEAQ -1(AX*2), AX // convert 1/0 to +1/-1
-#ifndef GOEXPERIMENT_regabiargs
- MOVQ AX, (R9)
-#endif
RET
// 0 through 16 bytes left, alen>=8, blen>=8
@@ -128,9 +105,6 @@ diff8:
SHRQ CX, AX // move a's bit to bottom
ANDQ $1, AX // mask bit
LEAQ -1(AX*2), AX // 1/0 => +1/-1
-#ifndef GOEXPERIMENT_regabiargs
- MOVQ AX, (R9)
-#endif
RET
// 0-7 bytes in common
@@ -169,9 +143,6 @@ di_finish:
SHRQ CX, SI // move a's bit to bottom
ANDQ $1, SI // mask bit
LEAQ -1(SI*2), AX // 1/0 => +1/-1
-#ifndef GOEXPERIMENT_regabiargs
- MOVQ AX, (R9)
-#endif
RET
allsame:
@@ -181,9 +152,6 @@ allsame:
SETGT AX // 1 if alen > blen
SETEQ CX // 1 if alen == blen
LEAQ -1(CX)(AX*2), AX // 1,0,-1 result
-#ifndef GOEXPERIMENT_regabiargs
- MOVQ AX, (R9)
-#endif
RET
// this works for >= 64 bytes of data.
diff --git a/src/internal/bytealg/compare_arm64.s b/src/internal/bytealg/compare_arm64.s
index 56d56f241e..5a80207258 100644
--- a/src/internal/bytealg/compare_arm64.s
+++ b/src/internal/bytealg/compare_arm64.s
@@ -5,65 +5,88 @@
#include "go_asm.h"
#include "textflag.h"
-TEXT ·Compare(SB),NOSPLIT|NOFRAME,$0-56
- MOVD a_base+0(FP), R2
- MOVD a_len+8(FP), R0
- MOVD b_base+24(FP), R3
- MOVD b_len+32(FP), R1
+TEXT ·Compare<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-56
+#ifdef GOEXPERIMENT_regabiargs
+ // R0 = a_base (want in R0)
+ // R1 = a_len (want in R1)
+ // R2 = a_cap (unused)
+ // R3 = b_base (want in R2)
+ // R4 = b_len (want in R3)
+ // R5 = b_cap (unused)
+ MOVD R3, R2
+ MOVD R4, R3
+#else
+ MOVD a_base+0(FP), R0
+ MOVD a_len+8(FP), R1
+ MOVD b_base+24(FP), R2
+ MOVD b_len+32(FP), R3
MOVD $ret+48(FP), R7
+#endif
B cmpbody<>(SB)
-TEXT runtime·cmpstring(SB),NOSPLIT|NOFRAME,$0-40
- MOVD a_base+0(FP), R2
- MOVD a_len+8(FP), R0
- MOVD b_base+16(FP), R3
- MOVD b_len+24(FP), R1
+TEXT runtime·cmpstring<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-40
+#ifdef GOEXPERIMENT_regabiargs
+ // R0 = a_base
+ // R1 = a_len
+ // R2 = b_base
+ // R3 = b_len
+#else
+ MOVD a_base+0(FP), R0
+ MOVD a_len+8(FP), R1
+ MOVD b_base+16(FP), R2
+ MOVD b_len+24(FP), R3
MOVD $ret+32(FP), R7
+#endif
B cmpbody<>(SB)
// On entry:
-// R0 is the length of a
-// R1 is the length of b
-// R2 points to the start of a
-// R3 points to the start of b
+// R0 points to the start of a
+// R1 is the length of a
+// R2 points to the start of b
+// R3 is the length of b
+#ifndef GOEXPERIMENT_regabiargs
// R7 points to return value (-1/0/1 will be written here)
+#endif
//
// On exit:
+#ifdef GOEXPERIMENT_regabiargs
+// R0 is the result
+#endif
// R4, R5, R6, R8, R9 and R10 are clobbered
TEXT cmpbody<>(SB),NOSPLIT|NOFRAME,$0-0
- CMP R2, R3
+ CMP R0, R2
BEQ samebytes // same starting pointers; compare lengths
- CMP R0, R1
- CSEL LT, R1, R0, R6 // R6 is min(R0, R1)
+ CMP R1, R3
+ CSEL LT, R3, R1, R6 // R6 is min(R1, R3)
CBZ R6, samebytes
BIC $0xf, R6, R10
CBZ R10, small // length < 16
- ADD R2, R10 // end of chunk16
+ ADD R0, R10 // end of chunk16
// length >= 16
chunk16_loop:
- LDP.P 16(R2), (R4, R8)
- LDP.P 16(R3), (R5, R9)
+ LDP.P 16(R0), (R4, R8)
+ LDP.P 16(R2), (R5, R9)
CMP R4, R5
BNE cmp
CMP R8, R9
BNE cmpnext
- CMP R10, R2
+ CMP R10, R0
BNE chunk16_loop
AND $0xf, R6, R6
CBZ R6, samebytes
SUBS $8, R6
BLT tail
// the length of tail > 8 bytes
- MOVD.P 8(R2), R4
- MOVD.P 8(R3), R5
+ MOVD.P 8(R0), R4
+ MOVD.P 8(R2), R5
CMP R4, R5
BNE cmp
SUB $8, R6
// compare last 8 bytes
tail:
- MOVD (R2)(R6), R4
- MOVD (R3)(R6), R5
+ MOVD (R0)(R6), R4
+ MOVD (R2)(R6), R5
CMP R4, R5
BEQ samebytes
cmp:
@@ -71,52 +94,56 @@ cmp:
REV R5, R5
CMP R4, R5
ret:
- MOVD $1, R4
- CNEG HI, R4, R4
- MOVD R4, (R7)
+ MOVD $1, R0
+ CNEG HI, R0, R0
+#ifndef GOEXPERIMENT_regabiargs
+ MOVD R0, (R7)
+#endif
RET
small:
TBZ $3, R6, lt_8
- MOVD (R2), R4
- MOVD (R3), R5
+ MOVD (R0), R4
+ MOVD (R2), R5
CMP R4, R5
BNE cmp
SUBS $8, R6
BEQ samebytes
+ ADD $8, R0
ADD $8, R2
- ADD $8, R3
SUB $8, R6
B tail
lt_8:
TBZ $2, R6, lt_4
- MOVWU (R2), R4
- MOVWU (R3), R5
+ MOVWU (R0), R4
+ MOVWU (R2), R5
CMPW R4, R5
BNE cmp
SUBS $4, R6
BEQ samebytes
+ ADD $4, R0
ADD $4, R2
- ADD $4, R3
lt_4:
TBZ $1, R6, lt_2
- MOVHU (R2), R4
- MOVHU (R3), R5
+ MOVHU (R0), R4
+ MOVHU (R2), R5
CMPW R4, R5
BNE cmp
+ ADD $2, R0
ADD $2, R2
- ADD $2, R3
lt_2:
TBZ $0, R6, samebytes
one:
- MOVBU (R2), R4
- MOVBU (R3), R5
+ MOVBU (R0), R4
+ MOVBU (R2), R5
CMPW R4, R5
BNE ret
samebytes:
- CMP R1, R0
- CSET NE, R4
- CNEG LO, R4, R4
- MOVD R4, (R7)
+ CMP R3, R1
+ CSET NE, R0
+ CNEG LO, R0, R0
+#ifndef GOEXPERIMENT_regabiargs
+ MOVD R0, (R7)
+#endif
RET
cmpnext:
REV R8, R4
diff --git a/src/internal/bytealg/equal_amd64.s b/src/internal/bytealg/equal_amd64.s
index 6f12d2a169..dd46e2e0fd 100644
--- a/src/internal/bytealg/equal_amd64.s
+++ b/src/internal/bytealg/equal_amd64.s
@@ -7,7 +7,6 @@
// memequal(a, b unsafe.Pointer, size uintptr) bool
TEXT runtime·memequal<ABIInternal>(SB),NOSPLIT,$0-25
-#ifdef GOEXPERIMENT_regabiargs
// AX = a (want in SI)
// BX = b (want in DI)
// CX = size (want in BX)
@@ -20,22 +19,9 @@ neq:
MOVQ BX, DI
MOVQ CX, BX
JMP memeqbody<>(SB)
-#else
- MOVQ a+0(FP), SI
- MOVQ b+8(FP), DI
- CMPQ SI, DI
- JEQ eq
- MOVQ size+16(FP), BX
- LEAQ ret+24(FP), AX
- JMP memeqbody<>(SB)
-eq:
- MOVB $1, ret+24(FP)
- RET
-#endif
// memequal_varlen(a, b unsafe.Pointer) bool
TEXT runtime·memequal_varlen<ABIInternal>(SB),NOSPLIT,$0-17
-#ifdef GOEXPERIMENT_regabiargs
// AX = a (want in SI)
// BX = b (want in DI)
// 8(DX) = size (want in BX)
@@ -48,29 +34,13 @@ neq:
MOVQ BX, DI
MOVQ 8(DX), BX // compiler stores size at offset 8 in the closure
JMP memeqbody<>(SB)
-#else
- MOVQ a+0(FP), SI
- MOVQ b+8(FP), DI
- CMPQ SI, DI
- JEQ eq
- MOVQ 8(DX), BX // compiler stores size at offset 8 in the closure
- LEAQ ret+16(FP), AX
- JMP memeqbody<>(SB)
-eq:
- MOVB $1, ret+16(FP)
- RET
-#endif
// Input:
// a in SI
// b in DI
// count in BX
-#ifndef GOEXPERIMENT_regabiargs
-// address of result byte in AX
-#else
// Output:
// result in AX
-#endif
TEXT memeqbody<>(SB),NOSPLIT,$0-0
CMPQ BX, $8
JB small
@@ -104,11 +74,7 @@ hugeloop:
SUBQ $64, BX
CMPL DX, $0xffff
JEQ hugeloop
-#ifdef GOEXPERIMENT_regabiargs
XORQ AX, AX // return 0
-#else
- MOVB $0, (AX)
-#endif
RET
// 64 bytes at a time using ymm registers
@@ -129,11 +95,7 @@ hugeloop_avx2:
CMPL DX, $0xffffffff
JEQ hugeloop_avx2
VZEROUPPER
-#ifdef GOEXPERIMENT_regabiargs
XORQ AX, AX // return 0
-#else
- MOVB $0, (AX)
-#endif
RET
bigloop_avx2:
@@ -150,11 +112,7 @@ bigloop:
SUBQ $8, BX
CMPQ CX, DX
JEQ bigloop
-#ifdef GOEXPERIMENT_regabiargs
XORQ AX, AX // return 0
-#else
- MOVB $0, (AX)
-#endif
RET
// remaining 0-8 bytes
@@ -162,11 +120,7 @@ leftover:
MOVQ -8(SI)(BX*1), CX
MOVQ -8(DI)(BX*1), DX
CMPQ CX, DX
-#ifdef GOEXPERIMENT_regabiargs
SETEQ AX
-#else
- SETEQ (AX)
-#endif
RET
small:
@@ -201,10 +155,5 @@ di_finish:
SUBQ SI, DI
SHLQ CX, DI
equal:
-#ifdef GOEXPERIMENT_regabiargs
SETEQ AX
-#else
- SETEQ (AX)
-#endif
RET
-
diff --git a/src/internal/bytealg/equal_arm64.s b/src/internal/bytealg/equal_arm64.s
index 01aa7b7b7a..cf5cf54e59 100644
--- a/src/internal/bytealg/equal_arm64.s
+++ b/src/internal/bytealg/equal_arm64.s
@@ -6,58 +6,70 @@
#include "textflag.h"
// memequal(a, b unsafe.Pointer, size uintptr) bool
-TEXT runtime·memequal(SB),NOSPLIT|NOFRAME,$0-25
- MOVD size+16(FP), R1
+TEXT runtime·memequal<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-25
+#ifndef GOEXPERIMENT_regabiargs
+ MOVD size+16(FP), R2
+#endif
// short path to handle 0-byte case
- CBZ R1, equal
+ CBZ R2, equal
+#ifndef GOEXPERIMENT_regabiargs
MOVD a+0(FP), R0
- MOVD b+8(FP), R2
+ MOVD b+8(FP), R1
MOVD $ret+24(FP), R8
+#endif
B memeqbody<>(SB)
equal:
MOVD $1, R0
+#ifndef GOEXPERIMENT_regabiargs
MOVB R0, ret+24(FP)
+#endif
RET
// memequal_varlen(a, b unsafe.Pointer) bool
-TEXT runtime·memequal_varlen(SB),NOSPLIT,$40-17
- MOVD a+0(FP), R3
- MOVD b+8(FP), R4
- CMP R3, R4
+TEXT runtime·memequal_varlen<ABIInternal>(SB),NOSPLIT,$0-17
+#ifndef GOEXPERIMENT_regabiargs
+ MOVD a+0(FP), R0
+ MOVD b+8(FP), R1
+#endif
+ CMP R0, R1
BEQ eq
- MOVD 8(R26), R5 // compiler stores size at offset 8 in the closure
- CBZ R5, eq
- MOVD R3, 8(RSP)
- MOVD R4, 16(RSP)
- MOVD R5, 24(RSP)
- BL runtime·memequal(SB)
- MOVBU 32(RSP), R3
- MOVB R3, ret+16(FP)
- RET
+ MOVD 8(R26), R2 // compiler stores size at offset 8 in the closure
+ CBZ R2, eq
+#ifndef GOEXPERIMENT_regabiargs
+ MOVD $ret+16(FP), R8
+#endif
+ B memeqbody<>(SB)
eq:
- MOVD $1, R3
- MOVB R3, ret+16(FP)
+ MOVD $1, R0
+#ifndef GOEXPERIMENT_regabiargs
+ MOVB R0, ret+16(FP)
+#endif
RET
// input:
// R0: pointer a
-// R1: data len
-// R2: pointer b
+// R1: pointer b
+// R2: data len
+#ifdef GOEXPERIMENT_regabiargs
+// at return: result in R0
+#else
// R8: address to put result
+#endif
+
TEXT memeqbody<>(SB),NOSPLIT,$0
- CMP $1, R1
+ CMP $1, R2
// handle 1-byte special case for better performance
BEQ one
- CMP $16, R1
+ CMP $16, R2
// handle specially if length < 16
BLO tail
- BIC $0x3f, R1, R3
+ BIC $0x3f, R2, R3
CBZ R3, chunk16
// work with 64-byte chunks
ADD R3, R0, R6 // end of chunks
chunk64_loop:
VLD1.P (R0), [V0.D2, V1.D2, V2.D2, V3.D2]
- VLD1.P (R2), [V4.D2, V5.D2, V6.D2, V7.D2]
+ VLD1.P (R1), [V4.D2, V5.D2, V6.D2, V7.D2]
VCMEQ V0.D2, V4.D2, V8.D2
VCMEQ V1.D2, V5.D2, V9.D2
VCMEQ V2.D2, V6.D2, V10.D2
@@ -71,66 +83,72 @@ chunk64_loop:
CBZ R4, not_equal
CBZ R5, not_equal
BNE chunk64_loop
- AND $0x3f, R1, R1
- CBZ R1, equal
+ AND $0x3f, R2, R2
+ CBZ R2, equal
chunk16:
// work with 16-byte chunks
- BIC $0xf, R1, R3
+ BIC $0xf, R2, R3
CBZ R3, tail
ADD R3, R0, R6 // end of chunks
chunk16_loop:
LDP.P 16(R0), (R4, R5)
- LDP.P 16(R2), (R7, R9)
+ LDP.P 16(R1), (R7, R9)
EOR R4, R7
CBNZ R7, not_equal
EOR R5, R9
CBNZ R9, not_equal
CMP R0, R6
BNE chunk16_loop
- AND $0xf, R1, R1
- CBZ R1, equal
+ AND $0xf, R2, R2
+ CBZ R2, equal
tail:
// special compare of tail with length < 16
- TBZ $3, R1, lt_8
+ TBZ $3, R2, lt_8
MOVD (R0), R4
- MOVD (R2), R5
+ MOVD (R1), R5
EOR R4, R5
CBNZ R5, not_equal
- SUB $8, R1, R6 // offset of the last 8 bytes
+ SUB $8, R2, R6 // offset of the last 8 bytes
MOVD (R0)(R6), R4
- MOVD (R2)(R6), R5
+ MOVD (R1)(R6), R5
EOR R4, R5
CBNZ R5, not_equal
B equal
lt_8:
- TBZ $2, R1, lt_4
+ TBZ $2, R2, lt_4
MOVWU (R0), R4
- MOVWU (R2), R5
+ MOVWU (R1), R5
EOR R4, R5
CBNZ R5, not_equal
- SUB $4, R1, R6 // offset of the last 4 bytes
+ SUB $4, R2, R6 // offset of the last 4 bytes
MOVWU (R0)(R6), R4
- MOVWU (R2)(R6), R5
+ MOVWU (R1)(R6), R5
EOR R4, R5
CBNZ R5, not_equal
B equal
lt_4:
- TBZ $1, R1, lt_2
+ TBZ $1, R2, lt_2
MOVHU.P 2(R0), R4
- MOVHU.P 2(R2), R5
+ MOVHU.P 2(R1), R5
CMP R4, R5
BNE not_equal
lt_2:
- TBZ $0, R1, equal
+ TBZ $0, R2, equal
one:
MOVBU (R0), R4
- MOVBU (R2), R5
+ MOVBU (R1), R5
CMP R4, R5
BNE not_equal
equal:
MOVD $1, R0
+#ifndef GOEXPERIMENT_regabiargs
MOVB R0, (R8)
+#endif
RET
not_equal:
+#ifdef GOEXPERIMENT_regabiargs
+ MOVB ZR, R0
+#else
MOVB ZR, (R8)
+#endif
RET
diff --git a/src/internal/goarch/gengoarch.go b/src/internal/goarch/gengoarch.go
new file mode 100644
index 0000000000..af15518ad8
--- /dev/null
+++ b/src/internal/goarch/gengoarch.go
@@ -0,0 +1,59 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+// +build ignore
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "os"
+ "strconv"
+ "strings"
+)
+
+var goarches []string
+
+func main() {
+ data, err := os.ReadFile("../../go/build/syslist.go")
+ if err != nil {
+ log.Fatal(err)
+ }
+ const goarchPrefix = `const goarchList = `
+ for _, line := range strings.Split(string(data), "\n") {
+ if strings.HasPrefix(line, goarchPrefix) {
+ text, err := strconv.Unquote(strings.TrimPrefix(line, goarchPrefix))
+ if err != nil {
+ log.Fatalf("parsing goarchList: %v", err)
+ }
+ goarches = strings.Fields(text)
+ }
+ }
+
+ for _, target := range goarches {
+ if target == "amd64p32" {
+ continue
+ }
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.\n\n")
+ fmt.Fprintf(&buf, "//go:build %s\n", target)
+ fmt.Fprintf(&buf, "// +build %s\n\n", target) // must explicitly include target for bootstrapping purposes
+ fmt.Fprintf(&buf, "package goarch\n\n")
+ fmt.Fprintf(&buf, "const GOARCH = `%s`\n\n", target)
+ for _, goarch := range goarches {
+ value := 0
+ if goarch == target {
+ value = 1
+ }
+ fmt.Fprintf(&buf, "const Is%s = %d\n", strings.Title(goarch), value)
+ }
+ err := os.WriteFile("zgoarch_"+target+".go", buf.Bytes(), 0666)
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+}
diff --git a/src/runtime/internal/sys/arch.go b/src/internal/goarch/goarch.go
index 3c99a2f7da..921f5a208f 100644
--- a/src/runtime/internal/sys/arch.go
+++ b/src/internal/goarch/goarch.go
@@ -1,8 +1,15 @@
-// Copyright 2014 The Go Authors. All rights reserved.
+// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+// Package goarch contains GOARCH-specific constants.
+package goarch
+
+// The next line makes 'go generate' write the zgoarch*.go files with
+// per-arch information, including constants named Is$GOARCH for every
+// known GOARCH. Each constant is 1 on the current system, 0 otherwise;
+// multiplying by them is useful for defining GOARCH-specific constants.
+//go:generate go run gengoarch.go
type ArchFamilyType int
@@ -23,14 +30,11 @@ const (
// It is also the size of the machine's native word size (that is, 4 on 32-bit systems, 8 on 64-bit).
const PtrSize = 4 << (^uintptr(0) >> 63)
-// AIX requires a larger stack for syscalls.
-const StackGuardMultiplier = StackGuardMultiplierDefault*(1-GoosAix) + 2*GoosAix
-
// ArchFamily is the architecture family (AMD64, ARM, ...)
const ArchFamily ArchFamilyType = _ArchFamily
// BigEndian reports whether the architecture is big-endian.
-const BigEndian = GoarchArmbe|GoarchArm64be|GoarchMips|GoarchMips64|GoarchPpc|GoarchPpc64|GoarchS390|GoarchS390x|GoarchSparc|GoarchSparc64 == 1
+const BigEndian = IsArmbe|IsArm64be|IsMips|IsMips64|IsPpc|IsPpc64|IsS390|IsS390x|IsSparc|IsSparc64 == 1
// DefaultPhysPageSize is the default physical page size.
const DefaultPhysPageSize = _DefaultPhysPageSize
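The go:generate comment above notes that the generated Is$GOARCH constants are 0 or 1 so they can be combined arithmetically; the BigEndian constant already relies on that 0/1 nature. A small hedged example of the multiplication pattern with made-up stand-in values (on a real build exactly one Is* constant is 1):

package main

import "fmt"

// Illustrative stand-ins for two generated goarch constants.
const (
	IsAmd64 = 1
	IsArm64 = 0
)

// Multiplying by 0/1 constants selects a per-arch value without build
// tags; the numbers here are arbitrary, the pattern is the point.
const perArchValue = 64*IsAmd64 + 128*IsArm64

func main() {
	fmt.Println(perArchValue) // 64 on this illustrative "amd64" configuration
}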
diff --git a/src/runtime/internal/sys/arch_386.go b/src/internal/goarch/goarch_386.go
index 1ebce3435e..c6214217fc 100644
--- a/src/runtime/internal/sys/arch_386.go
+++ b/src/internal/goarch/goarch_386.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = I386
diff --git a/src/runtime/internal/sys/arch_amd64.go b/src/internal/goarch/goarch_amd64.go
index 7f003d0f1d..911e3e7242 100644
--- a/src/runtime/internal/sys/arch_amd64.go
+++ b/src/internal/goarch/goarch_amd64.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = AMD64
diff --git a/src/runtime/internal/sys/arch_arm.go b/src/internal/goarch/goarch_arm.go
index ef2048bb71..a6591713c8 100644
--- a/src/runtime/internal/sys/arch_arm.go
+++ b/src/internal/goarch/goarch_arm.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = ARM
diff --git a/src/runtime/internal/sys/arch_arm64.go b/src/internal/goarch/goarch_arm64.go
index b9f2f7b1fe..85d0b47639 100644
--- a/src/runtime/internal/sys/arch_arm64.go
+++ b/src/internal/goarch/goarch_arm64.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = ARM64
diff --git a/src/runtime/internal/sys/arch_mips.go b/src/internal/goarch/goarch_mips.go
index 4cb0eebea7..59f3995e2a 100644
--- a/src/runtime/internal/sys/arch_mips.go
+++ b/src/internal/goarch/goarch_mips.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = MIPS
diff --git a/src/runtime/internal/sys/arch_mips64le.go b/src/internal/goarch/goarch_mips64.go
index 57636ac4a4..9e4f82797d 100644
--- a/src/runtime/internal/sys/arch_mips64le.go
+++ b/src/internal/goarch/goarch_mips64.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = MIPS64
diff --git a/src/runtime/internal/sys/arch_mips64.go b/src/internal/goarch/goarch_mips64le.go
index 57636ac4a4..9e4f82797d 100644
--- a/src/runtime/internal/sys/arch_mips64.go
+++ b/src/internal/goarch/goarch_mips64le.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = MIPS64
diff --git a/src/runtime/internal/sys/arch_mipsle.go b/src/internal/goarch/goarch_mipsle.go
index 4240f5ce47..3e6642bb86 100644
--- a/src/runtime/internal/sys/arch_mipsle.go
+++ b/src/internal/goarch/goarch_mipsle.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = MIPS
diff --git a/src/runtime/internal/sys/arch_ppc64.go b/src/internal/goarch/goarch_ppc64.go
index 1869213ce2..60cc846e6a 100644
--- a/src/runtime/internal/sys/arch_ppc64.go
+++ b/src/internal/goarch/goarch_ppc64.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = PPC64
diff --git a/src/runtime/internal/sys/arch_ppc64le.go b/src/internal/goarch/goarch_ppc64le.go
index 1869213ce2..60cc846e6a 100644
--- a/src/runtime/internal/sys/arch_ppc64le.go
+++ b/src/internal/goarch/goarch_ppc64le.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = PPC64
diff --git a/src/runtime/internal/sys/arch_riscv64.go b/src/internal/goarch/goarch_riscv64.go
index 360d236e32..3b6da1e02f 100644
--- a/src/runtime/internal/sys/arch_riscv64.go
+++ b/src/internal/goarch/goarch_riscv64.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = RISCV64
diff --git a/src/runtime/internal/sys/arch_s390x.go b/src/internal/goarch/goarch_s390x.go
index e33e0b7f2b..20c5705581 100644
--- a/src/runtime/internal/sys/arch_s390x.go
+++ b/src/internal/goarch/goarch_s390x.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = S390X
diff --git a/src/runtime/internal/sys/arch_wasm.go b/src/internal/goarch/goarch_wasm.go
index ee919ff9e6..98618d6980 100644
--- a/src/runtime/internal/sys/arch_wasm.go
+++ b/src/internal/goarch/goarch_wasm.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = WASM
diff --git a/src/internal/goarch/zgoarch_386.go b/src/internal/goarch/zgoarch_386.go
new file mode 100644
index 0000000000..2395b80951
--- /dev/null
+++ b/src/internal/goarch/zgoarch_386.go
@@ -0,0 +1,33 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build 386
+// +build 386
+
+package goarch
+
+const GOARCH = `386`
+
+const Is386 = 1
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_amd64.go b/src/internal/goarch/zgoarch_amd64.go
new file mode 100644
index 0000000000..c301d279c1
--- /dev/null
+++ b/src/internal/goarch/zgoarch_amd64.go
@@ -0,0 +1,33 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build amd64
+// +build amd64
+
+package goarch
+
+const GOARCH = `amd64`
+
+const Is386 = 0
+const IsAmd64 = 1
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_arm.go b/src/internal/goarch/zgoarch_arm.go
new file mode 100644
index 0000000000..4a5ff37999
--- /dev/null
+++ b/src/internal/goarch/zgoarch_arm.go
@@ -0,0 +1,33 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build arm
+// +build arm
+
+package goarch
+
+const GOARCH = `arm`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 1
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_arm64.go b/src/internal/goarch/zgoarch_arm64.go
new file mode 100644
index 0000000000..840e280f94
--- /dev/null
+++ b/src/internal/goarch/zgoarch_arm64.go
@@ -0,0 +1,33 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build arm64
+// +build arm64
+
+package goarch
+
+const GOARCH = `arm64`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 1
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_arm64be.go b/src/internal/goarch/zgoarch_arm64be.go
new file mode 100644
index 0000000000..bdbe5faacf
--- /dev/null
+++ b/src/internal/goarch/zgoarch_arm64be.go
@@ -0,0 +1,33 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build arm64be
+// +build arm64be
+
+package goarch
+
+const GOARCH = `arm64be`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 1
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_armbe.go b/src/internal/goarch/zgoarch_armbe.go
new file mode 100644
index 0000000000..36be514a0b
--- /dev/null
+++ b/src/internal/goarch/zgoarch_armbe.go
@@ -0,0 +1,33 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build armbe
+// +build armbe
+
+package goarch
+
+const GOARCH = `armbe`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 1
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_loong64.go b/src/internal/goarch/zgoarch_loong64.go
new file mode 100644
index 0000000000..9465cf5bf5
--- /dev/null
+++ b/src/internal/goarch/zgoarch_loong64.go
@@ -0,0 +1,33 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build loong64
+// +build loong64
+
+package goarch
+
+const GOARCH = `loong64`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsLoong64 = 1
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_mips.go b/src/internal/goarch/zgoarch_mips.go
new file mode 100644
index 0000000000..e1458c0485
--- /dev/null
+++ b/src/internal/goarch/zgoarch_mips.go
@@ -0,0 +1,33 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build mips
+// +build mips
+
+package goarch
+
+const GOARCH = `mips`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsLoong64 = 0
+const IsMips = 1
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_mips64.go b/src/internal/goarch/zgoarch_mips64.go
new file mode 100644
index 0000000000..7de345e321
--- /dev/null
+++ b/src/internal/goarch/zgoarch_mips64.go
@@ -0,0 +1,33 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build mips64
+// +build mips64
+
+package goarch
+
+const GOARCH = `mips64`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 1
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_mips64le.go b/src/internal/goarch/zgoarch_mips64le.go
new file mode 100644
index 0000000000..96b1c3c021
--- /dev/null
+++ b/src/internal/goarch/zgoarch_mips64le.go
@@ -0,0 +1,33 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build mips64le
+// +build mips64le
+
+package goarch
+
+const GOARCH = `mips64le`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 1
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_mips64p32.go b/src/internal/goarch/zgoarch_mips64p32.go
new file mode 100644
index 0000000000..91d1f3c4df
--- /dev/null
+++ b/src/internal/goarch/zgoarch_mips64p32.go
@@ -0,0 +1,33 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build mips64p32
+// +build mips64p32
+
+package goarch
+
+const GOARCH = `mips64p32`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 1
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_mips64p32le.go b/src/internal/goarch/zgoarch_mips64p32le.go
new file mode 100644
index 0000000000..18f2ef2347
--- /dev/null
+++ b/src/internal/goarch/zgoarch_mips64p32le.go
@@ -0,0 +1,33 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build mips64p32le
+// +build mips64p32le
+
+package goarch
+
+const GOARCH = `mips64p32le`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 1
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_mipsle.go b/src/internal/goarch/zgoarch_mipsle.go
new file mode 100644
index 0000000000..4551de1a32
--- /dev/null
+++ b/src/internal/goarch/zgoarch_mipsle.go
@@ -0,0 +1,33 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build mipsle
+// +build mipsle
+
+package goarch
+
+const GOARCH = `mipsle`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 1
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_ppc.go b/src/internal/goarch/zgoarch_ppc.go
new file mode 100644
index 0000000000..ffed58c2e5
--- /dev/null
+++ b/src/internal/goarch/zgoarch_ppc.go
@@ -0,0 +1,33 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build ppc
+// +build ppc
+
+package goarch
+
+const GOARCH = `ppc`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 1
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_ppc64.go b/src/internal/goarch/zgoarch_ppc64.go
new file mode 100644
index 0000000000..c369c74166
--- /dev/null
+++ b/src/internal/goarch/zgoarch_ppc64.go
@@ -0,0 +1,33 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build ppc64
+// +build ppc64
+
+package goarch
+
+const GOARCH = `ppc64`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 1
+const IsPpc64le = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_ppc64le.go b/src/internal/goarch/zgoarch_ppc64le.go
new file mode 100644
index 0000000000..de5bae2a81
--- /dev/null
+++ b/src/internal/goarch/zgoarch_ppc64le.go
@@ -0,0 +1,33 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build ppc64le
+// +build ppc64le
+
+package goarch
+
+const GOARCH = `ppc64le`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 1
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_riscv.go b/src/internal/goarch/zgoarch_riscv.go
new file mode 100644
index 0000000000..07c9d81b39
--- /dev/null
+++ b/src/internal/goarch/zgoarch_riscv.go
@@ -0,0 +1,33 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build riscv
+// +build riscv
+
+package goarch
+
+const GOARCH = `riscv`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 1
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_riscv64.go b/src/internal/goarch/zgoarch_riscv64.go
new file mode 100644
index 0000000000..24e6ef3fdf
--- /dev/null
+++ b/src/internal/goarch/zgoarch_riscv64.go
@@ -0,0 +1,33 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build riscv64
+// +build riscv64
+
+package goarch
+
+const GOARCH = `riscv64`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 1
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_s390.go b/src/internal/goarch/zgoarch_s390.go
new file mode 100644
index 0000000000..429206d653
--- /dev/null
+++ b/src/internal/goarch/zgoarch_s390.go
@@ -0,0 +1,33 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build s390
+// +build s390
+
+package goarch
+
+const GOARCH = `s390`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 1
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_s390x.go b/src/internal/goarch/zgoarch_s390x.go
new file mode 100644
index 0000000000..0c59005216
--- /dev/null
+++ b/src/internal/goarch/zgoarch_s390x.go
@@ -0,0 +1,33 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build s390x
+// +build s390x
+
+package goarch
+
+const GOARCH = `s390x`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 1
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_sparc.go b/src/internal/goarch/zgoarch_sparc.go
new file mode 100644
index 0000000000..83a356e4c7
--- /dev/null
+++ b/src/internal/goarch/zgoarch_sparc.go
@@ -0,0 +1,33 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build sparc
+// +build sparc
+
+package goarch
+
+const GOARCH = `sparc`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 1
+const IsSparc64 = 0
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_sparc64.go b/src/internal/goarch/zgoarch_sparc64.go
new file mode 100644
index 0000000000..7c9d40986e
--- /dev/null
+++ b/src/internal/goarch/zgoarch_sparc64.go
@@ -0,0 +1,33 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build sparc64
+// +build sparc64
+
+package goarch
+
+const GOARCH = `sparc64`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 1
+const IsWasm = 0
diff --git a/src/internal/goarch/zgoarch_wasm.go b/src/internal/goarch/zgoarch_wasm.go
new file mode 100644
index 0000000000..5aac1dfc47
--- /dev/null
+++ b/src/internal/goarch/zgoarch_wasm.go
@@ -0,0 +1,33 @@
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build wasm
+// +build wasm
+
+package goarch
+
+const GOARCH = `wasm`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsLoong64 = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 1
diff --git a/src/internal/goexperiment/exp_regabi_off.go b/src/internal/goexperiment/exp_regabi_off.go
deleted file mode 100644
index 5d8823843d..0000000000
--- a/src/internal/goexperiment/exp_regabi_off.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build !goexperiment.regabi
-// +build !goexperiment.regabi
-
-package goexperiment
-
-const Regabi = false
-const RegabiInt = 0
diff --git a/src/internal/goexperiment/exp_regabi_on.go b/src/internal/goexperiment/exp_regabi_on.go
deleted file mode 100644
index c08d58e9b2..0000000000
--- a/src/internal/goexperiment/exp_regabi_on.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build goexperiment.regabi
-// +build goexperiment.regabi
-
-package goexperiment
-
-const Regabi = true
-const RegabiInt = 1
diff --git a/src/internal/goexperiment/exp_regabidefer_off.go b/src/internal/goexperiment/exp_regabidefer_off.go
deleted file mode 100644
index b47c0c2cf5..0000000000
--- a/src/internal/goexperiment/exp_regabidefer_off.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build !goexperiment.regabidefer
-// +build !goexperiment.regabidefer
-
-package goexperiment
-
-const RegabiDefer = false
-const RegabiDeferInt = 0
diff --git a/src/internal/goexperiment/exp_regabidefer_on.go b/src/internal/goexperiment/exp_regabidefer_on.go
deleted file mode 100644
index bbf2f6c69b..0000000000
--- a/src/internal/goexperiment/exp_regabidefer_on.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build goexperiment.regabidefer
-// +build goexperiment.regabidefer
-
-package goexperiment
-
-const RegabiDefer = true
-const RegabiDeferInt = 1
diff --git a/src/internal/goexperiment/exp_regabig_off.go b/src/internal/goexperiment/exp_regabig_off.go
deleted file mode 100644
index 1b37d45186..0000000000
--- a/src/internal/goexperiment/exp_regabig_off.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build !goexperiment.regabig
-// +build !goexperiment.regabig
-
-package goexperiment
-
-const RegabiG = false
-const RegabiGInt = 0
diff --git a/src/internal/goexperiment/exp_regabig_on.go b/src/internal/goexperiment/exp_regabig_on.go
deleted file mode 100644
index 7e5b162e0b..0000000000
--- a/src/internal/goexperiment/exp_regabig_on.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build goexperiment.regabig
-// +build goexperiment.regabig
-
-package goexperiment
-
-const RegabiG = true
-const RegabiGInt = 1
diff --git a/src/internal/goexperiment/exp_unified_off.go b/src/internal/goexperiment/exp_unified_off.go
new file mode 100644
index 0000000000..4c16fd8562
--- /dev/null
+++ b/src/internal/goexperiment/exp_unified_off.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.unified
+// +build !goexperiment.unified
+
+package goexperiment
+
+const Unified = false
+const UnifiedInt = 0
diff --git a/src/internal/goexperiment/exp_unified_on.go b/src/internal/goexperiment/exp_unified_on.go
new file mode 100644
index 0000000000..2b17ba3e79
--- /dev/null
+++ b/src/internal/goexperiment/exp_unified_on.go
@@ -0,0 +1,9 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.unified
+// +build goexperiment.unified
+
+package goexperiment
+
+const Unified = true
+const UnifiedInt = 1
diff --git a/src/internal/goexperiment/flags.go b/src/internal/goexperiment/flags.go
index cd4c178818..0a61a0e5fc 100644
--- a/src/internal/goexperiment/flags.go
+++ b/src/internal/goexperiment/flags.go
@@ -59,6 +59,10 @@ type Flags struct {
PreemptibleLoops bool
StaticLockRanking bool
+ // Unified enables the compiler's unified IR construction
+ // experiment.
+ Unified bool
+
// Regabi is split into several sub-experiments that can be
// enabled individually. Not all combinations work.
// The "regabi" GOEXPERIMENT is an alias for all "working"
@@ -68,26 +72,15 @@ type Flags struct {
// ABI0 and ABIInternal functions. Without this, the ABIs are
// assumed to be identical so cross-ABI calls are direct.
RegabiWrappers bool
- // RegabiG enables dedicated G and zero registers in
- // ABIInternal.
- //
- // Requires wrappers because it makes the ABIs incompatible.
- RegabiG bool
// RegabiReflect enables the register-passing paths in
// reflection calls. This is also gated by intArgRegs in
// reflect and runtime (which are disabled by default) so it
// can be used in targeted tests.
RegabiReflect bool
- // RegabiDefer enables desugaring defer and go calls
- // into argument-less closures.
- RegabiDefer bool
// RegabiArgs enables register arguments/results in all
// compiled Go functions.
//
- // Requires wrappers (to do ABI translation), g (because
- // runtime assembly that's been ported to ABIInternal uses the
- // G register), reflect (so reflection calls use registers),
- // and defer (because the runtime doesn't support passing
- // register arguments to defer/go).
+ // Requires wrappers (to do ABI translation), and reflect (so
+ // reflection calls use registers).
RegabiArgs bool
}
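
Each experiment in Flags is mirrored by generated constants in internal/goexperiment (the exp_unified_{on,off}.go files added above are the generated pair for Unified). A minimal sketch of how code inside the Go tree might gate on these constants follows; the package and function names are illustrative, and internal/goexperiment is not importable outside the Go source tree.

    // Illustrative only: gate behavior on GOEXPERIMENT-derived constants.
    package demo

    import (
    	"fmt"
    	"internal/goexperiment" // available only within the Go source tree
    )

    func describeABI() {
    	if goexperiment.RegabiArgs {
    		fmt.Println("arguments and results are passed in registers")
    	} else {
    		fmt.Println("arguments and results are passed on the stack")
    	}
    	// The *Int forms (0 or 1) are handy in constant expressions.
    	fmt.Println("unified IR:", goexperiment.Unified, goexperiment.UnifiedInt)
    }
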
diff --git a/src/runtime/internal/sys/gengoos.go b/src/internal/goos/gengoos.go
index ffe962f71d..1860cd700d 100644
--- a/src/runtime/internal/sys/gengoos.go
+++ b/src/internal/goos/gengoos.go
@@ -16,17 +16,14 @@ import (
"strings"
)
-var gooses, goarches []string
+var gooses []string
func main() {
- data, err := os.ReadFile("../../../go/build/syslist.go")
+ data, err := os.ReadFile("../../go/build/syslist.go")
if err != nil {
log.Fatal(err)
}
- const (
- goosPrefix = `const goosList = `
- goarchPrefix = `const goarchList = `
- )
+ const goosPrefix = `const goosList = `
for _, line := range strings.Split(string(data), "\n") {
if strings.HasPrefix(line, goosPrefix) {
text, err := strconv.Unquote(strings.TrimPrefix(line, goosPrefix))
@@ -35,13 +32,6 @@ func main() {
}
gooses = strings.Fields(text)
}
- if strings.HasPrefix(line, goarchPrefix) {
- text, err := strconv.Unquote(strings.TrimPrefix(line, goarchPrefix))
- if err != nil {
- log.Fatalf("parsing goarchList: %v", err)
- }
- goarches = strings.Fields(text)
- }
}
for _, target := range gooses {
@@ -63,41 +53,18 @@ func main() {
fmt.Fprintf(&buf, "// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.\n\n")
fmt.Fprintf(&buf, "//go:build %s\n", strings.Join(tags, " && "))
fmt.Fprintf(&buf, "// +build %s\n\n", strings.Join(tags, ","))
- fmt.Fprintf(&buf, "package sys\n\n")
+ fmt.Fprintf(&buf, "package goos\n\n")
fmt.Fprintf(&buf, "const GOOS = `%s`\n\n", target)
for _, goos := range gooses {
value := 0
if goos == target {
value = 1
}
- fmt.Fprintf(&buf, "const Goos%s = %d\n", strings.Title(goos), value)
+ fmt.Fprintf(&buf, "const Is%s = %d\n", strings.Title(goos), value)
}
err := os.WriteFile("zgoos_"+target+".go", buf.Bytes(), 0666)
if err != nil {
log.Fatal(err)
}
}
-
- for _, target := range goarches {
- if target == "amd64p32" {
- continue
- }
- var buf bytes.Buffer
- fmt.Fprintf(&buf, "// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.\n\n")
- fmt.Fprintf(&buf, "//go:build %s\n", target)
- fmt.Fprintf(&buf, "// +build %s\n\n", target) // must explicitly include target for bootstrapping purposes
- fmt.Fprintf(&buf, "package sys\n\n")
- fmt.Fprintf(&buf, "const GOARCH = `%s`\n\n", target)
- for _, goarch := range goarches {
- value := 0
- if goarch == target {
- value = 1
- }
- fmt.Fprintf(&buf, "const Goarch%s = %d\n", strings.Title(goarch), value)
- }
- err := os.WriteFile("zgoarch_"+target+".go", buf.Bytes(), 0666)
- if err != nil {
- log.Fatal(err)
- }
- }
}
diff --git a/src/internal/goos/goos.go b/src/internal/goos/goos.go
new file mode 100644
index 0000000000..ebb521fec6
--- /dev/null
+++ b/src/internal/goos/goos.go
@@ -0,0 +1,12 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// package goos contains GOOS-specific constants.
+package goos
+
+// The next line makes 'go generate' write the zgoos*.go files with
+// per-OS information, including constants named Is$GOOS for every
+// known GOOS. The constant is 1 on the current system, 0 otherwise;
+// multiplying by them is useful for defining GOOS-specific constants.
+//go:generate go run gengoos.go
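
The "multiplying by them" remark above is the point of generating the Is$GOOS names as untyped 0/1 constants: an OS-dependent value can be written as a single constant expression with no build tags, because exactly one term survives on any given GOOS. A hedged sketch of the pattern (the constant name and sizes are illustrative, not taken from the runtime):

    package demo

    import "internal/goos" // importable only from within the Go source tree

    // extraSystemStack is illustrative: reserve extra stack on Windows and
    // Plan 9, nothing elsewhere. Each Is* constant is 1 on the matching GOOS
    // and 0 otherwise, so the unused terms vanish at compile time.
    const extraSystemStack = goos.IsWindows*4096 + goos.IsPlan9*512
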
diff --git a/src/internal/goos/zgoos_aix.go b/src/internal/goos/zgoos_aix.go
new file mode 100644
index 0000000000..063e698b82
--- /dev/null
+++ b/src/internal/goos/zgoos_aix.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build aix
+// +build aix
+
+package goos
+
+const GOOS = `aix`
+
+const IsAix = 1
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_android.go b/src/internal/goos/zgoos_android.go
new file mode 100644
index 0000000000..e9e4864978
--- /dev/null
+++ b/src/internal/goos/zgoos_android.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build android
+// +build android
+
+package goos
+
+const GOOS = `android`
+
+const IsAix = 0
+const IsAndroid = 1
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_darwin.go b/src/internal/goos/zgoos_darwin.go
new file mode 100644
index 0000000000..309d6a2717
--- /dev/null
+++ b/src/internal/goos/zgoos_darwin.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build !ios && darwin
+// +build !ios,darwin
+
+package goos
+
+const GOOS = `darwin`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 1
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_dragonfly.go b/src/internal/goos/zgoos_dragonfly.go
new file mode 100644
index 0000000000..4e8711b94c
--- /dev/null
+++ b/src/internal/goos/zgoos_dragonfly.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build dragonfly
+// +build dragonfly
+
+package goos
+
+const GOOS = `dragonfly`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 1
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_freebsd.go b/src/internal/goos/zgoos_freebsd.go
new file mode 100644
index 0000000000..f312bd1608
--- /dev/null
+++ b/src/internal/goos/zgoos_freebsd.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build freebsd
+// +build freebsd
+
+package goos
+
+const GOOS = `freebsd`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 1
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_hurd.go b/src/internal/goos/zgoos_hurd.go
new file mode 100644
index 0000000000..0f0dd28b81
--- /dev/null
+++ b/src/internal/goos/zgoos_hurd.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build hurd
+// +build hurd
+
+package goos
+
+const GOOS = `hurd`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 1
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_illumos.go b/src/internal/goos/zgoos_illumos.go
new file mode 100644
index 0000000000..17e7c53a40
--- /dev/null
+++ b/src/internal/goos/zgoos_illumos.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build illumos
+// +build illumos
+
+package goos
+
+const GOOS = `illumos`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 1
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_ios.go b/src/internal/goos/zgoos_ios.go
new file mode 100644
index 0000000000..e4745ca413
--- /dev/null
+++ b/src/internal/goos/zgoos_ios.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build ios
+// +build ios
+
+package goos
+
+const GOOS = `ios`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 1
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_js.go b/src/internal/goos/zgoos_js.go
new file mode 100644
index 0000000000..bd2417e9ce
--- /dev/null
+++ b/src/internal/goos/zgoos_js.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build js
+// +build js
+
+package goos
+
+const GOOS = `js`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 1
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_linux.go b/src/internal/goos/zgoos_linux.go
new file mode 100644
index 0000000000..476702f442
--- /dev/null
+++ b/src/internal/goos/zgoos_linux.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build !android && linux
+// +build !android,linux
+
+package goos
+
+const GOOS = `linux`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 1
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_netbsd.go b/src/internal/goos/zgoos_netbsd.go
new file mode 100644
index 0000000000..97b7564bab
--- /dev/null
+++ b/src/internal/goos/zgoos_netbsd.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build netbsd
+// +build netbsd
+
+package goos
+
+const GOOS = `netbsd`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 1
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_openbsd.go b/src/internal/goos/zgoos_openbsd.go
new file mode 100644
index 0000000000..384a96480d
--- /dev/null
+++ b/src/internal/goos/zgoos_openbsd.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build openbsd
+// +build openbsd
+
+package goos
+
+const GOOS = `openbsd`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 1
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_plan9.go b/src/internal/goos/zgoos_plan9.go
new file mode 100644
index 0000000000..fcc279a79e
--- /dev/null
+++ b/src/internal/goos/zgoos_plan9.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build plan9
+// +build plan9
+
+package goos
+
+const GOOS = `plan9`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 1
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_solaris.go b/src/internal/goos/zgoos_solaris.go
new file mode 100644
index 0000000000..3f366cf710
--- /dev/null
+++ b/src/internal/goos/zgoos_solaris.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build !illumos && solaris
+// +build !illumos,solaris
+
+package goos
+
+const GOOS = `solaris`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 1
+const IsWindows = 0
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_windows.go b/src/internal/goos/zgoos_windows.go
new file mode 100644
index 0000000000..dfa55339d3
--- /dev/null
+++ b/src/internal/goos/zgoos_windows.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build windows
+// +build windows
+
+package goos
+
+const GOOS = `windows`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 1
+const IsZos = 0
diff --git a/src/internal/goos/zgoos_zos.go b/src/internal/goos/zgoos_zos.go
new file mode 100644
index 0000000000..714f24963a
--- /dev/null
+++ b/src/internal/goos/zgoos_zos.go
@@ -0,0 +1,26 @@
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build zos
+// +build zos
+
+package goos
+
+const GOOS = `zos`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 1
diff --git a/src/internal/reflectlite/swapper.go b/src/internal/reflectlite/swapper.go
index 6330ab2d34..ac081d49bb 100644
--- a/src/internal/reflectlite/swapper.go
+++ b/src/internal/reflectlite/swapper.go
@@ -5,6 +5,7 @@
package reflectlite
import (
+ "internal/goarch"
"internal/unsafeheader"
"unsafe"
)
@@ -36,7 +37,7 @@ func Swapper(slice interface{}) func(i, j int) {
// Some common & small cases, without using memmove:
if hasPtr {
- if size == ptrSize {
+ if size == goarch.PtrSize {
ps := *(*[]unsafe.Pointer)(v.ptr)
return func(i, j int) { ps[i], ps[j] = ps[j], ps[i] }
}
diff --git a/src/internal/reflectlite/value.go b/src/internal/reflectlite/value.go
index 0365eeeabf..136273842c 100644
--- a/src/internal/reflectlite/value.go
+++ b/src/internal/reflectlite/value.go
@@ -5,13 +5,12 @@
package reflectlite
import (
+ "internal/goarch"
"internal/unsafeheader"
"runtime"
"unsafe"
)
-const ptrSize = 4 << (^uintptr(0) >> 63) // unsafe.Sizeof(uintptr(0)) but an ideal const
-
// Value is the reflection interface to a Go value.
//
// Not all methods apply to all kinds of values. Restrictions,
@@ -90,7 +89,7 @@ func (f flag) ro() flag {
// pointer returns the underlying pointer represented by v.
// v.Kind() must be Ptr, Map, Chan, Func, or UnsafePointer
func (v Value) pointer() unsafe.Pointer {
- if v.typ.size != ptrSize || !v.typ.pointers() {
+ if v.typ.size != goarch.PtrSize || !v.typ.pointers() {
panic("can't call pointer on a non-pointer Value")
}
if v.flag&flagIndir != 0 {
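
The deleted ptrSize idiom above (the same line is removed from reflect/value.go further down) works because ^uintptr(0) is all one-bits: shifting it right by 63 yields 1 on a 64-bit platform and 0 on a 32-bit one, so 4 << that amount evaluates to 8 or 4. internal/goarch.PtrSize now supplies that value directly. A small sketch, assuming a file inside the Go tree since internal/goarch is not importable elsewhere:

    package demo

    import (
    	"fmt"
    	"internal/goarch" // importable only within the Go source tree
    )

    func main() {
    	old := 4 << (^uintptr(0) >> 63)  // 8 on 64-bit, 4 on 32-bit
    	fmt.Println(old, goarch.PtrSize) // the two always agree
    }
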
diff --git a/src/reflect/abi.go b/src/reflect/abi.go
index 17b79a8394..9ddde3ae57 100644
--- a/src/reflect/abi.go
+++ b/src/reflect/abi.go
@@ -6,6 +6,7 @@ package reflect
import (
"internal/abi"
+ "internal/goarch"
"internal/goexperiment"
"unsafe"
)
@@ -167,7 +168,7 @@ func (a *abiSeq) addRcvr(rcvr *rtype) (*abiStep, bool) {
a.valueStart = append(a.valueStart, len(a.steps))
var ok, ptr bool
if ifaceIndir(rcvr) || rcvr.pointers() {
- ok = a.assignIntN(0, ptrSize, 1, 0b1)
+ ok = a.assignIntN(0, goarch.PtrSize, 1, 0b1)
ptr = true
} else {
// TODO(mknyszek): Is this case even possible?
@@ -176,11 +177,11 @@ func (a *abiSeq) addRcvr(rcvr *rtype) (*abiStep, bool) {
// in the reflect package which only conditionally added
// a pointer bit to the reflect.(Value).Call stack frame's
// GC bitmap.
- ok = a.assignIntN(0, ptrSize, 1, 0b0)
+ ok = a.assignIntN(0, goarch.PtrSize, 1, 0b0)
ptr = false
}
if !ok {
- a.stackAssign(ptrSize, ptrSize)
+ a.stackAssign(goarch.PtrSize, goarch.PtrSize)
return &a.steps[len(a.steps)-1], ptr
}
return nil, ptr
@@ -202,7 +203,7 @@ func (a *abiSeq) regAssign(t *rtype, offset uintptr) bool {
case Bool, Int, Uint, Int8, Uint8, Int16, Uint16, Int32, Uint32, Uintptr:
return a.assignIntN(offset, t.size, 1, 0b0)
case Int64, Uint64:
- switch ptrSize {
+ switch goarch.PtrSize {
case 4:
return a.assignIntN(offset, 4, 2, 0b0)
case 8:
@@ -215,11 +216,11 @@ func (a *abiSeq) regAssign(t *rtype, offset uintptr) bool {
case Complex128:
return a.assignFloatN(offset, 8, 2)
case String:
- return a.assignIntN(offset, ptrSize, 2, 0b01)
+ return a.assignIntN(offset, goarch.PtrSize, 2, 0b01)
case Interface:
- return a.assignIntN(offset, ptrSize, 2, 0b10)
+ return a.assignIntN(offset, goarch.PtrSize, 2, 0b10)
case Slice:
- return a.assignIntN(offset, ptrSize, 3, 0b001)
+ return a.assignIntN(offset, goarch.PtrSize, 3, 0b001)
case Array:
tt := (*arrayType)(unsafe.Pointer(t))
switch tt.len {
@@ -262,7 +263,7 @@ func (a *abiSeq) assignIntN(offset, size uintptr, n int, ptrMap uint8) bool {
if n > 8 || n < 0 {
panic("invalid n")
}
- if ptrMap != 0 && size != ptrSize {
+ if ptrMap != 0 && size != goarch.PtrSize {
panic("non-empty pointer map passed for non-pointer-size values")
}
if a.iregs+n > intArgRegs {
@@ -413,7 +414,7 @@ func newAbiDesc(t *funcType, rcvr *rtype) abiDesc {
stackPtrs.append(0)
}
} else {
- spill += ptrSize
+ spill += goarch.PtrSize
}
}
for i, arg := range t.in() {
@@ -430,12 +431,12 @@ func newAbiDesc(t *funcType, rcvr *rtype) abiDesc {
}
}
}
- spill = align(spill, ptrSize)
+ spill = align(spill, goarch.PtrSize)
// From the input parameters alone, we now know
// the stackCallArgsSize and retOffset.
stackCallArgsSize := in.stackBytes
- retOffset := align(in.stackBytes, ptrSize)
+ retOffset := align(in.stackBytes, goarch.PtrSize)
// Compute the stack frame pointer bitmap and register
// pointer bitmap for return values.
diff --git a/src/reflect/abi_test.go b/src/reflect/abi_test.go
index 5a0130f7b4..2b247d1d79 100644
--- a/src/reflect/abi_test.go
+++ b/src/reflect/abi_test.go
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build goexperiment.regabireflect
-// +build goexperiment.regabireflect
+//go:build goexperiment.regabireflect && goexperiment.regabiargs
+// +build goexperiment.regabireflect,goexperiment.regabiargs
package reflect_test
diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go
index eac27e886f..5e5e4c1e60 100644
--- a/src/reflect/all_test.go
+++ b/src/reflect/all_test.go
@@ -10,6 +10,7 @@ import (
"flag"
"fmt"
"go/token"
+ "internal/goarch"
"io"
"math"
"math/rand"
@@ -6466,10 +6467,10 @@ func clobber() {
func TestFuncLayout(t *testing.T) {
align := func(x uintptr) uintptr {
- return (x + PtrSize - 1) &^ (PtrSize - 1)
+ return (x + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
}
var r []byte
- if PtrSize == 4 {
+ if goarch.PtrSize == 4 {
r = []byte{0, 0, 0, 1}
} else {
r = []byte{0, 0, 1}
@@ -6490,56 +6491,56 @@ func TestFuncLayout(t *testing.T) {
tests := []test{
{
typ: ValueOf(func(a, b string) string { return "" }).Type(),
- size: 6 * PtrSize,
- argsize: 4 * PtrSize,
- retOffset: 4 * PtrSize,
+ size: 6 * goarch.PtrSize,
+ argsize: 4 * goarch.PtrSize,
+ retOffset: 4 * goarch.PtrSize,
stack: []byte{1, 0, 1, 0, 1},
gc: []byte{1, 0, 1, 0, 1},
},
{
typ: ValueOf(func(a, b, c uint32, p *byte, d uint16) {}).Type(),
- size: align(align(3*4) + PtrSize + 2),
- argsize: align(3*4) + PtrSize + 2,
- retOffset: align(align(3*4) + PtrSize + 2),
+ size: align(align(3*4) + goarch.PtrSize + 2),
+ argsize: align(3*4) + goarch.PtrSize + 2,
+ retOffset: align(align(3*4) + goarch.PtrSize + 2),
stack: r,
gc: r,
},
{
typ: ValueOf(func(a map[int]int, b uintptr, c interface{}) {}).Type(),
- size: 4 * PtrSize,
- argsize: 4 * PtrSize,
- retOffset: 4 * PtrSize,
+ size: 4 * goarch.PtrSize,
+ argsize: 4 * goarch.PtrSize,
+ retOffset: 4 * goarch.PtrSize,
stack: []byte{1, 0, 1, 1},
gc: []byte{1, 0, 1, 1},
},
{
typ: ValueOf(func(a S) {}).Type(),
- size: 4 * PtrSize,
- argsize: 4 * PtrSize,
- retOffset: 4 * PtrSize,
+ size: 4 * goarch.PtrSize,
+ argsize: 4 * goarch.PtrSize,
+ retOffset: 4 * goarch.PtrSize,
stack: []byte{0, 0, 1, 1},
gc: []byte{0, 0, 1, 1},
},
{
rcvr: ValueOf((*byte)(nil)).Type(),
typ: ValueOf(func(a uintptr, b *int) {}).Type(),
- size: 3 * PtrSize,
- argsize: 3 * PtrSize,
- retOffset: 3 * PtrSize,
+ size: 3 * goarch.PtrSize,
+ argsize: 3 * goarch.PtrSize,
+ retOffset: 3 * goarch.PtrSize,
stack: []byte{1, 0, 1},
gc: []byte{1, 0, 1},
},
{
typ: ValueOf(func(a uintptr) {}).Type(),
- size: PtrSize,
- argsize: PtrSize,
- retOffset: PtrSize,
+ size: goarch.PtrSize,
+ argsize: goarch.PtrSize,
+ retOffset: goarch.PtrSize,
stack: []byte{},
gc: []byte{},
},
{
typ: ValueOf(func() uintptr { return 0 }).Type(),
- size: PtrSize,
+ size: goarch.PtrSize,
argsize: 0,
retOffset: 0,
stack: []byte{},
@@ -6548,9 +6549,9 @@ func TestFuncLayout(t *testing.T) {
{
rcvr: ValueOf(uintptr(0)).Type(),
typ: ValueOf(func(a uintptr) {}).Type(),
- size: 2 * PtrSize,
- argsize: 2 * PtrSize,
- retOffset: 2 * PtrSize,
+ size: 2 * goarch.PtrSize,
+ argsize: 2 * goarch.PtrSize,
+ retOffset: 2 * goarch.PtrSize,
stack: []byte{1},
gc: []byte{1},
// Note: this one is tricky, as the receiver is not a pointer. But we
@@ -6756,7 +6757,7 @@ func TestGCBits(t *testing.T) {
verifyGCBits(t, TypeOf(([][10000]Xscalar)(nil)), lit(1))
verifyGCBits(t, SliceOf(ArrayOf(10000, Tscalar)), lit(1))
- hdr := make([]byte, 8/PtrSize)
+ hdr := make([]byte, 8/goarch.PtrSize)
verifyMapBucket := func(t *testing.T, k, e Type, m interface{}, want []byte) {
verifyGCBits(t, MapBucketOf(k, e), want)
@@ -6772,7 +6773,7 @@ func TestGCBits(t *testing.T) {
join(hdr, rep(8, lit(0, 1)), rep(8, lit(1)), lit(1)))
verifyMapBucket(t, Tint64, Tptr,
map[int64]Xptr(nil),
- join(hdr, rep(8, rep(8/PtrSize, lit(0))), rep(8, lit(1)), lit(1)))
+ join(hdr, rep(8, rep(8/goarch.PtrSize, lit(0))), rep(8, lit(1)), lit(1)))
verifyMapBucket(t,
Tscalar, Tscalar,
map[Xscalar]Xscalar(nil),
@@ -6782,20 +6783,20 @@ func TestGCBits(t *testing.T) {
map[[2]Xscalarptr][3]Xptrscalar(nil),
join(hdr, rep(8*2, lit(0, 1)), rep(8*3, lit(1, 0)), lit(1)))
verifyMapBucket(t,
- ArrayOf(64/PtrSize, Tscalarptr), ArrayOf(64/PtrSize, Tptrscalar),
- map[[64 / PtrSize]Xscalarptr][64 / PtrSize]Xptrscalar(nil),
- join(hdr, rep(8*64/PtrSize, lit(0, 1)), rep(8*64/PtrSize, lit(1, 0)), lit(1)))
+ ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
+ map[[64 / goarch.PtrSize]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
+ join(hdr, rep(8*64/goarch.PtrSize, lit(0, 1)), rep(8*64/goarch.PtrSize, lit(1, 0)), lit(1)))
verifyMapBucket(t,
- ArrayOf(64/PtrSize+1, Tscalarptr), ArrayOf(64/PtrSize, Tptrscalar),
- map[[64/PtrSize + 1]Xscalarptr][64 / PtrSize]Xptrscalar(nil),
- join(hdr, rep(8, lit(1)), rep(8*64/PtrSize, lit(1, 0)), lit(1)))
+ ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
+ map[[64/goarch.PtrSize + 1]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
+ join(hdr, rep(8, lit(1)), rep(8*64/goarch.PtrSize, lit(1, 0)), lit(1)))
verifyMapBucket(t,
- ArrayOf(64/PtrSize, Tscalarptr), ArrayOf(64/PtrSize+1, Tptrscalar),
- map[[64 / PtrSize]Xscalarptr][64/PtrSize + 1]Xptrscalar(nil),
- join(hdr, rep(8*64/PtrSize, lit(0, 1)), rep(8, lit(1)), lit(1)))
+ ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
+ map[[64 / goarch.PtrSize]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
+ join(hdr, rep(8*64/goarch.PtrSize, lit(0, 1)), rep(8, lit(1)), lit(1)))
verifyMapBucket(t,
- ArrayOf(64/PtrSize+1, Tscalarptr), ArrayOf(64/PtrSize+1, Tptrscalar),
- map[[64/PtrSize + 1]Xscalarptr][64/PtrSize + 1]Xptrscalar(nil),
+ ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
+ map[[64/goarch.PtrSize + 1]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
join(hdr, rep(8, lit(1)), rep(8, lit(1)), lit(1)))
}
diff --git a/src/reflect/asm_amd64.s b/src/reflect/asm_amd64.s
index facf07516d..d21d498063 100644
--- a/src/reflect/asm_amd64.s
+++ b/src/reflect/asm_amd64.s
@@ -24,15 +24,13 @@
// See the comment on the declaration of makeFuncStub in makefunc.go
// for more details.
// No arg size here; runtime pulls arg map out of the func value.
-// makeFuncStub must be ABIInternal because it is placed directly
-// in function values.
// This frame contains two locals. See the comment above LOCAL_RETVALID.
-TEXT ·makeFuncStub<ABIInternal>(SB),(NOSPLIT|WRAPPER),$312
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$312
NO_LOCAL_POINTERS
// NO_LOCAL_POINTERS is a lie. The stack map for the two locals in this
// frame is specially handled in the runtime. See the comment above LOCAL_RETVALID.
LEAQ LOCAL_REGARGS(SP), R12
- CALL runtime·spillArgs<ABIInternal>(SB)
+ CALL runtime·spillArgs(SB)
MOVQ DX, 24(SP) // outside of moveMakeFuncArgPtrs's arg area
MOVQ DX, 0(SP)
MOVQ R12, 8(SP)
@@ -48,22 +46,20 @@ TEXT ·makeFuncStub<ABIInternal>(SB),(NOSPLIT|WRAPPER),$312
MOVQ AX, 24(SP)
CALL ·callReflect(SB)
LEAQ LOCAL_REGARGS(SP), R12
- CALL runtime·unspillArgs<ABIInternal>(SB)
+ CALL runtime·unspillArgs(SB)
RET
// methodValueCall is the code half of the function returned by makeMethodValue.
// See the comment on the declaration of methodValueCall in makefunc.go
// for more details.
// No arg size here; runtime pulls arg map out of the func value.
-// methodValueCall must be ABIInternal because it is placed directly
-// in function values.
// This frame contains two locals. See the comment above LOCAL_RETVALID.
-TEXT ·methodValueCall<ABIInternal>(SB),(NOSPLIT|WRAPPER),$312
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$312
NO_LOCAL_POINTERS
// NO_LOCAL_POINTERS is a lie. The stack map for the two locals in this
// frame is specially handled in the runtime. See the comment above LOCAL_RETVALID.
LEAQ LOCAL_REGARGS(SP), R12
- CALL runtime·spillArgs<ABIInternal>(SB)
+ CALL runtime·spillArgs(SB)
MOVQ DX, 24(SP) // outside of moveMakeFuncArgPtrs's arg area
MOVQ DX, 0(SP)
MOVQ R12, 8(SP)
@@ -79,5 +75,5 @@ TEXT ·methodValueCall<ABIInternal>(SB),(NOSPLIT|WRAPPER),$312
MOVQ AX, 24(SP)
CALL ·callMethod(SB)
LEAQ LOCAL_REGARGS(SP), R12
- CALL runtime·unspillArgs<ABIInternal>(SB)
+ CALL runtime·unspillArgs(SB)
RET
diff --git a/src/reflect/asm_arm64.s b/src/reflect/asm_arm64.s
index 5fe88e27e4..5b9b3573fa 100644
--- a/src/reflect/asm_arm64.s
+++ b/src/reflect/asm_arm64.s
@@ -5,34 +5,75 @@
#include "textflag.h"
#include "funcdata.h"
+// The frames of each of the two functions below contain two locals, at offsets
+// that are known to the runtime.
+//
+// The first local is a bool called retValid with a whole pointer-word reserved
+// for it on the stack. The purpose of this word is so that the runtime knows
+// whether the stack-allocated return space contains valid values for stack
+// scanning.
+//
+// The second local is an abi.RegArgs value whose offset is also known to the
+// runtime, so that a stack map for it can be constructed, since it contains
+// pointers visible to the GC.
+#define LOCAL_RETVALID 40
+#define LOCAL_REGARGS 48
+
+// The frame size of the functions below is
+// 32 (args of callReflect) + 8 (bool + padding) + 392 (abi.RegArgs) = 432.
+
// makeFuncStub is the code half of the function returned by MakeFunc.
// See the comment on the declaration of makeFuncStub in makefunc.go
// for more details.
// No arg size here, runtime pulls arg map out of the func value.
-TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$40
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$432
NO_LOCAL_POINTERS
+ // NO_LOCAL_POINTERS is a lie. The stack map for the two locals in this
+ // frame is specially handled in the runtime. See the comment above LOCAL_RETVALID.
+ ADD $LOCAL_REGARGS, RSP, R20
+ CALL runtime·spillArgs(SB)
+ MOVD R26, 32(RSP) // outside of moveMakeFuncArgPtrs's arg area
+ MOVD R26, 8(RSP)
+ MOVD R20, 16(RSP)
+ CALL ·moveMakeFuncArgPtrs(SB)
+ MOVD 32(RSP), R26
MOVD R26, 8(RSP)
MOVD $argframe+0(FP), R3
MOVD R3, 16(RSP)
- MOVB $0, 40(RSP)
- ADD $40, RSP, R3
+ MOVB $0, LOCAL_RETVALID(RSP)
+ ADD $LOCAL_RETVALID, RSP, R3
MOVD R3, 24(RSP)
- MOVD $0, 32(RSP)
- BL ·callReflect(SB)
+ ADD $LOCAL_REGARGS, RSP, R3
+ MOVD R3, 32(RSP)
+ CALL ·callReflect(SB)
+ ADD $LOCAL_REGARGS, RSP, R20
+ CALL runtime·unspillArgs(SB)
RET
// methodValueCall is the code half of the function returned by makeMethodValue.
// See the comment on the declaration of methodValueCall in makefunc.go
// for more details.
// No arg size here; runtime pulls arg map out of the func value.
-TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$40
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$432
NO_LOCAL_POINTERS
+ // NO_LOCAL_POINTERS is a lie. The stack map for the two locals in this
+ // frame is specially handled in the runtime. See the comment above LOCAL_RETVALID.
+ ADD $LOCAL_REGARGS, RSP, R20
+ CALL runtime·spillArgs(SB)
+ MOVD R26, 32(RSP) // outside of moveMakeFuncArgPtrs's arg area
+ MOVD R26, 8(RSP)
+ MOVD R20, 16(RSP)
+ CALL ·moveMakeFuncArgPtrs(SB)
+ MOVD 32(RSP), R26
MOVD R26, 8(RSP)
MOVD $argframe+0(FP), R3
MOVD R3, 16(RSP)
- MOVB $0, 40(RSP)
- ADD $40, RSP, R3
+ MOVB $0, LOCAL_RETVALID(RSP)
+ ADD $LOCAL_RETVALID, RSP, R3
MOVD R3, 24(RSP)
- MOVD $0, 32(RSP)
- BL ·callMethod(SB)
+ ADD $LOCAL_REGARGS, RSP, R3
+ MOVD R3, 32(RSP)
+ CALL ·callMethod(SB)
+ ADD $LOCAL_REGARGS, RSP, R20
+ CALL runtime·unspillArgs(SB)
RET
diff --git a/src/reflect/export_test.go b/src/reflect/export_test.go
index b6830a9802..01749e30d8 100644
--- a/src/reflect/export_test.go
+++ b/src/reflect/export_test.go
@@ -5,6 +5,7 @@
package reflect
import (
+ "internal/goarch"
"sync"
"unsafe"
)
@@ -22,8 +23,6 @@ func IsRO(v Value) bool {
var CallGC = &callGC
-const PtrSize = ptrSize
-
// FuncLayout calls funcLayout and returns a subset of the results for testing.
//
// Bitmaps like stack, gc, inReg, and outReg are expanded such that each bit
@@ -65,7 +64,7 @@ func FuncLayout(t Type, rcvr Type) (frametype Type, argSize, retOffset uintptr,
// Expand frame type's GC bitmap into byte-map.
ptrs = ft.ptrdata != 0
if ptrs {
- nptrs := ft.ptrdata / ptrSize
+ nptrs := ft.ptrdata / goarch.PtrSize
gcdata := ft.gcSlice(0, (nptrs+7)/8)
for i := uintptr(0); i < nptrs; i++ {
gc = append(gc, gcdata[i/8]>>(i%8)&1)
diff --git a/src/reflect/makefunc.go b/src/reflect/makefunc.go
index d53e68a359..588be8bcc1 100644
--- a/src/reflect/makefunc.go
+++ b/src/reflect/makefunc.go
@@ -52,11 +52,7 @@ func MakeFunc(typ Type, fn func(args []Value) (results []Value)) Value {
t := typ.common()
ftyp := (*funcType)(unsafe.Pointer(t))
- // Indirect Go func value (dummy) to obtain
- // actual code address. (A Go func value is a pointer
- // to a C function pointer. https://golang.org/s/go11func.)
- dummy := makeFuncStub
- code := **(**uintptr)(unsafe.Pointer(&dummy))
+ code := abi.FuncPCABI0(makeFuncStub)
// makeFuncImpl contains a stack map for use by the runtime
_, _, abi := funcLayout(ftyp, nil)
@@ -111,11 +107,7 @@ func makeMethodValue(op string, v Value) Value {
// v.Type returns the actual type of the method value.
ftyp := (*funcType)(unsafe.Pointer(v.Type().(*rtype)))
- // Indirect Go func value (dummy) to obtain
- // actual code address. (A Go func value is a pointer
- // to a C function pointer. https://golang.org/s/go11func.)
- dummy := methodValueCall
- code := **(**uintptr)(unsafe.Pointer(&dummy))
+ code := abi.FuncPCABI0(methodValueCall)
// methodValue contains a stack map for use by the runtime
_, _, abi := funcLayout(ftyp, nil)
diff --git a/src/reflect/swapper.go b/src/reflect/swapper.go
index 0cf40666b1..67b7fbe59b 100644
--- a/src/reflect/swapper.go
+++ b/src/reflect/swapper.go
@@ -5,6 +5,7 @@
package reflect
import (
+ "internal/goarch"
"internal/unsafeheader"
"unsafe"
)
@@ -36,7 +37,7 @@ func Swapper(slice interface{}) func(i, j int) {
// Some common & small cases, without using memmove:
if hasPtr {
- if size == ptrSize {
+ if size == goarch.PtrSize {
ps := *(*[]unsafe.Pointer)(v.ptr)
return func(i, j int) { ps[i], ps[j] = ps[j], ps[i] }
}
diff --git a/src/reflect/type.go b/src/reflect/type.go
index f672a7a5d5..278426da09 100644
--- a/src/reflect/type.go
+++ b/src/reflect/type.go
@@ -16,6 +16,7 @@
package reflect
import (
+ "internal/goarch"
"internal/unsafeheader"
"strconv"
"sync"
@@ -1924,13 +1925,13 @@ func MapOf(key, elem Type) Type {
}
mt.flags = 0
if ktyp.size > maxKeySize {
- mt.keysize = uint8(ptrSize)
+ mt.keysize = uint8(goarch.PtrSize)
mt.flags |= 1 // indirect key
} else {
mt.keysize = uint8(ktyp.size)
}
if etyp.size > maxValSize {
- mt.valuesize = uint8(ptrSize)
+ mt.valuesize = uint8(goarch.PtrSize)
mt.flags |= 2 // indirect value
} else {
mt.valuesize = uint8(etyp.size)
@@ -2231,31 +2232,31 @@ func bucketOf(ktyp, etyp *rtype) *rtype {
var ptrdata uintptr
var overflowPad uintptr
- size := bucketSize*(1+ktyp.size+etyp.size) + overflowPad + ptrSize
+ size := bucketSize*(1+ktyp.size+etyp.size) + overflowPad + goarch.PtrSize
if size&uintptr(ktyp.align-1) != 0 || size&uintptr(etyp.align-1) != 0 {
panic("reflect: bad size computation in MapOf")
}
if ktyp.ptrdata != 0 || etyp.ptrdata != 0 {
- nptr := (bucketSize*(1+ktyp.size+etyp.size) + ptrSize) / ptrSize
+ nptr := (bucketSize*(1+ktyp.size+etyp.size) + goarch.PtrSize) / goarch.PtrSize
mask := make([]byte, (nptr+7)/8)
- base := bucketSize / ptrSize
+ base := bucketSize / goarch.PtrSize
if ktyp.ptrdata != 0 {
emitGCMask(mask, base, ktyp, bucketSize)
}
- base += bucketSize * ktyp.size / ptrSize
+ base += bucketSize * ktyp.size / goarch.PtrSize
if etyp.ptrdata != 0 {
emitGCMask(mask, base, etyp, bucketSize)
}
- base += bucketSize * etyp.size / ptrSize
- base += overflowPad / ptrSize
+ base += bucketSize * etyp.size / goarch.PtrSize
+ base += overflowPad / goarch.PtrSize
word := base
mask[word/8] |= 1 << (word % 8)
gcdata = &mask[0]
- ptrdata = (word + 1) * ptrSize
+ ptrdata = (word + 1) * goarch.PtrSize
// overflow word must be last
if ptrdata != size {
@@ -2264,7 +2265,7 @@ func bucketOf(ktyp, etyp *rtype) *rtype {
}
b := &rtype{
- align: ptrSize,
+ align: goarch.PtrSize,
size: size,
kind: uint8(Struct),
ptrdata: ptrdata,
@@ -2288,8 +2289,8 @@ func emitGCMask(out []byte, base uintptr, typ *rtype, n uintptr) {
if typ.kind&kindGCProg != 0 {
panic("reflect: unexpected GC program")
}
- ptrs := typ.ptrdata / ptrSize
- words := typ.size / ptrSize
+ ptrs := typ.ptrdata / goarch.PtrSize
+ words := typ.size / goarch.PtrSize
mask := typ.gcSlice(0, (ptrs+7)/8)
for j := uintptr(0); j < ptrs; j++ {
if (mask[j/8]>>(j%8))&1 != 0 {
@@ -2312,7 +2313,7 @@ func appendGCProg(dst []byte, typ *rtype) []byte {
}
// Element is small with pointer mask; use as literal bits.
- ptrs := typ.ptrdata / ptrSize
+ ptrs := typ.ptrdata / goarch.PtrSize
mask := typ.gcSlice(0, (ptrs+7)/8)
// Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
@@ -2759,7 +2760,7 @@ func StructOf(fields []StructField) Type {
}
// Pad to start of this field with zeros.
if ft.offset() > off {
- n := (ft.offset() - off) / ptrSize
+ n := (ft.offset() - off) / goarch.PtrSize
prog = append(prog, 0x01, 0x00) // emit a 0 bit
if n > 1 {
prog = append(prog, 0x81) // repeat previous bit
@@ -2936,11 +2937,11 @@ func ArrayOf(length int, elem Type) Type {
array.gcdata = typ.gcdata
array.ptrdata = typ.ptrdata
- case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*ptrSize:
+ case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*goarch.PtrSize:
// Element is small with pointer mask; array is still small.
// Create direct pointer mask by turning each 1 bit in elem
// into length 1 bits in larger mask.
- mask := make([]byte, (array.ptrdata/ptrSize+7)/8)
+ mask := make([]byte, (array.ptrdata/goarch.PtrSize+7)/8)
emitGCMask(mask, 0, typ, array.len)
array.gcdata = &mask[0]
@@ -2950,8 +2951,8 @@ func ArrayOf(length int, elem Type) Type {
prog := []byte{0, 0, 0, 0} // will be length of prog
prog = appendGCProg(prog, typ)
// Pad from ptrdata to size.
- elemPtrs := typ.ptrdata / ptrSize
- elemWords := typ.size / ptrSize
+ elemPtrs := typ.ptrdata / goarch.PtrSize
+ elemWords := typ.size / goarch.PtrSize
if elemPtrs < elemWords {
// Emit literal 0 bit, then repeat as needed.
prog = append(prog, 0x01, 0x00)
@@ -3063,13 +3064,13 @@ func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, framePool *sync.Poo
// build dummy rtype holding gc program
x := &rtype{
- align: ptrSize,
+ align: goarch.PtrSize,
// Don't add spill space here; it's only necessary in
// reflectcall's frame, not in the allocated frame.
// TODO(mknyszek): Remove this comment when register
// spill space in the frame is no longer required.
- size: align(abi.retOffset+abi.ret.stackBytes, ptrSize),
- ptrdata: uintptr(abi.stackPtrs.n) * ptrSize,
+ size: align(abi.retOffset+abi.ret.stackBytes, goarch.PtrSize),
+ ptrdata: uintptr(abi.stackPtrs.n) * goarch.PtrSize,
}
if abi.stackPtrs.n > 0 {
x.gcdata = &abi.stackPtrs.data[0]
@@ -3124,14 +3125,14 @@ func addTypeBits(bv *bitVector, offset uintptr, t *rtype) {
switch Kind(t.kind & kindMask) {
case Chan, Func, Map, Ptr, Slice, String, UnsafePointer:
// 1 pointer at start of representation
- for bv.n < uint32(offset/uintptr(ptrSize)) {
+ for bv.n < uint32(offset/uintptr(goarch.PtrSize)) {
bv.append(0)
}
bv.append(1)
case Interface:
// 2 pointers
- for bv.n < uint32(offset/uintptr(ptrSize)) {
+ for bv.n < uint32(offset/uintptr(goarch.PtrSize)) {
bv.append(0)
}
bv.append(1)
diff --git a/src/reflect/value.go b/src/reflect/value.go
index 6f878eba5b..b4b2d2e38b 100644
--- a/src/reflect/value.go
+++ b/src/reflect/value.go
@@ -6,6 +6,7 @@ package reflect
import (
"internal/abi"
+ "internal/goarch"
"internal/itoa"
"internal/unsafeheader"
"math"
@@ -13,8 +14,6 @@ import (
"unsafe"
)
-const ptrSize = 4 << (^uintptr(0) >> 63) // unsafe.Sizeof(uintptr(0)) but an ideal const
-
// Value is the reflection interface to a Go value.
//
// Not all methods apply to all kinds of values. Restrictions,
@@ -94,7 +93,7 @@ func (f flag) ro() flag {
// v.Kind() must be Ptr, Map, Chan, Func, or UnsafePointer
// if v.Kind() == Ptr, the base type must not be go:notinheap.
func (v Value) pointer() unsafe.Pointer {
- if v.typ.size != ptrSize || !v.typ.pointers() {
+ if v.typ.size != goarch.PtrSize || !v.typ.pointers() {
panic("can't call pointer on a non-pointer Value")
}
if v.flag&flagIndir != 0 {
@@ -533,7 +532,7 @@ func (v Value) call(op string, in []Value) []Value {
}
// TODO(mknyszek): Remove this when we no longer have
// caller reserved spill space.
- frameSize = align(frameSize, ptrSize)
+ frameSize = align(frameSize, goarch.PtrSize)
frameSize += abi.spill
// Mark pointers in registers for the return path.
@@ -958,9 +957,6 @@ func callMethod(ctxt *methodValue, frame unsafe.Pointer, retValid *bool, regs *a
// 2. Stack -> registers translation.
// 3. Registers -> stack translation.
// 4. Registers -> registers translation.
- // TODO(mknyszek): Cases 2 and 3 below only work on little endian
- // architectures. This is OK for now, but this needs to be fixed
- // before supporting the register ABI on big endian architectures.
// If the value ABI passes the value on the stack,
// then the method ABI does too, because it has strictly
@@ -986,9 +982,9 @@ func callMethod(ctxt *methodValue, frame unsafe.Pointer, retValid *bool, regs *a
methodRegs.Ptrs[mStep.ireg] = *(*unsafe.Pointer)(from)
fallthrough // We need to make sure this ends up in Ints, too.
case abiStepIntReg:
- memmove(unsafe.Pointer(&methodRegs.Ints[mStep.ireg]), from, mStep.size)
+ memmove(methodRegs.IntRegArgAddr(mStep.ireg, mStep.size), from, mStep.size)
case abiStepFloatReg:
- memmove(unsafe.Pointer(&methodRegs.Floats[mStep.freg]), from, mStep.size)
+ memmove(methodRegs.FloatRegArgAddr(mStep.freg, mStep.size), from, mStep.size)
default:
panic("unexpected method step")
}
@@ -1004,9 +1000,9 @@ func callMethod(ctxt *methodValue, frame unsafe.Pointer, retValid *bool, regs *a
// Do the pointer copy directly so we get a write barrier.
*(*unsafe.Pointer)(to) = valueRegs.Ptrs[vStep.ireg]
case abiStepIntReg:
- memmove(to, unsafe.Pointer(&valueRegs.Ints[vStep.ireg]), vStep.size)
+ memmove(to, valueRegs.IntRegArgAddr(vStep.ireg, vStep.size), vStep.size)
case abiStepFloatReg:
- memmove(to, unsafe.Pointer(&valueRegs.Floats[vStep.freg]), vStep.size)
+ memmove(to, valueRegs.FloatRegArgAddr(vStep.freg, vStep.size), vStep.size)
default:
panic("unexpected value step")
}
@@ -1043,7 +1039,7 @@ func callMethod(ctxt *methodValue, frame unsafe.Pointer, retValid *bool, regs *a
methodFrameSize := methodFrameType.size
// TODO(mknyszek): Remove this when we no longer have
// caller reserved spill space.
- methodFrameSize = align(methodFrameSize, ptrSize)
+ methodFrameSize = align(methodFrameSize, goarch.PtrSize)
methodFrameSize += methodABI.spill
// Mark pointers in registers for the return path.
diff --git a/src/runtime/alg.go b/src/runtime/alg.go
index 39c7426842..978a3b85dc 100644
--- a/src/runtime/alg.go
+++ b/src/runtime/alg.go
@@ -6,13 +6,13 @@ package runtime
import (
"internal/cpu"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
const (
- c0 = uintptr((8-sys.PtrSize)/4*2860486313 + (sys.PtrSize-4)/4*33054211828000289)
- c1 = uintptr((8-sys.PtrSize)/4*3267000013 + (sys.PtrSize-4)/4*23344194077549503)
+ c0 = uintptr((8-goarch.PtrSize)/4*2860486313 + (goarch.PtrSize-4)/4*33054211828000289)
+ c1 = uintptr((8-goarch.PtrSize)/4*3267000013 + (goarch.PtrSize-4)/4*23344194077549503)
)
func memhash0(p unsafe.Pointer, h uintptr) uintptr {
@@ -300,7 +300,7 @@ func ifaceHash(i interface {
return interhash(noescape(unsafe.Pointer(&i)), seed)
}
-const hashRandomBytes = sys.PtrSize / 4 * 64
+const hashRandomBytes = goarch.PtrSize / 4 * 64
// used in asm_{386,amd64,arm64}.s to seed the hash function
var aeskeysched [hashRandomBytes]byte
@@ -321,7 +321,7 @@ func alginit() {
initAlgAES()
return
}
- getRandomData((*[len(hashkey) * sys.PtrSize]byte)(unsafe.Pointer(&hashkey))[:])
+ getRandomData((*[len(hashkey) * goarch.PtrSize]byte)(unsafe.Pointer(&hashkey))[:])
hashkey[0] |= 1 // make sure these numbers are odd
hashkey[1] |= 1
hashkey[2] |= 1
@@ -337,7 +337,7 @@ func initAlgAES() {
// Note: These routines perform the read with a native endianness.
func readUnaligned32(p unsafe.Pointer) uint32 {
q := (*[4]byte)(p)
- if sys.BigEndian {
+ if goarch.BigEndian {
return uint32(q[3]) | uint32(q[2])<<8 | uint32(q[1])<<16 | uint32(q[0])<<24
}
return uint32(q[0]) | uint32(q[1])<<8 | uint32(q[2])<<16 | uint32(q[3])<<24
@@ -345,7 +345,7 @@ func readUnaligned32(p unsafe.Pointer) uint32 {
func readUnaligned64(p unsafe.Pointer) uint64 {
q := (*[8]byte)(p)
- if sys.BigEndian {
+ if goarch.BigEndian {
return uint64(q[7]) | uint64(q[6])<<8 | uint64(q[5])<<16 | uint64(q[4])<<24 |
uint64(q[3])<<32 | uint64(q[2])<<40 | uint64(q[1])<<48 | uint64(q[0])<<56
}
diff --git a/src/runtime/asm.s b/src/runtime/asm.s
index 72c744925d..0e14fcd3e6 100644
--- a/src/runtime/asm.s
+++ b/src/runtime/asm.s
@@ -13,6 +13,6 @@ DATA runtime·no_pointers_stackmap+0x04(SB)/4, $0
GLOBL runtime·no_pointers_stackmap(SB),RODATA, $8
#ifndef GOARCH_amd64
-TEXT ·sigpanic0<ABIInternal>(SB),NOSPLIT,$0-0
+TEXT ·sigpanic0(SB),NOSPLIT,$0-0
JMP ·sigpanic<ABIInternal>(SB)
#endif
diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s
index ec5ea58028..11c60309f4 100644
--- a/src/runtime/asm_386.s
+++ b/src/runtime/asm_386.s
@@ -244,10 +244,8 @@ ok:
// create a new goroutine to start program
PUSHL $runtime·mainPC(SB) // entry
- PUSHL $0 // arg size
CALL runtime·newproc(SB)
POPL AX
- POPL AX
// start this M
CALL runtime·mstart(SB)
@@ -584,26 +582,6 @@ TEXT ·publicationBarrier(SB),NOSPLIT,$0-0
// compile barrier.
RET
-// void jmpdefer(fn, sp);
-// called from deferreturn.
-// 1. pop the caller
-// 2. sub 5 bytes (the length of CALL & a 32 bit displacement) from the callers
-// return (when building for shared libraries, subtract 16 bytes -- 5 bytes
-// for CALL & displacement to call __x86.get_pc_thunk.cx, 6 bytes for the
-// LEAL to load the offset into BX, and finally 5 for the call & displacement)
-// 3. jmp to the argument
-TEXT runtime·jmpdefer(SB), NOSPLIT, $0-8
- MOVL fv+0(FP), DX // fn
- MOVL argp+4(FP), BX // caller sp
- LEAL -4(BX), SP // caller sp after CALL
-#ifdef GOBUILDMODE_shared
- SUBL $16, (SP) // return to CALL again
-#else
- SUBL $5, (SP) // return to CALL again
-#endif
- MOVL 0(DX), BX
- JMP BX // but first run the deferred function
-
// Save state of caller into g->sched,
// but using fake PC from systemstack_switch.
// Must only be called from functions with no locals ($0)
diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s
index 14f29e1964..2083ecb53e 100644
--- a/src/runtime/asm_amd64.s
+++ b/src/runtime/asm_amd64.s
@@ -214,10 +214,8 @@ ok:
// create a new goroutine to start program
MOVQ $runtime·mainPC(SB), AX // entry
PUSHQ AX
- PUSHQ $0 // arg size
CALL runtime·newproc(SB)
POPQ AX
- POPQ AX
// start this M
CALL runtime·mstart(SB)
@@ -279,7 +277,6 @@ TEXT gogo<>(SB), NOSPLIT, $0
// Switch to m->g0's stack, call fn(g).
// Fn must never return. It should gogo(&g->sched)
// to keep running g.
-#ifdef GOEXPERIMENT_regabiargs
TEXT runtime·mcall<ABIInternal>(SB), NOSPLIT, $0-8
MOVQ AX, DX // DX = fn
@@ -308,38 +305,6 @@ goodm:
POPQ AX
JMP runtime·badmcall2(SB)
RET
-#else
-TEXT runtime·mcall(SB), NOSPLIT, $0-8
- MOVQ fn+0(FP), DI
-
- get_tls(CX)
- MOVQ g(CX), AX // save state in g->sched
- MOVQ 0(SP), BX // caller's PC
- MOVQ BX, (g_sched+gobuf_pc)(AX)
- LEAQ fn+0(FP), BX // caller's SP
- MOVQ BX, (g_sched+gobuf_sp)(AX)
- MOVQ BP, (g_sched+gobuf_bp)(AX)
-
- // switch to m->g0 & its stack, call fn
- MOVQ g(CX), BX
- MOVQ g_m(BX), BX
- MOVQ m_g0(BX), SI
- CMPQ SI, AX // if g == m->g0 call badmcall
- JNE 3(PC)
- MOVQ $runtime·badmcall(SB), AX
- JMP AX
- MOVQ SI, g(CX) // g = m->g0
- MOVQ SI, R14 // set the g register
- MOVQ (g_sched+gobuf_sp)(SI), SP // sp = m->g0->sched.sp
- PUSHQ AX
- MOVQ DI, DX
- MOVQ 0(DI), DI
- CALL DI
- POPQ AX
- MOVQ $runtime·badmcall2(SB), AX
- JMP AX
- RET
-#endif
// systemstack_switch is a dummy routine that systemstack leaves at the bottom
// of the G stack. We need to distinguish the routine that
@@ -467,9 +432,8 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0
MOVL $0, DX
JMP runtime·morestack(SB)
-#ifdef GOEXPERIMENT_regabireflect
// spillArgs stores return values from registers to a *internal/abi.RegArgs in R12.
-TEXT ·spillArgs<ABIInternal>(SB),NOSPLIT,$0-0
+TEXT ·spillArgs(SB),NOSPLIT,$0-0
MOVQ AX, 0(R12)
MOVQ BX, 8(R12)
MOVQ CX, 16(R12)
@@ -497,7 +461,7 @@ TEXT ·spillArgs<ABIInternal>(SB),NOSPLIT,$0-0
RET
// unspillArgs loads args into registers from a *internal/abi.RegArgs in R12.
-TEXT ·unspillArgs<ABIInternal>(SB),NOSPLIT,$0-0
+TEXT ·unspillArgs(SB),NOSPLIT,$0-0
MOVQ 0(R12), AX
MOVQ 8(R12), BX
MOVQ 16(R12), CX
@@ -523,15 +487,6 @@ TEXT ·unspillArgs<ABIInternal>(SB),NOSPLIT,$0-0
MOVQ 176(R12), X13
MOVQ 184(R12), X14
RET
-#else
-// spillArgs stores return values from registers to a pointer in R12.
-TEXT ·spillArgs<ABIInternal>(SB),NOSPLIT,$0-0
- RET
-
-// unspillArgs loads args into registers from a pointer in R12.
-TEXT ·unspillArgs<ABIInternal>(SB),NOSPLIT,$0-0
- RET
-#endif
// reflectcall: call a function with the given argument list
// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
@@ -588,7 +543,7 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
REP;MOVSB; \
/* set up argument registers */ \
MOVQ regArgs+40(FP), R12; \
- CALL ·unspillArgs<ABIInternal>(SB); \
+ CALL ·unspillArgs(SB); \
/* call function */ \
MOVQ f+8(FP), DX; \
PCDATA $PCDATA_StackMapIndex, $0; \
@@ -596,7 +551,7 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
CALL R12; \
/* copy register return values back */ \
MOVQ regArgs+40(FP), R12; \
- CALL ·spillArgs<ABIInternal>(SB); \
+ CALL ·spillArgs(SB); \
MOVLQZX stackArgsSize+24(FP), CX; \
MOVLQZX stackRetOffset+28(FP), BX; \
MOVQ stackArgs+16(FP), DI; \
@@ -664,31 +619,12 @@ TEXT ·publicationBarrier(SB),NOSPLIT,$0-0
// compile barrier.
RET
-// func jmpdefer(fv *funcval, argp uintptr)
-// argp is a caller SP.
-// called from deferreturn.
-// 1. pop the caller
-// 2. sub 5 bytes from the callers return
-// 3. jmp to the argument
-TEXT runtime·jmpdefer(SB), NOSPLIT, $0-16
- MOVQ fv+0(FP), DX // fn
- MOVQ argp+8(FP), BX // caller sp
- LEAQ -8(BX), SP // caller sp after CALL
- MOVQ -8(SP), BP // restore BP as if deferreturn returned (harmless if framepointers not in use)
- SUBQ $5, (SP) // return to CALL again
- MOVQ 0(DX), BX
- JMP BX // but first run the deferred function
-
// Save state of caller into g->sched,
// but using fake PC from systemstack_switch.
// Must only be called from functions with no locals ($0)
// or else unwinding from systemstack_switch is incorrect.
// Smashes R9.
TEXT gosave_systemstack_switch<>(SB),NOSPLIT,$0
-#ifndef GOEXPERIMENT_regabig
- get_tls(R14)
- MOVQ g(R14), R14
-#endif
MOVQ $runtime·systemstack_switch(SB), R9
MOVQ R9, (g_sched+gobuf_pc)(R14)
LEAQ 8(SP), R9
@@ -1009,61 +945,34 @@ done:
// func memhash(p unsafe.Pointer, h, s uintptr) uintptr
// hash function using AES hardware instructions
TEXT runtime·memhash<ABIInternal>(SB),NOSPLIT,$0-32
-#ifdef GOEXPERIMENT_regabiargs
// AX = ptr to data
// BX = seed
// CX = size
-#endif
CMPB runtime·useAeshash(SB), $0
JEQ noaes
-#ifndef GOEXPERIMENT_regabiargs
- MOVQ p+0(FP), AX // ptr to data
- MOVQ s+16(FP), CX // size
- LEAQ ret+24(FP), DX
-#endif
JMP aeshashbody<>(SB)
noaes:
JMP runtime·memhashFallback<ABIInternal>(SB)
// func strhash(p unsafe.Pointer, h uintptr) uintptr
TEXT runtime·strhash<ABIInternal>(SB),NOSPLIT,$0-24
-#ifdef GOEXPERIMENT_regabiargs
// AX = ptr to string struct
// BX = seed
-#endif
CMPB runtime·useAeshash(SB), $0
JEQ noaes
-#ifndef GOEXPERIMENT_regabiargs
- MOVQ p+0(FP), AX // ptr to string struct
-#endif
MOVQ 8(AX), CX // length of string
MOVQ (AX), AX // string data
-#ifndef GOEXPERIMENT_regabiargs
- LEAQ ret+16(FP), DX
-#endif
JMP aeshashbody<>(SB)
noaes:
JMP runtime·strhashFallback<ABIInternal>(SB)
// AX: data
-#ifdef GOEXPERIMENT_regabiargs
// BX: hash seed
-#else
-// h+8(FP): hash seed
-#endif
// CX: length
-#ifdef GOEXPERIMENT_regabiargs
// At return: AX = return value
-#else
-// DX: address to put return value
-#endif
TEXT aeshashbody<>(SB),NOSPLIT,$0-0
// Fill an SSE register with our seeds.
-#ifdef GOEXPERIMENT_regabiargs
MOVQ BX, X0 // 64 bits of per-table hash seed
-#else
- MOVQ h+8(FP), X0 // 64 bits of per-table hash seed
-#endif
PINSRW $4, CX, X0 // 16 bits of length
PSHUFHW $0, X0, X0 // repeat length 4 times total
MOVO X0, X1 // save unscrambled seed
@@ -1100,11 +1009,7 @@ final1:
AESENC X1, X1 // scramble combo 3 times
AESENC X1, X1
AESENC X1, X1
-#ifdef GOEXPERIMENT_regabiargs
MOVQ X1, AX // return X1
-#else
- MOVQ X1, (DX)
-#endif
RET
endofpage:
@@ -1120,11 +1025,7 @@ endofpage:
aes0:
// Return scrambled input seed
AESENC X0, X0
-#ifdef GOEXPERIMENT_regabiargs
MOVQ X0, AX // return X0
-#else
- MOVQ X0, (DX)
-#endif
RET
aes16:
@@ -1154,11 +1055,7 @@ aes17to32:
// combine results
PXOR X3, X2
-#ifdef GOEXPERIMENT_regabiargs
MOVQ X2, AX // return X2
-#else
- MOVQ X2, (DX)
-#endif
RET
aes33to64:
@@ -1200,11 +1097,7 @@ aes33to64:
PXOR X6, X4
PXOR X7, X5
PXOR X5, X4
-#ifdef GOEXPERIMENT_regabiargs
MOVQ X4, AX // return X4
-#else
- MOVQ X4, (DX)
-#endif
RET
aes65to128:
@@ -1286,15 +1179,9 @@ aes65to128:
PXOR X10, X8
PXOR X11, X9
PXOR X9, X8
-#ifdef GOEXPERIMENT_regabig
// X15 must be zero on return
PXOR X15, X15
-#endif
-#ifdef GOEXPERIMENT_regabiargs
MOVQ X8, AX // return X8
-#else
- MOVQ X8, (DX)
-#endif
RET
aes129plus:
@@ -1410,41 +1297,24 @@ aesloop:
PXOR X10, X8
PXOR X11, X9
PXOR X9, X8
-#ifdef GOEXPERIMENT_regabig
// X15 must be zero on return
PXOR X15, X15
-#endif
-#ifdef GOEXPERIMENT_regabiargs
MOVQ X8, AX // return X8
-#else
- MOVQ X8, (DX)
-#endif
RET
// func memhash32(p unsafe.Pointer, h uintptr) uintptr
// ABIInternal for performance.
TEXT runtime·memhash32<ABIInternal>(SB),NOSPLIT,$0-24
-#ifdef GOEXPERIMENT_regabiargs
// AX = ptr to data
// BX = seed
-#endif
CMPB runtime·useAeshash(SB), $0
JEQ noaes
-#ifdef GOEXPERIMENT_regabiargs
MOVQ BX, X0 // X0 = seed
-#else
- MOVQ p+0(FP), AX // ptr to data
- MOVQ h+8(FP), X0 // seed
-#endif
PINSRD $2, (AX), X0 // data
AESENC runtime·aeskeysched+0(SB), X0
AESENC runtime·aeskeysched+16(SB), X0
AESENC runtime·aeskeysched+32(SB), X0
-#ifdef GOEXPERIMENT_regabiargs
MOVQ X0, AX // return X0
-#else
- MOVQ X0, ret+16(FP)
-#endif
RET
noaes:
JMP runtime·memhash32Fallback<ABIInternal>(SB)
@@ -1452,28 +1322,16 @@ noaes:
// func memhash64(p unsafe.Pointer, h uintptr) uintptr
// ABIInternal for performance.
TEXT runtime·memhash64<ABIInternal>(SB),NOSPLIT,$0-24
-#ifdef GOEXPERIMENT_regabiargs
// AX = ptr to data
// BX = seed
-#else
-#endif
CMPB runtime·useAeshash(SB), $0
JEQ noaes
-#ifdef GOEXPERIMENT_regabiargs
MOVQ BX, X0 // X0 = seed
-#else
- MOVQ p+0(FP), AX // ptr to data
- MOVQ h+8(FP), X0 // seed
-#endif
PINSRQ $1, (AX), X0 // data
AESENC runtime·aeskeysched+0(SB), X0
AESENC runtime·aeskeysched+16(SB), X0
AESENC runtime·aeskeysched+32(SB), X0
-#ifdef GOEXPERIMENT_regabiargs
MOVQ X0, AX // return X0
-#else
- MOVQ X0, ret+16(FP)
-#endif
RET
noaes:
JMP runtime·memhash64Fallback<ABIInternal>(SB)
@@ -1596,10 +1454,10 @@ TEXT runtime·addmoduledata(SB),NOSPLIT,$0-0
// This function is injected from the signal handler for panicking
// signals. It is quite painful to set X15 in the signal context,
// so we do it here.
-TEXT ·sigpanic0<ABIInternal>(SB),NOSPLIT,$0-0
-#ifdef GOEXPERIMENT_regabig
+TEXT ·sigpanic0(SB),NOSPLIT,$0-0
get_tls(R14)
MOVQ g(R14), R14
+#ifndef GOOS_plan9
XORPS X15, X15
#endif
JMP ·sigpanic<ABIInternal>(SB)
@@ -1619,13 +1477,7 @@ TEXT runtime·gcWriteBarrier<ABIInternal>(SB),NOSPLIT,$112
MOVQ R13, 104(SP)
// TODO: Consider passing g.m.p in as an argument so they can be shared
// across a sequence of write barriers.
-#ifdef GOEXPERIMENT_regabig
MOVQ g_m(R14), R13
-#else
- get_tls(R13)
- MOVQ g(R13), R13
- MOVQ g_m(R13), R13
-#endif
MOVQ m_p(R13), R13
MOVQ (p_wbBuf+wbBuf_next)(R13), R12
// Increment wbBuf.next position.
@@ -1956,146 +1808,61 @@ TEXT runtime·debugCallPanicked(SB),NOSPLIT,$16-16
// The tail call makes these stubs disappear in backtraces.
// Defined as ABIInternal since they do not use the stack-based Go ABI.
TEXT runtime·panicIndex<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVQ CX, BX
-#else
- MOVQ AX, x+0(FP)
- MOVQ CX, y+8(FP)
-#endif
JMP runtime·goPanicIndex<ABIInternal>(SB)
TEXT runtime·panicIndexU<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVQ CX, BX
-#else
- MOVQ AX, x+0(FP)
- MOVQ CX, y+8(FP)
-#endif
JMP runtime·goPanicIndexU<ABIInternal>(SB)
TEXT runtime·panicSliceAlen<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVQ CX, AX
MOVQ DX, BX
-#else
- MOVQ CX, x+0(FP)
- MOVQ DX, y+8(FP)
-#endif
JMP runtime·goPanicSliceAlen<ABIInternal>(SB)
TEXT runtime·panicSliceAlenU<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVQ CX, AX
MOVQ DX, BX
-#else
- MOVQ CX, x+0(FP)
- MOVQ DX, y+8(FP)
-#endif
JMP runtime·goPanicSliceAlenU<ABIInternal>(SB)
TEXT runtime·panicSliceAcap<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVQ CX, AX
MOVQ DX, BX
-#else
- MOVQ CX, x+0(FP)
- MOVQ DX, y+8(FP)
-#endif
JMP runtime·goPanicSliceAcap<ABIInternal>(SB)
TEXT runtime·panicSliceAcapU<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVQ CX, AX
MOVQ DX, BX
-#else
- MOVQ CX, x+0(FP)
- MOVQ DX, y+8(FP)
-#endif
JMP runtime·goPanicSliceAcapU<ABIInternal>(SB)
TEXT runtime·panicSliceB<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVQ CX, BX
-#else
- MOVQ AX, x+0(FP)
- MOVQ CX, y+8(FP)
-#endif
JMP runtime·goPanicSliceB<ABIInternal>(SB)
TEXT runtime·panicSliceBU<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVQ CX, BX
-#else
- MOVQ AX, x+0(FP)
- MOVQ CX, y+8(FP)
-#endif
JMP runtime·goPanicSliceBU<ABIInternal>(SB)
TEXT runtime·panicSlice3Alen<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVQ DX, AX
-#else
- MOVQ DX, x+0(FP)
- MOVQ BX, y+8(FP)
-#endif
JMP runtime·goPanicSlice3Alen<ABIInternal>(SB)
TEXT runtime·panicSlice3AlenU<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVQ DX, AX
-#else
- MOVQ DX, x+0(FP)
- MOVQ BX, y+8(FP)
-#endif
JMP runtime·goPanicSlice3AlenU<ABIInternal>(SB)
TEXT runtime·panicSlice3Acap<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVQ DX, AX
-#else
- MOVQ DX, x+0(FP)
- MOVQ BX, y+8(FP)
-#endif
JMP runtime·goPanicSlice3Acap<ABIInternal>(SB)
TEXT runtime·panicSlice3AcapU<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVQ DX, AX
-#else
- MOVQ DX, x+0(FP)
- MOVQ BX, y+8(FP)
-#endif
JMP runtime·goPanicSlice3AcapU<ABIInternal>(SB)
TEXT runtime·panicSlice3B<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVQ CX, AX
MOVQ DX, BX
-#else
- MOVQ CX, x+0(FP)
- MOVQ DX, y+8(FP)
-#endif
JMP runtime·goPanicSlice3B<ABIInternal>(SB)
TEXT runtime·panicSlice3BU<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVQ CX, AX
MOVQ DX, BX
-#else
- MOVQ CX, x+0(FP)
- MOVQ DX, y+8(FP)
-#endif
JMP runtime·goPanicSlice3BU<ABIInternal>(SB)
TEXT runtime·panicSlice3C<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVQ CX, BX
-#else
- MOVQ AX, x+0(FP)
- MOVQ CX, y+8(FP)
-#endif
JMP runtime·goPanicSlice3C<ABIInternal>(SB)
TEXT runtime·panicSlice3CU<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVQ CX, BX
-#else
- MOVQ AX, x+0(FP)
- MOVQ CX, y+8(FP)
-#endif
JMP runtime·goPanicSlice3CU<ABIInternal>(SB)
TEXT runtime·panicSliceConvert<ABIInternal>(SB),NOSPLIT,$0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVQ DX, AX
-#else
- MOVQ DX, x+0(FP)
- MOVQ BX, y+8(FP)
-#endif
JMP runtime·goPanicSliceConvert<ABIInternal>(SB)
#ifdef GOOS_android
diff --git a/src/runtime/asm_arm.s b/src/runtime/asm_arm.s
index 6d3573d68f..a1164781d2 100644
--- a/src/runtime/asm_arm.s
+++ b/src/runtime/asm_arm.s
@@ -168,14 +168,13 @@ TEXT runtime·rt0_go(SB),NOSPLIT|NOFRAME|TOPFRAME,$0
BL runtime·schedinit(SB)
// create a new goroutine to start program
+ SUB $8, R13
MOVW $runtime·mainPC(SB), R0
- MOVW.W R0, -4(R13)
- MOVW $8, R0
- MOVW.W R0, -4(R13)
+ MOVW R0, 4(R13) // arg 1: fn
MOVW $0, R0
- MOVW.W R0, -4(R13) // push $0 as guard
+ MOVW R0, 0(R13) // dummy LR
BL runtime·newproc(SB)
- MOVW $12(R13), R13 // pop args and LR
+ ADD $8, R13 // pop args and LR
// start this M
BL runtime·mstart(SB)
@@ -507,20 +506,6 @@ CALLFN(·call268435456, 268435456)
CALLFN(·call536870912, 536870912)
CALLFN(·call1073741824, 1073741824)
-// void jmpdefer(fn, sp);
-// called from deferreturn.
-// 1. grab stored LR for caller
-// 2. sub 4 bytes to get back to BL deferreturn
-// 3. B to fn
-TEXT runtime·jmpdefer(SB),NOSPLIT,$0-8
- MOVW 0(R13), LR
- MOVW $-4(LR), LR // BL deferreturn
- MOVW fv+0(FP), R7
- MOVW argp+4(FP), R13
- MOVW $-4(R13), R13 // SP is 4 below argp, due to saved LR
- MOVW 0(R7), R1
- B (R1)
-
// Save state of caller into g->sched,
// but using fake PC from systemstack_switch.
// Must only be called from functions with no locals ($0)
diff --git a/src/runtime/asm_arm64.s b/src/runtime/asm_arm64.s
index 2d495397a8..e51ce2f831 100644
--- a/src/runtime/asm_arm64.s
+++ b/src/runtime/asm_arm64.s
@@ -87,14 +87,11 @@ nocgo:
// create a new goroutine to start program
MOVD $runtime·mainPC(SB), R0 // entry
- MOVD RSP, R7
- MOVD.W $0, -8(R7)
- MOVD.W R0, -8(R7)
- MOVD.W $0, -8(R7)
- MOVD.W $0, -8(R7)
- MOVD R7, RSP
+ SUB $16, RSP
+ MOVD R0, 8(RSP) // arg
+ MOVD $0, 0(RSP) // dummy LR
BL runtime·newproc(SB)
- ADD $32, RSP
+ ADD $16, RSP
// start this M
BL runtime·mstart(SB)
@@ -103,7 +100,7 @@ nocgo:
MOVD R0, (R0) // boom
UNDEF
-DATA runtime·mainPC+0(SB)/8,$runtime·main(SB)
+DATA runtime·mainPC+0(SB)/8,$runtime·main<ABIInternal>(SB)
GLOBL runtime·mainPC(SB),RODATA,$8
TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0
@@ -152,7 +149,13 @@ TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0
// Switch to m->g0's stack, call fn(g).
// Fn must never return. It should gogo(&g->sched)
// to keep running g.
-TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8
+TEXT runtime·mcall<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-8
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R0, R26 // context
+#else
+ MOVD fn+0(FP), R26 // context
+#endif
+
// Save caller state in g->sched
MOVD RSP, R0
MOVD R0, (g_sched+gobuf_sp)(g)
@@ -168,14 +171,18 @@ TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8
CMP g, R3
BNE 2(PC)
B runtime·badmcall(SB)
- MOVD fn+0(FP), R26 // context
- MOVD 0(R26), R4 // code pointer
+
MOVD (g_sched+gobuf_sp)(g), R0
MOVD R0, RSP // sp = m->g0->sched.sp
MOVD (g_sched+gobuf_bp)(g), R29
- MOVD R3, -8(RSP)
- MOVD $0, -16(RSP)
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R3, R0 // arg = g
+#else
+ MOVD R3, -8(RSP) // arg = g
+#endif
+ MOVD $0, -16(RSP) // dummy LR
SUB $16, RSP
+ MOVD 0(R26), R4 // code pointer
BL (R4)
B runtime·badmcall2(SB)
@@ -310,6 +317,86 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
MOVW $0, R26
B runtime·morestack(SB)
+#ifdef GOEXPERIMENT_regabireflect
+// spillArgs stores return values from registers to a *internal/abi.RegArgs in R20.
+TEXT ·spillArgs(SB),NOSPLIT,$0-0
+ MOVD R0, (0*8)(R20)
+ MOVD R1, (1*8)(R20)
+ MOVD R2, (2*8)(R20)
+ MOVD R3, (3*8)(R20)
+ MOVD R4, (4*8)(R20)
+ MOVD R5, (5*8)(R20)
+ MOVD R6, (6*8)(R20)
+ MOVD R7, (7*8)(R20)
+ MOVD R8, (8*8)(R20)
+ MOVD R9, (9*8)(R20)
+ MOVD R10, (10*8)(R20)
+ MOVD R11, (11*8)(R20)
+ MOVD R12, (12*8)(R20)
+ MOVD R13, (13*8)(R20)
+ MOVD R14, (14*8)(R20)
+ MOVD R15, (15*8)(R20)
+ FMOVD F0, (16*8)(R20)
+ FMOVD F1, (17*8)(R20)
+ FMOVD F2, (18*8)(R20)
+ FMOVD F3, (19*8)(R20)
+ FMOVD F4, (20*8)(R20)
+ FMOVD F5, (21*8)(R20)
+ FMOVD F6, (22*8)(R20)
+ FMOVD F7, (23*8)(R20)
+ FMOVD F8, (24*8)(R20)
+ FMOVD F9, (25*8)(R20)
+ FMOVD F10, (26*8)(R20)
+ FMOVD F11, (27*8)(R20)
+ FMOVD F12, (28*8)(R20)
+ FMOVD F13, (29*8)(R20)
+ FMOVD F14, (30*8)(R20)
+ FMOVD F15, (31*8)(R20)
+ RET
+
+// unspillArgs loads args into registers from a *internal/abi.RegArgs in R20.
+TEXT ·unspillArgs(SB),NOSPLIT,$0-0
+ MOVD (0*8)(R20), R0
+ MOVD (1*8)(R20), R1
+ MOVD (2*8)(R20), R2
+ MOVD (3*8)(R20), R3
+ MOVD (4*8)(R20), R4
+ MOVD (5*8)(R20), R5
+ MOVD (6*8)(R20), R6
+ MOVD (7*8)(R20), R7
+ MOVD (8*8)(R20), R8
+ MOVD (9*8)(R20), R9
+ MOVD (10*8)(R20), R10
+ MOVD (11*8)(R20), R11
+ MOVD (12*8)(R20), R12
+ MOVD (13*8)(R20), R13
+ MOVD (14*8)(R20), R14
+ MOVD (15*8)(R20), R15
+ FMOVD (16*8)(R20), F0
+ FMOVD (17*8)(R20), F1
+ FMOVD (18*8)(R20), F2
+ FMOVD (19*8)(R20), F3
+ FMOVD (20*8)(R20), F4
+ FMOVD (21*8)(R20), F5
+ FMOVD (22*8)(R20), F6
+ FMOVD (23*8)(R20), F7
+ FMOVD (24*8)(R20), F8
+ FMOVD (25*8)(R20), F9
+ FMOVD (26*8)(R20), F10
+ FMOVD (27*8)(R20), F11
+ FMOVD (28*8)(R20), F12
+ FMOVD (29*8)(R20), F13
+ FMOVD (30*8)(R20), F14
+ FMOVD (31*8)(R20), F15
+ RET
+#else
+TEXT ·spillArgs(SB),NOSPLIT,$0-0
+ RET
+
+TEXT ·unspillArgs(SB),NOSPLIT,$0-0
+ RET
+#endif
+
// reflectcall: call a function with the given argument list
// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
// we don't have variable-sized frames, so we use a small number
@@ -381,12 +468,17 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
MOVBU.P R7, 1(R5); \
CMP R5, R6; \
BNE -3(PC); \
+ /* set up argument registers */ \
+ MOVD regArgs+40(FP), R20; \
+ CALL ·unspillArgs(SB); \
/* call function */ \
MOVD f+8(FP), R26; \
- MOVD (R26), R0; \
- PCDATA $PCDATA_StackMapIndex, $0; \
- BL (R0); \
+ MOVD (R26), R20; \
+ PCDATA $PCDATA_StackMapIndex, $0; \
+ BL (R20); \
/* copy return values back */ \
+ MOVD regArgs+40(FP), R20; \
+ CALL ·spillArgs(SB); \
MOVD stackArgsType+0(FP), R7; \
MOVD stackArgs+16(FP), R3; \
MOVWU stackArgsSize+24(FP), R4; \
@@ -403,11 +495,12 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
// to reflectcallmove. It does not follow the Go ABI; it expects its
// arguments in registers.
TEXT callRet<>(SB), NOSPLIT, $48-0
+ NO_LOCAL_POINTERS
MOVD R7, 8(RSP)
MOVD R3, 16(RSP)
MOVD R5, 24(RSP)
MOVD R4, 32(RSP)
- MOVD $0, 40(RSP)
+ MOVD R20, 40(RSP)
BL runtime·reflectcallmove(SB)
RET
@@ -440,12 +533,14 @@ CALLFN(·call536870912, 536870912)
CALLFN(·call1073741824, 1073741824)
// func memhash32(p unsafe.Pointer, h uintptr) uintptr
-TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24
- MOVB runtime·useAeshash(SB), R0
- CBZ R0, noaes
+TEXT runtime·memhash32<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
+ MOVB runtime·useAeshash(SB), R10
+ CBZ R10, noaes
+#ifndef GOEXPERIMENT_regabiargs
MOVD p+0(FP), R0
MOVD h+8(FP), R1
MOVD $ret+16(FP), R2
+#endif
MOVD $runtime·aeskeysched+0(SB), R3
VEOR V0.B16, V0.B16, V0.B16
@@ -459,18 +554,24 @@ TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24
AESMC V0.B16, V0.B16
AESE V2.B16, V0.B16
+#ifdef GOEXPERIMENT_regabiargs
+ VMOV V0.D[0], R0
+#else
VST1 [V0.D1], (R2)
+#endif
RET
noaes:
- B runtime·memhash32Fallback(SB)
+ B runtime·memhash32Fallback<ABIInternal>(SB)
// func memhash64(p unsafe.Pointer, h uintptr) uintptr
-TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24
- MOVB runtime·useAeshash(SB), R0
- CBZ R0, noaes
+TEXT runtime·memhash64<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
+ MOVB runtime·useAeshash(SB), R10
+ CBZ R10, noaes
+#ifndef GOEXPERIMENT_regabiargs
MOVD p+0(FP), R0
MOVD h+8(FP), R1
MOVD $ret+16(FP), R2
+#endif
MOVD $runtime·aeskeysched+0(SB), R3
VEOR V0.B16, V0.B16, V0.B16
@@ -484,75 +585,89 @@ TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24
AESMC V0.B16, V0.B16
AESE V2.B16, V0.B16
+#ifdef GOEXPERIMENT_regabiargs
+ VMOV V0.D[0], R0
+#else
VST1 [V0.D1], (R2)
+#endif
RET
noaes:
- B runtime·memhash64Fallback(SB)
+ B runtime·memhash64Fallback<ABIInternal>(SB)
// func memhash(p unsafe.Pointer, h, size uintptr) uintptr
-TEXT runtime·memhash(SB),NOSPLIT|NOFRAME,$0-32
- MOVB runtime·useAeshash(SB), R0
- CBZ R0, noaes
+TEXT runtime·memhash<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-32
+ MOVB runtime·useAeshash(SB), R10
+ CBZ R10, noaes
+#ifndef GOEXPERIMENT_regabiargs
MOVD p+0(FP), R0
- MOVD s+16(FP), R1
- MOVD h+8(FP), R3
- MOVD $ret+24(FP), R2
+ MOVD h+8(FP), R1
+ MOVD s+16(FP), R2
+ MOVD $ret+24(FP), R8
+#endif
B aeshashbody<>(SB)
noaes:
- B runtime·memhashFallback(SB)
+ B runtime·memhashFallback<ABIInternal>(SB)
// func strhash(p unsafe.Pointer, h uintptr) uintptr
-TEXT runtime·strhash(SB),NOSPLIT|NOFRAME,$0-24
- MOVB runtime·useAeshash(SB), R0
- CBZ R0, noaes
- MOVD p+0(FP), R10 // string pointer
- LDP (R10), (R0, R1) //string data/ length
- MOVD h+8(FP), R3
- MOVD $ret+16(FP), R2 // return adddress
+TEXT runtime·strhash<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
+ MOVB runtime·useAeshash(SB), R10
+ CBZ R10, noaes
+#ifdef GOEXPERIMENT_regabiargs
+ LDP (R0), (R0, R2) // string data / length
+#else
+ MOVD p+0(FP), R10 // string pointer
+ LDP (R10), (R0, R2) // string data / length
+ MOVD h+8(FP), R1
+	MOVD	$ret+16(FP), R8		// return address
+#endif
B aeshashbody<>(SB)
noaes:
- B runtime·strhashFallback(SB)
+ B runtime·strhashFallback<ABIInternal>(SB)
// R0: data
-// R1: length
-// R2: address to put return value
-// R3: seed data
+// R1: seed data
+// R2: length
+#ifdef GOEXPERIMENT_regabiargs
+// At return, R0 = return value
+#else
+// R8: address to put return value
+#endif
TEXT aeshashbody<>(SB),NOSPLIT|NOFRAME,$0
VEOR V30.B16, V30.B16, V30.B16
- VMOV R3, V30.D[0]
- VMOV R1, V30.D[1] // load length into seed
+ VMOV R1, V30.D[0]
+ VMOV R2, V30.D[1] // load length into seed
MOVD $runtime·aeskeysched+0(SB), R4
VLD1.P 16(R4), [V0.B16]
AESE V30.B16, V0.B16
AESMC V0.B16, V0.B16
- CMP $16, R1
+ CMP $16, R2
BLO aes0to15
BEQ aes16
- CMP $32, R1
+ CMP $32, R2
BLS aes17to32
- CMP $64, R1
+ CMP $64, R2
BLS aes33to64
- CMP $128, R1
+ CMP $128, R2
BLS aes65to128
B aes129plus
aes0to15:
- CBZ R1, aes0
+ CBZ R2, aes0
VEOR V2.B16, V2.B16, V2.B16
- TBZ $3, R1, less_than_8
+ TBZ $3, R2, less_than_8
VLD1.P 8(R0), V2.D[0]
less_than_8:
- TBZ $2, R1, less_than_4
+ TBZ $2, R2, less_than_4
VLD1.P 4(R0), V2.S[2]
less_than_4:
- TBZ $1, R1, less_than_2
+ TBZ $1, R2, less_than_2
VLD1.P 2(R0), V2.H[6]
less_than_2:
- TBZ $0, R1, done
+ TBZ $0, R2, done
VLD1 (R0), V2.B[14]
done:
AESE V0.B16, V2.B16
@@ -561,11 +676,21 @@ done:
AESMC V2.B16, V2.B16
AESE V0.B16, V2.B16
- VST1 [V2.D1], (R2)
+#ifdef GOEXPERIMENT_regabiargs
+ VMOV V2.D[0], R0
+#else
+ VST1 [V2.D1], (R8)
+#endif
RET
+
aes0:
- VST1 [V0.D1], (R2)
+#ifdef GOEXPERIMENT_regabiargs
+ VMOV V0.D[0], R0
+#else
+ VST1 [V0.D1], (R8)
+#endif
RET
+
aes16:
VLD1 (R0), [V2.B16]
B done
@@ -575,7 +700,7 @@ aes17to32:
VLD1 (R4), [V1.B16]
AESE V30.B16, V1.B16
AESMC V1.B16, V1.B16
- SUB $16, R1, R10
+ SUB $16, R2, R10
VLD1.P (R0)(R10), [V2.B16]
VLD1 (R0), [V3.B16]
@@ -593,7 +718,11 @@ aes17to32:
AESE V1.B16, V3.B16
VEOR V3.B16, V2.B16, V2.B16
- VST1 [V2.D1], (R2)
+#ifdef GOEXPERIMENT_regabiargs
+ VMOV V2.D[0], R0
+#else
+ VST1 [V2.D1], (R8)
+#endif
RET
aes33to64:
@@ -604,7 +733,7 @@ aes33to64:
AESMC V2.B16, V2.B16
AESE V30.B16, V3.B16
AESMC V3.B16, V3.B16
- SUB $32, R1, R10
+ SUB $32, R2, R10
VLD1.P (R0)(R10), [V4.B16, V5.B16]
VLD1 (R0), [V6.B16, V7.B16]
@@ -636,7 +765,11 @@ aes33to64:
VEOR V7.B16, V5.B16, V5.B16
VEOR V5.B16, V4.B16, V4.B16
- VST1 [V4.D1], (R2)
+#ifdef GOEXPERIMENT_regabiargs
+ VMOV V4.D[0], R0
+#else
+ VST1 [V4.D1], (R8)
+#endif
RET
aes65to128:
@@ -657,7 +790,7 @@ aes65to128:
AESE V30.B16, V7.B16
AESMC V7.B16, V7.B16
- SUB $64, R1, R10
+ SUB $64, R2, R10
VLD1.P (R0)(R10), [V8.B16, V9.B16, V10.B16, V11.B16]
VLD1 (R0), [V12.B16, V13.B16, V14.B16, V15.B16]
AESE V0.B16, V8.B16
@@ -711,7 +844,11 @@ aes65to128:
VEOR V11.B16, V9.B16, V9.B16
VEOR V9.B16, V8.B16, V8.B16
- VST1 [V8.D1], (R2)
+#ifdef GOEXPERIMENT_regabiargs
+ VMOV V8.D[0], R0
+#else
+ VST1 [V8.D1], (R8)
+#endif
RET
aes129plus:
@@ -732,12 +869,12 @@ aes129plus:
AESMC V6.B16, V6.B16
AESE V30.B16, V7.B16
AESMC V7.B16, V7.B16
- ADD R0, R1, R10
+ ADD R0, R2, R10
SUB $128, R10, R10
VLD1.P 64(R10), [V8.B16, V9.B16, V10.B16, V11.B16]
VLD1 (R10), [V12.B16, V13.B16, V14.B16, V15.B16]
- SUB $1, R1, R1
- LSR $7, R1, R1
+ SUB $1, R2, R2
+ LSR $7, R2, R2
aesloop:
AESE V8.B16, V0.B16
@@ -776,8 +913,8 @@ aesloop:
AESMC V6.B16, V6.B16
AESE V15.B16, V7.B16
AESMC V7.B16, V7.B16
- SUB $1, R1, R1
- CBNZ R1, aesloop
+ SUB $1, R2, R2
+ CBNZ R2, aesloop
AESE V8.B16, V0.B16
AESMC V0.B16, V0.B16
@@ -830,7 +967,11 @@ aesloop:
VEOR V4.B16, V6.B16, V4.B16
VEOR V4.B16, V0.B16, V0.B16
- VST1 [V0.D1], (R2)
+#ifdef GOEXPERIMENT_regabiargs
+ VMOV V0.D[0], R0
+#else
+ VST1 [V0.D1], (R8)
+#endif
RET
TEXT runtime·procyield(SB),NOSPLIT,$0-0
@@ -841,23 +982,6 @@ again:
CBNZ R0, again
RET
-// void jmpdefer(fv, sp);
-// called from deferreturn.
-// 1. grab stored LR for caller
-// 2. sub 4 bytes to get back to BL deferreturn
-// 3. BR to fn
-TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16
- MOVD 0(RSP), R0
- SUB $4, R0
- MOVD R0, LR
-
- MOVD fv+0(FP), R26
- MOVD argp+8(FP), R0
- MOVD R0, RSP
- SUB $8, RSP
- MOVD 0(R26), R3
- B (R3)
-
// Save state of caller into g->sched,
// but using fake PC from systemstack_switch.
// Must only be called from functions with no locals ($0)
@@ -1052,7 +1176,8 @@ havem:
MOVD R1, 8(RSP)
MOVD R2, 16(RSP)
MOVD R3, 24(RSP)
- BL runtime·cgocallbackg(SB)
+ MOVD $runtime·cgocallbackg(SB), R0
+ CALL (R0) // indirect call to bypass nosplit check. We're on a different stack now.
// Restore g->sched (== m->curg->sched) from saved values.
MOVD 0(RSP), R5
@@ -1158,7 +1283,10 @@ TEXT ·checkASM(SB),NOSPLIT,$0-1
// It does not clobber any general-purpose registers,
// but may clobber others (e.g., floating point registers)
// The act of CALLing gcWriteBarrier will clobber R30 (LR).
-TEXT runtime·gcWriteBarrier(SB),NOSPLIT,$200
+//
+// Defined as ABIInternal since the compiler generates ABIInternal
+// calls to it directly and it does not use the stack-based Go ABI.
+TEXT runtime·gcWriteBarrier<ABIInternal>(SB),NOSPLIT,$200
// Save the registers clobbered by the fast path.
MOVD R0, 184(RSP)
MOVD R1, 192(RSP)
@@ -1250,71 +1378,129 @@ flush:
// in the caller's stack frame. These stubs write the args into that stack space and
// then tail call to the corresponding runtime handler.
// The tail call makes these stubs disappear in backtraces.
-TEXT runtime·panicIndex(SB),NOSPLIT,$0-16
+//
+// Defined as ABIInternal since the compiler generates ABIInternal
+// calls to it directly and it does not use the stack-based Go ABI.
+TEXT runtime·panicIndex<ABIInternal>(SB),NOSPLIT,$0-16
MOVD R0, x+0(FP)
MOVD R1, y+8(FP)
- JMP runtime·goPanicIndex(SB)
-TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16
+ JMP runtime·goPanicIndex<ABIInternal>(SB)
+TEXT runtime·panicIndexU<ABIInternal>(SB),NOSPLIT,$0-16
MOVD R0, x+0(FP)
MOVD R1, y+8(FP)
- JMP runtime·goPanicIndexU(SB)
-TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16
+ JMP runtime·goPanicIndexU<ABIInternal>(SB)
+TEXT runtime·panicSliceAlen<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R1, R0
+ MOVD R2, R1
+#else
MOVD R1, x+0(FP)
MOVD R2, y+8(FP)
- JMP runtime·goPanicSliceAlen(SB)
-TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSliceAlen<ABIInternal>(SB)
+TEXT runtime·panicSliceAlenU<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R1, R0
+ MOVD R2, R1
+#else
MOVD R1, x+0(FP)
MOVD R2, y+8(FP)
- JMP runtime·goPanicSliceAlenU(SB)
-TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSliceAlenU<ABIInternal>(SB)
+TEXT runtime·panicSliceAcap<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R1, R0
+ MOVD R2, R1
+#else
MOVD R1, x+0(FP)
MOVD R2, y+8(FP)
- JMP runtime·goPanicSliceAcap(SB)
-TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSliceAcap<ABIInternal>(SB)
+TEXT runtime·panicSliceAcapU<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R1, R0
+ MOVD R2, R1
+#else
MOVD R1, x+0(FP)
MOVD R2, y+8(FP)
- JMP runtime·goPanicSliceAcapU(SB)
-TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSliceAcapU<ABIInternal>(SB)
+TEXT runtime·panicSliceB<ABIInternal>(SB),NOSPLIT,$0-16
MOVD R0, x+0(FP)
MOVD R1, y+8(FP)
- JMP runtime·goPanicSliceB(SB)
-TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16
+ JMP runtime·goPanicSliceB<ABIInternal>(SB)
+TEXT runtime·panicSliceBU<ABIInternal>(SB),NOSPLIT,$0-16
MOVD R0, x+0(FP)
MOVD R1, y+8(FP)
- JMP runtime·goPanicSliceBU(SB)
-TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16
+ JMP runtime·goPanicSliceBU<ABIInternal>(SB)
+TEXT runtime·panicSlice3Alen<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R2, R0
+ MOVD R3, R1
+#else
MOVD R2, x+0(FP)
MOVD R3, y+8(FP)
- JMP runtime·goPanicSlice3Alen(SB)
-TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSlice3Alen<ABIInternal>(SB)
+TEXT runtime·panicSlice3AlenU<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R2, R0
+ MOVD R3, R1
+#else
MOVD R2, x+0(FP)
MOVD R3, y+8(FP)
- JMP runtime·goPanicSlice3AlenU(SB)
-TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSlice3AlenU<ABIInternal>(SB)
+TEXT runtime·panicSlice3Acap<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R2, R0
+ MOVD R3, R1
+#else
MOVD R2, x+0(FP)
MOVD R3, y+8(FP)
- JMP runtime·goPanicSlice3Acap(SB)
-TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSlice3Acap<ABIInternal>(SB)
+TEXT runtime·panicSlice3AcapU<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R2, R0
+ MOVD R3, R1
+#else
MOVD R2, x+0(FP)
MOVD R3, y+8(FP)
- JMP runtime·goPanicSlice3AcapU(SB)
-TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSlice3AcapU<ABIInternal>(SB)
+TEXT runtime·panicSlice3B<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R1, R0
+ MOVD R2, R1
+#else
MOVD R1, x+0(FP)
MOVD R2, y+8(FP)
- JMP runtime·goPanicSlice3B(SB)
-TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSlice3B<ABIInternal>(SB)
+TEXT runtime·panicSlice3BU<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R1, R0
+ MOVD R2, R1
+#else
MOVD R1, x+0(FP)
MOVD R2, y+8(FP)
- JMP runtime·goPanicSlice3BU(SB)
-TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSlice3BU<ABIInternal>(SB)
+TEXT runtime·panicSlice3C<ABIInternal>(SB),NOSPLIT,$0-16
MOVD R0, x+0(FP)
MOVD R1, y+8(FP)
- JMP runtime·goPanicSlice3C(SB)
-TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16
+ JMP runtime·goPanicSlice3C<ABIInternal>(SB)
+TEXT runtime·panicSlice3CU<ABIInternal>(SB),NOSPLIT,$0-16
MOVD R0, x+0(FP)
MOVD R1, y+8(FP)
- JMP runtime·goPanicSlice3CU(SB)
-TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-16
+ JMP runtime·goPanicSlice3CU<ABIInternal>(SB)
+TEXT runtime·panicSliceConvert<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R2, R0
+ MOVD R3, R1
+#else
MOVD R2, x+0(FP)
MOVD R3, y+8(FP)
- JMP runtime·goPanicSliceConvert(SB)
+#endif
+ JMP runtime·goPanicSliceConvert<ABIInternal>(SB)
diff --git a/src/runtime/asm_mips64x.s b/src/runtime/asm_mips64x.s
index d4d2280105..b2e2384c36 100644
--- a/src/runtime/asm_mips64x.s
+++ b/src/runtime/asm_mips64x.s
@@ -63,12 +63,11 @@ nocgo:
// create a new goroutine to start program
MOVV $runtime·mainPC(SB), R1 // entry
- ADDV $-24, R29
- MOVV R1, 16(R29)
- MOVV R0, 8(R29)
+ ADDV $-16, R29
+ MOVV R1, 8(R29)
MOVV R0, 0(R29)
JAL runtime·newproc(SB)
- ADDV $24, R29
+ ADDV $16, R29
// start this M
JAL runtime·mstart(SB)
@@ -385,22 +384,6 @@ CALLFN(·call1073741824, 1073741824)
TEXT runtime·procyield(SB),NOSPLIT,$0-0
RET
-// void jmpdefer(fv, sp);
-// called from deferreturn.
-// 1. grab stored LR for caller
-// 2. sub 8 bytes to get back to JAL deferreturn
-// 3. JMP to fn
-TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16
- MOVV 0(R29), R31
- ADDV $-8, R31
-
- MOVV fv+0(FP), REGCTXT
- MOVV argp+8(FP), R29
- ADDV $-8, R29
- NOR R0, R0 // prevent scheduling
- MOVV 0(REGCTXT), R4
- JMP (R4)
-
// Save state of caller into g->sched,
// but using fake PC from systemstack_switch.
// Must only be called from functions with no locals ($0)
diff --git a/src/runtime/asm_mipsx.s b/src/runtime/asm_mipsx.s
index ea7edf20cf..87a1344e8f 100644
--- a/src/runtime/asm_mipsx.s
+++ b/src/runtime/asm_mipsx.s
@@ -64,12 +64,11 @@ nocgo:
// create a new goroutine to start program
MOVW $runtime·mainPC(SB), R1 // entry
- ADDU $-12, R29
- MOVW R1, 8(R29)
- MOVW R0, 4(R29)
+ ADDU $-8, R29
+ MOVW R1, 4(R29)
MOVW R0, 0(R29)
JAL runtime·newproc(SB)
- ADDU $12, R29
+ ADDU $8, R29
// start this M
JAL runtime·mstart(SB)
@@ -383,22 +382,6 @@ CALLFN(·call1073741824, 1073741824)
TEXT runtime·procyield(SB),NOSPLIT,$0-4
RET
-// void jmpdefer(fv, sp);
-// called from deferreturn.
-// 1. grab stored LR for caller
-// 2. sub 8 bytes to get back to JAL deferreturn
-// 3. JMP to fn
-TEXT runtime·jmpdefer(SB),NOSPLIT,$0-8
- MOVW 0(R29), R31
- ADDU $-8, R31
-
- MOVW fv+0(FP), REGCTXT
- MOVW argp+4(FP), R29
- ADDU $-4, R29
- NOR R0, R0 // prevent scheduling
- MOVW 0(REGCTXT), R4
- JMP (R4)
-
// Save state of caller into g->sched,
// but using fake PC from systemstack_switch.
// Must only be called from functions with no locals ($0)
diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s
index 942cc14f17..5dc96c5947 100644
--- a/src/runtime/asm_ppc64x.s
+++ b/src/runtime/asm_ppc64x.s
@@ -94,9 +94,8 @@ nocgo:
MOVDU R0, -8(R1)
MOVDU R0, -8(R1)
MOVDU R0, -8(R1)
- MOVDU R0, -8(R1)
BL runtime·newproc(SB)
- ADD $(16+FIXED_FRAME), R1
+ ADD $(8+FIXED_FRAME), R1
// start this M
BL runtime·mstart(SB)
@@ -504,34 +503,6 @@ again:
OR R6, R6, R6 // Set PPR priority back to medium-low
RET
-// void jmpdefer(fv, sp);
-// called from deferreturn.
-// 1. grab stored LR for caller
-// 2. sub 8 bytes to get back to either nop or toc reload before deferreturn
-// 3. BR to fn
-// When dynamically linking Go, it is not sufficient to rewind to the BL
-// deferreturn -- we might be jumping between modules and so we need to reset
-// the TOC pointer in r2. To do this, codegen inserts MOVD 24(R1), R2 *before*
-// the BL deferreturn and jmpdefer rewinds to that.
-TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16
- MOVD 0(R1), R31
- SUB $8, R31
- MOVD R31, LR
-
- MOVD fv+0(FP), R11
- MOVD argp+8(FP), R1
- SUB $FIXED_FRAME, R1
-#ifdef GOOS_aix
- // AIX won't trigger a SIGSEGV if R11 = nil
- // So it manually triggers it
- CMP R0, R11
- BNE 2(PC)
- MOVD R0, 0(R0)
-#endif
- MOVD 0(R11), R12
- MOVD R12, CTR
- BR (CTR)
-
// Save state of caller into g->sched,
// but using fake PC from systemstack_switch.
// Must only be called from functions with no locals ($0)
diff --git a/src/runtime/asm_riscv64.s b/src/runtime/asm_riscv64.s
index ef7af4e10d..9927a817f7 100644
--- a/src/runtime/asm_riscv64.s
+++ b/src/runtime/asm_riscv64.s
@@ -57,12 +57,11 @@ nocgo:
// create a new goroutine to start program
MOV $runtime·mainPC(SB), T0 // entry
- ADD $-24, X2
- MOV T0, 16(X2)
- MOV ZERO, 8(X2)
+ ADD $-16, X2
+ MOV T0, 8(X2)
MOV ZERO, 0(X2)
CALL runtime·newproc(SB)
- ADD $24, X2
+ ADD $16, X2
// start this M
CALL runtime·mstart(SB)
@@ -249,21 +248,6 @@ TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0
MOV gobuf_pc(T0), T0
JALR ZERO, T0
-// func jmpdefer(fv *funcval, argp uintptr)
-// called from deferreturn
-// 1. grab stored return address from the caller's frame
-// 2. sub 8 bytes to get back to JAL deferreturn
-// 3. JMP to fn
-TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16
- MOV 0(X2), RA
- ADD $-8, RA
-
- MOV fv+0(FP), CTXT
- MOV argp+8(FP), X2
- ADD $-8, X2
- MOV 0(CTXT), T0
- JALR ZERO, T0
-
// func procyield(cycles uint32)
TEXT runtime·procyield(SB),NOSPLIT,$0-0
RET
diff --git a/src/runtime/asm_s390x.s b/src/runtime/asm_s390x.s
index fb38271630..d4110d563f 100644
--- a/src/runtime/asm_s390x.s
+++ b/src/runtime/asm_s390x.s
@@ -147,12 +147,11 @@ nocgo:
// create a new goroutine to start program
MOVD $runtime·mainPC(SB), R2 // entry
- SUB $24, R15
- MOVD R2, 16(R15)
- MOVD $0, 8(R15)
+ SUB $16, R15
+ MOVD R2, 8(R15)
MOVD $0, 0(R15)
BL runtime·newproc(SB)
- ADD $24, R15
+ ADD $16, R15
// start this M
BL runtime·mstart(SB)
@@ -481,21 +480,6 @@ TEXT callfnMVC<>(SB),NOSPLIT|NOFRAME,$0-0
TEXT runtime·procyield(SB),NOSPLIT,$0-0
RET
-// void jmpdefer(fv, sp);
-// called from deferreturn.
-// 1. grab stored LR for caller
-// 2. sub 6 bytes to get back to BL deferreturn (size of BRASL instruction)
-// 3. BR to fn
-TEXT runtime·jmpdefer(SB),NOSPLIT|NOFRAME,$0-16
- MOVD 0(R15), R1
- SUB $6, R1, LR
-
- MOVD fv+0(FP), R12
- MOVD argp+8(FP), R15
- SUB $8, R15
- MOVD 0(R12), R3
- BR (R3)
-
// Save state of caller into g->sched,
// but using fake PC from systemstack_switch.
// Must only be called from functions with no locals ($0)
diff --git a/src/runtime/asm_wasm.s b/src/runtime/asm_wasm.s
index 33c335ba5a..d885da6e70 100644
--- a/src/runtime/asm_wasm.s
+++ b/src/runtime/asm_wasm.s
@@ -18,8 +18,7 @@ TEXT runtime·rt0_go(SB), NOSPLIT|NOFRAME|TOPFRAME, $0
CALLNORESUME runtime·args(SB)
CALLNORESUME runtime·osinit(SB)
CALLNORESUME runtime·schedinit(SB)
- MOVD $0, 0(SP)
- MOVD $runtime·mainPC(SB), 8(SP)
+ MOVD $runtime·mainPC(SB), 0(SP)
CALLNORESUME runtime·newproc(SB)
CALL runtime·mstart(SB) // WebAssembly stack will unwind when switching to another goroutine
UNDEF
@@ -194,35 +193,6 @@ TEXT runtime·return0(SB), NOSPLIT, $0-0
MOVD $0, RET0
RET
-TEXT runtime·jmpdefer(SB), NOSPLIT, $0-16
- MOVD fv+0(FP), CTXT
-
- Get CTXT
- I64Eqz
- If
- CALLNORESUME runtime·sigpanic<ABIInternal>(SB)
- End
-
- // caller sp after CALL
- I64Load argp+8(FP)
- I64Const $8
- I64Sub
- I32WrapI64
- Set SP
-
- // decrease PC_B by 1 to CALL again
- Get SP
- I32Load16U (SP)
- I32Const $1
- I32Sub
- I32Store16 $0
-
- // but first run the deferred function
- Get CTXT
- I32WrapI64
- I64Load $0
- JMP
-
TEXT runtime·asminit(SB), NOSPLIT, $0-0
// No per-thread init.
RET
diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go
index 2626216f95..2f3c609907 100644
--- a/src/runtime/cgocall.go
+++ b/src/runtime/cgocall.go
@@ -85,6 +85,7 @@
package runtime
import (
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -480,7 +481,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
if inheap(uintptr(unsafe.Pointer(it))) {
panic(errorString(msg))
}
- p = *(*unsafe.Pointer)(add(p, sys.PtrSize))
+ p = *(*unsafe.Pointer)(add(p, goarch.PtrSize))
if !cgoIsGoPointer(p) {
return
}
@@ -560,7 +561,7 @@ func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base, i uintptr) {
}
hbits := heapBitsForAddr(base)
n := span.elemsize
- for i = uintptr(0); i < n; i += sys.PtrSize {
+ for i = uintptr(0); i < n; i += goarch.PtrSize {
if !hbits.morePointers() {
// No more possible pointers.
break
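
cgoCheckUnknownPointer scans a heap object one pointer-sized word at a time, so the loop stride is the platform word size (now goarch.PtrSize). Outside the runtime the same stride is available as unsafe.Sizeof(uintptr(0)); a minimal sketch of that word-by-word walk (walkWords is an illustrative helper, not runtime code):

	package main

	import (
		"fmt"
		"unsafe"
	)

	// walkWords visits a byte buffer in pointer-sized steps, the same stride the
	// runtime uses when scanning an object for possible pointers.
	func walkWords(buf []byte, visit func(off, word uintptr)) {
		const ptrSize = unsafe.Sizeof(uintptr(0)) // plays the role of goarch.PtrSize
		for off := uintptr(0); off+ptrSize <= uintptr(len(buf)); off += ptrSize {
			word := *(*uintptr)(unsafe.Pointer(&buf[off]))
			visit(off, word)
		}
	}

	func main() {
		buf := make([]byte, 4*unsafe.Sizeof(uintptr(0)))
		walkWords(buf, func(off, word uintptr) {
			fmt.Printf("offset %d: %#x\n", off, word)
		})
	}
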
diff --git a/src/runtime/cgocheck.go b/src/runtime/cgocheck.go
index 516045c163..3acbadf803 100644
--- a/src/runtime/cgocheck.go
+++ b/src/runtime/cgocheck.go
@@ -8,7 +8,7 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
@@ -151,7 +151,7 @@ func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) {
// src must be in the regular heap.
hbits := heapBitsForAddr(uintptr(src))
- for i := uintptr(0); i < off+size; i += sys.PtrSize {
+ for i := uintptr(0); i < off+size; i += goarch.PtrSize {
bits := hbits.bits()
if i >= off && bits&bitPointer != 0 {
v := *(*unsafe.Pointer)(add(src, i))
@@ -169,22 +169,22 @@ func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) {
//go:nosplit
//go:nowritebarrier
func cgoCheckBits(src unsafe.Pointer, gcbits *byte, off, size uintptr) {
- skipMask := off / sys.PtrSize / 8
- skipBytes := skipMask * sys.PtrSize * 8
+ skipMask := off / goarch.PtrSize / 8
+ skipBytes := skipMask * goarch.PtrSize * 8
ptrmask := addb(gcbits, skipMask)
src = add(src, skipBytes)
off -= skipBytes
size += off
var bits uint32
- for i := uintptr(0); i < size; i += sys.PtrSize {
- if i&(sys.PtrSize*8-1) == 0 {
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
+ if i&(goarch.PtrSize*8-1) == 0 {
bits = uint32(*ptrmask)
ptrmask = addb(ptrmask, 1)
} else {
bits >>= 1
}
if off > 0 {
- off -= sys.PtrSize
+ off -= goarch.PtrSize
} else {
if bits&1 != 0 {
v := *(*unsafe.Pointer)(add(src, i))
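
cgoCheckBits consumes a pointer bitmap with one bit per pointer-sized word, packed eight words to a byte and reloaded each time the word index crosses a byte boundary. A self-contained sketch of that decoding (the one-bit-per-word, LSB-first layout matches the loop above; pointerWords is an illustrative name):

	package main

	import "fmt"

	// pointerWords reports which pointer-sized words are marked as pointers in a
	// one-bit-per-word mask, eight words per byte, least significant bit first.
	func pointerWords(ptrmask []byte, nwords int) []int {
		var idx []int
		var bits byte
		for i := 0; i < nwords; i++ {
			if i%8 == 0 {
				bits = ptrmask[i/8] // reload the mask byte every 8 words
			} else {
				bits >>= 1
			}
			if bits&1 != 0 {
				idx = append(idx, i)
			}
		}
		return idx
	}

	func main() {
		// 0b00000101: words 0 and 2 hold pointers.
		fmt.Println(pointerWords([]byte{0x05}, 8)) // [0 2]
	}
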
diff --git a/src/runtime/chan.go b/src/runtime/chan.go
index f2a75b30f4..3cdb5dce11 100644
--- a/src/runtime/chan.go
+++ b/src/runtime/chan.go
@@ -18,6 +18,7 @@ package runtime
// c.qcount < c.dataqsiz implies that c.sendq is empty.
import (
+ "internal/abi"
"runtime/internal/atomic"
"runtime/internal/math"
"unsafe"
@@ -169,7 +170,7 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
}
if raceenabled {
- racereadpc(c.raceaddr(), callerpc, funcPC(chansend))
+ racereadpc(c.raceaddr(), callerpc, abi.FuncPCABIInternal(chansend))
}
// Fast path: check for failed non-blocking operation without acquiring the lock.
@@ -365,7 +366,7 @@ func closechan(c *hchan) {
if raceenabled {
callerpc := getcallerpc()
- racewritepc(c.raceaddr(), callerpc, funcPC(closechan))
+ racewritepc(c.raceaddr(), callerpc, abi.FuncPCABIInternal(closechan))
racerelease(c.raceaddr())
}
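
The race annotations in chan.go now use abi.FuncPCABIInternal, which names the ABIInternal entry point of a Go function explicitly instead of dereferencing a funcval the way the old funcPC helper did. internal/abi is not importable from user code, but the general idea, mapping a Go function value to an entry PC and a symbol name, can be sketched with the public reflect and runtime APIs (entryPC is an illustrative helper, not part of this change):

	package main

	import (
		"fmt"
		"reflect"
		"runtime"
	)

	// entryPC returns a code pointer for fn and the symbol name it resolves to,
	// roughly the kind of PC the runtime records for race/pprof call sites.
	func entryPC(fn interface{}) (uintptr, string) {
		pc := reflect.ValueOf(fn).Pointer()
		name := "unknown"
		if f := runtime.FuncForPC(pc); f != nil {
			name = f.Name()
		}
		return pc, name
	}

	func main() {
		pc, name := entryPC(fmt.Println)
		fmt.Printf("%s starts at %#x\n", name, pc)
	}
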
diff --git a/src/runtime/cpuprof.go b/src/runtime/cpuprof.go
index e5d0193b9c..c81ab710c2 100644
--- a/src/runtime/cpuprof.go
+++ b/src/runtime/cpuprof.go
@@ -13,6 +13,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -166,8 +167,8 @@ func (p *cpuProfile) addExtra() {
if p.lostExtra > 0 {
hdr := [1]uint64{p.lostExtra}
lostStk := [2]uintptr{
- funcPC(_LostExternalCode) + sys.PCQuantum,
- funcPC(_ExternalCode) + sys.PCQuantum,
+ abi.FuncPCABIInternal(_LostExternalCode) + sys.PCQuantum,
+ abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum,
}
p.log.write(nil, 0, hdr[:], lostStk[:])
p.lostExtra = 0
@@ -176,8 +177,8 @@ func (p *cpuProfile) addExtra() {
if p.lostAtomic > 0 {
hdr := [1]uint64{p.lostAtomic}
lostStk := [2]uintptr{
- funcPC(_LostSIGPROFDuringAtomic64) + sys.PCQuantum,
- funcPC(_System) + sys.PCQuantum,
+ abi.FuncPCABIInternal(_LostSIGPROFDuringAtomic64) + sys.PCQuantum,
+ abi.FuncPCABIInternal(_System) + sys.PCQuantum,
}
p.log.write(nil, 0, hdr[:], lostStk[:])
p.lostAtomic = 0
diff --git a/src/runtime/debugcall.go b/src/runtime/debugcall.go
index faddf59eed..ad66a18c26 100644
--- a/src/runtime/debugcall.go
+++ b/src/runtime/debugcall.go
@@ -112,7 +112,7 @@ func debugCallWrap(dispatch uintptr) {
// closure and start the goroutine with that closure, but the compiler disallows
// implicit closure allocation in the runtime.
fn := debugCallWrap1
- newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), nil, 0, gp, callerpc)
+ newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), gp, callerpc)
args := &debugCallWrapArgs{
dispatch: dispatch,
callingG: gp,
diff --git a/src/runtime/defs_plan9_386.go b/src/runtime/defs_plan9_386.go
index 49129b3c3f..428044df68 100644
--- a/src/runtime/defs_plan9_386.go
+++ b/src/runtime/defs_plan9_386.go
@@ -61,4 +61,4 @@ func dumpregs(u *ureg) {
print("gs ", hex(u.gs), "\n")
}
-func sigpanictramp() {}
+func sigpanictramp()
diff --git a/src/runtime/defs_plan9_amd64.go b/src/runtime/defs_plan9_amd64.go
index 0099563034..15a27fc7db 100644
--- a/src/runtime/defs_plan9_amd64.go
+++ b/src/runtime/defs_plan9_amd64.go
@@ -78,4 +78,4 @@ func dumpregs(u *ureg) {
print("gs ", hex(u.gs), "\n")
}
-func sigpanictramp() {}
+func sigpanictramp()
diff --git a/src/runtime/duff_arm64.s b/src/runtime/duff_arm64.s
index 128b076af9..33c4905078 100644
--- a/src/runtime/duff_arm64.s
+++ b/src/runtime/duff_arm64.s
@@ -4,7 +4,7 @@
#include "textflag.h"
-TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0
+TEXT runtime·duffzero<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-0
STP.P (ZR, ZR), 16(R20)
STP.P (ZR, ZR), 16(R20)
STP.P (ZR, ZR), 16(R20)
@@ -71,7 +71,7 @@ TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0
STP (ZR, ZR), (R20)
RET
-TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0
+TEXT runtime·duffcopy<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-0
LDP.P 16(R20), (R26, R27)
STP.P (R26, R27), 16(R21)
diff --git a/src/runtime/export_debug_test.go b/src/runtime/export_debug_test.go
index fe4c9045c1..a2cef02cf8 100644
--- a/src/runtime/export_debug_test.go
+++ b/src/runtime/export_debug_test.go
@@ -9,7 +9,7 @@ package runtime
import (
"internal/abi"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
@@ -115,7 +115,7 @@ func (h *debugCallHandler) inject(info *siginfo, ctxt *sigctxt, gp2 *g) bool {
return false
}
// Push current PC on the stack.
- rsp := ctxt.rsp() - sys.PtrSize
+ rsp := ctxt.rsp() - goarch.PtrSize
*(*uint64)(unsafe.Pointer(uintptr(rsp))) = ctxt.rip()
ctxt.set_rsp(rsp)
// Write the argument frame size.
@@ -125,7 +125,7 @@ func (h *debugCallHandler) inject(info *siginfo, ctxt *sigctxt, gp2 *g) bool {
h.savedFP = *h.savedRegs.fpstate
h.savedRegs.fpstate = nil
// Set PC to debugCallV2.
- ctxt.set_rip(uint64(funcPC(debugCallV2)))
+ ctxt.set_rip(uint64(abi.FuncPCABIInternal(debugCallV2)))
// Call injected. Switch to the debugCall protocol.
testSigtrap = h.handleF
case _Grunnable:
@@ -166,7 +166,7 @@ func (h *debugCallHandler) handle(info *siginfo, ctxt *sigctxt, gp2 *g) bool {
storeRegArgs(ctxt.regs(), h.regArgs)
}
// Push return PC.
- sp -= sys.PtrSize
+ sp -= goarch.PtrSize
ctxt.set_rsp(sp)
*(*uint64)(unsafe.Pointer(uintptr(sp))) = ctxt.rip()
// Set PC to call and context register.
@@ -182,7 +182,7 @@ func (h *debugCallHandler) handle(info *siginfo, ctxt *sigctxt, gp2 *g) bool {
case 2:
// Function panicked. Copy panic out.
sp := ctxt.rsp()
- memmove(unsafe.Pointer(&h.panic), unsafe.Pointer(uintptr(sp)), 2*sys.PtrSize)
+ memmove(unsafe.Pointer(&h.panic), unsafe.Pointer(uintptr(sp)), 2*goarch.PtrSize)
case 8:
// Call isn't safe. Get the reason.
sp := ctxt.rsp()
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index c8d01fbb15..e7279564e3 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -7,6 +7,8 @@
package runtime
import (
+ "internal/goarch"
+ "internal/goos"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -27,8 +29,6 @@ var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr
-var FuncPC = funcPC
-
var Fastlog2 = fastlog2
var Atoi = atoi
@@ -147,40 +147,28 @@ func RunSchedLocalQueueStealTest() {
}
}
-// Temporary to enable register ABI bringup.
-// TODO(register args): convert back to local variables in RunSchedLocalQueueEmptyTest that
-// get passed to the "go" stmts there.
-var RunSchedLocalQueueEmptyState struct {
- done chan bool
- ready *uint32
- p *p
-}
-
func RunSchedLocalQueueEmptyTest(iters int) {
// Test that runq is not spuriously reported as empty.
// Runq emptiness affects scheduling decisions and spurious emptiness
// can lead to underutilization (both runnable Gs and idle Ps coexist
// for arbitrary long time).
done := make(chan bool, 1)
- RunSchedLocalQueueEmptyState.done = done
p := new(p)
- RunSchedLocalQueueEmptyState.p = p
gs := make([]g, 2)
ready := new(uint32)
- RunSchedLocalQueueEmptyState.ready = ready
for i := 0; i < iters; i++ {
*ready = 0
next0 := (i & 1) == 0
next1 := (i & 2) == 0
runqput(p, &gs[0], next0)
go func() {
- for atomic.Xadd(RunSchedLocalQueueEmptyState.ready, 1); atomic.Load(RunSchedLocalQueueEmptyState.ready) != 2; {
+ for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
}
- if runqempty(RunSchedLocalQueueEmptyState.p) {
- //println("next:", next0, next1)
+ if runqempty(p) {
+ println("next:", next0, next1)
throw("queue is empty")
}
- RunSchedLocalQueueEmptyState.done <- true
+ done <- true
}()
for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
}
@@ -228,8 +216,6 @@ var Write = write
func Envs() []string { return envs }
func SetEnvs(e []string) { envs = e }
-var BigEndian = sys.BigEndian
-
// For benchmarking.
func BenchSetType(n int, x interface{}) {
@@ -259,7 +245,7 @@ func BenchSetType(n int, x interface{}) {
})
}
-const PtrSize = sys.PtrSize
+const PtrSize = goarch.PtrSize
var ForceGCPeriod = &forcegcperiod
@@ -1066,7 +1052,7 @@ func FreePageAlloc(pp *PageAlloc) {
//
// This should not be higher than 0x100*pallocChunkBytes to support
// mips and mipsle, which only have 31-bit address spaces.
-var BaseChunkIdx = ChunkIdx(chunkIndex(((0xc000*pageAlloc64Bit + 0x100*pageAlloc32Bit) * pallocChunkBytes) + arenaBaseOffset*sys.GoosAix))
+var BaseChunkIdx = ChunkIdx(chunkIndex(((0xc000*pageAlloc64Bit + 0x100*pageAlloc32Bit) * pallocChunkBytes) + arenaBaseOffset*goos.IsAix))
// PageBase returns an address given a chunk index and a page index
// relative to that chunk.
diff --git a/src/runtime/extern.go b/src/runtime/extern.go
index 48e1e6603b..eca4062e68 100644
--- a/src/runtime/extern.go
+++ b/src/runtime/extern.go
@@ -186,7 +186,10 @@ of the run-time system.
*/
package runtime
-import "runtime/internal/sys"
+import (
+ "internal/goarch"
+ "internal/goos"
+)
// Caller reports file and line number information about function invocations on
// the calling goroutine's stack. The argument skip is the number of stack frames
@@ -260,8 +263,8 @@ func Version() string {
// GOOS is the running program's operating system target:
// one of darwin, freebsd, linux, and so on.
// To view possible combinations of GOOS and GOARCH, run "go tool dist list".
-const GOOS string = sys.GOOS
+const GOOS string = goos.GOOS
// GOARCH is the running program's architecture target:
// one of 386, amd64, arm, s390x, and so on.
-const GOARCH string = sys.GOARCH
+const GOARCH string = goarch.GOARCH
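
runtime.GOOS and runtime.GOARCH keep their documented values; only their definitions move from runtime/internal/sys to internal/goos and internal/goarch. From user code nothing changes:

	package main

	import (
		"fmt"
		"runtime"
	)

	func main() {
		// Still the compile-time target, e.g. "linux/amd64".
		fmt.Printf("%s/%s\n", runtime.GOOS, runtime.GOARCH)
	}
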
diff --git a/src/runtime/gc_test.go b/src/runtime/gc_test.go
index 5e7c6c574f..0ec5331534 100644
--- a/src/runtime/gc_test.go
+++ b/src/runtime/gc_test.go
@@ -21,6 +21,7 @@ import (
)
func TestGcSys(t *testing.T) {
+ t.Skip("skipping known-flaky test; golang.org/issue/37331")
if os.Getenv("GOGC") == "off" {
t.Skip("skipping test; GOGC=off in environment")
}
diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go
index 934e55f495..8fb30d95b9 100644
--- a/src/runtime/heapdump.go
+++ b/src/runtime/heapdump.go
@@ -12,7 +12,7 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
@@ -247,7 +247,7 @@ func dumpbv(cbv *bitvector, offset uintptr) {
for i := uintptr(0); i < uintptr(cbv.n); i++ {
if cbv.ptrbit(i) == 1 {
dumpint(fieldKindPtr)
- dumpint(uint64(offset + i*sys.PtrSize))
+ dumpint(uint64(offset + i*goarch.PtrSize))
}
}
}
@@ -298,7 +298,7 @@ func dumpframe(s *stkframe, arg unsafe.Pointer) bool {
dumpbv(&child.args, child.argoff)
} else {
// conservative - everything might be a pointer
- for off := child.argoff; off < child.argoff+child.arglen; off += sys.PtrSize {
+ for off := child.argoff; off < child.argoff+child.arglen; off += goarch.PtrSize {
dumpint(fieldKindPtr)
dumpint(uint64(off))
}
@@ -307,21 +307,21 @@ func dumpframe(s *stkframe, arg unsafe.Pointer) bool {
// Dump fields in the local vars section
if stkmap == nil {
// No locals information, dump everything.
- for off := child.arglen; off < s.varp-s.sp; off += sys.PtrSize {
+ for off := child.arglen; off < s.varp-s.sp; off += goarch.PtrSize {
dumpint(fieldKindPtr)
dumpint(uint64(off))
}
} else if stkmap.n < 0 {
// Locals size information, dump just the locals.
size := uintptr(-stkmap.n)
- for off := s.varp - size - s.sp; off < s.varp-s.sp; off += sys.PtrSize {
+ for off := s.varp - size - s.sp; off < s.varp-s.sp; off += goarch.PtrSize {
dumpint(fieldKindPtr)
dumpint(uint64(off))
}
} else if stkmap.n > 0 {
// Locals bitmap information, scan just the pointers in
// locals.
- dumpbv(&bv, s.varp-uintptr(bv.n)*sys.PtrSize-s.sp)
+ dumpbv(&bv, s.varp-uintptr(bv.n)*goarch.PtrSize-s.sp)
}
dumpint(fieldKindEol)
@@ -381,12 +381,13 @@ func dumpgoroutine(gp *g) {
dumpint(uint64(uintptr(unsafe.Pointer(gp))))
dumpint(uint64(d.sp))
dumpint(uint64(d.pc))
- dumpint(uint64(uintptr(unsafe.Pointer(d.fn))))
+ fn := *(**funcval)(unsafe.Pointer(&d.fn))
+ dumpint(uint64(uintptr(unsafe.Pointer(fn))))
if d.fn == nil {
// d.fn can be nil for open-coded defers
dumpint(uint64(0))
} else {
- dumpint(uint64(uintptr(unsafe.Pointer(d.fn.fn))))
+ dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
}
dumpint(uint64(uintptr(unsafe.Pointer(d.link))))
}
@@ -510,7 +511,7 @@ func dumpparams() {
} else {
dumpbool(true) // big-endian ptrs
}
- dumpint(sys.PtrSize)
+ dumpint(goarch.PtrSize)
var arenaStart, arenaEnd uintptr
for i1 := range mheap_.arenas {
if mheap_.arenas[i1] == nil {
@@ -531,7 +532,7 @@ func dumpparams() {
}
dumpint(uint64(arenaStart))
dumpint(uint64(arenaEnd))
- dumpstr(sys.GOARCH)
+ dumpstr(goarch.GOARCH)
dumpstr(buildVersion)
dumpint(uint64(ncpu))
}
@@ -725,7 +726,7 @@ func dumpfields(bv bitvector) {
func makeheapobjbv(p uintptr, size uintptr) bitvector {
// Extend the temp buffer if necessary.
- nptr := size / sys.PtrSize
+ nptr := size / goarch.PtrSize
if uintptr(len(tmpbuf)) < nptr/8+1 {
if tmpbuf != nil {
sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
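
makeheapobjbv sizes its temporary buffer for one bit per pointer-sized word of the object, rounded to whole bytes. The arithmetic in isolation (bitmapBytes is an illustrative helper):

	package main

	import (
		"fmt"
		"unsafe"
	)

	// bitmapBytes returns how many bytes a one-bit-per-word bitmap needs for an
	// object of the given size, using the same nptr/8+1 sizing as makeheapobjbv
	// (which may leave one spare byte).
	func bitmapBytes(size uintptr) uintptr {
		const ptrSize = unsafe.Sizeof(uintptr(0)) // goarch.PtrSize equivalent
		nptr := size / ptrSize                    // number of pointer-sized words
		return nptr/8 + 1
	}

	func main() {
		for _, size := range []uintptr{16, 64, 1024} {
			fmt.Printf("object %4d bytes -> bitmap %d bytes\n", size, bitmapBytes(size))
		}
	}
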
diff --git a/src/runtime/iface.go b/src/runtime/iface.go
index cd5fead999..3d1d9d6ba1 100644
--- a/src/runtime/iface.go
+++ b/src/runtime/iface.go
@@ -5,8 +5,9 @@
package runtime
import (
+ "internal/abi"
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
@@ -63,7 +64,7 @@ func getitab(inter *interfacetype, typ *_type, canfail bool) *itab {
}
// Entry doesn't exist yet. Make a new entry & add it.
- m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.mhdr)-1)*sys.PtrSize, 0, &memstats.other_sys))
+ m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.mhdr)-1)*goarch.PtrSize, 0, &memstats.other_sys))
m.inter = inter
m._type = typ
// The hash is used in type switches. However, compiler statically generates itab's
@@ -100,7 +101,7 @@ func (t *itabTableType) find(inter *interfacetype, typ *_type) *itab {
mask := t.size - 1
h := itabHashFunc(inter, typ) & mask
for i := uintptr(1); ; i++ {
- p := (**itab)(add(unsafe.Pointer(&t.entries), h*sys.PtrSize))
+ p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
// Use atomic read here so if we see m != nil, we also see
// the initializations of the fields of m.
// m := *p
@@ -133,7 +134,7 @@ func itabAdd(m *itab) {
// t2 = new(itabTableType) + some additional entries
// We lie and tell malloc we want pointer-free memory because
// all the pointed-to values are not in the heap.
- t2 := (*itabTableType)(mallocgc((2+2*t.size)*sys.PtrSize, nil, true))
+ t2 := (*itabTableType)(mallocgc((2+2*t.size)*goarch.PtrSize, nil, true))
t2.size = t.size * 2
// Copy over entries.
@@ -161,7 +162,7 @@ func (t *itabTableType) add(m *itab) {
mask := t.size - 1
h := itabHashFunc(m.inter, m._type) & mask
for i := uintptr(1); ; i++ {
- p := (**itab)(add(unsafe.Pointer(&t.entries), h*sys.PtrSize))
+ p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
m2 := *p
if m2 == m {
// A given itab may be used in more than one module
@@ -315,26 +316,36 @@ var (
// The convXXX functions succeed on a nil input, whereas the assertXXX
// functions fail on a nil input.
-func convT2E(t *_type, elem unsafe.Pointer) (e eface) {
+// convT converts a value of type t, which is pointed to by v, to a pointer that can
+// be used as the second word of an interface value.
+func convT(t *_type, v unsafe.Pointer) unsafe.Pointer {
if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2E))
+ raceReadObjectPC(t, v, getcallerpc(), abi.FuncPCABIInternal(convT))
}
if msanenabled {
- msanread(elem, t.size)
+ msanread(v, t.size)
}
x := mallocgc(t.size, t, true)
- // TODO: We allocate a zeroed object only to overwrite it with actual data.
- // Figure out how to avoid zeroing. Also below in convT2Eslice, convT2I, convT2Islice.
- typedmemmove(t, x, elem)
- e._type = t
- e.data = x
- return
+ typedmemmove(t, x, v)
+ return x
+}
+func convTnoptr(t *_type, v unsafe.Pointer) unsafe.Pointer {
+ // TODO: maybe take size instead of type?
+ if raceenabled {
+ raceReadObjectPC(t, v, getcallerpc(), abi.FuncPCABIInternal(convTnoptr))
+ }
+ if msanenabled {
+ msanread(v, t.size)
+ }
+ x := mallocgc(t.size, t, false)
+ memmove(x, v, t.size)
+ return x
}
func convT16(val uint16) (x unsafe.Pointer) {
if val < uint16(len(staticuint64s)) {
x = unsafe.Pointer(&staticuint64s[val])
- if sys.BigEndian {
+ if goarch.BigEndian {
x = add(x, 6)
}
} else {
@@ -347,7 +358,7 @@ func convT16(val uint16) (x unsafe.Pointer) {
func convT32(val uint32) (x unsafe.Pointer) {
if val < uint32(len(staticuint64s)) {
x = unsafe.Pointer(&staticuint64s[val])
- if sys.BigEndian {
+ if goarch.BigEndian {
x = add(x, 4)
}
} else {
@@ -388,63 +399,16 @@ func convTslice(val []byte) (x unsafe.Pointer) {
return
}
-func convT2Enoptr(t *_type, elem unsafe.Pointer) (e eface) {
- if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2Enoptr))
- }
- if msanenabled {
- msanread(elem, t.size)
- }
- x := mallocgc(t.size, t, false)
- memmove(x, elem, t.size)
- e._type = t
- e.data = x
- return
-}
-
-func convT2I(tab *itab, elem unsafe.Pointer) (i iface) {
- t := tab._type
- if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2I))
- }
- if msanenabled {
- msanread(elem, t.size)
- }
- x := mallocgc(t.size, t, true)
- typedmemmove(t, x, elem)
- i.tab = tab
- i.data = x
- return
-}
-
-func convT2Inoptr(tab *itab, elem unsafe.Pointer) (i iface) {
- t := tab._type
- if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2Inoptr))
- }
- if msanenabled {
- msanread(elem, t.size)
- }
- x := mallocgc(t.size, t, false)
- memmove(x, elem, t.size)
- i.tab = tab
- i.data = x
- return
-}
-
-func convI2I(inter *interfacetype, i iface) (r iface) {
- tab := i.tab
- if tab == nil {
- return
+// convI2I returns the new itab to be used for the destination value
+// when converting a value with itab src to the dst interface.
+func convI2I(dst *interfacetype, src *itab) *itab {
+ if src == nil {
+ return nil
}
- if tab.inter == inter {
- r.tab = tab
- r.data = i.data
- return
+ if src.inter == dst {
+ return src
}
- r.tab = getitab(inter, tab._type, false)
- r.data = i.data
- return
+ return getitab(dst, src._type, false)
}
func assertI2I(inter *interfacetype, tab *itab) *itab {
@@ -511,7 +475,7 @@ func iterate_itabs(fn func(*itab)) {
// so no other locks/atomics needed.
t := itabTable
for i := uintptr(0); i < t.size; i++ {
- m := *(**itab)(add(unsafe.Pointer(&t.entries), i*sys.PtrSize))
+ m := *(**itab)(add(unsafe.Pointer(&t.entries), i*goarch.PtrSize))
if m != nil {
fn(m)
}
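The reworked helpers above split interface construction: convT/convTnoptr now return only the data word (the second word of the interface), and convI2I returns only the itab, with the caller assembling the final two-word value. The following is a minimal sketch of that two-word layout from outside the runtime; eface here is a local stand-in for the runtime's unexported type, and the cast of &i relies on the standard (but unexported) interface representation, so treat it as illustrative only.

package main

import (
	"fmt"
	"unsafe"
)

// eface mirrors the runtime's two-word empty-interface layout:
// a type word and a data word (the pointer convT would return).
type eface struct {
	_type unsafe.Pointer // *_type in the runtime
	data  unsafe.Pointer // payload pointer, e.g. the result of convT
}

func main() {
	v := 42
	var i interface{} = v // compiler boxes v (convT-style) and fills both words;
	// small integers may instead point into the runtime's staticuint64s
	// table, as in convT16/convT32 above.

	e := (*eface)(unsafe.Pointer(&i))
	fmt.Printf("type word: %p  data word: %p\n", e._type, e.data)
	fmt.Println(*(*int)(e.data)) // 42: the data word points at the boxed copy
}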
diff --git a/src/runtime/internal/atomic/atomic_test.go b/src/runtime/internal/atomic/atomic_test.go
index c9c2eba248..2ae60b8507 100644
--- a/src/runtime/internal/atomic/atomic_test.go
+++ b/src/runtime/internal/atomic/atomic_test.go
@@ -5,9 +5,9 @@
package atomic_test
import (
+ "internal/goarch"
"runtime"
"runtime/internal/atomic"
- "runtime/internal/sys"
"testing"
"unsafe"
)
@@ -56,7 +56,7 @@ func TestXadduintptr(t *testing.T) {
// Tests that xadduintptr correctly updates 64-bit values. The place where
// we actually do so is mstats.go, functions mSysStat{Inc,Dec}.
func TestXadduintptrOnUint64(t *testing.T) {
- if sys.BigEndian {
+ if goarch.BigEndian {
// On big endian architectures, we never use xadduintptr to update
// 64-bit values and hence we skip the test. (Note that functions
// mSysStat{Inc,Dec} in mstats.go have explicit checks for
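The test skip above hinges on byte order: adding through a pointer-sized view of a 64-bit counter only touches its low half when the low half comes first in memory. A small stand-alone sketch of that aliasing (not runtime code) makes the skip condition concrete.

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	var counter uint64 = 1
	// View the first 32-bit word of the counter — what a 32-bit,
	// uintptr-sized add would modify.
	low := (*uint32)(unsafe.Pointer(&counter))
	*low += 41
	// Little endian: 42. Big endian: the first word is the HIGH half,
	// so the counter becomes 1 + 41<<32 — hence the skip on big endian.
	fmt.Println(counter)
}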
diff --git a/src/runtime/internal/math/math.go b/src/runtime/internal/math/math.go
index b6bd12d3e8..c3fac366be 100644
--- a/src/runtime/internal/math/math.go
+++ b/src/runtime/internal/math/math.go
@@ -4,14 +4,14 @@
package math
-import "runtime/internal/sys"
+import "internal/goarch"
const MaxUintptr = ^uintptr(0)
// MulUintptr returns a * b and whether the multiplication overflowed.
// On supported platforms this is an intrinsic lowered by the compiler.
func MulUintptr(a, b uintptr) (uintptr, bool) {
- if a|b < 1<<(4*sys.PtrSize) || a == 0 {
+ if a|b < 1<<(4*goarch.PtrSize) || a == 0 {
return a * b, false
}
overflow := b > MaxUintptr/a
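MulUintptr's fast path relies on a simple bound: if a|b is below 1<<(half the word width), both operands fit in half a word, so their product fits in a full word and the division check can be skipped. Since runtime/internal/math is not importable by ordinary packages, here is a sketch of the same logic as a plain helper; bits.UintSize/2 stands in for 4*goarch.PtrSize.

package main

import (
	"fmt"
	"math/bits"
)

const maxUintptr = ^uintptr(0)

// mulUintptr returns a*b and whether the multiplication overflowed,
// mirroring runtime/internal/math.MulUintptr.
func mulUintptr(a, b uintptr) (uintptr, bool) {
	if a|b < 1<<(bits.UintSize/2) || a == 0 {
		// Both operands fit in half a word (or a is zero), so the
		// product cannot overflow a full word.
		return a * b, false
	}
	return a * b, b > maxUintptr/a
}

func main() {
	fmt.Println(mulUintptr(1<<20, 1<<20))  // 1099511627776 false on 64-bit
	fmt.Println(mulUintptr(maxUintptr, 2)) // wrapped value, true
}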
diff --git a/src/runtime/internal/sys/consts.go b/src/runtime/internal/sys/consts.go
new file mode 100644
index 0000000000..fffcf81d1f
--- /dev/null
+++ b/src/runtime/internal/sys/consts.go
@@ -0,0 +1,34 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sys
+
+import (
+ "internal/goarch"
+ "internal/goos"
+)
+
+// AIX requires a larger stack for syscalls.
+const StackGuardMultiplier = StackGuardMultiplierDefault*(1-goos.IsAix) + 2*goos.IsAix
+
+// DefaultPhysPageSize is the default physical page size.
+const DefaultPhysPageSize = goarch.DefaultPhysPageSize
+
+// PCQuantum is the minimal unit for a program counter (1 on x86, 4 on most other systems).
+// The various PC tables record PC deltas pre-divided by PCQuantum.
+const PCQuantum = goarch.PCQuantum
+
+// Int64Align is the required alignment for a 64-bit integer (4 on 32-bit systems, 8 on 64-bit).
+const Int64Align = goarch.PtrSize
+
+// MinFrameSize is the size of the system-reserved words at the bottom
+// of a frame (just above the architectural stack pointer).
+// It is zero on x86 and PtrSize on most non-x86 (LR-based) systems.
+// On PowerPC it is larger, to cover three more reserved words:
+// the compiler word, the link editor word, and the TOC save word.
+const MinFrameSize = goarch.MinFrameSize
+
+// StackAlign is the required alignment of the SP register.
+// The stack must be at least word aligned, but some architectures require more.
+const StackAlign = goarch.StackAlign
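The new consts.go keeps package sys as a thin facade over internal/goarch and internal/goos. StackGuardMultiplier above also shows the 0/1-flag arithmetic these packages enable: multiplying by a flag that is 1 on exactly one platform and 0 elsewhere selects a value without build tags. A sketch of that selection, with isAix standing in for goos.IsAix and the default value assumed purely for illustration:

package main

import "fmt"

const (
	isAix                       = 0 // goos.IsAix: 1 when GOOS=aix, else 0
	stackGuardMultiplierDefault = 1 // assumed default, for illustration only

	// Non-AIX: default*(1-0) + 2*0 = default.
	// AIX:     default*(1-1) + 2*1 = 2.
	stackGuardMultiplier = stackGuardMultiplierDefault*(1-isAix) + 2*isAix
)

func main() {
	fmt.Println(stackGuardMultiplier)
}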
diff --git a/src/runtime/internal/sys/sys.go b/src/runtime/internal/sys/sys.go
index 9d9ac4507f..694101d36f 100644
--- a/src/runtime/internal/sys/sys.go
+++ b/src/runtime/internal/sys/sys.go
@@ -5,11 +5,3 @@
// package sys contains system- and configuration- and architecture-specific
// constants used by the runtime.
package sys
-
-// The next line makes 'go generate' write the zgo*.go files with
-// per-OS and per-arch information, including constants
-// named Goos$GOOS and Goarch$GOARCH for every
-// known GOOS and GOARCH. The constant is 1 on the
-// current system, 0 otherwise; multiplying by them is
-// useful for defining GOOS- or GOARCH-specific constants.
-//go:generate go run gengoos.go
diff --git a/src/runtime/internal/sys/zgoarch_386.go b/src/runtime/internal/sys/zgoarch_386.go
deleted file mode 100644
index 5b189e7e73..0000000000
--- a/src/runtime/internal/sys/zgoarch_386.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build 386
-// +build 386
-
-package sys
-
-const GOARCH = `386`
-
-const Goarch386 = 1
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchLoong64 = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
diff --git a/src/runtime/internal/sys/zgoarch_amd64.go b/src/runtime/internal/sys/zgoarch_amd64.go
deleted file mode 100644
index 312977d079..0000000000
--- a/src/runtime/internal/sys/zgoarch_amd64.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build amd64
-// +build amd64
-
-package sys
-
-const GOARCH = `amd64`
-
-const Goarch386 = 0
-const GoarchAmd64 = 1
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchLoong64 = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
diff --git a/src/runtime/internal/sys/zgoarch_arm.go b/src/runtime/internal/sys/zgoarch_arm.go
deleted file mode 100644
index 5781870324..0000000000
--- a/src/runtime/internal/sys/zgoarch_arm.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build arm
-// +build arm
-
-package sys
-
-const GOARCH = `arm`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 1
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchLoong64 = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
diff --git a/src/runtime/internal/sys/zgoarch_arm64.go b/src/runtime/internal/sys/zgoarch_arm64.go
deleted file mode 100644
index f72a1f2161..0000000000
--- a/src/runtime/internal/sys/zgoarch_arm64.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build arm64
-// +build arm64
-
-package sys
-
-const GOARCH = `arm64`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 1
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchLoong64 = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
diff --git a/src/runtime/internal/sys/zgoarch_arm64be.go b/src/runtime/internal/sys/zgoarch_arm64be.go
deleted file mode 100644
index e805646058..0000000000
--- a/src/runtime/internal/sys/zgoarch_arm64be.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build arm64be
-// +build arm64be
-
-package sys
-
-const GOARCH = `arm64be`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 1
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchLoong64 = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
diff --git a/src/runtime/internal/sys/zgoarch_armbe.go b/src/runtime/internal/sys/zgoarch_armbe.go
deleted file mode 100644
index d8d4e56d9a..0000000000
--- a/src/runtime/internal/sys/zgoarch_armbe.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build armbe
-// +build armbe
-
-package sys
-
-const GOARCH = `armbe`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 1
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchLoong64 = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
diff --git a/src/runtime/internal/sys/zgoarch_loong64.go b/src/runtime/internal/sys/zgoarch_loong64.go
deleted file mode 100644
index 6f35eb44a3..0000000000
--- a/src/runtime/internal/sys/zgoarch_loong64.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build loong64
-// +build loong64
-
-package sys
-
-const GOARCH = `loong64`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchLoong64 = 1
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
diff --git a/src/runtime/internal/sys/zgoarch_mips.go b/src/runtime/internal/sys/zgoarch_mips.go
deleted file mode 100644
index bd58a92a0e..0000000000
--- a/src/runtime/internal/sys/zgoarch_mips.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build mips
-// +build mips
-
-package sys
-
-const GOARCH = `mips`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchLoong64 = 0
-const GoarchMips = 1
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
diff --git a/src/runtime/internal/sys/zgoarch_mips64.go b/src/runtime/internal/sys/zgoarch_mips64.go
deleted file mode 100644
index 8e4a3dcd52..0000000000
--- a/src/runtime/internal/sys/zgoarch_mips64.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build mips64
-// +build mips64
-
-package sys
-
-const GOARCH = `mips64`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchLoong64 = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 1
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
diff --git a/src/runtime/internal/sys/zgoarch_mips64le.go b/src/runtime/internal/sys/zgoarch_mips64le.go
deleted file mode 100644
index d8e00339ea..0000000000
--- a/src/runtime/internal/sys/zgoarch_mips64le.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build mips64le
-// +build mips64le
-
-package sys
-
-const GOARCH = `mips64le`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchLoong64 = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 1
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
diff --git a/src/runtime/internal/sys/zgoarch_mips64p32.go b/src/runtime/internal/sys/zgoarch_mips64p32.go
deleted file mode 100644
index 8549cc0ba3..0000000000
--- a/src/runtime/internal/sys/zgoarch_mips64p32.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build mips64p32
-// +build mips64p32
-
-package sys
-
-const GOARCH = `mips64p32`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchLoong64 = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 1
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
diff --git a/src/runtime/internal/sys/zgoarch_mips64p32le.go b/src/runtime/internal/sys/zgoarch_mips64p32le.go
deleted file mode 100644
index 667b6fe514..0000000000
--- a/src/runtime/internal/sys/zgoarch_mips64p32le.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build mips64p32le
-// +build mips64p32le
-
-package sys
-
-const GOARCH = `mips64p32le`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchLoong64 = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 1
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
diff --git a/src/runtime/internal/sys/zgoarch_mipsle.go b/src/runtime/internal/sys/zgoarch_mipsle.go
deleted file mode 100644
index 8bedb2bb90..0000000000
--- a/src/runtime/internal/sys/zgoarch_mipsle.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build mipsle
-// +build mipsle
-
-package sys
-
-const GOARCH = `mipsle`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchLoong64 = 0
-const GoarchMips = 0
-const GoarchMipsle = 1
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
diff --git a/src/runtime/internal/sys/zgoarch_ppc.go b/src/runtime/internal/sys/zgoarch_ppc.go
deleted file mode 100644
index fe2196a327..0000000000
--- a/src/runtime/internal/sys/zgoarch_ppc.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build ppc
-// +build ppc
-
-package sys
-
-const GOARCH = `ppc`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchLoong64 = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 1
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
diff --git a/src/runtime/internal/sys/zgoarch_ppc64.go b/src/runtime/internal/sys/zgoarch_ppc64.go
deleted file mode 100644
index bd7cc43de3..0000000000
--- a/src/runtime/internal/sys/zgoarch_ppc64.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build ppc64
-// +build ppc64
-
-package sys
-
-const GOARCH = `ppc64`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 1
-const GoarchPpc64le = 0
-const GoarchLoong64 = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
diff --git a/src/runtime/internal/sys/zgoarch_ppc64le.go b/src/runtime/internal/sys/zgoarch_ppc64le.go
deleted file mode 100644
index e101892401..0000000000
--- a/src/runtime/internal/sys/zgoarch_ppc64le.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build ppc64le
-// +build ppc64le
-
-package sys
-
-const GOARCH = `ppc64le`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 1
-const GoarchLoong64 = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
diff --git a/src/runtime/internal/sys/zgoarch_riscv.go b/src/runtime/internal/sys/zgoarch_riscv.go
deleted file mode 100644
index 559f86071a..0000000000
--- a/src/runtime/internal/sys/zgoarch_riscv.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build riscv
-// +build riscv
-
-package sys
-
-const GOARCH = `riscv`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchLoong64 = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 1
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
diff --git a/src/runtime/internal/sys/zgoarch_riscv64.go b/src/runtime/internal/sys/zgoarch_riscv64.go
deleted file mode 100644
index 8485a94b3d..0000000000
--- a/src/runtime/internal/sys/zgoarch_riscv64.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build riscv64
-// +build riscv64
-
-package sys
-
-const GOARCH = `riscv64`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchLoong64 = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 1
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
diff --git a/src/runtime/internal/sys/zgoarch_s390.go b/src/runtime/internal/sys/zgoarch_s390.go
deleted file mode 100644
index 4c4569e376..0000000000
--- a/src/runtime/internal/sys/zgoarch_s390.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build s390
-// +build s390
-
-package sys
-
-const GOARCH = `s390`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchLoong64 = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 1
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
diff --git a/src/runtime/internal/sys/zgoarch_s390x.go b/src/runtime/internal/sys/zgoarch_s390x.go
deleted file mode 100644
index e50d2edbb5..0000000000
--- a/src/runtime/internal/sys/zgoarch_s390x.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build s390x
-// +build s390x
-
-package sys
-
-const GOARCH = `s390x`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchLoong64 = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 1
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
diff --git a/src/runtime/internal/sys/zgoarch_sparc.go b/src/runtime/internal/sys/zgoarch_sparc.go
deleted file mode 100644
index 0d08752c7b..0000000000
--- a/src/runtime/internal/sys/zgoarch_sparc.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build sparc
-// +build sparc
-
-package sys
-
-const GOARCH = `sparc`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchLoong64 = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 1
-const GoarchSparc64 = 0
-const GoarchWasm = 0
diff --git a/src/runtime/internal/sys/zgoarch_sparc64.go b/src/runtime/internal/sys/zgoarch_sparc64.go
deleted file mode 100644
index ba405bbf35..0000000000
--- a/src/runtime/internal/sys/zgoarch_sparc64.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build sparc64
-// +build sparc64
-
-package sys
-
-const GOARCH = `sparc64`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchLoong64 = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 1
-const GoarchWasm = 0
diff --git a/src/runtime/internal/sys/zgoarch_wasm.go b/src/runtime/internal/sys/zgoarch_wasm.go
deleted file mode 100644
index 7c3e5afd1e..0000000000
--- a/src/runtime/internal/sys/zgoarch_wasm.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build wasm
-// +build wasm
-
-package sys
-
-const GOARCH = `wasm`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchLoong64 = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 1
diff --git a/src/runtime/internal/sys/zgoos_aix.go b/src/runtime/internal/sys/zgoos_aix.go
deleted file mode 100644
index f3b907471f..0000000000
--- a/src/runtime/internal/sys/zgoos_aix.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build aix
-// +build aix
-
-package sys
-
-const GOOS = `aix`
-
-const GoosAix = 1
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
diff --git a/src/runtime/internal/sys/zgoos_android.go b/src/runtime/internal/sys/zgoos_android.go
deleted file mode 100644
index e28baf7c48..0000000000
--- a/src/runtime/internal/sys/zgoos_android.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build android
-// +build android
-
-package sys
-
-const GOOS = `android`
-
-const GoosAix = 0
-const GoosAndroid = 1
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
diff --git a/src/runtime/internal/sys/zgoos_darwin.go b/src/runtime/internal/sys/zgoos_darwin.go
deleted file mode 100644
index 3c7f7b543e..0000000000
--- a/src/runtime/internal/sys/zgoos_darwin.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build !ios && darwin
-// +build !ios,darwin
-
-package sys
-
-const GOOS = `darwin`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 1
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
diff --git a/src/runtime/internal/sys/zgoos_dragonfly.go b/src/runtime/internal/sys/zgoos_dragonfly.go
deleted file mode 100644
index f844d29e2a..0000000000
--- a/src/runtime/internal/sys/zgoos_dragonfly.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build dragonfly
-// +build dragonfly
-
-package sys
-
-const GOOS = `dragonfly`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 1
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
diff --git a/src/runtime/internal/sys/zgoos_freebsd.go b/src/runtime/internal/sys/zgoos_freebsd.go
deleted file mode 100644
index 8999a2797a..0000000000
--- a/src/runtime/internal/sys/zgoos_freebsd.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build freebsd
-// +build freebsd
-
-package sys
-
-const GOOS = `freebsd`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 1
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
diff --git a/src/runtime/internal/sys/zgoos_hurd.go b/src/runtime/internal/sys/zgoos_hurd.go
deleted file mode 100644
index a546488bf8..0000000000
--- a/src/runtime/internal/sys/zgoos_hurd.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build hurd
-// +build hurd
-
-package sys
-
-const GOOS = `hurd`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 1
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
diff --git a/src/runtime/internal/sys/zgoos_illumos.go b/src/runtime/internal/sys/zgoos_illumos.go
deleted file mode 100644
index 02a4ca06e8..0000000000
--- a/src/runtime/internal/sys/zgoos_illumos.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build illumos
-// +build illumos
-
-package sys
-
-const GOOS = `illumos`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 1
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
diff --git a/src/runtime/internal/sys/zgoos_ios.go b/src/runtime/internal/sys/zgoos_ios.go
deleted file mode 100644
index 033eec623d..0000000000
--- a/src/runtime/internal/sys/zgoos_ios.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build ios
-// +build ios
-
-package sys
-
-const GOOS = `ios`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 1
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
diff --git a/src/runtime/internal/sys/zgoos_js.go b/src/runtime/internal/sys/zgoos_js.go
deleted file mode 100644
index 28226ad60a..0000000000
--- a/src/runtime/internal/sys/zgoos_js.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build js
-// +build js
-
-package sys
-
-const GOOS = `js`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 1
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
diff --git a/src/runtime/internal/sys/zgoos_linux.go b/src/runtime/internal/sys/zgoos_linux.go
deleted file mode 100644
index 01546e4b9f..0000000000
--- a/src/runtime/internal/sys/zgoos_linux.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build !android && linux
-// +build !android,linux
-
-package sys
-
-const GOOS = `linux`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 1
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
diff --git a/src/runtime/internal/sys/zgoos_netbsd.go b/src/runtime/internal/sys/zgoos_netbsd.go
deleted file mode 100644
index 9d658b20ee..0000000000
--- a/src/runtime/internal/sys/zgoos_netbsd.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build netbsd
-// +build netbsd
-
-package sys
-
-const GOOS = `netbsd`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 1
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
diff --git a/src/runtime/internal/sys/zgoos_openbsd.go b/src/runtime/internal/sys/zgoos_openbsd.go
deleted file mode 100644
index 0f55454a95..0000000000
--- a/src/runtime/internal/sys/zgoos_openbsd.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build openbsd
-// +build openbsd
-
-package sys
-
-const GOOS = `openbsd`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 1
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
diff --git a/src/runtime/internal/sys/zgoos_plan9.go b/src/runtime/internal/sys/zgoos_plan9.go
deleted file mode 100644
index d0347464d6..0000000000
--- a/src/runtime/internal/sys/zgoos_plan9.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build plan9
-// +build plan9
-
-package sys
-
-const GOOS = `plan9`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 1
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
diff --git a/src/runtime/internal/sys/zgoos_solaris.go b/src/runtime/internal/sys/zgoos_solaris.go
deleted file mode 100644
index 05c3007e2c..0000000000
--- a/src/runtime/internal/sys/zgoos_solaris.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build !illumos && solaris
-// +build !illumos,solaris
-
-package sys
-
-const GOOS = `solaris`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 1
-const GoosWindows = 0
-const GoosZos = 0
diff --git a/src/runtime/internal/sys/zgoos_windows.go b/src/runtime/internal/sys/zgoos_windows.go
deleted file mode 100644
index 7d07fa3a45..0000000000
--- a/src/runtime/internal/sys/zgoos_windows.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build windows
-// +build windows
-
-package sys
-
-const GOOS = `windows`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 1
-const GoosZos = 0
diff --git a/src/runtime/internal/sys/zgoos_zos.go b/src/runtime/internal/sys/zgoos_zos.go
deleted file mode 100644
index d6e5b9b0cb..0000000000
--- a/src/runtime/internal/sys/zgoos_zos.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build zos
-// +build zos
-
-package sys
-
-const GOOS = `zos`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 1
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index cc22b0f276..f8d5d48a28 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -101,6 +101,8 @@
package runtime
import (
+ "internal/goarch"
+ "internal/goos"
"runtime/internal/atomic"
"runtime/internal/math"
"runtime/internal/sys"
@@ -150,7 +152,7 @@ const (
// windows/32 | 4KB | 3
// windows/64 | 8KB | 2
// plan9 | 4KB | 3
- _NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9
+ _NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9
// heapAddrBits is the number of bits in a heap address. On
// amd64, addresses are sign-extended beyond heapAddrBits. On
@@ -207,7 +209,7 @@ const (
// arenaBaseOffset to offset into the top 4 GiB.
//
// WebAssembly currently has a limit of 4GB linear memory.
- heapAddrBits = (_64bit*(1-sys.GoarchWasm)*(1-sys.GoosIos*sys.GoarchArm64))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + 33*sys.GoosIos*sys.GoarchArm64
+ heapAddrBits = (_64bit*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64))*48 + (1-_64bit+goarch.IsWasm)*(32-(goarch.IsMips+goarch.IsMipsle)) + 33*goos.IsIos*goarch.IsArm64
// maxAlloc is the maximum size of an allocation. On 64-bit,
// it's theoretically possible to allocate 1<<heapAddrBits bytes. On
@@ -248,10 +250,10 @@ const (
// logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
// prefer using heapArenaBytes where possible (we need the
// constant to compute some other constants).
- logHeapArenaBytes = (6+20)*(_64bit*(1-sys.GoosWindows)*(1-sys.GoarchWasm)*(1-sys.GoosIos*sys.GoarchArm64)) + (2+20)*(_64bit*sys.GoosWindows) + (2+20)*(1-_64bit) + (2+20)*sys.GoarchWasm + (2+20)*sys.GoosIos*sys.GoarchArm64
+ logHeapArenaBytes = (6+20)*(_64bit*(1-goos.IsWindows)*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64)) + (2+20)*(_64bit*goos.IsWindows) + (2+20)*(1-_64bit) + (2+20)*goarch.IsWasm + (2+20)*goos.IsIos*goarch.IsArm64
// heapArenaBitmapBytes is the size of each heap arena's bitmap.
- heapArenaBitmapBytes = heapArenaBytes / (sys.PtrSize * 8 / 2)
+ heapArenaBitmapBytes = heapArenaBytes / (goarch.PtrSize * 8 / 2)
pagesPerArena = heapArenaBytes / pageSize
@@ -268,7 +270,7 @@ const (
// We use the L1 map on 64-bit Windows because the arena size
// is small, but the address space is still 48 bits, and
// there's a high cost to having a large L2.
- arenaL1Bits = 6 * (_64bit * sys.GoosWindows)
+ arenaL1Bits = 6 * (_64bit * goos.IsWindows)
// arenaL2Bits is the number of bits of the arena number
// covered by the second level arena index.
@@ -303,7 +305,7 @@ const (
//
// On other platforms, the user address space is contiguous
// and starts at 0, so no offset is necessary.
- arenaBaseOffset = 0xffff800000000000*sys.GoarchAmd64 + 0x0a00000000000000*sys.GoosAix
+ arenaBaseOffset = 0xffff800000000000*goarch.IsAmd64 + 0x0a00000000000000*goos.IsAix
// A typed version of this constant that will make it into DWARF (for viewcore).
arenaBaseOffsetUintptr = uintptr(arenaBaseOffset)
@@ -420,8 +422,6 @@ func mallocinit() {
throw("bad TinySizeClass")
}
- testdefersizes()
-
if heapArenaBitmapBytes&(heapArenaBitmapBytes-1) != 0 {
// heapBits expects modular arithmetic on bitmap
// addresses to work.
@@ -485,7 +485,7 @@ func mallocinit() {
lockInit(&globalAlloc.mutex, lockRankGlobalAlloc)
// Create initial arena growth hints.
- if sys.PtrSize == 8 {
+ if goarch.PtrSize == 8 {
// On a 64-bit machine, we pick the following hints
// because:
//
@@ -732,7 +732,7 @@ mapped:
l2 := h.arenas[ri.l1()]
if l2 == nil {
// Allocate an L2 arena map.
- l2 = (*[1 << arenaL2Bits]*heapArena)(persistentalloc(unsafe.Sizeof(*l2), sys.PtrSize, nil))
+ l2 = (*[1 << arenaL2Bits]*heapArena)(persistentalloc(unsafe.Sizeof(*l2), goarch.PtrSize, nil))
if l2 == nil {
throw("out of memory allocating heap arena map")
}
@@ -743,9 +743,9 @@ mapped:
throw("arena already initialized")
}
var r *heapArena
- r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
+ r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
if r == nil {
- r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
+ r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
if r == nil {
throw("out of memory allocating heap arena metadata")
}
@@ -753,16 +753,16 @@ mapped:
// Add the arena to the arenas list.
if len(h.allArenas) == cap(h.allArenas) {
- size := 2 * uintptr(cap(h.allArenas)) * sys.PtrSize
+ size := 2 * uintptr(cap(h.allArenas)) * goarch.PtrSize
if size == 0 {
size = physPageSize
}
- newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gcMiscSys))
+ newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys))
if newArray == nil {
throw("out of memory allocating allArenas")
}
oldSlice := h.allArenas
- *(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / sys.PtrSize)}
+ *(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / goarch.PtrSize)}
copy(h.allArenas, oldSlice)
// Do not free the old backing array because
// there may be concurrent readers. Since we
@@ -1017,7 +1017,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// Align tiny pointer for required (conservative) alignment.
if size&7 == 0 {
off = alignUp(off, 8)
- } else if sys.PtrSize == 4 && size == 12 {
+ } else if goarch.PtrSize == 4 && size == 12 {
// Conservatively align 12-byte objects to 8 bytes on 32-bit
// systems so that objects whose first field is a 64-bit
// value is aligned to 8 bytes and does not cause a fault on
@@ -1088,15 +1088,6 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
var scanSize uintptr
if !noscan {
- // If allocating a defer+arg block, now that we've picked a malloc size
- // large enough to hold everything, cut the "asked for" size down to
- // just the defer header, so that the GC bitmap will record the arg block
- // as containing nothing at all (as if it were unused space at the end of
- // a malloc block caused by size rounding).
- // The defer arg areas are scanned as part of scanstack.
- if typ == deferType {
- dataSize = unsafe.Sizeof(_defer{})
- }
heapBitsSetType(uintptr(x), size, dataSize, typ)
if dataSize > typ.size {
// Array allocation. If there are any
@@ -1419,7 +1410,7 @@ func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
break
}
}
- persistent.off = alignUp(sys.PtrSize, align)
+ persistent.off = alignUp(goarch.PtrSize, align)
}
p := persistent.base.add(persistent.off)
persistent.off += size
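The malloc.go constants rewritten above use the same flag arithmetic. Evaluating _NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9 by hand recovers the table in its comment; the sketch below just replays that arithmetic with the flags passed in explicitly.

package main

import "fmt"

// numStackOrders mirrors
//   4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9
// with PtrSize and the OS flags as ordinary arguments.
func numStackOrders(ptrSize, isWindows, isPlan9 int) int {
	return 4 - ptrSize/4*isWindows - 1*isPlan9
}

func main() {
	fmt.Println(numStackOrders(8, 0, 0)) // linux/amd64:   4
	fmt.Println(numStackOrders(4, 1, 0)) // windows/386:   3
	fmt.Println(numStackOrders(8, 1, 0)) // windows/amd64: 2
	fmt.Println(numStackOrders(4, 0, 1)) // plan9/386:     3
}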
diff --git a/src/runtime/map.go b/src/runtime/map.go
index 111db56b01..0cad1a354d 100644
--- a/src/runtime/map.go
+++ b/src/runtime/map.go
@@ -54,9 +54,10 @@ package runtime
// before the table grows. Typical tables will be somewhat less loaded.
import (
+ "internal/abi"
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/math"
- "runtime/internal/sys"
"unsafe"
)
@@ -103,7 +104,7 @@ const (
sameSizeGrow = 8 // the current map growth is to a new map of the same size
// sentinel bucket ID for iterator checks
- noCheck = 1<<(8*sys.PtrSize) - 1
+ noCheck = 1<<(8*goarch.PtrSize) - 1
)
// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
@@ -182,7 +183,7 @@ type hiter struct {
// bucketShift returns 1<<b, optimized for code generation.
func bucketShift(b uint8) uintptr {
// Masking the shift amount allows overflow checks to be elided.
- return uintptr(1) << (b & (sys.PtrSize*8 - 1))
+ return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
}
// bucketMask returns 1<<b - 1, optimized for code generation.
@@ -192,7 +193,7 @@ func bucketMask(b uint8) uintptr {
// tophash calculates the tophash value for hash.
func tophash(hash uintptr) uint8 {
- top := uint8(hash >> (sys.PtrSize*8 - 8))
+ top := uint8(hash >> (goarch.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
@@ -205,11 +206,11 @@ func evacuated(b *bmap) bool {
}
func (b *bmap) overflow(t *maptype) *bmap {
- return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
+ return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-goarch.PtrSize))
}
func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
- *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
+ *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-goarch.PtrSize)) = ovf
}
func (b *bmap) keys() unsafe.Pointer {
@@ -394,7 +395,7 @@ func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets un
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := getcallerpc()
- pc := funcPC(mapaccess1)
+ pc := abi.FuncPCABIInternal(mapaccess1)
racereadpc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
}
@@ -452,7 +453,7 @@ bucketloop:
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- pc := funcPC(mapaccess2)
+ pc := abi.FuncPCABIInternal(mapaccess2)
racereadpc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
}
@@ -574,7 +575,7 @@ func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
}
if raceenabled {
callerpc := getcallerpc()
- pc := funcPC(mapassign)
+ pc := abi.FuncPCABIInternal(mapassign)
racewritepc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
}
@@ -685,7 +686,7 @@ done:
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- pc := funcPC(mapdelete)
+ pc := abi.FuncPCABIInternal(mapdelete)
racewritepc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
}
@@ -802,14 +803,14 @@ search:
func mapiterinit(t *maptype, h *hmap, it *hiter) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
}
if h == nil || h.count == 0 {
return
}
- if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
+ if unsafe.Sizeof(hiter{})/goarch.PtrSize != 12 {
throw("hash_iter size incorrect") // see cmd/compile/internal/reflectdata/reflect.go
}
it.t = t
@@ -852,7 +853,7 @@ func mapiternext(it *hiter) {
h := it.h
if raceenabled {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
}
if h.flags&hashWriting != 0 {
throw("concurrent map iteration and map write")
@@ -978,7 +979,7 @@ next:
func mapclear(t *maptype, h *hmap) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- pc := funcPC(mapclear)
+ pc := abi.FuncPCABIInternal(mapclear)
racewritepc(unsafe.Pointer(h), callerpc, pc)
}
@@ -1280,11 +1281,11 @@ func reflect_makemap(t *maptype, cap int) *hmap {
if t.key.equal == nil {
throw("runtime.reflect_makemap: unsupported map key type")
}
- if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(sys.PtrSize)) ||
+ if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(goarch.PtrSize)) ||
t.key.size <= maxKeySize && (t.indirectkey() || t.keysize != uint8(t.key.size)) {
throw("key size wrong")
}
- if t.elem.size > maxElemSize && (!t.indirectelem() || t.elemsize != uint8(sys.PtrSize)) ||
+ if t.elem.size > maxElemSize && (!t.indirectelem() || t.elemsize != uint8(goarch.PtrSize)) ||
t.elem.size <= maxElemSize && (t.indirectelem() || t.elemsize != uint8(t.elem.size)) {
throw("elem size wrong")
}
@@ -1363,7 +1364,7 @@ func reflect_maplen(h *hmap) int {
}
if raceenabled {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
}
return h.count
}
@@ -1375,7 +1376,7 @@ func reflectlite_maplen(h *hmap) int {
}
if raceenabled {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
}
return h.count
}
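Two of the small helpers touched above are worth spelling out: bucketShift masks the shift amount so the compiler can drop its overflow check, and tophash keeps the top 8 bits of the hash, bumping small values past the sentinel range. A sketch of both as ordinary code (the real versions live in runtime/map.go); minTopHash = 5 is an assumption made here for illustration.

package main

import (
	"fmt"
	"math/bits"
)

const minTopHash = 5 // assumed sentinel boundary; the runtime reserves values below it

// bucketShift returns 1<<b; masking b keeps the shift in range so the
// compiler can elide its overflow check, as in the runtime version.
func bucketShift(b uint8) uintptr {
	return uintptr(1) << (b & (bits.UintSize - 1))
}

// tophash keeps the top 8 bits of the hash and bumps values that would
// collide with the bucket-state sentinels.
func tophash(hash uintptr) uint8 {
	top := uint8(hash >> (bits.UintSize - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return top
}

func main() {
	fmt.Println(bucketShift(3))       // 8 buckets for b=3
	fmt.Println(tophash(0xdeadbeef))  // top byte of the hash, never below minTopHash
}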
diff --git a/src/runtime/map_fast32.go b/src/runtime/map_fast32.go
index 8d52dad217..e80caeef55 100644
--- a/src/runtime/map_fast32.go
+++ b/src/runtime/map_fast32.go
@@ -5,14 +5,15 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast32))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0])
@@ -52,7 +53,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast32))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0]), false
@@ -95,7 +96,7 @@ func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
}
if raceenabled {
callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
@@ -185,7 +186,7 @@ func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
}
if raceenabled {
callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
@@ -272,7 +273,7 @@ done:
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast32))
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast32))
}
if h == nil || h.count == 0 {
return
@@ -301,7 +302,7 @@ search:
// Only clear key if there are pointers in it.
// This can only happen if pointers are 32 bit
// wide as 64 bit pointers do not fit into a 32 bit key.
- if sys.PtrSize == 4 && t.key.ptrdata != 0 {
+ if goarch.PtrSize == 4 && t.key.ptrdata != 0 {
// The key must be a pointer as we checked pointers are
// 32 bits wide and the key is 32 bits wide also.
*(*unsafe.Pointer)(k) = nil
@@ -427,7 +428,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
// Copy key.
- if sys.PtrSize == 4 && t.key.ptrdata != 0 && writeBarrier.enabled {
+ if goarch.PtrSize == 4 && t.key.ptrdata != 0 && writeBarrier.enabled {
// Write with a write barrier.
*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
} else {
diff --git a/src/runtime/map_fast64.go b/src/runtime/map_fast64.go
index f1368dc774..69d8872885 100644
--- a/src/runtime/map_fast64.go
+++ b/src/runtime/map_fast64.go
@@ -5,14 +5,15 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast64))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0])
@@ -52,7 +53,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast64))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0]), false
@@ -95,7 +96,7 @@ func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
}
if raceenabled {
callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
@@ -185,7 +186,7 @@ func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
}
if raceenabled {
callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
@@ -272,7 +273,7 @@ done:
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast64))
}
if h == nil || h.count == 0 {
return
@@ -300,7 +301,7 @@ search:
}
// Only clear key if there are pointers in it.
if t.key.ptrdata != 0 {
- if sys.PtrSize == 8 {
+ if goarch.PtrSize == 8 {
*(*unsafe.Pointer)(k) = nil
} else {
// There are three ways to squeeze one or more 32 bit pointers into 64 bits.
@@ -430,7 +431,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
// Copy key.
if t.key.ptrdata != 0 && writeBarrier.enabled {
- if sys.PtrSize == 8 {
+ if goarch.PtrSize == 8 {
// Write with a write barrier.
*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
} else {
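
The map fast paths above now obtain the reporting PC with abi.FuncPCABIInternal instead of the old funcPC helper, so the race detector attributes accesses to the function's ABIInternal entry point rather than a wrapper. internal/abi is not importable outside the toolchain; the minimal sketch below only illustrates the general idea of recovering a function's entry PC, using the public reflect and runtime APIs as an approximation.

package main

import (
	"fmt"
	"reflect"
	"runtime"
)

func mapWrite() {}

func main() {
	// reflect yields a code pointer for the function value; FuncForPC maps it
	// back to symbol metadata. This only approximates what the runtime does
	// internally with abi.FuncPCABIInternal.
	pc := reflect.ValueOf(mapWrite).Pointer()
	fmt.Println(runtime.FuncForPC(pc).Name()) // main.mapWrite
}
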
diff --git a/src/runtime/map_faststr.go b/src/runtime/map_faststr.go
index 0673dd39c8..4dca882c63 100644
--- a/src/runtime/map_faststr.go
+++ b/src/runtime/map_faststr.go
@@ -5,14 +5,15 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_faststr))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0])
@@ -26,7 +27,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
b := (*bmap)(h.buckets)
if key.len < 32 {
// short key, doing lots of comparisons is ok
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || isEmpty(b.tophash[i]) {
if b.tophash[i] == emptyRest {
@@ -35,14 +36,14 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
}
}
return unsafe.Pointer(&zeroVal[0])
}
// long key, try not to do more comparisons than necessary
keymaybe := uintptr(bucketCnt)
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || isEmpty(b.tophash[i]) {
if b.tophash[i] == emptyRest {
@@ -51,7 +52,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
continue
}
if k.str == key.str {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
}
// check first 4 bytes
if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
@@ -68,9 +69,9 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
keymaybe = i
}
if keymaybe != bucketCnt {
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
if memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.elemsize))
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.elemsize))
}
}
return unsafe.Pointer(&zeroVal[0])
@@ -91,13 +92,13 @@ dohash:
}
top := tophash(hash)
for ; b != nil; b = b.overflow(t) {
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || b.tophash[i] != top {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
}
}
}
@@ -107,7 +108,7 @@ dohash:
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_faststr))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0]), false
@@ -121,7 +122,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
b := (*bmap)(h.buckets)
if key.len < 32 {
// short key, doing lots of comparisons is ok
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || isEmpty(b.tophash[i]) {
if b.tophash[i] == emptyRest {
@@ -130,14 +131,14 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true
}
}
return unsafe.Pointer(&zeroVal[0]), false
}
// long key, try not to do more comparisons than necessary
keymaybe := uintptr(bucketCnt)
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || isEmpty(b.tophash[i]) {
if b.tophash[i] == emptyRest {
@@ -146,7 +147,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
continue
}
if k.str == key.str {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true
}
// check first 4 bytes
if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
@@ -163,9 +164,9 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
keymaybe = i
}
if keymaybe != bucketCnt {
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
if memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.elemsize)), true
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.elemsize)), true
}
}
return unsafe.Pointer(&zeroVal[0]), false
@@ -186,13 +187,13 @@ dohash:
}
top := tophash(hash)
for ; b != nil; b = b.overflow(t) {
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || b.tophash[i] != top {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true
}
}
}
@@ -205,7 +206,7 @@ func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
}
if raceenabled {
callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_faststr))
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_faststr))
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
@@ -245,7 +246,7 @@ bucketloop:
}
continue
}
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize))
if k.len != key.len {
continue
}
@@ -283,13 +284,13 @@ bucketloop:
}
insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks
- insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*sys.PtrSize)
+ insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
// store new key at insert position
*((*stringStruct)(insertk)) = *key
h.count++
done:
- elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*sys.PtrSize+inserti*uintptr(t.elemsize))
+ elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*goarch.PtrSize+inserti*uintptr(t.elemsize))
if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
@@ -300,7 +301,7 @@ done:
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_faststr))
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_faststr))
}
if h == nil || h.count == 0 {
return
@@ -324,7 +325,7 @@ func mapdelete_faststr(t *maptype, h *hmap, ky string) {
top := tophash(hash)
search:
for ; b != nil; b = b.overflow(t) {
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || b.tophash[i] != top {
continue
@@ -334,7 +335,7 @@ search:
}
// Clear key's pointer.
k.str = nil
- e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
+ e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
if t.elem.ptrdata != 0 {
memclrHasPointers(e, t.elem.size)
} else {
@@ -410,7 +411,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
x := &xy[0]
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
x.k = add(unsafe.Pointer(x.b), dataOffset)
- x.e = add(x.k, bucketCnt*2*sys.PtrSize)
+ x.e = add(x.k, bucketCnt*2*goarch.PtrSize)
if !h.sameSizeGrow() {
// Only calculate y pointers if we're growing bigger.
@@ -418,13 +419,13 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
y := &xy[1]
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
y.k = add(unsafe.Pointer(y.b), dataOffset)
- y.e = add(y.k, bucketCnt*2*sys.PtrSize)
+ y.e = add(y.k, bucketCnt*2*goarch.PtrSize)
}
for ; b != nil; b = b.overflow(t) {
k := add(unsafe.Pointer(b), dataOffset)
- e := add(k, bucketCnt*2*sys.PtrSize)
- for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*sys.PtrSize), add(e, uintptr(t.elemsize)) {
+ e := add(k, bucketCnt*2*goarch.PtrSize)
+ for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.elemsize)) {
top := b.tophash[i]
if isEmpty(top) {
b.tophash[i] = evacuatedEmpty
@@ -450,7 +451,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
dst.b = h.newoverflow(t, dst.b)
dst.i = 0
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
- dst.e = add(dst.k, bucketCnt*2*sys.PtrSize)
+ dst.e = add(dst.k, bucketCnt*2*goarch.PtrSize)
}
dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
@@ -463,7 +464,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
// key or elem arrays. That's ok, as we have the overflow pointer
// at the end of the bucket to protect against pointing past the
// end of the bucket.
- dst.k = add(dst.k, 2*sys.PtrSize)
+ dst.k = add(dst.k, 2*goarch.PtrSize)
dst.e = add(dst.e, uintptr(t.elemsize))
}
}
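
In the string fast path above, every key is a two-word stringStruct, so key i sits at dataOffset+i*2*PtrSize and the element area starts at dataOffset+bucketCnt*2*PtrSize. The stand-alone sketch below reproduces that arithmetic; bucketCnt, dataOffset and the element size are runtime internals, and the concrete values used here (8, 8 and 16) are assumed only for illustration.

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	const (
		ptrSize    = unsafe.Sizeof(uintptr(0)) // goarch.PtrSize equivalent
		bucketCnt  = 8                         // runtime constant, assumed here
		dataOffset = 8                         // offset of the key area, assumed here
		elemSize   = 16                        // hypothetical element size
	)
	for i := uintptr(0); i < bucketCnt; i++ {
		key := dataOffset + i*2*ptrSize
		elem := dataOffset + bucketCnt*2*ptrSize + i*elemSize
		fmt.Printf("slot %d: key at +%d, elem at +%d\n", i, key, elem)
	}
}
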
diff --git a/src/runtime/map_test.go b/src/runtime/map_test.go
index 302b3c23c1..24556b4093 100644
--- a/src/runtime/map_test.go
+++ b/src/runtime/map_test.go
@@ -6,10 +6,10 @@ package runtime_test
import (
"fmt"
+ "internal/goarch"
"math"
"reflect"
"runtime"
- "runtime/internal/sys"
"sort"
"strconv"
"strings"
@@ -21,7 +21,7 @@ func TestHmapSize(t *testing.T) {
// The structure of hmap is defined in runtime/map.go
// and in cmd/compile/internal/gc/reflect.go and must be in sync.
// The size of hmap should be 48 bytes on 64 bit and 28 bytes on 32 bit platforms.
- var hmapSize = uintptr(8 + 5*sys.PtrSize)
+ var hmapSize = uintptr(8 + 5*goarch.PtrSize)
if runtime.RuntimeHmapSize != hmapSize {
t.Errorf("sizeof(runtime.hmap{})==%d, want %d", runtime.RuntimeHmapSize, hmapSize)
}
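
The expected hmap size asserted in the test above is eight bytes of fixed fields plus five pointer-sized words. A portable program can reproduce the figure without internal/goarch by deriving the pointer size from unsafe.Sizeof; this is a sketch, not part of the test.

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	ptrSize := unsafe.Sizeof(uintptr(0))               // 8 on 64-bit, 4 on 32-bit
	fmt.Println("expected sizeof(hmap):", 8+5*ptrSize) // 48 or 28
}
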
diff --git a/src/runtime/mbarrier.go b/src/runtime/mbarrier.go
index 4994347bde..3fd1cca42c 100644
--- a/src/runtime/mbarrier.go
+++ b/src/runtime/mbarrier.go
@@ -15,7 +15,7 @@ package runtime
import (
"internal/abi"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
@@ -177,8 +177,8 @@ func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
//go:linkname reflect_typedmemmove reflect.typedmemmove
func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
if raceenabled {
- raceWriteObjectPC(typ, dst, getcallerpc(), funcPC(reflect_typedmemmove))
- raceReadObjectPC(typ, src, getcallerpc(), funcPC(reflect_typedmemmove))
+ raceWriteObjectPC(typ, dst, getcallerpc(), abi.FuncPCABIInternal(reflect_typedmemmove))
+ raceReadObjectPC(typ, src, getcallerpc(), abi.FuncPCABIInternal(reflect_typedmemmove))
}
if msanenabled {
msanwrite(dst, typ.size)
@@ -197,11 +197,11 @@ func reflectlite_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
// off must be a multiple of sys.PtrSize.
//go:linkname reflect_typedmemmovepartial reflect.typedmemmovepartial
func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
- if writeBarrier.needed && typ.ptrdata > off && size >= sys.PtrSize {
- if off&(sys.PtrSize-1) != 0 {
+ if writeBarrier.needed && typ.ptrdata > off && size >= goarch.PtrSize {
+ if off&(goarch.PtrSize-1) != 0 {
panic("reflect: internal error: misaligned offset")
}
- pwsize := alignDown(size, sys.PtrSize)
+ pwsize := alignDown(size, goarch.PtrSize)
if poff := typ.ptrdata - off; pwsize > poff {
pwsize = poff
}
@@ -225,7 +225,7 @@ func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size
//
//go:nosplit
func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *abi.RegArgs) {
- if writeBarrier.needed && typ != nil && typ.ptrdata != 0 && size >= sys.PtrSize {
+ if writeBarrier.needed && typ != nil && typ.ptrdata != 0 && size >= goarch.PtrSize {
bulkBarrierPreWrite(uintptr(dst), uintptr(src), size)
}
memmove(dst, src, size)
@@ -254,7 +254,7 @@ func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe
// code and needs its own instrumentation.
if raceenabled {
callerpc := getcallerpc()
- pc := funcPC(slicecopy)
+ pc := abi.FuncPCABIInternal(slicecopy)
racewriterangepc(dstPtr, uintptr(n)*typ.size, callerpc, pc)
racereadrangepc(srcPtr, uintptr(n)*typ.size, callerpc, pc)
}
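
reflect_typedmemmovepartial above rejects offsets that are not pointer-aligned with off&(PtrSize-1) != 0 and rounds the pointer-bearing prefix down with alignDown. The sketch below restates that check with an equivalent alignDown definition and assumes a 64-bit pointer size for the sample output.

package main

import "fmt"

const ptrSize = 8 // assume a 64-bit target for the sample output

// alignDown rounds n down to a multiple of a (a must be a power of two).
func alignDown(n, a uintptr) uintptr { return n &^ (a - 1) }

func main() {
	for _, off := range []uintptr{0, 8, 12} {
		fmt.Printf("off=%d misaligned=%v\n", off, off&(ptrSize-1) != 0)
	}
	fmt.Println(alignDown(61, ptrSize)) // 56: pointer-bearing size rounded down
}
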
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index 32b8db7a50..9363409e36 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -46,6 +46,7 @@
package runtime
import (
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -326,8 +327,8 @@ func heapBitsForAddr(addr uintptr) (h heapBits) {
// we expect to crash in the caller.
return
}
- h.bitp = &ha.bitmap[(addr/(sys.PtrSize*4))%heapArenaBitmapBytes]
- h.shift = uint32((addr / sys.PtrSize) & 3)
+ h.bitp = &ha.bitmap[(addr/(goarch.PtrSize*4))%heapArenaBitmapBytes]
+ h.shift = uint32((addr / goarch.PtrSize) & 3)
h.arena = uint32(arena)
h.last = &ha.bitmap[len(ha.bitmap)-1]
return
@@ -386,10 +387,10 @@ func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex ui
// If s is nil, the virtual address has never been part of the heap.
// This pointer may be to some mmap'd region, so we allow it.
if s == nil {
- if GOARCH == "amd64" && p == clobberdeadPtr && debug.invalidptr != 0 {
- // Crash if clobberdeadPtr is seen. Only on AMD64 for now, as
- // it is the only platform where compiler's clobberdead mode is
- // implemented. On AMD64 clobberdeadPtr cannot be a valid address.
+ if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
+ // Crash if clobberdeadPtr is seen. Only on AMD64 and ARM64 for now,
+ // as they are the only platforms where the compiler's clobberdead mode is
+ // implemented. On these platforms clobberdeadPtr cannot be a valid address.
badPointer(s, p, refBase, refOff)
}
return
@@ -557,7 +558,7 @@ func (h heapBits) isPointer() bool {
//
//go:nosplit
func bulkBarrierPreWrite(dst, src, size uintptr) {
- if (dst|src|size)&(sys.PtrSize-1) != 0 {
+ if (dst|src|size)&(goarch.PtrSize-1) != 0 {
throw("bulkBarrierPreWrite: unaligned arguments")
}
if !writeBarrier.needed {
@@ -592,7 +593,7 @@ func bulkBarrierPreWrite(dst, src, size uintptr) {
buf := &getg().m.p.ptr().wbBuf
h := heapBitsForAddr(dst)
if src == 0 {
- for i := uintptr(0); i < size; i += sys.PtrSize {
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
if h.isPointer() {
dstx := (*uintptr)(unsafe.Pointer(dst + i))
if !buf.putFast(*dstx, 0) {
@@ -602,7 +603,7 @@ func bulkBarrierPreWrite(dst, src, size uintptr) {
h = h.next()
}
} else {
- for i := uintptr(0); i < size; i += sys.PtrSize {
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
if h.isPointer() {
dstx := (*uintptr)(unsafe.Pointer(dst + i))
srcx := (*uintptr)(unsafe.Pointer(src + i))
@@ -625,7 +626,7 @@ func bulkBarrierPreWrite(dst, src, size uintptr) {
// created and zeroed with malloc.
//go:nosplit
func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
- if (dst|src|size)&(sys.PtrSize-1) != 0 {
+ if (dst|src|size)&(goarch.PtrSize-1) != 0 {
throw("bulkBarrierPreWrite: unaligned arguments")
}
if !writeBarrier.needed {
@@ -633,7 +634,7 @@ func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
}
buf := &getg().m.p.ptr().wbBuf
h := heapBitsForAddr(dst)
- for i := uintptr(0); i < size; i += sys.PtrSize {
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
if h.isPointer() {
srcx := (*uintptr)(unsafe.Pointer(src + i))
if !buf.putFast(0, *srcx) {
@@ -653,17 +654,17 @@ func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
//
//go:nosplit
func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
- word := maskOffset / sys.PtrSize
+ word := maskOffset / goarch.PtrSize
bits = addb(bits, word/8)
mask := uint8(1) << (word % 8)
buf := &getg().m.p.ptr().wbBuf
- for i := uintptr(0); i < size; i += sys.PtrSize {
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
if mask == 0 {
bits = addb(bits, 1)
if *bits == 0 {
// Skip 8 words.
- i += 7 * sys.PtrSize
+ i += 7 * goarch.PtrSize
continue
}
mask = 1
@@ -720,8 +721,8 @@ func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
ptrmask := typ.gcdata
buf := &getg().m.p.ptr().wbBuf
var bits uint32
- for i := uintptr(0); i < typ.ptrdata; i += sys.PtrSize {
- if i&(sys.PtrSize*8-1) == 0 {
+ for i := uintptr(0); i < typ.ptrdata; i += goarch.PtrSize {
+ if i&(goarch.PtrSize*8-1) == 0 {
bits = uint32(*ptrmask)
ptrmask = addb(ptrmask, 1)
} else {
@@ -751,14 +752,14 @@ func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
// Otherwise, it initializes all words to scalar/dead.
func (h heapBits) initSpan(s *mspan) {
// Clear bits corresponding to objects.
- nw := (s.npages << _PageShift) / sys.PtrSize
+ nw := (s.npages << _PageShift) / goarch.PtrSize
if nw%wordsPerBitmapByte != 0 {
throw("initSpan: unaligned length")
}
if h.shift != 0 {
throw("initSpan: unaligned base")
}
- isPtrs := sys.PtrSize == 8 && s.elemsize == sys.PtrSize
+ isPtrs := goarch.PtrSize == 8 && s.elemsize == goarch.PtrSize
for nw > 0 {
hNext, anw := h.forwardOrBoundary(nw)
nbyte := anw / wordsPerBitmapByte
@@ -836,7 +837,7 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
// The checks for size == sys.PtrSize and size == 2*sys.PtrSize can therefore
// assume that dataSize == size without checking it explicitly.
- if sys.PtrSize == 8 && size == sys.PtrSize {
+ if goarch.PtrSize == 8 && size == goarch.PtrSize {
// It's one word and it has pointers, it must be a pointer.
// Since all allocated one-word objects are pointers
// (non-pointers are aggregated into tinySize allocations),
@@ -862,8 +863,8 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
// objects are at least 4 words long and that their bitmaps start either at the beginning
// of a bitmap byte, or half-way in (h.shift of 0 and 2 respectively).
- if size == 2*sys.PtrSize {
- if typ.size == sys.PtrSize {
+ if size == 2*goarch.PtrSize {
+ if typ.size == goarch.PtrSize {
// We're allocating a block big enough to hold two pointers.
// On 64-bit, that means the actual object must be two pointers,
// or else we'd have used the one-pointer-sized block.
@@ -872,7 +873,7 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
// just the smallest block available. Distinguish by checking dataSize.
// (In general the number of instances of typ being allocated is
// dataSize/typ.size.)
- if sys.PtrSize == 4 && dataSize == sys.PtrSize {
+ if goarch.PtrSize == 4 && dataSize == goarch.PtrSize {
// 1 pointer object. On 32-bit machines clear the bit for the
// unused second word.
*h.bitp &^= (bitPointer | bitScan | (bitPointer|bitScan)<<heapBitsShift) << h.shift
@@ -886,38 +887,38 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
// Otherwise typ.size must be 2*sys.PtrSize,
// and typ.kind&kindGCProg == 0.
if doubleCheck {
- if typ.size != 2*sys.PtrSize || typ.kind&kindGCProg != 0 {
+ if typ.size != 2*goarch.PtrSize || typ.kind&kindGCProg != 0 {
print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, " gcprog=", typ.kind&kindGCProg != 0, "\n")
throw("heapBitsSetType")
}
}
b := uint32(*ptrmask)
hb := b & 3
- hb |= bitScanAll & ((bitScan << (typ.ptrdata / sys.PtrSize)) - 1)
+ hb |= bitScanAll & ((bitScan << (typ.ptrdata / goarch.PtrSize)) - 1)
// Clear the bits for this object so we can set the
// appropriate ones.
*h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
*h.bitp |= uint8(hb << h.shift)
return
- } else if size == 3*sys.PtrSize {
+ } else if size == 3*goarch.PtrSize {
b := uint8(*ptrmask)
if doubleCheck {
if b == 0 {
println("runtime: invalid type ", typ.string())
throw("heapBitsSetType: called with non-pointer type")
}
- if sys.PtrSize != 8 {
+ if goarch.PtrSize != 8 {
throw("heapBitsSetType: unexpected 3 pointer wide size class on 32 bit")
}
if typ.kind&kindGCProg != 0 {
throw("heapBitsSetType: unexpected GC prog for 3 pointer wide size class")
}
- if typ.size == 2*sys.PtrSize {
+ if typ.size == 2*goarch.PtrSize {
print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, "\n")
throw("heapBitsSetType: inconsistent object sizes")
}
}
- if typ.size == sys.PtrSize {
+ if typ.size == goarch.PtrSize {
// The type contains a pointer otherwise heapBitsSetType wouldn't have been called.
// Since the type is only 1 pointer wide and contains a pointer, its gcdata must be exactly 1.
if doubleCheck && *typ.gcdata != 1 {
@@ -1063,8 +1064,8 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
// Filling in bits for an array of typ.
// Set up for repetition of ptrmask during main loop.
// Note that ptrmask describes only a prefix of
- const maxBits = sys.PtrSize*8 - 7
- if typ.ptrdata/sys.PtrSize <= maxBits {
+ const maxBits = goarch.PtrSize*8 - 7
+ if typ.ptrdata/goarch.PtrSize <= maxBits {
// Entire ptrmask fits in uintptr with room for a byte fragment.
// Load into pbits and never read from ptrmask again.
// This is especially important when the ptrmask has
@@ -1075,12 +1076,12 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
// Accumulate ptrmask into b.
// ptrmask is sized to describe only typ.ptrdata, but we record
// it as describing typ.size bytes, since all the high bits are zero.
- nb = typ.ptrdata / sys.PtrSize
+ nb = typ.ptrdata / goarch.PtrSize
for i := uintptr(0); i < nb; i += 8 {
b |= uintptr(*p) << i
p = add1(p)
}
- nb = typ.size / sys.PtrSize
+ nb = typ.size / goarch.PtrSize
// Replicate ptrmask to fill entire pbits uintptr.
// Doubling and truncating is fewer steps than
@@ -1091,7 +1092,7 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
pbits = b
endnb = nb
if nb+nb <= maxBits {
- for endnb <= sys.PtrSize*8 {
+ for endnb <= goarch.PtrSize*8 {
pbits |= pbits << endnb
endnb += endnb
}
@@ -1110,9 +1111,9 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
endp = nil
} else {
// Ptrmask is larger. Read it multiple times.
- n := (typ.ptrdata/sys.PtrSize+7)/8 - 1
+ n := (typ.ptrdata/goarch.PtrSize+7)/8 - 1
endp = addb(ptrmask, n)
- endnb = typ.size/sys.PtrSize - n*8
+ endnb = typ.size/goarch.PtrSize - n*8
}
}
if p != nil {
@@ -1123,12 +1124,12 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
if typ.size == dataSize {
// Single entry: can stop once we reach the non-pointer data.
- nw = typ.ptrdata / sys.PtrSize
+ nw = typ.ptrdata / goarch.PtrSize
} else {
// Repeated instances of typ in an array.
// Have to process first N-1 entries in full, but can stop
// once we reach the non-pointer data in the final entry.
- nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / sys.PtrSize
+ nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / goarch.PtrSize
}
if nw == 0 {
// No pointers! Caller was supposed to check.
@@ -1291,7 +1292,7 @@ Phase3:
}
// Change nw from counting possibly-pointer words to total words in allocation.
- nw = size / sys.PtrSize
+ nw = size / goarch.PtrSize
// Write whole bitmap bytes.
// The first is hb, the rest are zero.
@@ -1325,7 +1326,7 @@ Phase4:
h := heapBitsForAddr(x)
// cnw is the number of heap words, or bit pairs
// remaining (like nw above).
- cnw := size / sys.PtrSize
+ cnw := size / goarch.PtrSize
src := (*uint8)(unsafe.Pointer(x))
// We know the first and last byte of the bitmap are
// not the same, but it's still possible for small
@@ -1390,7 +1391,7 @@ Phase4:
if doubleCheck {
// x+size may not point to the heap, so back up one
// word and then advance it the way we do above.
- end := heapBitsForAddr(x + size - sys.PtrSize)
+ end := heapBitsForAddr(x + size - goarch.PtrSize)
if outOfPlace {
// In out-of-place copying, we just advance
// using next.
@@ -1417,11 +1418,11 @@ Phase4:
// Double-check that bits to be written were written correctly.
// Does not check that other bits were not written, unfortunately.
h := heapBitsForAddr(x)
- nptr := typ.ptrdata / sys.PtrSize
- ndata := typ.size / sys.PtrSize
+ nptr := typ.ptrdata / goarch.PtrSize
+ ndata := typ.size / goarch.PtrSize
count := dataSize / typ.size
- totalptr := ((count-1)*typ.size + typ.ptrdata) / sys.PtrSize
- for i := uintptr(0); i < size/sys.PtrSize; i++ {
+ totalptr := ((count-1)*typ.size + typ.ptrdata) / goarch.PtrSize
+ for i := uintptr(0); i < size/goarch.PtrSize; i++ {
j := i % ndata
var have, want uint8
have = (*h.bitp >> h.shift) & (bitPointer | bitScan)
@@ -1446,7 +1447,7 @@ Phase4:
print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
print("current bits h.bitp=", h.bitp, " h.shift=", h.shift, " *h.bitp=", hex(*h.bitp), "\n")
print("ptrmask=", ptrmask, " p=", p, " endp=", endp, " endnb=", endnb, " pbits=", hex(pbits), " b=", hex(b), " nb=", nb, "\n")
- println("at word", i, "offset", i*sys.PtrSize, "have", hex(have), "want", hex(want))
+ println("at word", i, "offset", i*goarch.PtrSize, "have", hex(have), "want", hex(want))
if typ.kind&kindGCProg != 0 {
println("GC program:")
dumpGCProg(addb(typ.gcdata, 4))
@@ -1477,14 +1478,14 @@ var debugPtrmask struct {
// so that the relevant bitmap bytes are not shared with surrounding
// objects.
func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize uintptr, prog *byte) {
- if sys.PtrSize == 8 && allocSize%(4*sys.PtrSize) != 0 {
+ if goarch.PtrSize == 8 && allocSize%(4*goarch.PtrSize) != 0 {
// Alignment will be wrong.
throw("heapBitsSetTypeGCProg: small allocation")
}
var totalBits uintptr
if elemSize == dataSize {
totalBits = runGCProg(prog, nil, h.bitp, 2)
- if totalBits*sys.PtrSize != progSize {
+ if totalBits*goarch.PtrSize != progSize {
println("runtime: heapBitsSetTypeGCProg: total bits", totalBits, "but progSize", progSize)
throw("heapBitsSetTypeGCProg: unexpected bit count")
}
@@ -1499,7 +1500,7 @@ func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize u
// repeats that first element to fill the array.
var trailer [40]byte // 3 varints (max 10 each) + some bytes
i := 0
- if n := elemSize/sys.PtrSize - progSize/sys.PtrSize; n > 0 {
+ if n := elemSize/goarch.PtrSize - progSize/goarch.PtrSize; n > 0 {
// literal(0)
trailer[i] = 0x01
i++
@@ -1521,7 +1522,7 @@ func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize u
// repeat(elemSize/ptrSize, count-1)
trailer[i] = 0x80
i++
- n := elemSize / sys.PtrSize
+ n := elemSize / goarch.PtrSize
for ; n >= 0x80; n >>= 7 {
trailer[i] = byte(n | 0x80)
i++
@@ -1545,10 +1546,10 @@ func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize u
// last element. This will cause the code below to
// memclr the dead section of the final array element,
// so that scanobject can stop early in the final element.
- totalBits = (elemSize*(count-1) + progSize) / sys.PtrSize
+ totalBits = (elemSize*(count-1) + progSize) / goarch.PtrSize
}
endProg := unsafe.Pointer(addb(h.bitp, (totalBits+3)/4))
- endAlloc := unsafe.Pointer(addb(h.bitp, allocSize/sys.PtrSize/wordsPerBitmapByte))
+ endAlloc := unsafe.Pointer(addb(h.bitp, allocSize/goarch.PtrSize/wordsPerBitmapByte))
memclrNoHeapPointers(endProg, uintptr(endAlloc)-uintptr(endProg))
}
@@ -1556,7 +1557,7 @@ func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize u
// size the size of the region described by prog, in bytes.
// The resulting bitvector will have no more than size/sys.PtrSize bits.
func progToPointerMask(prog *byte, size uintptr) bitvector {
- n := (size/sys.PtrSize + 7) / 8
+ n := (size/goarch.PtrSize + 7) / 8
x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
x[len(x)-1] = 0xa1 // overflow check sentinel
n = runGCProg(prog, nil, &x[0], 1)
@@ -1691,7 +1692,7 @@ Run:
// the pattern to a bit buffer holding at most 7 bits (a partial byte)
// it will not overflow.
src := dst
- const maxBits = sys.PtrSize*8 - 7
+ const maxBits = goarch.PtrSize*8 - 7
if n <= maxBits {
// Start with bits in output buffer.
pattern := bits
@@ -1744,7 +1745,7 @@ Run:
nb := npattern
if nb+nb <= maxBits {
// Double pattern until the whole uintptr is filled.
- for nb <= sys.PtrSize*8 {
+ for nb <= goarch.PtrSize*8 {
b |= b << nb
nb += nb
}
@@ -1872,7 +1873,7 @@ Run:
// The result must be deallocated with dematerializeGCProg.
func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
// Each word of ptrdata needs one bit in the bitmap.
- bitmapBytes := divRoundUp(ptrdata, 8*sys.PtrSize)
+ bitmapBytes := divRoundUp(ptrdata, 8*goarch.PtrSize)
// Compute the number of pages needed for bitmapBytes.
pages := divRoundUp(bitmapBytes, pageSize)
s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
@@ -1945,7 +1946,7 @@ func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool {
func reflect_gcbits(x interface{}) []byte {
ret := getgcmask(x)
typ := (*ptrtype)(unsafe.Pointer(efaceOf(&x)._type)).elem
- nptr := typ.ptrdata / sys.PtrSize
+ nptr := typ.ptrdata / goarch.PtrSize
for uintptr(len(ret)) > nptr && ret[len(ret)-1] == 0 {
ret = ret[:len(ret)-1]
}
@@ -1965,10 +1966,10 @@ func getgcmask(ep interface{}) (mask []byte) {
if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
bitmap := datap.gcdatamask.bytedata
n := (*ptrtype)(unsafe.Pointer(t)).elem.size
- mask = make([]byte, n/sys.PtrSize)
- for i := uintptr(0); i < n; i += sys.PtrSize {
- off := (uintptr(p) + i - datap.data) / sys.PtrSize
- mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
+ mask = make([]byte, n/goarch.PtrSize)
+ for i := uintptr(0); i < n; i += goarch.PtrSize {
+ off := (uintptr(p) + i - datap.data) / goarch.PtrSize
+ mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
}
return
}
@@ -1977,10 +1978,10 @@ func getgcmask(ep interface{}) (mask []byte) {
if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
bitmap := datap.gcbssmask.bytedata
n := (*ptrtype)(unsafe.Pointer(t)).elem.size
- mask = make([]byte, n/sys.PtrSize)
- for i := uintptr(0); i < n; i += sys.PtrSize {
- off := (uintptr(p) + i - datap.bss) / sys.PtrSize
- mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
+ mask = make([]byte, n/goarch.PtrSize)
+ for i := uintptr(0); i < n; i += goarch.PtrSize {
+ off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
+ mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
}
return
}
@@ -1990,13 +1991,13 @@ func getgcmask(ep interface{}) (mask []byte) {
if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
hbits := heapBitsForAddr(base)
n := s.elemsize
- mask = make([]byte, n/sys.PtrSize)
- for i := uintptr(0); i < n; i += sys.PtrSize {
+ mask = make([]byte, n/goarch.PtrSize)
+ for i := uintptr(0); i < n; i += goarch.PtrSize {
if hbits.isPointer() {
- mask[i/sys.PtrSize] = 1
+ mask[i/goarch.PtrSize] = 1
}
if !hbits.morePointers() {
- mask = mask[:i/sys.PtrSize]
+ mask = mask[:i/goarch.PtrSize]
break
}
hbits = hbits.next()
@@ -2015,12 +2016,12 @@ func getgcmask(ep interface{}) (mask []byte) {
if locals.n == 0 {
return
}
- size := uintptr(locals.n) * sys.PtrSize
+ size := uintptr(locals.n) * goarch.PtrSize
n := (*ptrtype)(unsafe.Pointer(t)).elem.size
- mask = make([]byte, n/sys.PtrSize)
- for i := uintptr(0); i < n; i += sys.PtrSize {
- off := (uintptr(p) + i - frame.varp + size) / sys.PtrSize
- mask[i/sys.PtrSize] = locals.ptrbit(off)
+ mask = make([]byte, n/goarch.PtrSize)
+ for i := uintptr(0); i < n; i += goarch.PtrSize {
+ off := (uintptr(p) + i - frame.varp + size) / goarch.PtrSize
+ mask[i/goarch.PtrSize] = locals.ptrbit(off)
}
}
return
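
getgcmask above reads one bit per pointer-sized word from a packed pointer bitmap, indexing byte off/8 and bit off%8. The self-contained lookup below shows the same addressing against a made-up bitmap.

package main

import "fmt"

// ptrBit reports whether word `word` is marked as a pointer in the bitmap.
func ptrBit(bitmap []byte, word uintptr) byte {
	return (bitmap[word/8] >> (word % 8)) & 1
}

func main() {
	bitmap := []byte{0b00000101, 0b00000001} // words 0, 2 and 8 are pointers
	for w := uintptr(0); w < 16; w++ {
		fmt.Printf("%d", ptrBit(bitmap, w))
	}
	fmt.Println() // 1010000010000000
}
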
diff --git a/src/runtime/mcheckmark.go b/src/runtime/mcheckmark.go
index ba80ac1bdf..1dd28585f1 100644
--- a/src/runtime/mcheckmark.go
+++ b/src/runtime/mcheckmark.go
@@ -13,8 +13,8 @@
package runtime
import (
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
@@ -24,7 +24,7 @@ import (
// allocation.
//
//go:notinheap
-type checkmarksMap [heapArenaBytes / sys.PtrSize / 8]uint8
+type checkmarksMap [heapArenaBytes / goarch.PtrSize / 8]uint8
// If useCheckmark is true, marking of an object uses the checkmark
// bits instead of the standard mark bits.
diff --git a/src/runtime/memclr_amd64.s b/src/runtime/memclr_amd64.s
index a10f57bd8c..918a4b9e0e 100644
--- a/src/runtime/memclr_amd64.s
+++ b/src/runtime/memclr_amd64.s
@@ -13,14 +13,9 @@
// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
// ABIInternal for performance.
TEXT runtime·memclrNoHeapPointers<ABIInternal>(SB), NOSPLIT, $0-16
-#ifdef GOEXPERIMENT_regabiargs
// AX = ptr
// BX = n
MOVQ AX, DI // DI = ptr
-#else
- MOVQ ptr+0(FP), DI
- MOVQ n+8(FP), BX
-#endif
XORQ AX, AX
// MOVOU seems always faster than REP STOSQ.
@@ -37,9 +32,6 @@ tail:
JE _8
CMPQ BX, $16
JBE _9through16
-#ifndef GOEXPERIMENT_regabig
- PXOR X15, X15
-#endif
CMPQ BX, $32
JBE _17through32
CMPQ BX, $64
diff --git a/src/runtime/memclr_arm64.s b/src/runtime/memclr_arm64.s
index c1a0dcef58..b80cca6a1c 100644
--- a/src/runtime/memclr_arm64.s
+++ b/src/runtime/memclr_arm64.s
@@ -8,9 +8,11 @@
// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
// Also called from assembly in sys_windows_arm64.s without g (but using Go stack convention).
-TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-16
+TEXT runtime·memclrNoHeapPointers<ABIInternal>(SB),NOSPLIT,$0-16
+#ifndef GOEXPERIMENT_regabiargs
MOVD ptr+0(FP), R0
MOVD n+8(FP), R1
+#endif
CMP $16, R1
// If n is equal to 16 bytes, use zero_exact_16 to zero
diff --git a/src/runtime/memmove_amd64.s b/src/runtime/memmove_amd64.s
index 24c6529f58..fa0c0e414f 100644
--- a/src/runtime/memmove_amd64.s
+++ b/src/runtime/memmove_amd64.s
@@ -34,18 +34,12 @@
// func memmove(to, from unsafe.Pointer, n uintptr)
// ABIInternal for performance.
TEXT runtime·memmove<ABIInternal>(SB), NOSPLIT, $0-24
-#ifdef GOEXPERIMENT_regabiargs
// AX = to
// BX = from
// CX = n
MOVQ AX, DI
MOVQ BX, SI
MOVQ CX, BX
-#else
- MOVQ to+0(FP), DI
- MOVQ from+8(FP), SI
- MOVQ n+16(FP), BX
-#endif
// REP instructions have a high startup cost, so we handle small sizes
// with some straightline code. The REP MOVSQ instruction is really fast
@@ -254,10 +248,8 @@ move_129through256:
MOVOU X13, -48(DI)(BX*1)
MOVOU X14, -32(DI)(BX*1)
MOVOU X15, -16(DI)(BX*1)
-#ifdef GOEXPERIMENT_regabig
// X15 must be zero on return
PXOR X15, X15
-#endif
RET
move_256through2048:
SUBQ $256, BX
@@ -297,10 +289,8 @@ move_256through2048:
LEAQ 256(SI), SI
LEAQ 256(DI), DI
JGE move_256through2048
-#ifdef GOEXPERIMENT_regabig
// X15 must be zero on return
PXOR X15, X15
-#endif
JMP tail
avxUnaligned:
diff --git a/src/runtime/memmove_arm64.s b/src/runtime/memmove_arm64.s
index 43d27629e5..bee3b00c47 100644
--- a/src/runtime/memmove_arm64.s
+++ b/src/runtime/memmove_arm64.s
@@ -26,10 +26,12 @@
// The loop tail is handled by always copying 64 bytes from the end.
// func memmove(to, from unsafe.Pointer, n uintptr)
-TEXT runtime·memmove(SB), NOSPLIT|NOFRAME, $0-24
+TEXT runtime·memmove<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-24
+#ifndef GOEXPERIMENT_regabiargs
MOVD to+0(FP), R0
MOVD from+8(FP), R1
MOVD n+16(FP), R2
+#endif
CBZ R2, copy0
// Small copies: 1..16 bytes
diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go
index c134a0f22d..3cdb81e2fb 100644
--- a/src/runtime/mfinal.go
+++ b/src/runtime/mfinal.go
@@ -8,8 +8,8 @@ package runtime
import (
"internal/abi"
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
@@ -26,14 +26,14 @@ type finblock struct {
next *finblock
cnt uint32
_ int32
- fin [(_FinBlockSize - 2*sys.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
+ fin [(_FinBlockSize - 2*goarch.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
}
var finlock mutex // protects the following variables
var fing *g // goroutine that runs finalizers
var finq *finblock // list of finalizers that are to be executed
var finc *finblock // cache of free blocks
-var finptrmask [_FinBlockSize / sys.PtrSize / 8]byte
+var finptrmask [_FinBlockSize / goarch.PtrSize / 8]byte
var fingwait bool
var fingwake bool
var allfin *finblock // list of all blocks
@@ -95,12 +95,12 @@ func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot
if finptrmask[0] == 0 {
// Build pointer mask for Finalizer array in block.
// Check assumptions made in finalizer1 array above.
- if (unsafe.Sizeof(finalizer{}) != 5*sys.PtrSize ||
+ if (unsafe.Sizeof(finalizer{}) != 5*goarch.PtrSize ||
unsafe.Offsetof(finalizer{}.fn) != 0 ||
- unsafe.Offsetof(finalizer{}.arg) != sys.PtrSize ||
- unsafe.Offsetof(finalizer{}.nret) != 2*sys.PtrSize ||
- unsafe.Offsetof(finalizer{}.fint) != 3*sys.PtrSize ||
- unsafe.Offsetof(finalizer{}.ot) != 4*sys.PtrSize) {
+ unsafe.Offsetof(finalizer{}.arg) != goarch.PtrSize ||
+ unsafe.Offsetof(finalizer{}.nret) != 2*goarch.PtrSize ||
+ unsafe.Offsetof(finalizer{}.fint) != 3*goarch.PtrSize ||
+ unsafe.Offsetof(finalizer{}.ot) != 4*goarch.PtrSize) {
throw("finalizer out of sync")
}
for i := range finptrmask {
@@ -432,7 +432,7 @@ okarg:
for _, t := range ft.out() {
nret = alignUp(nret, uintptr(t.align)) + uintptr(t.size)
}
- nret = alignUp(nret, sys.PtrSize)
+ nret = alignUp(nret, goarch.PtrSize)
// make sure we have a finalizer goroutine
createfing()
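
queuefinalizer above double-checks the hand-written finalizer1 pointer mask against the actual layout of the finalizer struct with unsafe.Sizeof and unsafe.Offsetof. The same defensive pattern works for any struct; the record type below is hypothetical.

package main

import (
	"fmt"
	"unsafe"
)

type record struct {
	fn  *byte
	arg *byte
	n   uintptr
}

func main() {
	ptrSize := unsafe.Sizeof(uintptr(0))
	// Verify the layout assumptions a hand-written mask would rely on.
	ok := unsafe.Sizeof(record{}) == 3*ptrSize &&
		unsafe.Offsetof(record{}.fn) == 0 &&
		unsafe.Offsetof(record{}.arg) == ptrSize &&
		unsafe.Offsetof(record{}.n) == 2*ptrSize
	fmt.Println("layout in sync:", ok)
}
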
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index 4585663535..34b5b482a3 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -167,22 +167,17 @@ func gcinit() {
lockInit(&work.wbufSpans.lock, lockRankWbufSpans)
}
-// Temporary in order to enable register ABI work.
-// TODO(register args): convert back to local chan in gcenabled, passed to "go" stmts.
-var gcenable_setup chan int
-
// gcenable is called after the bulk of the runtime initialization,
// just before we're about to start letting user code run.
// It kicks off the background sweeper goroutine, the background
// scavenger goroutine, and enables GC.
func gcenable() {
// Kick off sweeping and scavenging.
- gcenable_setup = make(chan int, 2)
- go bgsweep()
- go bgscavenge()
- <-gcenable_setup
- <-gcenable_setup
- gcenable_setup = nil
+ c := make(chan int, 2)
+ go bgsweep(c)
+ go bgscavenge(c)
+ <-c
+ <-c
memstats.enablegc = true // now that runtime is initialized, GC is okay
}
@@ -1563,19 +1558,17 @@ func clearpools() {
sched.sudogcache = nil
unlock(&sched.sudoglock)
- // Clear central defer pools.
+ // Clear central defer pool.
// Leave per-P pools alone, they have strictly bounded size.
lock(&sched.deferlock)
- for i := range sched.deferpool {
- // disconnect cached list before dropping it on the floor,
- // so that a dangling ref to one entry does not pin all of them.
- var d, dlink *_defer
- for d = sched.deferpool[i]; d != nil; d = dlink {
- dlink = d.link
- d.link = nil
- }
- sched.deferpool[i] = nil
+ // disconnect cached list before dropping it on the floor,
+ // so that a dangling ref to one entry does not pin all of them.
+ var d, dlink *_defer
+ for d = sched.deferpool; d != nil; d = dlink {
+ dlink = d.link
+ d.link = nil
}
+ sched.deferpool = nil
unlock(&sched.deferlock)
}
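
gcenable above replaces the package-level gcenable_setup channel with a local channel handed to bgsweep and bgscavenge, then waits for two readiness signals. A minimal sketch of that handshake, with placeholder workers, looks like this.

package main

import "fmt"

func bgSweepStub(c chan int)    { c <- 1 /* then park and sweep */ }
func bgScavengeStub(c chan int) { c <- 1 /* then park and scavenge */ }

func main() {
	c := make(chan int, 2)
	go bgSweepStub(c)
	go bgScavengeStub(c)
	<-c
	<-c
	fmt.Println("background workers started; GC can be enabled")
}
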
diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index 1fd0732d62..874d910720 100644
--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go
@@ -7,8 +7,8 @@
package runtime
import (
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
@@ -245,7 +245,7 @@ func markroot(gcw *gcWork, i uint32) {
//
//go:nowritebarrier
func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) {
- if rootBlockBytes%(8*sys.PtrSize) != 0 {
+ if rootBlockBytes%(8*goarch.PtrSize) != 0 {
// This is necessary to pick byte offsets in ptrmask0.
throw("rootBlockBytes must be a multiple of 8*ptrSize")
}
@@ -258,7 +258,7 @@ func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) {
return
}
b := b0 + off
- ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*sys.PtrSize))))
+ ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*goarch.PtrSize))))
n := uintptr(rootBlockBytes)
if off+n > n0 {
n = n0 - off
@@ -372,7 +372,7 @@ func markrootSpans(gcw *gcWork, shard int) {
scanobject(p, gcw)
// The special itself is a root.
- scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw, nil)
+ scanblock(uintptr(unsafe.Pointer(&spf.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
}
unlock(&s.speciallock)
}
@@ -737,7 +737,7 @@ func scanstack(gp *g, gcw *gcWork) {
// register that gets moved back and forth between the
// register and sched.ctxt without a write barrier.
if gp.sched.ctxt != nil {
- scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), sys.PtrSize, &oneptrmask[0], gcw, &state)
+ scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
}
// Scan the stack. Accumulate a list of stack objects.
@@ -750,26 +750,23 @@ func scanstack(gp *g, gcw *gcWork) {
// Find additional pointers that point into the stack from the heap.
// Currently this includes defers and panics. See also function copystack.
- // Find and trace all defer arguments.
- tracebackdefers(gp, scanframe, nil)
-
// Find and trace other pointers in defer records.
for d := gp._defer; d != nil; d = d.link {
if d.fn != nil {
- // tracebackdefers above does not scan the func value, which could
- // be a stack allocated closure. See issue 30453.
- scanblock(uintptr(unsafe.Pointer(&d.fn)), sys.PtrSize, &oneptrmask[0], gcw, &state)
+ // Scan the func value, which could be a stack allocated closure.
+ // See issue 30453.
+ scanblock(uintptr(unsafe.Pointer(&d.fn)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
}
if d.link != nil {
// The link field of a stack-allocated defer record might point
// to a heap-allocated defer record. Keep that heap record live.
- scanblock(uintptr(unsafe.Pointer(&d.link)), sys.PtrSize, &oneptrmask[0], gcw, &state)
+ scanblock(uintptr(unsafe.Pointer(&d.link)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
}
// Retain defer records themselves.
// Defer records might not be reachable from the G through regular heap
// tracing because the defer linked list might weave between the stack and the heap.
if d.heap {
- scanblock(uintptr(unsafe.Pointer(&d)), sys.PtrSize, &oneptrmask[0], gcw, &state)
+ scanblock(uintptr(unsafe.Pointer(&d)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
}
}
if gp._panic != nil {
@@ -913,13 +910,13 @@ func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork) {
// Scan local variables if stack frame has been allocated.
if locals.n > 0 {
- size := uintptr(locals.n) * sys.PtrSize
+ size := uintptr(locals.n) * goarch.PtrSize
scanblock(frame.varp-size, size, locals.bytedata, gcw, state)
}
// Scan arguments.
if args.n > 0 {
- scanblock(frame.argp, uintptr(args.n)*sys.PtrSize, args.bytedata, gcw, state)
+ scanblock(frame.argp, uintptr(args.n)*goarch.PtrSize, args.bytedata, gcw, state)
}
// Add all stack objects to the stack object list.
@@ -1172,9 +1169,9 @@ func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState)
for i := uintptr(0); i < n; {
// Find bits for the next word.
- bits := uint32(*addb(ptrmask, i/(sys.PtrSize*8)))
+ bits := uint32(*addb(ptrmask, i/(goarch.PtrSize*8)))
if bits == 0 {
- i += sys.PtrSize * 8
+ i += goarch.PtrSize * 8
continue
}
for j := 0; j < 8 && i < n; j++ {
@@ -1190,7 +1187,7 @@ func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState)
}
}
bits >>= 1
- i += sys.PtrSize
+ i += goarch.PtrSize
}
}
}
@@ -1251,7 +1248,7 @@ func scanobject(b uintptr, gcw *gcWork) {
}
var i uintptr
- for i = 0; i < n; i, hbits = i+sys.PtrSize, hbits.next() {
+ for i = 0; i < n; i, hbits = i+goarch.PtrSize, hbits.next() {
// Load bits once. See CL 22712 and issue 16973 for discussion.
bits := hbits.bits()
if bits&bitScan == 0 {
@@ -1300,7 +1297,7 @@ func scanConservative(b, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackSca
print("conservatively scanning [", hex(b), ",", hex(b+n), ")\n")
hexdumpWords(b, b+n, func(p uintptr) byte {
if ptrmask != nil {
- word := (p - b) / sys.PtrSize
+ word := (p - b) / goarch.PtrSize
bits := *addb(ptrmask, word/8)
if (bits>>(word%8))&1 == 0 {
return '$'
@@ -1325,9 +1322,9 @@ func scanConservative(b, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackSca
printunlock()
}
- for i := uintptr(0); i < n; i += sys.PtrSize {
+ for i := uintptr(0); i < n; i += goarch.PtrSize {
if ptrmask != nil {
- word := i / sys.PtrSize
+ word := i / goarch.PtrSize
bits := *addb(ptrmask, word/8)
if bits == 0 {
// Skip 8 words (the loop increment will do the 8th)
@@ -1336,10 +1333,10 @@ func scanConservative(b, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackSca
// seen this word of ptrmask, so i
// must be 8-word-aligned, but check
// our reasoning just in case.
- if i%(sys.PtrSize*8) != 0 {
+ if i%(goarch.PtrSize*8) != 0 {
throw("misaligned mask")
}
- i += sys.PtrSize*8 - sys.PtrSize
+ i += goarch.PtrSize*8 - goarch.PtrSize
continue
}
if (bits>>(word%8))&1 == 0 {
@@ -1401,7 +1398,7 @@ func shade(b uintptr) {
//go:nowritebarrierrec
func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr) {
// obj should be start of allocation, and so must be at least pointer-aligned.
- if obj&(sys.PtrSize-1) != 0 {
+ if obj&(goarch.PtrSize-1) != 0 {
throw("greyobject: obj not pointer-aligned")
}
mbits := span.markBitsForIndex(objIndex)
@@ -1473,13 +1470,13 @@ func gcDumpObject(label string, obj, off uintptr) {
// We're printing something from a stack frame. We
// don't know how big it is, so just show up to and
// including off.
- size = off + sys.PtrSize
+ size = off + goarch.PtrSize
}
- for i := uintptr(0); i < size; i += sys.PtrSize {
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
// For big objects, just print the beginning (because
// that usually hints at the object's type) and the
// fields around off.
- if !(i < 128*sys.PtrSize || off-16*sys.PtrSize < i && i < off+16*sys.PtrSize) {
+ if !(i < 128*goarch.PtrSize || off-16*goarch.PtrSize < i && i < off+16*goarch.PtrSize) {
skipped = true
continue
}
diff --git a/src/runtime/mgcscavenge.go b/src/runtime/mgcscavenge.go
index 7578129f9d..2bb19985db 100644
--- a/src/runtime/mgcscavenge.go
+++ b/src/runtime/mgcscavenge.go
@@ -56,6 +56,7 @@
package runtime
import (
+ "internal/goos"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -90,7 +91,7 @@ const (
//
// This ratio is used as part of multiplicative factor to help the scavenger account
// for the additional costs of using scavenged memory in its pacing.
- scavengeCostRatio = 0.7 * (sys.GoosDarwin + sys.GoosIos)
+ scavengeCostRatio = 0.7 * (goos.IsDarwin + goos.IsIos)
// scavengeReservationShards determines the amount of memory the scavenger
// should reserve for scavenging at a time. Specifically, the amount of
@@ -249,7 +250,7 @@ func scavengeSleep(ns int64) int64 {
// The background scavenger maintains the RSS of the application below
// the line described by the proportional scavenging statistics in
// the mheap struct.
-func bgscavenge() {
+func bgscavenge(c chan int) {
scavenge.g = getg()
lockInit(&scavenge.lock, lockRankScavenge)
@@ -261,7 +262,7 @@ func bgscavenge() {
wakeScavenger()
}
- gcenable_setup <- 1
+ c <- 1
goparkunlock(&scavenge.lock, waitReasonGCScavengeWait, traceEvGoBlock, 1)
// Exponentially-weighted moving average of the fraction of time this
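
scavengeCostRatio above folds the OS check into a compile-time constant, 0.7*(goos.IsDarwin+goos.IsIos), so it is 0.7 on darwin/ios and 0 elsewhere. Outside the runtime the equivalent run-time check would use runtime.GOOS, as in this sketch.

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// 0 on most systems; 0.7 on darwin and ios, mirroring scavengeCostRatio.
	ratio := 0.0
	if runtime.GOOS == "darwin" || runtime.GOOS == "ios" {
		ratio = 0.7
	}
	fmt.Println("scavenge cost ratio:", ratio)
}
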
diff --git a/src/runtime/mgcstack.go b/src/runtime/mgcstack.go
index 92d58485f6..49dc54e165 100644
--- a/src/runtime/mgcstack.go
+++ b/src/runtime/mgcstack.go
@@ -95,7 +95,7 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
@@ -107,7 +107,7 @@ const stackTraceDebug = false
//go:notinheap
type stackWorkBuf struct {
stackWorkBufHdr
- obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / sys.PtrSize]uintptr
+ obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / goarch.PtrSize]uintptr
}
// Header declaration must come after the buf declaration above, because of issue #14620.
diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go
index 8fe3a65340..1812644623 100644
--- a/src/runtime/mgcsweep.go
+++ b/src/runtime/mgcsweep.go
@@ -153,13 +153,13 @@ func finishsweep_m() {
nextMarkBitArenaEpoch()
}
-func bgsweep() {
+func bgsweep(c chan int) {
sweep.g = getg()
lockInit(&sweep.lock, lockRankSweep)
lock(&sweep.lock)
sweep.parked = true
- gcenable_setup <- 1
+ c <- 1
goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
for {
diff --git a/src/runtime/mgcwork.go b/src/runtime/mgcwork.go
index 667c7afa97..8787d93d87 100644
--- a/src/runtime/mgcwork.go
+++ b/src/runtime/mgcwork.go
@@ -5,8 +5,8 @@
package runtime
import (
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
@@ -322,7 +322,7 @@ type workbufhdr struct {
type workbuf struct {
workbufhdr
// account for the above fields
- obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / sys.PtrSize]uintptr
+ obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / goarch.PtrSize]uintptr
}
// workbuf factory routines. These funcs are used to manage the
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 84c00ce8f8..b78f752ded 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -10,8 +10,8 @@ package runtime
import (
"internal/cpu"
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
@@ -497,13 +497,13 @@ func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
assertLockHeld(&h.lock)
if len(h.allspans) >= cap(h.allspans) {
- n := 64 * 1024 / sys.PtrSize
+ n := 64 * 1024 / goarch.PtrSize
if n < cap(h.allspans)*3/2 {
n = cap(h.allspans) * 3 / 2
}
var new []*mspan
sp := (*slice)(unsafe.Pointer(&new))
- sp.array = sysAlloc(uintptr(n)*sys.PtrSize, &memstats.other_sys)
+ sp.array = sysAlloc(uintptr(n)*goarch.PtrSize, &memstats.other_sys)
if sp.array == nil {
throw("runtime: cannot allocate memory")
}
@@ -1822,7 +1822,7 @@ func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *p
scanobject(base, gcw)
// Mark the finalizer itself, since the
// special isn't part of the GC'd heap.
- scanblock(uintptr(unsafe.Pointer(&s.fn)), sys.PtrSize, &oneptrmask[0], gcw, nil)
+ scanblock(uintptr(unsafe.Pointer(&s.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
releasem(mp)
}
return true
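
recordspan above sizes the first allspans block at 64 KiB worth of pointers and grows by 1.5x thereafter; only the backing allocation (sysAlloc) is runtime-specific. The growth policy itself is reproduced in the sketch below.

package main

import (
	"fmt"
	"unsafe"
)

// nextCap mirrors recordspan's growth policy for the allspans backing array.
func nextCap(cur int) int {
	n := 64 * 1024 / int(unsafe.Sizeof(uintptr(0)))
	if n < cur*3/2 {
		n = cur * 3 / 2
	}
	return n
}

func main() {
	c := 0
	for i := 0; i < 4; i++ {
		c = nextCap(c)
		fmt.Println(c) // 8192, 12288, 18432, 27648 on 64-bit
	}
}
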
diff --git a/src/runtime/mkduff.go b/src/runtime/mkduff.go
index da191cc594..f036745092 100644
--- a/src/runtime/mkduff.go
+++ b/src/runtime/mkduff.go
@@ -154,7 +154,7 @@ func zeroARM64(w io.Writer) {
// ZR: always zero
// R20: ptr to memory to be zeroed
// On return, R20 points to the last zeroed dword.
- fmt.Fprintln(w, "TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0")
+ fmt.Fprintln(w, "TEXT runtime·duffzero<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-0")
for i := 0; i < 63; i++ {
fmt.Fprintln(w, "\tSTP.P\t(ZR, ZR), 16(R20)")
}
@@ -167,7 +167,7 @@ func copyARM64(w io.Writer) {
// R21: ptr to destination memory
// R26, R27 (aka REGTMP): scratch space
// R20 and R21 are updated as a side effect
- fmt.Fprintln(w, "TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0")
+ fmt.Fprintln(w, "TEXT runtime·duffcopy<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-0")
for i := 0; i < 64; i++ {
fmt.Fprintln(w, "\tLDP.P\t16(R20), (R26, R27)")
diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go
index 6c980540f5..f2b90307ca 100644
--- a/src/runtime/mkpreempt.go
+++ b/src/runtime/mkpreempt.go
@@ -128,8 +128,7 @@ func header(arch string) {
}
fmt.Fprintf(out, "#include \"go_asm.h\"\n")
fmt.Fprintf(out, "#include \"textflag.h\"\n\n")
- fmt.Fprintf(out, "// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.\n")
- fmt.Fprintf(out, "TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0\n")
+ fmt.Fprintf(out, "TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0\n")
}
func p(f string, args ...interface{}) {
diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go
index 5235b898e4..0ba415ba5a 100644
--- a/src/runtime/mprof.go
+++ b/src/runtime/mprof.go
@@ -8,6 +8,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/atomic"
"unsafe"
)
@@ -621,7 +622,7 @@ func record(r *MemProfileRecord, b *bucket) {
r.AllocObjects = int64(mp.active.allocs)
r.FreeObjects = int64(mp.active.frees)
if raceenabled {
- racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), funcPC(MemProfile))
+ racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(MemProfile))
}
if msanenabled {
msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
@@ -674,7 +675,7 @@ func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
}
r.Cycles = bp.cycles
if raceenabled {
- racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), funcPC(BlockProfile))
+ racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(BlockProfile))
}
if msanenabled {
msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
diff --git a/src/runtime/mranges.go b/src/runtime/mranges.go
index 84a2c06dbb..e0be1e134e 100644
--- a/src/runtime/mranges.go
+++ b/src/runtime/mranges.go
@@ -10,7 +10,7 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
@@ -167,7 +167,7 @@ func (a *addrRanges) init(sysStat *sysMemStat) {
ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
ranges.len = 0
ranges.cap = 16
- ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, sysStat))
+ ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, sysStat))
a.sysStat = sysStat
a.totalBytes = 0
}
@@ -294,7 +294,7 @@ func (a *addrRanges) add(r addrRange) {
ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
ranges.len = len(oldRanges) + 1
ranges.cap = cap(oldRanges) * 2
- ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, a.sysStat))
+ ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, a.sysStat))
// Copy in the old array, but make space for the new range.
copy(a.ranges[:i], oldRanges[:i])
@@ -364,7 +364,7 @@ func (a *addrRanges) cloneInto(b *addrRanges) {
ranges := (*notInHeapSlice)(unsafe.Pointer(&b.ranges))
ranges.len = 0
ranges.cap = cap(a.ranges)
- ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, b.sysStat))
+ ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, b.sysStat))
}
b.ranges = b.ranges[:len(a.ranges)]
b.totalBytes = a.totalBytes
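The sys.PtrSize to goarch.PtrSize substitutions in mranges.go above (and in the files that follow) are a mechanical move of the pointer-size constant into internal/goarch. As a standalone sketch, such a constant can be derived without build tags; this mirrors the standard library's definition:

    package main

    import "fmt"

    // PtrSize is 4 on 32-bit targets and 8 on 64-bit targets:
    // ^uintptr(0)>>63 is 1 only when uintptr is 64 bits wide.
    const PtrSize = 4 << (^uintptr(0) >> 63)

    func main() {
    	fmt.Println("PtrSize =", PtrSize) // 8 on amd64/arm64, 4 on 386/arm
    }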
diff --git a/src/runtime/mspanset.go b/src/runtime/mspanset.go
index 10d2596c38..29f14910cc 100644
--- a/src/runtime/mspanset.go
+++ b/src/runtime/mspanset.go
@@ -6,8 +6,8 @@ package runtime
import (
"internal/cpu"
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
@@ -82,7 +82,7 @@ func (b *spanSet) push(s *mspan) {
retry:
if top < spineLen {
spine := atomic.Loadp(unsafe.Pointer(&b.spine))
- blockp := add(spine, sys.PtrSize*top)
+ blockp := add(spine, goarch.PtrSize*top)
block = (*spanSetBlock)(atomic.Loadp(blockp))
} else {
// Add a new block to the spine, potentially growing
@@ -102,11 +102,11 @@ retry:
if newCap == 0 {
newCap = spanSetInitSpineCap
}
- newSpine := persistentalloc(newCap*sys.PtrSize, cpu.CacheLineSize, &memstats.gcMiscSys)
+ newSpine := persistentalloc(newCap*goarch.PtrSize, cpu.CacheLineSize, &memstats.gcMiscSys)
if b.spineCap != 0 {
// Blocks are allocated off-heap, so
// no write barriers.
- memmove(newSpine, b.spine, b.spineCap*sys.PtrSize)
+ memmove(newSpine, b.spine, b.spineCap*goarch.PtrSize)
}
// Spine is allocated off-heap, so no write barrier.
atomic.StorepNoWB(unsafe.Pointer(&b.spine), newSpine)
@@ -124,7 +124,7 @@ retry:
block = spanSetBlockPool.alloc()
// Add it to the spine.
- blockp := add(b.spine, sys.PtrSize*top)
+ blockp := add(b.spine, goarch.PtrSize*top)
// Blocks are allocated off-heap, so no write barrier.
atomic.StorepNoWB(blockp, unsafe.Pointer(block))
atomic.Storeuintptr(&b.spineLen, spineLen+1)
@@ -181,7 +181,7 @@ claimLoop:
// grows monotonically and we've already verified it, we'll definitely
// be reading from a valid block.
spine := atomic.Loadp(unsafe.Pointer(&b.spine))
- blockp := add(spine, sys.PtrSize*uintptr(top))
+ blockp := add(spine, goarch.PtrSize*uintptr(top))
// Given that the spine length is correct, we know we will never
// see a nil block here, since the length is always updated after
@@ -241,7 +241,7 @@ func (b *spanSet) reset() {
// since it may be pushed into again. In order to avoid leaking
// memory since we're going to reset the head and tail, clean
// up such a block now, if it exists.
- blockp := (**spanSetBlock)(add(b.spine, sys.PtrSize*uintptr(top)))
+ blockp := (**spanSetBlock)(add(b.spine, goarch.PtrSize*uintptr(top)))
block := *blockp
if block != nil {
// Sanity check the popped value.
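The spine accesses in mspanset.go above index an off-heap array of block pointers by hand: element top lives at spine + top*PtrSize. Below is a minimal standalone sketch of the same indexing pattern, assuming an ordinary slice as the backing store in place of persistentalloc:

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    func main() {
    	const ptrSize = unsafe.Sizeof(uintptr(0))
    	vals := []int{10, 20, 30}
    	ptrs := []*int{&vals[0], &vals[1], &vals[2]}

    	// base plays the role of the spine: a pointer to an array of pointers.
    	base := unsafe.Pointer(&ptrs[0])
    	for i := uintptr(0); i < uintptr(len(ptrs)); i++ {
    		// Like add(spine, goarch.PtrSize*top) in the hunks above.
    		slot := unsafe.Add(base, i*ptrSize)
    		p := *(**int)(slot)
    		fmt.Println(*p)
    	}
    }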
diff --git a/src/runtime/mstats.go b/src/runtime/mstats.go
index eeb2a7b4bc..341ba9a936 100644
--- a/src/runtime/mstats.go
+++ b/src/runtime/mstats.go
@@ -7,8 +7,8 @@
package runtime
import (
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
@@ -713,7 +713,7 @@ type heapStatsDelta struct {
// Add a uint32 to ensure this struct is a multiple of 8 bytes in size.
// Only necessary on 32-bit platforms.
- _ [(sys.PtrSize / 4) % 2]uint32
+ _ [(goarch.PtrSize / 4) % 2]uint32
}
// merge adds in the deltas from b into a.
diff --git a/src/runtime/mwbbuf.go b/src/runtime/mwbbuf.go
index 6efc00007d..78d9382620 100644
--- a/src/runtime/mwbbuf.go
+++ b/src/runtime/mwbbuf.go
@@ -23,8 +23,8 @@
package runtime
import (
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
@@ -145,7 +145,7 @@ func (b *wbBuf) putFast(old, new uintptr) bool {
p := (*[2]uintptr)(unsafe.Pointer(b.next))
p[0] = old
p[1] = new
- b.next += 2 * sys.PtrSize
+ b.next += 2 * goarch.PtrSize
return b.next != b.end
}
diff --git a/src/runtime/norace_linux_test.go b/src/runtime/norace_linux_test.go
index 94b7c7a467..b199aa633c 100644
--- a/src/runtime/norace_linux_test.go
+++ b/src/runtime/norace_linux_test.go
@@ -9,6 +9,7 @@
package runtime_test
import (
+ "internal/abi"
"runtime"
"testing"
"time"
@@ -25,7 +26,7 @@ func newOSProcCreated() {
// Can't be run with -race because it inserts calls into newOSProcCreated()
// that require a valid G/M.
func TestNewOSProc0(t *testing.T) {
- runtime.NewOSProc0(0x800000, unsafe.Pointer(runtime.FuncPC(newOSProcCreated)))
+ runtime.NewOSProc0(0x800000, unsafe.Pointer(abi.FuncPCABIInternal(newOSProcCreated)))
check := time.NewTicker(100 * time.Millisecond)
defer check.Stop()
end := time.After(5 * time.Second)
diff --git a/src/runtime/os3_plan9.go b/src/runtime/os3_plan9.go
index c5dc23de8b..a06d74e279 100644
--- a/src/runtime/os3_plan9.go
+++ b/src/runtime/os3_plan9.go
@@ -5,7 +5,8 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
@@ -92,15 +93,15 @@ func sighandler(_ureg *ureg, note *byte, gp *g) int {
if usesLR {
c.setlr(pc)
} else {
- sp -= sys.PtrSize
+ sp -= goarch.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = pc
c.setsp(sp)
}
}
if usesLR {
- c.setpc(funcPC(sigpanictramp))
+ c.setpc(abi.FuncPCABI0(sigpanictramp))
} else {
- c.setpc(funcPC(sigpanic0))
+ c.setpc(abi.FuncPCABI0(sigpanic0))
}
return _NCONT
}
diff --git a/src/runtime/os3_solaris.go b/src/runtime/os3_solaris.go
index 39ef831acf..84194a3050 100644
--- a/src/runtime/os3_solaris.go
+++ b/src/runtime/os3_solaris.go
@@ -5,7 +5,8 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
@@ -172,7 +173,7 @@ func newosproc(mp *m) {
// Disable signals during create, so that the new thread starts
// with signals disabled. It will enable them in minit.
sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
- ret = pthread_create(&tid, &attr, funcPC(tstart_sysvicall), unsafe.Pointer(mp))
+ ret = pthread_create(&tid, &attr, abi.FuncPCABI0(tstart_sysvicall), unsafe.Pointer(mp))
sigprocmask(_SIG_SETMASK, &oset, nil)
if ret != 0 {
print("runtime: failed to create new OS thread (have ", mcount(), " already; errno=", ret, ")\n")
@@ -215,7 +216,7 @@ func miniterrno()
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
- asmcgocall(unsafe.Pointer(funcPC(miniterrno)), unsafe.Pointer(&libc____errno))
+ asmcgocall(unsafe.Pointer(abi.FuncPCABI0(miniterrno)), unsafe.Pointer(&libc____errno))
minitSignals()
@@ -241,8 +242,8 @@ func setsig(i uint32, fn uintptr) {
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = sigset_all
- if fn == funcPC(sighandler) {
- fn = funcPC(sigtramp)
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
+ fn = abi.FuncPCABI0(sigtramp)
}
*((*uintptr)(unsafe.Pointer(&sa._funcptr))) = fn
sigaction(i, &sa, nil)
@@ -390,6 +391,7 @@ func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (un
}
//go:nosplit
+//go:cgo_unsafe_args
func doMmap(addr, n, prot, flags, fd, off uintptr) (uintptr, uintptr) {
var libcall libcall
libcall.fn = uintptr(unsafe.Pointer(&libc_mmap))
@@ -598,7 +600,7 @@ func sysargs(argc int32, argv **byte) {
n++
// now argv+n is auxv
- auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
+ auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
sysauxv(auxv[:])
}
diff --git a/src/runtime/os_aix.go b/src/runtime/os_aix.go
index 4fb1c8e845..478dde2fc3 100644
--- a/src/runtime/os_aix.go
+++ b/src/runtime/os_aix.go
@@ -8,6 +8,7 @@
package runtime
import (
+ "internal/abi"
"unsafe"
)
@@ -267,7 +268,7 @@ func setsig(i uint32, fn uintptr) {
var sa sigactiont
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = sigset_all
- if fn == funcPC(sighandler) {
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
fn = uintptr(unsafe.Pointer(&sigtramp))
}
sa.sa_handler = fn
diff --git a/src/runtime/os_darwin.go b/src/runtime/os_darwin.go
index 079be107d7..ca61f20e8a 100644
--- a/src/runtime/os_darwin.go
+++ b/src/runtime/os_darwin.go
@@ -369,7 +369,7 @@ func setsig(i uint32, fn uintptr) {
var sa usigactiont
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = ^uint32(0)
- if fn == funcPC(sighandler) { // funcPC(sighandler) matches the callers in signal_unix.go
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
if iscgo {
fn = abi.FuncPCABI0(cgoSigtramp)
} else {
diff --git a/src/runtime/os_dragonfly.go b/src/runtime/os_dragonfly.go
index 5c688a3109..191a560667 100644
--- a/src/runtime/os_dragonfly.go
+++ b/src/runtime/os_dragonfly.go
@@ -5,7 +5,8 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
@@ -147,14 +148,14 @@ func lwp_start(uintptr)
func newosproc(mp *m) {
stk := unsafe.Pointer(mp.g0.stack.hi)
if false {
- print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " lwp_start=", funcPC(lwp_start), " id=", mp.id, " ostk=", &mp, "\n")
+ print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " lwp_start=", abi.FuncPCABI0(lwp_start), " id=", mp.id, " ostk=", &mp, "\n")
}
var oset sigset
sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
params := lwpparams{
- start_func: funcPC(lwp_start),
+ start_func: abi.FuncPCABI0(lwp_start),
arg: unsafe.Pointer(mp),
stack: uintptr(stk),
tid1: nil, // minit will record tid
@@ -226,8 +227,8 @@ func setsig(i uint32, fn uintptr) {
var sa sigactiont
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = sigset_all
- if fn == funcPC(sighandler) {
- fn = funcPC(sigtramp)
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
+ fn = abi.FuncPCABI0(sigtramp)
}
sa.sa_sigaction = fn
sigaction(i, &sa, nil)
@@ -278,7 +279,7 @@ func sysargs(argc int32, argv **byte) {
// skip NULL separator
n++
- auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
+ auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
sysauxv(auxv[:])
}
diff --git a/src/runtime/os_freebsd.go b/src/runtime/os_freebsd.go
index 09dd50ce59..5a8121a420 100644
--- a/src/runtime/os_freebsd.go
+++ b/src/runtime/os_freebsd.go
@@ -5,7 +5,8 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
@@ -116,8 +117,8 @@ func getncpu() int32 {
}
maskSize := int(maxcpus+7) / 8
- if maskSize < sys.PtrSize {
- maskSize = sys.PtrSize
+ if maskSize < goarch.PtrSize {
+ maskSize = goarch.PtrSize
}
if maskSize > len(mask) {
maskSize = len(mask)
@@ -197,11 +198,11 @@ func thr_start()
func newosproc(mp *m) {
stk := unsafe.Pointer(mp.g0.stack.hi)
if false {
- print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " thr_start=", funcPC(thr_start), " id=", mp.id, " ostk=", &mp, "\n")
+ print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " thr_start=", abi.FuncPCABI0(thr_start), " id=", mp.id, " ostk=", &mp, "\n")
}
param := thrparam{
- start_func: funcPC(thr_start),
+ start_func: abi.FuncPCABI0(thr_start),
arg: unsafe.Pointer(mp),
stack_base: mp.g0.stack.lo,
stack_size: uintptr(stk) - mp.g0.stack.lo,
@@ -236,7 +237,7 @@ func newosproc0(stacksize uintptr, fn unsafe.Pointer) {
// However, newosproc0 is currently unreachable because builds
// utilizing c-shared/c-archive force external linking.
param := thrparam{
- start_func: funcPC(fn),
+ start_func: uintptr(fn),
arg: nil,
stack_base: uintptr(stack), //+stacksize?
stack_size: stacksize,
@@ -391,7 +392,7 @@ func sysargs(argc int32, argv **byte) {
n++
// now argv+n is auxv
- auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
+ auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
sysauxv(auxv[:])
}
diff --git a/src/runtime/os_freebsd2.go b/src/runtime/os_freebsd2.go
index fde6fbf1b1..7e266dc27e 100644
--- a/src/runtime/os_freebsd2.go
+++ b/src/runtime/os_freebsd2.go
@@ -7,14 +7,16 @@
package runtime
+import "internal/abi"
+
//go:nosplit
//go:nowritebarrierrec
func setsig(i uint32, fn uintptr) {
var sa sigactiont
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = sigset_all
- if fn == funcPC(sighandler) {
- fn = funcPC(sigtramp)
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
+ fn = abi.FuncPCABI0(sigtramp)
}
sa.sa_handler = fn
sigaction(i, &sa, nil)
diff --git a/src/runtime/os_freebsd_amd64.go b/src/runtime/os_freebsd_amd64.go
index dc0bb9ff96..b179383eac 100644
--- a/src/runtime/os_freebsd_amd64.go
+++ b/src/runtime/os_freebsd_amd64.go
@@ -4,6 +4,8 @@
package runtime
+import "internal/abi"
+
func cgoSigtramp()
//go:nosplit
@@ -12,11 +14,11 @@ func setsig(i uint32, fn uintptr) {
var sa sigactiont
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = sigset_all
- if fn == funcPC(sighandler) {
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
if iscgo {
- fn = funcPC(cgoSigtramp)
+ fn = abi.FuncPCABI0(cgoSigtramp)
} else {
- fn = funcPC(sigtramp)
+ fn = abi.FuncPCABI0(sigtramp)
}
}
sa.sa_handler = fn
diff --git a/src/runtime/os_linux.go b/src/runtime/os_linux.go
index c8b29e396c..88c16f7163 100644
--- a/src/runtime/os_linux.go
+++ b/src/runtime/os_linux.go
@@ -5,7 +5,8 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
@@ -142,14 +143,14 @@ func newosproc(mp *m) {
* note: strace gets confused if we use CLONE_PTRACE here.
*/
if false {
- print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " clone=", funcPC(clone), " id=", mp.id, " ostk=", &mp, "\n")
+ print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " clone=", abi.FuncPCABI0(clone), " id=", mp.id, " ostk=", &mp, "\n")
}
// Disable signals during clone, so that the new thread starts
// with signals disabled. It will enable them in minit.
var oset sigset
sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
- ret := clone(cloneFlags, stk, unsafe.Pointer(mp), unsafe.Pointer(mp.g0), unsafe.Pointer(funcPC(mstart)))
+ ret := clone(cloneFlags, stk, unsafe.Pointer(mp), unsafe.Pointer(mp.g0), unsafe.Pointer(abi.FuncPCABI0(mstart)))
sigprocmask(_SIG_SETMASK, &oset, nil)
if ret < 0 {
@@ -205,7 +206,7 @@ func sysargs(argc int32, argv **byte) {
n++
// now argv+n is auxv
- auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
+ auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
if sysauxv(auxv[:]) != 0 {
return
}
@@ -429,13 +430,13 @@ func setsig(i uint32, fn uintptr) {
// should not be used". x86_64 kernel requires it. Only use it on
// x86.
if GOARCH == "386" || GOARCH == "amd64" {
- sa.sa_restorer = funcPC(sigreturn)
+ sa.sa_restorer = abi.FuncPCABI0(sigreturn)
}
- if fn == funcPC(sighandler) {
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
if iscgo {
- fn = funcPC(cgoSigtramp)
+ fn = abi.FuncPCABI0(cgoSigtramp)
} else {
- fn = funcPC(sigtramp)
+ fn = abi.FuncPCABI0(sigtramp)
}
}
sa.sa_handler = fn
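In each setsig change above, the comparison uses abi.FuncPCABIInternal(sighandler) — the PC that the Go callers in signal_unix.go pass in — while the value actually installed is abi.FuncPCABI0 of the assembly trampoline, because the kernel must enter ABI0 code that sets up the Go ABI before sighandler can run. A tiny runnable sketch of that substitution, with hypothetical stand-in values rather than real PCs:

    package main

    import "fmt"

    // selectSignalHandler mirrors the setsig pattern above: when the requested
    // handler is the Go-level sighandler, install the assembly trampoline
    // instead; any other handler is installed as-is.
    func selectSignalHandler(requested, sighandlerPC, sigtrampPC uintptr) uintptr {
    	if requested == sighandlerPC {
    		return sigtrampPC
    	}
    	return requested
    }

    func main() {
    	const sighandlerPC, sigtrampPC = 0x1000, 0x2000 // hypothetical PCs
    	fmt.Printf("%#x\n", selectSignalHandler(sighandlerPC, sighandlerPC, sigtrampPC)) // 0x2000
    	fmt.Printf("%#x\n", selectSignalHandler(0x3000, sighandlerPC, sigtrampPC))       // 0x3000
    }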
diff --git a/src/runtime/os_netbsd.go b/src/runtime/os_netbsd.go
index 6fbb3aa694..2c20ee2173 100644
--- a/src/runtime/os_netbsd.go
+++ b/src/runtime/os_netbsd.go
@@ -5,8 +5,9 @@
package runtime
import (
+ "internal/abi"
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
@@ -215,7 +216,7 @@ func newosproc(mp *m) {
var oset sigset
sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
- lwp_mcontext_init(&uc.uc_mcontext, stk, mp, mp.g0, funcPC(netbsdMstart))
+ lwp_mcontext_init(&uc.uc_mcontext, stk, mp, mp.g0, abi.FuncPCABI0(netbsdMstart))
ret := lwp_create(unsafe.Pointer(&uc), _LWP_DETACHED, unsafe.Pointer(&mp.procid))
sigprocmask(_SIG_SETMASK, &oset, nil)
@@ -318,8 +319,8 @@ func setsig(i uint32, fn uintptr) {
var sa sigactiont
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = sigset_all
- if fn == funcPC(sighandler) {
- fn = funcPC(sigtramp)
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
+ fn = abi.FuncPCABI0(sigtramp)
}
sa.sa_sigaction = fn
sigaction(i, &sa, nil)
@@ -371,7 +372,7 @@ func sysargs(argc int32, argv **byte) {
n++
// now argv+n is auxv
- auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
+ auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
sysauxv(auxv[:])
}
diff --git a/src/runtime/os_netbsd_386.go b/src/runtime/os_netbsd_386.go
index 037f7e36dc..ac89b9852c 100644
--- a/src/runtime/os_netbsd_386.go
+++ b/src/runtime/os_netbsd_386.go
@@ -4,11 +4,14 @@
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr) {
// Machine dependent mcontext initialisation for LWP.
- mc.__gregs[_REG_EIP] = uint32(funcPC(lwp_tramp))
+ mc.__gregs[_REG_EIP] = uint32(abi.FuncPCABI0(lwp_tramp))
mc.__gregs[_REG_UESP] = uint32(uintptr(stk))
mc.__gregs[_REG_EBX] = uint32(uintptr(unsafe.Pointer(mp)))
mc.__gregs[_REG_EDX] = uint32(uintptr(unsafe.Pointer(gp)))
diff --git a/src/runtime/os_netbsd_amd64.go b/src/runtime/os_netbsd_amd64.go
index 5118b0c4ff..74eea0ceab 100644
--- a/src/runtime/os_netbsd_amd64.go
+++ b/src/runtime/os_netbsd_amd64.go
@@ -4,11 +4,14 @@
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr) {
// Machine dependent mcontext initialisation for LWP.
- mc.__gregs[_REG_RIP] = uint64(funcPC(lwp_tramp))
+ mc.__gregs[_REG_RIP] = uint64(abi.FuncPCABI0(lwp_tramp))
mc.__gregs[_REG_RSP] = uint64(uintptr(stk))
mc.__gregs[_REG_R8] = uint64(uintptr(unsafe.Pointer(mp)))
mc.__gregs[_REG_R9] = uint64(uintptr(unsafe.Pointer(gp)))
diff --git a/src/runtime/os_netbsd_arm.go b/src/runtime/os_netbsd_arm.go
index b5ec23e45b..5fb4e08d66 100644
--- a/src/runtime/os_netbsd_arm.go
+++ b/src/runtime/os_netbsd_arm.go
@@ -4,11 +4,14 @@
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr) {
// Machine dependent mcontext initialisation for LWP.
- mc.__gregs[_REG_R15] = uint32(funcPC(lwp_tramp))
+ mc.__gregs[_REG_R15] = uint32(abi.FuncPCABI0(lwp_tramp))
mc.__gregs[_REG_R13] = uint32(uintptr(stk))
mc.__gregs[_REG_R0] = uint32(uintptr(unsafe.Pointer(mp)))
mc.__gregs[_REG_R1] = uint32(uintptr(unsafe.Pointer(gp)))
diff --git a/src/runtime/os_netbsd_arm64.go b/src/runtime/os_netbsd_arm64.go
index 8d21b0a430..2dda9c9274 100644
--- a/src/runtime/os_netbsd_arm64.go
+++ b/src/runtime/os_netbsd_arm64.go
@@ -4,11 +4,14 @@
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr) {
// Machine dependent mcontext initialisation for LWP.
- mc.__gregs[_REG_ELR] = uint64(funcPC(lwp_tramp))
+ mc.__gregs[_REG_ELR] = uint64(abi.FuncPCABI0(lwp_tramp))
mc.__gregs[_REG_X31] = uint64(uintptr(stk))
mc.__gregs[_REG_X0] = uint64(uintptr(unsafe.Pointer(mp)))
mc.__gregs[_REG_X1] = uint64(uintptr(unsafe.Pointer(mp.g0)))
diff --git a/src/runtime/os_openbsd.go b/src/runtime/os_openbsd.go
index 6259b96c22..54f36c6ebf 100644
--- a/src/runtime/os_openbsd.go
+++ b/src/runtime/os_openbsd.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/atomic"
"unsafe"
)
@@ -191,8 +192,8 @@ func setsig(i uint32, fn uintptr) {
var sa sigactiont
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = uint32(sigset_all)
- if fn == funcPC(sighandler) {
- fn = funcPC(sigtramp)
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
+ fn = abi.FuncPCABI0(sigtramp)
}
sa.sa_sigaction = fn
sigaction(i, &sa, nil)
diff --git a/src/runtime/os_openbsd_libc.go b/src/runtime/os_openbsd_libc.go
index 0a342e5533..981e49827f 100644
--- a/src/runtime/os_openbsd_libc.go
+++ b/src/runtime/os_openbsd_libc.go
@@ -8,6 +8,7 @@
package runtime
import (
+ "internal/abi"
"unsafe"
)
@@ -48,7 +49,7 @@ func newosproc(mp *m) {
// setup and then calls mstart.
var oset sigset
sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
- err := pthread_create(&attr, funcPC(mstart_stub), unsafe.Pointer(mp))
+ err := pthread_create(&attr, abi.FuncPCABI0(mstart_stub), unsafe.Pointer(mp))
sigprocmask(_SIG_SETMASK, &oset, nil)
if err != 0 {
write(2, unsafe.Pointer(&failThreadCreate[0]), int32(len(failThreadCreate)))
diff --git a/src/runtime/os_openbsd_syscall.go b/src/runtime/os_openbsd_syscall.go
index 3cdcb6c707..1ddee1864e 100644
--- a/src/runtime/os_openbsd_syscall.go
+++ b/src/runtime/os_openbsd_syscall.go
@@ -8,7 +8,8 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
@@ -28,12 +29,12 @@ func newosproc(mp *m) {
param := tforkt{
tf_tcb: unsafe.Pointer(&mp.tls[0]),
tf_tid: nil, // minit will record tid
- tf_stack: uintptr(stk) - sys.PtrSize,
+ tf_stack: uintptr(stk) - goarch.PtrSize,
}
var oset sigset
sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
- ret := tfork(&param, unsafe.Sizeof(param), mp, mp.g0, funcPC(mstart))
+ ret := tfork(&param, unsafe.Sizeof(param), mp, mp.g0, abi.FuncPCABI0(mstart))
sigprocmask(_SIG_SETMASK, &oset, nil)
if ret < 0 {
diff --git a/src/runtime/os_plan9.go b/src/runtime/os_plan9.go
index 4d428346f0..975d460a7d 100644
--- a/src/runtime/os_plan9.go
+++ b/src/runtime/os_plan9.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/atomic"
"unsafe"
)
@@ -346,7 +347,7 @@ func getRandomData(r []byte) {
func initsig(preinit bool) {
if !preinit {
- notify(unsafe.Pointer(funcPC(sigtramp)))
+ notify(unsafe.Pointer(abi.FuncPCABI0(sigtramp)))
}
}
diff --git a/src/runtime/os_solaris.go b/src/runtime/os_solaris.go
index 89129e5f1a..8ac1b08f69 100644
--- a/src/runtime/os_solaris.go
+++ b/src/runtime/os_solaris.go
@@ -179,6 +179,7 @@ func sysvicall3Err(fn *libcFunc, a1, a2, a3 uintptr) (r1, err uintptr) {
}
//go:nosplit
+//go:cgo_unsafe_args
func sysvicall4(fn *libcFunc, a1, a2, a3, a4 uintptr) uintptr {
// Leave caller's PC/SP around for traceback.
gp := getg()
@@ -208,6 +209,7 @@ func sysvicall4(fn *libcFunc, a1, a2, a3, a4 uintptr) uintptr {
}
//go:nosplit
+//go:cgo_unsafe_args
func sysvicall5(fn *libcFunc, a1, a2, a3, a4, a5 uintptr) uintptr {
// Leave caller's PC/SP around for traceback.
gp := getg()
@@ -237,6 +239,7 @@ func sysvicall5(fn *libcFunc, a1, a2, a3, a4, a5 uintptr) uintptr {
}
//go:nosplit
+//go:cgo_unsafe_args
func sysvicall6(fn *libcFunc, a1, a2, a3, a4, a5, a6 uintptr) uintptr {
// Leave caller's PC/SP around for traceback.
gp := getg()
diff --git a/src/runtime/os_windows.go b/src/runtime/os_windows.go
index f0935264ac..648239fb36 100644
--- a/src/runtime/os_windows.go
+++ b/src/runtime/os_windows.go
@@ -5,8 +5,9 @@
package runtime
import (
+ "internal/abi"
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
@@ -543,7 +544,7 @@ func initLongPathSupport() {
}
func osinit() {
- asmstdcallAddr = unsafe.Pointer(funcPC(asmstdcall))
+ asmstdcallAddr = unsafe.Pointer(abi.FuncPCABI0(asmstdcall))
setBadSignalMsg()
@@ -906,7 +907,7 @@ func semacreate(mp *m) {
func newosproc(mp *m) {
// We pass 0 for the stack size to use the default for this binary.
thandle := stdcall6(_CreateThread, 0, 0,
- funcPC(tstart_stdcall), uintptr(unsafe.Pointer(mp)),
+ abi.FuncPCABI0(tstart_stdcall), uintptr(unsafe.Pointer(mp)),
0, 0)
if thandle == 0 {
@@ -1385,14 +1386,14 @@ func preemptM(mp *m) {
if gp != nil && wantAsyncPreempt(gp) {
if ok, newpc := isAsyncSafePoint(gp, c.ip(), c.sp(), c.lr()); ok {
// Inject call to asyncPreempt
- targetPC := funcPC(asyncPreempt)
+ targetPC := abi.FuncPCABI0(asyncPreempt)
switch GOARCH {
default:
throw("unsupported architecture")
case "386", "amd64":
// Make it look like the thread called targetPC.
sp := c.sp()
- sp -= sys.PtrSize
+ sp -= goarch.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = newpc
c.set_sp(sp)
c.set_ip(targetPC)
diff --git a/src/runtime/panic.go b/src/runtime/panic.go
index f6c38aafcc..e4bdceb32f 100644
--- a/src/runtime/panic.go
+++ b/src/runtime/panic.go
@@ -5,8 +5,7 @@
package runtime
import (
- "internal/abi"
- "internal/goexperiment"
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -31,7 +30,7 @@ import (
// pc should be the program counter of the compiler-generated code that
// triggered this panic.
func panicCheck1(pc uintptr, msg string) {
- if sys.GoarchWasm == 0 && hasPrefix(funcname(findfunc(pc)), "runtime.") {
+ if goarch.IsWasm == 0 && hasPrefix(funcname(findfunc(pc)), "runtime.") {
// Note: wasm can't tail call, so we can't get the original caller's pc.
throw(msg)
}
@@ -226,47 +225,27 @@ func panicmemAddr(addr uintptr) {
panic(errorAddressString{msg: "invalid memory address or nil pointer dereference", addr: addr})
}
-// Create a new deferred function fn with siz bytes of arguments.
+// Create a new deferred function fn, which has no arguments and results.
// The compiler turns a defer statement into a call to this.
-//go:nosplit
-func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
+func deferproc(fn func()) {
gp := getg()
if gp.m.curg != gp {
// go code on the system stack can't defer
throw("defer on system stack")
}
- if goexperiment.RegabiDefer && siz != 0 {
- // TODO: Make deferproc just take a func().
- throw("defer with non-empty frame")
- }
-
- // the arguments of fn are in a perilous state. The stack map
- // for deferproc does not describe them. So we can't let garbage
- // collection or stack copying trigger until we've copied them out
- // to somewhere safe. The memmove below does that.
- // Until the copy completes, we can only call nosplit routines.
- sp := getcallersp()
- argp := uintptr(unsafe.Pointer(&fn)) + unsafe.Sizeof(fn)
- callerpc := getcallerpc()
-
- d := newdefer(siz)
+ d := newdefer()
if d._panic != nil {
throw("deferproc: d.panic != nil after newdefer")
}
d.link = gp._defer
gp._defer = d
d.fn = fn
- d.pc = callerpc
- d.sp = sp
- switch siz {
- case 0:
- // Do nothing.
- case sys.PtrSize:
- *(*uintptr)(deferArgs(d)) = *(*uintptr)(unsafe.Pointer(argp))
- default:
- memmove(deferArgs(d), unsafe.Pointer(argp), uintptr(siz))
- }
+ d.pc = getcallerpc()
+ // We must not be preempted between calling getcallersp and
+ // storing it to d.sp because getcallersp's result is a
+ // uintptr stack pointer.
+ d.sp = getcallersp()
// deferproc returns 0 normally.
// a deferred func that stops a panic
@@ -280,12 +259,10 @@ func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
}
// deferprocStack queues a new deferred function with a defer record on the stack.
-// The defer record must have its siz and fn fields initialized.
+// The defer record must have its fn field initialized.
// All other fields can contain junk.
-// The defer record must be immediately followed in memory by
-// the arguments of the defer.
-// Nosplit because the arguments on the stack won't be scanned
-// until the defer record is spliced into the gp._defer list.
+// Nosplit because of the uninitialized pointer fields on the stack.
+//
//go:nosplit
func deferprocStack(d *_defer) {
gp := getg()
@@ -293,10 +270,7 @@ func deferprocStack(d *_defer) {
// go code on the system stack can't defer
throw("defer on system stack")
}
- if goexperiment.RegabiDefer && d.siz != 0 {
- throw("defer with non-empty frame")
- }
- // siz and fn are already set.
+ // fn is already set.
// The other fields are junk on entry to deferprocStack and
// are initialized here.
d.started = false
@@ -327,132 +301,37 @@ func deferprocStack(d *_defer) {
// been set and must not be clobbered.
}
-// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
-// Each P holds a pool for defers with small arg sizes.
-// Assign defer allocations to pools by rounding to 16, to match malloc size classes.
-
-const (
- deferHeaderSize = unsafe.Sizeof(_defer{})
- minDeferAlloc = (deferHeaderSize + 15) &^ 15
- minDeferArgs = minDeferAlloc - deferHeaderSize
-)
-
-// defer size class for arg size sz
-//go:nosplit
-func deferclass(siz uintptr) uintptr {
- if siz <= minDeferArgs {
- return 0
- }
- return (siz - minDeferArgs + 15) / 16
-}
-
-// total size of memory block for defer with arg size sz
-func totaldefersize(siz uintptr) uintptr {
- if siz <= minDeferArgs {
- return minDeferAlloc
- }
- return deferHeaderSize + siz
-}
-
-// Ensure that defer arg sizes that map to the same defer size class
-// also map to the same malloc size class.
-func testdefersizes() {
- var m [len(p{}.deferpool)]int32
-
- for i := range m {
- m[i] = -1
- }
- for i := uintptr(0); ; i++ {
- defersc := deferclass(i)
- if defersc >= uintptr(len(m)) {
- break
- }
- siz := roundupsize(totaldefersize(i))
- if m[defersc] < 0 {
- m[defersc] = int32(siz)
- continue
- }
- if m[defersc] != int32(siz) {
- print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
- throw("bad defer size class")
- }
- }
-}
-
-// The arguments associated with a deferred call are stored
-// immediately after the _defer header in memory.
-//go:nosplit
-func deferArgs(d *_defer) unsafe.Pointer {
- if d.siz == 0 {
- // Avoid pointer past the defer allocation.
- return nil
- }
- return add(unsafe.Pointer(d), unsafe.Sizeof(*d))
-}
-
-// deferFunc returns d's deferred function. This is temporary while we
-// support both modes of GOEXPERIMENT=regabidefer. Once we commit to
-// that experiment, we should change the type of d.fn.
-//go:nosplit
-func deferFunc(d *_defer) func() {
- if !goexperiment.RegabiDefer {
- throw("requires GOEXPERIMENT=regabidefer")
- }
- var fn func()
- *(**funcval)(unsafe.Pointer(&fn)) = d.fn
- return fn
-}
-
-var deferType *_type // type of _defer struct
-
-func init() {
- var x interface{}
- x = (*_defer)(nil)
- deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
-}
+// Each P holds a pool for defers.
// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer. The defer is not
// added to any defer chain yet.
-//
-// This must not grow the stack because there may be a frame without
-// stack map information when this is called.
-//
-//go:nosplit
-func newdefer(siz int32) *_defer {
+func newdefer() *_defer {
var d *_defer
- sc := deferclass(uintptr(siz))
- gp := getg()
- if sc < uintptr(len(p{}.deferpool)) {
- pp := gp.m.p.ptr()
- if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
- // Take the slow path on the system stack so
- // we don't grow newdefer's stack.
- systemstack(func() {
- lock(&sched.deferlock)
- for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
- d := sched.deferpool[sc]
- sched.deferpool[sc] = d.link
- d.link = nil
- pp.deferpool[sc] = append(pp.deferpool[sc], d)
- }
- unlock(&sched.deferlock)
- })
- }
- if n := len(pp.deferpool[sc]); n > 0 {
- d = pp.deferpool[sc][n-1]
- pp.deferpool[sc][n-1] = nil
- pp.deferpool[sc] = pp.deferpool[sc][:n-1]
+ mp := acquirem()
+ pp := mp.p.ptr()
+ if len(pp.deferpool) == 0 && sched.deferpool != nil {
+ lock(&sched.deferlock)
+ for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
+ d := sched.deferpool
+ sched.deferpool = d.link
+ d.link = nil
+ pp.deferpool = append(pp.deferpool, d)
}
+ unlock(&sched.deferlock)
+ }
+ if n := len(pp.deferpool); n > 0 {
+ d = pp.deferpool[n-1]
+ pp.deferpool[n-1] = nil
+ pp.deferpool = pp.deferpool[:n-1]
}
+ releasem(mp)
+ mp, pp = nil, nil
+
if d == nil {
- // Allocate new defer+args.
- systemstack(func() {
- total := roundupsize(totaldefersize(uintptr(siz)))
- d = (*_defer)(mallocgc(total, deferType, true))
- })
+ // Allocate new defer.
+ d = new(_defer)
}
- d.siz = siz
d.heap = true
return d
}
@@ -460,11 +339,16 @@ func newdefer(siz int32) *_defer {
// Free the given defer.
// The defer cannot be used after this call.
//
-// This must not grow the stack because there may be a frame without a
-// stack map when this is called.
+// This is nosplit because the incoming defer is in a perilous state.
+// It's not on any defer list, so stack copying won't adjust stack
+// pointers in it (namely, d.link). Hence, if we were to copy the
+// stack, d could then contain a stale pointer.
//
//go:nosplit
func freedefer(d *_defer) {
+ d.link = nil
+ // After this point we can copy the stack.
+
if d._panic != nil {
freedeferpanic()
}
@@ -474,53 +358,36 @@ func freedefer(d *_defer) {
if !d.heap {
return
}
- sc := deferclass(uintptr(d.siz))
- if sc >= uintptr(len(p{}.deferpool)) {
- return
- }
- pp := getg().m.p.ptr()
- if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
+
+ mp := acquirem()
+ pp := mp.p.ptr()
+ if len(pp.deferpool) == cap(pp.deferpool) {
// Transfer half of local cache to the central cache.
- //
- // Take this slow path on the system stack so
- // we don't grow freedefer's stack.
- systemstack(func() {
- var first, last *_defer
- for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
- n := len(pp.deferpool[sc])
- d := pp.deferpool[sc][n-1]
- pp.deferpool[sc][n-1] = nil
- pp.deferpool[sc] = pp.deferpool[sc][:n-1]
- if first == nil {
- first = d
- } else {
- last.link = d
- }
- last = d
+ var first, last *_defer
+ for len(pp.deferpool) > cap(pp.deferpool)/2 {
+ n := len(pp.deferpool)
+ d := pp.deferpool[n-1]
+ pp.deferpool[n-1] = nil
+ pp.deferpool = pp.deferpool[:n-1]
+ if first == nil {
+ first = d
+ } else {
+ last.link = d
}
- lock(&sched.deferlock)
- last.link = sched.deferpool[sc]
- sched.deferpool[sc] = first
- unlock(&sched.deferlock)
- })
+ last = d
+ }
+ lock(&sched.deferlock)
+ last.link = sched.deferpool
+ sched.deferpool = first
+ unlock(&sched.deferlock)
}
- // These lines used to be simply `*d = _defer{}` but that
- // started causing a nosplit stack overflow via typedmemmove.
- d.siz = 0
- d.started = false
- d.openDefer = false
- d.sp = 0
- d.pc = 0
- d.framepc = 0
- d.varp = 0
- d.fd = nil
- // d._panic and d.fn must be nil already.
- // If not, we would have called freedeferpanic or freedeferfn above,
- // both of which throw.
- d.link = nil
+ *d = _defer{}
- pp.deferpool[sc] = append(pp.deferpool[sc], d)
+ pp.deferpool = append(pp.deferpool, d)
+
+ releasem(mp)
+ mp, pp = nil, nil
}
// Separate function so that it can split stack.
@@ -535,66 +402,39 @@ func freedeferfn() {
throw("freedefer with d.fn != nil")
}
-// Run a deferred function if there is one.
+// deferreturn runs deferred functions for the caller's frame.
// The compiler inserts a call to this at the end of any
// function which calls defer.
-// If there is a deferred function, this will call runtime·jmpdefer,
-// which will jump to the deferred function such that it appears
-// to have been called by the caller of deferreturn at the point
-// just before deferreturn was called. The effect is that deferreturn
-// is called again and again until there are no more deferred functions.
-//
-// Declared as nosplit, because the function should not be preempted once we start
-// modifying the caller's frame in order to reuse the frame to call the deferred
-// function.
-//
-//go:nosplit
func deferreturn() {
gp := getg()
- d := gp._defer
- if d == nil {
- return
- }
- sp := getcallersp()
- if d.sp != sp {
- return
- }
- if d.openDefer {
- done := runOpenDeferFrame(gp, d)
- if !done {
- throw("unfinished open-coded defers in deferreturn")
+ for {
+ d := gp._defer
+ if d == nil {
+ return
}
+ sp := getcallersp()
+ if d.sp != sp {
+ return
+ }
+ if d.openDefer {
+ done := runOpenDeferFrame(gp, d)
+ if !done {
+ throw("unfinished open-coded defers in deferreturn")
+ }
+ gp._defer = d.link
+ freedefer(d)
+ // If this frame uses open defers, then this
+ // must be the only defer record for the
+ // frame, so we can just return.
+ return
+ }
+
+ fn := d.fn
+ d.fn = nil
gp._defer = d.link
freedefer(d)
- return
+ fn()
}
-
- // Moving arguments around.
- //
- // Everything called after this point must be recursively
- // nosplit because the garbage collector won't know the form
- // of the arguments until the jmpdefer can flip the PC over to
- // fn.
- argp := getcallersp() + sys.MinFrameSize
- switch d.siz {
- case 0:
- // Do nothing.
- case sys.PtrSize:
- *(*uintptr)(unsafe.Pointer(argp)) = *(*uintptr)(deferArgs(d))
- default:
- memmove(unsafe.Pointer(argp), deferArgs(d), uintptr(d.siz))
- }
- fn := d.fn
- d.fn = nil
- gp._defer = d.link
- freedefer(d)
- // If the defer function pointer is nil, force the seg fault to happen
- // here rather than in jmpdefer. gentraceback() throws an error if it is
- // called with a callback on an LR architecture and jmpdefer is on the
- // stack, because the stack trace can be incorrect in that case - see
- // issue #8153).
- _ = fn.fn
- jmpdefer(fn, argp)
}
// Goexit terminates the goroutine that calls it. No other goroutine is affected.
@@ -655,15 +495,9 @@ func Goexit() {
addOneOpenDeferFrame(gp, 0, nil)
}
} else {
- if goexperiment.RegabiDefer {
- // Save the pc/sp in deferCallSave(), so we can "recover" back to this
- // loop if necessary.
- deferCallSave(&p, deferFunc(d))
- } else {
- // Save the pc/sp in reflectcallSave(), so we can "recover" back to this
- // loop if necessary.
- reflectcallSave(&p, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz))
- }
+ // Save the pc/sp in deferCallSave(), so we can "recover" back to this
+ // loop if necessary.
+ deferCallSave(&p, d.fn)
}
if p.aborted {
// We had a recursive panic in the defer d we started, and
@@ -783,8 +617,7 @@ func addOneOpenDeferFrame(gp *g, pc uintptr, sp unsafe.Pointer) {
throw("missing deferreturn")
}
- maxargsize, _ := readvarintUnsafe(fd)
- d1 := newdefer(int32(maxargsize))
+ d1 := newdefer()
d1.openDefer = true
d1._panic = nil
// These are the pc/sp to set after we've
@@ -845,57 +678,29 @@ func runOpenDeferFrame(gp *g, d *_defer) bool {
done := true
fd := d.fd
- // Skip the maxargsize
- _, fd = readvarintUnsafe(fd)
deferBitsOffset, fd := readvarintUnsafe(fd)
nDefers, fd := readvarintUnsafe(fd)
deferBits := *(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset)))
for i := int(nDefers) - 1; i >= 0; i-- {
// read the funcdata info for this defer
- var argWidth, closureOffset, nArgs uint32
- argWidth, fd = readvarintUnsafe(fd)
+ var closureOffset uint32
closureOffset, fd = readvarintUnsafe(fd)
- nArgs, fd = readvarintUnsafe(fd)
- if goexperiment.RegabiDefer && argWidth != 0 {
- throw("defer with non-empty frame")
- }
if deferBits&(1<<i) == 0 {
- for j := uint32(0); j < nArgs; j++ {
- _, fd = readvarintUnsafe(fd)
- _, fd = readvarintUnsafe(fd)
- _, fd = readvarintUnsafe(fd)
- }
continue
}
- closure := *(**funcval)(unsafe.Pointer(d.varp - uintptr(closureOffset)))
+ closure := *(*func())(unsafe.Pointer(d.varp - uintptr(closureOffset)))
d.fn = closure
- deferArgs := deferArgs(d)
- // If there is an interface receiver or method receiver, it is
- // described/included as the first arg.
- for j := uint32(0); j < nArgs; j++ {
- var argOffset, argLen, argCallOffset uint32
- argOffset, fd = readvarintUnsafe(fd)
- argLen, fd = readvarintUnsafe(fd)
- argCallOffset, fd = readvarintUnsafe(fd)
- memmove(unsafe.Pointer(uintptr(deferArgs)+uintptr(argCallOffset)),
- unsafe.Pointer(d.varp-uintptr(argOffset)),
- uintptr(argLen))
- }
deferBits = deferBits &^ (1 << i)
*(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset))) = deferBits
p := d._panic
- if goexperiment.RegabiDefer {
- deferCallSave(p, deferFunc(d))
- } else {
- reflectcallSave(p, unsafe.Pointer(closure), deferArgs, argWidth)
- }
+ // Call the defer. Note that this can change d.varp if
+ // the stack moves.
+ deferCallSave(p, d.fn)
if p != nil && p.aborted {
break
}
d.fn = nil
- // These args are just a copy, so can be cleared immediately
- memclrNoHeapPointers(deferArgs, uintptr(argWidth))
if d._panic != nil && d._panic.recovered {
done = deferBits == 0
break
@@ -905,32 +710,6 @@ func runOpenDeferFrame(gp *g, d *_defer) bool {
return done
}
-// reflectcallSave calls reflectcall after saving the caller's pc and sp in the
-// panic record. This allows the runtime to return to the Goexit defer processing
-// loop, in the unusual case where the Goexit may be bypassed by a successful
-// recover.
-//
-// This is marked as a wrapper by the compiler so it doesn't appear in
-// tracebacks.
-func reflectcallSave(p *_panic, fn, arg unsafe.Pointer, argsize uint32) {
- if goexperiment.RegabiDefer {
- throw("not allowed with GOEXPERIMENT=regabidefer")
- }
- if p != nil {
- p.argp = unsafe.Pointer(getargp())
- p.pc = getcallerpc()
- p.sp = unsafe.Pointer(getcallersp())
- }
- // Pass a dummy RegArgs since we'll only take this path if
- // we're not using the register ABI.
- var regs abi.RegArgs
- reflectcall(nil, fn, arg, argsize, argsize, argsize, &regs)
- if p != nil {
- p.pc = 0
- p.sp = unsafe.Pointer(nil)
- }
-}
-
// deferCallSave calls fn() after saving the caller's pc and sp in the
// panic record. This allows the runtime to return to the Goexit defer
// processing loop, in the unusual case where the Goexit may be
@@ -939,9 +718,6 @@ func reflectcallSave(p *_panic, fn, arg unsafe.Pointer, argsize uint32) {
// This is marked as a wrapper by the compiler so it doesn't appear in
// tracebacks.
func deferCallSave(p *_panic, fn func()) {
- if !goexperiment.RegabiDefer {
- throw("only allowed with GOEXPERIMENT=regabidefer")
- }
if p != nil {
p.argp = unsafe.Pointer(getargp())
p.pc = getcallerpc()
@@ -1041,16 +817,7 @@ func gopanic(e interface{}) {
}
} else {
p.argp = unsafe.Pointer(getargp())
-
- if goexperiment.RegabiDefer {
- fn := deferFunc(d)
- fn()
- } else {
- // Pass a dummy RegArgs since we'll only take this path if
- // we're not using the register ABI.
- var regs abi.RegArgs
- reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz), uint32(d.siz), &regs)
- }
+ d.fn()
}
p.argp = nil
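The panic.go rewrite above works because deferred calls (and, in the proc.go changes later in this diff, go statements) no longer carry an argument frame: the compiler evaluates the arguments at the defer statement and wraps the call in a no-argument closure, so deferproc, newdefer, and deferreturn only ever handle a func(). A sketch of that lowering expressed in ordinary Go — the explicit closure here stands in for what the compiler now emits:

    package main

    import "fmt"

    func report(x int) { fmt.Println("deferred saw", x) }

    func main() {
    	x := 1
    	// What the compiler conceptually does for `defer report(x)` under the
    	// scheme above: evaluate the argument now, capture it in a closure,
    	// and hand the runtime a plain func() with no argument frame.
    	arg := x
    	defer func() { report(arg) }()
    	x = 2 // has no effect on the deferred call; it prints "deferred saw 1"
    }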
diff --git a/src/runtime/pprof/pprof.go b/src/runtime/pprof/pprof.go
index 99eda10f1c..000abf935c 100644
--- a/src/runtime/pprof/pprof.go
+++ b/src/runtime/pprof/pprof.go
@@ -76,6 +76,7 @@ import (
"bufio"
"bytes"
"fmt"
+ "internal/abi"
"io"
"runtime"
"sort"
@@ -289,7 +290,7 @@ func (p *Profile) Add(value interface{}, skip int) {
stk = stk[:n]
if len(stk) == 0 {
// The value for skip is too large, and there's no stack trace to record.
- stk = []uintptr{funcPC(lostProfileEvent)}
+ stk = []uintptr{abi.FuncPCABIInternal(lostProfileEvent)}
}
p.mu.Lock()
diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go
index 7c71d8263b..e0d32a0f54 100644
--- a/src/runtime/pprof/pprof_test.go
+++ b/src/runtime/pprof/pprof_test.go
@@ -11,6 +11,7 @@ import (
"bytes"
"context"
"fmt"
+ "internal/abi"
"internal/profile"
"internal/testenv"
"io"
@@ -116,7 +117,7 @@ func containsInlinedCall(f interface{}, maxBytes int) bool {
// findInlinedCall returns the PC of an inlined function call within
// the function body for the function f if any.
func findInlinedCall(f interface{}, maxBytes int) (pc uint64, found bool) {
- fFunc := runtime.FuncForPC(uintptr(funcPC(f)))
+ fFunc := runtime.FuncForPC(uintptr(abi.FuncPCABIInternal(f)))
if fFunc == nil || fFunc.Entry() == 0 {
panic("failed to locate function entry")
}
diff --git a/src/runtime/pprof/proto.go b/src/runtime/pprof/proto.go
index bdb4454b6e..6862513956 100644
--- a/src/runtime/pprof/proto.go
+++ b/src/runtime/pprof/proto.go
@@ -8,6 +8,7 @@ import (
"bytes"
"compress/gzip"
"fmt"
+ "internal/abi"
"io"
"os"
"runtime"
@@ -21,11 +22,6 @@ import (
// (The name shows up in the pprof graphs.)
func lostProfileEvent() { lostProfileEvent() }
-// funcPC returns the PC for the func value f.
-func funcPC(f interface{}) uintptr {
- return *(*[2]*uintptr)(unsafe.Pointer(&f))[1]
-}
-
// A profileBuilder writes a profile incrementally from a
// stream of profile samples delivered by the runtime.
type profileBuilder struct {
@@ -325,7 +321,7 @@ func (b *profileBuilder) addCPUData(data []uint64, tags []unsafe.Pointer) error
// gentraceback guarantees that PCs in the
// stack can be unconditionally decremented and
// still be valid, so we must do the same.
- uint64(funcPC(lostProfileEvent) + 1),
+ uint64(abi.FuncPCABIInternal(lostProfileEvent) + 1),
}
}
b.m.lookup(stk, tag).count += int64(count)
diff --git a/src/runtime/pprof/proto_test.go b/src/runtime/pprof/proto_test.go
index 5eb1aab140..d052b9fa42 100644
--- a/src/runtime/pprof/proto_test.go
+++ b/src/runtime/pprof/proto_test.go
@@ -8,6 +8,7 @@ import (
"bytes"
"encoding/json"
"fmt"
+ "internal/abi"
"internal/profile"
"internal/testenv"
"os"
@@ -97,11 +98,11 @@ func testPCs(t *testing.T) (addr1, addr2 uint64, map1, map2 *profile.Mapping) {
map2 = mprof.Mapping[1]
map2.BuildID, _ = elfBuildID(map2.File)
case "js":
- addr1 = uint64(funcPC(f1))
- addr2 = uint64(funcPC(f2))
+ addr1 = uint64(abi.FuncPCABIInternal(f1))
+ addr2 = uint64(abi.FuncPCABIInternal(f2))
default:
- addr1 = uint64(funcPC(f1))
- addr2 = uint64(funcPC(f2))
+ addr1 = uint64(abi.FuncPCABIInternal(f1))
+ addr2 = uint64(abi.FuncPCABIInternal(f2))
// Fake mapping - HasFunctions will be true because two PCs from Go
// will be fully symbolized.
fake := &profile.Mapping{ID: 1, HasFunctions: true}
diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go
index 1d5aae1363..a38ab79398 100644
--- a/src/runtime/preempt.go
+++ b/src/runtime/preempt.go
@@ -53,8 +53,9 @@
package runtime
import (
+ "internal/abi"
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
@@ -315,12 +316,12 @@ func asyncPreempt2() {
var asyncPreemptStack = ^uintptr(0)
func init() {
- f := findfunc(funcPC(asyncPreempt))
+ f := findfunc(abi.FuncPCABI0(asyncPreempt))
total := funcMaxSPDelta(f)
- f = findfunc(funcPC(asyncPreempt2))
+ f = findfunc(abi.FuncPCABIInternal(asyncPreempt2))
total += funcMaxSPDelta(f)
// Add some overhead for return PCs, etc.
- asyncPreemptStack = uintptr(total) + 8*sys.PtrSize
+ asyncPreemptStack = uintptr(total) + 8*goarch.PtrSize
if asyncPreemptStack > _StackLimit {
// We need more than the nosplit limit. This isn't
// unsafe, but it may limit asynchronous preemption.
diff --git a/src/runtime/preempt_386.s b/src/runtime/preempt_386.s
index a803b24dc6..c3a5fa1f36 100644
--- a/src/runtime/preempt_386.s
+++ b/src/runtime/preempt_386.s
@@ -3,8 +3,7 @@
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
PUSHFL
ADJSP $156
NOP SP
diff --git a/src/runtime/preempt_amd64.s b/src/runtime/preempt_amd64.s
index dc7af806d3..31f7c8b66f 100644
--- a/src/runtime/preempt_amd64.s
+++ b/src/runtime/preempt_amd64.s
@@ -3,8 +3,7 @@
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
PUSHQ BP
MOVQ SP, BP
// Save flags before clobbering them
diff --git a/src/runtime/preempt_arm.s b/src/runtime/preempt_arm.s
index bbc9fbb1ea..8f243c0dcd 100644
--- a/src/runtime/preempt_arm.s
+++ b/src/runtime/preempt_arm.s
@@ -3,8 +3,7 @@
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
MOVW.W R14, -188(R13)
MOVW R0, 4(R13)
MOVW R1, 8(R13)
diff --git a/src/runtime/preempt_arm64.s b/src/runtime/preempt_arm64.s
index 2b70a28479..36ee13282c 100644
--- a/src/runtime/preempt_arm64.s
+++ b/src/runtime/preempt_arm64.s
@@ -3,8 +3,7 @@
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
MOVD R30, -496(RSP)
SUB $496, RSP
#ifdef GOOS_linux
diff --git a/src/runtime/preempt_mips64x.s b/src/runtime/preempt_mips64x.s
index b755425bc5..c1249e382e 100644
--- a/src/runtime/preempt_mips64x.s
+++ b/src/runtime/preempt_mips64x.s
@@ -6,8 +6,7 @@
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
MOVV R31, -488(R29)
SUBV $488, R29
MOVV R1, 8(R29)
diff --git a/src/runtime/preempt_mipsx.s b/src/runtime/preempt_mipsx.s
index c1bff60859..70b79e05b9 100644
--- a/src/runtime/preempt_mipsx.s
+++ b/src/runtime/preempt_mipsx.s
@@ -6,8 +6,7 @@
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
MOVW R31, -244(R29)
SUB $244, R29
MOVW R1, 4(R29)
diff --git a/src/runtime/preempt_ppc64x.s b/src/runtime/preempt_ppc64x.s
index 70bd91982b..7ed4021dde 100644
--- a/src/runtime/preempt_ppc64x.s
+++ b/src/runtime/preempt_ppc64x.s
@@ -6,8 +6,7 @@
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
MOVD R31, -488(R1)
MOVD LR, R31
MOVDU R31, -520(R1)
diff --git a/src/runtime/preempt_riscv64.s b/src/runtime/preempt_riscv64.s
index d4f9cc277f..eb68dcba2b 100644
--- a/src/runtime/preempt_riscv64.s
+++ b/src/runtime/preempt_riscv64.s
@@ -3,8 +3,7 @@
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
MOV X1, -472(X2)
ADD $-472, X2
MOV X3, 8(X2)
diff --git a/src/runtime/preempt_s390x.s b/src/runtime/preempt_s390x.s
index c6f11571df..ca9e47cde1 100644
--- a/src/runtime/preempt_s390x.s
+++ b/src/runtime/preempt_s390x.s
@@ -3,8 +3,7 @@
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
IPM R10
MOVD R14, -248(R15)
ADD $-248, R15
diff --git a/src/runtime/preempt_wasm.s b/src/runtime/preempt_wasm.s
index da90e8aa6d..0cf57d3d22 100644
--- a/src/runtime/preempt_wasm.s
+++ b/src/runtime/preempt_wasm.s
@@ -3,7 +3,6 @@
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
// No async preemption on wasm
UNDEF
diff --git a/src/runtime/print.go b/src/runtime/print.go
index f15296cf02..59a91203b9 100644
--- a/src/runtime/print.go
+++ b/src/runtime/print.go
@@ -5,8 +5,8 @@
package runtime
import (
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
@@ -271,7 +271,7 @@ func hexdumpWords(p, end uintptr, mark func(uintptr) byte) {
var markbuf [1]byte
markbuf[0] = ' '
minhexdigits = int(unsafe.Sizeof(uintptr(0)) * 2)
- for i := uintptr(0); p+i < end; i += sys.PtrSize {
+ for i := uintptr(0); p+i < end; i += goarch.PtrSize {
if i%16 == 0 {
if i != 0 {
println()
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 7bc2a92590..ec4be31db3 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -7,7 +7,7 @@ package runtime
import (
"internal/abi"
"internal/cpu"
- "internal/goexperiment"
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -152,7 +152,7 @@ func main() {
// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
// Using decimal instead of binary GB and MB because
// they look nicer in the stack overflow failure message.
- if sys.PtrSize == 8 {
+ if goarch.PtrSize == 8 {
maxstacksize = 1000000000
} else {
maxstacksize = 250000000
@@ -466,18 +466,6 @@ func releaseSudog(s *sudog) {
releasem(mp)
}
-// funcPC returns the entry PC of the function f.
-// It assumes that f is a func value. Otherwise the behavior is undefined.
-// CAREFUL: In programs with plugins, funcPC can return different values
-// for the same function (because there are actually multiple copies of
-// the same function in the address space). To be safe, don't use the
-// results of this function in any == expression. It is only safe to
-// use the result as an address at which to start executing code.
-//go:nosplit
-func funcPC(f interface{}) uintptr {
- return *(*uintptr)(efaceOf(&f).data)
-}
-
// called from assembly
func badmcall(fn func(*g)) {
throw("runtime: mcall called on m->g0 stack")
@@ -568,7 +556,7 @@ func atomicAllG() (**g, uintptr) {
// atomicAllGIndex returns ptr[i] with the allgptr returned from atomicAllG.
func atomicAllGIndex(ptr **g, i uintptr) *g {
- return *(**g)(add(unsafe.Pointer(ptr), i*sys.PtrSize))
+ return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
}
// forEachG calls fn on every G from allgs.
@@ -2027,7 +2015,7 @@ func oneNewExtraM() {
gp := malg(4096)
gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
gp.sched.sp = gp.stack.hi
- gp.sched.sp -= 4 * sys.PtrSize // extra space in case of reads slightly beyond frame
+ gp.sched.sp -= 4 * goarch.PtrSize // extra space in case of reads slightly beyond frame
gp.sched.lr = 0
gp.sched.g = guintptr(unsafe.Pointer(gp))
gp.syscallpc = gp.sched.pc
@@ -2045,7 +2033,7 @@ func oneNewExtraM() {
gp.lockedm.set(mp)
gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
if raceenabled {
- gp.racectx = racegostart(funcPC(newextram) + sys.PCQuantum)
+ gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
}
// put on allg for garbage collector
allgadd(gp)
@@ -2238,7 +2226,7 @@ func newm1(mp *m) {
}
ts.g.set(mp.g0)
ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
- ts.fn = unsafe.Pointer(funcPC(mstart))
+ ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
if msanenabled {
msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
}
@@ -4232,27 +4220,14 @@ func malg(stacksize int32) *g {
return newg
}
-// Create a new g running fn with siz bytes of arguments.
+// Create a new g running fn.
// Put it on the queue of g's waiting to run.
// The compiler turns a go statement into a call to this.
-//
-// The stack layout of this call is unusual: it assumes that the
-// arguments to pass to fn are on the stack sequentially immediately
-// after &fn. Hence, they are logically part of newproc's argument
-// frame, even though they don't appear in its signature (and can't
-// because their types differ between call sites).
-//
-// This must be nosplit because this stack layout means there are
-// untyped arguments in newproc's argument frame. Stack copies won't
-// be able to adjust them and stack splits won't be able to copy them.
-//
-//go:nosplit
-func newproc(siz int32, fn *funcval) {
- argp := add(unsafe.Pointer(&fn), sys.PtrSize)
+func newproc(fn *funcval) {
gp := getg()
pc := getcallerpc()
systemstack(func() {
- newg := newproc1(fn, argp, siz, gp, pc)
+ newg := newproc1(fn, gp, pc)
_p_ := getg().m.p.ptr()
runqput(_p_, newg, true)
@@ -4263,24 +4238,10 @@ func newproc(siz int32, fn *funcval) {
})
}
-// Create a new g in state _Grunnable, starting at fn, with narg bytes
-// of arguments starting at argp. callerpc is the address of the go
-// statement that created this. The caller is responsible for adding
-// the new g to the scheduler.
-//
-// This must run on the system stack because it's the continuation of
-// newproc, which cannot split the stack.
-//
-//go:systemstack
-func newproc1(fn *funcval, argp unsafe.Pointer, narg int32, callergp *g, callerpc uintptr) *g {
- if goexperiment.RegabiDefer && narg != 0 {
- // TODO: When we commit to GOEXPERIMENT=regabidefer,
- // rewrite the comments for newproc and newproc1.
- // newproc will no longer have a funny stack layout or
- // need to be nosplit.
- throw("go with non-empty frame")
- }
-
+// Create a new g in state _Grunnable, starting at fn. callerpc is the
+// address of the go statement that created this. The caller is responsible
+// for adding the new g to the scheduler.
+func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
_g_ := getg()
if fn == nil {
@@ -4288,16 +4249,6 @@ func newproc1(fn *funcval, argp unsafe.Pointer, narg int32, callergp *g, callerp
throw("go of nil func value")
}
acquirem() // disable preemption because it can be holding p in a local var
- siz := narg
- siz = (siz + 7) &^ 7
-
- // We could allocate a larger initial stack if necessary.
- // Not worth it: this is almost always an error.
- // 4*PtrSize: extra space added below
- // PtrSize: caller's LR (arm) or return address (x86, in gostartcall).
- if siz >= _StackMin-4*sys.PtrSize-sys.PtrSize {
- throw("newproc: function arguments too large for new goroutine")
- }
_p_ := _g_.m.p.ptr()
newg := gfget(_p_)
@@ -4314,8 +4265,8 @@ func newproc1(fn *funcval, argp unsafe.Pointer, narg int32, callergp *g, callerp
throw("newproc1: new g is not Gdead")
}
- totalSize := 4*sys.PtrSize + uintptr(siz) + sys.MinFrameSize // extra space in case of reads slightly beyond frame
- totalSize += -totalSize & (sys.StackAlign - 1) // align to StackAlign
+ totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize) // extra space in case of reads slightly beyond frame
+ totalSize = alignUp(totalSize, sys.StackAlign)
sp := newg.stack.hi - totalSize
spArg := sp
if usesLR {
@@ -4324,24 +4275,6 @@ func newproc1(fn *funcval, argp unsafe.Pointer, narg int32, callergp *g, callerp
prepGoExitFrame(sp)
spArg += sys.MinFrameSize
}
- if narg > 0 {
- memmove(unsafe.Pointer(spArg), argp, uintptr(narg))
- // This is a stack-to-stack copy. If write barriers
- // are enabled and the source stack is grey (the
- // destination is always black), then perform a
- // barrier copy. We do this *after* the memmove
- // because the destination stack may have garbage on
- // it.
- if writeBarrier.needed && !_g_.m.curg.gcscandone {
- f := findfunc(fn.fn)
- stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
- if stkmap.nbit > 0 {
- // We're in the prologue, so it's always stack map index 0.
- bv := stackmapdata(stkmap, 0)
- bulkBarrierBitmap(spArg, spArg, uintptr(bv.n)*sys.PtrSize, 0, bv.bytedata)
- }
- }
- }
memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
newg.sched.sp = sp
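The removed memmove block above existed only because `go f(x, y)` used to copy the call's arguments into the new goroutine's stack. With the register-ABI work the compiler closes over the arguments instead, so newproc needs nothing but the *funcval. A minimal sketch of what the rewrite amounts to, in ordinary Go (the explicit wrapper is illustrative, not the compiler's actual output):

package main

import "fmt"

func f(x, y int) { fmt.Println(x + y) }

func main() {
	x, y := 1, 2
	done := make(chan struct{})
	// Conceptually, "go f(x, y)" becomes a call of a niladic closure,
	// so no argument bytes ever live in newproc's frame.
	go func() { f(x, y); close(done) }()
	<-done
}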
@@ -4751,16 +4684,16 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
// If all of the above has failed, account it against abstract "System" or "GC".
n = 2
if inVDSOPage(pc) {
- pc = funcPC(_VDSO) + sys.PCQuantum
+ pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
} else if pc > firstmoduledata.etext {
// "ExternalCode" is better than "etext".
- pc = funcPC(_ExternalCode) + sys.PCQuantum
+ pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
}
stk[0] = pc
if mp.preemptoff != "" {
- stk[1] = funcPC(_GC) + sys.PCQuantum
+ stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
} else {
- stk[1] = funcPC(_System) + sys.PCQuantum
+ stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
}
}
}
@@ -4804,7 +4737,7 @@ func sigprofNonGoPC(pc uintptr) {
if prof.hz != 0 {
stk := []uintptr{
pc,
- funcPC(_ExternalCode) + sys.PCQuantum,
+ abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum,
}
cpuprof.addNonGo(stk)
}
@@ -4854,9 +4787,7 @@ func (pp *p) init(id int32) {
pp.id = id
pp.status = _Pgcstop
pp.sudogcache = pp.sudogbuf[:0]
- for i := range pp.deferpool {
- pp.deferpool[i] = pp.deferpoolbuf[i][:0]
- }
+ pp.deferpool = pp.deferpoolbuf[:0]
pp.wbBuf.reset()
if pp.mcache == nil {
if id == 0 {
@@ -4933,12 +4864,10 @@ func (pp *p) destroy() {
pp.sudogbuf[i] = nil
}
pp.sudogcache = pp.sudogbuf[:0]
- for i := range pp.deferpool {
- for j := range pp.deferpoolbuf[i] {
- pp.deferpoolbuf[i][j] = nil
- }
- pp.deferpool[i] = pp.deferpoolbuf[i][:0]
+ for j := range pp.deferpoolbuf {
+ pp.deferpoolbuf[j] = nil
}
+ pp.deferpool = pp.deferpoolbuf[:0]
systemstack(func() {
for i := 0; i < pp.mspancache.len; i++ {
// Safe to call since the world is stopped.
@@ -6463,7 +6392,7 @@ func doInit(t *initTask) {
t.state = 1 // initialization in progress
for i := uintptr(0); i < t.ndeps; i++ {
- p := add(unsafe.Pointer(t), (3+i)*sys.PtrSize)
+ p := add(unsafe.Pointer(t), (3+i)*goarch.PtrSize)
t2 := *(**initTask)(p)
doInit(t2)
}
@@ -6484,9 +6413,9 @@ func doInit(t *initTask) {
before = inittrace
}
- firstFunc := add(unsafe.Pointer(t), (3+t.ndeps)*sys.PtrSize)
+ firstFunc := add(unsafe.Pointer(t), (3+t.ndeps)*goarch.PtrSize)
for i := uintptr(0); i < t.nfns; i++ {
- p := add(firstFunc, i*sys.PtrSize)
+ p := add(firstFunc, i*goarch.PtrSize)
f := *(*func())(unsafe.Pointer(&p))
f()
}
@@ -6496,7 +6425,8 @@ func doInit(t *initTask) {
// Load stats non-atomically since tracinit is updated only by this init goroutine.
after := inittrace
- pkg := funcpkgpath(findfunc(funcPC(firstFunc)))
+ f := *(*func())(unsafe.Pointer(&firstFunc))
+ pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
var sbuf [24]byte
print("init ", pkg, " @")
diff --git a/src/runtime/race.go b/src/runtime/race.go
index ce6b5b5468..7eaa9d2b72 100644
--- a/src/runtime/race.go
+++ b/src/runtime/race.go
@@ -8,6 +8,7 @@
package runtime
import (
+ "internal/abi"
"unsafe"
)
@@ -361,7 +362,7 @@ func raceinit() (gctx, pctx uintptr) {
throw("raceinit: race build must use cgo")
}
- racecall(&__tsan_init, uintptr(unsafe.Pointer(&gctx)), uintptr(unsafe.Pointer(&pctx)), funcPC(racecallbackthunk), 0)
+ racecall(&__tsan_init, uintptr(unsafe.Pointer(&gctx)), uintptr(unsafe.Pointer(&pctx)), abi.FuncPCABI0(racecallbackthunk), 0)
// Round data segment to page boundaries, because it's used in mmap().
start := ^uintptr(0)
diff --git a/src/runtime/race/output_test.go b/src/runtime/race/output_test.go
index 99052071d0..63fcd847dc 100644
--- a/src/runtime/race/output_test.go
+++ b/src/runtime/race/output_test.go
@@ -148,7 +148,7 @@ exit status 66
package main
func main() {
done := make(chan bool)
- x := 0
+ x := 0; _ = x
go func() {
x = 42
done <- true
@@ -162,7 +162,7 @@ func main() {
package main
func main() {
done := make(chan bool)
- x := 0
+ x := 0; _ = x
go func() {
x = 42
done <- true
@@ -178,7 +178,7 @@ func main() {
package main
func main() {
done := make(chan bool)
- x := 0
+ x := 0; _ = x
go func() {
x = 42
done <- true
diff --git a/src/runtime/race_amd64.s b/src/runtime/race_amd64.s
index 8d4813eadd..d42e415dca 100644
--- a/src/runtime/race_amd64.s
+++ b/src/runtime/race_amd64.s
@@ -46,11 +46,7 @@
// Defined as ABIInternal so as to avoid introducing a wrapper,
// which would render runtime.getcallerpc ineffective.
TEXT runtime·raceread<ABIInternal>(SB), NOSPLIT, $0-8
-#ifdef GOEXPERIMENT_regabiargs
MOVQ AX, RARG1
-#else
- MOVQ addr+0(FP), RARG1
-#endif
MOVQ (SP), RARG2
// void __tsan_read(ThreadState *thr, void *addr, void *pc);
MOVQ $__tsan_read(SB), AX
@@ -76,11 +72,7 @@ TEXT runtime·racereadpc(SB), NOSPLIT, $0-24
// Defined as ABIInternal so as to avoid introducing a wrapper,
// which would render runtime.getcallerpc ineffective.
TEXT runtime·racewrite<ABIInternal>(SB), NOSPLIT, $0-8
-#ifdef GOEXPERIMENT_regabiargs
MOVQ AX, RARG1
-#else
- MOVQ addr+0(FP), RARG1
-#endif
MOVQ (SP), RARG2
// void __tsan_write(ThreadState *thr, void *addr, void *pc);
MOVQ $__tsan_write(SB), AX
@@ -131,13 +123,8 @@ TEXT runtime·racereadrangepc1(SB), NOSPLIT, $0-24
// Defined as ABIInternal so as to avoid introducing a wrapper,
// which would render runtime.getcallerpc ineffective.
TEXT runtime·racewriterange<ABIInternal>(SB), NOSPLIT, $0-16
-#ifdef GOEXPERIMENT_regabiargs
MOVQ AX, RARG1
MOVQ BX, RARG2
-#else
- MOVQ addr+0(FP), RARG1
- MOVQ size+8(FP), RARG2
-#endif
MOVQ (SP), RARG3
// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
MOVQ $__tsan_write_range(SB), AX
@@ -161,10 +148,6 @@ TEXT runtime·racewriterangepc1(SB), NOSPLIT, $0-24
// If addr (RARG1) is out of range, do nothing.
// Otherwise, set up goroutine context and invoke racecall. Other arguments already set.
TEXT racecalladdr<>(SB), NOSPLIT, $0-0
-#ifndef GOEXPERIMENT_regabig
- get_tls(R12)
- MOVQ g(R12), R14
-#endif
MOVQ g_racectx(R14), RARG0 // goroutine context
// Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
CMPQ RARG1, runtime·racearenastart(SB)
@@ -192,10 +175,6 @@ TEXT runtime·racefuncenter(SB), NOSPLIT, $0-8
// R11 = caller's return address
TEXT racefuncenter<>(SB), NOSPLIT, $0-0
MOVQ DX, BX // save function entry context (for closures)
-#ifndef GOEXPERIMENT_regabig
- get_tls(R12)
- MOVQ g(R12), R14
-#endif
MOVQ g_racectx(R14), RARG0 // goroutine context
MOVQ R11, RARG1
// void __tsan_func_enter(ThreadState *thr, void *pc);
@@ -208,10 +187,6 @@ TEXT racefuncenter<>(SB), NOSPLIT, $0-0
// func runtime·racefuncexit()
// Called from instrumented code.
TEXT runtime·racefuncexit(SB), NOSPLIT, $0-0
-#ifndef GOEXPERIMENT_regabig
- get_tls(R12)
- MOVQ g(R12), R14
-#endif
MOVQ g_racectx(R14), RARG0 // goroutine context
// void __tsan_func_exit(ThreadState *thr);
MOVQ $__tsan_func_exit(SB), AX
@@ -370,10 +345,6 @@ racecallatomic_data:
JAE racecallatomic_ignore
racecallatomic_ok:
// Addr is within the good range, call the atomic function.
-#ifndef GOEXPERIMENT_regabig
- get_tls(R12)
- MOVQ g(R12), R14
-#endif
MOVQ g_racectx(R14), RARG0 // goroutine context
MOVQ 8(SP), RARG1 // caller pc
MOVQ (SP), RARG2 // pc
@@ -385,10 +356,6 @@ racecallatomic_ignore:
// An attempt to synchronize on the address would cause crash.
MOVQ AX, BX // remember the original function
MOVQ $__tsan_go_ignore_sync_begin(SB), AX
-#ifndef GOEXPERIMENT_regabig
- get_tls(R12)
- MOVQ g(R12), R14
-#endif
MOVQ g_racectx(R14), RARG0 // goroutine context
CALL racecall<>(SB)
MOVQ BX, AX // restore the original function
@@ -416,10 +383,6 @@ TEXT runtime·racecall(SB), NOSPLIT, $0-0
// Switches SP to g0 stack and calls (AX). Arguments already set.
TEXT racecall<>(SB), NOSPLIT, $0-0
-#ifndef GOEXPERIMENT_regabig
- get_tls(R12)
- MOVQ g(R12), R14
-#endif
MOVQ g_m(R14), R13
// Switch to g0 stack.
MOVQ SP, R12 // callee-saved, preserved across the CALL
@@ -441,9 +404,7 @@ call:
// The overall effect of Go->C->Go call chain is similar to that of mcall.
// RARG0 contains command code. RARG1 contains command-specific context.
// See racecallback for command codes.
-// Defined as ABIInternal so as to avoid introducing a wrapper,
-// because its address is passed to C via funcPC.
-TEXT runtime·racecallbackthunk<ABIInternal>(SB), NOSPLIT, $0-0
+TEXT runtime·racecallbackthunk(SB), NOSPLIT, $0-0
// Handle command raceGetProcCmd (0) here.
// First, code below assumes that we are on curg, while raceGetProcCmd
// can be executed on g0. Second, it is called frequently, so will
diff --git a/src/runtime/race_arm64.s b/src/runtime/race_arm64.s
index c6d5b91edc..2b2413b6b7 100644
--- a/src/runtime/race_arm64.s
+++ b/src/runtime/race_arm64.s
@@ -43,8 +43,14 @@
// func runtime·raceread(addr uintptr)
// Called from instrumented code.
-TEXT runtime·raceread(SB), NOSPLIT, $0-8
+// Defined as ABIInternal so as to avoid introducing a wrapper,
+// which would make the caller's PC ineffective.
+TEXT runtime·raceread<ABIInternal>(SB), NOSPLIT, $0-8
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R0, R1 // addr
+#else
MOVD addr+0(FP), R1
+#endif
MOVD LR, R2
// void __tsan_read(ThreadState *thr, void *addr, void *pc);
MOVD $__tsan_read(SB), R9
@@ -66,8 +72,14 @@ TEXT runtime·racereadpc(SB), NOSPLIT, $0-24
// func runtime·racewrite(addr uintptr)
// Called from instrumented code.
-TEXT runtime·racewrite(SB), NOSPLIT, $0-8
+// Defined as ABIInternal so as to avoid introducing a wrapper,
+// which would make the caller's PC ineffective.
+TEXT runtime·racewrite<ABIInternal>(SB), NOSPLIT, $0-8
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R0, R1 // addr
+#else
MOVD addr+0(FP), R1
+#endif
MOVD LR, R2
// void __tsan_write(ThreadState *thr, void *addr, void *pc);
MOVD $__tsan_write(SB), R9
@@ -89,9 +101,16 @@ TEXT runtime·racewritepc(SB), NOSPLIT, $0-24
// func runtime·racereadrange(addr, size uintptr)
// Called from instrumented code.
-TEXT runtime·racereadrange(SB), NOSPLIT, $0-16
+// Defined as ABIInternal so as to avoid introducing a wrapper,
+// which would make the caller's PC ineffective.
+TEXT runtime·racereadrange<ABIInternal>(SB), NOSPLIT, $0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R1, R2 // size
+ MOVD R0, R1 // addr
+#else
MOVD addr+0(FP), R1
MOVD size+8(FP), R2
+#endif
MOVD LR, R3
// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
MOVD $__tsan_read_range(SB), R9
@@ -114,9 +133,16 @@ TEXT runtime·racereadrangepc1(SB), NOSPLIT, $0-24
// func runtime·racewriterange(addr, size uintptr)
// Called from instrumented code.
-TEXT runtime·racewriterange(SB), NOSPLIT, $0-16
+// Defined as ABIInternal so as to avoid introducing a wrapper,
+// which would make the caller's PC ineffective.
+TEXT runtime·racewriterange<ABIInternal>(SB), NOSPLIT, $0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R1, R2 // size
+ MOVD R0, R1 // addr
+#else
MOVD addr+0(FP), R1
MOVD size+8(FP), R2
+#endif
MOVD LR, R3
// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
MOVD $__tsan_write_range(SB), R9
diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go
index b238da8f51..b6c3cbfff4 100644
--- a/src/runtime/runtime1.go
+++ b/src/runtime/runtime1.go
@@ -6,8 +6,8 @@ package runtime
import (
"internal/bytealg"
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
@@ -55,7 +55,7 @@ var (
// nosplit for use in linux startup sysargs
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
- return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
+ return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
}
func args(c int32, v **byte) {
@@ -190,10 +190,10 @@ func check() {
if unsafe.Sizeof(j) != 8 {
throw("bad j")
}
- if unsafe.Sizeof(k) != sys.PtrSize {
+ if unsafe.Sizeof(k) != goarch.PtrSize {
throw("bad k")
}
- if unsafe.Sizeof(l) != sys.PtrSize {
+ if unsafe.Sizeof(l) != goarch.PtrSize {
throw("bad l")
}
if unsafe.Sizeof(x1) != 1 {
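The sys.PtrSize -> goarch.PtrSize substitutions here and throughout the patch are mechanical: the constant is still the pointer size in bytes, it just moved to internal/goarch. That package is not importable outside the standard library, but the value it exports is the familiar constant expression; a small sketch:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// Essentially how internal/goarch derives PtrSize:
	// 4 on 32-bit targets, 8 on 64-bit targets.
	const ptrSize = 4 << (^uintptr(0) >> 63)
	fmt.Println(ptrSize == unsafe.Sizeof(uintptr(0))) // true
}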
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index 5051ec4d3e..e4e9ee50b8 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -5,8 +5,8 @@
package runtime
import (
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
@@ -505,7 +505,7 @@ const (
// tlsSlots is the number of pointer-sized slots reserved for TLS on some platforms,
// like Windows.
tlsSlots = 6
- tlsSize = tlsSlots * sys.PtrSize
+ tlsSize = tlsSlots * goarch.PtrSize
)
type m struct {
@@ -613,8 +613,8 @@ type p struct {
pcache pageCache
raceprocctx uintptr
- deferpool [5][]*_defer // pool of available defer structs of different sizes (see panic.go)
- deferpoolbuf [5][32]*_defer
+ deferpool []*_defer // pool of available defer structs (see panic.go)
+ deferpoolbuf [32]*_defer
// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
goidcache uint64
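Because _defer records no longer carry argument bytes, every record is the same size and the five size-classed pools collapse into a single free list resliced over one backing array. A minimal sketch of that single-class pool shape, with illustrative types rather than the runtime's:

package main

import "fmt"

type record struct{ fn func() }

type pool struct {
	buf  [32]*record
	free []*record // reslice of buf, like p.deferpool over p.deferpoolbuf
}

func (p *pool) get() *record {
	if n := len(p.free); n > 0 {
		r := p.free[n-1]
		p.free = p.free[:n-1]
		return r
	}
	return new(record)
}

func (p *pool) put(r *record) {
	*r = record{} // clear before caching, as freedefer does
	if len(p.free) < cap(p.free) {
		p.free = append(p.free, r)
	}
}

func main() {
	var p pool
	p.free = p.buf[:0]
	r := p.get()
	r.fn = func() { fmt.Println("deferred") }
	r.fn()
	p.put(r)
}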
@@ -795,9 +795,9 @@ type schedt struct {
sudoglock mutex
sudogcache *sudog
- // Central pool of available defer structs of different sizes.
+ // Central pool of available defer structs.
deferlock mutex
- deferpool [5]*_defer
+ deferpool *_defer
// freem is the list of m's waiting to be freed when their
// m.exited is set. Linked through m.freelink.
@@ -924,7 +924,7 @@ func extendRandom(r []byte, n int) {
w = 16
}
h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
- for i := 0; i < sys.PtrSize && n < len(r); i++ {
+ for i := 0; i < goarch.PtrSize && n < len(r); i++ {
r[n] = byte(h)
n++
h >>= 8
@@ -934,25 +934,24 @@ func extendRandom(r []byte, n int) {
// A _defer holds an entry on the list of deferred calls.
// If you add a field here, add code to clear it in freedefer and deferProcStack
-// This struct must match the code in cmd/compile/internal/reflectdata/reflect.go:deferstruct
-// and cmd/compile/internal/gc/ssa.go:(*state).call.
+// This struct must match the code in cmd/compile/internal/ssagen/ssa.go:deferstruct
+// and cmd/compile/internal/ssagen/ssa.go:(*state).call.
// Some defers will be allocated on the stack and some on the heap.
// All defers are logically part of the stack, so write barriers to
// initialize them are not required. All defers must be manually scanned,
// and for heap defers, marked.
type _defer struct {
- siz int32 // includes both arguments and results
started bool
heap bool
// openDefer indicates that this _defer is for a frame with open-coded
// defers. We have only one defer record for the entire frame (which may
// currently have 0, 1, or more defers active).
openDefer bool
- sp uintptr // sp at time of defer
- pc uintptr // pc at time of defer
- fn *funcval // can be nil for open-coded defers
- _panic *_panic // panic that is running defer
- link *_defer
+ sp uintptr // sp at time of defer
+ pc uintptr // pc at time of defer
+ fn func() // can be nil for open-coded defers
+ _panic *_panic // panic that is running defer
+ link *_defer // next defer on G; can point to either heap or stack!
// If openDefer is true, the fields below record values about the stack
// frame and associated function that has the open-coded defer(s). sp
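With siz gone and fn changed from *funcval to a plain func(), running a deferred call no longer involves copying an argument block and reflect-calling into it; the runtime can simply invoke the closure. A rough sketch of that shape (not the runtime's actual deferreturn logic):

package main

import "fmt"

type _deferSketch struct {
	started bool
	fn      func() // nil for open-coded defers
	link    *_deferSketch
}

func runDefers(d *_deferSketch) {
	for ; d != nil; d = d.link {
		if fn := d.fn; fn != nil {
			d.fn = nil
			fn() // just a closure call; any arguments live in the closure
		}
	}
}

func main() {
	d2 := &_deferSketch{fn: func() { fmt.Println("second") }}
	d1 := &_deferSketch{fn: func() { fmt.Println("first") }, link: d2}
	runDefers(d1)
}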
diff --git a/src/runtime/select.go b/src/runtime/select.go
index 06edb69f42..ee1f95ffa9 100644
--- a/src/runtime/select.go
+++ b/src/runtime/select.go
@@ -7,6 +7,7 @@ package runtime
// This file contains the implementation of Go select statements.
import (
+ "internal/abi"
"runtime/internal/atomic"
"unsafe"
)
@@ -22,8 +23,8 @@ type scase struct {
}
var (
- chansendpc = funcPC(chansend)
- chanrecvpc = funcPC(chanrecv)
+ chansendpc = abi.FuncPCABIInternal(chansend)
+ chanrecvpc = abi.FuncPCABIInternal(chanrecv)
)
func selectsetpc(pc *uintptr) {
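The recurring funcPC replacements follow one rule: abi.FuncPCABIInternal for Go (ABIInternal) functions such as chansend and sighandler, abi.FuncPCABI0 for assembly or otherwise ABI0 entry points such as sigpanic0 and the libc trampolines; picking the wrong variant can hand out the PC of an ABI wrapper instead of the real entry. Those helpers are compiler intrinsics internal to the runtime; outside it, the closest portable analogue for "entry PC of a function" is reflect, as in this sketch (chansendLike is a stand-in name):

package main

import (
	"fmt"
	"reflect"
	"runtime"
)

func chansendLike(x int) {} // stand-in for a runtime Go function

func main() {
	pc := reflect.ValueOf(chansendLike).Pointer()
	fmt.Println(runtime.FuncForPC(pc).Name()) // main.chansendLike
}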
diff --git a/src/runtime/signal_386.go b/src/runtime/signal_386.go
index 5824eaddb5..69a59e6dcf 100644
--- a/src/runtime/signal_386.go
+++ b/src/runtime/signal_386.go
@@ -8,7 +8,8 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
@@ -42,17 +43,17 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
sp := uintptr(c.esp())
if shouldPushSigpanic(gp, pc, *(*uintptr)(unsafe.Pointer(sp))) {
- c.pushCall(funcPC(sigpanic), pc)
+ c.pushCall(abi.FuncPCABIInternal(sigpanic), pc)
} else {
// Not safe to push the call. Just clobber the frame.
- c.set_eip(uint32(funcPC(sigpanic)))
+ c.set_eip(uint32(abi.FuncPCABIInternal(sigpanic)))
}
}
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
// Make it look like we called target at resumePC.
sp := uintptr(c.esp())
- sp -= sys.PtrSize
+ sp -= goarch.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = resumePC
c.set_esp(uint32(sp))
c.set_eip(uint32(targetPC))
diff --git a/src/runtime/signal_aix_ppc64.go b/src/runtime/signal_aix_ppc64.go
index a0becd431e..5999d9dc3d 100644
--- a/src/runtime/signal_aix_ppc64.go
+++ b/src/runtime/signal_aix_ppc64.go
@@ -8,7 +8,7 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
@@ -82,5 +82,5 @@ func (c *sigctxt) set_link(x uint64) { c.regs().lr = x }
func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint64) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
diff --git a/src/runtime/signal_amd64.go b/src/runtime/signal_amd64.go
index e45fbb4a87..20490cffbf 100644
--- a/src/runtime/signal_amd64.go
+++ b/src/runtime/signal_amd64.go
@@ -9,7 +9,8 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
@@ -70,17 +71,17 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
// Go special registers. We inject sigpanic0 (instead of sigpanic),
// which takes care of that.
if shouldPushSigpanic(gp, pc, *(*uintptr)(unsafe.Pointer(sp))) {
- c.pushCall(funcPC(sigpanic0), pc)
+ c.pushCall(abi.FuncPCABI0(sigpanic0), pc)
} else {
// Not safe to push the call. Just clobber the frame.
- c.set_rip(uint64(funcPC(sigpanic0)))
+ c.set_rip(uint64(abi.FuncPCABI0(sigpanic0)))
}
}
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
// Make it look like we called target at resumePC.
sp := uintptr(c.rsp())
- sp -= sys.PtrSize
+ sp -= goarch.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = resumePC
c.set_rsp(uint64(sp))
c.set_rip(uint64(targetPC))
diff --git a/src/runtime/signal_arm.go b/src/runtime/signal_arm.go
index 4d9c6224a2..a0780788f8 100644
--- a/src/runtime/signal_arm.go
+++ b/src/runtime/signal_arm.go
@@ -7,7 +7,10 @@
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
func dumpregs(c *sigctxt) {
print("trap ", hex(c.trap()), "\n")
@@ -61,7 +64,7 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
// In case we are panicking from external C code
c.set_r10(uint32(uintptr(unsafe.Pointer(gp))))
- c.set_pc(uint32(funcPC(sigpanic)))
+ c.set_pc(uint32(abi.FuncPCABIInternal(sigpanic)))
}
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
diff --git a/src/runtime/signal_arm64.go b/src/runtime/signal_arm64.go
index f04750084f..9d4a8b8a99 100644
--- a/src/runtime/signal_arm64.go
+++ b/src/runtime/signal_arm64.go
@@ -8,6 +8,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
@@ -77,7 +78,7 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
// In case we are panicking from external C code
c.set_r28(uint64(uintptr(unsafe.Pointer(gp))))
- c.set_pc(uint64(funcPC(sigpanic)))
+ c.set_pc(uint64(abi.FuncPCABIInternal(sigpanic)))
}
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
diff --git a/src/runtime/signal_linux_386.go b/src/runtime/signal_linux_386.go
index 13d9df4071..321518c18e 100644
--- a/src/runtime/signal_linux_386.go
+++ b/src/runtime/signal_linux_386.go
@@ -5,7 +5,7 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
@@ -42,5 +42,5 @@ func (c *sigctxt) set_eip(x uint32) { c.regs().eip = x }
func (c *sigctxt) set_esp(x uint32) { c.regs().esp = x }
func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint32) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
diff --git a/src/runtime/signal_linux_amd64.go b/src/runtime/signal_linux_amd64.go
index 210e8967e5..573b118397 100644
--- a/src/runtime/signal_linux_amd64.go
+++ b/src/runtime/signal_linux_amd64.go
@@ -5,7 +5,7 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
@@ -52,5 +52,5 @@ func (c *sigctxt) set_rip(x uint64) { c.regs().rip = x }
func (c *sigctxt) set_rsp(x uint64) { c.regs().rsp = x }
func (c *sigctxt) set_sigcode(x uint64) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint64) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
diff --git a/src/runtime/signal_linux_arm.go b/src/runtime/signal_linux_arm.go
index 876b505917..eb107d68d1 100644
--- a/src/runtime/signal_linux_arm.go
+++ b/src/runtime/signal_linux_arm.go
@@ -5,7 +5,7 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
@@ -54,5 +54,5 @@ func (c *sigctxt) set_r10(x uint32) { c.regs().r10 = x }
func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint32) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
diff --git a/src/runtime/signal_linux_arm64.go b/src/runtime/signal_linux_arm64.go
index 2075f253d7..4ccc030792 100644
--- a/src/runtime/signal_linux_arm64.go
+++ b/src/runtime/signal_linux_arm64.go
@@ -5,7 +5,7 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
@@ -67,5 +67,5 @@ func (c *sigctxt) set_lr(x uint64) { c.regs().regs[30] = x }
func (c *sigctxt) set_r28(x uint64) { c.regs().regs[28] = x }
func (c *sigctxt) set_sigaddr(x uint64) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
diff --git a/src/runtime/signal_linux_mips64x.go b/src/runtime/signal_linux_mips64x.go
index f0a75ac3ea..e62d6a93fd 100644
--- a/src/runtime/signal_linux_mips64x.go
+++ b/src/runtime/signal_linux_mips64x.go
@@ -9,7 +9,7 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
@@ -75,5 +75,5 @@ func (c *sigctxt) set_link(x uint64) { c.regs().sc_regs[31] = x }
func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint64) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
diff --git a/src/runtime/signal_linux_ppc64x.go b/src/runtime/signal_linux_ppc64x.go
index d9d3e55ec2..d2eeb39ead 100644
--- a/src/runtime/signal_linux_ppc64x.go
+++ b/src/runtime/signal_linux_ppc64x.go
@@ -9,7 +9,7 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
@@ -79,5 +79,5 @@ func (c *sigctxt) set_link(x uint64) { c.regs().link = x }
func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint64) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
diff --git a/src/runtime/signal_linux_riscv64.go b/src/runtime/signal_linux_riscv64.go
index 9f68e5c548..b26450dbfa 100644
--- a/src/runtime/signal_linux_riscv64.go
+++ b/src/runtime/signal_linux_riscv64.go
@@ -5,7 +5,7 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
@@ -64,5 +64,5 @@ func (c *sigctxt) set_gp(x uint64) { c.regs().sc_regs.gp = x }
func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint64) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
diff --git a/src/runtime/signal_linux_s390x.go b/src/runtime/signal_linux_s390x.go
index 12d5c31593..18c3b115ef 100644
--- a/src/runtime/signal_linux_s390x.go
+++ b/src/runtime/signal_linux_s390x.go
@@ -5,6 +5,8 @@
package runtime
import (
+ "internal/abi"
+ "internal/goarch"
"runtime/internal/sys"
"unsafe"
)
@@ -53,7 +55,7 @@ func (c *sigctxt) set_sp(x uint64) { c.regs().gregs[15] = x }
func (c *sigctxt) set_pc(x uint64) { c.regs().psw_addr = x }
func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint64) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
func dumpregs(c *sigctxt) {
@@ -107,7 +109,7 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
// In case we are panicking from external C code
c.set_r0(0)
c.set_r13(uint64(uintptr(unsafe.Pointer(gp))))
- c.set_pc(uint64(funcPC(sigpanic)))
+ c.set_pc(uint64(abi.FuncPCABIInternal(sigpanic)))
}
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
diff --git a/src/runtime/signal_mips64x.go b/src/runtime/signal_mips64x.go
index 1616b57027..87dfa724c4 100644
--- a/src/runtime/signal_mips64x.go
+++ b/src/runtime/signal_mips64x.go
@@ -9,7 +9,8 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
@@ -68,7 +69,7 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
// functions are correctly handled. This smashes
// the stack frame but we're not going back there
// anyway.
- sp := c.sp() - sys.PtrSize
+ sp := c.sp() - goarch.PtrSize
c.set_sp(sp)
*(*uint64)(unsafe.Pointer(uintptr(sp))) = c.link()
@@ -80,7 +81,7 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
}
// In case we are panicking from external C code
- sigpanicPC := uint64(funcPC(sigpanic))
+ sigpanicPC := uint64(abi.FuncPCABIInternal(sigpanic))
c.set_r28(sigpanicPC >> 32 << 32) // RSB register
c.set_r30(uint64(uintptr(unsafe.Pointer(gp))))
c.set_pc(sigpanicPC)
diff --git a/src/runtime/signal_mipsx.go b/src/runtime/signal_mipsx.go
index dcc7f1e9dd..5067799bd6 100644
--- a/src/runtime/signal_mipsx.go
+++ b/src/runtime/signal_mipsx.go
@@ -9,6 +9,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
@@ -78,7 +79,7 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
// In case we are panicking from external C code
c.set_r30(uint32(uintptr(unsafe.Pointer(gp))))
- c.set_pc(uint32(funcPC(sigpanic)))
+ c.set_pc(uint32(abi.FuncPCABIInternal(sigpanic)))
}
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
diff --git a/src/runtime/signal_ppc64x.go b/src/runtime/signal_ppc64x.go
index f2225da9a1..8a39d59957 100644
--- a/src/runtime/signal_ppc64x.go
+++ b/src/runtime/signal_ppc64x.go
@@ -9,6 +9,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
@@ -83,8 +84,8 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
// In case we are panicking from external C code
c.set_r0(0)
c.set_r30(uint64(uintptr(unsafe.Pointer(gp))))
- c.set_r12(uint64(funcPC(sigpanic)))
- c.set_pc(uint64(funcPC(sigpanic)))
+ c.set_r12(uint64(abi.FuncPCABIInternal(sigpanic)))
+ c.set_pc(uint64(abi.FuncPCABIInternal(sigpanic)))
}
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
diff --git a/src/runtime/signal_riscv64.go b/src/runtime/signal_riscv64.go
index e6b1b14130..8a24e4e36a 100644
--- a/src/runtime/signal_riscv64.go
+++ b/src/runtime/signal_riscv64.go
@@ -8,7 +8,8 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
@@ -63,7 +64,7 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
// functions are correctly handled. This smashes
// the stack frame but we're not going back there
// anyway.
- sp := c.sp() - sys.PtrSize
+ sp := c.sp() - goarch.PtrSize
c.set_sp(sp)
*(*uint64)(unsafe.Pointer(uintptr(sp))) = c.ra()
@@ -76,7 +77,7 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
// In case we are panicking from external C code
c.set_gp(uint64(uintptr(unsafe.Pointer(gp))))
- c.set_pc(uint64(funcPC(sigpanic)))
+ c.set_pc(uint64(abi.FuncPCABIInternal(sigpanic)))
}
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
@@ -84,7 +85,7 @@ func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
// push the call. The function being pushed is responsible
// for restoring the LR and setting the SP back.
// This extra slot is known to gentraceback.
- sp := c.sp() - sys.PtrSize
+ sp := c.sp() - goarch.PtrSize
c.set_sp(sp)
*(*uint64)(unsafe.Pointer(uintptr(sp))) = c.ra()
// Set up PC and LR to pretend the function being signaled
diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go
index 6096760b50..8117582855 100644
--- a/src/runtime/signal_unix.go
+++ b/src/runtime/signal_unix.go
@@ -8,6 +8,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/atomic"
"unsafe"
)
@@ -143,7 +144,7 @@ func initsig(preinit bool) {
}
handlingSig[i] = 1
- setsig(i, funcPC(sighandler))
+ setsig(i, abi.FuncPCABIInternal(sighandler))
}
}
@@ -194,7 +195,7 @@ func sigenable(sig uint32) {
<-maskUpdatedChan
if atomic.Cas(&handlingSig[sig], 0, 1) {
atomic.Storeuintptr(&fwdSig[sig], getsig(sig))
- setsig(sig, funcPC(sighandler))
+ setsig(sig, abi.FuncPCABIInternal(sighandler))
}
}
}
@@ -271,7 +272,7 @@ func setProcessCPUProfiler(hz int32) {
// Enable the Go signal handler if not enabled.
if atomic.Cas(&handlingSig[_SIGPROF], 0, 1) {
atomic.Storeuintptr(&fwdSig[_SIGPROF], getsig(_SIGPROF))
- setsig(_SIGPROF, funcPC(sighandler))
+ setsig(_SIGPROF, abi.FuncPCABIInternal(sighandler))
}
var it itimerval
@@ -329,7 +330,7 @@ func doSigPreempt(gp *g, ctxt *sigctxt) {
if wantAsyncPreempt(gp) {
if ok, newpc := isAsyncSafePoint(gp, ctxt.sigpc(), ctxt.sigsp(), ctxt.siglr()); ok {
// Adjust the PC and inject a call to asyncPreempt.
- ctxt.pushCall(funcPC(asyncPreempt), newpc)
+ ctxt.pushCall(abi.FuncPCABI0(asyncPreempt), newpc)
}
}
@@ -843,7 +844,7 @@ func raisebadsignal(sig uint32, c *sigctxt) {
// We may receive another instance of the signal before we
// restore the Go handler, but that is not so bad: we know
// that the Go program has been ignoring the signal.
- setsig(sig, funcPC(sighandler))
+ setsig(sig, abi.FuncPCABIInternal(sighandler))
}
//go:nosplit
diff --git a/src/runtime/signal_windows.go b/src/runtime/signal_windows.go
index b720ddcf16..3fe352ef57 100644
--- a/src/runtime/signal_windows.go
+++ b/src/runtime/signal_windows.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
@@ -27,15 +28,15 @@ func firstcontinuetramp()
func lastcontinuetramp()
func initExceptionHandler() {
- stdcall2(_AddVectoredExceptionHandler, 1, funcPC(exceptiontramp))
+ stdcall2(_AddVectoredExceptionHandler, 1, abi.FuncPCABI0(exceptiontramp))
if _AddVectoredContinueHandler == nil || GOARCH == "386" {
// use SetUnhandledExceptionFilter for windows-386 or
// if VectoredContinueHandler is unavailable.
// note: SetUnhandledExceptionFilter handler won't be called, if debugging.
- stdcall1(_SetUnhandledExceptionFilter, funcPC(lastcontinuetramp))
+ stdcall1(_SetUnhandledExceptionFilter, abi.FuncPCABI0(lastcontinuetramp))
} else {
- stdcall2(_AddVectoredContinueHandler, 1, funcPC(firstcontinuetramp))
- stdcall2(_AddVectoredContinueHandler, 0, funcPC(lastcontinuetramp))
+ stdcall2(_AddVectoredContinueHandler, 1, abi.FuncPCABI0(firstcontinuetramp))
+ stdcall2(_AddVectoredContinueHandler, 0, abi.FuncPCABI0(lastcontinuetramp))
}
}
@@ -133,7 +134,7 @@ func exceptionhandler(info *exceptionrecord, r *context, gp *g) int32 {
// The exception is not from asyncPreempt, so don't push a
// sigpanic call to make it look like that. Instead, just
// overwrite the PC. (See issue #35773)
- if r.ip() != 0 && r.ip() != funcPC(asyncPreempt) {
+ if r.ip() != 0 && r.ip() != abi.FuncPCABI0(asyncPreempt) {
sp := unsafe.Pointer(r.sp())
delta := uintptr(sys.StackAlign)
sp = add(sp, -delta)
@@ -145,7 +146,7 @@ func exceptionhandler(info *exceptionrecord, r *context, gp *g) int32 {
*((*uintptr)(sp)) = r.ip()
}
}
- r.set_ip(funcPC(sigpanic0))
+ r.set_ip(abi.FuncPCABI0(sigpanic0))
return _EXCEPTION_CONTINUE_EXECUTION
}
diff --git a/src/runtime/slice.go b/src/runtime/slice.go
index 01cdcaeee3..e8267be885 100644
--- a/src/runtime/slice.go
+++ b/src/runtime/slice.go
@@ -5,6 +5,8 @@
package runtime
import (
+ "internal/abi"
+ "internal/goarch"
"runtime/internal/math"
"runtime/internal/sys"
"unsafe"
@@ -68,7 +70,7 @@ func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsaf
if raceenabled {
callerpc := getcallerpc()
- pc := funcPC(makeslicecopy)
+ pc := abi.FuncPCABIInternal(makeslicecopy)
racereadrangepc(from, copymem, callerpc, pc)
}
if msanenabled {
@@ -162,7 +164,7 @@ func panicunsafeslicelen() {
func growslice(et *_type, old slice, cap int) slice {
if raceenabled {
callerpc := getcallerpc()
- racereadrangepc(old.array, uintptr(old.len*int(et.size)), callerpc, funcPC(growslice))
+ racereadrangepc(old.array, uintptr(old.len*int(et.size)), callerpc, abi.FuncPCABIInternal(growslice))
}
if msanenabled {
msanread(old.array, uintptr(old.len*int(et.size)))
@@ -212,15 +214,15 @@ func growslice(et *_type, old slice, cap int) slice {
capmem = roundupsize(uintptr(newcap))
overflow = uintptr(newcap) > maxAlloc
newcap = int(capmem)
- case et.size == sys.PtrSize:
- lenmem = uintptr(old.len) * sys.PtrSize
- newlenmem = uintptr(cap) * sys.PtrSize
- capmem = roundupsize(uintptr(newcap) * sys.PtrSize)
- overflow = uintptr(newcap) > maxAlloc/sys.PtrSize
- newcap = int(capmem / sys.PtrSize)
+ case et.size == goarch.PtrSize:
+ lenmem = uintptr(old.len) * goarch.PtrSize
+ newlenmem = uintptr(cap) * goarch.PtrSize
+ capmem = roundupsize(uintptr(newcap) * goarch.PtrSize)
+ overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize
+ newcap = int(capmem / goarch.PtrSize)
case isPowerOfTwo(et.size):
var shift uintptr
- if sys.PtrSize == 8 {
+ if goarch.PtrSize == 8 {
// Mask shift for better code generation.
shift = uintptr(sys.Ctz64(uint64(et.size))) & 63
} else {
@@ -298,7 +300,7 @@ func slicecopy(toPtr unsafe.Pointer, toLen int, fromPtr unsafe.Pointer, fromLen
size := uintptr(n) * width
if raceenabled {
callerpc := getcallerpc()
- pc := funcPC(slicecopy)
+ pc := abi.FuncPCABIInternal(slicecopy)
racereadrangepc(fromPtr, size, callerpc, pc)
racewriterangepc(toPtr, size, callerpc, pc)
}
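growslice's power-of-two case replaces a division by et.size with a shift derived from the trailing-zero count, masked for better code generation. The identity it relies on, checked in plain Go with an arbitrary power-of-two element size:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	const size = 16 // any power of two
	shift := uint(bits.TrailingZeros64(size)) & 63
	capmem := uint64(400)
	fmt.Println(capmem>>shift == capmem/size) // true: shifting replaces the division
}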
diff --git a/src/runtime/softfloat64.go b/src/runtime/softfloat64.go
index 13bee6c1d7..084aa132d9 100644
--- a/src/runtime/softfloat64.go
+++ b/src/runtime/softfloat64.go
@@ -562,36 +562,38 @@ func f64toint64(x uint64) int64 {
return val
}
-func f64touint64(x float64) uint64 {
- if x < float64(1<<63) {
- return uint64(int64(x))
+func f64touint64(x uint64) uint64 {
+ var m uint64 = 0x43e0000000000000 // float64 1<<63
+ if fgt64(m, x) {
+ return uint64(f64toint64(x))
}
- y := x - float64(1<<63)
- z := uint64(int64(y))
+ y := fadd64(x, -m)
+ z := uint64(f64toint64(y))
return z | (1 << 63)
}
-func f32touint64(x float32) uint64 {
- if x < float32(1<<63) {
- return uint64(int64(x))
+func f32touint64(x uint32) uint64 {
+ var m uint32 = 0x5f000000 // float32 1<<63
+ if fgt32(m, x) {
+ return uint64(f32toint64(x))
}
- y := x - float32(1<<63)
- z := uint64(int64(y))
+ y := fadd32(x, -m)
+ z := uint64(f32toint64(y))
return z | (1 << 63)
}
-func fuint64to64(x uint64) float64 {
+func fuint64to64(x uint64) uint64 {
if int64(x) >= 0 {
- return float64(int64(x))
+ return fint64to64(int64(x))
}
- // See ../cmd/compile/internal/gc/ssa.go:uint64Tofloat
+ // See ../cmd/compile/internal/ssagen/ssa.go:uint64Tofloat
y := x & 1
z := x >> 1
z = z | y
- r := float64(int64(z))
- return r + r
+ r := fint64to64(int64(z))
+ return fadd64(r, r)
}
-func fuint64to32(x uint64) float32 {
- return float32(fuint64to64(x))
+func fuint64to32(x uint64) uint32 {
+ return f64to32(fuint64to64(x))
}
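The soft-float conversions now take and return raw bit patterns (uint64/uint32 images) and build the result from the soft helpers fgt64, fadd64 and f64toint64; the magic constant 0x43e0000000000000 is simply the float64 bit pattern of 2^63. For reference, the removed hardware-float logic restated as ordinary Go:

package main

import (
	"fmt"
	"math"
)

// f64touint64 mirrors the removed hardware-float version: values below
// 2^63 convert directly; larger values are shifted down by 2^63 first
// and the top bit is restored afterwards.
func f64touint64(x float64) uint64 {
	if x < float64(1<<63) {
		return uint64(int64(x))
	}
	y := x - float64(1<<63)
	return uint64(int64(y)) | 1<<63
}

func main() {
	fmt.Printf("%#x\n", math.Float64bits(float64(1<<63))) // 0x43e0000000000000
	fmt.Println(f64touint64(1 << 63))                     // 9223372036854775808
}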
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index 6e0d157630..54ad66dca2 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -7,6 +7,8 @@ package runtime
import (
"internal/abi"
"internal/cpu"
+ "internal/goarch"
+ "internal/goos"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -67,7 +69,7 @@ const (
// to each stack below the usual guard area for OS-specific
// purposes like signal handling. Used on Windows, Plan 9,
// and iOS because they do not use a separate stack.
- _StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosIos*sys.GoarchArm64*1024
+ _StackSystem = goos.IsWindows*512*goarch.PtrSize + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024
// The minimum size of stack used by Go code
_StackMin = 2048
@@ -125,7 +127,7 @@ const (
)
const (
- uintptrMask = 1<<(8*sys.PtrSize) - 1
+ uintptrMask = 1<<(8*goarch.PtrSize) - 1
// The values below can be stored to g.stackguard0 to force
// the next stack check to fail.
@@ -599,14 +601,14 @@ func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f
for i := uintptr(0); i < num; i += 8 {
if stackDebug >= 4 {
for j := uintptr(0); j < 8; j++ {
- print(" ", add(scanp, (i+j)*sys.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*sys.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
+ print(" ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
}
}
b := *(addb(bv.bytedata, i/8))
for b != 0 {
j := uintptr(sys.Ctz8(b))
b &= b - 1
- pp := (*uintptr)(add(scanp, (i+j)*sys.PtrSize))
+ pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
retry:
p := *pp
if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
@@ -655,13 +657,13 @@ func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
// Adjust local variables if stack frame has been allocated.
if locals.n > 0 {
- size := uintptr(locals.n) * sys.PtrSize
+ size := uintptr(locals.n) * goarch.PtrSize
adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
}
// Adjust saved base pointer if there is one.
// TODO what about arm64 frame pointer adjustment?
- if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.PtrSize {
+ if goarch.ArchFamily == goarch.AMD64 && frame.argp-frame.varp == 2*goarch.PtrSize {
if stackDebug >= 3 {
print(" saved bp\n")
}
@@ -710,8 +712,8 @@ func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
s = materializeGCProg(ptrdata, gcdata)
gcdata = (*byte)(unsafe.Pointer(s.startAddr))
}
- for i := uintptr(0); i < ptrdata; i += sys.PtrSize {
- if *addb(gcdata, i/(8*sys.PtrSize))>>(i/sys.PtrSize&7)&1 != 0 {
+ for i := uintptr(0); i < ptrdata; i += goarch.PtrSize {
+ if *addb(gcdata, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
adjustpointer(adjinfo, unsafe.Pointer(p+i))
}
}
@@ -753,11 +755,6 @@ func adjustdefers(gp *g, adjinfo *adjustinfo) {
adjustpointer(adjinfo, unsafe.Pointer(&d.varp))
adjustpointer(adjinfo, unsafe.Pointer(&d.fd))
}
-
- // Adjust defer argument blocks the same way we adjust active stack frames.
- // Note: this code is after the loop above, so that if a defer record is
- // stack allocated, we work on the copy in the new stack.
- tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))
}
func adjustpanics(gp *g, adjinfo *adjustinfo) {
@@ -1017,9 +1014,9 @@ func newstack() {
throw("missing stack in newstack")
}
sp := gp.sched.sp
- if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 || sys.ArchFamily == sys.WASM {
+ if goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.I386 || goarch.ArchFamily == goarch.WASM {
// The call to morestack cost a word.
- sp -= sys.PtrSize
+ sp -= goarch.PtrSize
}
if stackDebug >= 1 || sp < gp.stack.lo {
print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
@@ -1114,7 +1111,7 @@ func gostartcallfn(gobuf *gobuf, fv *funcval) {
if fv != nil {
fn = unsafe.Pointer(fv.fn)
} else {
- fn = unsafe.Pointer(funcPC(nilfunc))
+ fn = unsafe.Pointer(abi.FuncPCABIInternal(nilfunc))
}
gostartcall(gobuf, fn, unsafe.Pointer(fv))
}
@@ -1262,8 +1259,8 @@ func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args
// Local variables.
size := frame.varp - frame.sp
var minsize uintptr
- switch sys.ArchFamily {
- case sys.ARM64:
+ switch goarch.ArchFamily {
+ case goarch.ARM64:
minsize = sys.StackAlign
default:
minsize = sys.MinFrameSize
@@ -1298,7 +1295,7 @@ func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args
// In this case, arglen specifies how much of the args section is actually live.
// (It could be either all the args + results, or just the args.)
args = *frame.argmap
- n := int32(frame.arglen / sys.PtrSize)
+ n := int32(frame.arglen / goarch.PtrSize)
if n < args.n {
args.n = n // Don't use more of the arguments than arglen.
}
@@ -1320,17 +1317,17 @@ func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args
}
// stack objects.
- if GOARCH == "amd64" && unsafe.Sizeof(abi.RegArgs{}) > 0 && frame.argmap != nil {
+ if (GOARCH == "amd64" || GOARCH == "arm64") && unsafe.Sizeof(abi.RegArgs{}) > 0 && frame.argmap != nil {
// argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
// We don't actually use argmap in this case, but we need to fake the stack object
- // record for these frames which contain an internal/abi.RegArgs at a hard-coded offset
- // on amd64.
+ // record for these frames which contain an internal/abi.RegArgs at a hard-coded offset.
+ // This offset matches the assembly code on amd64 and arm64.
objs = methodValueCallFrameObjs
} else {
p := funcdata(f, _FUNCDATA_StackObjects)
if p != nil {
n := *(*uintptr)(p)
- p = add(p, sys.PtrSize)
+ p = add(p, goarch.PtrSize)
*(*slice)(unsafe.Pointer(&objs)) = slice{array: noescape(p), len: int(n), cap: int(n)}
// Note: the noescape above is needed to keep
// getStackMap from "leaking param content:
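The adjustpointers loop shown in the stack.go hunks scans the pointer bitmap a byte at a time, using a trailing-zero count to locate the next set bit and `b &= b - 1` to clear it. The same iteration pattern in plain Go over a sample bitmap byte:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	b := uint8(0b10010110) // pointer bits 1, 2, 4 and 7 set
	for b != 0 {
		j := uint(bits.TrailingZeros8(b))
		b &= b - 1 // clear the lowest set bit
		fmt.Println("pointer slot", j)
	}
}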
diff --git a/src/runtime/string.go b/src/runtime/string.go
index d6030a1dca..d6990dab9a 100644
--- a/src/runtime/string.go
+++ b/src/runtime/string.go
@@ -5,8 +5,9 @@
package runtime
import (
+ "internal/abi"
"internal/bytealg"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
@@ -88,14 +89,14 @@ func slicebytetostring(buf *tmpBuf, ptr *byte, n int) (str string) {
racereadrangepc(unsafe.Pointer(ptr),
uintptr(n),
getcallerpc(),
- funcPC(slicebytetostring))
+ abi.FuncPCABIInternal(slicebytetostring))
}
if msanenabled {
msanread(unsafe.Pointer(ptr), uintptr(n))
}
if n == 1 {
p := unsafe.Pointer(&staticuint64s[*ptr])
- if sys.BigEndian {
+ if goarch.BigEndian {
p = add(p, 7)
}
stringStructOf(&str).str = p
@@ -152,7 +153,7 @@ func slicebytetostringtmp(ptr *byte, n int) (str string) {
racereadrangepc(unsafe.Pointer(ptr),
uintptr(n),
getcallerpc(),
- funcPC(slicebytetostringtmp))
+ abi.FuncPCABIInternal(slicebytetostringtmp))
}
if msanenabled && n > 0 {
msanread(unsafe.Pointer(ptr), uintptr(n))
@@ -203,7 +204,7 @@ func slicerunetostring(buf *tmpBuf, a []rune) string {
racereadrangepc(unsafe.Pointer(&a[0]),
uintptr(len(a))*unsafe.Sizeof(a[0]),
getcallerpc(),
- funcPC(slicerunetostring))
+ abi.FuncPCABIInternal(slicerunetostring))
}
if msanenabled && len(a) > 0 {
msanread(unsafe.Pointer(&a[0]), uintptr(len(a))*unsafe.Sizeof(a[0]))
diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go
index 16d7583202..8a520d7839 100644
--- a/src/runtime/stubs.go
+++ b/src/runtime/stubs.go
@@ -6,6 +6,7 @@ package runtime
import (
"internal/abi"
+ "internal/goarch"
"internal/goexperiment"
"unsafe"
)
@@ -176,8 +177,6 @@ func cgocallback(fn, frame, ctxt uintptr)
func gogo(buf *gobuf)
-//go:noescape
-func jmpdefer(fv *funcval, argp uintptr)
func asminit()
func setg(gg *g)
func breakpoint()
@@ -421,12 +420,5 @@ func sigpanic0()
// structure that is at least large enough to hold the
// registers the system supports.
//
-// Currently it's set to zero because using the actual
-// constant will break every part of the toolchain that
-// uses finalizers or Windows callbacks to call functions
-// The value that is currently commented out there should be
-// the actual value once we're ready to use the register ABI
-// everywhere.
-//
// Protected by finlock.
-var intArgRegs = abi.IntArgRegs * goexperiment.RegabiArgsInt
+var intArgRegs = abi.IntArgRegs * (goexperiment.RegabiArgsInt | goarch.IsAmd64)
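The new intArgRegs expression uses 0/1 constants as booleans: the product is either zero or the full integer-argument-register count, and ORing in goarch.IsAmd64 enables it on amd64 even without the experiment flag, now that the register ABI is used there unconditionally. A sketch of the arithmetic with illustrative values (amd64 passes integer arguments in 9 registers; the experiment constant is 0 or 1):

package main

import "fmt"

const (
	intArgRegsABI = 9 // like abi.IntArgRegs on amd64
	regabiArgsInt = 0 // goexperiment.RegabiArgsInt with the experiment off
	isAmd64       = 1 // goarch.IsAmd64 when GOARCH=amd64
)

func main() {
	intArgRegs := intArgRegsABI * (regabiArgsInt | isAmd64)
	fmt.Println(intArgRegs) // 9: the register ABI stays active on amd64
}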
diff --git a/src/runtime/stubs_arm64.go b/src/runtime/stubs_arm64.go
index f5e3bb4854..bd0533d158 100644
--- a/src/runtime/stubs_arm64.go
+++ b/src/runtime/stubs_arm64.go
@@ -14,3 +14,10 @@ func save_g()
func asmcgocall_no_g(fn, arg unsafe.Pointer)
func emptyfunc()
+
+// Used by reflectcall and the reflect package.
+//
+// Spills/loads arguments in registers to/from an internal/abi.RegArgs
+// respectively. Does not follow the Go ABI.
+func spillArgs()
+func unspillArgs()
diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go
index 999300a58e..d08aa0b320 100644
--- a/src/runtime/symtab.go
+++ b/src/runtime/symtab.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -330,7 +331,6 @@ const (
funcID_gogo
funcID_gopanic
funcID_handleAsyncEvent
- funcID_jmpdefer
funcID_mcall
funcID_morestack
funcID_mstart
@@ -568,7 +568,7 @@ const debugPcln = false
func moduledataverify1(datap *moduledata) {
// Check that the pclntab's format is valid.
hdr := datap.pcHeader
- if hdr.magic != 0xfffffffa || hdr.pad1 != 0 || hdr.pad2 != 0 || hdr.minLC != sys.PCQuantum || hdr.ptrSize != sys.PtrSize {
+ if hdr.magic != 0xfffffffa || hdr.pad1 != 0 || hdr.pad2 != 0 || hdr.minLC != sys.PCQuantum || hdr.ptrSize != goarch.PtrSize {
print("runtime: function symbol table header:", hex(hdr.magic), hex(hdr.pad1), hex(hdr.pad2), hex(hdr.minLC), hex(hdr.ptrSize))
if datap.pluginpath != "" {
print(", plugin:", datap.pluginpath)
@@ -786,7 +786,7 @@ type pcvalueCacheEnt struct {
// For now, align to sys.PtrSize and reduce mod the number of entries.
// In practice, this appears to be fairly randomly and evenly distributed.
func pcvalueCacheKey(targetpc uintptr) uintptr {
- return (targetpc / sys.PtrSize) % uintptr(len(pcvalueCache{}.entries))
+ return (targetpc / goarch.PtrSize) % uintptr(len(pcvalueCache{}.entries))
}
// Returns the PCData value, and the PC where this value starts.
@@ -955,7 +955,7 @@ func funcline(f funcInfo, targetpc uintptr) (file string, line int32) {
func funcspdelta(f funcInfo, targetpc uintptr, cache *pcvalueCache) int32 {
x, _ := pcvalue(f, f.pcsp, targetpc, cache, true)
- if x&(sys.PtrSize-1) != 0 {
+ if x&(goarch.PtrSize-1) != 0 {
print("invalid spdelta ", funcname(f), " ", hex(f.entry), " ", hex(targetpc), " ", hex(f.pcsp), " ", x, "\n")
}
return x
@@ -1014,13 +1014,13 @@ func funcdata(f funcInfo, i uint8) unsafe.Pointer {
return nil
}
p := add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(f.npcdata)*4)
- if sys.PtrSize == 8 && uintptr(p)&4 != 0 {
+ if goarch.PtrSize == 8 && uintptr(p)&4 != 0 {
if uintptr(unsafe.Pointer(f._func))&4 != 0 {
println("runtime: misaligned func", f._func)
}
p = add(p, 4)
}
- return *(*unsafe.Pointer)(add(p, uintptr(i)*sys.PtrSize))
+ return *(*unsafe.Pointer)(add(p, uintptr(i)*goarch.PtrSize))
}
// step advances to the next pc, value pair in the encoded table.
diff --git a/src/runtime/sys_darwin_arm64.go b/src/runtime/sys_darwin_arm64.go
index 9c14f33a1c..e6d4c1be48 100644
--- a/src/runtime/sys_darwin_arm64.go
+++ b/src/runtime/sys_darwin_arm64.go
@@ -5,7 +5,8 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
@@ -14,14 +15,14 @@ import (
//go:nosplit
//go:cgo_unsafe_args
func g0_pthread_key_create(k *pthreadkey, destructor uintptr) int32 {
- return asmcgocall(unsafe.Pointer(funcPC(pthread_key_create_trampoline)), unsafe.Pointer(&k))
+ return asmcgocall(unsafe.Pointer(abi.FuncPCABI0(pthread_key_create_trampoline)), unsafe.Pointer(&k))
}
func pthread_key_create_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func g0_pthread_setspecific(k pthreadkey, value uintptr) int32 {
- return asmcgocall(unsafe.Pointer(funcPC(pthread_setspecific_trampoline)), unsafe.Pointer(&k))
+ return asmcgocall(unsafe.Pointer(abi.FuncPCABI0(pthread_setspecific_trampoline)), unsafe.Pointer(&k))
}
func pthread_setspecific_trampoline()
@@ -53,7 +54,7 @@ func tlsinit(tlsg *uintptr, tlsbase *[_PTHREAD_KEYS_MAX]uintptr) {
for i, x := range tlsbase {
if x == magic {
- *tlsg = uintptr(i * sys.PtrSize)
+ *tlsg = uintptr(i * goarch.PtrSize)
g0_pthread_setspecific(k, 0)
return
}
diff --git a/src/runtime/sys_linux_amd64.s b/src/runtime/sys_linux_amd64.s
index 33cc670b64..64ddc2354e 100644
--- a/src/runtime/sys_linux_amd64.s
+++ b/src/runtime/sys_linux_amd64.s
@@ -215,13 +215,7 @@ TEXT runtime·nanotime1(SB),NOSPLIT,$16-8
MOVQ SP, R12 // Save old SP; R12 unchanged by C code.
-#ifdef GOEXPERIMENT_regabig
MOVQ g_m(R14), BX // BX unchanged by C code.
-#else
- get_tls(CX)
- MOVQ g(CX), AX
- MOVQ g_m(AX), BX // BX unchanged by C code.
-#endif
// Set vdsoPC and vdsoSP for SIGPROF traceback.
// Save the old values on stack and restore them on exit,
@@ -236,11 +230,7 @@ TEXT runtime·nanotime1(SB),NOSPLIT,$16-8
MOVQ CX, m_vdsoPC(BX)
MOVQ DX, m_vdsoSP(BX)
-#ifdef GOEXPERIMENT_regabig
CMPQ R14, m_curg(BX) // Only switch if on curg.
-#else
- CMPQ AX, m_curg(BX) // Only switch if on curg.
-#endif
JNE noswitch
MOVQ m_g0(BX), DX
@@ -328,9 +318,8 @@ TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
POPQ BP
RET
-// Defined as ABIInternal since it does not use the stack-based Go ABI.
// Called using C ABI.
-TEXT runtime·sigtramp<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·sigtramp(SB),NOSPLIT,$0
// Transition from C ABI to Go ABI.
PUSH_REGS_HOST_TO_ABI0()
@@ -348,8 +337,7 @@ TEXT runtime·sigtramp<ABIInternal>(SB),NOSPLIT,$0
// Used instead of sigtramp in programs that use cgo.
// Arguments from kernel are in DI, SI, DX.
-// Defined as ABIInternal since it does not use the stack-based Go ABI.
-TEXT runtime·cgoSigtramp<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·cgoSigtramp(SB),NOSPLIT,$0
// If no traceback function, do usual sigtramp.
MOVQ runtime·cgoTraceback(SB), AX
TESTQ AX, AX
@@ -392,12 +380,12 @@ TEXT runtime·cgoSigtramp<ABIInternal>(SB),NOSPLIT,$0
// The first three arguments, and the fifth, are already in registers.
// Set the two remaining arguments now.
MOVQ runtime·cgoTraceback(SB), CX
- MOVQ $runtime·sigtramp<ABIInternal>(SB), R9
+ MOVQ $runtime·sigtramp(SB), R9
MOVQ _cgo_callers(SB), AX
JMP AX
sigtramp:
- JMP runtime·sigtramp<ABIInternal>(SB)
+ JMP runtime·sigtramp(SB)
sigtrampnog:
// Signal arrived on a non-Go thread. If this is SIGPROF, get a
@@ -428,8 +416,7 @@ sigtrampnog:
// https://sourceware.org/git/?p=glibc.git;a=blob;f=sysdeps/unix/sysv/linux/x86_64/sigaction.c
// The code that cares about the precise instructions used is:
// https://gcc.gnu.org/viewcvs/gcc/trunk/libgcc/config/i386/linux-unwind.h?revision=219188&view=markup
-// Defined as ABIInternal since it does not use the stack-based Go ABI.
-TEXT runtime·sigreturn<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·sigreturn(SB),NOSPLIT,$0
MOVQ $SYS_rt_sigreturn, AX
SYSCALL
INT $3 // not reached
diff --git a/src/runtime/sys_openbsd.go b/src/runtime/sys_openbsd.go
index ab3149558b..15888619b1 100644
--- a/src/runtime/sys_openbsd.go
+++ b/src/runtime/sys_openbsd.go
@@ -7,7 +7,10 @@
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
// The *_trampoline functions convert from the Go calling convention to the C calling convention
// and then call the underlying libc function. These are defined in sys_openbsd_$ARCH.s.
@@ -15,35 +18,35 @@ import "unsafe"
//go:nosplit
//go:cgo_unsafe_args
func pthread_attr_init(attr *pthreadattr) int32 {
- return libcCall(unsafe.Pointer(funcPC(pthread_attr_init_trampoline)), unsafe.Pointer(&attr))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_attr_init_trampoline)), unsafe.Pointer(&attr))
}
func pthread_attr_init_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func pthread_attr_destroy(attr *pthreadattr) int32 {
- return libcCall(unsafe.Pointer(funcPC(pthread_attr_destroy_trampoline)), unsafe.Pointer(&attr))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_attr_destroy_trampoline)), unsafe.Pointer(&attr))
}
func pthread_attr_destroy_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func pthread_attr_getstacksize(attr *pthreadattr, size *uintptr) int32 {
- return libcCall(unsafe.Pointer(funcPC(pthread_attr_getstacksize_trampoline)), unsafe.Pointer(&attr))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_attr_getstacksize_trampoline)), unsafe.Pointer(&attr))
}
func pthread_attr_getstacksize_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func pthread_attr_setdetachstate(attr *pthreadattr, state int) int32 {
- return libcCall(unsafe.Pointer(funcPC(pthread_attr_setdetachstate_trampoline)), unsafe.Pointer(&attr))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_attr_setdetachstate_trampoline)), unsafe.Pointer(&attr))
}
func pthread_attr_setdetachstate_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func pthread_create(attr *pthreadattr, start uintptr, arg unsafe.Pointer) int32 {
- return libcCall(unsafe.Pointer(funcPC(pthread_create_trampoline)), unsafe.Pointer(&attr))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_create_trampoline)), unsafe.Pointer(&attr))
}
func pthread_create_trampoline()
diff --git a/src/runtime/sys_openbsd1.go b/src/runtime/sys_openbsd1.go
index cb5d35879c..b4e9f54538 100644
--- a/src/runtime/sys_openbsd1.go
+++ b/src/runtime/sys_openbsd1.go
@@ -7,31 +7,34 @@
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
//go:nosplit
//go:cgo_unsafe_args
func thrsleep(ident uintptr, clock_id int32, tsp *timespec, lock uintptr, abort *uint32) int32 {
- return libcCall(unsafe.Pointer(funcPC(thrsleep_trampoline)), unsafe.Pointer(&ident))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(thrsleep_trampoline)), unsafe.Pointer(&ident))
}
func thrsleep_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func thrwakeup(ident uintptr, n int32) int32 {
- return libcCall(unsafe.Pointer(funcPC(thrwakeup_trampoline)), unsafe.Pointer(&ident))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(thrwakeup_trampoline)), unsafe.Pointer(&ident))
}
func thrwakeup_trampoline()
//go:nosplit
func osyield() {
- libcCall(unsafe.Pointer(funcPC(sched_yield_trampoline)), unsafe.Pointer(nil))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(sched_yield_trampoline)), unsafe.Pointer(nil))
}
func sched_yield_trampoline()
//go:nosplit
func osyield_no_g() {
- asmcgocall_no_g(unsafe.Pointer(funcPC(sched_yield_trampoline)), unsafe.Pointer(nil))
+ asmcgocall_no_g(unsafe.Pointer(abi.FuncPCABI0(sched_yield_trampoline)), unsafe.Pointer(nil))
}
//go:cgo_import_dynamic libc_thrsleep __thrsleep "libc.so"
diff --git a/src/runtime/sys_openbsd2.go b/src/runtime/sys_openbsd2.go
index cd1a4e879f..190ee4716a 100644
--- a/src/runtime/sys_openbsd2.go
+++ b/src/runtime/sys_openbsd2.go
@@ -7,21 +7,24 @@
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
// This is exported via linkname to assembly in runtime/cgo.
//go:linkname exit
//go:nosplit
//go:cgo_unsafe_args
func exit(code int32) {
- libcCall(unsafe.Pointer(funcPC(exit_trampoline)), unsafe.Pointer(&code))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(exit_trampoline)), unsafe.Pointer(&code))
}
func exit_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func getthrid() (tid int32) {
- libcCall(unsafe.Pointer(funcPC(getthrid_trampoline)), unsafe.Pointer(&tid))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(getthrid_trampoline)), unsafe.Pointer(&tid))
return
}
func getthrid_trampoline()
@@ -29,14 +32,14 @@ func getthrid_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func raiseproc(sig uint32) {
- libcCall(unsafe.Pointer(funcPC(raiseproc_trampoline)), unsafe.Pointer(&sig))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(raiseproc_trampoline)), unsafe.Pointer(&sig))
}
func raiseproc_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func thrkill(tid int32, sig int) {
- libcCall(unsafe.Pointer(funcPC(thrkill_trampoline)), unsafe.Pointer(&tid))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(thrkill_trampoline)), unsafe.Pointer(&tid))
}
func thrkill_trampoline()
@@ -53,7 +56,7 @@ func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (un
ret1 unsafe.Pointer
ret2 int
}{addr, n, prot, flags, fd, off, nil, 0}
- libcCall(unsafe.Pointer(funcPC(mmap_trampoline)), unsafe.Pointer(&args))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(mmap_trampoline)), unsafe.Pointer(&args))
return args.ret1, args.ret2
}
func mmap_trampoline()
@@ -61,42 +64,42 @@ func mmap_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func munmap(addr unsafe.Pointer, n uintptr) {
- libcCall(unsafe.Pointer(funcPC(munmap_trampoline)), unsafe.Pointer(&addr))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(munmap_trampoline)), unsafe.Pointer(&addr))
}
func munmap_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func madvise(addr unsafe.Pointer, n uintptr, flags int32) {
- libcCall(unsafe.Pointer(funcPC(madvise_trampoline)), unsafe.Pointer(&addr))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(madvise_trampoline)), unsafe.Pointer(&addr))
}
func madvise_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func open(name *byte, mode, perm int32) (ret int32) {
- return libcCall(unsafe.Pointer(funcPC(open_trampoline)), unsafe.Pointer(&name))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(open_trampoline)), unsafe.Pointer(&name))
}
func open_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func closefd(fd int32) int32 {
- return libcCall(unsafe.Pointer(funcPC(close_trampoline)), unsafe.Pointer(&fd))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(close_trampoline)), unsafe.Pointer(&fd))
}
func close_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func read(fd int32, p unsafe.Pointer, n int32) int32 {
- return libcCall(unsafe.Pointer(funcPC(read_trampoline)), unsafe.Pointer(&fd))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(read_trampoline)), unsafe.Pointer(&fd))
}
func read_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func write1(fd uintptr, p unsafe.Pointer, n int32) int32 {
- return libcCall(unsafe.Pointer(funcPC(write_trampoline)), unsafe.Pointer(&fd))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(write_trampoline)), unsafe.Pointer(&fd))
}
func write_trampoline()
@@ -110,7 +113,7 @@ func pipe2(flags int32) (r, w int32, errno int32) {
p unsafe.Pointer
flags int32
}{noescape(unsafe.Pointer(&p)), flags}
- errno = libcCall(unsafe.Pointer(funcPC(pipe2_trampoline)), unsafe.Pointer(&args))
+ errno = libcCall(unsafe.Pointer(abi.FuncPCABI0(pipe2_trampoline)), unsafe.Pointer(&args))
return p[0], p[1], errno
}
func pipe2_trampoline()
@@ -118,34 +121,34 @@ func pipe2_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func setitimer(mode int32, new, old *itimerval) {
- libcCall(unsafe.Pointer(funcPC(setitimer_trampoline)), unsafe.Pointer(&mode))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(setitimer_trampoline)), unsafe.Pointer(&mode))
}
func setitimer_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func usleep(usec uint32) {
- libcCall(unsafe.Pointer(funcPC(usleep_trampoline)), unsafe.Pointer(&usec))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(usleep_trampoline)), unsafe.Pointer(&usec))
}
func usleep_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func usleep_no_g(usec uint32) {
- asmcgocall_no_g(unsafe.Pointer(funcPC(usleep_trampoline)), unsafe.Pointer(&usec))
+ asmcgocall_no_g(unsafe.Pointer(abi.FuncPCABI0(usleep_trampoline)), unsafe.Pointer(&usec))
}
//go:nosplit
//go:cgo_unsafe_args
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32 {
- return libcCall(unsafe.Pointer(funcPC(sysctl_trampoline)), unsafe.Pointer(&mib))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(sysctl_trampoline)), unsafe.Pointer(&mib))
}
func sysctl_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func fcntl(fd, cmd, arg int32) int32 {
- return libcCall(unsafe.Pointer(funcPC(fcntl_trampoline)), unsafe.Pointer(&fd))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(fcntl_trampoline)), unsafe.Pointer(&fd))
}
func fcntl_trampoline()
@@ -156,7 +159,7 @@ func nanotime1() int64 {
clock_id int32
tp unsafe.Pointer
}{_CLOCK_MONOTONIC, unsafe.Pointer(&ts)}
- libcCall(unsafe.Pointer(funcPC(clock_gettime_trampoline)), unsafe.Pointer(&args))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(clock_gettime_trampoline)), unsafe.Pointer(&args))
return ts.tv_sec*1e9 + int64(ts.tv_nsec)
}
func clock_gettime_trampoline()
@@ -168,42 +171,42 @@ func walltime() (int64, int32) {
clock_id int32
tp unsafe.Pointer
}{_CLOCK_REALTIME, unsafe.Pointer(&ts)}
- libcCall(unsafe.Pointer(funcPC(clock_gettime_trampoline)), unsafe.Pointer(&args))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(clock_gettime_trampoline)), unsafe.Pointer(&args))
return ts.tv_sec, int32(ts.tv_nsec)
}
//go:nosplit
//go:cgo_unsafe_args
func kqueue() int32 {
- return libcCall(unsafe.Pointer(funcPC(kqueue_trampoline)), nil)
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(kqueue_trampoline)), nil)
}
func kqueue_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32 {
- return libcCall(unsafe.Pointer(funcPC(kevent_trampoline)), unsafe.Pointer(&kq))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(kevent_trampoline)), unsafe.Pointer(&kq))
}
func kevent_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func sigaction(sig uint32, new *sigactiont, old *sigactiont) {
- libcCall(unsafe.Pointer(funcPC(sigaction_trampoline)), unsafe.Pointer(&sig))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(sigaction_trampoline)), unsafe.Pointer(&sig))
}
func sigaction_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func sigprocmask(how uint32, new *sigset, old *sigset) {
- libcCall(unsafe.Pointer(funcPC(sigprocmask_trampoline)), unsafe.Pointer(&how))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(sigprocmask_trampoline)), unsafe.Pointer(&how))
}
func sigprocmask_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func sigaltstack(new *stackt, old *stackt) {
- libcCall(unsafe.Pointer(funcPC(sigaltstack_trampoline)), unsafe.Pointer(&new))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(sigaltstack_trampoline)), unsafe.Pointer(&new))
}
func sigaltstack_trampoline()
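
Several wrappers above (mmap, pipe2, nanotime1, walltime) pack their libc arguments into an anonymous struct and pass a pointer to it, while the simpler ones rely on //go:cgo_unsafe_args and pass a pointer to their first parameter; either way, the assembly trampoline reads the remaining arguments at fixed offsets from that single pointer. A small runnable sketch, with made-up field names, of why the struct form works:

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        // Fields of a struct are laid out contiguously, so a pointer to the
        // struct doubles as a pointer to the first argument and the callee
        // can reach the rest at fixed offsets.
        args := struct {
            fd   int32
            n    int32
            flag int32
        }{fd: 3, n: 128, flag: 1}

        base := unsafe.Pointer(&args)
        fmt.Println(*(*int32)(base))                                      // 3
        fmt.Println(*(*int32)(unsafe.Add(base, unsafe.Offsetof(args.n)))) // 128
    }
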
diff --git a/src/runtime/sys_openbsd3.go b/src/runtime/sys_openbsd3.go
index 8d77a4b216..a917ebde61 100644
--- a/src/runtime/sys_openbsd3.go
+++ b/src/runtime/sys_openbsd3.go
@@ -7,7 +7,10 @@
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
// The X versions of syscall expect the libc call to return a 64-bit result.
// Otherwise (the non-X version) expects a 32-bit result.
@@ -20,7 +23,7 @@ import "unsafe"
//go:cgo_unsafe_args
func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
entersyscall()
- libcCall(unsafe.Pointer(funcPC(syscall)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&fn))
exitsyscall()
return
}
@@ -31,7 +34,7 @@ func syscall()
//go:cgo_unsafe_args
func syscall_syscallX(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
entersyscall()
- libcCall(unsafe.Pointer(funcPC(syscallX)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallX)), unsafe.Pointer(&fn))
exitsyscall()
return
}
@@ -42,7 +45,7 @@ func syscallX()
//go:cgo_unsafe_args
func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
entersyscall()
- libcCall(unsafe.Pointer(funcPC(syscall6)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&fn))
exitsyscall()
return
}
@@ -53,7 +56,7 @@ func syscall6()
//go:cgo_unsafe_args
func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
entersyscall()
- libcCall(unsafe.Pointer(funcPC(syscall6X)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&fn))
exitsyscall()
return
}
@@ -64,7 +67,7 @@ func syscall6X()
//go:cgo_unsafe_args
func syscall_syscall10(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) {
entersyscall()
- libcCall(unsafe.Pointer(funcPC(syscall10)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10)), unsafe.Pointer(&fn))
exitsyscall()
return
}
@@ -75,7 +78,7 @@ func syscall10()
//go:cgo_unsafe_args
func syscall_syscall10X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) {
entersyscall()
- libcCall(unsafe.Pointer(funcPC(syscall10X)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10X)), unsafe.Pointer(&fn))
exitsyscall()
return
}
@@ -85,7 +88,7 @@ func syscall10X()
//go:nosplit
//go:cgo_unsafe_args
func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
- libcCall(unsafe.Pointer(funcPC(syscall)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&fn))
return
}
@@ -93,7 +96,7 @@ func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
//go:nosplit
//go:cgo_unsafe_args
func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
- libcCall(unsafe.Pointer(funcPC(syscall6)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&fn))
return
}
@@ -101,7 +104,7 @@ func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintpt
//go:nosplit
//go:cgo_unsafe_args
func syscall_rawSyscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
- libcCall(unsafe.Pointer(funcPC(syscall6X)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&fn))
return
}
@@ -109,6 +112,6 @@ func syscall_rawSyscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintp
//go:nosplit
//go:cgo_unsafe_args
func syscall_rawSyscall10X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) {
- libcCall(unsafe.Pointer(funcPC(syscall10X)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10X)), unsafe.Pointer(&fn))
return
}
diff --git a/src/runtime/sys_openbsd_amd64.s b/src/runtime/sys_openbsd_amd64.s
index 522e98cf4f..fc89ee6cbb 100644
--- a/src/runtime/sys_openbsd_amd64.s
+++ b/src/runtime/sys_openbsd_amd64.s
@@ -58,7 +58,7 @@ TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
RET
// Called using C ABI.
-TEXT runtime·sigtramp<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·sigtramp(SB),NOSPLIT,$0
// Transition from C ABI to Go ABI.
PUSH_REGS_HOST_TO_ABI0()
diff --git a/src/runtime/sys_plan9_386.s b/src/runtime/sys_plan9_386.s
index b3d2f1376d..bdcb98e19e 100644
--- a/src/runtime/sys_plan9_386.s
+++ b/src/runtime/sys_plan9_386.s
@@ -250,3 +250,7 @@ TEXT runtime·errstr(SB),NOSPLIT,$8-8
MOVL 0(SP), AX
MOVL AX, ret_base+0(FP)
RET
+
+// never called on this platform
+TEXT ·sigpanictramp(SB),NOSPLIT,$0-0
+ UNDEF
diff --git a/src/runtime/sys_plan9_amd64.s b/src/runtime/sys_plan9_amd64.s
index 731306ab44..39fc4c68e4 100644
--- a/src/runtime/sys_plan9_amd64.s
+++ b/src/runtime/sys_plan9_amd64.s
@@ -251,3 +251,7 @@ TEXT runtime·errstr(SB),NOSPLIT,$16-16
MOVQ 0(SP), AX
MOVQ AX, ret_base+0(FP)
RET
+
+// never called on this platform
+TEXT ·sigpanictramp(SB),NOSPLIT,$0-0
+ UNDEF
diff --git a/src/runtime/sys_wasm.go b/src/runtime/sys_wasm.go
index 057ed4ccd9..e6e7f471ee 100644
--- a/src/runtime/sys_wasm.go
+++ b/src/runtime/sys_wasm.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/goarch"
"runtime/internal/sys"
"unsafe"
)
@@ -30,7 +31,7 @@ func wasmExit(code int32)
// and then stopped before the first instruction in fn.
func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
sp := buf.sp
- sp -= sys.PtrSize
+ sp -= goarch.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = buf.pc
buf.sp = sp
buf.pc = uintptr(fn)
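
The sys.PtrSize to goarch.PtrSize change here (and in the files below) is mechanical: the constant moved from runtime/internal/sys into the new internal/goarch package. PtrSize is the size of a pointer in bytes on the target: 8 on 64-bit ports, 4 on 32-bit ones. A small sketch of equivalent ways to obtain the value with public APIs (the runtime defines its constant essentially as the first line):

    package main

    import (
        "fmt"
        "math/bits"
        "unsafe"
    )

    func main() {
        const ptrSize = 4 << (^uintptr(0) >> 63) // 8 on 64-bit, 4 on 32-bit
        fmt.Println(ptrSize)
        fmt.Println(unsafe.Sizeof(uintptr(0))) // same value
        fmt.Println(bits.UintSize / 8)         // same value on current ports
    }
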
diff --git a/src/runtime/sys_windows_386.s b/src/runtime/sys_windows_386.s
index 0b3933502a..cf3a439523 100644
--- a/src/runtime/sys_windows_386.s
+++ b/src/runtime/sys_windows_386.s
@@ -8,7 +8,7 @@
#include "time_windows.h"
// void runtime·asmstdcall(void *c);
-TEXT runtime·asmstdcall<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·asmstdcall(SB),NOSPLIT,$0
MOVL fn+0(FP), BX
// SetLastError(0).
@@ -147,21 +147,21 @@ done:
BYTE $0xC2; WORD $4
RET // unreached; make assembler happy
-TEXT runtime·exceptiontramp<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·exceptiontramp(SB),NOSPLIT,$0
MOVL $runtime·exceptionhandler(SB), AX
JMP sigtramp<>(SB)
-TEXT runtime·firstcontinuetramp<ABIInternal>(SB),NOSPLIT,$0-0
+TEXT runtime·firstcontinuetramp(SB),NOSPLIT,$0-0
// is never called
INT $3
-TEXT runtime·lastcontinuetramp<ABIInternal>(SB),NOSPLIT,$0-0
+TEXT runtime·lastcontinuetramp(SB),NOSPLIT,$0-0
MOVL $runtime·lastcontinuehandler(SB), AX
JMP sigtramp<>(SB)
GLOBL runtime·cbctxts(SB), NOPTR, $4
-TEXT runtime·callbackasm1<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·callbackasm1(SB),NOSPLIT,$0
MOVL 0(SP), AX // will use to find our callback context
// remove return address from stack, we are not returning to callbackasm, but to its caller.
@@ -180,7 +180,7 @@ TEXT runtime·callbackasm1<ABIInternal>(SB),NOSPLIT,$0
CLD
// determine index into runtime·cbs table
- SUBL $runtime·callbackasm<ABIInternal>(SB), AX
+ SUBL $runtime·callbackasm(SB), AX
MOVL $0, DX
MOVL $5, BX // divide by 5 because each call instruction in runtime·callbacks is 5 bytes long
DIVL BX
@@ -250,7 +250,7 @@ TEXT tstart<>(SB),NOSPLIT,$0
RET
// uint32 tstart_stdcall(M *newm);
-TEXT runtime·tstart_stdcall<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·tstart_stdcall(SB),NOSPLIT,$0
MOVL newm+0(FP), BX
PUSHL BX
diff --git a/src/runtime/sys_windows_amd64.s b/src/runtime/sys_windows_amd64.s
index e7782846b2..6cc5bba2b7 100644
--- a/src/runtime/sys_windows_amd64.s
+++ b/src/runtime/sys_windows_amd64.s
@@ -13,7 +13,7 @@
#define maxargs 18
// void runtime·asmstdcall(void *c);
-TEXT runtime·asmstdcall<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·asmstdcall(SB),NOSPLIT|NOFRAME,$0
// asmcgocall will put first argument into CX.
PUSHQ CX // save for later
MOVQ libcall_fn(CX), AX
@@ -179,15 +179,15 @@ done:
RET
-TEXT runtime·exceptiontramp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·exceptiontramp(SB),NOSPLIT|NOFRAME,$0
MOVQ $runtime·exceptionhandler(SB), AX
JMP sigtramp<>(SB)
-TEXT runtime·firstcontinuetramp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT runtime·firstcontinuetramp(SB),NOSPLIT|NOFRAME,$0-0
MOVQ $runtime·firstcontinuehandler(SB), AX
JMP sigtramp<>(SB)
-TEXT runtime·lastcontinuetramp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT runtime·lastcontinuetramp(SB),NOSPLIT|NOFRAME,$0-0
MOVQ $runtime·lastcontinuehandler(SB), AX
JMP sigtramp<>(SB)
@@ -212,7 +212,7 @@ TEXT runtime·callbackasm1(SB),NOSPLIT,$0
ADDQ $8, SP
// determine index into runtime·cbs table
- MOVQ $runtime·callbackasm<ABIInternal>(SB), DX
+ MOVQ $runtime·callbackasm(SB), DX
SUBQ DX, AX
MOVQ $0, DX
MOVQ $5, CX // divide by 5 because each call instruction in runtime·callbacks is 5 bytes long
@@ -245,7 +245,7 @@ TEXT runtime·callbackasm1(SB),NOSPLIT,$0
RET
// uint32 tstart_stdcall(M *newm);
-TEXT runtime·tstart_stdcall<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·tstart_stdcall(SB),NOSPLIT,$0
// Switch from the host ABI to the Go ABI.
PUSH_REGS_HOST_TO_ABI0()
diff --git a/src/runtime/sys_windows_arm.s b/src/runtime/sys_windows_arm.s
index 48f8c7dedf..c9e96cb652 100644
--- a/src/runtime/sys_windows_arm.s
+++ b/src/runtime/sys_windows_arm.s
@@ -10,7 +10,7 @@
// Note: For system ABI, R0-R3 are args, R4-R11 are callee-save.
// void runtime·asmstdcall(void *c);
-TEXT runtime·asmstdcall<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·asmstdcall(SB),NOSPLIT|NOFRAME,$0
MOVM.DB.W [R4, R5, R14], (R13) // push {r4, r5, lr}
MOVW R0, R4 // put libcall * in r4
MOVW R13, R5 // save stack pointer in r5
@@ -222,21 +222,21 @@ TEXT sigresume<>(SB),NOSPLIT|NOFRAME,$0
MOVW R0, R13
B (R1)
-TEXT runtime·exceptiontramp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·exceptiontramp(SB),NOSPLIT|NOFRAME,$0
MOVW $runtime·exceptionhandler(SB), R1
B sigtramp<>(SB)
-TEXT runtime·firstcontinuetramp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·firstcontinuetramp(SB),NOSPLIT|NOFRAME,$0
MOVW $runtime·firstcontinuehandler(SB), R1
B sigtramp<>(SB)
-TEXT runtime·lastcontinuetramp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·lastcontinuetramp(SB),NOSPLIT|NOFRAME,$0
MOVW $runtime·lastcontinuehandler(SB), R1
B sigtramp<>(SB)
GLOBL runtime·cbctxts(SB), NOPTR, $4
-TEXT runtime·callbackasm1<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·callbackasm1(SB),NOSPLIT|NOFRAME,$0
// On entry, the trampoline in zcallback_windows_arm.s left
// the callback index in R12 (which is volatile in the C ABI).
@@ -275,7 +275,7 @@ TEXT runtime·callbackasm1<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
B (R12) // return
// uint32 tstart_stdcall(M *newm);
-TEXT runtime·tstart_stdcall<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·tstart_stdcall(SB),NOSPLIT|NOFRAME,$0
MOVM.DB.W [R4-R11, R14], (R13) // push {r4-r11, lr}
MOVW m_g0(R0), g
diff --git a/src/runtime/sys_windows_arm64.s b/src/runtime/sys_windows_arm64.s
index 7a2e11f5ae..44145c53fb 100644
--- a/src/runtime/sys_windows_arm64.s
+++ b/src/runtime/sys_windows_arm64.s
@@ -18,7 +18,7 @@
// load_g and save_g (in tls_arm64.s) clobber R27 (REGTMP) and R0.
// void runtime·asmstdcall(void *c);
-TEXT runtime·asmstdcall<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·asmstdcall(SB),NOSPLIT|NOFRAME,$0
STP.W (R29, R30), -32(RSP) // allocate C ABI stack frame
STP (R19, R20), 16(RSP) // save old R19, R20
MOVD R0, R19 // save libcall pointer
@@ -290,21 +290,21 @@ TEXT sigresume<>(SB),NOSPLIT|NOFRAME,$0
MOVD R0, RSP
B (R1)
-TEXT runtime·exceptiontramp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
- MOVD $runtime·exceptionhandler<ABIInternal>(SB), R1
+TEXT runtime·exceptiontramp(SB),NOSPLIT|NOFRAME,$0
+ MOVD $runtime·exceptionhandler(SB), R1
B sigtramp<>(SB)
-TEXT runtime·firstcontinuetramp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
- MOVD $runtime·firstcontinuehandler<ABIInternal>(SB), R1
+TEXT runtime·firstcontinuetramp(SB),NOSPLIT|NOFRAME,$0
+ MOVD $runtime·firstcontinuehandler(SB), R1
B sigtramp<>(SB)
TEXT runtime·lastcontinuetramp(SB),NOSPLIT|NOFRAME,$0
- MOVD $runtime·lastcontinuehandler<ABIInternal>(SB), R1
+ MOVD $runtime·lastcontinuehandler(SB), R1
B sigtramp<>(SB)
GLOBL runtime·cbctxts(SB), NOPTR, $4
-TEXT runtime·callbackasm1<ABIInternal>(SB),NOSPLIT,$208-0
+TEXT runtime·callbackasm1(SB),NOSPLIT,$208-0
NO_LOCAL_POINTERS
// On entry, the trampoline in zcallback_windows_arm64.s left
@@ -339,7 +339,7 @@ TEXT runtime·callbackasm1<ABIInternal>(SB),NOSPLIT,$208-0
MOVD R0, callbackArgs_result(R13) // result
// Call cgocallback, which will call callbackWrap(frame).
- MOVD $·callbackWrap(SB), R0 // PC of function to call
+ MOVD $·callbackWrap<ABIInternal>(SB), R0 // PC of function to call, cgocallback takes an ABIInternal entry-point
MOVD R13, R1 // frame (&callbackArgs{...})
MOVD $0, R2 // context
MOVD R0, (1*8)(RSP)
@@ -356,7 +356,7 @@ TEXT runtime·callbackasm1<ABIInternal>(SB),NOSPLIT,$208-0
RET
// uint32 tstart_stdcall(M *newm);
-TEXT runtime·tstart_stdcall<ABIInternal>(SB),NOSPLIT,$96-0
+TEXT runtime·tstart_stdcall(SB),NOSPLIT,$96-0
SAVE_R19_TO_R28(-10*8)
MOVD m_g0(R0), g
diff --git a/src/runtime/sys_x86.go b/src/runtime/sys_x86.go
index 0866e3140e..856c73a2f6 100644
--- a/src/runtime/sys_x86.go
+++ b/src/runtime/sys_x86.go
@@ -8,7 +8,7 @@
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
@@ -16,7 +16,7 @@ import (
// and then stopped before the first instruction in fn.
func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
sp := buf.sp
- sp -= sys.PtrSize
+ sp -= goarch.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = buf.pc
buf.sp = sp
buf.pc = uintptr(fn)
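
Both gostartcall variants (wasm above, x86 here) perform the same rewrite: adjust a saved goroutine context so that resuming it looks as if fn had just been called from buf.pc, by pushing the old PC as a fake return address and redirecting the PC to fn. A toy, purely illustrative simulation with a slice standing in for the stack (names and sizes are made up):

    package main

    import "fmt"

    type gobuf struct {
        stack []uintptr // fake stack; sp indexes it and grows downward
        sp    int
        pc    uintptr
    }

    // gostartcall-style rewrite: push the current pc as a return address,
    // then point pc at fn, so the resumed context starts in fn and
    // "returns" to the old pc when fn finishes.
    func gostartcall(buf *gobuf, fn uintptr) {
        buf.sp--                   // sp -= PtrSize
        buf.stack[buf.sp] = buf.pc // *(*uintptr)(unsafe.Pointer(sp)) = buf.pc
        buf.pc = fn
    }

    func main() {
        buf := &gobuf{stack: make([]uintptr, 8), sp: 8, pc: 0x1000}
        gostartcall(buf, 0x2000)
        fmt.Printf("pc=%#x return=%#x\n", buf.pc, buf.stack[buf.sp])
    }
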
diff --git a/src/runtime/syscall_solaris.go b/src/runtime/syscall_solaris.go
index 094516927f..15be8e1c61 100644
--- a/src/runtime/syscall_solaris.go
+++ b/src/runtime/syscall_solaris.go
@@ -35,6 +35,7 @@ func pipe1() // declared for vet; do NOT call
//go:nosplit
//go:linkname syscall_sysvicall6
+//go:cgo_unsafe_args
func syscall_sysvicall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
call := libcall{
fn: fn,
@@ -49,6 +50,7 @@ func syscall_sysvicall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err
//go:nosplit
//go:linkname syscall_rawsysvicall6
+//go:cgo_unsafe_args
func syscall_rawsysvicall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
call := libcall{
fn: fn,
@@ -104,6 +106,7 @@ func syscall_dup2(oldfd, newfd uintptr) (val, err uintptr) {
//go:nosplit
//go:linkname syscall_execve
+//go:cgo_unsafe_args
func syscall_execve(path, argv, envp uintptr) (err uintptr) {
call := libcall{
fn: uintptr(unsafe.Pointer(&libc_execve)),
@@ -123,6 +126,7 @@ func syscall_exit(code uintptr) {
//go:nosplit
//go:linkname syscall_fcntl
+//go:cgo_unsafe_args
func syscall_fcntl(fd, cmd, arg uintptr) (val, err uintptr) {
call := libcall{
fn: uintptr(unsafe.Pointer(&libc_fcntl)),
@@ -181,6 +185,7 @@ func syscall_getpid() (pid, err uintptr) {
//go:nosplit
//go:linkname syscall_ioctl
+//go:cgo_unsafe_args
func syscall_ioctl(fd, req, arg uintptr) (err uintptr) {
call := libcall{
fn: uintptr(unsafe.Pointer(&libc_ioctl)),
@@ -234,6 +239,7 @@ func syscall_setgid(gid uintptr) (err uintptr) {
//go:nosplit
//go:linkname syscall_setgroups
+//go:cgo_unsafe_args
func syscall_setgroups(ngid, gid uintptr) (err uintptr) {
call := libcall{
fn: uintptr(unsafe.Pointer(&libc_setgroups)),
@@ -270,6 +276,7 @@ func syscall_setuid(uid uintptr) (err uintptr) {
//go:nosplit
//go:linkname syscall_setpgid
+//go:cgo_unsafe_args
func syscall_setpgid(pid, pgid uintptr) (err uintptr) {
call := libcall{
fn: uintptr(unsafe.Pointer(&libc_setpgid)),
@@ -281,6 +288,7 @@ func syscall_setpgid(pid, pgid uintptr) (err uintptr) {
}
//go:linkname syscall_syscall
+//go:cgo_unsafe_args
func syscall_syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
call := libcall{
fn: uintptr(unsafe.Pointer(&libc_syscall)),
@@ -294,6 +302,7 @@ func syscall_syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
}
//go:linkname syscall_wait4
+//go:cgo_unsafe_args
func syscall_wait4(pid uintptr, wstatus *uint32, options uintptr, rusage unsafe.Pointer) (wpid int, err uintptr) {
call := libcall{
fn: uintptr(unsafe.Pointer(&libc_wait4)),
@@ -308,6 +317,7 @@ func syscall_wait4(pid uintptr, wstatus *uint32, options uintptr, rusage unsafe.
//go:nosplit
//go:linkname syscall_write
+//go:cgo_unsafe_args
func syscall_write(fd, buf, nbyte uintptr) (n, err uintptr) {
call := libcall{
fn: uintptr(unsafe.Pointer(&libc_write)),
diff --git a/src/runtime/syscall_windows.go b/src/runtime/syscall_windows.go
index 4763a440e7..e872d74e97 100644
--- a/src/runtime/syscall_windows.go
+++ b/src/runtime/syscall_windows.go
@@ -6,7 +6,7 @@ package runtime
import (
"internal/abi"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
@@ -73,7 +73,7 @@ type abiDesc struct {
}
func (p *abiDesc) assignArg(t *_type) {
- if t.size > sys.PtrSize {
+ if t.size > goarch.PtrSize {
// We don't support this right now. In
// stdcall/cdecl, 64-bit ints and doubles are
// passed as two words (little endian); and
@@ -146,7 +146,7 @@ func (p *abiDesc) assignArg(t *_type) {
// cdecl, stdcall, fastcall, and arm pad arguments to word size.
// TODO(rsc): On arm and arm64 do we need to skip the caller's saved LR?
- p.srcStackSize += sys.PtrSize
+ p.srcStackSize += goarch.PtrSize
}
// tryRegAssignArg tries to register-assign a value of type t.
@@ -162,7 +162,7 @@ func (p *abiDesc) tryRegAssignArg(t *_type, offset uintptr) bool {
return p.assignReg(t.size, offset)
case kindInt64, kindUint64:
// Only register-assign if the registers are big enough.
- if sys.PtrSize == 8 {
+ if goarch.PtrSize == 8 {
return p.assignReg(t.size, offset)
}
case kindArray:
@@ -232,10 +232,10 @@ func callbackasmAddr(i int) uintptr {
// followed by a branch instruction
entrySize = 8
}
- return funcPC(callbackasm) + uintptr(i*entrySize)
+ return abi.FuncPCABI0(callbackasm) + uintptr(i*entrySize)
}
-const callbackMaxFrame = 64 * sys.PtrSize
+const callbackMaxFrame = 64 * goarch.PtrSize
// compileCallback converts a Go function fn into a C function pointer
// that can be passed to Windows APIs.
@@ -263,13 +263,13 @@ func compileCallback(fn eface, cdecl bool) (code uintptr) {
}
// The Go ABI aligns the result to the word size. src is
// already aligned.
- abiMap.dstStackSize = alignUp(abiMap.dstStackSize, sys.PtrSize)
+ abiMap.dstStackSize = alignUp(abiMap.dstStackSize, goarch.PtrSize)
abiMap.retOffset = abiMap.dstStackSize
if len(ft.out()) != 1 {
panic("compileCallback: expected function with one uintptr-sized result")
}
- if ft.out()[0].size != sys.PtrSize {
+ if ft.out()[0].size != goarch.PtrSize {
panic("compileCallback: expected function with one uintptr-sized result")
}
if k := ft.out()[0].kind & kindMask; k == kindFloat32 || k == kindFloat64 {
@@ -282,12 +282,12 @@ func compileCallback(fn eface, cdecl bool) (code uintptr) {
// Make room for the uintptr-sized result.
// If there are argument registers, the return value will
// be passed in the first register.
- abiMap.dstStackSize += sys.PtrSize
+ abiMap.dstStackSize += goarch.PtrSize
}
// TODO(mknyszek): Remove dstSpill from this calculation when we no longer have
// caller reserved spill space.
- frameSize := alignUp(abiMap.dstStackSize, sys.PtrSize)
+ frameSize := alignUp(abiMap.dstStackSize, goarch.PtrSize)
frameSize += abiMap.dstSpill
if frameSize > callbackMaxFrame {
panic("compileCallback: function argument frame too large")
@@ -370,7 +370,7 @@ func callbackWrap(a *callbackArgs) {
// TODO(mknyszek): Remove this when we no longer have
// caller reserved spill space.
- frameSize := alignUp(c.abiMap.dstStackSize, sys.PtrSize)
+ frameSize := alignUp(c.abiMap.dstStackSize, goarch.PtrSize)
frameSize += c.abiMap.dstSpill
// Even though this is copying back results, we can pass a nil
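
compileCallback and callbackWrap above round the destination frame size up to the word size with alignUp(x, goarch.PtrSize). alignUp is the usual power-of-two rounding helper; a standalone re-implementation with the same arithmetic (the runtime keeps its own copy):

    package main

    import "fmt"

    // alignUp rounds n up to a multiple of align; align must be a power of two.
    func alignUp(n, align uintptr) uintptr {
        return (n + align - 1) &^ (align - 1)
    }

    func main() {
        const ptrSize = 8 // assuming a 64-bit target
        fmt.Println(alignUp(13, ptrSize)) // 16
        fmt.Println(alignUp(16, ptrSize)) // 16
        fmt.Println(alignUp(0, ptrSize))  // 0
    }
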
diff --git a/src/runtime/time.go b/src/runtime/time.go
index 666b242316..ad267c3365 100644
--- a/src/runtime/time.go
+++ b/src/runtime/time.go
@@ -7,6 +7,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -819,7 +820,7 @@ func runOneTimer(pp *p, t *timer, now int64) {
if raceenabled {
ppcur := getg().m.p.ptr()
if ppcur.timerRaceCtx == 0 {
- ppcur.timerRaceCtx = racegostart(funcPC(runtimer) + sys.PCQuantum)
+ ppcur.timerRaceCtx = racegostart(abi.FuncPCABIInternal(runtimer) + sys.PCQuantum)
}
raceacquirectx(ppcur.timerRaceCtx, unsafe.Pointer(t))
}
diff --git a/src/runtime/time_linux_amd64.s b/src/runtime/time_linux_amd64.s
index 0dd7919896..c88e92bd0c 100644
--- a/src/runtime/time_linux_amd64.s
+++ b/src/runtime/time_linux_amd64.s
@@ -15,13 +15,7 @@
TEXT time·now(SB),NOSPLIT,$16-24
MOVQ SP, R12 // Save old SP; R12 unchanged by C code.
-#ifdef GOEXPERIMENT_regabig
MOVQ g_m(R14), BX // BX unchanged by C code.
-#else
- get_tls(CX)
- MOVQ g(CX), AX
- MOVQ g_m(AX), BX // BX unchanged by C code.
-#endif
// Store CLOCK_REALTIME results directly to return space.
LEAQ sec+0(FP), SI
@@ -38,11 +32,7 @@ TEXT time·now(SB),NOSPLIT,$16-24
MOVQ CX, m_vdsoPC(BX)
MOVQ SI, m_vdsoSP(BX)
-#ifdef GOEXPERIMENT_regabig
CMPQ R14, m_curg(BX) // Only switch if on curg.
-#else
- CMPQ AX, m_curg(BX) // Only switch if on curg.
-#endif
JNE noswitch
MOVQ m_g0(BX), DX
diff --git a/src/runtime/trace.go b/src/runtime/trace.go
index 1530178c85..00544e4283 100644
--- a/src/runtime/trace.go
+++ b/src/runtime/trace.go
@@ -13,6 +13,7 @@
package runtime
import (
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -85,7 +86,7 @@ const (
// and ppc64le.
// Tracing won't work reliably for architectures where cputicks is emulated
// by nanotime, so the value doesn't matter for those architectures.
- traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64)
+ traceTickDiv = 16 + 48*(goarch.Is386|goarch.IsAmd64)
// Maximum number of PCs in a single stack trace.
// Since events contain only stack id rather than whole stack trace,
// we can allow quite large values here.
@@ -829,7 +830,7 @@ Search:
// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
- return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*sys.PtrSize))
+ return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*goarch.PtrSize))
}
// allFrames returns all of the Frames corresponding to pcs.
@@ -929,7 +930,7 @@ type traceAlloc struct {
//go:notinheap
type traceAllocBlock struct {
next traceAllocBlockPtr
- data [64<<10 - sys.PtrSize]byte
+ data [64<<10 - goarch.PtrSize]byte
}
// TODO: Since traceAllocBlock is now go:notinheap, this isn't necessary.
@@ -940,7 +941,7 @@ func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(u
// alloc allocates n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
- n = alignUp(n, sys.PtrSize)
+ n = alignUp(n, goarch.PtrSize)
if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
if n > uintptr(len(a.head.ptr().data)) {
throw("trace: alloc too large")
diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go
index 814c323634..addfa6faac 100644
--- a/src/runtime/traceback.go
+++ b/src/runtime/traceback.go
@@ -6,6 +6,7 @@ package runtime
import (
"internal/bytealg"
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -21,41 +22,6 @@ import (
const usesLR = sys.MinFrameSize > 0
-// Traceback over the deferred function calls.
-// Report them like calls that have been invoked but not started executing yet.
-func tracebackdefers(gp *g, callback func(*stkframe, unsafe.Pointer) bool, v unsafe.Pointer) {
- var frame stkframe
- for d := gp._defer; d != nil; d = d.link {
- fn := d.fn
- if fn == nil {
- // Defer of nil function. Args don't matter.
- frame.pc = 0
- frame.fn = funcInfo{}
- frame.argp = 0
- frame.arglen = 0
- frame.argmap = nil
- } else {
- frame.pc = fn.fn
- f := findfunc(frame.pc)
- if !f.valid() {
- print("runtime: unknown pc in defer ", hex(frame.pc), "\n")
- throw("unknown pc")
- }
- frame.fn = f
- frame.argp = uintptr(deferArgs(d))
- var ok bool
- frame.arglen, frame.argmap, ok = getArgInfoFast(f, true)
- if !ok {
- frame.arglen, frame.argmap = getArgInfo(&frame, f, true, fn)
- }
- }
- frame.continpc = frame.pc
- if !callback((*stkframe)(noescape(unsafe.Pointer(&frame))), v) {
- return
- }
- }
-}
-
// Generic traceback. Handles runtime stack prints (pcbuf == nil),
// the runtime.Callers function (pcbuf != nil), as well as the garbage
// collector (callback != nil). A little clunky to merge these, but avoids
@@ -126,7 +92,7 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
frame.lr = 0
} else {
frame.pc = uintptr(*(*uintptr)(unsafe.Pointer(frame.sp)))
- frame.sp += sys.PtrSize
+ frame.sp += goarch.PtrSize
}
}
@@ -207,7 +173,7 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
frame.fp = frame.sp + uintptr(funcspdelta(f, frame.pc, &cache))
if !usesLR {
// On x86, call instruction pushes return PC before entering new function.
- frame.fp += sys.PtrSize
+ frame.fp += goarch.PtrSize
}
}
var flr funcInfo
@@ -248,7 +214,7 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
}
} else {
if frame.lr == 0 {
- lrPtr = frame.fp - sys.PtrSize
+ lrPtr = frame.fp - goarch.PtrSize
frame.lr = uintptr(*(*uintptr)(unsafe.Pointer(lrPtr)))
}
}
@@ -279,7 +245,7 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
frame.varp = frame.fp
if !usesLR {
// On x86, call instruction pushes return PC before entering new function.
- frame.varp -= sys.PtrSize
+ frame.varp -= goarch.PtrSize
}
// For architectures with frame pointers, if there's
@@ -300,7 +266,7 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
// And it happens to end up mimicking the x86 layout.
// Other architectures may make different decisions.
if frame.varp > frame.sp && framepointer_enabled {
- frame.varp -= sys.PtrSize
+ frame.varp -= goarch.PtrSize
}
// Derive size of arguments.
@@ -603,7 +569,7 @@ func printArgs(f funcInfo, argp unsafe.Pointer) {
// mask out irrelavant bits
if sz < 8 {
shift := 64 - sz*8
- if sys.BigEndian {
+ if goarch.BigEndian {
x = x >> shift
} else {
x = x << shift >> shift
@@ -700,16 +666,16 @@ func getArgInfo(frame *stkframe, f funcInfo, needArgMap bool, ctxt *funcval) (ar
// Figure out whether the return values are valid.
// Reflect will update this value after it copies
// in the return values.
- retValid = *(*bool)(unsafe.Pointer(arg0 + 4*sys.PtrSize))
+ retValid = *(*bool)(unsafe.Pointer(arg0 + 4*goarch.PtrSize))
}
if mv.fn != f.entry {
print("runtime: confused by ", funcname(f), "\n")
throw("reflect mismatch")
}
bv := mv.stack
- arglen = uintptr(bv.n * sys.PtrSize)
+ arglen = uintptr(bv.n * goarch.PtrSize)
if !retValid {
- arglen = uintptr(mv.argLen) &^ (sys.PtrSize - 1)
+ arglen = uintptr(mv.argLen) &^ (goarch.PtrSize - 1)
}
argmap = bv
}
@@ -1045,8 +1011,8 @@ func tracebackothers(me *g) {
// for debugging purposes. If the address bad is included in the
// hexdumped range, it will mark it as well.
func tracebackHexdump(stk stack, frame *stkframe, bad uintptr) {
- const expand = 32 * sys.PtrSize
- const maxExpand = 256 * sys.PtrSize
+ const expand = 32 * goarch.PtrSize
+ const maxExpand = 256 * goarch.PtrSize
// Start around frame.sp.
lo, hi := frame.sp, frame.sp
// Expand to include frame.fp.
diff --git a/src/runtime/type.go b/src/runtime/type.go
index 4039273695..ad01d5095e 100644
--- a/src/runtime/type.go
+++ b/src/runtime/type.go
@@ -6,7 +6,10 @@
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
// tflag is documented in reflect/type.go.
//
@@ -262,7 +265,7 @@ func (t *_type) textOff(off textOff) unsafe.Pointer {
if off == -1 {
// -1 is the sentinel value for unreachable code.
// See cmd/link/internal/ld/data.go:relocsym.
- return unsafe.Pointer(funcPC(unreachableMethod))
+ return unsafe.Pointer(abi.FuncPCABIInternal(unreachableMethod))
}
base := uintptr(unsafe.Pointer(t))
var md *moduledata
diff --git a/src/runtime/wincallback.go b/src/runtime/wincallback.go
index a7a787d8f6..73f1e567ce 100644
--- a/src/runtime/wincallback.go
+++ b/src/runtime/wincallback.go
@@ -33,7 +33,7 @@ func genasm386Amd64() {
// CALL instruction in runtime·callbackasm. This determines
// which Go callback function is executed later on.
-TEXT runtime·callbackasm<ABIInternal>(SB),7,$0
+TEXT runtime·callbackasm(SB),7,$0
`)
for i := 0; i < maxCallback; i++ {
buf.WriteString("\tCALL\truntime·callbackasm1(SB)\n")
@@ -61,7 +61,7 @@ func genasmArm() {
// It then calls the Go implementation for that callback.
#include "textflag.h"
-TEXT runtime·callbackasm<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·callbackasm(SB),NOSPLIT|NOFRAME,$0
`)
for i := 0; i < maxCallback; i++ {
buf.WriteString(fmt.Sprintf("\tMOVW\t$%d, R12\n", i))
@@ -89,7 +89,7 @@ func genasmArm64() {
// It then calls the Go implementation for that callback.
#include "textflag.h"
-TEXT runtime·callbackasm<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·callbackasm(SB),NOSPLIT|NOFRAME,$0
`)
for i := 0; i < maxCallback; i++ {
buf.WriteString(fmt.Sprintf("\tMOVD\t$%d, R12\n", i))
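
wincallback.go is a generator: it emits one TEXT symbol followed by maxCallback identical entries (a CALL on 386/amd64, a move of the index into R12 plus a branch on the ARM ports), and the zcallback_windows*.s files below are its checked-in output, which is why the same <ABIInternal> removal shows up in both places. A trimmed, runnable sketch of the pattern:

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        const maxCallback = 4 // stand-in for the real, much larger constant
        var buf bytes.Buffer
        buf.WriteString("TEXT runtime·callbackasm(SB),7,$0\n")
        for i := 0; i < maxCallback; i++ {
            buf.WriteString("\tCALL\truntime·callbackasm1(SB)\n")
        }
        fmt.Print(buf.String())
    }
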
diff --git a/src/runtime/zcallback_windows.s b/src/runtime/zcallback_windows.s
index e451c2b9d0..561527c90d 100644
--- a/src/runtime/zcallback_windows.s
+++ b/src/runtime/zcallback_windows.s
@@ -11,7 +11,7 @@
// CALL instruction in runtime·callbackasm. This determines
// which Go callback function is executed later on.
-TEXT runtime·callbackasm<ABIInternal>(SB),7,$0
+TEXT runtime·callbackasm(SB),7,$0
CALL runtime·callbackasm1(SB)
CALL runtime·callbackasm1(SB)
CALL runtime·callbackasm1(SB)
diff --git a/src/runtime/zcallback_windows_arm.s b/src/runtime/zcallback_windows_arm.s
index a73a813acb..f943d84cbf 100644
--- a/src/runtime/zcallback_windows_arm.s
+++ b/src/runtime/zcallback_windows_arm.s
@@ -9,7 +9,7 @@
// It then calls the Go implementation for that callback.
#include "textflag.h"
-TEXT runtime·callbackasm<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·callbackasm(SB),NOSPLIT|NOFRAME,$0
MOVW $0, R12
B runtime·callbackasm1(SB)
MOVW $1, R12
diff --git a/src/runtime/zcallback_windows_arm64.s b/src/runtime/zcallback_windows_arm64.s
index 2a6bda0990..69fb05788c 100644
--- a/src/runtime/zcallback_windows_arm64.s
+++ b/src/runtime/zcallback_windows_arm64.s
@@ -9,7 +9,7 @@
// It then calls the Go implementation for that callback.
#include "textflag.h"
-TEXT runtime·callbackasm<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·callbackasm(SB),NOSPLIT|NOFRAME,$0
MOVD $0, R12
B runtime·callbackasm1(SB)
MOVD $1, R12
diff --git a/test/chan/perm.go b/test/chan/perm.go
index 4c94ab7ffa..04046723a4 100644
--- a/test/chan/perm.go
+++ b/test/chan/perm.go
@@ -66,5 +66,5 @@ func main() {
close(c)
close(cs)
close(cr) // ERROR "receive"
- close(n) // ERROR "invalid operation.*non-chan type|must be channel|not a channel"
+ close(n) // ERROR "invalid operation.*non-chan type|must be channel|non-channel"
}
diff --git a/test/codegen/arithmetic.go b/test/codegen/arithmetic.go
index a27a17f6e1..eb0f338036 100644
--- a/test/codegen/arithmetic.go
+++ b/test/codegen/arithmetic.go
@@ -202,7 +202,7 @@ func ConstDivs(n1 uint, n2 int) (uint, int) {
// amd64:"MOVQ\t[$]-1085102592571150095","IMULQ",-"IDIVQ"
// 386:"MOVL\t[$]-252645135","IMULL",-"IDIVL"
- // arm64:`MOVD`,`SMULH`,-`DIV`
+ // arm64:`SMULH`,-`DIV`
// arm:`MOVW`,`MUL`,-`.*udiv`
b := n2 / 17 // signed
@@ -266,7 +266,7 @@ func ConstMods(n1 uint, n2 int) (uint, int) {
// amd64:"MOVQ\t[$]-1085102592571150095","IMULQ",-"IDIVQ"
// 386:"MOVL\t[$]-252645135","IMULL",-"IDIVL"
- // arm64:`MOVD`,`SMULH`,-`DIV`
+ // arm64:`SMULH`,-`DIV`
// arm:`MOVW`,`MUL`,-`.*udiv`
b := n2 % 17 // signed
diff --git a/test/codegen/clobberdead.go b/test/codegen/clobberdead.go
index f8d964cba6..c490790bb6 100644
--- a/test/codegen/clobberdead.go
+++ b/test/codegen/clobberdead.go
@@ -1,6 +1,6 @@
// asmcheck -gcflags=-clobberdead
-// +build amd64
+// +build amd64 arm64
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
@@ -13,15 +13,18 @@ type T [2]*int // contain pointer, not SSA-able (so locals are not registerized)
var p1, p2, p3 T
func F() {
- // 3735936685 is 0xdeaddead
+ // 3735936685 is 0xdeaddead. On ARM64 R27 is REGTMP.
// clobber x, y at entry. not clobber z (stack object).
// amd64:`MOVL\t\$3735936685, ""\.x`, `MOVL\t\$3735936685, ""\.y`, -`MOVL\t\$3735936685, ""\.z`
+ // arm64:`MOVW\tR27, ""\.x`, `MOVW\tR27, ""\.y`, -`MOVW\tR27, ""\.z`
x, y, z := p1, p2, p3
addrTaken(&z)
// x is dead at the call (the value of x is loaded before the CALL), y is not
// amd64:`MOVL\t\$3735936685, ""\.x`, -`MOVL\t\$3735936685, ""\.y`
+ // arm64:`MOVW\tR27, ""\.x`, -`MOVW\tR27, ""\.y`
use(x)
// amd64:`MOVL\t\$3735936685, ""\.x`, `MOVL\t\$3735936685, ""\.y`
+ // arm64:`MOVW\tR27, ""\.x`, `MOVW\tR27, ""\.y`
use(y)
}
diff --git a/test/complit1.go b/test/complit1.go
index 7c2a4e2996..8cbcd63ee0 100644
--- a/test/complit1.go
+++ b/test/complit1.go
@@ -46,20 +46,20 @@ var (
_ = &T{0, 0, "", nil} // ok
_ = &T{i: 0, f: 0, s: "", next: {}} // ERROR "missing type in composite literal|omit types within composite literal"
_ = &T{0, 0, "", {}} // ERROR "missing type in composite literal|omit types within composite literal"
- _ = TP{i: 0, f: 0, s: "", next: {}} // ERROR "invalid composite literal type TP|omit types within composite literal"
+ _ = TP{i: 0, f: 0, s: ""} // ERROR "invalid composite literal type TP"
_ = &Ti{} // ERROR "invalid composite literal type Ti|expected.*type for composite literal"
)
type M map[T]T
var (
- _ = M{{i:1}: {i:2}}
- _ = M{T{i:1}: {i:2}}
- _ = M{{i:1}: T{i:2}}
- _ = M{T{i:1}: T{i:2}}
+ _ = M{{i: 1}: {i: 2}}
+ _ = M{T{i: 1}: {i: 2}}
+ _ = M{{i: 1}: T{i: 2}}
+ _ = M{T{i: 1}: T{i: 2}}
)
-type S struct { s [1]*M1 }
+type S struct{ s [1]*M1 }
type M1 map[S]int
-var _ = M1{{s:[1]*M1{&M1{{}:1}}}:2}
+var _ = M1{{s: [1]*M1{&M1{{}: 1}}}: 2}
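
The complit1.go edits are mostly gofmt spacing, but the M and M1 lines also exercise the rule that keys and elements of a composite literal which are themselves composite literals may omit their types. A small valid example with simplified stand-in types:

    package main

    import "fmt"

    type T struct{ i, f int }

    type M map[T]T

    // Both the key and the value elide their T type inside the literal.
    var m = M{{i: 1}: {i: 2}}

    func main() {
        fmt.Println(len(m)) // 1
    }
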
diff --git a/test/ddd1.go b/test/ddd1.go
index ad49b347f4..f7381b7c94 100644
--- a/test/ddd1.go
+++ b/test/ddd1.go
@@ -17,8 +17,8 @@ var (
_ = sum(1, 2, 3)
_ = sum()
_ = sum(1.0, 2.0)
- _ = sum(1.5) // ERROR "integer"
- _ = sum("hello") // ERROR ".hello. .type untyped string. as type int|incompatible"
+ _ = sum(1.5) // ERROR "1\.5 .untyped float constant. as int|integer"
+ _ = sum("hello") // ERROR ".hello. (.untyped string constant. as int|.type untyped string. as type int)|incompatible"
_ = sum([]int{1}) // ERROR "\[\]int{...}.*as type int|incompatible"
)
@@ -27,9 +27,9 @@ func tuple() (int, int, int) { return 1, 2, 3 }
var (
_ = sum(tuple())
- _ = sum(tuple()...) // ERROR "multiple-value"
+ _ = sum(tuple()...) // ERROR "\.{3} with 3-valued|multiple-value"
_ = sum3(tuple())
- _ = sum3(tuple()...) // ERROR "multiple-value" ERROR "invalid use of .*[.][.][.]"
+ _ = sum3(tuple()...) // ERROR "\.{3} in call to non-variadic|multiple-value|invalid use of .*[.][.][.]"
)
type T []T
@@ -60,5 +60,5 @@ func bad(args ...int) {
_ = [...]byte("foo") // ERROR "[.][.][.]"
_ = [...][...]int{{1,2,3},{4,5,6}} // ERROR "[.][.][.]"
- Foo(x...) // ERROR "invalid use of .*[.][.][.]"
+ Foo(x...) // ERROR "\.{3} in call to non-variadic|invalid use of .*[.][.][.]"
}
diff --git a/test/devirt.go b/test/devirt.go
index e0149d8229..d5c815222e 100644
--- a/test/devirt.go
+++ b/test/devirt.go
@@ -31,9 +31,8 @@ func main() {
panic("not 3")
}
- // Can't do types that aren't "direct" interfaces (yet).
r = indirectiface{3, 4, 5}
- if r.Value() != 12 {
+ if r.Value() != 12 { // ERROR "de-virtualizing call$"
panic("not 12")
}
}
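
The devirt.go change records that the compiler now also de-virtualizes calls through interfaces whose concrete type is not pointer-shaped and is therefore stored indirectly in the interface value (the removed comment called these non-"direct" interfaces). A simplified, hedged version of the shape the test checks, with stand-in names:

    package main

    type valuer interface{ Value() int }

    // triple is three words wide, so an interface holding one stores it
    // indirectly rather than in the interface's data word.
    type triple struct{ a, b, c int }

    func (t triple) Value() int { return t.a + t.b + t.c }

    func main() {
        var r valuer = triple{3, 4, 5}
        // The concrete type of r is known here, so the call can be compiled
        // as a direct call to triple.Value instead of an interface dispatch.
        if r.Value() != 12 {
            panic("not 12")
        }
    }
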
diff --git a/test/escape2.go b/test/escape2.go
index b9b723d866..e3e5904cde 100644
--- a/test/escape2.go
+++ b/test/escape2.go
@@ -59,7 +59,7 @@ func foo8(xx, yy *int) int { // ERROR "xx does not escape$" "yy does not escape$
return *xx
}
-func foo9(xx, yy *int) *int { // ERROR "leaking param: xx to result ~r2 level=0$" "leaking param: yy to result ~r2 level=0$"
+func foo9(xx, yy *int) *int { // ERROR "leaking param: xx to result ~r0 level=0$" "leaking param: yy to result ~r0 level=0$"
xx = yy
return xx
}
@@ -343,11 +343,11 @@ func indaddr1(x int) *int { // ERROR "moved to heap: x$"
return &x
}
-func indaddr2(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func indaddr2(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
return *&x
}
-func indaddr3(x *int32) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func indaddr3(x *int32) *int { // ERROR "leaking param: x to result ~r0 level=0$"
return *(**int)(unsafe.Pointer(&x))
}
@@ -374,11 +374,11 @@ func float64bitsptr(f float64) *uint64 { // ERROR "moved to heap: f$"
return (*uint64)(unsafe.Pointer(&f))
}
-func float64ptrbitsptr(f *float64) *uint64 { // ERROR "leaking param: f to result ~r1 level=0$"
+func float64ptrbitsptr(f *float64) *uint64 { // ERROR "leaking param: f to result ~r0 level=0$"
return (*uint64)(unsafe.Pointer(f))
}
-func typesw(i interface{}) *int { // ERROR "leaking param: i to result ~r1 level=0$"
+func typesw(i interface{}) *int { // ERROR "leaking param: i to result ~r0 level=0$"
switch val := i.(type) {
case *int:
return val
@@ -389,7 +389,7 @@ func typesw(i interface{}) *int { // ERROR "leaking param: i to result ~r1 level
return nil
}
-func exprsw(i *int) *int { // ERROR "leaking param: i to result ~r1 level=0$"
+func exprsw(i *int) *int { // ERROR "leaking param: i to result ~r0 level=0$"
switch j := i; *j + 110 {
case 12:
return j
@@ -401,7 +401,7 @@ func exprsw(i *int) *int { // ERROR "leaking param: i to result ~r1 level=0$"
}
// assigning to an array element is like assigning to the array
-func foo60(i *int) *int { // ERROR "leaking param: i to result ~r1 level=0$"
+func foo60(i *int) *int { // ERROR "leaking param: i to result ~r0 level=0$"
var a [12]*int
a[0] = i
return a[1]
@@ -414,7 +414,7 @@ func foo60a(i *int) *int { // ERROR "i does not escape$"
}
// assigning to a struct field is like assigning to the struct
-func foo61(i *int) *int { // ERROR "leaking param: i to result ~r1 level=0$"
+func foo61(i *int) *int { // ERROR "leaking param: i to result ~r0 level=0$"
type S struct {
a, b *int
}
@@ -611,11 +611,11 @@ func foo74c() {
}
}
-func myprint(y *int, x ...interface{}) *int { // ERROR "leaking param: y to result ~r2 level=0$" "x does not escape$"
+func myprint(y *int, x ...interface{}) *int { // ERROR "leaking param: y to result ~r0 level=0$" "x does not escape$"
return y
}
-func myprint1(y *int, x ...interface{}) *interface{} { // ERROR "leaking param: x to result ~r2 level=0$" "y does not escape$"
+func myprint1(y *int, x ...interface{}) *interface{} { // ERROR "leaking param: x to result ~r0 level=0$" "y does not escape$"
return &x[0]
}
@@ -667,13 +667,13 @@ func foo76e() {
func foo76f() {
for {
// TODO: This one really only escapes its scope, but we don't distinguish yet.
- defer myprint(nil, 1, 2, 3) // ERROR "... argument escapes to heap$" "1 escapes to heap$" "2 escapes to heap$" "3 escapes to heap$"
+ defer myprint(nil, 1, 2, 3) // ERROR "... argument does not escape$" "1 escapes to heap$" "2 escapes to heap$" "3 escapes to heap$"
}
}
func foo76g() {
for {
- defer myprint1(nil, 1, 2, 3) // ERROR "... argument escapes to heap$" "1 escapes to heap$" "2 escapes to heap$" "3 escapes to heap$"
+ defer myprint1(nil, 1, 2, 3) // ERROR "... argument does not escape$" "1 escapes to heap$" "2 escapes to heap$" "3 escapes to heap$"
}
}
@@ -770,7 +770,7 @@ func foo91(x *int) map[*int]*int { // ERROR "leaking param: x$"
return map[*int]*int{x: nil} // ERROR "map\[\*int\]\*int{...} escapes to heap$"
}
-func foo92(x *int) [2]*int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo92(x *int) [2]*int { // ERROR "leaking param: x to result ~r0 level=0$"
return [2]*int{x, nil}
}
@@ -783,7 +783,7 @@ func foo93(c chan *int) *int { // ERROR "c does not escape$"
}
// does not leak m
-func foo94(m map[*int]*int, b bool) *int { // ERROR "leaking param: m to result ~r2 level=1"
+func foo94(m map[*int]*int, b bool) *int { // ERROR "leaking param: m to result ~r0 level=1"
for k, v := range m {
if b {
return k
@@ -799,12 +799,12 @@ func foo95(m map[*int]*int, x *int) { // ERROR "m does not escape$" "leaking par
}
// does not leak m but does leak content
-func foo96(m []*int) *int { // ERROR "leaking param: m to result ~r1 level=1"
+func foo96(m []*int) *int { // ERROR "leaking param: m to result ~r0 level=1"
return m[0]
}
// does leak m
-func foo97(m [1]*int) *int { // ERROR "leaking param: m to result ~r1 level=0$"
+func foo97(m [1]*int) *int { // ERROR "leaking param: m to result ~r0 level=0$"
return m[0]
}
@@ -814,12 +814,12 @@ func foo98(m map[int]*int) *int { // ERROR "m does not escape$"
}
// does leak m
-func foo99(m *[1]*int) []*int { // ERROR "leaking param: m to result ~r1 level=0$"
+func foo99(m *[1]*int) []*int { // ERROR "leaking param: m to result ~r0 level=0$"
return m[:]
}
// does not leak m
-func foo100(m []*int) *int { // ERROR "leaking param: m to result ~r1 level=1"
+func foo100(m []*int) *int { // ERROR "leaking param: m to result ~r0 level=1"
for _, v := range m {
return v
}
@@ -827,7 +827,7 @@ func foo100(m []*int) *int { // ERROR "leaking param: m to result ~r1 level=1"
}
// does leak m
-func foo101(m [1]*int) *int { // ERROR "leaking param: m to result ~r1 level=0$"
+func foo101(m [1]*int) *int { // ERROR "leaking param: m to result ~r0 level=0$"
for _, v := range m {
return v
}
@@ -890,27 +890,27 @@ func foo110(x *int) *int { // ERROR "leaking param: x$"
return m[nil]
}
-func foo111(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0"
+func foo111(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0"
m := []*int{x} // ERROR "\[\]\*int{...} does not escape$"
return m[0]
}
-func foo112(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo112(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
m := [1]*int{x}
return m[0]
}
-func foo113(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo113(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
m := Bar{ii: x}
return m.ii
}
-func foo114(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo114(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
m := &Bar{ii: x} // ERROR "&Bar{...} does not escape$"
return m.ii
}
-func foo115(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo115(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
return (*int)(unsafe.Pointer(uintptr(unsafe.Pointer(x)) + 1))
}
@@ -1148,16 +1148,16 @@ L100:
func foo121() {
for i := 0; i < 10; i++ {
- defer myprint(nil, i) // ERROR "... argument escapes to heap$" "i escapes to heap$"
- go myprint(nil, i) // ERROR "... argument escapes to heap$" "i escapes to heap$"
+ defer myprint(nil, i) // ERROR "... argument does not escape$" "i escapes to heap$"
+ go myprint(nil, i) // ERROR "... argument does not escape$" "i escapes to heap$"
}
}
// same as foo121 but check across import
func foo121b() {
for i := 0; i < 10; i++ {
- defer fmt.Printf("%d", i) // ERROR "... argument escapes to heap$" "i escapes to heap$"
- go fmt.Printf("%d", i) // ERROR "... argument escapes to heap$" "i escapes to heap$"
+ defer fmt.Printf("%d", i) // ERROR "... argument does not escape$" "i escapes to heap$"
+ go fmt.Printf("%d", i) // ERROR "... argument does not escape$" "i escapes to heap$"
}
}
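
The bulk of the churn in escape2.go above and in the escape tests that follow is a renaming of result nodes in the diagnostics: results used to share one numbering with the parameters (so the single result of foo9(xx, yy *int) was ~r2) and are now numbered on their own starting at ~r0. Separately, the variadic ... argument of these deferred and go'd calls is now reported as not escaping, while the individual interface values still escape. A minimal file to see the new wording with go build -gcflags=-m (file and identifier names are arbitrary):

    // escape.go: build with `go build -gcflags=-m escape.go`
    package escape

    // With the new numbering the compiler reports, for example:
    //   leaking param: a to result ~r0 level=0
    //   leaking param: b to result ~r0 level=0
    // (exact wording depends on the compiler version)
    func pick(which bool, a, b *int) *int {
        if which {
            return a
        }
        return b
    }
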
diff --git a/test/escape2n.go b/test/escape2n.go
index 7c8208aa73..57cc1a0163 100644
--- a/test/escape2n.go
+++ b/test/escape2n.go
@@ -59,7 +59,7 @@ func foo8(xx, yy *int) int { // ERROR "xx does not escape$" "yy does not escape$
return *xx
}
-func foo9(xx, yy *int) *int { // ERROR "leaking param: xx to result ~r2 level=0$" "leaking param: yy to result ~r2 level=0$"
+func foo9(xx, yy *int) *int { // ERROR "leaking param: xx to result ~r0 level=0$" "leaking param: yy to result ~r0 level=0$"
xx = yy
return xx
}
@@ -343,11 +343,11 @@ func indaddr1(x int) *int { // ERROR "moved to heap: x$"
return &x
}
-func indaddr2(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func indaddr2(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
return *&x
}
-func indaddr3(x *int32) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func indaddr3(x *int32) *int { // ERROR "leaking param: x to result ~r0 level=0$"
return *(**int)(unsafe.Pointer(&x))
}
@@ -374,11 +374,11 @@ func float64bitsptr(f float64) *uint64 { // ERROR "moved to heap: f$"
return (*uint64)(unsafe.Pointer(&f))
}
-func float64ptrbitsptr(f *float64) *uint64 { // ERROR "leaking param: f to result ~r1 level=0$"
+func float64ptrbitsptr(f *float64) *uint64 { // ERROR "leaking param: f to result ~r0 level=0$"
return (*uint64)(unsafe.Pointer(f))
}
-func typesw(i interface{}) *int { // ERROR "leaking param: i to result ~r1 level=0$"
+func typesw(i interface{}) *int { // ERROR "leaking param: i to result ~r0 level=0$"
switch val := i.(type) {
case *int:
return val
@@ -389,7 +389,7 @@ func typesw(i interface{}) *int { // ERROR "leaking param: i to result ~r1 level
return nil
}
-func exprsw(i *int) *int { // ERROR "leaking param: i to result ~r1 level=0$"
+func exprsw(i *int) *int { // ERROR "leaking param: i to result ~r0 level=0$"
switch j := i; *j + 110 {
case 12:
return j
@@ -401,7 +401,7 @@ func exprsw(i *int) *int { // ERROR "leaking param: i to result ~r1 level=0$"
}
// assigning to an array element is like assigning to the array
-func foo60(i *int) *int { // ERROR "leaking param: i to result ~r1 level=0$"
+func foo60(i *int) *int { // ERROR "leaking param: i to result ~r0 level=0$"
var a [12]*int
a[0] = i
return a[1]
@@ -414,7 +414,7 @@ func foo60a(i *int) *int { // ERROR "i does not escape$"
}
// assigning to a struct field is like assigning to the struct
-func foo61(i *int) *int { // ERROR "leaking param: i to result ~r1 level=0$"
+func foo61(i *int) *int { // ERROR "leaking param: i to result ~r0 level=0$"
type S struct {
a, b *int
}
@@ -611,11 +611,11 @@ func foo74c() {
}
}
-func myprint(y *int, x ...interface{}) *int { // ERROR "leaking param: y to result ~r2 level=0$" "x does not escape$"
+func myprint(y *int, x ...interface{}) *int { // ERROR "leaking param: y to result ~r0 level=0$" "x does not escape$"
return y
}
-func myprint1(y *int, x ...interface{}) *interface{} { // ERROR "leaking param: x to result ~r2 level=0$" "y does not escape$"
+func myprint1(y *int, x ...interface{}) *interface{} { // ERROR "leaking param: x to result ~r0 level=0$" "y does not escape$"
return &x[0]
}
@@ -667,13 +667,13 @@ func foo76e() {
func foo76f() {
for {
// TODO: This one really only escapes its scope, but we don't distinguish yet.
- defer myprint(nil, 1, 2, 3) // ERROR "... argument escapes to heap$" "1 escapes to heap$" "2 escapes to heap$" "3 escapes to heap$"
+ defer myprint(nil, 1, 2, 3) // ERROR "... argument does not escape$" "1 escapes to heap$" "2 escapes to heap$" "3 escapes to heap$"
}
}
func foo76g() {
for {
- defer myprint1(nil, 1, 2, 3) // ERROR "... argument escapes to heap$" "1 escapes to heap$" "2 escapes to heap$" "3 escapes to heap$"
+ defer myprint1(nil, 1, 2, 3) // ERROR "... argument does not escape$" "1 escapes to heap$" "2 escapes to heap$" "3 escapes to heap$"
}
}
@@ -770,7 +770,7 @@ func foo91(x *int) map[*int]*int { // ERROR "leaking param: x$"
return map[*int]*int{x: nil} // ERROR "map\[\*int\]\*int{...} escapes to heap$"
}
-func foo92(x *int) [2]*int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo92(x *int) [2]*int { // ERROR "leaking param: x to result ~r0 level=0$"
return [2]*int{x, nil}
}
@@ -783,7 +783,7 @@ func foo93(c chan *int) *int { // ERROR "c does not escape$"
}
// does not leak m
-func foo94(m map[*int]*int, b bool) *int { // ERROR "leaking param: m to result ~r2 level=1"
+func foo94(m map[*int]*int, b bool) *int { // ERROR "leaking param: m to result ~r0 level=1"
for k, v := range m {
if b {
return k
@@ -799,12 +799,12 @@ func foo95(m map[*int]*int, x *int) { // ERROR "m does not escape$" "leaking par
}
// does not leak m but does leak content
-func foo96(m []*int) *int { // ERROR "leaking param: m to result ~r1 level=1"
+func foo96(m []*int) *int { // ERROR "leaking param: m to result ~r0 level=1"
return m[0]
}
// does leak m
-func foo97(m [1]*int) *int { // ERROR "leaking param: m to result ~r1 level=0$"
+func foo97(m [1]*int) *int { // ERROR "leaking param: m to result ~r0 level=0$"
return m[0]
}
@@ -814,12 +814,12 @@ func foo98(m map[int]*int) *int { // ERROR "m does not escape$"
}
// does leak m
-func foo99(m *[1]*int) []*int { // ERROR "leaking param: m to result ~r1 level=0$"
+func foo99(m *[1]*int) []*int { // ERROR "leaking param: m to result ~r0 level=0$"
return m[:]
}
// does not leak m
-func foo100(m []*int) *int { // ERROR "leaking param: m to result ~r1 level=1"
+func foo100(m []*int) *int { // ERROR "leaking param: m to result ~r0 level=1"
for _, v := range m {
return v
}
@@ -827,7 +827,7 @@ func foo100(m []*int) *int { // ERROR "leaking param: m to result ~r1 level=1"
}
// does leak m
-func foo101(m [1]*int) *int { // ERROR "leaking param: m to result ~r1 level=0$"
+func foo101(m [1]*int) *int { // ERROR "leaking param: m to result ~r0 level=0$"
for _, v := range m {
return v
}
@@ -890,27 +890,27 @@ func foo110(x *int) *int { // ERROR "leaking param: x$"
return m[nil]
}
-func foo111(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0"
+func foo111(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0"
m := []*int{x} // ERROR "\[\]\*int{...} does not escape$"
return m[0]
}
-func foo112(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo112(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
m := [1]*int{x}
return m[0]
}
-func foo113(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo113(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
m := Bar{ii: x}
return m.ii
}
-func foo114(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo114(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
m := &Bar{ii: x} // ERROR "&Bar{...} does not escape$"
return m.ii
}
-func foo115(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo115(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
return (*int)(unsafe.Pointer(uintptr(unsafe.Pointer(x)) + 1))
}
@@ -1148,16 +1148,16 @@ L100:
func foo121() {
for i := 0; i < 10; i++ {
- defer myprint(nil, i) // ERROR "... argument escapes to heap$" "i escapes to heap$"
- go myprint(nil, i) // ERROR "... argument escapes to heap$" "i escapes to heap$"
+ defer myprint(nil, i) // ERROR "... argument does not escape$" "i escapes to heap$"
+ go myprint(nil, i) // ERROR "... argument does not escape$" "i escapes to heap$"
}
}
// same as foo121 but check across import
func foo121b() {
for i := 0; i < 10; i++ {
- defer fmt.Printf("%d", i) // ERROR "... argument escapes to heap$" "i escapes to heap$"
- go fmt.Printf("%d", i) // ERROR "... argument escapes to heap$" "i escapes to heap$"
+ defer fmt.Printf("%d", i) // ERROR "... argument does not escape$" "i escapes to heap$"
+ go fmt.Printf("%d", i) // ERROR "... argument does not escape$" "i escapes to heap$"
}
}
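
Two systematic rewrites run through the escape-analysis hunks above. First, unnamed results are now numbered on their own starting at ~r0 instead of continuing the parameter numbering, so a one-parameter function reports "leaking param: p to result ~r0" where it previously said "~r1". Second, for defer and go statements the temporary slice built for a "..." call is reported as "... argument does not escape" even inside a loop, while the individual arguments still escape. A minimal sketch of the numbering change (the function is illustrative, not part of the patch; the diagnostic text is taken from the hunks above):

	package p

	// Compiled with -m, this now reports
	//   leaking param: p to result ~r0 level=0
	// where older compilers numbered the result ~r1, counting on from
	// the parameter.
	func identity(p *int) *int {
		return p
	}
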
diff --git a/test/escape5.go b/test/escape5.go
index 82be2c38e7..089130dad5 100644
--- a/test/escape5.go
+++ b/test/escape5.go
@@ -22,19 +22,19 @@ func leaktoret(p *int) *int { // ERROR "leaking param: p to result"
return p
}
-func leaktoret2(p *int) (*int, *int) { // ERROR "leaking param: p to result ~r1" "leaking param: p to result ~r2"
+func leaktoret2(p *int) (*int, *int) { // ERROR "leaking param: p to result ~r0" "leaking param: p to result ~r1"
return p, p
}
-func leaktoret22(p, q *int) (*int, *int) { // ERROR "leaking param: p to result ~r2" "leaking param: q to result ~r3"
+func leaktoret22(p, q *int) (*int, *int) { // ERROR "leaking param: p to result ~r0" "leaking param: q to result ~r1"
return p, q
}
-func leaktoret22b(p, q *int) (*int, *int) { // ERROR "leaking param: p to result ~r3" "leaking param: q to result ~r2"
+func leaktoret22b(p, q *int) (*int, *int) { // ERROR "leaking param: p to result ~r1" "leaking param: q to result ~r0"
return leaktoret22(q, p)
}
-func leaktoret22c(p, q *int) (*int, *int) { // ERROR "leaking param: p to result ~r3" "leaking param: q to result ~r2"
+func leaktoret22c(p, q *int) (*int, *int) { // ERROR "leaking param: p to result ~r1" "leaking param: q to result ~r0"
r, s := leaktoret22(q, p)
return r, s
}
@@ -173,15 +173,14 @@ type U int
func (*U) M() {}
func (_ *U) N() {}
-func _() {
+func fbad24305a() {
var u U
u.M()
u.N()
}
-func fbad24305() {
- // BAD u should not be heap allocated
- var u U // ERROR "moved to heap: u"
+func fbad24305b() {
+ var u U
(*U).M(&u)
(*U).N(&u)
}
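
Two smaller patterns appear in the escape5.go hunk above and recur below: blank functions (func _()) are given real names such as f1 or fbad24305a, which keeps their bodies and errorcheck annotations attached to a nameable function under the reworked front end (the patch does not state the motivation, so take that as an inference); and the fbad24305 case drops both its "BAD u should not be heap allocated" comment and its "moved to heap" annotation, meaning a method-expression call no longer forces the receiver off the stack. A sketch of the shape that improved (illustrative file, not part of the patch):

	package p

	type U int

	func (*U) M() {}

	// Before this change the test flagged u as "moved to heap" and
	// marked that as a known deficiency; the annotation is gone now,
	// so the explicit method-expression call leaves u on the stack.
	func callViaMethodExpression() {
		var u U
		(*U).M(&u)
	}
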
diff --git a/test/escape_array.go b/test/escape_array.go
index 0d07fd861f..83062c9436 100644
--- a/test/escape_array.go
+++ b/test/escape_array.go
@@ -12,15 +12,15 @@ var Ssink *string
type U [2]*string
-func bar(a, b *string) U { // ERROR "leaking param: a to result ~r2 level=0$" "leaking param: b to result ~r2 level=0$"
+func bar(a, b *string) U { // ERROR "leaking param: a to result ~r0 level=0$" "leaking param: b to result ~r0 level=0$"
return U{a, b}
}
-func foo(x U) U { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo(x U) U { // ERROR "leaking param: x to result ~r0 level=0$"
return U{x[1], x[0]}
}
-func bff(a, b *string) U { // ERROR "leaking param: a to result ~r2 level=0$" "leaking param: b to result ~r2 level=0$"
+func bff(a, b *string) U { // ERROR "leaking param: a to result ~r0 level=0$" "leaking param: b to result ~r0 level=0$"
return foo(foo(bar(a, b)))
}
@@ -41,27 +41,27 @@ func tbff2() *string {
return u[1]
}
-func car(x U) *string { // ERROR "leaking param: x to result ~r1 level=0$"
+func car(x U) *string { // ERROR "leaking param: x to result ~r0 level=0$"
return x[0]
}
// BAD: need fine-grained analysis to track x[0] and x[1] differently.
-func fun(x U, y *string) *string { // ERROR "leaking param: x to result ~r2 level=0$" "leaking param: y to result ~r2 level=0$"
+func fun(x U, y *string) *string { // ERROR "leaking param: x to result ~r0 level=0$" "leaking param: y to result ~r0 level=0$"
x[0] = y
return x[1]
}
-func fup(x *U, y *string) *string { // ERROR "leaking param: x to result ~r2 level=1$" "leaking param: y$"
+func fup(x *U, y *string) *string { // ERROR "leaking param: x to result ~r0 level=1$" "leaking param: y$"
x[0] = y // leaking y to heap is intended
return x[1]
}
-func fum(x *U, y **string) *string { // ERROR "leaking param: x to result ~r2 level=1$" "leaking param content: y$"
+func fum(x *U, y **string) *string { // ERROR "leaking param: x to result ~r0 level=1$" "leaking param content: y$"
x[0] = *y
return x[1]
}
-func fuo(x *U, y *U) *string { // ERROR "leaking param: x to result ~r2 level=1$" "leaking param content: y$"
+func fuo(x *U, y *U) *string { // ERROR "leaking param: x to result ~r0 level=1$" "leaking param content: y$"
x[0] = y[0]
return x[1]
}
diff --git a/test/escape_calls.go b/test/escape_calls.go
index 9e1db5426e..aa7c7f516c 100644
--- a/test/escape_calls.go
+++ b/test/escape_calls.go
@@ -11,7 +11,7 @@
package foo
-func f(buf []byte) []byte { // ERROR "leaking param: buf to result ~r1 level=0$"
+func f(buf []byte) []byte { // ERROR "leaking param: buf to result ~r0 level=0$"
return buf
}
diff --git a/test/escape_closure.go b/test/escape_closure.go
index 9152319fe0..bd6c025476 100644
--- a/test/escape_closure.go
+++ b/test/escape_closure.go
@@ -44,7 +44,7 @@ func ClosureCallArgs3() {
func ClosureCallArgs4() {
x := 0
- _ = func(p *int) *int { // ERROR "leaking param: p to result ~r1" "func literal does not escape"
+ _ = func(p *int) *int { // ERROR "leaking param: p to result ~r0" "func literal does not escape"
return p
}(&x)
}
@@ -111,7 +111,7 @@ func ClosureCallArgs11() {
func ClosureCallArgs12() {
x := 0
- defer func(p *int) *int { // ERROR "leaking param: p to result ~r1" "func literal does not escape"
+ defer func(p *int) *int { // ERROR "leaking param: p to result ~r0" "func literal does not escape"
return p
}(&x)
}
@@ -126,7 +126,7 @@ func ClosureCallArgs13() {
func ClosureCallArgs14() {
x := 0
p := &x
- _ = func(p **int) *int { // ERROR "leaking param: p to result ~r1 level=1" "func literal does not escape"
+ _ = func(p **int) *int { // ERROR "leaking param: p to result ~r0 level=1" "func literal does not escape"
return *p
}(&p)
}
@@ -145,7 +145,7 @@ func ClosureLeak1(s string) string { // ERROR "s does not escape"
}
// See #14409 -- returning part of captured var leaks it.
-func ClosureLeak1a(a ...string) string { // ERROR "leaking param: a to result ~r1 level=1$"
+func ClosureLeak1a(a ...string) string { // ERROR "leaking param: a to result ~r0 level=1$"
return func() string { // ERROR "func literal does not escape"
return a[0]
}()
diff --git a/test/escape_goto.go b/test/escape_goto.go
index f024a9afe3..90da5a2151 100644
--- a/test/escape_goto.go
+++ b/test/escape_goto.go
@@ -10,7 +10,7 @@ package escape
var x bool
-func _() {
+func f1() {
var p *int
loop:
if x {
@@ -22,7 +22,7 @@ loop:
_ = p
}
-func _() {
+func f2() {
var p *int
if x {
loop:
@@ -33,7 +33,7 @@ func _() {
_ = p
}
-func _() {
+func f3() {
var p *int
if x {
loop:
diff --git a/test/escape_param.go b/test/escape_param.go
index dc93f689cf..b630bae88f 100644
--- a/test/escape_param.go
+++ b/test/escape_param.go
@@ -16,7 +16,7 @@ func zero() int { return 0 }
var sink interface{}
// in -> out
-func param0(p *int) *int { // ERROR "leaking param: p to result ~r1"
+func param0(p *int) *int { // ERROR "leaking param: p to result ~r0"
return p
}
@@ -31,7 +31,7 @@ func caller0b() {
}
// in, in -> out, out
-func param1(p1, p2 *int) (*int, *int) { // ERROR "leaking param: p1 to result ~r2" "leaking param: p2 to result ~r3"
+func param1(p1, p2 *int) (*int, *int) { // ERROR "leaking param: p1 to result ~r0" "leaking param: p2 to result ~r1"
return p1, p2
}
@@ -222,7 +222,7 @@ func caller8() {
}
// *in -> out
-func param9(p ***int) **int { // ERROR "leaking param: p to result ~r1 level=1"
+func param9(p ***int) **int { // ERROR "leaking param: p to result ~r0 level=1"
return *p
}
@@ -241,7 +241,7 @@ func caller9b() {
}
// **in -> out
-func param10(p ***int) *int { // ERROR "leaking param: p to result ~r1 level=2"
+func param10(p ***int) *int { // ERROR "leaking param: p to result ~r0 level=2"
return **p
}
@@ -436,6 +436,6 @@ func param14a(x [4]*int) interface{} { // ERROR "leaking param: x$"
// Convert to a direct interface, does not need an allocation.
// So x only leaks to result.
-func param14b(x *int) interface{} { // ERROR "leaking param: x to result ~r1 level=0"
+func param14b(x *int) interface{} { // ERROR "leaking param: x to result ~r0 level=0"
return x
}
diff --git a/test/escape_runtime_atomic.go b/test/escape_runtime_atomic.go
index 62e8fede27..30d1d0c0c1 100644
--- a/test/escape_runtime_atomic.go
+++ b/test/escape_runtime_atomic.go
@@ -13,8 +13,8 @@ import (
"unsafe"
)
-// BAD: should always be "leaking param: addr to result ~r1 level=1$".
-func Loadp(addr unsafe.Pointer) unsafe.Pointer { // ERROR "leaking param: addr( to result ~r1 level=1)?$"
+// BAD: should always be "leaking param: addr to result ~r0 level=1$".
+func Loadp(addr unsafe.Pointer) unsafe.Pointer { // ERROR "leaking param: addr( to result ~r0 level=1)?$"
return atomic.Loadp(addr)
}
diff --git a/test/escape_slice.go b/test/escape_slice.go
index d60414736c..055b60be41 100644
--- a/test/escape_slice.go
+++ b/test/escape_slice.go
@@ -101,7 +101,7 @@ func slice11() {
_ = s
}
-func slice12(x []int) *[1]int { // ERROR "leaking param: x to result ~r1 level=0$"
+func slice12(x []int) *[1]int { // ERROR "leaking param: x to result ~r0 level=0$"
return (*[1]int)(x)
}
@@ -110,7 +110,7 @@ func envForDir(dir string) []string { // ERROR "dir does not escape"
return mergeEnvLists([]string{"PWD=" + dir}, env) // ERROR ".PWD=. \+ dir escapes to heap" "\[\]string{...} does not escape"
}
-func mergeEnvLists(in, out []string) []string { // ERROR "leaking param content: in" "leaking param content: out" "leaking param: out to result ~r2 level=0"
+func mergeEnvLists(in, out []string) []string { // ERROR "leaking param content: in" "leaking param content: out" "leaking param: out to result ~r0 level=0"
NextVar:
for _, inkv := range in {
k := strings.SplitAfterN(inkv, "=", 2)[0]
diff --git a/test/escape_struct_return.go b/test/escape_struct_return.go
index 222ef8bc22..a42ae1e8c9 100644
--- a/test/escape_struct_return.go
+++ b/test/escape_struct_return.go
@@ -15,11 +15,11 @@ type U struct {
_spp **string
}
-func A(sp *string, spp **string) U { // ERROR "leaking param: sp to result ~r2 level=0$" "leaking param: spp to result ~r2 level=0$"
+func A(sp *string, spp **string) U { // ERROR "leaking param: sp to result ~r0 level=0$" "leaking param: spp to result ~r0 level=0$"
return U{sp, spp}
}
-func B(spp **string) U { // ERROR "leaking param: spp to result ~r1 level=0$"
+func B(spp **string) U { // ERROR "leaking param: spp to result ~r0 level=0$"
return U{*spp, spp}
}
diff --git a/test/escape_unsafe.go b/test/escape_unsafe.go
index b34beacccb..cec6674a14 100644
--- a/test/escape_unsafe.go
+++ b/test/escape_unsafe.go
@@ -15,7 +15,7 @@ import (
// (1) Conversion of a *T1 to Pointer to *T2.
-func convert(p *float64) *uint64 { // ERROR "leaking param: p to result ~r1 level=0$"
+func convert(p *float64) *uint64 { // ERROR "leaking param: p to result ~r0 level=0$"
return (*uint64)(unsafe.Pointer(p))
}
@@ -39,12 +39,12 @@ func arithMask() unsafe.Pointer {
// (5) Conversion of the result of reflect.Value.Pointer or
// reflect.Value.UnsafeAddr from uintptr to Pointer.
-// BAD: should be "leaking param: p to result ~r1 level=0$"
+// BAD: should be "leaking param: p to result ~r0 level=0$"
func valuePointer(p *int) unsafe.Pointer { // ERROR "leaking param: p$"
return unsafe.Pointer(reflect.ValueOf(p).Pointer())
}
-// BAD: should be "leaking param: p to result ~r1 level=0$"
+// BAD: should be "leaking param: p to result ~r0 level=0$"
func valueUnsafeAddr(p *int) unsafe.Pointer { // ERROR "leaking param: p$"
return unsafe.Pointer(reflect.ValueOf(p).Elem().UnsafeAddr())
}
@@ -52,11 +52,11 @@ func valueUnsafeAddr(p *int) unsafe.Pointer { // ERROR "leaking param: p$"
// (6) Conversion of a reflect.SliceHeader or reflect.StringHeader
// Data field to or from Pointer.
-func fromSliceData(s []int) unsafe.Pointer { // ERROR "leaking param: s to result ~r1 level=0$"
+func fromSliceData(s []int) unsafe.Pointer { // ERROR "leaking param: s to result ~r0 level=0$"
return unsafe.Pointer((*reflect.SliceHeader)(unsafe.Pointer(&s)).Data)
}
-func fromStringData(s string) unsafe.Pointer { // ERROR "leaking param: s to result ~r1 level=0$"
+func fromStringData(s string) unsafe.Pointer { // ERROR "leaking param: s to result ~r0 level=0$"
return unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&s)).Data)
}
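
The escape_unsafe.go hunk also keeps the existing "BAD: should be ..." comments in sync with the new result numbering; only the expected wording changes, not which cases are considered deficient. To see this kind of diagnostic outside the test harness, the package can be built with escape reporting turned on; the flags below approximate what the errorcheck recipes pass and are not the harness's exact invocation:

	// Print escape-analysis decisions, with inlining disabled so the
	// per-function messages line up with the annotations:
	//
	//	go build -gcflags='-m -l' escape_unsafe.go
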
diff --git a/test/fixedbugs/bug195.go b/test/fixedbugs/bug195.go
index 94f61fff7f..6d8578d6cb 100644
--- a/test/fixedbugs/bug195.go
+++ b/test/fixedbugs/bug195.go
@@ -1,4 +1,4 @@
-// errorcheck
+// errorcheck -lang=go1.17
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/bug248.dir/bug2.go b/test/fixedbugs/bug248.dir/bug2.go
index c0fdecfdb7..92a7974679 100644
--- a/test/fixedbugs/bug248.dir/bug2.go
+++ b/test/fixedbugs/bug248.dir/bug2.go
@@ -50,8 +50,8 @@ var p0i2 p1.I = t0(0) // ERROR "does not implement|incompatible"
func foobar() {
// check that cannot assign one to the other,
// but can convert.
- v0 = v1 // ERROR "assign"
- v1 = v0 // ERROR "assign"
+ v0 = v1 // ERROR "assign|cannot use"
+ v1 = v0 // ERROR "assign|cannot use"
v0 = p0.T(v1)
v1 = p1.T(v0)
diff --git a/test/fixedbugs/bug267.go b/test/fixedbugs/bug267.go
index cf8bf841f8..b61216a9d5 100644
--- a/test/fixedbugs/bug267.go
+++ b/test/fixedbugs/bug267.go
@@ -10,7 +10,7 @@ type T []int
var a []bool
-func _() {
+func f1() {
if a[T{42}[0]] {
}
// if (a[T{42}[0]]) {} // this compiles
diff --git a/test/fixedbugs/bug345.dir/main.go b/test/fixedbugs/bug345.dir/main.go
index b77a2fad5f..a53d3e8586 100644
--- a/test/fixedbugs/bug345.dir/main.go
+++ b/test/fixedbugs/bug345.dir/main.go
@@ -23,7 +23,7 @@ func main() {
// main.go:27: cannot use &x (type *"io".SectionReader) as type *"/Users/rsc/g/go/test/fixedbugs/bug345.dir/io".SectionReader in function argument
var w io.Writer
- bufio.NewWriter(w) // ERROR "[\w.]+[^.]/io|has incompatible type"
+ bufio.NewWriter(w) // ERROR "[\w.]+[^.]/io|has incompatible type|cannot use"
var x goio.SectionReader
- io.SR(&x) // ERROR "[\w.]+[^.]/io|has incompatible type"
+ io.SR(&x) // ERROR "[\w.]+[^.]/io|has incompatible type|cannot use"
}
diff --git a/test/fixedbugs/bug460.dir/b.go b/test/fixedbugs/bug460.dir/b.go
index ef646946cf..5d388fc413 100644
--- a/test/fixedbugs/bug460.dir/b.go
+++ b/test/fixedbugs/bug460.dir/b.go
@@ -9,9 +9,9 @@ import "./a"
var x a.Foo
func main() {
- x.int = 20 // ERROR "unexported field"
- x.int8 = 20 // ERROR "unexported field"
- x.error = nil // ERROR "unexported field"
- x.rune = 'a' // ERROR "unexported field"
- x.byte = 20 // ERROR "unexported field"
+ x.int = 20 // ERROR "unexported field|undefined"
+ x.int8 = 20 // ERROR "unexported field|undefined"
+ x.error = nil // ERROR "unexported field|undefined"
+ x.rune = 'a' // ERROR "unexported field|undefined"
+ x.byte = 20 // ERROR "unexported field|undefined"
}
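
The bug248, bug345, and bug460 hunks above (and several fixedbugs hunks below) widen their ERROR annotations with alternatives such as "|cannot use" or "|undefined". The quoted text in an ERROR comment is a regular expression matched against compiler output, so the alternation lets one annotation accept both the old gc wording and the differently phrased diagnostics of the new type checker. A standalone illustration of the matching idea; the sample strings are made up and are not actual compiler output:

	package main

	import (
		"fmt"
		"regexp"
	)

	func main() {
		// One errorcheck pattern, two acceptable phrasings.
		re := regexp.MustCompile(`unexported field|undefined`)
		fmt.Println(re.MatchString("x.int undefined (no field or method int)")) // true
		fmt.Println(re.MatchString("cannot refer to unexported field int"))     // true
	}
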
diff --git a/test/fixedbugs/issue10975.go b/test/fixedbugs/issue10975.go
index 89ef23c1a8..876ea58ef9 100644
--- a/test/fixedbugs/issue10975.go
+++ b/test/fixedbugs/issue10975.go
@@ -1,4 +1,4 @@
-// errorcheck
+// errorcheck -lang=go1.17
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue11614.go b/test/fixedbugs/issue11614.go
index de15f9827f..6ea463b7fe 100644
--- a/test/fixedbugs/issue11614.go
+++ b/test/fixedbugs/issue11614.go
@@ -1,4 +1,4 @@
-// errorcheck
+// errorcheck -lang=go1.17
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue12006.go b/test/fixedbugs/issue12006.go
index 0a2ef8dad0..e878bc48e2 100644
--- a/test/fixedbugs/issue12006.go
+++ b/test/fixedbugs/issue12006.go
@@ -87,7 +87,7 @@ func TFooI() {
FooI(a, b, c) // ERROR "a escapes to heap" "b escapes to heap" "... argument does not escape"
}
-func FooJ(args ...interface{}) *int32 { // ERROR "leaking param: args to result ~r1 level=1"
+func FooJ(args ...interface{}) *int32 { // ERROR "leaking param: args to result ~r0 level=1"
for i := 0; i < len(args); i++ {
switch x := args[i].(type) {
case nil:
@@ -123,7 +123,7 @@ type fakeSlice struct {
a *[4]interface{}
}
-func FooK(args fakeSlice) *int32 { // ERROR "leaking param: args to result ~r1 level=1"
+func FooK(args fakeSlice) *int32 { // ERROR "leaking param: args to result ~r0 level=1"
for i := 0; i < args.l; i++ {
switch x := (*args.a)[i].(type) {
case nil:
@@ -148,7 +148,7 @@ func TFooK2() {
isink = FooK(fs)
}
-func FooL(args []interface{}) *int32 { // ERROR "leaking param: args to result ~r1 level=1"
+func FooL(args []interface{}) *int32 { // ERROR "leaking param: args to result ~r0 level=1"
for i := 0; i < len(args); i++ {
switch x := args[i].(type) {
case nil:
diff --git a/test/fixedbugs/issue12588.go b/test/fixedbugs/issue12588.go
index 950ef36e20..dc8111198c 100644
--- a/test/fixedbugs/issue12588.go
+++ b/test/fixedbugs/issue12588.go
@@ -35,7 +35,7 @@ func g(a *A) int { // ERROR "a does not escape"
return 0
}
-func h(a *B) *uint64 { // ERROR "leaking param: a to result ~r1 level=1"
+func h(a *B) *uint64 { // ERROR "leaking param: a to result ~r0 level=1"
for i, x := range &a.b {
if i == 0 {
return x
@@ -44,7 +44,7 @@ func h(a *B) *uint64 { // ERROR "leaking param: a to result ~r1 level=1"
return nil
}
-func h2(a *B) *uint64 { // ERROR "leaking param: a to result ~r1 level=1"
+func h2(a *B) *uint64 { // ERROR "leaking param: a to result ~r0 level=1"
p := &a.b
for i, x := range p {
if i == 0 {
@@ -55,7 +55,7 @@ func h2(a *B) *uint64 { // ERROR "leaking param: a to result ~r1 level=1"
}
// Seems like below should be level=1, not 0.
-func k(a B) *uint64 { // ERROR "leaking param: a to result ~r1 level=0"
+func k(a B) *uint64 { // ERROR "leaking param: a to result ~r0 level=0"
for i, x := range &a.b {
if i == 0 {
return x
diff --git a/test/fixedbugs/issue14652.go b/test/fixedbugs/issue14652.go
index d53b412668..14a223977b 100644
--- a/test/fixedbugs/issue14652.go
+++ b/test/fixedbugs/issue14652.go
@@ -6,4 +6,4 @@
package p
-var x any // ERROR "undefined: any|undefined type .*any.*"
+var x any // ERROR "undefined: any|undefined type .*any.*|cannot use any outside constraint position"
diff --git a/test/fixedbugs/issue14999.go b/test/fixedbugs/issue14999.go
index b648441fc2..a25a50e519 100644
--- a/test/fixedbugs/issue14999.go
+++ b/test/fixedbugs/issue14999.go
@@ -7,11 +7,11 @@
package p
func f(x int) func(int) int {
- return func(y int) int { return x + y } // ERROR "heap-allocated closure, not allowed in runtime"
+ return func(y int) int { return x + y } // ERROR "heap-allocated closure f\.func1, not allowed in runtime"
}
func g(x int) func(int) int { // ERROR "x escapes to heap, not allowed in runtime"
- return func(y int) int { // ERROR "heap-allocated closure, not allowed in runtime"
+ return func(y int) int { // ERROR "heap-allocated closure g\.func1, not allowed in runtime"
x += y
return x + y
}
diff --git a/test/fixedbugs/issue20250.go b/test/fixedbugs/issue20250.go
index 1a513bea56..aed7b25d1b 100644
--- a/test/fixedbugs/issue20250.go
+++ b/test/fixedbugs/issue20250.go
@@ -17,7 +17,7 @@ type T struct {
func f(a T) { // ERROR "live at entry to f: a"
var e interface{} // ERROR "stack object e interface \{\}$"
func() { // ERROR "live at entry to f.func1: a &e"
- e = a.s // ERROR "live at call to convT2E: &e" "stack object a T$"
+ e = a.s // ERROR "live at call to convT: &e" "stack object a T$"
}()
// Before the fix, both a and e were live at the previous line.
_ = e
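
The issue20250.go hunk (and matching hunks in live.go and live_regabi.go further down) renames the runtime helper in the liveness expectations from convT2E to convT; the tests only care that the listed variables are live at that conversion call, and the new name presumably reflects the consolidated value-to-interface helper the compiler now emits calls to. A sketch of the kind of conversion that goes through such a helper (illustrative, not part of the patch):

	package p

	// Returning a non-pointer-shaped value as an interface copies it
	// into a runtime-managed allocation via a conversion helper; that
	// call site is what the liveness annotations above now expect to
	// be reported as "convT".
	func box(s [2]string) interface{} {
		return s
	}
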
diff --git a/test/fixedbugs/issue22076.go b/test/fixedbugs/issue22076.go
index 5d628b96bd..b383a674e2 100644
--- a/test/fixedbugs/issue22076.go
+++ b/test/fixedbugs/issue22076.go
@@ -13,12 +13,12 @@ import . "bytes"
var _ Reader // use "bytes" import
-func _() {
+func f1() {
Buffer := 0
_ = Buffer
}
-func _() {
+func f2() {
for Buffer := range []int{} {
_ = Buffer
}
diff --git a/test/fixedbugs/issue24651a.go b/test/fixedbugs/issue24651a.go
index 6c7bf30908..1bfe8ac1ce 100644
--- a/test/fixedbugs/issue24651a.go
+++ b/test/fixedbugs/issue24651a.go
@@ -21,5 +21,5 @@ var x = 5
//go:noinline Provide a clean, constant reason for not inlining main
func main() { // ERROR "cannot inline main: marked go:noinline$"
println("Foo(", x, ")=", Foo(x))
- println("Bar(", x, ")=", Bar(x)) // ERROR "inlining call to Bar func\(int\) int { return x \* \(x \+ 1\) \* \(x \+ 2\) }$"
+ println("Bar(", x, ")=", Bar(x)) // ERROR "inlining call to Bar"
}
diff --git a/test/fixedbugs/issue24651b.go b/test/fixedbugs/issue24651b.go
index aa88a6787b..2af54fc4b5 100644
--- a/test/fixedbugs/issue24651b.go
+++ b/test/fixedbugs/issue24651b.go
@@ -19,6 +19,6 @@ var x = 5
//go:noinline Provide a clean, constant reason for not inlining main
func main() { // ERROR "cannot inline main: marked go:noinline$"
- println("Foo(", x, ")=", Foo(x)) // ERROR "inlining call to Foo func\(int\) int { return x \* \(x \+ 1\) \* \(x \+ 2\) }$"
- println("Bar(", x, ")=", Bar(x)) // ERROR "inlining call to Bar func\(int\) int { return x \* \(x \+ 1\) \* \(x \+ 2\) }$"
+ println("Foo(", x, ")=", Foo(x)) // ERROR "inlining call to Foo"
+ println("Bar(", x, ")=", Bar(x)) // ERROR "inlining call to Bar"
}
diff --git a/test/fixedbugs/issue26163.go b/test/fixedbugs/issue26163.go
index d141a2797d..3f3d77859d 100644
--- a/test/fixedbugs/issue26163.go
+++ b/test/fixedbugs/issue26163.go
@@ -1,4 +1,4 @@
-// compile -N -d=softfloat -goexperiment noregabiargs
+// compile -N -d=softfloat
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue27557.go b/test/fixedbugs/issue27557.go
index e35ab5a169..f609b27faa 100644
--- a/test/fixedbugs/issue27557.go
+++ b/test/fixedbugs/issue27557.go
@@ -8,19 +8,19 @@ package p
var sink interface{}
-func _() {
+func f1() {
var t T
f := t.noescape // ERROR "t.noescape does not escape"
f()
}
-func _() {
+func f2() {
var t T // ERROR "moved to heap"
f := t.escape // ERROR "t.escape does not escape"
f()
}
-func _() {
+func f3() {
var t T // ERROR "moved to heap"
f := t.returns // ERROR "t.returns does not escape"
sink = f()
diff --git a/test/fixedbugs/issue28688.go b/test/fixedbugs/issue28688.go
index 8ef0802812..0d2000e149 100644
--- a/test/fixedbugs/issue28688.go
+++ b/test/fixedbugs/issue28688.go
@@ -1,4 +1,4 @@
-// run -gcflags=-d=softfloat -goexperiment noregabiargs
+// run -gcflags=-d=softfloat
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue30862.dir/a.go b/test/fixedbugs/issue30862.dir/a/a.go
index c23f4de1ef..c23f4de1ef 100644
--- a/test/fixedbugs/issue30862.dir/a.go
+++ b/test/fixedbugs/issue30862.dir/a/a.go
diff --git a/test/fixedbugs/issue30862.dir/b.go b/test/fixedbugs/issue30862.dir/b/b.go
index 3e501bb8dc..230221d503 100644
--- a/test/fixedbugs/issue30862.dir/b.go
+++ b/test/fixedbugs/issue30862.dir/b/b.go
@@ -4,7 +4,7 @@
package b
-import "./a"
+import "issue30862.dir/a"
type EmbedImported struct {
a.NoitfStruct
diff --git a/test/fixedbugs/issue30862.dir/main.go b/test/fixedbugs/issue30862.dir/main.go
index 80db0e13a8..1489c5a342 100644
--- a/test/fixedbugs/issue30862.dir/main.go
+++ b/test/fixedbugs/issue30862.dir/main.go
@@ -8,7 +8,7 @@ import (
"fmt"
"os"
- "./b"
+ "issue30862.dir/b"
)
// Test case for issue 30862.
diff --git a/test/fixedbugs/issue30862.go b/test/fixedbugs/issue30862.go
index ba122cc3c8..acac71e2cc 100644
--- a/test/fixedbugs/issue30862.go
+++ b/test/fixedbugs/issue30862.go
@@ -1,4 +1,4 @@
-// rundir
+// runindir -goexperiment fieldtrack
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
@@ -9,6 +9,4 @@
// is set when building it, whereas gccgo has field tracking
// enabled by default (hence the build tag below).
-// +build gccgo
-
package ignored
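
issue30862 switches from "rundir" to "runindir -goexperiment fieldtrack": runindir copies the test directory into its own module and runs "go run ." there (the module path is the directory's basename, per the run.go comment later in this patch), which is why the helper packages move into a/ and b/ subdirectories and are imported as "issue30862.dir/a" and "issue30862.dir/b". The gccgo-only build tag is dropped because the test no longer relies on gccgo's default field tracking; the experiment is requested explicitly on the run line instead.
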
diff --git a/test/fixedbugs/issue30898.go b/test/fixedbugs/issue30898.go
index b6376d3f9e..c7f6f2d371 100644
--- a/test/fixedbugs/issue30898.go
+++ b/test/fixedbugs/issue30898.go
@@ -15,5 +15,5 @@ func debugf(format string, args ...interface{}) { // ERROR "can inline debugf" "
func bar() { // ERROR "can inline bar"
value := 10
- debugf("value is %d", value) // ERROR "inlining call to debugf" "value does not escape" "\[\]interface {}{...} does not escape"
+ debugf("value is %d", value) // ERROR "inlining call to debugf" "value does not escape" "\.\.\. argument does not escape"
}
diff --git a/test/fixedbugs/issue31573.go b/test/fixedbugs/issue31573.go
index 005910e00d..eaab563431 100644
--- a/test/fixedbugs/issue31573.go
+++ b/test/fixedbugs/issue31573.go
@@ -19,31 +19,31 @@ func g() {
defer f([]*int{new(int), new(int)}...) // ERROR "\[\]\*int{...} does not escape$" "new\(int\) does not escape$"
go f()
- go f(new(int)) // ERROR "... argument escapes to heap$" "new\(int\) escapes to heap$"
- go f(new(int), new(int)) // ERROR "... argument escapes to heap$" "new\(int\) escapes to heap$"
+ go f(new(int)) // ERROR "... argument does not escape$" "new\(int\) escapes to heap$"
+ go f(new(int), new(int)) // ERROR "... argument does not escape$" "new\(int\) escapes to heap$"
go f(nil...)
- go f([]*int{}...) // ERROR "\[\]\*int{} escapes to heap$"
- go f([]*int{new(int)}...) // ERROR "\[\]\*int{...} escapes to heap$" "new\(int\) escapes to heap$"
- go f([]*int{new(int), new(int)}...) // ERROR "\[\]\*int{...} escapes to heap$" "new\(int\) escapes to heap$"
+ go f([]*int{}...) // ERROR "\[\]\*int{} does not escape$"
+ go f([]*int{new(int)}...) // ERROR "\[\]\*int{...} does not escape$" "new\(int\) escapes to heap$"
+ go f([]*int{new(int), new(int)}...) // ERROR "\[\]\*int{...} does not escape$" "new\(int\) escapes to heap$"
for {
defer f()
- defer f(new(int)) // ERROR "... argument escapes to heap$" "new\(int\) escapes to heap$"
- defer f(new(int), new(int)) // ERROR "... argument escapes to heap$" "new\(int\) escapes to heap$"
+ defer f(new(int)) // ERROR "... argument does not escape$" "new\(int\) escapes to heap$"
+ defer f(new(int), new(int)) // ERROR "... argument does not escape$" "new\(int\) escapes to heap$"
defer f(nil...)
- defer f([]*int{}...) // ERROR "\[\]\*int{} escapes to heap$"
- defer f([]*int{new(int)}...) // ERROR "\[\]\*int{...} escapes to heap$" "new\(int\) escapes to heap$"
- defer f([]*int{new(int), new(int)}...) // ERROR "\[\]\*int{...} escapes to heap$" "new\(int\) escapes to heap$"
+ defer f([]*int{}...) // ERROR "\[\]\*int{} does not escape$"
+ defer f([]*int{new(int)}...) // ERROR "\[\]\*int{...} does not escape$" "new\(int\) escapes to heap$"
+ defer f([]*int{new(int), new(int)}...) // ERROR "\[\]\*int{...} does not escape$" "new\(int\) escapes to heap$"
go f()
- go f(new(int)) // ERROR "... argument escapes to heap$" "new\(int\) escapes to heap$"
- go f(new(int), new(int)) // ERROR "... argument escapes to heap$" "new\(int\) escapes to heap$"
+ go f(new(int)) // ERROR "... argument does not escape$" "new\(int\) escapes to heap$"
+ go f(new(int), new(int)) // ERROR "... argument does not escape$" "new\(int\) escapes to heap$"
go f(nil...)
- go f([]*int{}...) // ERROR "\[\]\*int{} escapes to heap$"
- go f([]*int{new(int)}...) // ERROR "\[\]\*int{...} escapes to heap$" "new\(int\) escapes to heap$"
- go f([]*int{new(int), new(int)}...) // ERROR "\[\]\*int{...} escapes to heap$" "new\(int\) escapes to heap$"
+ go f([]*int{}...) // ERROR "\[\]\*int{} does not escape$"
+ go f([]*int{new(int)}...) // ERROR "\[\]\*int{...} does not escape$" "new\(int\) escapes to heap$"
+ go f([]*int{new(int), new(int)}...) // ERROR "\[\]\*int{...} does not escape$" "new\(int\) escapes to heap$"
}
}
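
The issue31573.go flips mirror the escape2/escape2n changes earlier in the patch: the temporary slice backing a variadic defer or go call, even inside a loop, is no longer reported as escaping ("... argument does not escape", "[]*int{} does not escape"), while the new(int) values passed through it still are. The patch does not spell out the mechanism here, but the live.go hunks below expect a compiler-generated wrapper for go statements, which is consistent with the arguments now being captured by that wrapper rather than kept alive through a heap-allocated slice. A minimal reproduction of the shape being tested (illustrative file; the diagnostics quoted in the comment are the ones the hunks above expect):

	package p

	func consume(xs ...*int) {}

	// With this change, -m reports the temporary []*int for each call
	// below as "... argument does not escape", while each new(int) is
	// still reported as escaping.
	func spawnAndDefer() {
		for i := 0; i < 3; i++ {
			go consume(new(int))
			defer consume(new(int), new(int))
		}
	}
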
diff --git a/test/fixedbugs/issue41500.go b/test/fixedbugs/issue41500.go
index 3ec23a0dfe..b0ae7cfd59 100644
--- a/test/fixedbugs/issue41500.go
+++ b/test/fixedbugs/issue41500.go
@@ -13,8 +13,8 @@ type s struct {
func f() {
var x *s
- _ = x == nil || len(x.slice) // ERROR "invalid operation: .+ \(operator \|\| not defined on int\)|incompatible types|cannot convert"
- _ = len(x.slice) || x == nil // ERROR "invalid operation: .+ \(operator \|\| not defined on int\)|incompatible types|cannot convert"
- _ = x == nil && len(x.slice) // ERROR "invalid operation: .+ \(operator && not defined on int\)|incompatible types|cannot convert"
- _ = len(x.slice) && x == nil // ERROR "invalid operation: .+ \(operator && not defined on int\)|incompatible types|cannot convert"
+ _ = x == nil || len(x.slice) // ERROR "invalid operation: .+ \(operator \|\| not defined on int\)|incompatible types|mismatched types untyped bool and int"
+ _ = len(x.slice) || x == nil // ERROR "invalid operation: .+ \(operator \|\| not defined on int\)|incompatible types|mismatched types int and untyped bool"
+ _ = x == nil && len(x.slice) // ERROR "invalid operation: .+ \(operator && not defined on int\)|incompatible types|mismatched types untyped bool and int"
+ _ = len(x.slice) && x == nil // ERROR "invalid operation: .+ \(operator && not defined on int\)|incompatible types|mismatched types int and untyped bool"
}
diff --git a/test/fixedbugs/issue42284.dir/a.go b/test/fixedbugs/issue42284.dir/a.go
index ffe9310be3..f7fd80bd20 100644
--- a/test/fixedbugs/issue42284.dir/a.go
+++ b/test/fixedbugs/issue42284.dir/a.go
@@ -13,7 +13,7 @@ func E() I { // ERROR "can inline E"
return T(0) // ERROR "T\(0\) escapes to heap"
}
-func F(i I) I { // ERROR "can inline F" "leaking param: i to result ~r1 level=0"
+func F(i I) I { // ERROR "can inline F" "leaking param: i to result ~r0 level=0"
i = nil
return i
}
diff --git a/test/fixedbugs/issue43762.go b/test/fixedbugs/issue43762.go
index 9f7682ad6a..bf950c8f52 100644
--- a/test/fixedbugs/issue43762.go
+++ b/test/fixedbugs/issue43762.go
@@ -6,6 +6,6 @@
package p
-var _ = true == '\\' // ERROR "invalid operation: true == '\\\\'|cannot convert true"
-var _ = true == '\'' // ERROR "invalid operation: true == '\\''|cannot convert true"
-var _ = true == '\n' // ERROR "invalid operation: true == '\\n'|cannot convert true"
+var _ = true == '\\' // ERROR "invalid operation: (cannot compare true)|(true) == '\\\\' \(mismatched types untyped bool and untyped rune\)"
+var _ = true == '\'' // ERROR "invalid operation: (cannot compare true)|(true) == '\\'' \(mismatched types untyped bool and untyped rune\)"
+var _ = true == '\n' // ERROR "invalid operation: (cannot compare true)|(true) == '\\n' \(mismatched types untyped bool and untyped rune\)"
diff --git a/test/fixedbugs/issue44432.go b/test/fixedbugs/issue44432.go
index c5fb67e0d7..eec53f3000 100644
--- a/test/fixedbugs/issue44432.go
+++ b/test/fixedbugs/issue44432.go
@@ -8,6 +8,6 @@ package p
var m = map[string]int{
"a": 1,
- 1: 1, // ERROR "cannot use 1.*as type string in map key"
- 2: 2, // ERROR "cannot use 2.*as type string in map key"
+ 1: 1, // ERROR "cannot use 1.*as.*string.*in map"
+ 2: 2, // ERROR "cannot use 2.*as.*string.*in map"
}
diff --git a/test/fixedbugs/issue45258.go b/test/fixedbugs/issue45258.go
index f4d6fccf17..b026c0c8f5 100644
--- a/test/fixedbugs/issue45258.go
+++ b/test/fixedbugs/issue45258.go
@@ -22,7 +22,7 @@ func (r *impl) Foo() Barer {
func (r *impl) Bar() {}
-func _() {
+func f1() {
var r Fooer = &impl{}
r.Foo().Bar()
}
diff --git a/test/fixedbugs/issue46556.go b/test/fixedbugs/issue46556.go
new file mode 100644
index 0000000000..b159f61b0c
--- /dev/null
+++ b/test/fixedbugs/issue46556.go
@@ -0,0 +1,16 @@
+// compile
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type A = interface{}
+type B interface{}
+
+// Test that embedding both anonymous and defined types is supported.
+type C interface {
+ A
+ B
+}
diff --git a/test/fixedbugs/issue46749.go b/test/fixedbugs/issue46749.go
index 63ed19795e..faf1f884a6 100644
--- a/test/fixedbugs/issue46749.go
+++ b/test/fixedbugs/issue46749.go
@@ -14,13 +14,13 @@ var iface interface{}
var (
_ = "" + b // ERROR "invalid operation.*mismatched types.*untyped string and bool"
_ = "" + i // ERROR "invalid operation.*mismatched types.*untyped string and int"
- _ = "" + nil // ERROR "invalid operation.*mismatched types.*untyped string and nil"
+ _ = "" + nil // ERROR "invalid operation.*mismatched types.*untyped string and nil|(untyped nil)"
)
var (
_ = s + false // ERROR "invalid operation.*mismatched types.*string and untyped bool"
_ = s + 1 // ERROR "invalid operation.*mismatched types.*string and untyped int"
- _ = s + nil // ERROR "invalid operation.*mismatched types.*string and nil"
+ _ = s + nil // ERROR "invalid operation.*mismatched types.*string and nil|(untyped nil)"
)
var (
@@ -31,7 +31,7 @@ var (
var (
_ = b + 1 // ERROR "invalid operation.*mismatched types.*bool and untyped int"
_ = i + false // ERROR "invalid operation.*mismatched types.*int and untyped bool"
- _ = iface + 1 // ERROR "invalid operation.*mismatched types.*interface {} and int"
- _ = iface + 1.0 // ERROR "invalid operation.*mismatched types.*interface {} and float64"
- _ = iface + false // ERROR "invalid operation.*mismatched types.*interface {} and bool"
+ _ = iface + 1 // ERROR "invalid operation.*mismatched types.*interface *{} and int"
+ _ = iface + 1.0 // ERROR "invalid operation.*mismatched types.*interface *{} and float64"
+ _ = iface + false // ERROR "invalid operation.*mismatched types.*interface *{} and bool"
)
diff --git a/test/fixedbugs/issue46903.go b/test/fixedbugs/issue46903.go
new file mode 100644
index 0000000000..3237a583d5
--- /dev/null
+++ b/test/fixedbugs/issue46903.go
@@ -0,0 +1,32 @@
+// run
+//go:build goexperiment.unified
+// +build goexperiment.unified
+
+// TODO(mdempsky): Enable test unconditionally. This test should pass
+// for non-unified mode too.
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+//go:notinheap
+type A struct{ B }
+type B struct{ x byte }
+type I interface{ M() *B }
+
+func (p *B) M() *B { return p }
+
+var (
+ a A
+ i I = &a
+)
+
+func main() {
+ got, want := i.M(), &a.B
+ if got != want {
+ println(got, "!=", want)
+ panic("FAIL")
+ }
+}
diff --git a/test/fixedbugs/issue47227.go b/test/fixedbugs/issue47227.go
new file mode 100644
index 0000000000..a14efc9a68
--- /dev/null
+++ b/test/fixedbugs/issue47227.go
@@ -0,0 +1,23 @@
+// run fake-arg-to-force-use-of-go-run
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build cgo
+// +build cgo
+
+package main
+
+// void f(int *p) { *p = 0x12345678; }
+import "C"
+
+func main() {
+ var x C.int
+ func() {
+ defer C.f(&x)
+ }()
+ if x != 0x12345678 {
+ panic("FAIL")
+ }
+}
diff --git a/test/fixedbugs/issue4909b.go b/test/fixedbugs/issue4909b.go
index 0f594e3db6..7d7922701a 100644
--- a/test/fixedbugs/issue4909b.go
+++ b/test/fixedbugs/issue4909b.go
@@ -73,7 +73,7 @@ func writeDot(ns ...int) {
}
fmt.Print(")")
if isIndirect {
- fmt.Print(` // ERROR "indirection"`)
+ fmt.Print(` // ERROR "indirection|embedded via a pointer"`)
}
fmt.Print("\n")
}
diff --git a/test/fixedbugs/issue8042.go b/test/fixedbugs/issue8042.go
index 5639f97bb8..be15ef06cd 100644
--- a/test/fixedbugs/issue8042.go
+++ b/test/fixedbugs/issue8042.go
@@ -9,7 +9,7 @@
package p
-func _() {
+func f1() {
goto L1
const x = 0
L1:
@@ -18,7 +18,7 @@ L1:
L2:
}
-func _() {
+func f2() {
{
goto L1
}
@@ -31,7 +31,7 @@ L1:
L2:
}
-func _(d int) {
+func f3(d int) {
if d > 0 {
goto L1
} else {
diff --git a/test/fixedbugs/issue8761.go b/test/fixedbugs/issue8761.go
index 7f458f7f03..e5130e1ff5 100644
--- a/test/fixedbugs/issue8761.go
+++ b/test/fixedbugs/issue8761.go
@@ -10,17 +10,17 @@
package p
-func _() {
+func f1() {
type C chan int
_ = [1][]C{[]C{make(chan int)}}
}
-func _() {
+func f2() {
type C interface{}
_ = [1][]C{[]C{recover()}}
}
-func _() {
+func f3() {
type C *int
_ = [1][]C{[]C{new(int)}}
}
diff --git a/test/inline.go b/test/inline.go
index 472a941dca..a73c0ba7b1 100644
--- a/test/inline.go
+++ b/test/inline.go
@@ -1,4 +1,4 @@
-// errorcheck -0 -m -d=inlfuncswithclosures=1
+// errorcheckwithauto -0 -m -d=inlfuncswithclosures=1
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
@@ -49,7 +49,7 @@ func j(x int) int { // ERROR "can inline j"
}
}
-func _() int { // ERROR "can inline _"
+func f2() int { // ERROR "can inline f2"
tmp1 := h
tmp2 := tmp1
return tmp2(0) // ERROR "inlining call to h"
@@ -167,8 +167,9 @@ func (T) meth(int, int) {} // ERROR "can inline T.meth"
func k() (T, int, int) { return T{}, 0, 0 } // ERROR "can inline k"
-func _() { // ERROR "can inline _"
+func f3() { // ERROR "can inline f3"
T.meth(k()) // ERROR "inlining call to k" "inlining call to T.meth"
+ // ERRORAUTO "inlining call to T.meth"
}
func small1() { // ERROR "can inline small1"
@@ -232,12 +233,13 @@ Loop:
// Issue #18493 - make sure we can do inlining of functions with a method value
type T1 struct{}
-func (a T1) meth(val int) int { // ERROR "can inline T1.meth" "inlining call to T1.meth"
+func (a T1) meth(val int) int { // ERROR "can inline T1.meth"
return val + 5
}
func getMeth(t1 T1) func(int) int { // ERROR "can inline getMeth"
return t1.meth // ERROR "t1.meth escapes to heap"
+ // ERRORAUTO "inlining call to T1.meth"
}
func ii() { // ERROR "can inline ii"
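
inline.go moves from errorcheck to errorcheckwithauto and gains ERRORAUTO lines for "inlining call to T.meth" and "inlining call to T1.meth". ERRORAUTO annotations match diagnostics that the compiler attributes to autogenerated positions rather than to a source line; here that is presumably the generated method-value and method-expression wrappers, whose inlining reports would otherwise have nowhere to attach. Treat the wrapper explanation as an inference from the annotations, not something the patch states.
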
diff --git a/test/inline_big.go b/test/inline_big.go
index 68e1101d3b..83672753f7 100644
--- a/test/inline_big.go
+++ b/test/inline_big.go
@@ -1023,7 +1023,7 @@ func f(a []int) int { // ERROR "cannot inline f:.*" "a does not escape"
a[997] = 0
a[998] = 0
a[999] = 0
- x := small(a) // ERROR "inlining call to small .*"
+ x := small(a) // ERROR "inlining call to small"
y := medium(a) // The crux of this test: medium is not inlined.
return x + y
}
diff --git a/test/inline_variadic.go b/test/inline_variadic.go
index 687048a192..49483d77f7 100644
--- a/test/inline_variadic.go
+++ b/test/inline_variadic.go
@@ -14,6 +14,6 @@ func head(xs ...string) string { // ERROR "can inline head" "leaking param: xs t
}
func f() string { // ERROR "can inline f"
- x := head("hello", "world") // ERROR "inlining call to head" "\[\]string{...} does not escape"
+ x := head("hello", "world") // ERROR "inlining call to head" "\.\.\. argument does not escape"
return x
}
diff --git a/test/live.go b/test/live.go
index bc7b3849cf..6130f7f069 100644
--- a/test/live.go
+++ b/test/live.go
@@ -1,5 +1,5 @@
// errorcheckwithauto -0 -l -live -wb=0 -d=ssa/insert_resched_checks/off
-// +build !ppc64,!ppc64le,!goexperiment.regabi,!goexperiment.regabidefer
+// +build !ppc64,!ppc64le,!goexperiment.regabiargs
// ppc64 needs a better tighten pass to make f18 pass
// rescheduling checks need to be turned off because there are some live variables across the inserted check call
@@ -144,8 +144,8 @@ var i9 interface{}
func f9() bool {
g8()
x := i9
- y := interface{}(g18()) // ERROR "live at call to convT2E: x.data$" "live at call to g18: x.data$" "stack object .autotmp_[0-9]+ \[2\]string$"
- i9 = y // make y escape so the line above has to call convT2E
+ y := interface{}(g18()) // ERROR "live at call to convT: x.data$" "live at call to g18: x.data$" "stack object .autotmp_[0-9]+ \[2\]string$"
+ i9 = y // make y escape so the line above has to call convT
return x != y
}
@@ -424,7 +424,7 @@ func f27defer(b bool) {
}
defer call27(func() { x++ }) // ERROR "stack object .autotmp_[0-9]+ struct \{"
printnl() // ERROR "live at call to printnl: .autotmp_[0-9]+ .autotmp_[0-9]+"
- return // ERROR "live at call to call27: .autotmp_[0-9]+"
+ return // ERROR "live at indirect call: .autotmp_[0-9]+"
}
// and newproc (go) escapes to the heap
@@ -432,9 +432,9 @@ func f27defer(b bool) {
func f27go(b bool) {
x := 0
if b {
- go call27(func() { x++ }) // ERROR "live at call to newobject: &x$" "live at call to newproc: &x$"
+ go call27(func() { x++ }) // ERROR "live at call to newobject: &x$" "live at call to newobject: &x .autotmp_[0-9]+$" "live at call to newproc: &x$" // allocate two closures, the func literal, and the wrapper for go
}
- go call27(func() { x++ }) // ERROR "live at call to newobject: &x$"
+ go call27(func() { x++ }) // ERROR "live at call to newobject: &x$" "live at call to newobject: .autotmp_[0-9]+$" // allocate two closures, the func literal, and the wrapper for go
printnl()
}
@@ -503,7 +503,7 @@ func f31(b1, b2, b3 bool) {
g31(g18()) // ERROR "stack object .autotmp_[0-9]+ \[2\]string$"
}
if b2 {
- h31(g18()) // ERROR "live at call to convT2E: .autotmp_[0-9]+$" "live at call to newobject: .autotmp_[0-9]+$"
+ h31(g18()) // ERROR "live at call to convT: .autotmp_[0-9]+$" "live at call to newobject: .autotmp_[0-9]+$"
}
if b3 {
panic(g18())
@@ -688,7 +688,7 @@ type T struct{}
func (*T) Foo(ptr *int) {}
-type R struct{ *T } // ERRORAUTO "live at entry to \(\*R\)\.Foo: \.this ptr" "live at entry to R\.Foo: \.this ptr"
+type R struct{ *T }
// issue 18860: output arguments must be live all the time if there is a defer.
// In particular, at printint r must be live.
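
The live.go updates encode two behavioural shifts. Deferred calls are now reported as "live at indirect call" rather than "live at call to call27", consistent with the deferred work going through a generated wrapper that is invoked indirectly. And a go statement capturing a variable now expects two newobject calls; the hunk's own comment explains them as "the func literal, and the wrapper for go". A sketch of the go-statement shape involved (illustrative, mirroring the f27go case above):

	package p

	func call27(f func()) { f() }

	// The liveness test expects two heap allocations here: one for
	// the func literal that captures x, and one for the generated
	// wrapper that the go statement schedules.
	func spawn() {
		x := 0
		go call27(func() { x++ })
	}
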
diff --git a/test/live_regabi.go b/test/live_regabi.go
index 2b0278ecb8..2883b83bae 100644
--- a/test/live_regabi.go
+++ b/test/live_regabi.go
@@ -1,5 +1,5 @@
// errorcheckwithauto -0 -l -live -wb=0 -d=ssa/insert_resched_checks/off
-// +build amd64,goexperiment.regabidefer,goexperiment.regabiargs
+// +build amd64,goexperiment.regabiargs
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
@@ -139,8 +139,8 @@ var i9 interface{}
func f9() bool {
g8()
x := i9
- y := interface{}(g18()) // ERROR "live at call to convT2E: x.data$" "live at call to g18: x.data$" "stack object .autotmp_[0-9]+ \[2\]string$"
- i9 = y // make y escape so the line above has to call convT2E
+ y := interface{}(g18()) // ERROR "live at call to convT: x.data$" "live at call to g18: x.data$" "stack object .autotmp_[0-9]+ \[2\]string$"
+ i9 = y // make y escape so the line above has to call convT
return x != y
}
@@ -498,7 +498,7 @@ func f31(b1, b2, b3 bool) {
g31(g18()) // ERROR "stack object .autotmp_[0-9]+ \[2\]string$"
}
if b2 {
- h31(g18()) // ERROR "live at call to convT2E: .autotmp_[0-9]+$" "live at call to newobject: .autotmp_[0-9]+$"
+ h31(g18()) // ERROR "live at call to convT: .autotmp_[0-9]+$" "live at call to newobject: .autotmp_[0-9]+$"
}
if b3 {
panic(g18())
@@ -683,7 +683,7 @@ type T struct{}
func (*T) Foo(ptr *int) {}
-type R struct{ *T } // ERRORAUTO "live at entry to \(\*R\)\.Foo: \.this ptr" "live at entry to R\.Foo: \.this ptr"
+type R struct{ *T }
// issue 18860: output arguments must be live all the time if there is a defer.
// In particular, at printint r must be live.
diff --git a/test/reflectmethod8.go b/test/reflectmethod8.go
new file mode 100644
index 0000000000..482163bae6
--- /dev/null
+++ b/test/reflectmethod8.go
@@ -0,0 +1,26 @@
+// compile
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Make sure that the compiler can analyze non-reflect
+// Type.{Method,MethodByName} calls.
+
+package p
+
+type I interface {
+ MethodByName(string)
+ Method(int)
+}
+
+type M struct{}
+
+func (M) MethodByName(string) {}
+func (M) Method(int) {}
+
+func f() {
+ var m M
+ I.MethodByName(m, "")
+ I.Method(m, 42)
+}
diff --git a/test/run.go b/test/run.go
index d7f5d02391..6296234d56 100644
--- a/test/run.go
+++ b/test/run.go
@@ -9,6 +9,7 @@ package main
import (
"bytes"
+ "encoding/json"
"errors"
"flag"
"fmt"
@@ -42,11 +43,55 @@ var (
linkshared = flag.Bool("linkshared", false, "")
updateErrors = flag.Bool("update_errors", false, "update error messages in test file based on compiler output")
runoutputLimit = flag.Int("l", defaultRunOutputLimit(), "number of parallel runoutput tests to run")
+ force = flag.Bool("f", false, "ignore expected-failure test lists")
+ generics = flag.String("G", defaultGLevels, "a comma-separated list of -G compiler flags to test with")
shard = flag.Int("shard", 0, "shard index to run. Only applicable if -shards is non-zero.")
shards = flag.Int("shards", 0, "number of shards. If 0, all tests are run. This is used by the continuous build.")
)
+type envVars struct {
+ GOOS string
+ GOARCH string
+ GOEXPERIMENT string
+ CGO_ENABLED string
+}
+
+var env = func() (res envVars) {
+ cmd := exec.Command("go", "env", "-json")
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ log.Fatal("StdoutPipe:", err)
+ }
+ if err := cmd.Start(); err != nil {
+ log.Fatal("Start:", err)
+ }
+ if err := json.NewDecoder(stdout).Decode(&res); err != nil {
+ log.Fatal("Decode:", err)
+ }
+ if err := cmd.Wait(); err != nil {
+ log.Fatal("Wait:", err)
+ }
+ return
+}()
+
+var unifiedEnabled, defaultGLevels = func() (bool, string) {
+ // TODO(mdempsky): This will give false negatives if the unified
+ // experiment is enabled by default, but presumably at that point we
+ // won't need to disable tests for it anymore anyway.
+ enabled := strings.Contains(","+env.GOEXPERIMENT+",", ",unified,")
+
+ // Normal test runs should test with both -G=0 and -G=3 for types2
+ // coverage. But the unified experiment always uses types2, so
+ // testing with -G=3 is redundant.
+ glevels := "0,3"
+ if enabled {
+ glevels = "0"
+ }
+
+ return enabled, glevels
+}()
+
// defaultAllCodeGen returns the default value of the -all_codegen
// flag. By default, we prefer to be fast (returning false), except on
// the linux-amd64 builder that's already very fast, so we get more
@@ -56,12 +101,13 @@ func defaultAllCodeGen() bool {
}
var (
- goos, goarch string
- cgoEnabled bool
+ goos = env.GOOS
+ goarch = env.GOARCH
+ cgoEnabled, _ = strconv.ParseBool(env.CGO_ENABLED)
// dirs are the directories to look for *.go files in.
// TODO(bradfitz): just use all directories?
- dirs = []string{".", "ken", "chan", "interface", "syntax", "dwarf", "fixedbugs", "codegen", "runtime", "abi", "typeparam"}
+ dirs = []string{".", "ken", "chan", "interface", "syntax", "dwarf", "fixedbugs", "codegen", "runtime", "abi", "typeparam", "typeparam/mdempsky"}
// ratec controls the max number of tests running at a time.
ratec chan bool
@@ -82,11 +128,13 @@ const maxTests = 5000
func main() {
flag.Parse()
- goos = getenv("GOOS", runtime.GOOS)
- goarch = getenv("GOARCH", runtime.GOARCH)
- cgoEnv, err := exec.Command(goTool(), "env", "CGO_ENABLED").Output()
- if err == nil {
- cgoEnabled, _ = strconv.ParseBool(strings.TrimSpace(string(cgoEnv)))
+ var glevels []int
+ for _, s := range strings.Split(*generics, ",") {
+ glevel, err := strconv.Atoi(s)
+ if err != nil {
+ log.Fatalf("invalid -G flag: %v", err)
+ }
+ glevels = append(glevels, glevel)
}
findExecCmd()
@@ -113,11 +161,11 @@ func main() {
}
if fi, err := os.Stat(arg); err == nil && fi.IsDir() {
for _, baseGoFile := range goFiles(arg) {
- tests = append(tests, startTest(arg, baseGoFile))
+ tests = append(tests, startTests(arg, baseGoFile, glevels)...)
}
} else if strings.HasSuffix(arg, ".go") {
dir, file := filepath.Split(arg)
- tests = append(tests, startTest(dir, file))
+ tests = append(tests, startTests(dir, file, glevels)...)
} else {
log.Fatalf("can't yet deal with non-directory and non-go file %q", arg)
}
@@ -125,7 +173,7 @@ func main() {
} else {
for _, dir := range dirs {
for _, baseGoFile := range goFiles(dir) {
- tests = append(tests, startTest(dir, baseGoFile))
+ tests = append(tests, startTests(dir, baseGoFile, glevels)...)
}
}
}
@@ -142,8 +190,15 @@ func main() {
status = "FAIL"
}
if test.err != nil {
- status = "FAIL"
errStr = test.err.Error()
+ if test.expectFail {
+ errStr += " (expected)"
+ } else {
+ status = "FAIL"
+ }
+ } else if test.expectFail {
+ status = "FAIL"
+ errStr = "unexpected success"
}
if status == "FAIL" {
failed = true
@@ -151,7 +206,8 @@ func main() {
resCount[status]++
dt := fmt.Sprintf("%.3fs", test.dt.Seconds())
if status == "FAIL" {
- fmt.Printf("# go run run.go -- %s\n%s\nFAIL\t%s\t%s\n",
+ fmt.Printf("# go run run.go -G=%v %s\n%s\nFAIL\t%s\t%s\n",
+ test.glevel,
path.Join(test.dir, test.gofile),
errStr, test.goFileName(), dt)
continue
@@ -270,30 +326,73 @@ type test struct {
dir, gofile string
donec chan bool // closed when done
dt time.Duration
+ glevel int // what -G level this test should use
src string
tempDir string
err error
+
+ // expectFail indicates whether the (overall) test recipe is
+ // expected to fail under the current test configuration (e.g., -G=3
+ // or GOEXPERIMENT=unified).
+ expectFail bool
}
-// startTest
-func startTest(dir, gofile string) *test {
- t := &test{
- dir: dir,
- gofile: gofile,
- donec: make(chan bool, 1),
+// initExpectFail initializes t.expectFail based on the build+test
+// configuration. It should only be called for tests known to use
+// types2.
+func (t *test) initExpectFail() {
+ if *force {
+ return
}
- if toRun == nil {
- toRun = make(chan *test, maxTests)
- go runTests()
+
+ failureSets := []map[string]bool{types2Failures}
+
+ // Note: gccgo supports more 32-bit architectures than this, but
+ // hopefully the 32-bit failures are fixed before this matters.
+ switch goarch {
+ case "386", "arm", "mips", "mipsle":
+ failureSets = append(failureSets, types2Failures32Bit)
}
- select {
- case toRun <- t:
- default:
- panic("toRun buffer size (maxTests) is too small")
+
+ if unifiedEnabled {
+ failureSets = append(failureSets, unifiedFailures)
+ } else {
+ failureSets = append(failureSets, g3Failures)
+ }
+
+ filename := strings.Replace(t.goFileName(), "\\", "/", -1) // goFileName() uses \ on Windows
+
+ for _, set := range failureSets {
+ if set[filename] {
+ t.expectFail = true
+ return
+ }
}
- return t
+}
+
+func startTests(dir, gofile string, glevels []int) []*test {
+ tests := make([]*test, len(glevels))
+ for i, glevel := range glevels {
+ t := &test{
+ dir: dir,
+ gofile: gofile,
+ glevel: glevel,
+ donec: make(chan bool, 1),
+ }
+ if toRun == nil {
+ toRun = make(chan *test, maxTests)
+ go runTests()
+ }
+ select {
+ case toRun <- t:
+ default:
+ panic("toRun buffer size (maxTests) is too small")
+ }
+ tests[i] = t
+ }
+ return tests
}
// runTests runs tests in parallel, but respecting the order they
@@ -480,12 +579,16 @@ func init() { checkShouldTest() }
// This must match the flags used for building the standard library,
// or else the commands will rebuild any needed packages (like runtime)
// over and over.
-func goGcflags() string {
- return "-gcflags=all=" + os.Getenv("GO_GCFLAGS")
+func (t *test) goGcflags() string {
+ flags := os.Getenv("GO_GCFLAGS")
+ if t.glevel != 0 {
+ flags = fmt.Sprintf("%s -G=%v", flags, t.glevel)
+ }
+ return "-gcflags=all=" + flags
}
-func goGcflagsIsEmpty() bool {
- return "" == os.Getenv("GO_GCFLAGS")
+func (t *test) goGcflagsIsEmpty() bool {
+ return "" == os.Getenv("GO_GCFLAGS") && t.glevel == 0
}
var errTimeout = errors.New("command exceeded time limit")
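
The run.go hunks up to this point rework how the harness is configured: GOOS, GOARCH, GOEXPERIMENT and CGO_ENABLED are read once from "go env -json" instead of ad-hoc lookups; each test is started once per -G level requested via the new comma-separated -G flag, with -f ignoring the expected-failure lists and failures of listed tests reported as "(expected)"; and goGcflags becomes a method so the per-test -G level can be appended. A standalone sketch of the "go env -json" pattern the harness now uses; the struct and variable names here are illustrative, while the field names match the hunk above:

	package main

	import (
		"encoding/json"
		"fmt"
		"log"
		"os/exec"
	)

	type goEnv struct {
		GOOS         string
		GOARCH       string
		GOEXPERIMENT string
		CGO_ENABLED  string
	}

	func main() {
		out, err := exec.Command("go", "env", "-json").Output()
		if err != nil {
			log.Fatal(err)
		}
		var env goEnv
		if err := json.Unmarshal(out, &env); err != nil {
			log.Fatal(err)
		}
		fmt.Println(env.GOOS, env.GOARCH, "cgo:", env.CGO_ENABLED)
	}
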
@@ -541,7 +644,11 @@ func (t *test) run() {
singlefilepkgs := false
setpkgpaths := false
localImports := true
- f := strings.Fields(action)
+ f, err := splitQuoted(action)
+ if err != nil {
+ t.err = fmt.Errorf("invalid test recipe: %v", err)
+ return
+ }
if len(f) > 0 {
action = f[0]
args = f[1:]
@@ -569,6 +676,8 @@ func (t *test) run() {
return
}
+ goexp := env.GOEXPERIMENT
+
// collect flags
for len(args) > 0 && strings.HasPrefix(args[0], "-") {
switch args[0] {
@@ -595,7 +704,11 @@ func (t *test) run() {
}
case "-goexperiment": // set GOEXPERIMENT environment
args = args[1:]
- runenv = append(runenv, "GOEXPERIMENT="+args[0])
+ if goexp != "" {
+ goexp += ","
+ }
+ goexp += args[0]
+ runenv = append(runenv, "GOEXPERIMENT="+goexp)
default:
flags = append(flags, args[0])
@@ -616,6 +729,63 @@ func (t *test) run() {
}
}
+ type Tool int
+
+ const (
+ _ Tool = iota
+ AsmCheck
+ Build
+ Run
+ Compile
+ )
+
+ // validForGLevel reports whether the current test is valid to run
+ // at the specified -G level. If so, it may update flags as
+ // necessary to test with -G.
+ validForGLevel := func(tool Tool) bool {
+ hasGFlag := false
+ for _, flag := range flags {
+ if strings.Contains(flag, "-G") {
+ hasGFlag = true
+ }
+ }
+
+ if hasGFlag && t.glevel != 0 {
+ // test provides explicit -G flag already; don't run again
+ if *verbose {
+ fmt.Printf("excl\t%s\n", t.goFileName())
+ }
+ return false
+ }
+
+ if t.glevel == 0 && !hasGFlag && !unifiedEnabled {
+ // tests should always pass when run w/o types2 (i.e., using the
+ // legacy typechecker).
+ return true
+ }
+
+ t.initExpectFail()
+
+ switch tool {
+ case Build, Run:
+ // ok; handled in goGcflags
+
+ case Compile:
+ if !hasGFlag {
+ flags = append(flags, fmt.Sprintf("-G=%v", t.glevel))
+ }
+
+ default:
+ // we don't know how to add -G for this test yet
+ if *verbose {
+ fmt.Printf("excl\t%s\n", t.goFileName())
+ }
+ return false
+ }
+
+ return true
+ }
+
t.makeTempDir()
if !*keep {
defer os.RemoveAll(t.tempDir)
@@ -692,6 +862,10 @@ func (t *test) run() {
t.err = fmt.Errorf("unimplemented action %q", action)
case "asmcheck":
+ if !validForGLevel(AsmCheck) {
+ return
+ }
+
// Compile Go file and match the generated assembly
// against a set of regexps in comments.
ops := t.wantedAsmOpcodes(long)
@@ -746,6 +920,10 @@ func (t *test) run() {
return
case "errorcheck":
+ if !validForGLevel(Compile) {
+ return
+ }
+
// Compile Go file.
// Fail if wantError is true and compilation was successful and vice versa.
// Match errors produced by gc against errors in comments.
@@ -774,72 +952,20 @@ func (t *test) run() {
t.updateErrors(string(out), long)
}
t.err = t.errorCheck(string(out), wantAuto, long, t.gofile)
- if t.err != nil {
- return // don't hide error if run below succeeds
- }
-
- // The following is temporary scaffolding to get types2 typechecker
- // up and running against the existing test cases. The explicitly
- // listed files don't pass yet, usually because the error messages
- // are slightly different (this list is not complete). Any errorcheck
- // tests that require output from analysis phases past initial type-
- // checking are also excluded since these phases are not running yet.
- // We can get rid of this code once types2 is fully plugged in.
-
- // For now we're done when we can't handle the file or some of the flags.
- // The first goal is to eliminate the excluded list; the second goal is to
- // eliminate the flag list.
-
- // Excluded files.
- filename := strings.Replace(t.goFileName(), "\\", "/", -1) // goFileName() uses \ on Windows
- if excluded[filename] {
- if *verbose {
- fmt.Printf("excl\t%s\n", filename)
- }
- return // cannot handle file yet
- }
- // Excluded flags.
- for _, flag := range flags {
- for _, pattern := range []string{
- "-m",
- } {
- if strings.Contains(flag, pattern) {
- if *verbose {
- fmt.Printf("excl\t%s\t%s\n", filename, flags)
- }
- return // cannot handle flag
- }
- }
+ case "compile":
+ if !validForGLevel(Compile) {
+ return
}
- // Run errorcheck again with -G option (new typechecker).
- cmdline = []string{goTool(), "tool", "compile", "-G=3", "-C", "-e", "-o", "a.o"}
- // No need to add -dynlink even if linkshared if we're just checking for errors...
- cmdline = append(cmdline, flags...)
- cmdline = append(cmdline, long)
- out, err = runcmd(cmdline...)
- if wantError {
- if err == nil {
- t.err = fmt.Errorf("compilation succeeded unexpectedly\n%s", out)
- return
- }
- } else {
- if err != nil {
- t.err = err
- return
- }
- }
- if *updateErrors {
- t.updateErrors(string(out), long)
- }
- t.err = t.errorCheck(string(out), wantAuto, long, t.gofile)
-
- case "compile":
// Compile Go file.
_, t.err = compileFile(runcmd, long, flags)
case "compiledir":
+ if !validForGLevel(Compile) {
+ return
+ }
+
// Compile all files in the directory as packages in lexicographic order.
longdir := filepath.Join(cwd, t.goDirName())
pkgs, err := goDirPackages(longdir, singlefilepkgs)
@@ -855,6 +981,10 @@ func (t *test) run() {
}
case "errorcheckdir", "errorcheckandrundir":
+ if !validForGLevel(Compile) {
+ return
+ }
+
flags = append(flags, "-d=panic")
// Compile and errorCheck all files in the directory as packages in lexicographic order.
// If errorcheckdir and wantError, compilation of the last package must fail.
@@ -900,6 +1030,10 @@ func (t *test) run() {
fallthrough
case "rundir":
+ if !validForGLevel(Run) {
+ return
+ }
+
// Compile all files in the directory as packages in lexicographic order.
// In case of errorcheckandrundir, ignore failed compilation of the package before the last.
// Link as if the last file is the main package, run it.
@@ -958,6 +1092,10 @@ func (t *test) run() {
}
case "runindir":
+ if !validForGLevel(Run) {
+ return
+ }
+
// Make a shallow copy of t.goDirName() in its own module and GOPATH, and
// run "go run ." in it. The module path (and hence import path prefix) of
// the copy is equal to the basename of the source directory.
@@ -983,7 +1121,7 @@ func (t *test) run() {
return
}
- cmd := []string{goTool(), "run", goGcflags()}
+ cmd := []string{goTool(), "run", t.goGcflags()}
if *linkshared {
cmd = append(cmd, "-linkshared")
}
@@ -997,13 +1135,21 @@ func (t *test) run() {
t.checkExpectedOutput(out)
case "build":
+ if !validForGLevel(Build) {
+ return
+ }
+
// Build Go file.
- _, err := runcmd(goTool(), "build", goGcflags(), "-o", "a.exe", long)
+ _, err := runcmd(goTool(), "build", t.goGcflags(), "-o", "a.exe", long)
if err != nil {
t.err = err
}
case "builddir", "buildrundir":
+ if !validForGLevel(Build) {
+ return
+ }
+
// Build an executable from all the .go and .s files in a subdirectory.
// Run it and verify its output in the buildrundir case.
longdir := filepath.Join(cwd, t.goDirName())
@@ -1083,10 +1229,14 @@ func (t *test) run() {
}
case "buildrun":
+ if !validForGLevel(Build) {
+ return
+ }
+
// Build an executable from Go file, then run it, verify its output.
// Useful for timeout tests where failure mode is infinite loop.
// TODO: not supported on NaCl
- cmd := []string{goTool(), "build", goGcflags(), "-o", "a.exe"}
+ cmd := []string{goTool(), "build", t.goGcflags(), "-o", "a.exe"}
if *linkshared {
cmd = append(cmd, "-linkshared")
}
@@ -1108,13 +1258,17 @@ func (t *test) run() {
t.checkExpectedOutput(out)
case "run":
+ if !validForGLevel(Run) {
+ return
+ }
+
// Run Go file if no special go command flags are provided;
// otherwise build an executable and run it.
// Verify the output.
runInDir = ""
var out []byte
var err error
- if len(flags)+len(args) == 0 && goGcflagsIsEmpty() && !*linkshared && goarch == runtime.GOARCH && goos == runtime.GOOS {
+ if len(flags)+len(args) == 0 && t.goGcflagsIsEmpty() && !*linkshared && goarch == runtime.GOARCH && goos == runtime.GOOS && goexp == env.GOEXPERIMENT {
// If we're not using special go command flags,
// skip all the go command machinery.
// This avoids any time the go command would
@@ -1136,7 +1290,7 @@ func (t *test) run() {
}
out, err = runcmd(append([]string{exe}, args...)...)
} else {
- cmd := []string{goTool(), "run", goGcflags()}
+ cmd := []string{goTool(), "run", t.goGcflags()}
if *linkshared {
cmd = append(cmd, "-linkshared")
}
@@ -1151,6 +1305,10 @@ func (t *test) run() {
t.checkExpectedOutput(out)
case "runoutput":
+ if !validForGLevel(Run) {
+ return
+ }
+
// Run Go file and write its output into temporary Go file.
// Run generated Go file and verify its output.
rungatec <- true
@@ -1158,7 +1316,7 @@ func (t *test) run() {
<-rungatec
}()
runInDir = ""
- cmd := []string{goTool(), "run", goGcflags()}
+ cmd := []string{goTool(), "run", t.goGcflags()}
if *linkshared {
cmd = append(cmd, "-linkshared")
}
@@ -1173,7 +1331,7 @@ func (t *test) run() {
t.err = fmt.Errorf("write tempfile:%s", err)
return
}
- cmd = []string{goTool(), "run", goGcflags()}
+ cmd = []string{goTool(), "run", t.goGcflags()}
if *linkshared {
cmd = append(cmd, "-linkshared")
}
@@ -1186,10 +1344,14 @@ func (t *test) run() {
t.checkExpectedOutput(out)
case "errorcheckoutput":
+ if !validForGLevel(Compile) {
+ return
+ }
+
// Run Go file and write its output into temporary Go file.
// Compile and errorCheck generated Go file.
runInDir = ""
- cmd := []string{goTool(), "run", goGcflags()}
+ cmd := []string{goTool(), "run", t.goGcflags()}
if *linkshared {
cmd = append(cmd, "-linkshared")
}
@@ -1941,66 +2103,165 @@ func overlayDir(dstRoot, srcRoot string) error {
})
}
+// The following is temporary scaffolding to get types2 typechecker
+// up and running against the existing test cases. The explicitly
+// listed files don't pass yet, usually because the error messages
+// are slightly different (this list is not complete). Any errorcheck
+// tests that require output from analysis phases past initial type-
+// checking are also excluded since these phases are not running yet.
+// We can get rid of this code once types2 is fully plugged in.
+
// List of files that the compiler cannot errorcheck with the new typechecker (compiler -G option).
// Temporary scaffolding until we pass all the tests at which point this map can be removed.
-var excluded = map[string]bool{
- "complit1.go": true, // types2 reports extra errors
- "const2.go": true, // types2 not run after syntax errors
- "ddd1.go": true, // issue #42987
- "directive.go": true, // misplaced compiler directive checks
- "float_lit3.go": true, // types2 reports extra errors
- "import1.go": true, // types2 reports extra errors
- "import5.go": true, // issue #42988
- "import6.go": true, // issue #43109
- "initializerr.go": true, // types2 reports extra errors
- "linkname2.go": true, // error reported by noder (not running for types2 errorcheck test)
- "notinheap.go": true, // types2 doesn't report errors about conversions that are invalid due to //go:notinheap
- "shift1.go": true, // issue #42989
- "typecheck.go": true, // invalid function is not causing errors when called
- "writebarrier.go": true, // correct diagnostics, but different lines (probably irgen's fault)
-
- "fixedbugs/bug176.go": true, // types2 reports all errors (pref: types2)
- "fixedbugs/bug195.go": true, // types2 reports slightly different (but correct) bugs
- "fixedbugs/bug228.go": true, // types2 not run after syntax errors
- "fixedbugs/bug231.go": true, // types2 bug? (same error reported twice)
- "fixedbugs/bug255.go": true, // types2 reports extra errors
- "fixedbugs/bug351.go": true, // types2 reports extra errors
- "fixedbugs/bug374.go": true, // types2 reports extra errors
- "fixedbugs/bug385_32.go": true, // types2 doesn't produce missing error "type .* too large" (32-bit specific)
- "fixedbugs/bug388.go": true, // types2 not run due to syntax errors
- "fixedbugs/bug412.go": true, // types2 produces a follow-on error
-
- "fixedbugs/issue11590.go": true, // types2 doesn't report a follow-on error (pref: types2)
- "fixedbugs/issue11610.go": true, // types2 not run after syntax errors
- "fixedbugs/issue11614.go": true, // types2 reports an extra error
- "fixedbugs/issue13415.go": true, // declared but not used conflict
- "fixedbugs/issue14520.go": true, // missing import path error by types2
- "fixedbugs/issue16428.go": true, // types2 reports two instead of one error
- "fixedbugs/issue17038.go": true, // types2 doesn't report a follow-on error (pref: types2)
- "fixedbugs/issue17645.go": true, // multiple errors on same line
- "fixedbugs/issue18331.go": true, // missing error about misuse of //go:noescape (irgen needs code from noder)
- "fixedbugs/issue18393.go": true, // types2 not run after syntax errors
- "fixedbugs/issue19012.go": true, // multiple errors on same line
- "fixedbugs/issue20233.go": true, // types2 reports two instead of one error (pref: compiler)
- "fixedbugs/issue20245.go": true, // types2 reports two instead of one error (pref: compiler)
- "fixedbugs/issue20250.go": true, // correct diagnostics, but different lines (probably irgen's fault)
- "fixedbugs/issue21979.go": true, // types2 doesn't report a follow-on error (pref: types2)
- "fixedbugs/issue23732.go": true, // types2 reports different (but ok) line numbers
- "fixedbugs/issue25958.go": true, // types2 doesn't report a follow-on error (pref: types2)
- "fixedbugs/issue28079b.go": true, // types2 reports follow-on errors
- "fixedbugs/issue28268.go": true, // types2 reports follow-on errors
- "fixedbugs/issue33460.go": true, // types2 reports alternative positions in separate error
- "fixedbugs/issue41575.go": true, // types2 reports alternative positions in separate error
- "fixedbugs/issue42058a.go": true, // types2 doesn't report "channel element type too large"
- "fixedbugs/issue42058b.go": true, // types2 doesn't report "channel element type too large"
- "fixedbugs/issue4232.go": true, // types2 reports (correct) extra errors
- "fixedbugs/issue4452.go": true, // types2 reports (correct) extra errors
- "fixedbugs/issue5609.go": true, // types2 needs a better error message
- "fixedbugs/issue6889.go": true, // types2 can handle this without constant overflow
- "fixedbugs/issue7525.go": true, // types2 reports init cycle error on different line - ok otherwise
- "fixedbugs/issue7525b.go": true, // types2 reports init cycle error on different line - ok otherwise
- "fixedbugs/issue7525c.go": true, // types2 reports init cycle error on different line - ok otherwise
- "fixedbugs/issue7525d.go": true, // types2 reports init cycle error on different line - ok otherwise
- "fixedbugs/issue7525e.go": true, // types2 reports init cycle error on different line - ok otherwise
- "fixedbugs/issue46749.go": true, // types2 reports can not convert error instead of type mismatched
+var types2Failures = setOf(
+ "directive.go", // misplaced compiler directive checks
+ "float_lit3.go", // types2 reports extra errors
+ "import1.go", // types2 reports extra errors
+ "import6.go", // issue #43109
+ "initializerr.go", // types2 reports extra errors
+ "linkname2.go", // error reported by noder (not running for types2 errorcheck test)
+ "notinheap.go", // types2 doesn't report errors about conversions that are invalid due to //go:notinheap
+ "shift1.go", // issue #42989
+ "typecheck.go", // invalid function is not causing errors when called
+
+ "interface/private.go", // types2 phrases errors differently (doesn't use non-spec "private" term)
+
+ "fixedbugs/bug176.go", // types2 reports all errors (pref: types2)
+ "fixedbugs/bug195.go", // types2 reports slightly different (but correct) bugs
+ "fixedbugs/bug228.go", // types2 doesn't run when there are syntax errors
+ "fixedbugs/bug231.go", // types2 bug? (same error reported twice)
+ "fixedbugs/bug255.go", // types2 reports extra errors
+ "fixedbugs/bug374.go", // types2 reports extra errors
+ "fixedbugs/bug388.go", // types2 not run due to syntax errors
+ "fixedbugs/bug412.go", // types2 produces a follow-on error
+
+ "fixedbugs/issue10700.go", // types2 reports ok hint, but does not match regexp
+ "fixedbugs/issue11590.go", // types2 doesn't report a follow-on error (pref: types2)
+ "fixedbugs/issue11610.go", // types2 not run after syntax errors
+ "fixedbugs/issue11614.go", // types2 reports an extra error
+ "fixedbugs/issue14520.go", // missing import path error by types2
+ "fixedbugs/issue16428.go", // types2 reports two instead of one error
+ "fixedbugs/issue17038.go", // types2 doesn't report a follow-on error (pref: types2)
+ "fixedbugs/issue17645.go", // multiple errors on same line
+ "fixedbugs/issue18331.go", // missing error about misuse of //go:noescape (irgen needs code from noder)
+ "fixedbugs/issue18419.go", // types2 reports
+ "fixedbugs/issue19012.go", // multiple errors on same line
+ "fixedbugs/issue20233.go", // types2 reports two instead of one error (pref: compiler)
+ "fixedbugs/issue20245.go", // types2 reports two instead of one error (pref: compiler)
+ "fixedbugs/issue21979.go", // types2 doesn't report a follow-on error (pref: types2)
+ "fixedbugs/issue23732.go", // types2 reports different (but ok) line numbers
+ "fixedbugs/issue25958.go", // types2 doesn't report a follow-on error (pref: types2)
+ "fixedbugs/issue28079b.go", // types2 reports follow-on errors
+ "fixedbugs/issue28268.go", // types2 reports follow-on errors
+ "fixedbugs/issue31053.go", // types2 reports "unknown field" instead of "cannot refer to unexported field"
+ "fixedbugs/issue33460.go", // types2 reports alternative positions in separate error
+ "fixedbugs/issue42058a.go", // types2 doesn't report "channel element type too large"
+ "fixedbugs/issue42058b.go", // types2 doesn't report "channel element type too large"
+ "fixedbugs/issue4232.go", // types2 reports (correct) extra errors
+ "fixedbugs/issue4452.go", // types2 reports (correct) extra errors
+ "fixedbugs/issue4510.go", // types2 reports different (but ok) line numbers
+ "fixedbugs/issue47201.go", // types2 spells the error message differently
+ "fixedbugs/issue5609.go", // types2 needs a better error message
+ "fixedbugs/issue7525b.go", // types2 reports init cycle error on different line - ok otherwise
+ "fixedbugs/issue7525c.go", // types2 reports init cycle error on different line - ok otherwise
+ "fixedbugs/issue7525d.go", // types2 reports init cycle error on different line - ok otherwise
+ "fixedbugs/issue7525e.go", // types2 reports init cycle error on different line - ok otherwise
+ "fixedbugs/issue7525.go", // types2 reports init cycle error on different line - ok otherwise
+)
+
+var types2Failures32Bit = setOf(
+ "printbig.go", // large untyped int passed to print (32-bit)
+ "fixedbugs/bug114.go", // large untyped int passed to println (32-bit)
+ "fixedbugs/issue23305.go", // large untyped int passed to println (32-bit)
+ "fixedbugs/bug385_32.go", // types2 doesn't produce missing error "type .* too large" (32-bit specific)
+)
+
+var g3Failures = setOf(
+ "writebarrier.go", // correct diagnostics, but different lines (probably irgen's fault)
+
+ "fixedbugs/issue30862.go", // -G=3 doesn't handle //go:nointerface
+
+ "typeparam/nested.go", // -G=3 doesn't support function-local types with generics
+
+ "typeparam/mdempsky/4.go", // -G=3 can't export functions with labeled breaks in loops
+ "typeparam/mdempsky/15.go", // ICE in (*irgen).buildClosure
+)
+
+var unifiedFailures = setOf(
+ "closure3.go", // unified IR numbers closures differently than -d=inlfuncswithclosures
+ "escape4.go", // unified IR can inline f5 and f6; test doesn't expect this
+ "inline.go", // unified IR reports function literal diagnostics on different lines than -d=inlfuncswithclosures
+
+ "fixedbugs/issue42284.go", // prints "T(0) does not escape", but test expects "a.I(a.T(0)) does not escape"
+ "fixedbugs/issue7921.go", // prints "… escapes to heap", but test expects "string(…) escapes to heap"
+)
+
+func setOf(keys ...string) map[string]bool {
+ m := make(map[string]bool, len(keys))
+ for _, key := range keys {
+ m[key] = true
+ }
+ return m
+}
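For illustration only (not part of this patch): a minimal sketch of how the harness could consult these sets, assuming a slash-normalized test file name like the one computed by the exclusion check removed above; the helper name skipKnownFailure and the is32Bit parameter are invented for this example.

func skipKnownFailure(filename string, is32Bit bool) bool {
	// filename is expected in slash form, e.g. "fixedbugs/issue4232.go".
	if types2Failures[filename] {
		return true
	}
	return is32Bit && types2Failures32Bit[filename]
}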
+
+// splitQuoted splits the string s around each instance of one or more consecutive
+// white space characters while taking into account quotes and escaping, and
+// returns an array of substrings of s or an empty list if s contains only white space.
+// Single quotes and double quotes are recognized to prevent splitting within the
+// quoted region, and are removed from the resulting substrings. If a quote in s
+// isn't closed err will be set and r will have the unclosed argument as the
+// last element. The backslash is used for escaping.
+//
+// For example, the following string:
+//
+// a b:"c d" 'e''f' "g\""
+//
+// Would be parsed as:
+//
+// []string{"a", "b:c d", "ef", `g"`}
+//
+// [copied from src/go/build/build.go]
+func splitQuoted(s string) (r []string, err error) {
+ var args []string
+ arg := make([]rune, len(s))
+ escaped := false
+ quoted := false
+ quote := '\x00'
+ i := 0
+ for _, rune := range s {
+ switch {
+ case escaped:
+ escaped = false
+ case rune == '\\':
+ escaped = true
+ continue
+ case quote != '\x00':
+ if rune == quote {
+ quote = '\x00'
+ continue
+ }
+ case rune == '"' || rune == '\'':
+ quoted = true
+ quote = rune
+ continue
+ case unicode.IsSpace(rune):
+ if quoted || i > 0 {
+ quoted = false
+ args = append(args, string(arg[:i]))
+ i = 0
+ }
+ continue
+ }
+ arg[i] = rune
+ i++
+ }
+ if quoted || i > 0 {
+ args = append(args, string(arg[:i]))
+ }
+ if quote != 0 {
+ err = errors.New("unclosed quote")
+ } else if escaped {
+ err = errors.New("unfinished escaping")
+ }
+ return args, err
}
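For reference, a small usage sketch of splitQuoted that mirrors the example in its doc comment (illustrative only; the wrapper name exampleSplitQuoted is invented):

func exampleSplitQuoted() {
	// The raw string below is the input shown in the doc comment.
	args, err := splitQuoted(`a b:"c d" 'e''f' "g\""`)
	// Expected: err == nil and args == []string{"a", "b:c d", "ef", `g"`}
	_, _ = args, err
}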
diff --git a/test/typeparam/absdiff.go b/test/typeparam/absdiff.go
index 1381d7c92c..cad6e84c4e 100644
--- a/test/typeparam/absdiff.go
+++ b/test/typeparam/absdiff.go
@@ -12,10 +12,10 @@ import (
)
type Numeric interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64,
- complex64, complex128
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~complex64 | ~complex128
}
// numericAbs matches numeric types with an Abs method.
@@ -33,14 +33,14 @@ func absDifference[T numericAbs[T]](a, b T) T {
// orderedNumeric matches numeric types that support the < operator.
type orderedNumeric interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64
}
// Complex matches the two complex types, which do not have a < operator.
type Complex interface {
- type complex64, complex128
+ ~complex64 | ~complex128
}
// orderedAbs is a helper type that defines an Abs method for
@@ -48,8 +48,7 @@ type Complex interface {
type orderedAbs[T orderedNumeric] T
func (a orderedAbs[T]) Abs() orderedAbs[T] {
- // TODO(danscales): orderedAbs[T] conversion shouldn't be needed
- if a < orderedAbs[T](0) {
+ if a < 0 {
return -a
}
return a
@@ -62,7 +61,7 @@ type complexAbs[T Complex] T
func (a complexAbs[T]) Abs() complexAbs[T] {
r := float64(real(a))
i := float64(imag(a))
- d := math.Sqrt(r * r + i * i)
+ d := math.Sqrt(r*r + i*i)
return complexAbs[T](complex(d, 0))
}
@@ -89,10 +88,10 @@ func main() {
panic(fmt.Sprintf("got = %v, want = %v", got, want))
}
- if got, want := complexAbsDifference(5.0 + 2.0i, 2.0 - 2.0i), 5+0i; got != want {
+ if got, want := complexAbsDifference(5.0+2.0i, 2.0-2.0i), 5+0i; got != want {
panic(fmt.Sprintf("got = %v, want = %v", got, want))
}
- if got, want := complexAbsDifference(2.0 - 2.0i, 5.0 + 2.0i), 5+0i; got != want {
+ if got, want := complexAbsDifference(2.0-2.0i, 5.0+2.0i), 5+0i; got != want {
panic(fmt.Sprintf("got = %v, want = %v", got, want))
}
}
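The hunk above converts the earlier draft type-list constraint syntax to the union/approximation form used throughout the remaining test updates. As a standalone illustration of the adopted syntax (the names Small, Min, and myInt are invented for this sketch and are not part of the patch):

package main

import "fmt"

// Small is satisfied by any type whose underlying type is int or int8.
type Small interface {
	~int | ~int8
}

// Min returns the smaller of two values of a type satisfying Small.
func Min[T Small](a, b T) T {
	if a < b {
		return a
	}
	return b
}

func main() {
	type myInt int
	fmt.Println(Min(myInt(3), myInt(5))) // prints 3; myInt satisfies Small via ~int
}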
diff --git a/test/typeparam/absdiffimp.dir/a.go b/test/typeparam/absdiffimp.dir/a.go
new file mode 100644
index 0000000000..7b5bfbe2ac
--- /dev/null
+++ b/test/typeparam/absdiffimp.dir/a.go
@@ -0,0 +1,75 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+import (
+ "math"
+)
+
+type Numeric interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~complex64 | ~complex128
+}
+
+// numericAbs matches numeric types with an Abs method.
+type numericAbs[T any] interface {
+ Numeric
+ Abs() T
+}
+
+// AbsDifference computes the absolute value of the difference of
+// a and b, where the absolute value is determined by the Abs method.
+func absDifference[T numericAbs[T]](a, b T) T {
+ d := a - b
+ return d.Abs()
+}
+
+// orderedNumeric matches numeric types that support the < operator.
+type orderedNumeric interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64
+}
+
+// Complex matches the two complex types, which do not have a < operator.
+type Complex interface {
+ ~complex64 | ~complex128
+}
+
+// orderedAbs is a helper type that defines an Abs method for
+// ordered numeric types.
+type orderedAbs[T orderedNumeric] T
+
+func (a orderedAbs[T]) Abs() orderedAbs[T] {
+ if a < 0 {
+ return -a
+ }
+ return a
+}
+
+// complexAbs is a helper type that defines an Abs method for
+// complex types.
+type complexAbs[T Complex] T
+
+func (a complexAbs[T]) Abs() complexAbs[T] {
+ r := float64(real(a))
+ i := float64(imag(a))
+ d := math.Sqrt(r*r + i*i)
+ return complexAbs[T](complex(d, 0))
+}
+
+// OrderedAbsDifference returns the absolute value of the difference
+// between a and b, where a and b are of an ordered type.
+func OrderedAbsDifference[T orderedNumeric](a, b T) T {
+ return T(absDifference(orderedAbs[T](a), orderedAbs[T](b)))
+}
+
+// ComplexAbsDifference returns the absolute value of the difference
+// between a and b, where a and b are of a complex type.
+func ComplexAbsDifference[T Complex](a, b T) T {
+ return T(absDifference(complexAbs[T](a), complexAbs[T](b)))
+}
diff --git a/test/typeparam/absdiffimp.dir/main.go b/test/typeparam/absdiffimp.dir/main.go
new file mode 100644
index 0000000000..8eefdbdf38
--- /dev/null
+++ b/test/typeparam/absdiffimp.dir/main.go
@@ -0,0 +1,29 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+)
+
+func main() {
+ if got, want := a.OrderedAbsDifference(1.0, -2.0), 3.0; got != want {
+ panic(fmt.Sprintf("got = %v, want = %v", got, want))
+ }
+ if got, want := a.OrderedAbsDifference(-1.0, 2.0), 3.0; got != want {
+ panic(fmt.Sprintf("got = %v, want = %v", got, want))
+ }
+ if got, want := a.OrderedAbsDifference(-20, 15), 35; got != want {
+ panic(fmt.Sprintf("got = %v, want = %v", got, want))
+ }
+
+ if got, want := a.ComplexAbsDifference(5.0+2.0i, 2.0-2.0i), 5+0i; got != want {
+ panic(fmt.Sprintf("got = %v, want = %v", got, want))
+ }
+ if got, want := a.ComplexAbsDifference(2.0-2.0i, 5.0+2.0i), 5+0i; got != want {
+ panic(fmt.Sprintf("got = %v, want = %v", got, want))
+ }
+}
diff --git a/test/typeparam/absdiffimp.go b/test/typeparam/absdiffimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/absdiffimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/adder.go b/test/typeparam/adder.go
index 0c25ad4ef2..79319bd236 100644
--- a/test/typeparam/adder.go
+++ b/test/typeparam/adder.go
@@ -11,19 +11,19 @@ import (
)
type AddType interface {
- type int, int64, string
+ int | int64 | string
}
-// _Add can add numbers or strings
-func _Add[T AddType](a, b T) T {
+// Add can add numbers or strings
+func Add[T AddType](a, b T) T {
return a + b
}
func main() {
- if got, want := _Add(5, 3), 8; got != want {
+ if got, want := Add(5, 3), 8; got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
- if got, want := _Add("ab", "cd"), "abcd"; got != want {
+ if got, want := Add("ab", "cd"), "abcd"; got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
}
diff --git a/test/typeparam/aliasimp.dir/a.go b/test/typeparam/aliasimp.dir/a.go
new file mode 100644
index 0000000000..c64e87c10f
--- /dev/null
+++ b/test/typeparam/aliasimp.dir/a.go
@@ -0,0 +1,9 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Rimp[T any] struct {
+ F T
+}
diff --git a/test/typeparam/aliasimp.dir/main.go b/test/typeparam/aliasimp.dir/main.go
new file mode 100644
index 0000000000..221a6c758d
--- /dev/null
+++ b/test/typeparam/aliasimp.dir/main.go
@@ -0,0 +1,38 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "a"
+
+type R[T any] struct {
+ F T
+}
+
+type S = R
+
+type Sint = R[int]
+
+type Simp = a.Rimp
+
+type SimpString Simp[string]
+
+func main() {
+ var s S[int]
+ if s.F != 0 {
+ panic(s.F)
+ }
+ var s2 Sint
+ if s2.F != 0 {
+ panic(s2.F)
+ }
+ var s3 Simp[string]
+ if s3.F != "" {
+ panic(s3.F)
+ }
+ var s4 SimpString
+ if s4.F != "" {
+ panic(s4.F)
+ }
+}
diff --git a/test/typeparam/aliasimp.go b/test/typeparam/aliasimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/aliasimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/boundmethod.go b/test/typeparam/boundmethod.go
new file mode 100644
index 0000000000..22f416422d
--- /dev/null
+++ b/test/typeparam/boundmethod.go
@@ -0,0 +1,106 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This test illustrates how a type bound method (String below) can be implemented
+// either by a concrete type (myint below) or an instantiated generic type
+// (StringInt[myint] below).
+
+package main
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+)
+
+type myint int
+
+//go:noinline
+func (m myint) String() string {
+ return strconv.Itoa(int(m))
+}
+
+type Stringer interface {
+ String() string
+}
+
+func stringify[T Stringer](s []T) (ret []string) {
+ for _, v := range s {
+ // Test normal bounds method call on type param
+ x1 := v.String()
+
+ // Test converting type param to its bound interface first
+ v1 := Stringer(v)
+ x2 := v1.String()
+
+ // Test method expression with type param type
+ f1 := T.String
+ x3 := f1(v)
+
+ // Test creating and calling closure equivalent to the method expression
+ f2 := func(v1 T) string {
+ return Stringer(v1).String()
+ }
+ x4 := f2(v)
+
+ if x1 != x2 || x2 != x3 || x3 != x4 {
+ panic(fmt.Sprintf("Mismatched values %v, %v, %v, %v\n", x1, x2, x3, x4))
+ }
+
+ ret = append(ret, v.String())
+ }
+ return ret
+}
+
+type Ints interface {
+ ~int32 | ~int
+}
+
+type StringInt[T Ints] T
+
+//go:noinline
+func (m StringInt[T]) String() string {
+ return strconv.Itoa(int(m))
+}
+
+type StringStruct[T Ints] struct {
+ f T
+}
+
+func (m StringStruct[T]) String() string {
+ return strconv.Itoa(int(m.f))
+}
+
+func main() {
+ x := []myint{myint(1), myint(2), myint(3)}
+
+ // stringify on a normal type, whose bound method is associated with the base type.
+ got := stringify(x)
+ want := []string{"1", "2", "3"}
+ if !reflect.DeepEqual(got, want) {
+ panic(fmt.Sprintf("got %s, want %s", got, want))
+ }
+
+ x2 := []StringInt[myint]{StringInt[myint](5), StringInt[myint](7), StringInt[myint](6)}
+
+ // stringify on an instantiated type, whose bound method is associated with
+ // the generic type StringInt[T], which maps directly to T.
+ got2 := stringify(x2)
+ want2 := []string{ "5", "7", "6" }
+ if !reflect.DeepEqual(got2, want2) {
+ panic(fmt.Sprintf("got %s, want %s", got2, want2))
+ }
+
+ // stringify on an instantiated type, whose bound method is associated with
+ // the generic type StringStruct[T], which maps to a struct containing T.
+ x3 := []StringStruct[myint]{StringStruct[myint]{f: 11}, StringStruct[myint]{f: 10}, StringStruct[myint]{f: 9}}
+
+ got3 := stringify(x3)
+ want3 := []string{ "11", "10", "9" }
+ if !reflect.DeepEqual(got3, want3) {
+ panic(fmt.Sprintf("got %s, want %s", got3, want3))
+ }
+}
diff --git a/test/typeparam/builtins.go b/test/typeparam/builtins.go
new file mode 100644
index 0000000000..844cdae8ab
--- /dev/null
+++ b/test/typeparam/builtins.go
@@ -0,0 +1,111 @@
+// compile -G=3
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file tests built-in calls on generic types.
+
+// derived and expanded from cmd/compile/internal/types2/testdata/check/builtins.go2
+
+package builtins
+
+// close
+
+type C0 interface{ int }
+type C1 interface{ chan int }
+type C2 interface{ chan int | <-chan int }
+type C3 interface{ chan int | chan float32 }
+type C4 interface{ chan int | chan<- int }
+type C5[T any] interface{ ~chan T | chan<- T }
+
+func f1[T C1](ch T) {
+ close(ch)
+}
+
+func f2[T C3](ch T) {
+ close(ch)
+}
+
+func f3[T C4](ch T) {
+ close(ch)
+}
+
+func f4[T C5[X], X any](ch T) {
+ close(ch)
+}
+
+// delete
+
+type M0 interface{ int }
+type M1 interface{ map[string]int }
+type M2 interface {
+ map[string]int | map[string]float64
+}
+type M3 interface{ map[string]int | map[rune]int }
+type M4[K comparable, V any] interface{ map[K]V | map[rune]V }
+
+func g1[T M1](m T) {
+ delete(m, "foo")
+}
+
+func g2[T M2](m T) {
+ delete(m, "foo")
+}
+
+func g3[T M4[rune, V], V any](m T) {
+ delete(m, 'k')
+}
+
+// make
+
+func m1[
+ S1 interface{ []int },
+ S2 interface{ []int | chan int },
+
+ M1 interface{ map[string]int },
+ M2 interface{ map[string]int | chan int },
+
+ C1 interface{ chan int },
+ C2 interface{ chan int | chan string },
+]() {
+ type S0 []int
+ _ = make([]int, 10)
+ _ = make(S0, 10)
+ _ = make(S1, 10)
+ _ = make(S1, 10, 20)
+
+ type M0 map[string]int
+ _ = make(map[string]int)
+ _ = make(M0)
+ _ = make(M1)
+ _ = make(M1, 10)
+
+ type C0 chan int
+ _ = make(chan int)
+ _ = make(C0)
+ _ = make(C1)
+ _ = make(C1, 10)
+}
+
+// len/cap
+
+type Slice[T any] interface {
+ []T
+}
+
+func c1[T any, S Slice[T]]() {
+ x := make(S, 5, 10)
+ _ = len(x)
+ _ = cap(x)
+}
+
+// append
+
+func a1[T any, S Slice[T]]() {
+ x := make(S, 5)
+ y := make(S, 2)
+ var z T
+ _ = append(x, y...)
+ _ = append(x, z)
+}
diff --git a/test/typeparam/chans.go b/test/typeparam/chans.go
index 2fcd4af75e..c30c21c37b 100644
--- a/test/typeparam/chans.go
+++ b/test/typeparam/chans.go
@@ -183,7 +183,7 @@ func _Ranger[Elem any]() (*_Sender[Elem], *_Receiver[Elem]) {
values: c,
done: d,
}
- r := &_Receiver[Elem] {
+ r := &_Receiver[Elem]{
values: c,
done: d,
}
diff --git a/test/typeparam/chansimp.dir/a.go b/test/typeparam/chansimp.dir/a.go
new file mode 100644
index 0000000000..7321992704
--- /dev/null
+++ b/test/typeparam/chansimp.dir/a.go
@@ -0,0 +1,232 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+import (
+ "context"
+ "runtime"
+)
+
+// SliceEqual reports whether two slices are equal: the same length and all
+// elements equal. All floating point NaNs are considered equal.
+func SliceEqual[Elem comparable](s1, s2 []Elem) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if v1 != v2 {
+ isNaN := func(f Elem) bool { return f != f }
+ if !isNaN(v1) || !isNaN(v2) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// ReadAll reads from c until the channel is closed or the context is
+// canceled, returning all the values read.
+func ReadAll[Elem any](ctx context.Context, c <-chan Elem) []Elem {
+ var r []Elem
+ for {
+ select {
+ case <-ctx.Done():
+ return r
+ case v, ok := <-c:
+ if !ok {
+ return r
+ }
+ r = append(r, v)
+ }
+ }
+}
+
+// Merge merges two channels into a single channel.
+// This will leave a goroutine running until either both channels are closed
+// or the context is canceled, at which point the returned channel is closed.
+func Merge[Elem any](ctx context.Context, c1, c2 <-chan Elem) <-chan Elem {
+ r := make(chan Elem)
+ go func(ctx context.Context, c1, c2 <-chan Elem, r chan<- Elem) {
+ defer close(r)
+ for c1 != nil || c2 != nil {
+ select {
+ case <-ctx.Done():
+ return
+ case v1, ok := <-c1:
+ if ok {
+ r <- v1
+ } else {
+ c1 = nil
+ }
+ case v2, ok := <-c2:
+ if ok {
+ r <- v2
+ } else {
+ c2 = nil
+ }
+ }
+ }
+ }(ctx, c1, c2, r)
+ return r
+}
+
+// Filter calls f on each value read from c. If f returns true the value
+// is sent on the returned channel. This will leave a goroutine running
+// until c is closed or the context is canceled, at which point the
+// returned channel is closed.
+func Filter[Elem any](ctx context.Context, c <-chan Elem, f func(Elem) bool) <-chan Elem {
+ r := make(chan Elem)
+ go func(ctx context.Context, c <-chan Elem, f func(Elem) bool, r chan<- Elem) {
+ defer close(r)
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case v, ok := <-c:
+ if !ok {
+ return
+ }
+ if f(v) {
+ r <- v
+ }
+ }
+ }
+ }(ctx, c, f, r)
+ return r
+}
+
+// Sink returns a channel that discards all values sent to it.
+// This will leave a goroutine running until the context is canceled
+// or the returned channel is closed.
+func Sink[Elem any](ctx context.Context) chan<- Elem {
+ r := make(chan Elem)
+ go func(ctx context.Context, r <-chan Elem) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case _, ok := <-r:
+ if !ok {
+ return
+ }
+ }
+ }
+ }(ctx, r)
+ return r
+}
+
+// An Exclusive is a value that may only be used by a single goroutine
+// at a time. This is implemented using channels rather than a mutex.
+type Exclusive[Val any] struct {
+ c chan Val
+}
+
+// MakeExclusive makes an initialized exclusive value.
+func MakeExclusive[Val any](initial Val) *Exclusive[Val] {
+ r := &Exclusive[Val]{
+ c: make(chan Val, 1),
+ }
+ r.c <- initial
+ return r
+}
+
+// Acquire acquires the exclusive value for private use.
+// It must be released using the Release method.
+func (e *Exclusive[Val]) Acquire() Val {
+ return <-e.c
+}
+
+// TryAcquire attempts to acquire the value. The ok result reports whether
+// the value was acquired. If the value is acquired, it must be released
+// using the Release method.
+func (e *Exclusive[Val]) TryAcquire() (v Val, ok bool) {
+ select {
+ case r := <-e.c:
+ return r, true
+ default:
+ return v, false
+ }
+}
+
+// Release updates and releases the value.
+// This method panics if the value has not been acquired.
+func (e *Exclusive[Val]) Release(v Val) {
+ select {
+ case e.c <- v:
+ default:
+ panic("Exclusive Release without Acquire")
+ }
+}
+
+// Ranger returns a Sender and a Receiver. The Receiver provides a
+// Next method to retrieve values. The Sender provides a Send method
+// to send values and a Close method to stop sending values. The Next
+// method indicates when the Sender has been closed, and the Send
+// method indicates when the Receiver has been freed.
+//
+// This is a convenient way to exit a goroutine sending values when
+// the receiver stops reading them.
+func Ranger[Elem any]() (*Sender[Elem], *Receiver[Elem]) {
+ c := make(chan Elem)
+ d := make(chan struct{})
+ s := &Sender[Elem]{
+ values: c,
+ done: d,
+ }
+ r := &Receiver[Elem]{
+ values: c,
+ done: d,
+ }
+ runtime.SetFinalizer(r, (*Receiver[Elem]).finalize)
+ return s, r
+}
+
+// A Sender is used to send values to a Receiver.
+type Sender[Elem any] struct {
+ values chan<- Elem
+ done <-chan struct{}
+}
+
+// Send sends a value to the receiver. It reports whether the value was sent.
+// The value will not be sent if the context is closed or the receiver
+// is freed.
+func (s *Sender[Elem]) Send(ctx context.Context, v Elem) bool {
+ select {
+ case <-ctx.Done():
+ return false
+ case s.values <- v:
+ return true
+ case <-s.done:
+ return false
+ }
+}
+
+// Close tells the receiver that no more values will arrive.
+// After Close is called, the Sender may no longer be used.
+func (s *Sender[Elem]) Close() {
+ close(s.values)
+}
+
+// A Receiver receives values from a Sender.
+type Receiver[Elem any] struct {
+ values <-chan Elem
+ done chan<- struct{}
+}
+
+// Next returns the next value from the channel. The bool result indicates
+// whether the value is valid.
+func (r *Receiver[Elem]) Next(ctx context.Context) (v Elem, ok bool) {
+ select {
+ case <-ctx.Done():
+ case v, ok = <-r.values:
+ }
+ return v, ok
+}
+
+// finalize is a finalizer for the receiver.
+func (r *Receiver[Elem]) finalize() {
+ close(r.done)
+}
diff --git a/test/typeparam/chansimp.dir/main.go b/test/typeparam/chansimp.dir/main.go
new file mode 100644
index 0000000000..ca27167598
--- /dev/null
+++ b/test/typeparam/chansimp.dir/main.go
@@ -0,0 +1,189 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "context"
+ "fmt"
+ "runtime"
+ "sort"
+ "sync"
+ "time"
+)
+
+func TestReadAll() {
+ c := make(chan int)
+ go func() {
+ c <- 4
+ c <- 2
+ c <- 5
+ close(c)
+ }()
+ got := a.ReadAll(context.Background(), c)
+ want := []int{4, 2, 5}
+ if !a.SliceEqual(got, want) {
+ panic(fmt.Sprintf("ReadAll returned %v, want %v", got, want))
+ }
+}
+
+func TestMerge() {
+ c1 := make(chan int)
+ c2 := make(chan int)
+ go func() {
+ c1 <- 1
+ c1 <- 3
+ c1 <- 5
+ close(c1)
+ }()
+ go func() {
+ c2 <- 2
+ c2 <- 4
+ c2 <- 6
+ close(c2)
+ }()
+ ctx := context.Background()
+ got := a.ReadAll(ctx, a.Merge(ctx, c1, c2))
+ sort.Ints(got)
+ want := []int{1, 2, 3, 4, 5, 6}
+ if !a.SliceEqual(got, want) {
+ panic(fmt.Sprintf("Merge returned %v, want %v", got, want))
+ }
+}
+
+func TestFilter() {
+ c := make(chan int)
+ go func() {
+ c <- 1
+ c <- 2
+ c <- 3
+ close(c)
+ }()
+ even := func(i int) bool { return i%2 == 0 }
+ ctx := context.Background()
+ got := a.ReadAll(ctx, a.Filter(ctx, c, even))
+ want := []int{2}
+ if !a.SliceEqual(got, want) {
+ panic(fmt.Sprintf("Filter returned %v, want %v", got, want))
+ }
+}
+
+func TestSink() {
+ c := a.Sink[int](context.Background())
+ after := time.NewTimer(time.Minute)
+ defer after.Stop()
+ send := func(v int) {
+ select {
+ case c <- v:
+ case <-after.C:
+ panic("timed out sending to Sink")
+ }
+ }
+ send(1)
+ send(2)
+ send(3)
+ close(c)
+}
+
+func TestExclusive() {
+ val := 0
+ ex := a.MakeExclusive(&val)
+
+ var wg sync.WaitGroup
+ f := func() {
+ defer wg.Done()
+ for i := 0; i < 10; i++ {
+ p := ex.Acquire()
+ (*p)++
+ ex.Release(p)
+ }
+ }
+
+ wg.Add(2)
+ go f()
+ go f()
+
+ wg.Wait()
+ if val != 20 {
+ panic(fmt.Sprintf("after Acquire/Release loop got %d, want 20", val))
+ }
+}
+
+func TestExclusiveTry() {
+ s := ""
+ ex := a.MakeExclusive(&s)
+ p, ok := ex.TryAcquire()
+ if !ok {
+ panic("TryAcquire failed")
+ }
+ *p = "a"
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ _, ok := ex.TryAcquire()
+ if ok {
+ panic(fmt.Sprintf("TryAcquire succeeded unexpectedly"))
+ }
+ }()
+ wg.Wait()
+
+ ex.Release(p)
+
+ p, ok = ex.TryAcquire()
+ if !ok {
+ panic(fmt.Sprintf("TryAcquire failed"))
+ }
+}
+
+func TestRanger() {
+ s, r := a.Ranger[int]()
+
+ ctx := context.Background()
+ go func() {
+ // Receive one value then exit.
+ v, ok := r.Next(ctx)
+ if !ok {
+ panic(fmt.Sprintf("did not receive any values"))
+ } else if v != 1 {
+ panic(fmt.Sprintf("received %d, want 1", v))
+ }
+ }()
+
+ c1 := make(chan bool)
+ c2 := make(chan bool)
+ go func() {
+ defer close(c2)
+ if !s.Send(ctx, 1) {
+ panic(fmt.Sprintf("Send failed unexpectedly"))
+ }
+ close(c1)
+ if s.Send(ctx, 2) {
+ panic(fmt.Sprintf("Send succeeded unexpectedly"))
+ }
+ }()
+
+ <-c1
+
+ // Force a garbage collection to try to get the finalizers to run.
+ runtime.GC()
+
+ select {
+ case <-c2:
+ case <-time.After(time.Minute):
+ panic("Ranger Send should have failed, but timed out")
+ }
+}
+
+func main() {
+ TestReadAll()
+ TestMerge()
+ TestFilter()
+ TestSink()
+ TestExclusive()
+ TestExclusiveTry()
+ TestRanger()
+}
diff --git a/test/typeparam/chansimp.go b/test/typeparam/chansimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/chansimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/combine.go b/test/typeparam/combine.go
index d4a2988a7b..5dfdb78442 100644
--- a/test/typeparam/combine.go
+++ b/test/typeparam/combine.go
@@ -10,56 +10,56 @@ import (
"fmt"
)
-type _Gen[A any] func() (A, bool)
+type Gen[A any] func() (A, bool)
-func combine[T1, T2, T any](g1 _Gen[T1], g2 _Gen[T2], join func(T1, T2) T) _Gen[T] {
- return func() (T, bool) {
- var t T
- t1, ok := g1()
- if !ok {
- return t, false
- }
- t2, ok := g2()
- if !ok {
- return t, false
- }
- return join(t1, t2), true
- }
+func Combine[T1, T2, T any](g1 Gen[T1], g2 Gen[T2], join func(T1, T2) T) Gen[T] {
+ return func() (T, bool) {
+ var t T
+ t1, ok := g1()
+ if !ok {
+ return t, false
+ }
+ t2, ok := g2()
+ if !ok {
+ return t, false
+ }
+ return join(t1, t2), true
+ }
}
-type _Pair[A, B any] struct {
+type Pair[A, B any] struct {
A A
B B
}
-func _NewPair[A, B any](a A, b B) _Pair[A, B] {
- return _Pair[A, B]{a, b}
+func _NewPair[A, B any](a A, b B) Pair[A, B] {
+ return Pair[A, B]{a, b}
}
-func _Combine2[A, B any](ga _Gen[A], gb _Gen[B]) _Gen[_Pair[A, B]] {
- return combine(ga, gb, _NewPair[A, B])
+func Combine2[A, B any](ga Gen[A], gb Gen[B]) Gen[Pair[A, B]] {
+ return Combine(ga, gb, _NewPair[A, B])
}
func main() {
- var g1 _Gen[int] = func() (int, bool) { return 3, true }
- var g2 _Gen[string] = func() (string, bool) { return "x", false }
- var g3 _Gen[string] = func() (string, bool) { return "y", true }
+ var g1 Gen[int] = func() (int, bool) { return 3, true }
+ var g2 Gen[string] = func() (string, bool) { return "x", false }
+ var g3 Gen[string] = func() (string, bool) { return "y", true }
- gc := combine(g1, g2, _NewPair[int, string])
+ gc := Combine(g1, g2, _NewPair[int, string])
if got, ok := gc(); ok {
panic(fmt.Sprintf("got %v, %v, wanted -/false", got, ok))
}
- gc2 := _Combine2(g1, g2)
+ gc2 := Combine2(g1, g2)
if got, ok := gc2(); ok {
panic(fmt.Sprintf("got %v, %v, wanted -/false", got, ok))
}
- gc3 := combine(g1, g3, _NewPair[int, string])
+ gc3 := Combine(g1, g3, _NewPair[int, string])
if got, ok := gc3(); !ok || got.A != 3 || got.B != "y" {
panic(fmt.Sprintf("got %v, %v, wanted {3, y}, true", got, ok))
}
- gc4 := _Combine2(g1, g3)
+ gc4 := Combine2(g1, g3)
if got, ok := gc4(); !ok || got.A != 3 || got.B != "y" {
- panic (fmt.Sprintf("got %v, %v, wanted {3, y}, true", got, ok))
+ panic(fmt.Sprintf("got %v, %v, wanted {3, y}, true", got, ok))
}
}
diff --git a/test/typeparam/cons.go b/test/typeparam/cons.go
index 8d255ebdb8..4750392a95 100644
--- a/test/typeparam/cons.go
+++ b/test/typeparam/cons.go
@@ -12,7 +12,7 @@ import "fmt"
// argument
type any interface{}
-type _Function[a, b any] interface {
+type Function[a, b any] interface {
Apply(x a) b
}
@@ -29,8 +29,8 @@ func (this pos) Apply(x int) bool {
}
type compose[a, b, c any] struct {
- f _Function[a, b]
- g _Function[b, c]
+ f Function[a, b]
+ g Function[b, c]
}
func (this compose[a, b, c]) Apply(x a) c {
@@ -47,52 +47,52 @@ func (this Int) Equal(that int) bool {
return int(this) == that
}
-type _List[a any] interface {
- Match(casenil _Function[_Nil[a], any], casecons _Function[_Cons[a], any]) any
+type List[a any] interface {
+ Match(casenil Function[Nil[a], any], casecons Function[Cons[a], any]) any
}
-type _Nil[a any] struct{
+type Nil[a any] struct {
}
-func (xs _Nil[a]) Match(casenil _Function[_Nil[a], any], casecons _Function[_Cons[a], any]) any {
+func (xs Nil[a]) Match(casenil Function[Nil[a], any], casecons Function[Cons[a], any]) any {
return casenil.Apply(xs)
}
-type _Cons[a any] struct {
+type Cons[a any] struct {
Head a
- Tail _List[a]
+ Tail List[a]
}
-func (xs _Cons[a]) Match(casenil _Function[_Nil[a], any], casecons _Function[_Cons[a], any]) any {
+func (xs Cons[a]) Match(casenil Function[Nil[a], any], casecons Function[Cons[a], any]) any {
return casecons.Apply(xs)
}
-type mapNil[a, b any] struct{
+type mapNil[a, b any] struct {
}
-func (m mapNil[a, b]) Apply(_ _Nil[a]) any {
- return _Nil[b]{}
+func (m mapNil[a, b]) Apply(_ Nil[a]) any {
+ return Nil[b]{}
}
type mapCons[a, b any] struct {
- f _Function[a, b]
+ f Function[a, b]
}
-func (m mapCons[a, b]) Apply(xs _Cons[a]) any {
- return _Cons[b]{m.f.Apply(xs.Head), _Map[a, b](m.f, xs.Tail)}
+func (m mapCons[a, b]) Apply(xs Cons[a]) any {
+ return Cons[b]{m.f.Apply(xs.Head), Map[a, b](m.f, xs.Tail)}
}
-func _Map[a, b any](f _Function[a, b], xs _List[a]) _List[b] {
- return xs.Match(mapNil[a, b]{}, mapCons[a, b]{f}).(_List[b])
+func Map[a, b any](f Function[a, b], xs List[a]) List[b] {
+ return xs.Match(mapNil[a, b]{}, mapCons[a, b]{f}).(List[b])
}
func main() {
- var xs _List[int] = _Cons[int]{3, _Cons[int]{6, _Nil[int]{}}}
- var ys _List[int] = _Map[int, int](incr{-5}, xs)
- var xz _List[bool] = _Map[int, bool](pos{}, ys)
- cs1 := xz.(_Cons[bool])
- cs2 := cs1.Tail.(_Cons[bool])
- _, ok := cs2.Tail.(_Nil[bool])
+ var xs List[int] = Cons[int]{3, Cons[int]{6, Nil[int]{}}}
+ var ys List[int] = Map[int, int](incr{-5}, xs)
+ var xz List[bool] = Map[int, bool](pos{}, ys)
+ cs1 := xz.(Cons[bool])
+ cs2 := cs1.Tail.(Cons[bool])
+ _, ok := cs2.Tail.(Nil[bool])
if cs1.Head != false || cs2.Head != true || !ok {
panic(fmt.Sprintf("got %v, %v, %v, expected false, true, true",
cs1.Head, cs2.Head, ok))
diff --git a/test/typeparam/dedup.dir/a.go b/test/typeparam/dedup.dir/a.go
new file mode 100644
index 0000000000..f5cb6dc762
--- /dev/null
+++ b/test/typeparam/dedup.dir/a.go
@@ -0,0 +1,10 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+//go:noinline
+func F[T comparable](a, b T) bool {
+ return a == b
+}
diff --git a/test/typeparam/dedup.dir/b.go b/test/typeparam/dedup.dir/b.go
new file mode 100644
index 0000000000..ce037e2d8a
--- /dev/null
+++ b/test/typeparam/dedup.dir/b.go
@@ -0,0 +1,14 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package b
+
+import "a"
+
+func B() {
+ var x int64
+ println(a.F(&x, &x))
+ var y int32
+ println(a.F(&y, &y))
+}
diff --git a/test/typeparam/dedup.dir/c.go b/test/typeparam/dedup.dir/c.go
new file mode 100644
index 0000000000..11a5d97642
--- /dev/null
+++ b/test/typeparam/dedup.dir/c.go
@@ -0,0 +1,14 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package c
+
+import "a"
+
+func C() {
+ var x int64
+ println(a.F(&x, &x))
+ var y int32
+ println(a.F(&y, &y))
+}
diff --git a/test/typeparam/dedup.dir/main.go b/test/typeparam/dedup.dir/main.go
new file mode 100644
index 0000000000..dc3ff6f75f
--- /dev/null
+++ b/test/typeparam/dedup.dir/main.go
@@ -0,0 +1,15 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "b"
+ "c"
+)
+
+func main() {
+ b.B()
+ c.C()
+}
diff --git a/test/typeparam/dedup.go b/test/typeparam/dedup.go
new file mode 100644
index 0000000000..dca4cf3a84
--- /dev/null
+++ b/test/typeparam/dedup.go
@@ -0,0 +1,12 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Note: this doesn't really test the deduplication of
+// instantiations. It just provides an easy mechanism to build a
+// binary that you can then check with objdump manually to make sure
+// deduplication is happening. TODO: automate this somehow?
+
+package ignored
diff --git a/test/typeparam/dedup.out b/test/typeparam/dedup.out
new file mode 100644
index 0000000000..1140ff52e2
--- /dev/null
+++ b/test/typeparam/dedup.out
@@ -0,0 +1,4 @@
+true
+true
+true
+true
diff --git a/test/typeparam/dictionaryCapture-noinline.go b/test/typeparam/dictionaryCapture-noinline.go
new file mode 100644
index 0000000000..ad5bfa008e
--- /dev/null
+++ b/test/typeparam/dictionaryCapture-noinline.go
@@ -0,0 +1,126 @@
+// run -gcflags="-G=3 -l"
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test situations where functions/methods are not
+// immediately called and we need to capture the dictionary
+// required for later invocation.
+
+package main
+
+func main() {
+ functions()
+ methodExpressions()
+ methodValues()
+ interfaceMethods()
+ globals()
+}
+
+func g0[T any](x T) {
+}
+func g1[T any](x T) T {
+ return x
+}
+func g2[T any](x T) (T, T) {
+ return x, x
+}
+
+func functions() {
+ f0 := g0[int]
+ f0(7)
+ f1 := g1[int]
+ is7(f1(7))
+ f2 := g2[int]
+ is77(f2(7))
+}
+
+func is7(x int) {
+ if x != 7 {
+ println(x)
+ panic("assertion failed")
+ }
+}
+func is77(x, y int) {
+ if x != 7 || y != 7 {
+ println(x, y)
+ panic("assertion failed")
+ }
+}
+
+type s[T any] struct {
+ a T
+}
+
+func (x s[T]) g0() {
+}
+func (x s[T]) g1() T {
+ return x.a
+}
+func (x s[T]) g2() (T, T) {
+ return x.a, x.a
+}
+
+func methodExpressions() {
+ x := s[int]{a: 7}
+ f0 := s[int].g0
+ f0(x)
+ f1 := s[int].g1
+ is7(f1(x))
+ f2 := s[int].g2
+ is77(f2(x))
+}
+
+func methodValues() {
+ x := s[int]{a: 7}
+ f0 := x.g0
+ f0()
+ f1 := x.g1
+ is7(f1())
+ f2 := x.g2
+ is77(f2())
+}
+
+var x interface {
+ g0()
+ g1() int
+ g2() (int, int)
+} = s[int]{a: 7}
+var y interface{} = s[int]{a: 7}
+
+func interfaceMethods() {
+ x.g0()
+ is7(x.g1())
+ is77(x.g2())
+ y.(interface{ g0() }).g0()
+ is7(y.(interface{ g1() int }).g1())
+ is77(y.(interface{ g2() (int, int) }).g2())
+}
+
+// Also check for instantiations outside functions.
+var gg0 = g0[int]
+var gg1 = g1[int]
+var gg2 = g2[int]
+
+var hh0 = s[int].g0
+var hh1 = s[int].g1
+var hh2 = s[int].g2
+
+var xtop = s[int]{a: 7}
+var ii0 = x.g0
+var ii1 = x.g1
+var ii2 = x.g2
+
+func globals() {
+ gg0(7)
+ is7(gg1(7))
+ is77(gg2(7))
+ x := s[int]{a: 7}
+ hh0(x)
+ is7(hh1(x))
+ is77(hh2(x))
+ ii0()
+ is7(ii1())
+ is77(ii2())
+}
diff --git a/test/typeparam/dictionaryCapture.go b/test/typeparam/dictionaryCapture.go
new file mode 100644
index 0000000000..7c7948145a
--- /dev/null
+++ b/test/typeparam/dictionaryCapture.go
@@ -0,0 +1,203 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test situations where functions/methods are not
+// immediately called and we need to capture the dictionary
+// required for later invocation.
+
+package main
+
+import (
+ "fmt"
+)
+
+func main() {
+ functions()
+ methodExpressions()
+ genMethodExpressions[int](7)
+ methodValues()
+ genMethodValues[int](7)
+ interfaceMethods()
+ globals()
+ recursive()
+}
+
+func g0[T any](x T) {
+}
+func g1[T any](x T) T {
+ return x
+}
+func g2[T any](x T) (T, T) {
+ return x, x
+}
+
+func functions() {
+ f0 := g0[int]
+ f0(7)
+ f1 := g1[int]
+ is7(f1(7))
+ f2 := g2[int]
+ is77(f2(7))
+}
+
+func is7(x int) {
+ if x != 7 {
+ println(x)
+ panic("assertion failed")
+ }
+}
+func is77(x, y int) {
+ if x != 7 || y != 7 {
+ println(x, y)
+ panic("assertion failed")
+ }
+}
+
+type s[T any] struct {
+ a T
+}
+
+func (x s[T]) g0() {
+}
+func (x s[T]) g1() T {
+ return x.a
+}
+func (x s[T]) g2() (T, T) {
+ return x.a, x.a
+}
+
+func methodExpressions() {
+ x := s[int]{a: 7}
+ f0 := s[int].g0
+ f0(x)
+ f0p := (*s[int]).g0
+ f0p(&x)
+ f1 := s[int].g1
+ is7(f1(x))
+ f1p := (*s[int]).g1
+ is7(f1p(&x))
+ f2 := s[int].g2
+ is77(f2(x))
+ f2p := (*s[int]).g2
+ is77(f2p(&x))
+}
+
+func genMethodExpressions[T comparable](want T) {
+ x := s[T]{a: want}
+ f0 := s[T].g0
+ f0(x)
+ f0p := (*s[T]).g0
+ f0p(&x)
+ f1 := s[T].g1
+ if got := f1(x); got != want {
+ panic(fmt.Sprintf("f1(x) == %d, want %d", got, want))
+ }
+ f1p := (*s[T]).g1
+ if got := f1p(&x); got != want {
+ panic(fmt.Sprintf("f1p(&x) == %d, want %d", got, want))
+ }
+ f2 := s[T].g2
+ if got1, got2 := f2(x); got1 != want || got2 != want {
+ panic(fmt.Sprintf("f2(x) == %d, %d, want %d, %d", got1, got2, want, want))
+ }
+}
+
+func methodValues() {
+ x := s[int]{a: 7}
+ f0 := x.g0
+ f0()
+ f1 := x.g1
+ is7(f1())
+ f2 := x.g2
+ is77(f2())
+}
+
+func genMethodValues[T comparable](want T) {
+ x := s[T]{a: want}
+ f0 := x.g0
+ f0()
+ f1 := x.g1
+ if got := f1(); got != want {
+ panic(fmt.Sprintf("f1() == %d, want %d", got, want))
+ }
+ f2 := x.g2
+ if got1, got2 := f2(); got1 != want || got2 != want {
+ panic(fmt.Sprintf("f2() == %d, %d, want %d, %d", got1, got2, want, want))
+ }
+}
+
+var x interface {
+ g0()
+ g1() int
+ g2() (int, int)
+} = s[int]{a: 7}
+var y interface{} = s[int]{a: 7}
+
+func interfaceMethods() {
+ x.g0()
+ is7(x.g1())
+ is77(x.g2())
+ y.(interface{ g0() }).g0()
+ is7(y.(interface{ g1() int }).g1())
+ is77(y.(interface{ g2() (int, int) }).g2())
+}
+
+// Also check for instantiations outside functions.
+var gg0 = g0[int]
+var gg1 = g1[int]
+var gg2 = g2[int]
+
+var hh0 = s[int].g0
+var hh1 = s[int].g1
+var hh2 = s[int].g2
+
+var xtop = s[int]{a: 7}
+var ii0 = x.g0
+var ii1 = x.g1
+var ii2 = x.g2
+
+func globals() {
+ gg0(7)
+ is7(gg1(7))
+ is77(gg2(7))
+ x := s[int]{a: 7}
+ hh0(x)
+ is7(hh1(x))
+ is77(hh2(x))
+ ii0()
+ is7(ii1())
+ is77(ii2())
+}
+
+func recursive() {
+ if got, want := recur1[int](5), 110; got != want {
+ panic(fmt.Sprintf("recur1[int](5) = %d, want = %d", got, want))
+ }
+}
+
+type Integer interface {
+ int | int32 | int64
+}
+
+func recur1[T Integer](n T) T {
+ if n == 0 || n == 1 {
+ return T(1)
+ } else {
+ return n * recur2(n-1)
+ }
+}
+
+func recur2[T Integer](n T) T {
+ list := make([]T, n)
+ for i, _ := range list {
+ list[i] = T(i + 1)
+ }
+ var sum T
+ for _, elt := range list {
+ sum += elt
+ }
+ return sum + recur1(n-1)
+}
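As a quick check of the expected value in recursive() above: recur2(n) sums 1 through n and adds recur1(n-1), while recur1(n) returns n * recur2(n-1). So recur1(1) = 1, recur2(2) = 1+2+1 = 4, recur1(3) = 3*4 = 12, recur2(4) = 1+2+3+4+12 = 22, and recur1(5) = 5*22 = 110, which matches the expected value of 110.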
diff --git a/test/typeparam/dottype.go b/test/typeparam/dottype.go
new file mode 100644
index 0000000000..c9c900c096
--- /dev/null
+++ b/test/typeparam/dottype.go
@@ -0,0 +1,86 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func f[T any](x interface{}) T {
+ return x.(T)
+}
+func f2[T any](x interface{}) (T, bool) {
+ t, ok := x.(T)
+ return t, ok
+}
+
+type I interface {
+ foo()
+}
+
+type myint int
+
+func (myint) foo() {
+}
+
+type myfloat float64
+
+func (myfloat) foo() {
+}
+
+func g[T I](x I) T {
+ return x.(T)
+}
+func g2[T I](x I) (T, bool) {
+ t, ok := x.(T)
+ return t, ok
+}
+
+func h[T any](x interface{}) struct{ a, b T } {
+ return x.(struct{ a, b T })
+}
+
+func k[T any](x interface{}) interface{ bar() T } {
+ return x.(interface{ bar() T })
+}
+
+type mybar int
+
+func (x mybar) bar() int {
+ return int(x)
+}
+
+func main() {
+ var i interface{} = int(3)
+ var j I = myint(3)
+ var x interface{} = float64(3)
+ var y I = myfloat(3)
+
+ println(f[int](i))
+ shouldpanic(func() { f[int](x) })
+ println(f2[int](i))
+ println(f2[int](x))
+
+ println(g[myint](j))
+ shouldpanic(func() { g[myint](y) })
+ println(g2[myint](j))
+ println(g2[myint](y))
+
+ println(h[int](struct{ a, b int }{3, 5}).a)
+
+ println(k[int](mybar(3)).bar())
+
+ type large struct{ a, b, c, d, e, f int }
+ println(f[large](large{}).a)
+ l2, ok := f2[large](large{})
+ println(l2.a, ok)
+}
+func shouldpanic(x func()) {
+ defer func() {
+ e := recover()
+ if e == nil {
+ panic("didn't panic")
+ }
+ }()
+ x()
+}
diff --git a/test/typeparam/dottype.out b/test/typeparam/dottype.out
new file mode 100644
index 0000000000..8e6a3c2552
--- /dev/null
+++ b/test/typeparam/dottype.out
@@ -0,0 +1,10 @@
+3
+3 true
+0 false
+3
+3 true
+0 false
+3
+3
+0
+0 true
diff --git a/test/typeparam/double.go b/test/typeparam/double.go
index ce78ec9748..6ddb6b2d08 100644
--- a/test/typeparam/double.go
+++ b/test/typeparam/double.go
@@ -12,7 +12,7 @@ import (
)
type Number interface {
- type int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr, float32, float64
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr | ~float32 | ~float64
}
type MySlice []int
@@ -44,29 +44,29 @@ func main() {
want := MySlice{2, 4, 6}
got := _DoubleElems[MySlice, int](arg)
if !reflect.DeepEqual(got, want) {
- panic(fmt.Sprintf("got %s, want %s", got, want))
+ panic(fmt.Sprintf("got %s, want %s", got, want))
}
// constraint type inference
got = _DoubleElems[MySlice](arg)
if !reflect.DeepEqual(got, want) {
- panic(fmt.Sprintf("got %s, want %s", got, want))
+ panic(fmt.Sprintf("got %s, want %s", got, want))
}
got = _DoubleElems(arg)
if !reflect.DeepEqual(got, want) {
- panic(fmt.Sprintf("got %s, want %s", got, want))
+ panic(fmt.Sprintf("got %s, want %s", got, want))
}
farg := MyFloatSlice{1.2, 2.0, 3.5}
fwant := MyFloatSlice{2.4, 4.0, 7.0}
fgot := _DoubleElems(farg)
if !reflect.DeepEqual(fgot, fwant) {
- panic(fmt.Sprintf("got %s, want %s", fgot, fwant))
+ panic(fmt.Sprintf("got %s, want %s", fgot, fwant))
}
fgot = _DoubleElems2(farg)
if !reflect.DeepEqual(fgot, fwant) {
- panic(fmt.Sprintf("got %s, want %s", fgot, fwant))
+ panic(fmt.Sprintf("got %s, want %s", fgot, fwant))
}
}
diff --git a/test/typeparam/equal.go b/test/typeparam/equal.go
new file mode 100644
index 0000000000..a1d3e8ae02
--- /dev/null
+++ b/test/typeparam/equal.go
@@ -0,0 +1,69 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// comparisons of type parameters to interfaces
+
+package main
+
+func f[T comparable](t, u T) bool {
+ // Comparing two type parameters directly.
+ // (Not really testing comparisons to interfaces, but just 'cause we're here.)
+ return t == u
+}
+
+func g[T comparable](t T, i interface{}) bool {
+ // Compare type parameter value to empty interface.
+ return t == i
+}
+
+type I interface {
+ foo()
+}
+
+type C interface {
+ comparable
+ I
+}
+
+func h[T C](t T, i I) bool {
+ // Compare type parameter value to nonempty interface.
+ return t == i
+}
+
+type myint int
+
+func (x myint) foo() {
+}
+
+func k[T comparable](t T, i interface{}) bool {
+ // Compare derived type value to interface.
+ return struct{ a, b T }{t, t} == i
+}
+
+func main() {
+ assert(f(3, 3))
+ assert(!f(3, 5))
+ assert(g(3, 3))
+ assert(!g(3, 5))
+ assert(h(myint(3), myint(3)))
+ assert(!h(myint(3), myint(5)))
+
+ type S struct{ a, b float64 }
+
+ assert(f(S{3, 5}, S{3, 5}))
+ assert(!f(S{3, 5}, S{4, 6}))
+ assert(g(S{3, 5}, S{3, 5}))
+ assert(!g(S{3, 5}, S{4, 6}))
+
+ assert(k(3, struct{ a, b int }{3, 3}))
+ assert(!k(3, struct{ a, b int }{3, 4}))
+}
+
+func assert(b bool) {
+ if !b {
+ panic("assertion failed")
+ }
+}
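
A note on the C constraint above: it embeds comparable alongside I, which is what lets h compare a type parameter value against a nonempty interface with ==. A minimal sketch of the same embedding pattern in isolation (all names here are illustrative, not part of the test):

package sketch

import "strconv"

type Stringer interface {
	String() string
}

// ComparableStringer's type set is the set of comparable types that also
// implement Stringer; it can only be used as a constraint.
type ComparableStringer interface {
	comparable
	Stringer
}

type id int

func (x id) String() string { return strconv.Itoa(int(x)) }

// Contains uses == on type parameter values, which the comparable
// embedding permits.
func Contains[T ComparableStringer](want T, values ...T) bool {
	for _, v := range values {
		if v == want {
			return true
		}
	}
	return false
}

var _ = Contains(id(1), id(1), id(2)) // true
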
diff --git a/test/typeparam/fact.go b/test/typeparam/fact.go
index 16b2adf6fb..e19cfe6956 100644
--- a/test/typeparam/fact.go
+++ b/test/typeparam/fact.go
@@ -8,11 +8,11 @@ package main
import "fmt"
-func fact[T interface { type int, int64, float64 }](n T) T {
- if n == T(1) {
- return T(1)
+func fact[T interface{ ~int | ~int64 | ~float64 }](n T) T {
+ if n == 1 {
+ return 1
}
- return n * fact(n - T(1))
+ return n * fact(n-1)
}
func main() {
diff --git a/test/typeparam/factimp.dir/a.go b/test/typeparam/factimp.dir/a.go
new file mode 100644
index 0000000000..0bd73a88e7
--- /dev/null
+++ b/test/typeparam/factimp.dir/a.go
@@ -0,0 +1,12 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+func Fact[T interface{ int | int64 | float64 }](n T) T {
+ if n == 1 {
+ return 1
+ }
+ return n * Fact(n-1)
+}
diff --git a/test/typeparam/factimp.dir/main.go b/test/typeparam/factimp.dir/main.go
new file mode 100644
index 0000000000..c2238002ae
--- /dev/null
+++ b/test/typeparam/factimp.dir/main.go
@@ -0,0 +1,26 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+)
+
+func main() {
+ const want = 120
+
+ if got := a.Fact(5); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ if got := a.Fact[int64](5); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ if got := a.Fact(5.0); got != want {
+		panic(fmt.Sprintf("got %f, want %d", got, want))
+ }
+}
diff --git a/test/typeparam/factimp.go b/test/typeparam/factimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/factimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/graph.go b/test/typeparam/graph.go
index f2a2630ad0..cecf349a9a 100644
--- a/test/typeparam/graph.go
+++ b/test/typeparam/graph.go
@@ -225,7 +225,6 @@ func TestShortestPath() {
}
}
-
func main() {
TestShortestPath()
}
diff --git a/test/typeparam/ifaceconv.go b/test/typeparam/ifaceconv.go
new file mode 100644
index 0000000000..ee3a9e0dc3
--- /dev/null
+++ b/test/typeparam/ifaceconv.go
@@ -0,0 +1,83 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test that we can convert type parameters to both empty
+// and nonempty interfaces, and named and unnamed versions
+// thereof.
+
+package main
+
+import "fmt"
+
+type E interface{}
+
+func f[T any](x T) interface{} {
+ var i interface{} = x
+ return i
+}
+
+func fs[T any](x T) interface{} {
+ y := []T{x}
+ var i interface{} = y
+ return i
+}
+
+func g[T any](x T) E {
+ var i E = x
+ return i
+}
+
+type C interface {
+ foo() int
+}
+
+type myInt int
+
+func (x myInt) foo() int {
+ return int(x + 1)
+}
+
+func h[T C](x T) interface{ foo() int } {
+ var i interface{ foo() int } = x
+ return i
+}
+func i[T C](x T) C {
+ var i C = x // conversion in assignment
+ return i
+}
+
+func j[T C](t T) C {
+ return C(t) // explicit conversion
+}
+
+func js[T any](x T) interface{} {
+ y := []T{x}
+ return interface{}(y)
+}
+
+func main() {
+ if got, want := f[int](7), 7; got != want {
+ panic(fmt.Sprintf("got %d want %d", got, want))
+ }
+ if got, want := fs[int](7), []int{7}; got.([]int)[0] != want[0] {
+ panic(fmt.Sprintf("got %d want %d", got, want))
+ }
+ if got, want := g[int](7), 7; got != want {
+ panic(fmt.Sprintf("got %d want %d", got, want))
+ }
+ if got, want := h[myInt](7).foo(), 8; got != want {
+ panic(fmt.Sprintf("got %d want %d", got, want))
+ }
+ if got, want := i[myInt](7).foo(), 8; got != want {
+ panic(fmt.Sprintf("got %d want %d", got, want))
+ }
+ if got, want := j[myInt](7).foo(), 8; got != want {
+ panic(fmt.Sprintf("got %d want %d", got, want))
+ }
+ if got, want := js[int](7), []int{7}; got.([]int)[0] != want[0] {
+ panic(fmt.Sprintf("got %d want %d", got, want))
+ }
+}
diff --git a/test/typeparam/index.go b/test/typeparam/index.go
index 83e65acdd0..906f76d325 100644
--- a/test/typeparam/index.go
+++ b/test/typeparam/index.go
@@ -11,7 +11,7 @@ import (
)
// Index returns the index of x in s, or -1 if not found.
-func index[T comparable](s []T, x T) int {
+func Index[T comparable](s []T, x T) int {
for i, v := range s {
// v and x are type T, which has the comparable
// constraint, so we can use == here.
@@ -26,21 +26,56 @@ type obj struct {
x int
}
+type obj2 struct {
+ x int8
+ y float64
+}
+
+type obj3 struct {
+ x int64
+ y int8
+}
+
+type inner struct {
+ y int64
+ z int32
+}
+
+type obj4 struct {
+ x int32
+ s inner
+}
+
func main() {
want := 2
vec1 := []string{"ab", "cd", "ef"}
- if got := index(vec1, "ef"); got != want {
+ if got := Index(vec1, "ef"); got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
vec2 := []byte{'c', '6', '@'}
- if got := index(vec2, '@'); got != want {
+ if got := Index(vec2, '@'); got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
vec3 := []*obj{&obj{2}, &obj{42}, &obj{1}}
- if got := index(vec3, vec3[2]); got != want {
+ if got := Index(vec3, vec3[2]); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ vec4 := []obj2{obj2{2, 3.0}, obj2{3, 4.0}, obj2{4, 5.0}}
+ if got := Index(vec4, vec4[2]); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ vec5 := []obj3{obj3{2, 3}, obj3{3, 4}, obj3{4, 5}}
+ if got := Index(vec5, vec5[2]); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ vec6 := []obj4{obj4{2, inner{3, 4}}, obj4{3, inner{4, 5}}, obj4{4, inner{5, 6}}}
+ if got := Index(vec6, vec6[2]); got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
}
diff --git a/test/typeparam/interfacearg.go b/test/typeparam/interfacearg.go
index e2d85e3647..28ea3e3afb 100644
--- a/test/typeparam/interfacearg.go
+++ b/test/typeparam/interfacearg.go
@@ -9,23 +9,25 @@ package main
type I interface{}
type _S[T any] struct {
- *T
+ x *T
}
// F is a non-generic function, but has a type _S[I] which is instantiated from a
// generic type. Test that _S[I] is successfully exported.
func F() {
v := _S[I]{}
- if v.T != nil {
+ if v.x != nil {
panic(v)
}
}
// Testing the various combinations of method expressions.
type S1 struct{}
+
func (*S1) M() {}
type S2 struct{}
+
func (S2) M() {}
func _F1[T interface{ M() }](t T) {
@@ -33,9 +35,9 @@ func _F1[T interface{ M() }](t T) {
}
func F2() {
- _F1(&S1{})
- _F1(S2{})
- _F1(&S2{})
+ _F1(&S1{})
+ _F1(S2{})
+ _F1(&S2{})
}
func main() {
diff --git a/test/typeparam/issue39755.go b/test/typeparam/issue39755.go
new file mode 100644
index 0000000000..c4b6902eea
--- /dev/null
+++ b/test/typeparam/issue39755.go
@@ -0,0 +1,27 @@
+// compile -G=3
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// copied from cmd/compile/internal/types2/testdata/fixedbugs/issue39755.go
+
+package p
+
+func _[T interface{ ~map[string]int }](x T) {
+ _ = x == nil
+}
+
+// simplified test case from issue
+
+type PathParamsConstraint interface {
+ ~map[string]string | ~[]struct{ key, value string }
+}
+
+type PathParams[T PathParamsConstraint] struct {
+ t T
+}
+
+func (pp *PathParams[T]) IsNil() bool {
+ return pp.t == nil // this must succeed
+}
diff --git a/test/typeparam/issue44688.go b/test/typeparam/issue44688.go
new file mode 100644
index 0000000000..5ebce72628
--- /dev/null
+++ b/test/typeparam/issue44688.go
@@ -0,0 +1,148 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// derived & expanded from cmd/compile/internal/types2/testdata/fixedbugs/issue44688.go2
+
+package main
+
+type A1[T any] struct {
+ val T
+}
+
+func (p *A1[T]) m1(val T) {
+ p.val = val
+}
+
+type A2[T any] interface {
+ m2(T)
+}
+
+type B1[T any] struct {
+ filler int
+ *A1[T]
+ A2[T]
+}
+
+type B2[T any] interface {
+ A2[T]
+}
+
+type ImpA2[T any] struct {
+ f T
+}
+
+func (a2 *ImpA2[T]) m2(s T) {
+ a2.f = s
+}
+
+type C[T any] struct {
+ filler1 int
+ filler2 int
+ B1[T]
+}
+
+type D[T any] struct {
+ filler1 int
+ filler2 int
+ filler3 int
+ C[T]
+}
+
+func test1[T any](arg T) {
+ // calling embedded methods
+ var b1 B1[T]
+ b1.A1 = &A1[T]{}
+ b1.A2 = &ImpA2[T]{}
+
+ b1.A1.m1(arg)
+ b1.m1(arg)
+
+ b1.A2.m2(arg)
+ b1.m2(arg)
+
+ var b2 B2[T]
+ b2 = &ImpA2[T]{}
+ b2.m2(arg)
+
+ // a deeper nesting
+ var d D[T]
+ d.C.B1.A1 = &A1[T]{}
+ d.C.B1.A2 = &ImpA2[T]{}
+ d.m1(arg)
+ d.m2(arg)
+
+ // calling method expressions
+ m1x := B1[T].m1
+ m1x(b1, arg)
+ m2x := B2[T].m2
+ m2x(b2, arg)
+
+ // calling method values
+ m1v := b1.m1
+ m1v(arg)
+ m2v := b1.m2
+ m2v(arg)
+ b2v := b2.m2
+ b2v(arg)
+}
+
+func test2() {
+ // calling embedded methods
+ var b1 B1[string]
+ b1.A1 = &A1[string]{}
+ b1.A2 = &ImpA2[string]{}
+
+ b1.A1.m1("")
+ b1.m1("")
+
+ b1.A2.m2("")
+ b1.m2("")
+
+ var b2 B2[string]
+ b2 = &ImpA2[string]{}
+ b2.m2("")
+
+ // a deeper nesting
+ var d D[string]
+ d.C.B1.A1 = &A1[string]{}
+ d.C.B1.A2 = &ImpA2[string]{}
+ d.m1("")
+ d.m2("")
+
+ // calling method expressions
+ m1x := B1[string].m1
+ m1x(b1, "")
+ m2x := B2[string].m2
+ m2x(b2, "")
+
+ // calling method values
+ m1v := b1.m1
+ m1v("")
+ m2v := b1.m2
+ m2v("")
+ b2v := b2.m2
+ b2v("")
+}
+
+// actual test case from issue
+
+type A[T any] struct{}
+
+func (*A[T]) f(T) {}
+
+type B[T any] struct{ A[T] }
+
+func test3() {
+ var b B[string]
+ b.A.f("")
+ b.f("")
+}
+
+func main() {
+ test1[string]("")
+ test2()
+ test3()
+}
diff --git a/test/typeparam/issue45547.go b/test/typeparam/issue45547.go
index 0a08d66b70..b354d4d7f6 100644
--- a/test/typeparam/issue45547.go
+++ b/test/typeparam/issue45547.go
@@ -11,7 +11,7 @@ func f[T any]() (f, g T) { return f, g }
// Tests for generic function instantiation on the right hand side of multi-value
// assignments.
-func _() {
+func g() {
// Multi-value assignment within a function
var _, _ = f[int]()
}
diff --git a/test/typeparam/issue45817.go b/test/typeparam/issue45817.go
new file mode 100644
index 0000000000..1efee3b0af
--- /dev/null
+++ b/test/typeparam/issue45817.go
@@ -0,0 +1,26 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+)
+
+type s[T any] struct {
+ a T
+}
+
+func (x s[T]) f() T {
+ return x.a
+}
+func main() {
+ x := s[int]{a: 7}
+ f := x.f
+ if got, want := f(), 7; got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+}
diff --git a/test/typeparam/issue46472.go b/test/typeparam/issue46472.go
new file mode 100644
index 0000000000..cd4d923ef5
--- /dev/null
+++ b/test/typeparam/issue46472.go
@@ -0,0 +1,20 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func foo[T any](d T) {
+ switch v := interface{}(d).(type) {
+ case string:
+ if v != "x" {
+ panic("unexpected v: " + v)
+ }
+ }
+
+}
+func main() {
+ foo("x")
+}
diff --git a/test/typeparam/issue47258.go b/test/typeparam/issue47258.go
new file mode 100644
index 0000000000..717329471e
--- /dev/null
+++ b/test/typeparam/issue47258.go
@@ -0,0 +1,32 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+)
+
+type Numeric interface {
+ int32 | int64 | float64 | complex64
+}
+
+//go:noinline
+func inc[T Numeric](x T) T {
+ x++
+ return x
+}
+func main() {
+ if got, want := inc(int32(5)), int32(6); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+ if got, want := inc(float64(5)), float64(6.0); got != want {
+		panic(fmt.Sprintf("got %f, want %f", got, want))
+ }
+ if got, want := inc(complex64(5)), complex64(6.0); got != want {
+		panic(fmt.Sprintf("got %v, want %v", got, want))
+ }
+}
diff --git a/test/typeparam/issue47514.go b/test/typeparam/issue47514.go
new file mode 100644
index 0000000000..947f254003
--- /dev/null
+++ b/test/typeparam/issue47514.go
@@ -0,0 +1,20 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test that closures inside a generic function are not exported,
+// even though they are not themselves generic.
+
+package main
+
+func Do[T any]() {
+ _ = func() string {
+ return ""
+ }
+}
+
+func main() {
+ Do[int]()
+}
diff --git a/test/typeparam/issue47514b.go b/test/typeparam/issue47514b.go
new file mode 100644
index 0000000000..5428a0edc5
--- /dev/null
+++ b/test/typeparam/issue47514b.go
@@ -0,0 +1,19 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func Do[T any](do func() (T, string)) {
+ _ = func() (T, string) {
+ return do()
+ }
+}
+
+func main() {
+ Do[int](func() (int, string) {
+ return 3, "3"
+ })
+}
diff --git a/test/typeparam/list.go b/test/typeparam/list.go
index 579078f02f..adfe72f1de 100644
--- a/test/typeparam/list.go
+++ b/test/typeparam/list.go
@@ -11,10 +11,10 @@ import (
)
type Ordered interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64,
- string
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~string
}
// _List is a linked list of ordered values of type T.
@@ -34,9 +34,9 @@ func (l *_List[T]) Largest() T {
}
type OrderedNum interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64
}
// _ListNum is a linked _List of ordered numeric values of type T.
@@ -64,40 +64,40 @@ func main() {
i2 := &_List[int]{i3, 3}
i1 := &_List[int]{i2, 2}
if got, want := i1.Largest(), 3; got != want {
- panic(fmt.Sprintf("got %d, want %d", got, want))
+ panic(fmt.Sprintf("got %d, want %d", got, want))
}
b3 := &_List[byte]{nil, byte(1)}
b2 := &_List[byte]{b3, byte(3)}
b1 := &_List[byte]{b2, byte(2)}
if got, want := b1.Largest(), byte(3); got != want {
- panic(fmt.Sprintf("got %d, want %d", got, want))
+ panic(fmt.Sprintf("got %d, want %d", got, want))
}
f3 := &_List[float64]{nil, 13.5}
f2 := &_List[float64]{f3, 1.2}
f1 := &_List[float64]{f2, 4.5}
if got, want := f1.Largest(), 13.5; got != want {
- panic(fmt.Sprintf("got %f, want %f", got, want))
+ panic(fmt.Sprintf("got %f, want %f", got, want))
}
s3 := &_List[string]{nil, "dd"}
s2 := &_List[string]{s3, "aa"}
s1 := &_List[string]{s2, "bb"}
if got, want := s1.Largest(), "dd"; got != want {
- panic(fmt.Sprintf("got %s, want %s", got, want))
+ panic(fmt.Sprintf("got %s, want %s", got, want))
}
j3 := &_ListNum[int]{nil, 1}
j2 := &_ListNum[int]{j3, 32}
j1 := &_ListNum[int]{j2, 2}
if got, want := j1.ClippedLargest(), 2; got != want {
- panic(fmt.Sprintf("got %d, want %d", got, want))
+ panic(fmt.Sprintf("got %d, want %d", got, want))
}
g3 := &_ListNum[float64]{nil, 13.5}
g2 := &_ListNum[float64]{g3, 1.2}
g1 := &_ListNum[float64]{g2, 4.5}
if got, want := g1.ClippedLargest(), 4.5; got != want {
- panic(fmt.Sprintf("got %f, want %f", got, want))
+ panic(fmt.Sprintf("got %f, want %f", got, want))
}
}
diff --git a/test/typeparam/list2.go b/test/typeparam/list2.go
index 385193d876..e7f346c78e 100644
--- a/test/typeparam/list2.go
+++ b/test/typeparam/list2.go
@@ -50,7 +50,7 @@ func (e *_Element[T]) Prev() *_Element[T] {
// The zero value for _List is an empty list ready to use.
type _List[T any] struct {
root _Element[T] // sentinel list element, only &root, root.prev, and root.next are used
- len int // current list length excluding (this) sentinel element
+ len int // current list length excluding (this) sentinel element
}
// Init initializes or clears list l.
@@ -594,8 +594,15 @@ func TestTransform() {
checkList(l2, []interface{}{"1", "2"})
}
-
func main() {
TestList()
+ TestExtending()
+ TestRemove()
+ TestIssue4103()
+ TestIssue6349()
+ TestMove()
+ TestZeroList()
+ TestInsertBeforeUnknownMark()
+ TestInsertAfterUnknownMark()
+ TestTransform()
}
-
diff --git a/test/typeparam/listimp.dir/a.go b/test/typeparam/listimp.dir/a.go
new file mode 100644
index 0000000000..bf1641af9c
--- /dev/null
+++ b/test/typeparam/listimp.dir/a.go
@@ -0,0 +1,53 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Ordered interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~string
+}
+
+// List is a linked list of ordered values of type T.
+type List[T Ordered] struct {
+ Next *List[T]
+ Val T
+}
+
+func (l *List[T]) Largest() T {
+ var max T
+ for p := l; p != nil; p = p.Next {
+ if p.Val > max {
+ max = p.Val
+ }
+ }
+ return max
+}
+
+type OrderedNum interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64
+}
+
+// ListNum is a linked list of ordered numeric values of type T.
+type ListNum[T OrderedNum] struct {
+ Next *ListNum[T]
+ Val T
+}
+
+const Clip = 5
+
+// ClippedLargest returns the largest value in the list that is less than Clip (5).
+func (l *ListNum[T]) ClippedLargest() T {
+ var max T
+ for p := l; p != nil; p = p.Next {
+ if p.Val > max && p.Val < Clip {
+ max = p.Val
+ }
+ }
+ return max
+}
diff --git a/test/typeparam/listimp.dir/main.go b/test/typeparam/listimp.dir/main.go
new file mode 100644
index 0000000000..985ff59a18
--- /dev/null
+++ b/test/typeparam/listimp.dir/main.go
@@ -0,0 +1,52 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+)
+
+func main() {
+ i3 := &a.List[int]{nil, 1}
+ i2 := &a.List[int]{i3, 3}
+ i1 := &a.List[int]{i2, 2}
+ if got, want := i1.Largest(), 3; got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ b3 := &a.List[byte]{nil, byte(1)}
+ b2 := &a.List[byte]{b3, byte(3)}
+ b1 := &a.List[byte]{b2, byte(2)}
+ if got, want := b1.Largest(), byte(3); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ f3 := &a.List[float64]{nil, 13.5}
+ f2 := &a.List[float64]{f3, 1.2}
+ f1 := &a.List[float64]{f2, 4.5}
+ if got, want := f1.Largest(), 13.5; got != want {
+ panic(fmt.Sprintf("got %f, want %f", got, want))
+ }
+
+ s3 := &a.List[string]{nil, "dd"}
+ s2 := &a.List[string]{s3, "aa"}
+ s1 := &a.List[string]{s2, "bb"}
+ if got, want := s1.Largest(), "dd"; got != want {
+ panic(fmt.Sprintf("got %s, want %s", got, want))
+ }
+ j3 := &a.ListNum[int]{nil, 1}
+ j2 := &a.ListNum[int]{j3, 32}
+ j1 := &a.ListNum[int]{j2, 2}
+ if got, want := j1.ClippedLargest(), 2; got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+ g3 := &a.ListNum[float64]{nil, 13.5}
+ g2 := &a.ListNum[float64]{g3, 1.2}
+ g1 := &a.ListNum[float64]{g2, 4.5}
+ if got, want := g1.ClippedLargest(), 4.5; got != want {
+ panic(fmt.Sprintf("got %f, want %f", got, want))
+ }
+}
diff --git a/test/typeparam/listimp.go b/test/typeparam/listimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/listimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/listimp2.dir/a.go b/test/typeparam/listimp2.dir/a.go
new file mode 100644
index 0000000000..3a7dfc3999
--- /dev/null
+++ b/test/typeparam/listimp2.dir/a.go
@@ -0,0 +1,298 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+import (
+ "fmt"
+)
+
+// Element is an element of a linked list.
+type Element[T any] struct {
+ // Next and previous pointers in the doubly-linked list of elements.
+ // To simplify the implementation, internally a list l is implemented
+ // as a ring, such that &l.root is both the next element of the last
+ // list element (l.Back()) and the previous element of the first list
+ // element (l.Front()).
+ next, prev *Element[T]
+
+ // The list to which this element belongs.
+ list *List[T]
+
+ // The value stored with this element.
+ Value T
+}
+
+// Next returns the next list element or nil.
+func (e *Element[T]) Next() *Element[T] {
+ if p := e.next; e.list != nil && p != &e.list.root {
+ return p
+ }
+ return nil
+}
+
+// Prev returns the previous list element or nil.
+func (e *Element[T]) Prev() *Element[T] {
+ if p := e.prev; e.list != nil && p != &e.list.root {
+ return p
+ }
+ return nil
+}
+
+// List represents a doubly linked list.
+// The zero value for List is an empty list ready to use.
+type List[T any] struct {
+ root Element[T] // sentinel list element, only &root, root.prev, and root.next are used
+ len int // current list length excluding (this) sentinel element
+}
+
+// Init initializes or clears list l.
+func (l *List[T]) Init() *List[T] {
+ l.root.next = &l.root
+ l.root.prev = &l.root
+ l.len = 0
+ return l
+}
+
+// New returns an initialized list.
+func New[T any]() *List[T] { return new(List[T]).Init() }
+
+// Len returns the number of elements of list l.
+// The complexity is O(1).
+func (l *List[_]) Len() int { return l.len }
+
+// Front returns the first element of list l or nil if the list is empty.
+func (l *List[T]) Front() *Element[T] {
+ if l.len == 0 {
+ return nil
+ }
+ return l.root.next
+}
+
+// Back returns the last element of list l or nil if the list is empty.
+func (l *List[T]) Back() *Element[T] {
+ if l.len == 0 {
+ return nil
+ }
+ return l.root.prev
+}
+
+// lazyInit lazily initializes a zero List value.
+func (l *List[_]) lazyInit() {
+ if l.root.next == nil {
+ l.Init()
+ }
+}
+
+// insert inserts e after at, increments l.len, and returns e.
+func (l *List[T]) insert(e, at *Element[T]) *Element[T] {
+ e.prev = at
+ e.next = at.next
+ e.prev.next = e
+ e.next.prev = e
+ e.list = l
+ l.len++
+ return e
+}
+
+// insertValue is a convenience wrapper for insert(&Element[T]{Value: v}, at).
+func (l *List[T]) insertValue(v T, at *Element[T]) *Element[T] {
+ return l.insert(&Element[T]{Value: v}, at)
+}
+
+// remove removes e from its list, decrements l.len, and returns e.
+func (l *List[T]) remove(e *Element[T]) *Element[T] {
+ e.prev.next = e.next
+ e.next.prev = e.prev
+ e.next = nil // avoid memory leaks
+ e.prev = nil // avoid memory leaks
+ e.list = nil
+ l.len--
+ return e
+}
+
+// move moves e to next to at and returns e.
+func (l *List[T]) move(e, at *Element[T]) *Element[T] {
+ if e == at {
+ return e
+ }
+ e.prev.next = e.next
+ e.next.prev = e.prev
+
+ e.prev = at
+ e.next = at.next
+ e.prev.next = e
+ e.next.prev = e
+
+ return e
+}
+
+// Remove removes e from l if e is an element of list l.
+// It returns the element value e.Value.
+// The element must not be nil.
+func (l *List[T]) Remove(e *Element[T]) T {
+ if e.list == l {
+ // if e.list == l, l must have been initialized when e was inserted
+ // in l or l == nil (e is a zero Element) and l.remove will crash
+ l.remove(e)
+ }
+ return e.Value
+}
+
+// PushFront inserts a new element e with value v at the front of list l and returns e.
+func (l *List[T]) PushFront(v T) *Element[T] {
+ l.lazyInit()
+ return l.insertValue(v, &l.root)
+}
+
+// PushBack inserts a new element e with value v at the back of list l and returns e.
+func (l *List[T]) PushBack(v T) *Element[T] {
+ l.lazyInit()
+ return l.insertValue(v, l.root.prev)
+}
+
+// InsertBefore inserts a new element e with value v immediately before mark and returns e.
+// If mark is not an element of l, the list is not modified.
+// The mark must not be nil.
+func (l *List[T]) InsertBefore(v T, mark *Element[T]) *Element[T] {
+ if mark.list != l {
+ return nil
+ }
+ // see comment in List.Remove about initialization of l
+ return l.insertValue(v, mark.prev)
+}
+
+// InsertAfter inserts a new element e with value v immediately after mark and returns e.
+// If mark is not an element of l, the list is not modified.
+// The mark must not be nil.
+func (l *List[T]) InsertAfter(v T, mark *Element[T]) *Element[T] {
+ if mark.list != l {
+ return nil
+ }
+ // see comment in List.Remove about initialization of l
+ return l.insertValue(v, mark)
+}
+
+// MoveToFront moves element e to the front of list l.
+// If e is not an element of l, the list is not modified.
+// The element must not be nil.
+func (l *List[T]) MoveToFront(e *Element[T]) {
+ if e.list != l || l.root.next == e {
+ return
+ }
+ // see comment in List.Remove about initialization of l
+ l.move(e, &l.root)
+}
+
+// MoveToBack moves element e to the back of list l.
+// If e is not an element of l, the list is not modified.
+// The element must not be nil.
+func (l *List[T]) MoveToBack(e *Element[T]) {
+ if e.list != l || l.root.prev == e {
+ return
+ }
+ // see comment in List.Remove about initialization of l
+ l.move(e, l.root.prev)
+}
+
+// MoveBefore moves element e to its new position before mark.
+// If e or mark is not an element of l, or e == mark, the list is not modified.
+// The element and mark must not be nil.
+func (l *List[T]) MoveBefore(e, mark *Element[T]) {
+ if e.list != l || e == mark || mark.list != l {
+ return
+ }
+ l.move(e, mark.prev)
+}
+
+// MoveAfter moves element e to its new position after mark.
+// If e or mark is not an element of l, or e == mark, the list is not modified.
+// The element and mark must not be nil.
+func (l *List[T]) MoveAfter(e, mark *Element[T]) {
+ if e.list != l || e == mark || mark.list != l {
+ return
+ }
+ l.move(e, mark)
+}
+
+// PushBackList inserts a copy of another list at the back of list l.
+// The lists l and other may be the same. They must not be nil.
+func (l *List[T]) PushBackList(other *List[T]) {
+ l.lazyInit()
+ for i, e := other.Len(), other.Front(); i > 0; i, e = i-1, e.Next() {
+ l.insertValue(e.Value, l.root.prev)
+ }
+}
+
+// PushFrontList inserts a copy of another list at the front of list l.
+// The lists l and other may be the same. They must not be nil.
+func (l *List[T]) PushFrontList(other *List[T]) {
+ l.lazyInit()
+ for i, e := other.Len(), other.Back(); i > 0; i, e = i-1, e.Prev() {
+ l.insertValue(e.Value, &l.root)
+ }
+}
+
+// Transform runs a transform function on a list returning a new list.
+func Transform[TElem1, TElem2 any](lst *List[TElem1], f func(TElem1) TElem2) *List[TElem2] {
+ ret := New[TElem2]()
+ for p := lst.Front(); p != nil; p = p.Next() {
+ ret.PushBack(f(p.Value))
+ }
+ return ret
+}
+
+func CheckListLen[T any](l *List[T], len int) bool {
+ if n := l.Len(); n != len {
+ panic(fmt.Sprintf("l.Len() = %d, want %d", n, len))
+ return false
+ }
+ return true
+}
+
+func CheckListPointers[T any](l *List[T], es []*Element[T]) {
+ root := &l.root
+
+ if !CheckListLen(l, len(es)) {
+ return
+ }
+
+ // zero length lists must be the zero value or properly initialized (sentinel circle)
+ if len(es) == 0 {
+ if l.root.next != nil && l.root.next != root || l.root.prev != nil && l.root.prev != root {
+			panic(fmt.Sprintf("l.root.next = %p, l.root.prev = %p; both should be nil or %p", l.root.next, l.root.prev, root))
+ }
+ return
+ }
+ // len(es) > 0
+
+ // check internal and external prev/next connections
+ for i, e := range es {
+ prev := root
+ Prev := (*Element[T])(nil)
+ if i > 0 {
+ prev = es[i-1]
+ Prev = prev
+ }
+ if p := e.prev; p != prev {
+ panic(fmt.Sprintf("elt[%d](%p).prev = %p, want %p", i, e, p, prev))
+ }
+ if p := e.Prev(); p != Prev {
+ panic(fmt.Sprintf("elt[%d](%p).Prev() = %p, want %p", i, e, p, Prev))
+ }
+
+ next := root
+ Next := (*Element[T])(nil)
+ if i < len(es)-1 {
+ next = es[i+1]
+ Next = next
+ }
+ if n := e.next; n != next {
+ panic(fmt.Sprintf("elt[%d](%p).next = %p, want %p", i, e, n, next))
+ }
+ if n := e.Next(); n != Next {
+ panic(fmt.Sprintf("elt[%d](%p).Next() = %p, want %p", i, e, n, Next))
+ }
+ }
+}
diff --git a/test/typeparam/listimp2.dir/main.go b/test/typeparam/listimp2.dir/main.go
new file mode 100644
index 0000000000..226e1a9a57
--- /dev/null
+++ b/test/typeparam/listimp2.dir/main.go
@@ -0,0 +1,315 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+ "strconv"
+)
+
+func TestList() {
+ l := a.New[string]()
+ a.CheckListPointers(l, []*(a.Element[string]){})
+
+ // Single element list
+ e := l.PushFront("a")
+ a.CheckListPointers(l, []*(a.Element[string]){e})
+ l.MoveToFront(e)
+ a.CheckListPointers(l, []*(a.Element[string]){e})
+ l.MoveToBack(e)
+ a.CheckListPointers(l, []*(a.Element[string]){e})
+ l.Remove(e)
+ a.CheckListPointers(l, []*(a.Element[string]){})
+
+ // Bigger list
+ l2 := a.New[int]()
+ e2 := l2.PushFront(2)
+ e1 := l2.PushFront(1)
+ e3 := l2.PushBack(3)
+ e4 := l2.PushBack(600)
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e2, e3, e4})
+
+ l2.Remove(e2)
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e3, e4})
+
+ l2.MoveToFront(e3) // move from middle
+ a.CheckListPointers(l2, []*(a.Element[int]){e3, e1, e4})
+
+ l2.MoveToFront(e1)
+ l2.MoveToBack(e3) // move from middle
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e4, e3})
+
+ l2.MoveToFront(e3) // move from back
+ a.CheckListPointers(l2, []*(a.Element[int]){e3, e1, e4})
+ l2.MoveToFront(e3) // should be no-op
+ a.CheckListPointers(l2, []*(a.Element[int]){e3, e1, e4})
+
+ l2.MoveToBack(e3) // move from front
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e4, e3})
+ l2.MoveToBack(e3) // should be no-op
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e4, e3})
+
+ e2 = l2.InsertBefore(2, e1) // insert before front
+ a.CheckListPointers(l2, []*(a.Element[int]){e2, e1, e4, e3})
+ l2.Remove(e2)
+ e2 = l2.InsertBefore(2, e4) // insert before middle
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e2, e4, e3})
+ l2.Remove(e2)
+ e2 = l2.InsertBefore(2, e3) // insert before back
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e4, e2, e3})
+ l2.Remove(e2)
+
+ e2 = l2.InsertAfter(2, e1) // insert after front
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e2, e4, e3})
+ l2.Remove(e2)
+ e2 = l2.InsertAfter(2, e4) // insert after middle
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e4, e2, e3})
+ l2.Remove(e2)
+ e2 = l2.InsertAfter(2, e3) // insert after back
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e4, e3, e2})
+ l2.Remove(e2)
+
+ // Check standard iteration.
+ sum := 0
+ for e := l2.Front(); e != nil; e = e.Next() {
+ sum += e.Value
+ }
+ if sum != 604 {
+ panic(fmt.Sprintf("sum over l = %d, want 604", sum))
+ }
+
+ // Clear all elements by iterating
+ var next *a.Element[int]
+ for e := l2.Front(); e != nil; e = next {
+ next = e.Next()
+ l2.Remove(e)
+ }
+ a.CheckListPointers(l2, []*(a.Element[int]){})
+}
+
+func checkList[T comparable](l *a.List[T], es []interface{}) {
+ if !a.CheckListLen(l, len(es)) {
+ return
+ }
+
+ i := 0
+ for e := l.Front(); e != nil; e = e.Next() {
+ le := e.Value
+ // Comparison between a generically-typed variable le and an interface.
+ if le != es[i] {
+ panic(fmt.Sprintf("elt[%d].Value = %v, want %v", i, le, es[i]))
+ }
+ i++
+ }
+}
+
+func TestExtending() {
+ l1 := a.New[int]()
+ l2 := a.New[int]()
+
+ l1.PushBack(1)
+ l1.PushBack(2)
+ l1.PushBack(3)
+
+ l2.PushBack(4)
+ l2.PushBack(5)
+
+ l3 := a.New[int]()
+ l3.PushBackList(l1)
+ checkList(l3, []interface{}{1, 2, 3})
+ l3.PushBackList(l2)
+ checkList(l3, []interface{}{1, 2, 3, 4, 5})
+
+ l3 = a.New[int]()
+ l3.PushFrontList(l2)
+ checkList(l3, []interface{}{4, 5})
+ l3.PushFrontList(l1)
+ checkList(l3, []interface{}{1, 2, 3, 4, 5})
+
+ checkList(l1, []interface{}{1, 2, 3})
+ checkList(l2, []interface{}{4, 5})
+
+ l3 = a.New[int]()
+ l3.PushBackList(l1)
+ checkList(l3, []interface{}{1, 2, 3})
+ l3.PushBackList(l3)
+ checkList(l3, []interface{}{1, 2, 3, 1, 2, 3})
+
+ l3 = a.New[int]()
+ l3.PushFrontList(l1)
+ checkList(l3, []interface{}{1, 2, 3})
+ l3.PushFrontList(l3)
+ checkList(l3, []interface{}{1, 2, 3, 1, 2, 3})
+
+ l3 = a.New[int]()
+ l1.PushBackList(l3)
+ checkList(l1, []interface{}{1, 2, 3})
+ l1.PushFrontList(l3)
+ checkList(l1, []interface{}{1, 2, 3})
+}
+
+func TestRemove() {
+ l := a.New[int]()
+ e1 := l.PushBack(1)
+ e2 := l.PushBack(2)
+ a.CheckListPointers(l, []*(a.Element[int]){e1, e2})
+ e := l.Front()
+ l.Remove(e)
+ a.CheckListPointers(l, []*(a.Element[int]){e2})
+ l.Remove(e)
+ a.CheckListPointers(l, []*(a.Element[int]){e2})
+}
+
+func TestIssue4103() {
+ l1 := a.New[int]()
+ l1.PushBack(1)
+ l1.PushBack(2)
+
+ l2 := a.New[int]()
+ l2.PushBack(3)
+ l2.PushBack(4)
+
+ e := l1.Front()
+ l2.Remove(e) // l2 should not change because e is not an element of l2
+ if n := l2.Len(); n != 2 {
+ panic(fmt.Sprintf("l2.Len() = %d, want 2", n))
+ }
+
+ l1.InsertBefore(8, e)
+ if n := l1.Len(); n != 3 {
+ panic(fmt.Sprintf("l1.Len() = %d, want 3", n))
+ }
+}
+
+func TestIssue6349() {
+ l := a.New[int]()
+ l.PushBack(1)
+ l.PushBack(2)
+
+ e := l.Front()
+ l.Remove(e)
+ if e.Value != 1 {
+ panic(fmt.Sprintf("e.value = %d, want 1", e.Value))
+ }
+ if e.Next() != nil {
+		panic("e.Next() != nil")
+ }
+ if e.Prev() != nil {
+		panic("e.Prev() != nil")
+ }
+}
+
+func TestMove() {
+ l := a.New[int]()
+ e1 := l.PushBack(1)
+ e2 := l.PushBack(2)
+ e3 := l.PushBack(3)
+ e4 := l.PushBack(4)
+
+ l.MoveAfter(e3, e3)
+ a.CheckListPointers(l, []*(a.Element[int]){e1, e2, e3, e4})
+ l.MoveBefore(e2, e2)
+ a.CheckListPointers(l, []*(a.Element[int]){e1, e2, e3, e4})
+
+ l.MoveAfter(e3, e2)
+ a.CheckListPointers(l, []*(a.Element[int]){e1, e2, e3, e4})
+ l.MoveBefore(e2, e3)
+ a.CheckListPointers(l, []*(a.Element[int]){e1, e2, e3, e4})
+
+ l.MoveBefore(e2, e4)
+ a.CheckListPointers(l, []*(a.Element[int]){e1, e3, e2, e4})
+ e2, e3 = e3, e2
+
+ l.MoveBefore(e4, e1)
+ a.CheckListPointers(l, []*(a.Element[int]){e4, e1, e2, e3})
+ e1, e2, e3, e4 = e4, e1, e2, e3
+
+ l.MoveAfter(e4, e1)
+ a.CheckListPointers(l, []*(a.Element[int]){e1, e4, e2, e3})
+ e2, e3, e4 = e4, e2, e3
+
+ l.MoveAfter(e2, e3)
+ a.CheckListPointers(l, []*(a.Element[int]){e1, e3, e2, e4})
+ e2, e3 = e3, e2
+}
+
+// Test PushFront, PushBack, PushFrontList, PushBackList with uninitialized a.List
+func TestZeroList() {
+ var l1 = new(a.List[int])
+ l1.PushFront(1)
+ checkList(l1, []interface{}{1})
+
+ var l2 = new(a.List[int])
+ l2.PushBack(1)
+ checkList(l2, []interface{}{1})
+
+ var l3 = new(a.List[int])
+ l3.PushFrontList(l1)
+ checkList(l3, []interface{}{1})
+
+ var l4 = new(a.List[int])
+ l4.PushBackList(l2)
+ checkList(l4, []interface{}{1})
+}
+
+// Test that a list l is not modified when calling InsertBefore with a mark that is not an element of l.
+func TestInsertBeforeUnknownMark() {
+ var l a.List[int]
+ l.PushBack(1)
+ l.PushBack(2)
+ l.PushBack(3)
+ l.InsertBefore(1, new(a.Element[int]))
+ checkList(&l, []interface{}{1, 2, 3})
+}
+
+// Test that a list l is not modified when calling InsertAfter with a mark that is not an element of l.
+func TestInsertAfterUnknownMark() {
+ var l a.List[int]
+ l.PushBack(1)
+ l.PushBack(2)
+ l.PushBack(3)
+ l.InsertAfter(1, new(a.Element[int]))
+ checkList(&l, []interface{}{1, 2, 3})
+}
+
+// Test that a list l is not modified when calling MoveAfter or MoveBefore with a mark that is not an element of l.
+func TestMoveUnknownMark() {
+ var l1 a.List[int]
+ e1 := l1.PushBack(1)
+
+ var l2 a.List[int]
+ e2 := l2.PushBack(2)
+
+ l1.MoveAfter(e1, e2)
+ checkList(&l1, []interface{}{1})
+ checkList(&l2, []interface{}{2})
+
+ l1.MoveBefore(e1, e2)
+ checkList(&l1, []interface{}{1})
+ checkList(&l2, []interface{}{2})
+}
+
+// Test the Transform function.
+func TestTransform() {
+ l1 := a.New[int]()
+ l1.PushBack(1)
+ l1.PushBack(2)
+ l2 := a.Transform(l1, strconv.Itoa)
+ checkList(l2, []interface{}{"1", "2"})
+}
+
+func main() {
+ TestList()
+ TestExtending()
+ TestRemove()
+ TestIssue4103()
+ TestIssue6349()
+ TestMove()
+ TestZeroList()
+ TestInsertBeforeUnknownMark()
+ TestInsertAfterUnknownMark()
+ TestTransform()
+}
diff --git a/test/typeparam/listimp2.go b/test/typeparam/listimp2.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/listimp2.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/lockable.go b/test/typeparam/lockable.go
index d53817521f..9b20d87bb7 100644
--- a/test/typeparam/lockable.go
+++ b/test/typeparam/lockable.go
@@ -8,29 +8,29 @@ package main
import "sync"
-// A _Lockable is a value that may be safely simultaneously accessed
+// A Lockable is a value that may be safely simultaneously accessed
// from multiple goroutines via the get and set methods.
-type _Lockable[T any] struct {
- T
+type Lockable[T any] struct {
+ x T
mu sync.Mutex
}
-// Get returns the value stored in a _Lockable.
-func (l *_Lockable[T]) get() T {
+// get returns the value stored in a Lockable.
+func (l *Lockable[T]) get() T {
l.mu.Lock()
defer l.mu.Unlock()
- return l.T
+ return l.x
}
-// set sets the value in a _Lockable.
-func (l *_Lockable[T]) set(v T) {
+// set sets the value in a Lockable.
+func (l *Lockable[T]) set(v T) {
l.mu.Lock()
defer l.mu.Unlock()
- l.T = v
+ l.x = v
}
func main() {
- sl := _Lockable[string]{T: "a"}
+ sl := Lockable[string]{x: "a"}
if got := sl.get(); got != "a" {
panic(got)
}
@@ -39,7 +39,7 @@ func main() {
panic(got)
}
- il := _Lockable[int]{T: 1}
+ il := Lockable[int]{x: 1}
if got := il.get(); got != 1 {
panic(got)
}
diff --git a/test/typeparam/mapimp.dir/a.go b/test/typeparam/mapimp.dir/a.go
new file mode 100644
index 0000000000..cbfa80ac6b
--- /dev/null
+++ b/test/typeparam/mapimp.dir/a.go
@@ -0,0 +1,15 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+// Mapper calls the function f on every element of the slice s,
+// returning a new slice of the results.
+func Mapper[F, T any](s []F, f func(F) T) []T {
+ r := make([]T, len(s))
+ for i, v := range s {
+ r[i] = f(v)
+ }
+ return r
+}
diff --git a/test/typeparam/mapimp.dir/main.go b/test/typeparam/mapimp.dir/main.go
new file mode 100644
index 0000000000..4d4a4d9eb0
--- /dev/null
+++ b/test/typeparam/mapimp.dir/main.go
@@ -0,0 +1,28 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+ "reflect"
+ "strconv"
+)
+
+func main() {
+ got := a.Mapper([]int{1, 2, 3}, strconv.Itoa)
+ want := []string{"1", "2", "3"}
+ if !reflect.DeepEqual(got, want) {
+ panic(fmt.Sprintf("got %s, want %s", got, want))
+ }
+
+ fgot := a.Mapper([]float64{2.5, 2.3, 3.5}, func(f float64) string {
+ return strconv.FormatFloat(f, 'f', -1, 64)
+ })
+ fwant := []string{"2.5", "2.3", "3.5"}
+ if !reflect.DeepEqual(fgot, fwant) {
+ panic(fmt.Sprintf("got %s, want %s", fgot, fwant))
+ }
+}
diff --git a/test/typeparam/mapimp.go b/test/typeparam/mapimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/mapimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/mapsimp.dir/a.go b/test/typeparam/mapsimp.dir/a.go
new file mode 100644
index 0000000000..696e2a5680
--- /dev/null
+++ b/test/typeparam/mapsimp.dir/a.go
@@ -0,0 +1,108 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+// SliceEqual reports whether two slices are equal: the same length and all
+// elements equal. All floating point NaNs are considered equal.
+func SliceEqual[Elem comparable](s1, s2 []Elem) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if v1 != v2 {
+ isNaN := func(f Elem) bool { return f != f }
+ if !isNaN(v1) || !isNaN(v2) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Keys returns the keys of the map m.
+// The keys will be in an indeterminate order.
+func Keys[K comparable, V any](m map[K]V) []K {
+ r := make([]K, 0, len(m))
+ for k := range m {
+ r = append(r, k)
+ }
+ return r
+}
+
+// Values returns the values of the map m.
+// The values will be in an indeterminate order.
+func Values[K comparable, V any](m map[K]V) []V {
+ r := make([]V, 0, len(m))
+ for _, v := range m {
+ r = append(r, v)
+ }
+ return r
+}
+
+// Equal reports whether two maps contain the same key/value pairs.
+// Values are compared using ==.
+func Equal[K, V comparable](m1, m2 map[K]V) bool {
+ if len(m1) != len(m2) {
+ return false
+ }
+ for k, v1 := range m1 {
+ if v2, ok := m2[k]; !ok || v1 != v2 {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy returns a copy of m.
+func Copy[K comparable, V any](m map[K]V) map[K]V {
+ r := make(map[K]V, len(m))
+ for k, v := range m {
+ r[k] = v
+ }
+ return r
+}
+
+// Add adds all key/value pairs in m2 to m1. Keys already present in m1
+// have their values overwritten with the corresponding value in m2.
+func Add[K comparable, V any](m1, m2 map[K]V) {
+ for k, v := range m2 {
+ m1[k] = v
+ }
+}
+
+// Sub removes all keys in m2 from m1. Keys in m2 that are not present
+// in m1 are ignored. The values in m2 are ignored.
+func Sub[K comparable, V any](m1, m2 map[K]V) {
+ for k := range m2 {
+ delete(m1, k)
+ }
+}
+
+// Intersect removes all keys from m1 that are not present in m2.
+// Keys in m2 that are not in m1 are ignored. The values in m2 are ignored.
+func Intersect[K comparable, V any](m1, m2 map[K]V) {
+ for k := range m1 {
+ if _, ok := m2[k]; !ok {
+ delete(m1, k)
+ }
+ }
+}
+
+// Filter deletes any key/value pairs from m for which f returns false.
+func Filter[K comparable, V any](m map[K]V, f func(K, V) bool) {
+ for k, v := range m {
+ if !f(k, v) {
+ delete(m, k)
+ }
+ }
+}
+
+// TransformValues applies f to each value in m. The keys remain unchanged.
+func TransformValues[K comparable, V any](m map[K]V, f func(V) V) {
+ for k, v := range m {
+ m[k] = f(v)
+ }
+}
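
A note on the isNaN closure in SliceEqual above: for a value v of any comparable type, v != v is true only when v is a floating-point NaN, so the closure detects NaNs without knowing whether Elem is a float type. A minimal stand-alone sketch of the same idiom (equalOrBothNaN is an illustrative name, not part of the package):

package sketch

import "math"

// equalOrBothNaN reports whether a and b are equal, treating two NaN
// values as equal to each other. v != v is true only for NaNs.
func equalOrBothNaN[T comparable](a, b T) bool {
	if a == b {
		return true
	}
	isNaN := func(v T) bool { return v != v }
	return isNaN(a) && isNaN(b)
}

var _ = equalOrBothNaN(math.NaN(), math.NaN()) // true
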
diff --git a/test/typeparam/mapsimp.dir/main.go b/test/typeparam/mapsimp.dir/main.go
new file mode 100644
index 0000000000..873660e4cd
--- /dev/null
+++ b/test/typeparam/mapsimp.dir/main.go
@@ -0,0 +1,156 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+ "math"
+ "sort"
+)
+
+var m1 = map[int]int{1: 2, 2: 4, 4: 8, 8: 16}
+var m2 = map[int]string{1: "2", 2: "4", 4: "8", 8: "16"}
+
+func TestKeys() {
+ want := []int{1, 2, 4, 8}
+
+ got1 := a.Keys(m1)
+ sort.Ints(got1)
+ if !a.SliceEqual(got1, want) {
+ panic(fmt.Sprintf("a.Keys(%v) = %v, want %v", m1, got1, want))
+ }
+
+ got2 := a.Keys(m2)
+ sort.Ints(got2)
+ if !a.SliceEqual(got2, want) {
+ panic(fmt.Sprintf("a.Keys(%v) = %v, want %v", m2, got2, want))
+ }
+}
+
+func TestValues() {
+ got1 := a.Values(m1)
+ want1 := []int{2, 4, 8, 16}
+ sort.Ints(got1)
+ if !a.SliceEqual(got1, want1) {
+ panic(fmt.Sprintf("a.Values(%v) = %v, want %v", m1, got1, want1))
+ }
+
+ got2 := a.Values(m2)
+ want2 := []string{"16", "2", "4", "8"}
+ sort.Strings(got2)
+ if !a.SliceEqual(got2, want2) {
+ panic(fmt.Sprintf("a.Values(%v) = %v, want %v", m2, got2, want2))
+ }
+}
+
+func TestEqual() {
+ if !a.Equal(m1, m1) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = false, want true", m1, m1))
+ }
+ if a.Equal(m1, nil) {
+ panic(fmt.Sprintf("a.Equal(%v, nil) = true, want false", m1))
+ }
+ if a.Equal(nil, m1) {
+ panic(fmt.Sprintf("a.Equal(nil, %v) = true, want false", m1))
+ }
+ if !a.Equal[int, int](nil, nil) {
+ panic("a.Equal(nil, nil) = false, want true")
+ }
+ if ms := map[int]int{1: 2}; a.Equal(m1, ms) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = true, want false", m1, ms))
+ }
+
+ // Comparing NaN for equality is expected to fail.
+ mf := map[int]float64{1: 0, 2: math.NaN()}
+ if a.Equal(mf, mf) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = true, want false", mf, mf))
+ }
+}
+
+func TestCopy() {
+ m2 := a.Copy(m1)
+ if !a.Equal(m1, m2) {
+ panic(fmt.Sprintf("a.Copy(%v) = %v, want %v", m1, m2, m1))
+ }
+ m2[16] = 32
+ if a.Equal(m1, m2) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = true, want false", m1, m2))
+ }
+}
+
+func TestAdd() {
+ mc := a.Copy(m1)
+ a.Add(mc, mc)
+ if !a.Equal(mc, m1) {
+ panic(fmt.Sprintf("a.Add(%v, %v) = %v, want %v", m1, m1, mc, m1))
+ }
+ a.Add(mc, map[int]int{16: 32})
+ want := map[int]int{1: 2, 2: 4, 4: 8, 8: 16, 16: 32}
+ if !a.Equal(mc, want) {
+ panic(fmt.Sprintf("a.Add result = %v, want %v", mc, want))
+ }
+}
+
+func TestSub() {
+ mc := a.Copy(m1)
+ a.Sub(mc, mc)
+ if len(mc) > 0 {
+ panic(fmt.Sprintf("a.Sub(%v, %v) = %v, want empty map", m1, m1, mc))
+ }
+ mc = a.Copy(m1)
+ a.Sub(mc, map[int]int{1: 0})
+ want := map[int]int{2: 4, 4: 8, 8: 16}
+ if !a.Equal(mc, want) {
+ panic(fmt.Sprintf("a.Sub result = %v, want %v", mc, want))
+ }
+}
+
+func TestIntersect() {
+ mc := a.Copy(m1)
+ a.Intersect(mc, mc)
+ if !a.Equal(mc, m1) {
+ panic(fmt.Sprintf("a.Intersect(%v, %v) = %v, want %v", m1, m1, mc, m1))
+ }
+ a.Intersect(mc, map[int]int{1: 0, 2: 0})
+ want := map[int]int{1: 2, 2: 4}
+ if !a.Equal(mc, want) {
+ panic(fmt.Sprintf("a.Intersect result = %v, want %v", mc, want))
+ }
+}
+
+func TestFilter() {
+ mc := a.Copy(m1)
+ a.Filter(mc, func(int, int) bool { return true })
+ if !a.Equal(mc, m1) {
+ panic(fmt.Sprintf("a.Filter(%v, true) = %v, want %v", m1, mc, m1))
+ }
+ a.Filter(mc, func(k, v int) bool { return k < 3 })
+ want := map[int]int{1: 2, 2: 4}
+ if !a.Equal(mc, want) {
+ panic(fmt.Sprintf("a.Filter result = %v, want %v", mc, want))
+ }
+}
+
+func TestTransformValues() {
+ mc := a.Copy(m1)
+ a.TransformValues(mc, func(i int) int { return i / 2 })
+ want := map[int]int{1: 1, 2: 2, 4: 4, 8: 8}
+ if !a.Equal(mc, want) {
+ panic(fmt.Sprintf("a.TransformValues result = %v, want %v", mc, want))
+ }
+}
+
+func main() {
+ TestKeys()
+ TestValues()
+ TestEqual()
+ TestCopy()
+ TestAdd()
+ TestSub()
+ TestIntersect()
+ TestFilter()
+ TestTransformValues()
+}
diff --git a/test/typeparam/mapsimp.go b/test/typeparam/mapsimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/mapsimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/mdempsky/1.dir/a.go b/test/typeparam/mdempsky/1.dir/a.go
new file mode 100644
index 0000000000..a668eb52dc
--- /dev/null
+++ b/test/typeparam/mdempsky/1.dir/a.go
@@ -0,0 +1,9 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type T[_ any] int
+
+func F() { _ = new(T[int]) }
diff --git a/test/typeparam/mdempsky/1.dir/b.go b/test/typeparam/mdempsky/1.dir/b.go
new file mode 100644
index 0000000000..af6fef3f6d
--- /dev/null
+++ b/test/typeparam/mdempsky/1.dir/b.go
@@ -0,0 +1,9 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "./a"
+
+func main() { a.F() }
diff --git a/test/typeparam/mdempsky/1.go b/test/typeparam/mdempsky/1.go
new file mode 100644
index 0000000000..87b4ff46c1
--- /dev/null
+++ b/test/typeparam/mdempsky/1.go
@@ -0,0 +1,7 @@
+// compiledir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/mdempsky/10.dir/a.go b/test/typeparam/mdempsky/10.dir/a.go
new file mode 100644
index 0000000000..95e111d347
--- /dev/null
+++ b/test/typeparam/mdempsky/10.dir/a.go
@@ -0,0 +1,7 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type I[T any] interface{ M() T }
diff --git a/test/typeparam/mdempsky/10.dir/b.go b/test/typeparam/mdempsky/10.dir/b.go
new file mode 100644
index 0000000000..0ef28fd02d
--- /dev/null
+++ b/test/typeparam/mdempsky/10.dir/b.go
@@ -0,0 +1,17 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "./a"
+
+var m = a.I[int].M
+
+var never bool
+
+func main() {
+ if never {
+ m(nil)
+ }
+}
diff --git a/test/typeparam/mdempsky/10.go b/test/typeparam/mdempsky/10.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/mdempsky/10.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/mdempsky/11.go b/test/typeparam/mdempsky/11.go
new file mode 100644
index 0000000000..e86c038a10
--- /dev/null
+++ b/test/typeparam/mdempsky/11.go
@@ -0,0 +1,16 @@
+// errorcheck
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Reported by Cuong Manh Le.
+
+package p
+
+type a struct{}
+
+//go:notinheap
+type b a
+
+var _ = (*b)(new(a)) // ERROR "cannot convert"
diff --git a/test/typeparam/mdempsky/12.dir/a.go b/test/typeparam/mdempsky/12.dir/a.go
new file mode 100644
index 0000000000..ee8be939a8
--- /dev/null
+++ b/test/typeparam/mdempsky/12.dir/a.go
@@ -0,0 +1,11 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type S[T any] struct {
+ F T
+}
+
+var X = S[int]{}
diff --git a/test/typeparam/mdempsky/12.dir/main.go b/test/typeparam/mdempsky/12.dir/main.go
new file mode 100644
index 0000000000..2891322e29
--- /dev/null
+++ b/test/typeparam/mdempsky/12.dir/main.go
@@ -0,0 +1,13 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "./a"
+)
+
+func main() {
+ _ = a.X
+}
diff --git a/test/typeparam/mdempsky/12.go b/test/typeparam/mdempsky/12.go
new file mode 100644
index 0000000000..a2dc4daacc
--- /dev/null
+++ b/test/typeparam/mdempsky/12.go
@@ -0,0 +1,9 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Reported by Cuong Manh Le.
+
+package ignored
diff --git a/test/typeparam/mdempsky/13.go b/test/typeparam/mdempsky/13.go
new file mode 100644
index 0000000000..b492774d3d
--- /dev/null
+++ b/test/typeparam/mdempsky/13.go
@@ -0,0 +1,84 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// Mer is an interface used both as a regular interface type and as a type bound.
+type Mer interface {
+ M()
+}
+
+// Mer2 is an interface that is a superset of Mer.
+type Mer2 interface {
+ M()
+ String() string
+}
+
+func F[T Mer](t T) {
+ T.M(t)
+ t.M()
+}
+
+type MyMer int
+
+func (MyMer) M() {}
+func (MyMer) String() string {
+ return "aa"
+}
+
+// Parameterized interface
+type Abs[T any] interface {
+ Abs() T
+}
+
+func G[T Abs[U], U any](t T) {
+ T.Abs(t)
+ t.Abs()
+}
+
+type MyInt int
+func (m MyInt) Abs() MyInt {
+ if m < 0 {
+ return -m
+ }
+ return m
+}
+
+type Abs2 interface {
+ Abs() MyInt
+}
+
+
+func main() {
+ mm := MyMer(3)
+	ms := struct{ Mer }{Mer: mm}
+
+	// Testing F with interface type args: Mer and Mer2
+	F[Mer](mm)
+	F[Mer2](mm)
+	F[struct{ Mer }](ms)
+	F[*struct{ Mer }](&ms)
+
+	ms2 := struct{ MyMer }{MyMer: mm}
+	ms3 := struct{ *MyMer }{MyMer: &mm}
+
+ // Testing F with a concrete type arg
+ F[MyMer](mm)
+ F[*MyMer](&mm)
+ F[struct{ MyMer }](ms2)
+ F[struct{ *MyMer }](ms3)
+ F[*struct{ MyMer }](&ms2)
+ F[*struct{ *MyMer }](&ms3)
+
+	// Testing G with concrete type args
+	mi := MyInt(-3)
+	G[MyInt, MyInt](mi)
+
+	// An interface value of type Abs[MyInt] holding mi.
+	intMi := Abs[MyInt](mi)
+	// The first type arg here is Abs[MyInt], an interface type.
+	G[Abs[MyInt], MyInt](intMi)
+}
diff --git a/test/typeparam/mdempsky/14.go b/test/typeparam/mdempsky/14.go
new file mode 100644
index 0000000000..ba685bc35c
--- /dev/null
+++ b/test/typeparam/mdempsky/14.go
@@ -0,0 +1,40 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// Zero returns the zero value of T
+func Zero[T any]() (_ T) {
+ return
+}
+
+type AnyInt[X any] int
+
+func (AnyInt[X]) M() {
+ var have interface{} = Zero[X]()
+ var want interface{} = Zero[MyInt]()
+
+ if have != want {
+ println("FAIL")
+ }
+}
+
+type I interface{ M() }
+
+type MyInt int
+type U = AnyInt[MyInt]
+
+var x = U(0)
+var i I = x
+
+func main() {
+ x.M()
+ U.M(x)
+ (*U).M(&x)
+
+ i.M()
+ I.M(x)
+}
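
Zero above returns the zero value of T through a named blank result. An equivalent spelling declares a local variable instead; a small sketch for comparison (ZeroOf is an illustrative name, not part of the test):

package sketch

// ZeroOf returns the zero value of T using a local variable; it behaves
// the same as the named-blank-result form of Zero above.
func ZeroOf[T any]() T {
	var zero T
	return zero
}
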
diff --git a/test/typeparam/mdempsky/15.go b/test/typeparam/mdempsky/15.go
new file mode 100644
index 0000000000..4899fc75ee
--- /dev/null
+++ b/test/typeparam/mdempsky/15.go
@@ -0,0 +1,69 @@
+// run -goexperiment fieldtrack -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test that generics, promoted methods, and //go:nointerface
+// interoperate as expected.
+
+package main
+
+import (
+ "reflect"
+)
+
+func TypeString[T any]() string {
+ return reflect.TypeOf(new(T)).Elem().String()
+}
+
+func Test[T, Bad, Good any]() {
+ switch interface{}(new(T)).(type) {
+ case Bad:
+ println("FAIL:", TypeString[T](), "matched", TypeString[Bad]())
+ case Good:
+ // ok
+ default:
+ println("FAIL:", TypeString[T](), "did not match", TypeString[Good]())
+ }
+}
+
+func TestE[T any]() { Test[T, interface{ EBad() }, interface{ EGood() }]() }
+func TestX[T any]() { Test[T, interface{ XBad() }, interface{ XGood() }]() }
+
+type E struct{}
+
+//go:nointerface
+func (E) EBad() {}
+func (E) EGood() {}
+
+type X[T any] struct{ E }
+
+//go:nointerface
+func (X[T]) XBad() {}
+func (X[T]) XGood() {}
+
+type W struct{ X[int] }
+
+func main() {
+ _ = E.EGood
+ _ = E.EBad
+
+ TestE[E]()
+
+ _ = X[int].EGood
+ _ = X[int].EBad
+ _ = X[int].XGood
+ _ = X[int].XBad
+
+ TestE[X[int]]()
+ TestX[X[int]]()
+
+ _ = W.EGood
+ _ = W.EBad
+ _ = W.XGood
+ _ = W.XBad
+
+ TestE[W]()
+ TestX[W]()
+}
diff --git a/test/typeparam/mdempsky/2.go b/test/typeparam/mdempsky/2.go
new file mode 100644
index 0000000000..f09730f949
--- /dev/null
+++ b/test/typeparam/mdempsky/2.go
@@ -0,0 +1,20 @@
+// compile -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+type T[A, B, C any] int
+
+func (T[A, B, C]) m(x int) {
+ if x <= 0 {
+ return
+ }
+ T[B, C, A](0).m(x - 1)
+}
+
+func main() {
+ T[int8, int16, int32](0).m(3)
+}
diff --git a/test/typeparam/mdempsky/3.dir/a.go b/test/typeparam/mdempsky/3.dir/a.go
new file mode 100644
index 0000000000..cf456e8d48
--- /dev/null
+++ b/test/typeparam/mdempsky/3.dir/a.go
@@ -0,0 +1,7 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+func F[T interface{ chan int }](c T) {}
diff --git a/test/typeparam/mdempsky/3.dir/b.go b/test/typeparam/mdempsky/3.dir/b.go
new file mode 100644
index 0000000000..0cfd142f4c
--- /dev/null
+++ b/test/typeparam/mdempsky/3.dir/b.go
@@ -0,0 +1,9 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package b
+
+import "./a"
+
+func g() { a.F(make(chan int)) }
diff --git a/test/typeparam/mdempsky/3.go b/test/typeparam/mdempsky/3.go
new file mode 100644
index 0000000000..87b4ff46c1
--- /dev/null
+++ b/test/typeparam/mdempsky/3.go
@@ -0,0 +1,7 @@
+// compiledir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
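The mdempsky/3 pair above appears to exercise exporting and importing a function constrained to a single channel type. As a hedged illustration (not part of the patch) of what such constraints permit, generic code may send on and close a value of the type parameter when the constraint's core type is a channel; Fill is an illustrative name.

package main

// Fill sends n values on c and then closes it. The send and the close are
// legal because the constraint's core type is chan int.
func Fill[C interface{ chan int }](c C, n int) {
    go func() {
        for i := 0; i < n; i++ {
            c <- i
        }
        close(c)
    }()
}

func main() {
    c := make(chan int)
    Fill(c, 3)
    for v := range c {
        println(v)
    }
}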
diff --git a/test/typeparam/mdempsky/4.dir/a.go b/test/typeparam/mdempsky/4.dir/a.go
new file mode 100644
index 0000000000..cb672949ea
--- /dev/null
+++ b/test/typeparam/mdempsky/4.dir/a.go
@@ -0,0 +1,12 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+func F[T any](T) {
+Loop:
+ for {
+ break Loop
+ }
+}
diff --git a/test/typeparam/mdempsky/4.dir/b.go b/test/typeparam/mdempsky/4.dir/b.go
new file mode 100644
index 0000000000..e1fb0e7c5e
--- /dev/null
+++ b/test/typeparam/mdempsky/4.dir/b.go
@@ -0,0 +1,9 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package b
+
+import "./a"
+
+func f() { a.F(0) }
diff --git a/test/typeparam/mdempsky/4.go b/test/typeparam/mdempsky/4.go
new file mode 100644
index 0000000000..87b4ff46c1
--- /dev/null
+++ b/test/typeparam/mdempsky/4.go
@@ -0,0 +1,7 @@
+// compiledir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/mdempsky/5.go b/test/typeparam/mdempsky/5.go
new file mode 100644
index 0000000000..0d1ad39946
--- /dev/null
+++ b/test/typeparam/mdempsky/5.go
@@ -0,0 +1,15 @@
+// compile -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type X[T any] int
+
+func (X[T]) F(T) {}
+
+func x() {
+ X[interface{}](0).F(0)
+}
diff --git a/test/typeparam/mdempsky/6.go b/test/typeparam/mdempsky/6.go
new file mode 100644
index 0000000000..a26ff62f6d
--- /dev/null
+++ b/test/typeparam/mdempsky/6.go
@@ -0,0 +1,11 @@
+// compile -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type I[T any] interface{ M() T }
+
+var _ = I[int].M
diff --git a/test/typeparam/mdempsky/7.dir/a.go b/test/typeparam/mdempsky/7.dir/a.go
new file mode 100644
index 0000000000..59c5995611
--- /dev/null
+++ b/test/typeparam/mdempsky/7.dir/a.go
@@ -0,0 +1,9 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type I[T any] interface{ M() T }
+
+var X I[int]
diff --git a/test/typeparam/mdempsky/7.dir/b.go b/test/typeparam/mdempsky/7.dir/b.go
new file mode 100644
index 0000000000..9f70530811
--- /dev/null
+++ b/test/typeparam/mdempsky/7.dir/b.go
@@ -0,0 +1,9 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package b
+
+import "./a"
+
+var _ = a.X
diff --git a/test/typeparam/mdempsky/7.go b/test/typeparam/mdempsky/7.go
new file mode 100644
index 0000000000..87b4ff46c1
--- /dev/null
+++ b/test/typeparam/mdempsky/7.go
@@ -0,0 +1,7 @@
+// compiledir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/mdempsky/8.dir/a.go b/test/typeparam/mdempsky/8.dir/a.go
new file mode 100644
index 0000000000..607fe5e0af
--- /dev/null
+++ b/test/typeparam/mdempsky/8.dir/a.go
@@ -0,0 +1,7 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+func F[T interface{ comparable }]() {}
diff --git a/src/cmd/gofmt/gofmt_typeparams_test.go b/test/typeparam/mdempsky/8.dir/b.go
index 10641a77cb..ef2637b894 100644
--- a/src/cmd/gofmt/gofmt_typeparams_test.go
+++ b/test/typeparam/mdempsky/8.dir/b.go
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build typeparams
-// +build typeparams
+package b
-package main
+import "./a"
func init() {
- typeParamsEnabled = true
+ a.F[func()]() // ERROR "does not satisfy comparable"
}
diff --git a/test/typeparam/mdempsky/8.go b/test/typeparam/mdempsky/8.go
new file mode 100644
index 0000000000..32cf4b830d
--- /dev/null
+++ b/test/typeparam/mdempsky/8.go
@@ -0,0 +1,7 @@
+// errorcheckdir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
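The errorcheck above hinges on the comparable constraint, which admits exactly the types that support == and therefore excludes function, map, and slice types. A minimal hedged sketch (Eq is an illustrative name, not part of the patch):

package main

// Eq works for any comparable type argument.
func Eq[T comparable](x, y T) bool { return x == y }

func main() {
    println(Eq(1, 2))     // ok: int is comparable
    println(Eq("a", "a")) // ok: string is comparable
    // Eq(func() {}, func() {}) // rejected: func() does not satisfy comparable
}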
diff --git a/test/typeparam/mdempsky/9.go b/test/typeparam/mdempsky/9.go
new file mode 100644
index 0000000000..b72516c4ea
--- /dev/null
+++ b/test/typeparam/mdempsky/9.go
@@ -0,0 +1,11 @@
+// compile -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+func f[V any]() []V { return []V{0: *new(V)} }
+
+func g() { f[int]() }
diff --git a/test/typeparam/min.go b/test/typeparam/min.go
index a3e4464a30..d6c65d68b7 100644
--- a/test/typeparam/min.go
+++ b/test/typeparam/min.go
@@ -11,7 +11,7 @@ import (
)
type Ordered interface {
- type int, int64, float64
+ ~int | ~int64 | ~float64 | ~string
}
func min[T Ordered](x, y T) T {
@@ -38,4 +38,13 @@ func main() {
if got := min(3.5, 2.0); got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
+
+ const want2 = "ay"
+ if got := min[string]("bb", "ay"); got != want2 {
+ panic(fmt.Sprintf("got %d, want %d", got, want2))
+ }
+
+ if got := min("bb", "ay"); got != want2 {
+ panic(fmt.Sprintf("got %d, want %d", got, want2))
+ }
}
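The constraint rewrites in this patch (here and in ordered.go, orderedmap.go, slices.go, and smallest.go below) replace the old "type ..." lists with union elements, where ~T admits any type whose underlying type is T. A minimal hedged sketch of what the new form accepts, not part of the patch:

package main

import "fmt"

type Ordered interface {
    ~int | ~int64 | ~float64 | ~string
}

func Min[T Ordered](x, y T) T {
    if x < y {
        return x
    }
    return y
}

type MyInt int // underlying type is int, so ~int admits it

func main() {
    fmt.Println(Min(3, 2))               // int
    fmt.Println(Min("bb", "ay"))         // string
    fmt.Println(Min(MyInt(5), MyInt(4))) // defined type admitted via ~int
}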
diff --git a/test/typeparam/mincheck.dir/a.go b/test/typeparam/mincheck.dir/a.go
new file mode 100644
index 0000000000..fa0f249e61
--- /dev/null
+++ b/test/typeparam/mincheck.dir/a.go
@@ -0,0 +1,16 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Ordered interface {
+ int | int64 | float64
+}
+
+func Min[T Ordered](x, y T) T {
+ if x < y {
+ return x
+ }
+ return y
+}
diff --git a/test/typeparam/mincheck.dir/main.go b/test/typeparam/mincheck.dir/main.go
new file mode 100644
index 0000000000..9cf2c6bafd
--- /dev/null
+++ b/test/typeparam/mincheck.dir/main.go
@@ -0,0 +1,38 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+)
+
+func main() {
+ const want = 2
+ if got := a.Min[int](2, 3); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ if got := a.Min(2, 3); got != want {
+ panic(fmt.Sprintf("want %d, got %d", want, got))
+ }
+
+ if got := a.Min[float64](3.5, 2.0); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ if got := a.Min(3.5, 2.0); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ const want2 = "ay"
+ if got := a.Min[string]("bb", "ay"); got != want2 { // ERROR "string does not satisfy"
+ panic(fmt.Sprintf("got %d, want %d", got, want2))
+ }
+
+ if got := a.Min("bb", "ay"); got != want2 { // ERROR "string does not satisfy"
+ panic(fmt.Sprintf("got %d, want %d", got, want2))
+ }
+}
diff --git a/test/typeparam/mincheck.go b/test/typeparam/mincheck.go
new file mode 100644
index 0000000000..32cf4b830d
--- /dev/null
+++ b/test/typeparam/mincheck.go
@@ -0,0 +1,7 @@
+// errorcheckdir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/minimp.dir/a.go b/test/typeparam/minimp.dir/a.go
new file mode 100644
index 0000000000..fabde62c5d
--- /dev/null
+++ b/test/typeparam/minimp.dir/a.go
@@ -0,0 +1,16 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Ordered interface {
+ ~int | ~int64 | ~float64 | ~string
+}
+
+func Min[T Ordered](x, y T) T {
+ if x < y {
+ return x
+ }
+ return y
+}
diff --git a/test/typeparam/minimp.dir/main.go b/test/typeparam/minimp.dir/main.go
new file mode 100644
index 0000000000..509f5aaed2
--- /dev/null
+++ b/test/typeparam/minimp.dir/main.go
@@ -0,0 +1,38 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+)
+
+func main() {
+ const want = 2
+ if got := a.Min[int](2, 3); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ if got := a.Min(2, 3); got != want {
+ panic(fmt.Sprintf("want %d, got %d", want, got))
+ }
+
+ if got := a.Min[float64](3.5, 2.0); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ if got := a.Min(3.5, 2.0); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ const want2 = "ay"
+ if got := a.Min[string]("bb", "ay"); got != want2 {
+ panic(fmt.Sprintf("got %d, want %d", got, want2))
+ }
+
+ if got := a.Min("bb", "ay"); got != want2 {
+ panic(fmt.Sprintf("got %d, want %d", got, want2))
+ }
+}
diff --git a/test/typeparam/minimp.go b/test/typeparam/minimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/minimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/mutualimp.dir/a.go b/test/typeparam/mutualimp.dir/a.go
new file mode 100644
index 0000000000..5b924d3ce5
--- /dev/null
+++ b/test/typeparam/mutualimp.dir/a.go
@@ -0,0 +1,12 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type X int
+
+func (x X) M() X { return x }
+
+func F[T interface{ M() U }, U interface{ M() T }]() {}
+func G() { F[X, X]() }
diff --git a/test/typeparam/mutualimp.dir/b.go b/test/typeparam/mutualimp.dir/b.go
new file mode 100644
index 0000000000..83cc3af283
--- /dev/null
+++ b/test/typeparam/mutualimp.dir/b.go
@@ -0,0 +1,12 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package b
+
+import "./a"
+
+func H() {
+ a.F[a.X, a.X]()
+ a.G()
+}
diff --git a/test/typeparam/mutualimp.go b/test/typeparam/mutualimp.go
new file mode 100644
index 0000000000..87b4ff46c1
--- /dev/null
+++ b/test/typeparam/mutualimp.go
@@ -0,0 +1,7 @@
+// compiledir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/nested.go b/test/typeparam/nested.go
new file mode 100644
index 0000000000..c0037a3e6e
--- /dev/null
+++ b/test/typeparam/nested.go
@@ -0,0 +1,134 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This test case stress tests a number of subtle cases involving
+// nested type-parameterized declarations. At a high-level, it
+// declares a generic function that contains a generic type
+// declaration:
+//
+// func F[A intish]() {
+// type T[B intish] struct{}
+//
+// // store reflect.Type tuple (A, B, F[A].T[B]) in tests
+// }
+//
+// It then instantiates this function with a variety of type arguments
+// for A and B, including particularly tricky cases like shadowed types.
+//
+// From this data it tests two things:
+//
+// 1. Given tuples (A, B, F[A].T[B]) and (A', B', F[A'].T[B']),
+// F[A].T[B] should be identical to F[A'].T[B'] iff (A, B) is
+// identical to (A', B').
+//
+// 2. A few of the instantiations are constructed to be identical, and
+// it tests that exactly these pairs are duplicated (by golden
+// output comparison to nested.out).
+//
+// In both cases, we're effectively using the compiler's existing
+// runtime.Type handling (which is well tested) of type identity of A
+// and B as a way to help bootstrap testing and validate its new
+// runtime.Type handling of F[A].T[B].
+//
+// This isn't perfect, but it smoked out a handful of issues in
+// gotypes2 and unified IR.
+
+package main
+
+import (
+ "fmt"
+ "reflect"
+)
+
+type test struct {
+ TArgs [2]reflect.Type
+ Instance reflect.Type
+}
+
+var tests []test
+
+type intish interface{ ~int }
+
+type Int int
+type GlobalInt = Int // allow access to global Int, even when shadowed
+
+func F[A intish]() {
+ add := func(B, T interface{}) {
+ tests = append(tests, test{
+ TArgs: [2]reflect.Type{
+ reflect.TypeOf(A(0)),
+ reflect.TypeOf(B),
+ },
+ Instance: reflect.TypeOf(T),
+ })
+ }
+
+ type Int int
+
+ type T[B intish] struct{}
+
+ add(int(0), T[int]{})
+ add(Int(0), T[Int]{})
+ add(GlobalInt(0), T[GlobalInt]{})
+ add(A(0), T[A]{}) // NOTE: intentionally dups with int and GlobalInt
+
+ type U[_ any] int
+ type V U[int]
+ type W V
+
+ add(U[int](0), T[U[int]]{})
+ add(U[Int](0), T[U[Int]]{})
+ add(U[GlobalInt](0), T[U[GlobalInt]]{})
+ add(U[A](0), T[U[A]]{}) // NOTE: intentionally dups with U[int] and U[GlobalInt]
+ add(V(0), T[V]{})
+ add(W(0), T[W]{})
+}
+
+func main() {
+ type Int int
+
+ F[int]()
+ F[Int]()
+ F[GlobalInt]()
+
+ type U[_ any] int
+ type V U[int]
+ type W V
+
+ F[U[int]]()
+ F[U[Int]]()
+ F[U[GlobalInt]]()
+ F[V]()
+ F[W]()
+
+ type X[A any] U[X[A]]
+
+ F[X[int]]()
+ F[X[Int]]()
+ F[X[GlobalInt]]()
+
+ for j, tj := range tests {
+ for i, ti := range tests[:j+1] {
+ if (ti.TArgs == tj.TArgs) != (ti.Instance == tj.Instance) {
+ fmt.Printf("FAIL: %d,%d: %s, but %s\n", i, j, eq(ti.TArgs, tj.TArgs), eq(ti.Instance, tj.Instance))
+ }
+
+ // The test is constructed so we should see a few identical types.
+ // See "NOTE" comments above.
+ if i != j && ti.Instance == tj.Instance {
+ fmt.Printf("%d,%d: %v\n", i, j, ti.Instance)
+ }
+ }
+ }
+}
+
+func eq(a, b interface{}) string {
+ op := "=="
+ if a != b {
+ op = "!="
+ }
+ return fmt.Sprintf("%v %s %v", a, op, b)
+}
diff --git a/test/typeparam/nested.out b/test/typeparam/nested.out
new file mode 100644
index 0000000000..9110518248
--- /dev/null
+++ b/test/typeparam/nested.out
@@ -0,0 +1,4 @@
+0,3: main.T·2[int;int]
+4,7: main.T·2[int;"".U·3[int;int]]
+22,23: main.T·2["".Int;"".Int]
+26,27: main.T·2["".Int;"".U·3["".Int;"".Int]]
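nested.go stress-tests function-local generic type declarations, which mainstream toolchains may reject outside this experiment; below is a hedged package-level sketch of the same identity invariant (Pair, typeOf, and Int are illustrative names, not part of the patch).

package main

import (
    "fmt"
    "reflect"
)

type Pair[A, B any] struct{}

func typeOf[A, B any]() reflect.Type { return reflect.TypeOf(Pair[A, B]{}) }

type Int int

func main() {
    // Identical type arguments yield the identical instantiated type...
    fmt.Println(typeOf[int, int]() == typeOf[int, int]()) // true
    // ...and distinct type arguments yield distinct instantiated types.
    fmt.Println(typeOf[int, Int]() == typeOf[int, int]()) // false
}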
diff --git a/test/typeparam/ordered.go b/test/typeparam/ordered.go
index 448db68bb5..0f539d659c 100644
--- a/test/typeparam/ordered.go
+++ b/test/typeparam/ordered.go
@@ -13,15 +13,15 @@ import (
)
type Ordered interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64,
- string
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~string
}
type orderedSlice[Elem Ordered] []Elem
-func (s orderedSlice[Elem]) Len() int { return len(s) }
+func (s orderedSlice[Elem]) Len() int { return len(s) }
func (s orderedSlice[Elem]) Less(i, j int) bool {
if s[i] < s[j] {
return true
@@ -32,7 +32,7 @@ func (s orderedSlice[Elem]) Less(i, j int) bool {
}
return false
}
-func (s orderedSlice[Elem]) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s orderedSlice[Elem]) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func _OrderedSlice[Elem Ordered](s []Elem) {
sort.Sort(orderedSlice[Elem](s))
@@ -68,7 +68,7 @@ func testOrdered[Elem Ordered](name string, s []Elem, sorter func([]Elem)) bool
}
for i := len(s1) - 1; i > 0; i-- {
if s1[i] < s1[i-1] {
- fmt.Printf("%s: element %d (%v) < element %d (%v)", name, i, s1[i], i - 1, s1[i - 1])
+ fmt.Printf("%s: element %d (%v) < element %d (%v)", name, i, s1[i], i-1, s1[i-1])
ok = false
}
}
diff --git a/test/typeparam/orderedmap.go b/test/typeparam/orderedmap.go
index db1b374267..1f077333b8 100644
--- a/test/typeparam/orderedmap.go
+++ b/test/typeparam/orderedmap.go
@@ -15,10 +15,10 @@ import (
)
type Ordered interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64,
- string
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~string
}
// _Map is an ordered map.
@@ -230,7 +230,7 @@ func _Ranger[Elem any]() (*_Sender[Elem], *_Receiver[Elem]) {
values: c,
done: d,
}
- r := &_Receiver[Elem] {
+ r := &_Receiver[Elem]{
values: c,
done: d,
}
diff --git a/test/typeparam/orderedmapsimp.dir/a.go b/test/typeparam/orderedmapsimp.dir/a.go
new file mode 100644
index 0000000000..d6a2de5d7b
--- /dev/null
+++ b/test/typeparam/orderedmapsimp.dir/a.go
@@ -0,0 +1,226 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+import (
+ "context"
+ "runtime"
+)
+
+type Ordered interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~string
+}
+
+// Map is an ordered map.
+type Map[K, V any] struct {
+ root *node[K, V]
+ compare func(K, K) int
+}
+
+// node is the type of a node in the binary tree.
+type node[K, V any] struct {
+ key K
+ val V
+ left, right *node[K, V]
+}
+
+// New returns a new map. It takes a comparison function that compares two
+// keys and returns < 0 if the first is less, == 0 if they are equal,
+// > 0 if the first is greater.
+func New[K, V any](compare func(K, K) int) *Map[K, V] {
+ return &Map[K, V]{compare: compare}
+}
+
+// NewOrdered returns a new map whose key is an ordered type.
+// This is like New, but does not require providing a compare function.
+// The map compare function uses the obvious key ordering.
+func NewOrdered[K Ordered, V any]() *Map[K, V] {
+ return New[K, V](func(k1, k2 K) int {
+ switch {
+ case k1 < k2:
+ return -1
+ case k1 > k2:
+ return 1
+ default:
+ return 0
+ }
+ })
+}
+
+// find looks up key in the map, returning either a pointer to the slot of the
+// node holding key, or a pointer to the slot where a node would go.
+func (m *Map[K, V]) find(key K) **node[K, V] {
+ pn := &m.root
+ for *pn != nil {
+ switch cmp := m.compare(key, (*pn).key); {
+ case cmp < 0:
+ pn = &(*pn).left
+ case cmp > 0:
+ pn = &(*pn).right
+ default:
+ return pn
+ }
+ }
+ return pn
+}
+
+// Insert inserts a new key/value into the map.
+// If the key is already present, the value is replaced.
+// Reports whether this is a new key.
+func (m *Map[K, V]) Insert(key K, val V) bool {
+ pn := m.find(key)
+ if *pn != nil {
+ (*pn).val = val
+ return false
+ }
+ *pn = &node[K, V]{key: key, val: val}
+ return true
+}
+
+// Find returns the value associated with a key, or the zero value
+// if not present. The second result reports whether the key was found.
+func (m *Map[K, V]) Find(key K) (V, bool) {
+ pn := m.find(key)
+ if *pn == nil {
+ var zero V
+ return zero, false
+ }
+ return (*pn).val, true
+}
+
+// keyValue is a pair of key and value used while iterating.
+type keyValue[K, V any] struct {
+ key K
+ val V
+}
+
+// Iterate returns an iterator that traverses the map.
+func (m *Map[K, V]) Iterate() *Iterator[K, V] {
+ sender, receiver := Ranger[keyValue[K, V]]()
+ var f func(*node[K, V]) bool
+ f = func(n *node[K, V]) bool {
+ if n == nil {
+ return true
+ }
+ // Stop the traversal if Send fails, which means that
+ // nothing is listening to the receiver.
+ return f(n.left) &&
+ sender.Send(context.Background(), keyValue[K, V]{n.key, n.val}) &&
+ f(n.right)
+ }
+ go func() {
+ f(m.root)
+ sender.Close()
+ }()
+ return &Iterator[K, V]{receiver}
+}
+
+// Iterator is used to iterate over the map.
+type Iterator[K, V any] struct {
+ r *Receiver[keyValue[K, V]]
+}
+
+// Next returns the next key and value pair, and a boolean that reports
+// whether they are valid. If not valid, we have reached the end of the map.
+func (it *Iterator[K, V]) Next() (K, V, bool) {
+ keyval, ok := it.r.Next(context.Background())
+ if !ok {
+ var zerok K
+ var zerov V
+ return zerok, zerov, false
+ }
+ return keyval.key, keyval.val, true
+}
+
+// SliceEqual reports whether two slices are equal: the same length and all
+// elements equal. All floating point NaNs are considered equal.
+func SliceEqual[Elem comparable](s1, s2 []Elem) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if v1 != v2 {
+ isNaN := func(f Elem) bool { return f != f }
+ if !isNaN(v1) || !isNaN(v2) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Ranger returns a Sender and a Receiver. The Receiver provides a
+// Next method to retrieve values. The Sender provides a Send method
+// to send values and a Close method to stop sending values. The Next
+// method indicates when the Sender has been closed, and the Send
+// method indicates when the Receiver has been freed.
+//
+// This is a convenient way to exit a goroutine sending values when
+// the receiver stops reading them.
+func Ranger[Elem any]() (*Sender[Elem], *Receiver[Elem]) {
+ c := make(chan Elem)
+ d := make(chan struct{})
+ s := &Sender[Elem]{
+ values: c,
+ done: d,
+ }
+ r := &Receiver[Elem]{
+ values: c,
+ done: d,
+ }
+ runtime.SetFinalizer(r, (*Receiver[Elem]).finalize)
+ return s, r
+}
+
+// A Sender is used to send values to a Receiver.
+type Sender[Elem any] struct {
+ values chan<- Elem
+ done <-chan struct{}
+}
+
+// Send sends a value to the receiver. It reports whether the value was sent.
+// The value will not be sent if the context is canceled or the receiver
+// is freed.
+func (s *Sender[Elem]) Send(ctx context.Context, v Elem) bool {
+ select {
+ case <-ctx.Done():
+ return false
+ case s.values <- v:
+ return true
+ case <-s.done:
+ return false
+ }
+}
+
+// Close tells the receiver that no more values will arrive.
+// After Close is called, the Sender may no longer be used.
+func (s *Sender[Elem]) Close() {
+ close(s.values)
+}
+
+// A Receiver receives values from a Sender.
+type Receiver[Elem any] struct {
+ values <-chan Elem
+ done chan<- struct{}
+}
+
+// Next returns the next value from the channel. The bool result indicates
+// whether the value is valid.
+func (r *Receiver[Elem]) Next(ctx context.Context) (v Elem, ok bool) {
+ select {
+ case <-ctx.Done():
+ case v, ok = <-r.values:
+ }
+ return v, ok
+}
+
+// finalize is a finalizer for the receiver.
+func (r *Receiver[Elem]) finalize() {
+ close(r.done)
+}
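A hedged usage sketch of the Ranger/Sender/Receiver types defined above, assuming the package is importable as "a" the way these .dir tests arrange imports (not part of the patch). The sending goroutine exits once Send reports false, which happens when the context is done or the Receiver has been freed.

package main

import (
    "context"
    "fmt"

    "a"
)

func main() {
    s, r := a.Ranger[int]()
    go func() {
        defer s.Close()
        for i := 0; i < 3; i++ {
            if !s.Send(context.Background(), i) {
                return // receiver went away
            }
        }
    }()
    for {
        v, ok := r.Next(context.Background())
        if !ok {
            break
        }
        fmt.Println(v)
    }
}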
diff --git a/test/typeparam/orderedmapsimp.dir/main.go b/test/typeparam/orderedmapsimp.dir/main.go
new file mode 100644
index 0000000000..978f1e763c
--- /dev/null
+++ b/test/typeparam/orderedmapsimp.dir/main.go
@@ -0,0 +1,64 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "bytes"
+ "fmt"
+)
+
+func TestMap() {
+ m := a.New[[]byte, int](bytes.Compare)
+
+ if _, found := m.Find([]byte("a")); found {
+ panic(fmt.Sprintf("unexpectedly found %q in empty map", []byte("a")))
+ }
+
+ for _, c := range []int{'a', 'c', 'b'} {
+ if !m.Insert([]byte(string(c)), c) {
+ panic(fmt.Sprintf("key %q unexpectedly already present", []byte(string(c))))
+ }
+ }
+ if m.Insert([]byte("c"), 'x') {
+ panic(fmt.Sprintf("key %q unexpectedly not present", []byte("c")))
+ }
+
+ if v, found := m.Find([]byte("a")); !found {
+ panic(fmt.Sprintf("did not find %q", []byte("a")))
+ } else if v != 'a' {
+ panic(fmt.Sprintf("key %q returned wrong value %c, expected %c", []byte("a"), v, 'a'))
+ }
+ if v, found := m.Find([]byte("c")); !found {
+ panic(fmt.Sprintf("did not find %q", []byte("c")))
+ } else if v != 'x' {
+ panic(fmt.Sprintf("key %q returned wrong value %c, expected %c", []byte("c"), v, 'x'))
+ }
+
+ if _, found := m.Find([]byte("d")); found {
+ panic(fmt.Sprintf("unexpectedly found %q", []byte("d")))
+ }
+
+ gather := func(it *a.Iterator[[]byte, int]) []int {
+ var r []int
+ for {
+ _, v, ok := it.Next()
+ if !ok {
+ return r
+ }
+ r = append(r, v)
+ }
+ }
+ got := gather(m.Iterate())
+ want := []int{'a', 'b', 'x'}
+ if !a.SliceEqual(got, want) {
+ panic(fmt.Sprintf("Iterate returned %v, want %v", got, want))
+ }
+
+}
+
+func main() {
+ TestMap()
+}
diff --git a/test/typeparam/orderedmapsimp.go b/test/typeparam/orderedmapsimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/orderedmapsimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/pair.go b/test/typeparam/pair.go
index 7faf083c89..c1427b9c52 100644
--- a/test/typeparam/pair.go
+++ b/test/typeparam/pair.go
@@ -24,7 +24,11 @@ func main() {
if got, want := unsafe.Sizeof(p.f2), uintptr(8); got != want {
panic(fmt.Sprintf("unexpected f2 size == %d, want %d", got, want))
}
- type mypair struct { f1 int32; f2 int64 }
+
+ type mypair struct {
+ f1 int32
+ f2 int64
+ }
mp := mypair(p)
if mp.f1 != 1 || mp.f2 != 2 {
panic(fmt.Sprintf("mp == %#v, want %#v", mp, mypair{1, 2}))
diff --git a/test/typeparam/pairimp.dir/a.go b/test/typeparam/pairimp.dir/a.go
new file mode 100644
index 0000000000..a984fba37b
--- /dev/null
+++ b/test/typeparam/pairimp.dir/a.go
@@ -0,0 +1,10 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Pair[F1, F2 any] struct {
+ Field1 F1
+ Field2 F2
+}
diff --git a/test/typeparam/pairimp.dir/main.go b/test/typeparam/pairimp.dir/main.go
new file mode 100644
index 0000000000..027fdd9ce7
--- /dev/null
+++ b/test/typeparam/pairimp.dir/main.go
@@ -0,0 +1,30 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+ "unsafe"
+)
+
+func main() {
+ p := a.Pair[int32, int64]{1, 2}
+ if got, want := unsafe.Sizeof(p.Field1), uintptr(4); got != want {
+ panic(fmt.Sprintf("unexpected f1 size == %d, want %d", got, want))
+ }
+ if got, want := unsafe.Sizeof(p.Field2), uintptr(8); got != want {
+ panic(fmt.Sprintf("unexpected f2 size == %d, want %d", got, want))
+ }
+
+ type mypair struct {
+ Field1 int32
+ Field2 int64
+ }
+ mp := mypair(p)
+ if mp.Field1 != 1 || mp.Field2 != 2 {
+ panic(fmt.Sprintf("mp == %#v, want %#v", mp, mypair{1, 2}))
+ }
+}
diff --git a/test/typeparam/pairimp.go b/test/typeparam/pairimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/pairimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/sets.go b/test/typeparam/sets.go
index 258514489e..4f07b590e3 100644
--- a/test/typeparam/sets.go
+++ b/test/typeparam/sets.go
@@ -160,7 +160,7 @@ func TestSet() {
vals := s1.Values()
sort.Ints(vals)
w1 := []int{1, 2, 3, 4}
- if !_SliceEqual(vals, w1) {
+ if !_SliceEqual(vals, w1) {
panic(fmt.Sprintf("(%v).Values() == %v, want %v", s1, vals, w1))
}
}
diff --git a/test/typeparam/setsimp.dir/a.go b/test/typeparam/setsimp.dir/a.go
new file mode 100644
index 0000000000..92449ce956
--- /dev/null
+++ b/test/typeparam/setsimp.dir/a.go
@@ -0,0 +1,128 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+// SliceEqual reports whether two slices are equal: the same length and all
+// elements equal. All floating point NaNs are considered equal.
+func SliceEqual[Elem comparable](s1, s2 []Elem) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if v1 != v2 {
+ isNaN := func(f Elem) bool { return f != f }
+ if !isNaN(v1) || !isNaN(v2) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// A Set is a set of elements of some type.
+type Set[Elem comparable] struct {
+ m map[Elem]struct{}
+}
+
+// Make makes a new set.
+func Make[Elem comparable]() Set[Elem] {
+ return Set[Elem]{m: make(map[Elem]struct{})}
+}
+
+// Add adds an element to a set.
+func (s Set[Elem]) Add(v Elem) {
+ s.m[v] = struct{}{}
+}
+
+// Delete removes an element from a set. If the element is not present
+// in the set, this does nothing.
+func (s Set[Elem]) Delete(v Elem) {
+ delete(s.m, v)
+}
+
+// Contains reports whether v is in the set.
+func (s Set[Elem]) Contains(v Elem) bool {
+ _, ok := s.m[v]
+ return ok
+}
+
+// Len returns the number of elements in the set.
+func (s Set[Elem]) Len() int {
+ return len(s.m)
+}
+
+// Values returns the values in the set.
+// The values will be in an indeterminate order.
+func (s Set[Elem]) Values() []Elem {
+ r := make([]Elem, 0, len(s.m))
+ for v := range s.m {
+ r = append(r, v)
+ }
+ return r
+}
+
+// Equal reports whether two sets contain the same elements.
+func Equal[Elem comparable](s1, s2 Set[Elem]) bool {
+ if len(s1.m) != len(s2.m) {
+ return false
+ }
+ for v1 := range s1.m {
+ if !s2.Contains(v1) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy returns a copy of s.
+func (s Set[Elem]) Copy() Set[Elem] {
+ r := Set[Elem]{m: make(map[Elem]struct{}, len(s.m))}
+ for v := range s.m {
+ r.m[v] = struct{}{}
+ }
+ return r
+}
+
+// AddSet adds all the elements of s2 to s.
+func (s Set[Elem]) AddSet(s2 Set[Elem]) {
+ for v := range s2.m {
+ s.m[v] = struct{}{}
+ }
+}
+
+// SubSet removes all elements in s2 from s.
+// Values in s2 that are not in s are ignored.
+func (s Set[Elem]) SubSet(s2 Set[Elem]) {
+ for v := range s2.m {
+ delete(s.m, v)
+ }
+}
+
+// Intersect removes all elements from s that are not present in s2.
+// Values in s2 that are not in s are ignored.
+func (s Set[Elem]) Intersect(s2 Set[Elem]) {
+ for v := range s.m {
+ if !s2.Contains(v) {
+ delete(s.m, v)
+ }
+ }
+}
+
+// Iterate calls f on every element in the set.
+func (s Set[Elem]) Iterate(f func(Elem)) {
+ for v := range s.m {
+ f(v)
+ }
+}
+
+// Filter deletes any elements from s for which f returns false.
+func (s Set[Elem]) Filter(f func(Elem) bool) {
+ for v := range s.m {
+ if !f(v) {
+ delete(s.m, v)
+ }
+ }
+}
diff --git a/test/typeparam/setsimp.dir/main.go b/test/typeparam/setsimp.dir/main.go
new file mode 100644
index 0000000000..8fd1657143
--- /dev/null
+++ b/test/typeparam/setsimp.dir/main.go
@@ -0,0 +1,156 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+ "sort"
+)
+
+func TestSet() {
+ s1 := a.Make[int]()
+ if got := s1.Len(); got != 0 {
+ panic(fmt.Sprintf("Len of empty set = %d, want 0", got))
+ }
+ s1.Add(1)
+ s1.Add(1)
+ s1.Add(1)
+ if got := s1.Len(); got != 1 {
+ panic(fmt.Sprintf("(%v).Len() == %d, want 1", s1, got))
+ }
+ s1.Add(2)
+ s1.Add(3)
+ s1.Add(4)
+ if got := s1.Len(); got != 4 {
+ panic(fmt.Sprintf("(%v).Len() == %d, want 4", s1, got))
+ }
+ if !s1.Contains(1) {
+ panic(fmt.Sprintf("(%v).Contains(1) == false, want true", s1))
+ }
+ if s1.Contains(5) {
+ panic(fmt.Sprintf("(%v).Contains(5) == true, want false", s1))
+ }
+ vals := s1.Values()
+ sort.Ints(vals)
+ w1 := []int{1, 2, 3, 4}
+ if !a.SliceEqual(vals, w1) {
+ panic(fmt.Sprintf("(%v).Values() == %v, want %v", s1, vals, w1))
+ }
+}
+
+func TestEqual() {
+ s1 := a.Make[string]()
+ s2 := a.Make[string]()
+ if !a.Equal(s1, s2) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = false, want true", s1, s2))
+ }
+ s1.Add("hello")
+ s1.Add("world")
+ if got := s1.Len(); got != 2 {
+ panic(fmt.Sprintf("(%v).Len() == %d, want 2", s1, got))
+ }
+ if a.Equal(s1, s2) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = true, want false", s1, s2))
+ }
+}
+
+func TestCopy() {
+ s1 := a.Make[float64]()
+ s1.Add(0)
+ s2 := s1.Copy()
+ if !a.Equal(s1, s2) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = false, want true", s1, s2))
+ }
+ s1.Add(1)
+ if a.Equal(s1, s2) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = true, want false", s1, s2))
+ }
+}
+
+func TestAddSet() {
+ s1 := a.Make[int]()
+ s1.Add(1)
+ s1.Add(2)
+ s2 := a.Make[int]()
+ s2.Add(2)
+ s2.Add(3)
+ s1.AddSet(s2)
+ if got := s1.Len(); got != 3 {
+ panic(fmt.Sprintf("(%v).Len() == %d, want 3", s1, got))
+ }
+ s2.Add(1)
+ if !a.Equal(s1, s2) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = false, want true", s1, s2))
+ }
+}
+
+func TestSubSet() {
+ s1 := a.Make[int]()
+ s1.Add(1)
+ s1.Add(2)
+ s2 := a.Make[int]()
+ s2.Add(2)
+ s2.Add(3)
+ s1.SubSet(s2)
+ if got := s1.Len(); got != 1 {
+ panic(fmt.Sprintf("(%v).Len() == %d, want 1", s1, got))
+ }
+ if vals, want := s1.Values(), []int{1}; !a.SliceEqual(vals, want) {
+ panic(fmt.Sprintf("after SubSet got %v, want %v", vals, want))
+ }
+}
+
+func TestIntersect() {
+ s1 := a.Make[int]()
+ s1.Add(1)
+ s1.Add(2)
+ s2 := a.Make[int]()
+ s2.Add(2)
+ s2.Add(3)
+ s1.Intersect(s2)
+ if got := s1.Len(); got != 1 {
+ panic(fmt.Sprintf("(%v).Len() == %d, want 1", s1, got))
+ }
+ if vals, want := s1.Values(), []int{2}; !a.SliceEqual(vals, want) {
+ panic(fmt.Sprintf("after Intersect got %v, want %v", vals, want))
+ }
+}
+
+func TestIterate() {
+ s1 := a.Make[int]()
+ s1.Add(1)
+ s1.Add(2)
+ s1.Add(3)
+ s1.Add(4)
+ tot := 0
+ s1.Iterate(func(i int) { tot += i })
+ if tot != 10 {
+ panic(fmt.Sprintf("total of %v == %d, want 10", s1, tot))
+ }
+}
+
+func TestFilter() {
+ s1 := a.Make[int]()
+ s1.Add(1)
+ s1.Add(2)
+ s1.Add(3)
+ s1.Filter(func(v int) bool { return v%2 == 0 })
+ if vals, want := s1.Values(), []int{2}; !a.SliceEqual(vals, want) {
+ panic(fmt.Sprintf("after Filter got %v, want %v", vals, want))
+ }
+
+}
+
+func main() {
+ TestSet()
+ TestEqual()
+ TestCopy()
+ TestAddSet()
+ TestSubSet()
+ TestIntersect()
+ TestIterate()
+ TestFilter()
+}
diff --git a/test/typeparam/setsimp.go b/test/typeparam/setsimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/setsimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/settable.go b/test/typeparam/settable.go
index 588166da85..99455e93fa 100644
--- a/test/typeparam/settable.go
+++ b/test/typeparam/settable.go
@@ -13,13 +13,13 @@ import (
// Various implementations of fromStrings().
-type _Setter[B any] interface {
+type Setter[B any] interface {
Set(string)
type *B
}
// Takes two type parameters where PT = *T
-func fromStrings1[T any, PT _Setter[T]](s []string) []T {
+func fromStrings1[T any, PT Setter[T]](s []string) []T {
result := make([]T, len(s))
for i, v := range s {
// The type of &result[i] is *T which is in the type list
@@ -31,7 +31,7 @@ func fromStrings1[T any, PT _Setter[T]](s []string) []T {
return result
}
-func fromStrings1a[T any, PT _Setter[T]](s []string) []PT {
+func fromStrings1a[T any, PT Setter[T]](s []string) []PT {
result := make([]PT, len(s))
for i, v := range s {
// The type new(T) is *T which is in the type list
@@ -44,7 +44,6 @@ func fromStrings1a[T any, PT _Setter[T]](s []string) []PT {
return result
}
-
// Takes one type parameter and a set function
func fromStrings2[T any](s []string, set func(*T, string)) []T {
results := make([]T, len(s))
@@ -54,12 +53,12 @@ func fromStrings2[T any](s []string, set func(*T, string)) []T {
return results
}
-type _Setter2 interface {
+type Setter2 interface {
Set(string)
}
// Takes only one type parameter, but causes a panic (see below)
-func fromStrings3[T _Setter2](s []string) []T {
+func fromStrings3[T Setter2](s []string) []T {
results := make([]T, len(s))
for i, v := range s {
// Panics if T is a pointer type because receiver is T(nil).
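fromStrings1 above relies on the classic *T-constraint trick, still written with the old "type *B" element in this hunk. Below is a hedged sketch of the same pattern in the union syntax used elsewhere in the patch; Setter mirrors the test, while FromStrings and Settable are illustrative names, not part of the patch.

package main

import "fmt"

// Setter constrains PT to be exactly *T and to provide a Set method.
type Setter[T any] interface {
    *T
    Set(string)
}

// FromStrings fills a []T by calling Set through a *T pointer.
func FromStrings[T any, PT Setter[T]](s []string) []T {
    result := make([]T, len(s))
    for i, v := range s {
        PT(&result[i]).Set(v)
    }
    return result
}

type Settable int

func (p *Settable) Set(s string) { *p = Settable(len(s)) }

func main() {
    fmt.Println(FromStrings[Settable]([]string{"a", "bb"})) // [1 2]
}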
diff --git a/test/typeparam/shape1.go b/test/typeparam/shape1.go
new file mode 100644
index 0000000000..de1ea65ed2
--- /dev/null
+++ b/test/typeparam/shape1.go
@@ -0,0 +1,50 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+type I interface {
+ foo() int
+}
+
+// There should be one instantiation of f for both squarer and doubler.
+// Similarly, there should be one instantiation of f for both *incrementer and *decrementer.
+func f[T I](x T) int {
+ return x.foo()
+}
+
+type squarer int
+
+func (x squarer) foo() int {
+ return int(x*x)
+}
+
+type doubler int
+
+func (x doubler) foo() int {
+ return int(2*x)
+}
+
+type incrementer int16
+
+func (x *incrementer) foo() int {
+ return int(*x+1)
+}
+
+type decrementer int32
+
+func (x *decrementer) foo() int {
+ return int(*x-1)
+}
+
+func main() {
+ println(f(squarer(5)))
+ println(f(doubler(5)))
+ var i incrementer = 5
+ println(f(&i))
+ var d decrementer = 5
+ println(f(&d))
+}
diff --git a/test/typeparam/shape1.out b/test/typeparam/shape1.out
new file mode 100644
index 0000000000..da9a12ded5
--- /dev/null
+++ b/test/typeparam/shape1.out
@@ -0,0 +1,4 @@
+25
+10
+6
+4
diff --git a/test/typeparam/sliceimp.dir/a.go b/test/typeparam/sliceimp.dir/a.go
new file mode 100644
index 0000000000..da12e9f9fc
--- /dev/null
+++ b/test/typeparam/sliceimp.dir/a.go
@@ -0,0 +1,141 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Ordered interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~string
+}
+
+// Max returns the maximum of two values of some ordered type.
+func Max[T Ordered](a, b T) T {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+// Min returns the minimum of two values of some ordered type.
+func Min[T Ordered](a, b T) T {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+// Equal reports whether two slices are equal: the same length and all
+// elements equal. All floating point NaNs are considered equal.
+func Equal[Elem comparable](s1, s2 []Elem) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if v1 != v2 {
+ isNaN := func(f Elem) bool { return f != f }
+ if !isNaN(v1) || !isNaN(v2) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// EqualFn reports whether two slices are equal using a comparison
+// function on each element.
+func EqualFn[Elem any](s1, s2 []Elem, eq func(Elem, Elem) bool) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if !eq(v1, v2) {
+ return false
+ }
+ }
+ return true
+}
+
+// Map turns a []Elem1 to a []Elem2 using a mapping function.
+func Map[Elem1, Elem2 any](s []Elem1, f func(Elem1) Elem2) []Elem2 {
+ r := make([]Elem2, len(s))
+ for i, v := range s {
+ r[i] = f(v)
+ }
+ return r
+}
+
+// Reduce reduces a []Elem1 to a single value of type Elem2 using
+// a reduction function.
+func Reduce[Elem1, Elem2 any](s []Elem1, initializer Elem2, f func(Elem2, Elem1) Elem2) Elem2 {
+ r := initializer
+ for _, v := range s {
+ r = f(r, v)
+ }
+ return r
+}
+
+// Filter filters values from a slice using a filter function.
+func Filter[Elem any](s []Elem, f func(Elem) bool) []Elem {
+ var r []Elem
+ for _, v := range s {
+ if f(v) {
+ r = append(r, v)
+ }
+ }
+ return r
+}
+
+// SliceMax returns the maximum element in a slice of some ordered type.
+// If the slice is empty it returns the zero value of the element type.
+func SliceMax[Elem Ordered](s []Elem) Elem {
+ if len(s) == 0 {
+ var zero Elem
+ return zero
+ }
+ return Reduce(s[1:], s[0], Max[Elem])
+}
+
+// SliceMin returns the minimum element in a slice of some ordered type.
+// If the slice is empty it returns the zero value of the element type.
+func SliceMin[Elem Ordered](s []Elem) Elem {
+ if len(s) == 0 {
+ var zero Elem
+ return zero
+ }
+ return Reduce(s[1:], s[0], Min[Elem])
+}
+
+// Append adds values to the end of a slice, returning a new slice.
+// This is like the predeclared append function; it's an example
+// of how to write it using generics. We used to write code like
+// this before append was added to the language, but we had to write
+// a separate copy for each type.
+func Append[T any](s []T, t ...T) []T {
+ lens := len(s)
+ tot := lens + len(t)
+ if tot <= cap(s) {
+ s = s[:tot]
+ } else {
+ news := make([]T, tot, tot+tot/2)
+ Copy(news, s)
+ s = news
+ }
+ Copy(s[lens:tot], t)
+ return s
+}
+
+// Copy copies values from t to s, stopping when either slice is full,
+// returning the number of values copied. This is like the predeclared
+// copy function; it's an example of how to write it using generics.
+func Copy[T any](s, t []T) int {
+ i := 0
+ for ; i < len(s) && i < len(t); i++ {
+ s[i] = t[i]
+ }
+ return i
+}
diff --git a/test/typeparam/sliceimp.dir/main.go b/test/typeparam/sliceimp.dir/main.go
new file mode 100644
index 0000000000..0f79e10018
--- /dev/null
+++ b/test/typeparam/sliceimp.dir/main.go
@@ -0,0 +1,179 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+ "math"
+ "strings"
+)
+
+type Integer interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
+}
+
+func TestEqual() {
+ s1 := []int{1, 2, 3}
+ if !a.Equal(s1, s1) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = false, want true", s1, s1))
+ }
+ s2 := []int{1, 2, 3}
+ if !a.Equal(s1, s2) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = false, want true", s1, s2))
+ }
+ s2 = append(s2, 4)
+ if a.Equal(s1, s2) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = true, want false", s1, s2))
+ }
+
+ s3 := []float64{1, 2, math.NaN()}
+ if !a.Equal(s3, s3) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = false, want true", s3, s3))
+ }
+
+ if a.Equal(s1, nil) {
+ panic(fmt.Sprintf("a.Equal(%v, nil) = true, want false", s1))
+ }
+ if a.Equal(nil, s1) {
+ panic(fmt.Sprintf("a.Equal(nil, %v) = true, want false", s1))
+ }
+ if !a.Equal(s1[:0], nil) {
+ panic(fmt.Sprintf("a.Equal(%v, nil = false, want true", s1[:0]))
+ }
+}
+
+func offByOne[Elem Integer](a, b Elem) bool {
+ return a == b+1 || a == b-1
+}
+
+func TestEqualFn() {
+ s1 := []int{1, 2, 3}
+ s2 := []int{2, 3, 4}
+ if a.EqualFn(s1, s1, offByOne[int]) {
+ panic(fmt.Sprintf("a.EqualFn(%v, %v, offByOne) = true, want false", s1, s1))
+ }
+ if !a.EqualFn(s1, s2, offByOne[int]) {
+ panic(fmt.Sprintf("a.EqualFn(%v, %v, offByOne) = false, want true", s1, s2))
+ }
+
+ if !a.EqualFn(s1[:0], nil, offByOne[int]) {
+ panic(fmt.Sprintf("a.EqualFn(%v, nil, offByOne) = false, want true", s1[:0]))
+ }
+
+ s3 := []string{"a", "b", "c"}
+ s4 := []string{"A", "B", "C"}
+ if !a.EqualFn(s3, s4, strings.EqualFold) {
+ panic(fmt.Sprintf("a.EqualFn(%v, %v, strings.EqualFold) = false, want true", s3, s4))
+ }
+}
+
+func TestMap() {
+ s1 := []int{1, 2, 3}
+ s2 := a.Map(s1, func(i int) float64 { return float64(i) * 2.5 })
+ if want := []float64{2.5, 5, 7.5}; !a.Equal(s2, want) {
+ panic(fmt.Sprintf("a.Map(%v, ...) = %v, want %v", s1, s2, want))
+ }
+
+ s3 := []string{"Hello", "World"}
+ s4 := a.Map(s3, strings.ToLower)
+ if want := []string{"hello", "world"}; !a.Equal(s4, want) {
+ panic(fmt.Sprintf("a.Map(%v, strings.ToLower) = %v, want %v", s3, s4, want))
+ }
+
+ s5 := a.Map(nil, func(i int) int { return i })
+ if len(s5) != 0 {
+ panic(fmt.Sprintf("a.Map(nil, identity) = %v, want empty slice", s5))
+ }
+}
+
+func TestReduce() {
+ s1 := []int{1, 2, 3}
+ r := a.Reduce(s1, 0, func(f float64, i int) float64 { return float64(i)*2.5 + f })
+ if want := 15.0; r != want {
+ panic(fmt.Sprintf("a.Reduce(%v, 0, ...) = %v, want %v", s1, r, want))
+ }
+
+ if got := a.Reduce(nil, 0, func(i, j int) int { return i + j }); got != 0 {
+ panic(fmt.Sprintf("a.Reduce(nil, 0, add) = %v, want 0", got))
+ }
+}
+
+func TestFilter() {
+ s1 := []int{1, 2, 3}
+ s2 := a.Filter(s1, func(i int) bool { return i%2 == 0 })
+ if want := []int{2}; !a.Equal(s2, want) {
+ panic(fmt.Sprintf("a.Filter(%v, even) = %v, want %v", s1, s2, want))
+ }
+
+ if s3 := a.Filter(s1[:0], func(i int) bool { return true }); len(s3) > 0 {
+ panic(fmt.Sprintf("a.Filter(%v, identity) = %v, want empty slice", s1[:0], s3))
+ }
+}
+
+func TestMax() {
+ s1 := []int{1, 2, 3, -5}
+ if got, want := a.SliceMax(s1), 3; got != want {
+ panic(fmt.Sprintf("a.Max(%v) = %d, want %d", s1, got, want))
+ }
+
+ s2 := []string{"aaa", "a", "aa", "aaaa"}
+ if got, want := a.SliceMax(s2), "aaaa"; got != want {
+ panic(fmt.Sprintf("a.Max(%v) = %q, want %q", s2, got, want))
+ }
+
+ if got, want := a.SliceMax(s2[:0]), ""; got != want {
+ panic(fmt.Sprintf("a.Max(%v) = %q, want %q", s2[:0], got, want))
+ }
+}
+
+func TestMin() {
+ s1 := []int{1, 2, 3, -5}
+ if got, want := a.SliceMin(s1), -5; got != want {
+ panic(fmt.Sprintf("a.Min(%v) = %d, want %d", s1, got, want))
+ }
+
+ s2 := []string{"aaa", "a", "aa", "aaaa"}
+ if got, want := a.SliceMin(s2), "a"; got != want {
+ panic(fmt.Sprintf("a.Min(%v) = %q, want %q", s2, got, want))
+ }
+
+ if got, want := a.SliceMin(s2[:0]), ""; got != want {
+ panic(fmt.Sprintf("a.Min(%v) = %q, want %q", s2[:0], got, want))
+ }
+}
+
+func TestAppend() {
+ s := []int{1, 2, 3}
+ s = a.Append(s, 4, 5, 6)
+ want := []int{1, 2, 3, 4, 5, 6}
+ if !a.Equal(s, want) {
+ panic(fmt.Sprintf("after a.Append got %v, want %v", s, want))
+ }
+}
+
+func TestCopy() {
+ s1 := []int{1, 2, 3}
+ s2 := []int{4, 5}
+ if got := a.Copy(s1, s2); got != 2 {
+ panic(fmt.Sprintf("a.Copy returned %d, want 2", got))
+ }
+ want := []int{4, 5, 3}
+ if !a.Equal(s1, want) {
+ panic(fmt.Sprintf("after a.Copy got %v, want %v", s1, want))
+ }
+}
+func main() {
+ TestEqual()
+ TestEqualFn()
+ TestMap()
+ TestReduce()
+ TestFilter()
+ TestMax()
+ TestMin()
+ TestAppend()
+ TestCopy()
+}
diff --git a/test/typeparam/sliceimp.go b/test/typeparam/sliceimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/sliceimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/slices.go b/test/typeparam/slices.go
index 149199eb64..b5e8e0c606 100644
--- a/test/typeparam/slices.go
+++ b/test/typeparam/slices.go
@@ -15,31 +15,31 @@ import (
)
type Ordered interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64,
- string
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~string
}
type Integer interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
}
// Max returns the maximum of two values of some ordered type.
func _Max[T Ordered](a, b T) T {
- if a > b {
- return a
- }
- return b
+ if a > b {
+ return a
+ }
+ return b
}
// Min returns the minimum of two values of some ordered type.
func _Min[T Ordered](a, b T) T {
- if a < b {
- return a
- }
- return b
+ if a < b {
+ return a
+ }
+ return b
}
// _Equal reports whether two slices are equal: the same length and all
@@ -136,7 +136,7 @@ func _Append[T any](s []T, t ...T) []T {
if tot <= cap(s) {
s = s[:tot]
} else {
- news := make([]T, tot, tot + tot/2)
+ news := make([]T, tot, tot+tot/2)
_Copy(news, s)
s = news
}
@@ -156,37 +156,37 @@ func _Copy[T any](s, t []T) int {
}
func TestEqual() {
- s1 := []int{1, 2, 3}
- if !_Equal(s1, s1) {
- panic(fmt.Sprintf("_Equal(%v, %v) = false, want true", s1, s1))
- }
- s2 := []int{1, 2, 3}
- if !_Equal(s1, s2) {
- panic(fmt.Sprintf("_Equal(%v, %v) = false, want true", s1, s2))
- }
- s2 = append(s2, 4)
- if _Equal(s1, s2) {
- panic(fmt.Sprintf("_Equal(%v, %v) = true, want false", s1, s2))
- }
-
- s3 := []float64{1, 2, math.NaN()}
- if !_Equal(s3, s3) {
- panic(fmt.Sprintf("_Equal(%v, %v) = false, want true", s3, s3))
- }
-
- if _Equal(s1, nil) {
- panic(fmt.Sprintf("_Equal(%v, nil) = true, want false", s1))
- }
- if _Equal(nil, s1) {
- panic(fmt.Sprintf("_Equal(nil, %v) = true, want false", s1))
- }
- if !_Equal(s1[:0], nil) {
- panic(fmt.Sprintf("_Equal(%v, nil = false, want true", s1[:0]))
- }
+ s1 := []int{1, 2, 3}
+ if !_Equal(s1, s1) {
+ panic(fmt.Sprintf("_Equal(%v, %v) = false, want true", s1, s1))
+ }
+ s2 := []int{1, 2, 3}
+ if !_Equal(s1, s2) {
+ panic(fmt.Sprintf("_Equal(%v, %v) = false, want true", s1, s2))
+ }
+ s2 = append(s2, 4)
+ if _Equal(s1, s2) {
+ panic(fmt.Sprintf("_Equal(%v, %v) = true, want false", s1, s2))
+ }
+
+ s3 := []float64{1, 2, math.NaN()}
+ if !_Equal(s3, s3) {
+ panic(fmt.Sprintf("_Equal(%v, %v) = false, want true", s3, s3))
+ }
+
+ if _Equal(s1, nil) {
+ panic(fmt.Sprintf("_Equal(%v, nil) = true, want false", s1))
+ }
+ if _Equal(nil, s1) {
+ panic(fmt.Sprintf("_Equal(nil, %v) = true, want false", s1))
+ }
+ if !_Equal(s1[:0], nil) {
+ panic(fmt.Sprintf("_Equal(%v, nil = false, want true", s1[:0]))
+ }
}
func offByOne[Elem Integer](a, b Elem) bool {
- return a == b + 1 || a == b - 1
+ return a == b+1 || a == b-1
}
func TestEqualFn() {
@@ -231,12 +231,12 @@ func TestMap() {
func TestReduce() {
s1 := []int{1, 2, 3}
- r := _Reduce(s1, 0, func(f float64, i int) float64 { return float64(i) * 2.5 + f })
+ r := _Reduce(s1, 0, func(f float64, i int) float64 { return float64(i)*2.5 + f })
if want := 15.0; r != want {
panic(fmt.Sprintf("_Reduce(%v, 0, ...) = %v, want %v", s1, r, want))
}
- if got := _Reduce(nil, 0, func(i, j int) int { return i + j}); got != 0 {
+ if got := _Reduce(nil, 0, func(i, j int) int { return i + j }); got != 0 {
panic(fmt.Sprintf("_Reduce(nil, 0, add) = %v, want 0", got))
}
}
diff --git a/test/typeparam/smallest.go b/test/typeparam/smallest.go
index 63dd9ddb70..af1d72d899 100644
--- a/test/typeparam/smallest.go
+++ b/test/typeparam/smallest.go
@@ -11,13 +11,13 @@ import (
)
type Ordered interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64,
- string
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~string
}
-func smallest[T Ordered](s []T) T {
+func Smallest[T Ordered](s []T) T {
r := s[0] // panics if slice is empty
for _, v := range s[1:] {
if v < r {
@@ -32,11 +32,11 @@ func main() {
vec2 := []string{"abc", "def", "aaa"}
want1 := 1.2
- if got := smallest(vec1); got != want1 {
+ if got := Smallest(vec1); got != want1 {
panic(fmt.Sprintf("got %d, want %d", got, want1))
}
want2 := "aaa"
- if got := smallest(vec2); got != want2 {
+ if got := Smallest(vec2); got != want2 {
panic(fmt.Sprintf("got %d, want %d", got, want2))
}
}
diff --git a/test/typeparam/smoketest.go b/test/typeparam/smoketest.go
index b7d6201b2c..5243dc5c3c 100644
--- a/test/typeparam/smoketest.go
+++ b/test/typeparam/smoketest.go
@@ -9,9 +9,9 @@
package smoketest
// type parameters for functions
-func f1[P any]()
-func f2[P1, P2 any, P3 any]()
-func f3[P interface{}](x P, y T1[int])
+func f1[P any]() {}
+func f2[P1, P2 any, P3 any]() {}
+func f3[P interface{}](x P, y T1[int]) {}
// function instantiations
var _ = f1[int]
@@ -29,15 +29,15 @@ type _ T2[int, string, struct{}]
type _ T3[bool]
// methods
-func (T1[P]) m1() {}
-func (T1[_]) m2() {}
+func (T1[P]) m1() {}
+func (T1[_]) m2() {}
func (x T2[P1, P2, P3]) m() {}
// type lists
type _ interface {
m1()
m2()
- type int, float32, string
+ int | float32 | string
m3()
}
diff --git a/test/typeparam/stringable.go b/test/typeparam/stringable.go
index 9340a3b10a..855a1edb3b 100644
--- a/test/typeparam/stringable.go
+++ b/test/typeparam/stringable.go
@@ -16,11 +16,11 @@ type Stringer interface {
String() string
}
-// stringableList is a slice of some type, where the type
+// StringableList is a slice of some type, where the type
// must have a String method.
-type stringableList[T Stringer] []T
+type StringableList[T Stringer] []T
-func (s stringableList[T]) String() string {
+func (s StringableList[T]) String() string {
var sb strings.Builder
for i, v := range s {
if i > 0 {
@@ -38,9 +38,9 @@ func (a myint) String() string {
}
func main() {
- v := stringableList[myint]{ myint(1), myint(2) }
+ v := StringableList[myint]{myint(1), myint(2)}
if got, want := v.String(), "1, 2"; got != want {
- panic(fmt.Sprintf("got %s, want %s", got, want))
+ panic(fmt.Sprintf("got %s, want %s", got, want))
}
}
diff --git a/test/typeparam/stringerimp.dir/a.go b/test/typeparam/stringerimp.dir/a.go
new file mode 100644
index 0000000000..3f70937ff5
--- /dev/null
+++ b/test/typeparam/stringerimp.dir/a.go
@@ -0,0 +1,16 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Stringer interface {
+ String() string
+}
+
+func Stringify[T Stringer](s []T) (ret []string) {
+ for _, v := range s {
+ ret = append(ret, v.String())
+ }
+ return ret
+}
diff --git a/test/typeparam/stringerimp.dir/main.go b/test/typeparam/stringerimp.dir/main.go
new file mode 100644
index 0000000000..e30bdf1abe
--- /dev/null
+++ b/test/typeparam/stringerimp.dir/main.go
@@ -0,0 +1,38 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+ "reflect"
+ "strconv"
+)
+
+type myint int
+
+func (i myint) String() string {
+ return strconv.Itoa(int(i))
+}
+
+func main() {
+ x := []myint{myint(1), myint(2), myint(3)}
+
+ got := a.Stringify(x)
+ want := []string{"1", "2", "3"}
+ if !reflect.DeepEqual(got, want) {
+ panic(fmt.Sprintf("got %s, want %s", got, want))
+ }
+
+ m1 := myint(1)
+ m2 := myint(2)
+ m3 := myint(3)
+ y := []*myint{&m1, &m2, &m3}
+ got2 := a.Stringify(y)
+ want2 := []string{"1", "2", "3"}
+ if !reflect.DeepEqual(got2, want2) {
+ panic(fmt.Sprintf("got %s, want %s", got2, want2))
+ }
+}
diff --git a/test/typeparam/stringerimp.go b/test/typeparam/stringerimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/stringerimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/struct.go b/test/typeparam/struct.go
index 98f0fcd888..ad1b41ddac 100644
--- a/test/typeparam/struct.go
+++ b/test/typeparam/struct.go
@@ -10,40 +10,40 @@ import (
"fmt"
)
-type _E[T any] struct {
+type E[T any] struct {
v T
}
-type _S1 struct {
- _E[int]
+type S1 struct {
+ E[int]
v string
}
-type _Eint = _E[int]
-type _Ebool = _E[bool]
+type Eint = E[int]
+type Ebool = E[bool]
-type _S2 struct {
- _Eint
- _Ebool
+type S2 struct {
+ Eint
+ Ebool
v string
}
-type _S3 struct {
- *_E[int]
+type S3 struct {
+ *E[int]
}
func main() {
- s1 := _S1{_Eint{2}, "foo"}
- if got, want := s1._E.v, 2; got != want {
- panic(fmt.Sprintf("got %d, want %d", got, want))
+ s1 := S1{Eint{2}, "foo"}
+ if got, want := s1.E.v, 2; got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
}
- s2 := _S2{_Eint{3}, _Ebool{true}, "foo"}
- if got, want := s2._Eint.v, 3; got != want {
- panic(fmt.Sprintf("got %d, want %d", got, want))
+ s2 := S2{Eint{3}, Ebool{true}, "foo"}
+ if got, want := s2.Eint.v, 3; got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
}
- var s3 _S3
- s3._E = &_Eint{4}
- if got, want := s3._E.v, 4; got != want {
- panic(fmt.Sprintf("got %d, want %d", got, want))
+ var s3 S3
+ s3.E = &Eint{4}
+ if got, want := s3.E.v, 4; got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
}
}
diff --git a/test/typeparam/subdict.go b/test/typeparam/subdict.go
new file mode 100644
index 0000000000..b4e84baf8a
--- /dev/null
+++ b/test/typeparam/subdict.go
@@ -0,0 +1,41 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test cases where a main dictionary is needed inside a generic function/method, because
+// we are calling a method on a fully-instantiated type or a fully-instantiated function.
+// (probably not common situations, of course)
+
+package main
+
+import (
+ "fmt"
+)
+
+type value[T comparable] struct {
+ val T
+}
+
+func (v *value[T]) test(def T) bool {
+ return (v.val == def)
+}
+
+func (v *value[T]) get(def T) T {
+ var c value[int]
+ if c.test(32) {
+ return def
+ } else if v.test(def) {
+ return def
+ } else {
+ return v.val
+ }
+}
+
+func main() {
+ var s value[string]
+ if got, want := s.get("ab"), ""; got != want {
+ panic(fmt.Sprintf("get() == %d, want %d", got, want))
+ }
+}
diff --git a/test/typeparam/sum.go b/test/typeparam/sum.go
index f0f5e6aa07..d444e007a3 100644
--- a/test/typeparam/sum.go
+++ b/test/typeparam/sum.go
@@ -10,7 +10,7 @@ import (
"fmt"
)
-func sum[T interface{ type int, float64 }](vec []T) T {
+func Sum[T interface{ int | float64 }](vec []T) T {
var sum T
for _, elt := range vec {
sum = sum + elt
@@ -18,7 +18,7 @@ func sum[T interface{ type int, float64 }](vec []T) T {
return sum
}
-func abs(f float64) float64 {
+func Abs(f float64) float64 {
if f < 0.0 {
return -f
}
@@ -28,23 +28,23 @@ func abs(f float64) float64 {
func main() {
vec1 := []int{3, 4}
vec2 := []float64{5.8, 9.6}
- got := sum[int](vec1)
+ got := Sum[int](vec1)
want := vec1[0] + vec1[1]
if got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
- got = sum(vec1)
+ got = Sum(vec1)
if want != got {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
fwant := vec2[0] + vec2[1]
- fgot := sum[float64](vec2)
- if abs(fgot - fwant) > 1e-10 {
+ fgot := Sum[float64](vec2)
+ if Abs(fgot-fwant) > 1e-10 {
panic(fmt.Sprintf("got %f, want %f", fgot, fwant))
}
- fgot = sum(vec2)
- if abs(fgot - fwant) > 1e-10 {
+ fgot = Sum(vec2)
+ if Abs(fgot-fwant) > 1e-10 {
panic(fmt.Sprintf("got %f, want %f", fgot, fwant))
}
}
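sum.go now exercises both explicit instantiation (Sum[int](vec1)) and inference of the type argument from the slice (Sum(vec1)). A minimal, self-contained sketch of the same pattern, written independently of the patch with an assumed constraint name Number, might read:

    package main

    import "fmt"

    // Number's type set is int or float64, mirroring Sum's constraint above.
    type Number interface {
        int | float64
    }

    // Total adds up a slice of any type in Number's type set.
    func Total[T Number](xs []T) T {
        var t T
        for _, x := range xs {
            t += x
        }
        return t
    }

    func main() {
        fmt.Println(Total[int]([]int{3, 4}))     // explicit instantiation: 7
        fmt.Println(Total([]float64{1.5, 2.25})) // type argument inferred: 3.75
    }

Both calls resolve to the same generic function; only the way the type argument is supplied differs, which is why the test checks both forms against the same expected value.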
diff --git a/test/typeparam/tparam1.go b/test/typeparam/tparam1.go
index 7043933326..698877a6f0 100644
--- a/test/typeparam/tparam1.go
+++ b/test/typeparam/tparam1.go
@@ -10,33 +10,33 @@ package tparam1
// The predeclared identifier "any" is only visible as a constraint
// in a type parameter list.
-var _ any // ERROR "undefined"
-func _(_ any) // ERROR "undefined"
-type _[_ any /* ok here */ ] struct{}
+var _ any // ERROR "cannot use any outside constraint position"
+func _(_ any) // ERROR "cannot use any outside constraint position"
+type _[_ any /* ok here */] struct{}
const N = 10
type (
- _[] struct{} // slice
- _[N] struct{} // array
- _[T any] struct{}
- _[T, T any] struct{} // ERROR "T redeclared"
- _[T1, T2 any, T3 any] struct{}
+ _ []struct{} // slice
+ _ [N]struct{} // array
+ _[T any] struct{}
+ _[T, T any] struct{} // ERROR "T redeclared"
+ _[T1, T2 any, T3 any] struct{}
)
-func _[T any]()
-func _[T, T any]() // ERROR "T redeclared"
-func _[T1, T2 any](x T1) T2
+func _[T any]() {}
+func _[T, T any]() {} // ERROR "T redeclared"
+func _[T1, T2 any](x T1) T2 { panic(0) }
// Type parameters are visible from opening [ to end of function.
type C interface{}
-func _[T interface{}]()
-func _[T C]()
-func _[T struct{}]() // ERROR "not an interface"
-func _[T interface{ m() T }]()
+func _[T interface{}]() {}
+func _[T C]() {}
+func _[T struct{}]() {} // ERROR "not an interface"
+func _[T interface{ m() T }]() {}
func _[T1 interface{ m() T2 }, T2 interface{ m() T1 }]() {
- var _ T1
+ var _ T1
}
// TODO(gri) expand this
diff --git a/test/typeparam/typelist.go b/test/typeparam/typelist.go
index bd90d86fcf..5ba14261ab 100644
--- a/test/typeparam/typelist.go
+++ b/test/typeparam/typelist.go
@@ -10,19 +10,19 @@ package p
// Assignability of an unnamed pointer type to a type parameter that
// has a matching underlying type.
-func _[T interface{}, PT interface{type *T}] (x T) PT {
- return &x
+func _[T interface{}, PT interface{ type *T }](x T) PT {
+ return &x
}
// Indexing of generic types containing type parameters in their type list:
func at[T interface{ type []E }, E any](x T, i int) E {
- return x[i]
+ return x[i]
}
// A generic type inside a function acts like a named type. Its underlying
// type is itself; its "operational type" is defined by the type list in
// the type bound, if any.
-func _[T interface{type int}](x T) {
+func _[T interface{ type int }](x T) {
type myint int
var _ int = int(x)
var _ T = 42
@@ -30,7 +30,7 @@ func _[T interface{type int}](x T) {
}
// Indexing a generic type which has a structural constraint to be an array.
-func _[T interface { type [10]int }](x T) {
+func _[T interface{ type [10]int }](x T) {
_ = x[9] // ok
}
@@ -44,7 +44,7 @@ func _[T interface{ type *int }](p T) int {
func _[T interface{ type chan int }](ch T) int {
// This would deadlock if executed (but ok for a compile test)
ch <- 0
- return <- ch
+ return <-ch
}
// Calling of a generic type which has a structural constraint to be a function.
@@ -59,32 +59,34 @@ func _[T interface{ type func(string) int }](f T) int {
}
// Map access of a generic type which has a structural constraint to be a map.
-func _[V any, T interface { type map[string]V }](p T) V {
+func _[V any, T interface{ type map[string]V }](p T) V {
return p["test"]
}
-
// Testing partial and full type inference, including the case where the types can
// be inferred without needing the types of the function arguments.
+// Cannot embed stand-alone type parameters. Disabled for now.
+/*
func f0[A any, B interface{type C}, C interface{type D}, D interface{type A}](A, B, C, D)
-func _() {
+func f0x() {
f := f0[string]
f("a", "b", "c", "d")
f0("a", "b", "c", "d")
}
func f1[A any, B interface{type A}](A, B)
-func _() {
+func f1x() {
f := f1[int]
f(int(0), int(0))
f1(int(0), int(0))
f(0, 0)
f1(0, 0)
}
+*/
-func f2[A any, B interface{type []A}](_ A, _ B)
-func _() {
+func f2[A any, B interface{ type []A }](_ A, _ B) {}
+func f2x() {
f := f2[byte]
f(byte(0), []byte{})
f2(byte(0), []byte{})
@@ -92,31 +94,39 @@ func _() {
// f2(0, []byte{}) - this one doesn't work
}
+// Cannot embed stand-alone type parameters. Disabled for now.
+/*
func f3[A any, B interface{type C}, C interface{type *A}](a A, _ B, c C)
-func _() {
+func f3x() {
f := f3[int]
var x int
f(x, &x, &x)
f3(x, &x, &x)
}
+*/
-func f4[A any, B interface{type []C}, C interface{type *A}](_ A, _ B, c C)
-func _() {
+func f4[A any, B interface{ type []C }, C interface{ type *A }](_ A, _ B, c C) {}
+func f4x() {
f := f4[int]
var x int
f(x, []*int{}, &x)
f4(x, []*int{}, &x)
}
-func f5[A interface{type struct{b B; c C}}, B any, C interface{type *B}](x B) A
-func _() {
+func f5[A interface {
+ type struct {
+ b B
+ c C
+ }
+}, B any, C interface{ type *B }](x B) A { panic(0) }
+func f5x() {
x := f5(1.2)
var _ float64 = x.b
var _ float64 = *x.c
}
-func f6[A any, B interface{type struct{f []A}}](B) A
-func _() {
- x := f6(struct{f []string}{})
+func f6[A any, B interface{ type struct{ f []A } }](B) A { panic(0) }
+func f6x() {
+ x := f6(struct{ f []string }{})
var _ string = x
}
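The comments in typelist.go spell out which operations a structural constraint makes available on a type parameter: indexing, pointer dereference, channel send/receive, calls, and map access. The hunks keep the draft "type ..." list form; as a hedged sketch of two of these cases in the later union/approximation syntax (an assumption here, since this file predates that syntax), the slice-indexing and map-access functions could be written as:

    package main

    import "fmt"

    // at returns element i of any slice-typed parameter. The constraint ~[]E
    // plays the role of the draft "type []E" list used in the test.
    func at[T ~[]E, E any](x T, i int) E {
        return x[i]
    }

    // get looks up a fixed key in any map type with string keys.
    func get[V any, T ~map[string]V](m T) V {
        return m["test"]
    }

    func main() {
        type names []string
        fmt.Println(at(names{"a", "b", "c"}, 1))     // b
        fmt.Println(get(map[string]int{"test": 42})) // 42
    }

In both functions the operation is legal only because the constraint fixes the core type of T, which is the same idea the test file states for channels, pointers, functions, and arrays.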
diff --git a/test/typeparam/typeswitch1.go b/test/typeparam/typeswitch1.go
new file mode 100644
index 0000000000..27161b3db8
--- /dev/null
+++ b/test/typeparam/typeswitch1.go
@@ -0,0 +1,29 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func f[T any](i interface{}) {
+ switch i.(type) {
+ case T:
+ println("T")
+ case int:
+ println("int")
+ case int32, int16:
+ println("int32/int16")
+ case struct { a, b T }:
+ println("struct{T,T}")
+ default:
+ println("other")
+ }
+}
+func main() {
+ f[float64](float64(6))
+ f[float64](int(7))
+ f[float64](int32(8))
+ f[float64](struct{a, b float64}{a:1, b:2})
+ f[float64](int8(9))
+}
diff --git a/test/typeparam/typeswitch1.out b/test/typeparam/typeswitch1.out
new file mode 100644
index 0000000000..4bdbccfddb
--- /dev/null
+++ b/test/typeparam/typeswitch1.out
@@ -0,0 +1,5 @@
+T
+int
+int32/int16
+struct{T,T}
+other
diff --git a/test/typeparam/typeswitch2.go b/test/typeparam/typeswitch2.go
new file mode 100644
index 0000000000..913c56321c
--- /dev/null
+++ b/test/typeparam/typeswitch2.go
@@ -0,0 +1,31 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "reflect"
+
+func f[T any](i interface{}) {
+ switch x := i.(type) {
+ case T:
+ println("T", x)
+ case int:
+ println("int", x)
+ case int32, int16:
+ println("int32/int16", reflect.ValueOf(x).Int())
+ case struct { a, b T }:
+ println("struct{T,T}", x.a, x.b)
+ default:
+ println("other", reflect.ValueOf(x).Int())
+ }
+}
+func main() {
+ f[float64](float64(6))
+ f[float64](int(7))
+ f[float64](int32(8))
+ f[float64](struct{a, b float64}{a:1, b:2})
+ f[float64](int8(9))
+}
diff --git a/test/typeparam/typeswitch2.out b/test/typeparam/typeswitch2.out
new file mode 100644
index 0000000000..944cc04cc6
--- /dev/null
+++ b/test/typeparam/typeswitch2.out
@@ -0,0 +1,5 @@
+T +6.000000e+000
+int 7
+int32/int16 8
+struct{T,T} +1.000000e+000 +2.000000e+000
+other 9
diff --git a/test/typeparam/typeswitch3.go b/test/typeparam/typeswitch3.go
new file mode 100644
index 0000000000..6ab0301140
--- /dev/null
+++ b/test/typeparam/typeswitch3.go
@@ -0,0 +1,35 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+type I interface { foo() int }
+
+type myint int
+
+func (x myint) foo() int { return int(x) }
+
+type myfloat float64
+func (x myfloat) foo() int { return int(x) }
+
+type myint32 int32
+func (x myint32) foo() int { return int(x) }
+
+func f[T I](i I) {
+ switch x := i.(type) {
+ case T:
+ println("T", x.foo())
+ case myint:
+ println("myint", x.foo())
+ default:
+ println("other", x.foo())
+ }
+}
+func main() {
+ f[myfloat](myint(6))
+ f[myfloat](myfloat(7))
+ f[myfloat](myint32(8))
+}
diff --git a/test/typeparam/typeswitch3.out b/test/typeparam/typeswitch3.out
new file mode 100644
index 0000000000..2c69c72c30
--- /dev/null
+++ b/test/typeparam/typeswitch3.out
@@ -0,0 +1,3 @@
+myint 6
+T 7
+other 8
diff --git a/test/typeparam/typeswitch4.go b/test/typeparam/typeswitch4.go
new file mode 100644
index 0000000000..6113026b65
--- /dev/null
+++ b/test/typeparam/typeswitch4.go
@@ -0,0 +1,33 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+type I interface { foo() int }
+
+type myint int
+
+func (x myint) foo() int {return int(x)}
+
+type myfloat float64
+func (x myfloat) foo() int {return int(x)}
+
+type myint32 int32
+func (x myint32) foo() int { return int(x) }
+
+func f[T I](i I) {
+ switch x := i.(type) {
+ case T, myint32:
+ println("T/myint32", x.foo())
+ default:
+ println("other", x.foo())
+ }
+}
+func main() {
+ f[myfloat](myint(6))
+ f[myfloat](myfloat(7))
+ f[myfloat](myint32(8))
+}
diff --git a/test/typeparam/typeswitch4.out b/test/typeparam/typeswitch4.out
new file mode 100644
index 0000000000..b0d54077c9
--- /dev/null
+++ b/test/typeparam/typeswitch4.out
@@ -0,0 +1,3 @@
+other 6
+T/myint32 7
+T/myint32 8
diff --git a/test/typeparam/typeswitch5.go b/test/typeparam/typeswitch5.go
new file mode 100644
index 0000000000..1fc6e0a14e
--- /dev/null
+++ b/test/typeparam/typeswitch5.go
@@ -0,0 +1,28 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+type myint int
+func (x myint) foo() int {return int(x)}
+
+type myfloat float64
+func (x myfloat) foo() float64 {return float64(x) }
+
+func f[T any](i interface{}) {
+ switch x := i.(type) {
+ case interface { foo() T }:
+ println("fooer", x.foo())
+ default:
+ println("other")
+ }
+}
+func main() {
+ f[int](myint(6))
+ f[int](myfloat(7))
+ f[float64](myint(8))
+ f[float64](myfloat(9))
+}
diff --git a/test/typeparam/typeswitch5.out b/test/typeparam/typeswitch5.out
new file mode 100644
index 0000000000..6b4cb4416f
--- /dev/null
+++ b/test/typeparam/typeswitch5.out
@@ -0,0 +1,4 @@
+fooer 6
+other
+other
+fooer +9.000000e+000
diff --git a/test/typeparam/typeswitch6.go b/test/typeparam/typeswitch6.go
new file mode 100644
index 0000000000..574f4aa819
--- /dev/null
+++ b/test/typeparam/typeswitch6.go
@@ -0,0 +1,30 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func f[T any](i interface{}) {
+ switch i.(type) {
+ case T:
+ println("T")
+ case int:
+ println("int")
+ default:
+ println("other")
+ }
+}
+
+type myint int
+func (myint) foo() {
+}
+
+func main() {
+ f[interface{}](nil)
+ f[interface{}](6)
+ f[interface{foo()}](nil)
+ f[interface{foo()}](7)
+ f[interface{foo()}](myint(8))
+}
diff --git a/test/typeparam/typeswitch6.out b/test/typeparam/typeswitch6.out
new file mode 100644
index 0000000000..441add5ec5
--- /dev/null
+++ b/test/typeparam/typeswitch6.out
@@ -0,0 +1,5 @@
+other
+T
+other
+int
+T
diff --git a/test/typeparam/typeswitch7.go b/test/typeparam/typeswitch7.go
new file mode 100644
index 0000000000..f2e1279fb4
--- /dev/null
+++ b/test/typeparam/typeswitch7.go
@@ -0,0 +1,37 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func f[T any](i interface{foo()}) {
+ switch i.(type) {
+ case interface{bar() T}:
+ println("barT")
+ case myint:
+ println("myint")
+ case myfloat:
+ println("myfloat")
+ default:
+ println("other")
+ }
+}
+
+type myint int
+func (myint) foo() {
+}
+func (x myint) bar() int {
+ return int(x)
+}
+
+type myfloat float64
+func (myfloat) foo() {
+}
+
+func main() {
+ f[int](nil)
+ f[int](myint(6))
+ f[int](myfloat(7))
+}
diff --git a/test/typeparam/typeswitch7.out b/test/typeparam/typeswitch7.out
new file mode 100644
index 0000000000..d7fcad4fee
--- /dev/null
+++ b/test/typeparam/typeswitch7.out
@@ -0,0 +1,3 @@
+other
+barT
+myfloat
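The typeswitch tests above each pair a program with a golden .out file; the common thread is that a case naming the type parameter (case T, or an interface written in terms of T) is matched against the instantiated type argument, so the same switch selects different cases per instantiation. A compressed sketch of that idea, written for this note rather than copied from the tests:

    package main

    import "fmt"

    // describe classifies the dynamic type of i against the type argument T.
    func describe[T any](i interface{}) string {
        switch i.(type) {
        case T:
            return "T"
        case int:
            return "int"
        default:
            return "other"
        }
    }

    func main() {
        fmt.Println(describe[float64](1.5)) // T
        fmt.Println(describe[string](1.5))  // other
        fmt.Println(describe[float64](7))   // int
    }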
diff --git a/test/typeparam/valimp.dir/a.go b/test/typeparam/valimp.dir/a.go
new file mode 100644
index 0000000000..2ed0063cfd
--- /dev/null
+++ b/test/typeparam/valimp.dir/a.go
@@ -0,0 +1,32 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Value[T any] struct {
+ val T
+}
+
+// The noinline directive should survive across import, and prevent instantiations
+// of these functions from being inlined.
+
+//go:noinline
+func Get[T any](v *Value[T]) T {
+ return v.val
+}
+
+//go:noinline
+func Set[T any](v *Value[T], val T) {
+ v.val = val
+}
+
+//go:noinline
+func (v *Value[T]) Set(val T) {
+ v.val = val
+}
+
+//go:noinline
+func (v *Value[T]) Get() T {
+ return v.val
+}
diff --git a/test/typeparam/valimp.dir/main.go b/test/typeparam/valimp.dir/main.go
new file mode 100644
index 0000000000..606ff2273a
--- /dev/null
+++ b/test/typeparam/valimp.dir/main.go
@@ -0,0 +1,55 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+)
+
+func main() {
+ var v1 a.Value[int]
+
+ a.Set(&v1, 1)
+ if got, want := a.Get(&v1), 1; got != want {
+ panic(fmt.Sprintf("Get() == %d, want %d", got, want))
+ }
+ v1.Set(2)
+ if got, want := v1.Get(), 2; got != want {
+ panic(fmt.Sprintf("Get() == %d, want %d", got, want))
+ }
+ v1p := new(a.Value[int])
+ a.Set(v1p, 3)
+ if got, want := a.Get(v1p), 3; got != want {
+ panic(fmt.Sprintf("Get() == %d, want %d", got, want))
+ }
+
+ v1p.Set(4)
+ if got, want := v1p.Get(), 4; got != want {
+ panic(fmt.Sprintf("Get() == %d, want %d", got, want))
+ }
+
+ var v2 a.Value[string]
+ a.Set(&v2, "a")
+ if got, want := a.Get(&v2), "a"; got != want {
+ panic(fmt.Sprintf("Get() == %q, want %q", got, want))
+ }
+
+ v2.Set("b")
+ if got, want := a.Get(&v2), "b"; got != want {
+ panic(fmt.Sprintf("Get() == %q, want %q", got, want))
+ }
+
+ v2p := new(a.Value[string])
+ a.Set(v2p, "c")
+ if got, want := a.Get(v2p), "c"; got != want {
+ panic(fmt.Sprintf("Get() == %d, want %d", got, want))
+ }
+
+ v2p.Set("d")
+ if got, want := v2p.Get(), "d"; got != want {
+ panic(fmt.Sprintf("Get() == %d, want %d", got, want))
+ }
+}
diff --git a/test/typeparam/valimp.go b/test/typeparam/valimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/valimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/value.go b/test/typeparam/value.go
index 5dd7449d9c..6c6dabcf7c 100644
--- a/test/typeparam/value.go
+++ b/test/typeparam/value.go
@@ -12,7 +12,7 @@ type value[T any] struct {
val T
}
-func get[T2 any](v *value[T2]) T2 {
+func get[T any](v *value[T]) T {
return v.val
}
@@ -20,11 +20,11 @@ func set[T any](v *value[T], val T) {
v.val = val
}
-func (v *value[T2]) set(val T2) {
+func (v *value[T]) set(val T) {
v.val = val
}
-func (v *value[T2]) get() T2 {
+func (v *value[T]) get() T {
return v.val
}
diff --git a/test/uintptrescapes2.go b/test/uintptrescapes2.go
index 3ff1d94042..656286c0ff 100644
--- a/test/uintptrescapes2.go
+++ b/test/uintptrescapes2.go
@@ -30,7 +30,7 @@ type T struct{}
func (T) M1(a uintptr) {} // ERROR "escaping uintptr"
//go:uintptrescapes
-func (T) M2(a ...uintptr) {} // ERROR "escaping ...uintptr" "leaking param: a"
+func (T) M2(a ...uintptr) {} // ERROR "escaping ...uintptr"
func TestF1() {
var t int // ERROR "moved to heap"