-rw-r--r--codereview.cfg3
-rw-r--r--src/cmd/compile/abi-internal.md122
-rw-r--r--src/cmd/compile/internal/amd64/galign.go2
-rw-r--r--src/cmd/compile/internal/amd64/ggen.go10
-rw-r--r--src/cmd/compile/internal/amd64/ssa.go47
-rw-r--r--src/cmd/compile/internal/arm64/galign.go2
-rw-r--r--src/cmd/compile/internal/arm64/ssa.go62
-rw-r--r--src/cmd/compile/internal/base/mapfile_mmap.go (renamed from src/cmd/compile/internal/typecheck/mapfile_mmap.go)4
-rw-r--r--src/cmd/compile/internal/base/mapfile_read.go (renamed from src/cmd/compile/internal/typecheck/mapfile_read.go)4
-rw-r--r--src/cmd/compile/internal/escape/escape.go7
-rw-r--r--src/cmd/compile/internal/gc/export.go116
-rw-r--r--src/cmd/compile/internal/gc/obj.go3
-rw-r--r--src/cmd/compile/internal/importer/exportdata.go1
-rw-r--r--src/cmd/compile/internal/importer/gcimporter.go3
-rw-r--r--src/cmd/compile/internal/importer/gcimporter_test.go23
-rw-r--r--src/cmd/compile/internal/importer/iimport.go269
-rw-r--r--src/cmd/compile/internal/importer/support.go1
-rw-r--r--src/cmd/compile/internal/inline/inl.go332
-rw-r--r--src/cmd/compile/internal/ir/expr.go35
-rw-r--r--src/cmd/compile/internal/ir/fmt.go22
-rw-r--r--src/cmd/compile/internal/ir/func.go118
-rw-r--r--src/cmd/compile/internal/ir/node.go2
-rw-r--r--src/cmd/compile/internal/ir/node_gen.go16
-rw-r--r--src/cmd/compile/internal/ir/type.go13
-rw-r--r--src/cmd/compile/internal/ir/val.go2
-rw-r--r--src/cmd/compile/internal/logopt/logopt_test.go4
-rw-r--r--src/cmd/compile/internal/noder/decl.go53
-rw-r--r--src/cmd/compile/internal/noder/export.go65
-rw-r--r--src/cmd/compile/internal/noder/expr.go95
-rw-r--r--src/cmd/compile/internal/noder/helpers.go26
-rw-r--r--src/cmd/compile/internal/noder/import.go302
-rw-r--r--src/cmd/compile/internal/noder/irgen.go46
-rw-r--r--src/cmd/compile/internal/noder/noder.go147
-rw-r--r--src/cmd/compile/internal/noder/object.go33
-rw-r--r--src/cmd/compile/internal/noder/stencil.go1093
-rw-r--r--src/cmd/compile/internal/noder/transform.go120
-rw-r--r--src/cmd/compile/internal/noder/types.go185
-rw-r--r--src/cmd/compile/internal/reflectdata/reflect.go172
-rw-r--r--src/cmd/compile/internal/ssa/config.go4
-rw-r--r--src/cmd/compile/internal/ssa/gen/AMD64.rules2
-rw-r--r--src/cmd/compile/internal/ssa/gen/ARM64.rules9
-rw-r--r--src/cmd/compile/internal/ssa/gen/ARM64Ops.go28
-rw-r--r--src/cmd/compile/internal/ssa/opGen.go10
-rw-r--r--src/cmd/compile/internal/ssa/rewriteAMD64.go5
-rw-r--r--src/cmd/compile/internal/ssa/rewriteARM64.go30
-rw-r--r--src/cmd/compile/internal/ssa/schedule.go2
-rw-r--r--src/cmd/compile/internal/ssagen/arch.go8
-rw-r--r--src/cmd/compile/internal/ssagen/pgen.go5
-rw-r--r--src/cmd/compile/internal/ssagen/ssa.go327
-rw-r--r--src/cmd/compile/internal/staticdata/embed.go7
-rw-r--r--src/cmd/compile/internal/staticinit/sched.go4
-rw-r--r--src/cmd/compile/internal/syntax/parser.go29
-rw-r--r--src/cmd/compile/internal/syntax/testdata/interface.go246
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue46558.src14
-rw-r--r--src/cmd/compile/internal/test/inl_test.go4
-rw-r--r--src/cmd/compile/internal/typecheck/crawler.go179
-rw-r--r--src/cmd/compile/internal/typecheck/dcl.go9
-rw-r--r--src/cmd/compile/internal/typecheck/expr.go155
-rw-r--r--src/cmd/compile/internal/typecheck/func.go112
-rw-r--r--src/cmd/compile/internal/typecheck/iexport.go299
-rw-r--r--src/cmd/compile/internal/typecheck/iimport.go482
-rw-r--r--src/cmd/compile/internal/typecheck/subr.go413
-rw-r--r--src/cmd/compile/internal/typecheck/typecheck.go11
-rw-r--r--src/cmd/compile/internal/types/fmt.go45
-rw-r--r--src/cmd/compile/internal/types/kind_string.go23
-rw-r--r--src/cmd/compile/internal/types/pkg.go4
-rw-r--r--src/cmd/compile/internal/types/size.go41
-rw-r--r--src/cmd/compile/internal/types/sizeof_test.go2
-rw-r--r--src/cmd/compile/internal/types/sort.go13
-rw-r--r--src/cmd/compile/internal/types/sym.go8
-rw-r--r--src/cmd/compile/internal/types/type.go125
-rw-r--r--src/cmd/compile/internal/types2/api.go8
-rw-r--r--src/cmd/compile/internal/types2/api_test.go31
-rw-r--r--src/cmd/compile/internal/types2/builtins.go26
-rw-r--r--src/cmd/compile/internal/types2/call.go28
-rw-r--r--src/cmd/compile/internal/types2/check.go3
-rw-r--r--src/cmd/compile/internal/types2/decl.go58
-rw-r--r--src/cmd/compile/internal/types2/expr.go4
-rw-r--r--src/cmd/compile/internal/types2/index.go20
-rw-r--r--src/cmd/compile/internal/types2/infer.go19
-rw-r--r--src/cmd/compile/internal/types2/instantiate.go18
-rw-r--r--src/cmd/compile/internal/types2/interface.go318
-rw-r--r--src/cmd/compile/internal/types2/labels.go3
-rw-r--r--src/cmd/compile/internal/types2/lookup.go26
-rw-r--r--src/cmd/compile/internal/types2/object.go77
-rw-r--r--src/cmd/compile/internal/types2/operand.go19
-rw-r--r--src/cmd/compile/internal/types2/package.go22
-rw-r--r--src/cmd/compile/internal/types2/predicates.go45
-rw-r--r--src/cmd/compile/internal/types2/resolver.go31
-rw-r--r--src/cmd/compile/internal/types2/sanitize.go17
-rw-r--r--src/cmd/compile/internal/types2/scope.go93
-rw-r--r--src/cmd/compile/internal/types2/signature.go314
-rw-r--r--src/cmd/compile/internal/types2/sizeof_test.go7
-rw-r--r--src/cmd/compile/internal/types2/sizes.go4
-rw-r--r--src/cmd/compile/internal/types2/stmt.go9
-rw-r--r--src/cmd/compile/internal/types2/struct.go165
-rw-r--r--src/cmd/compile/internal/types2/subst.go181
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/builtins.go28
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/decls0.src10
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/issues.go228
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/issues.src2
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/linalg.go216
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/tinference.go220
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/typeinst2.go229
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/typeparams.go280
-rw-r--r--src/cmd/compile/internal/types2/testdata/examples/constraints.go260
-rw-r--r--src/cmd/compile/internal/types2/testdata/examples/functions.go22
-rw-r--r--src/cmd/compile/internal/types2/testdata/examples/inference.go26
-rw-r--r--src/cmd/compile/internal/types2/testdata/examples/types.go28
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39634.go26
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39680.go24
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39693.go217
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39699.go22
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39711.go24
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39723.go22
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39755.go24
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39948.go28
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue41124.go24
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue42758.go22
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue45548.go22
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue45635.go22
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue45985.go22
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue46275.go226
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue46583.src28
-rw-r--r--src/cmd/compile/internal/types2/type.go365
-rw-r--r--src/cmd/compile/internal/types2/types_test.go9
-rw-r--r--src/cmd/compile/internal/types2/typestring.go40
-rw-r--r--src/cmd/compile/internal/types2/typestring_test.go7
-rw-r--r--src/cmd/compile/internal/types2/typexpr.go773
-rw-r--r--src/cmd/compile/internal/types2/unify.go6
-rw-r--r--src/cmd/compile/internal/types2/union.go236
-rw-r--r--src/cmd/compile/internal/walk/closure.go3
-rw-r--r--src/cmd/compile/internal/walk/complit.go4
-rw-r--r--src/cmd/compile/internal/walk/convert.go2
-rw-r--r--src/cmd/compile/internal/walk/order.go47
-rw-r--r--src/cmd/internal/goobj/builtinlist.go7
-rw-r--r--src/cmd/internal/goobj/mkbuiltin.go4
-rw-r--r--src/cmd/internal/obj/arm64/obj7.go100
-rw-r--r--src/cmd/internal/obj/x86/obj6.go4
-rw-r--r--src/cmd/internal/objabi/funcid.go1
-rw-r--r--src/cmd/link/internal/ld/dwarf_test.go7
-rw-r--r--src/cmd/link/internal/ld/symtab.go1
-rw-r--r--src/cmd/link/link_test.go10
-rw-r--r--src/go/constant/kind_string.go28
-rw-r--r--src/go/constant/value.go2
-rw-r--r--src/go/internal/gcimporter/gcimporter_test.go1
-rw-r--r--src/go/internal/gcimporter/iimport.go53
-rw-r--r--src/go/types/api.go2
-rw-r--r--src/go/types/api_test.go1
-rw-r--r--src/go/types/api_typeparams.go5
-rw-r--r--src/go/types/api_typeparams_test.go2
-rw-r--r--src/go/types/call.go25
-rw-r--r--src/go/types/check.go1
-rw-r--r--src/go/types/check_test.go98
-rw-r--r--src/go/types/decl.go9
-rw-r--r--src/go/types/instantiate.go63
-rw-r--r--src/go/types/interface.go357
-rw-r--r--src/go/types/lookup.go9
-rw-r--r--src/go/types/resolver.go2
-rw-r--r--src/go/types/sanitize.go8
-rw-r--r--src/go/types/signature.go274
-rw-r--r--src/go/types/sizeof_test.go2
-rw-r--r--src/go/types/struct.go154
-rw-r--r--src/go/types/subst.go185
-rw-r--r--src/go/types/testdata/check/decls0.src8
-rw-r--r--src/go/types/testdata/check/tmp.go217
-rw-r--r--src/go/types/testdata/fixedbugs/issue46275.go227
-rw-r--r--src/go/types/testdata/manual.go29
-rw-r--r--src/go/types/type.go166
-rw-r--r--src/go/types/types_test.go11
-rw-r--r--src/go/types/typestring.go2
-rw-r--r--src/go/types/typestring_test.go4
-rw-r--r--src/go/types/typexpr.go716
-rw-r--r--src/internal/abi/abi_arm64.go20
-rw-r--r--src/internal/buildcfg/exp.go24
-rw-r--r--src/internal/bytealg/compare_arm64.s113
-rw-r--r--src/internal/bytealg/equal_arm64.s104
-rw-r--r--src/internal/goexperiment/exp_regabi_off.go9
-rw-r--r--src/internal/goexperiment/exp_regabi_on.go9
-rw-r--r--src/internal/goexperiment/exp_regabidefer_off.go9
-rw-r--r--src/internal/goexperiment/exp_regabidefer_on.go9
-rw-r--r--src/internal/goexperiment/exp_regabig_off.go9
-rw-r--r--src/internal/goexperiment/exp_regabig_on.go9
-rw-r--r--src/internal/goexperiment/flags.go8
-rw-r--r--src/internal/goversion/goversion.go2
-rw-r--r--src/reflect/abi_test.go4
-rw-r--r--src/reflect/asm_amd64.s16
-rw-r--r--src/reflect/asm_arm64.s61
-rw-r--r--src/reflect/makefunc.go12
-rw-r--r--src/runtime/asm.s2
-rw-r--r--src/runtime/asm_386.s2
-rw-r--r--src/runtime/asm_amd64.s32
-rw-r--r--src/runtime/asm_arm.s9
-rw-r--r--src/runtime/asm_arm64.s411
-rw-r--r--src/runtime/asm_mips64x.s7
-rw-r--r--src/runtime/asm_mipsx.s7
-rw-r--r--src/runtime/asm_ppc64x.s3
-rw-r--r--src/runtime/asm_riscv64.s7
-rw-r--r--src/runtime/asm_s390x.s7
-rw-r--r--src/runtime/asm_wasm.s3
-rw-r--r--src/runtime/chan.go5
-rw-r--r--src/runtime/cpuprof.go9
-rw-r--r--src/runtime/debugcall.go2
-rw-r--r--src/runtime/defs_plan9_386.go2
-rw-r--r--src/runtime/defs_plan9_amd64.go2
-rw-r--r--src/runtime/duff_arm64.s4
-rw-r--r--src/runtime/export_debug_test.go2
-rw-r--r--src/runtime/export_test.go22
-rw-r--r--src/runtime/heapdump.go7
-rw-r--r--src/runtime/iface.go9
-rw-r--r--src/runtime/malloc.go11
-rw-r--r--src/runtime/map.go19
-rw-r--r--src/runtime/map_fast32.go11
-rw-r--r--src/runtime/map_fast64.go11
-rw-r--r--src/runtime/map_faststr.go9
-rw-r--r--src/runtime/mbarrier.go6
-rw-r--r--src/runtime/mbitmap.go8
-rw-r--r--src/runtime/memclr_amd64.s3
-rw-r--r--src/runtime/memclr_arm64.s4
-rw-r--r--src/runtime/memmove_amd64.s4
-rw-r--r--src/runtime/memmove_arm64.s4
-rw-r--r--src/runtime/mgc.go33
-rw-r--r--src/runtime/mgcmark.go7
-rw-r--r--src/runtime/mgcscavenge.go4
-rw-r--r--src/runtime/mgcsweep.go4
-rw-r--r--src/runtime/mkduff.go4
-rw-r--r--src/runtime/mkpreempt.go3
-rw-r--r--src/runtime/mprof.go5
-rw-r--r--src/runtime/norace_linux_test.go3
-rw-r--r--src/runtime/os3_plan9.go5
-rw-r--r--src/runtime/os3_solaris.go10
-rw-r--r--src/runtime/os_aix.go3
-rw-r--r--src/runtime/os_darwin.go2
-rw-r--r--src/runtime/os_dragonfly.go9
-rw-r--r--src/runtime/os_freebsd.go7
-rw-r--r--src/runtime/os_freebsd2.go6
-rw-r--r--src/runtime/os_freebsd_amd64.go8
-rw-r--r--src/runtime/os_linux.go13
-rw-r--r--src/runtime/os_netbsd.go7
-rw-r--r--src/runtime/os_netbsd_386.go7
-rw-r--r--src/runtime/os_netbsd_amd64.go7
-rw-r--r--src/runtime/os_netbsd_arm.go7
-rw-r--r--src/runtime/os_netbsd_arm64.go7
-rw-r--r--src/runtime/os_openbsd.go5
-rw-r--r--src/runtime/os_openbsd_libc.go3
-rw-r--r--src/runtime/os_openbsd_syscall.go3
-rw-r--r--src/runtime/os_plan9.go3
-rw-r--r--src/runtime/os_solaris.go3
-rw-r--r--src/runtime/os_windows.go7
-rw-r--r--src/runtime/panic.go301
-rw-r--r--src/runtime/pprof/pprof.go3
-rw-r--r--src/runtime/pprof/pprof_test.go3
-rw-r--r--src/runtime/pprof/proto.go8
-rw-r--r--src/runtime/pprof/proto_test.go9
-rw-r--r--src/runtime/preempt.go5
-rw-r--r--src/runtime/preempt_386.s3
-rw-r--r--src/runtime/preempt_amd64.s3
-rw-r--r--src/runtime/preempt_arm.s3
-rw-r--r--src/runtime/preempt_arm64.s3
-rw-r--r--src/runtime/preempt_mips64x.s3
-rw-r--r--src/runtime/preempt_mipsx.s3
-rw-r--r--src/runtime/preempt_ppc64x.s3
-rw-r--r--src/runtime/preempt_riscv64.s3
-rw-r--r--src/runtime/preempt_s390x.s3
-rw-r--r--src/runtime/preempt_wasm.s3
-rw-r--r--src/runtime/proc.go115
-rw-r--r--src/runtime/race.go3
-rw-r--r--src/runtime/race/output_test.go6
-rw-r--r--src/runtime/race_amd64.s28
-rw-r--r--src/runtime/race_arm64.s34
-rw-r--r--src/runtime/runtime2.go21
-rw-r--r--src/runtime/select.go5
-rw-r--r--src/runtime/signal_386.go5
-rw-r--r--src/runtime/signal_amd64.go5
-rw-r--r--src/runtime/signal_arm.go7
-rw-r--r--src/runtime/signal_arm64.go3
-rw-r--r--src/runtime/signal_linux_s390x.go3
-rw-r--r--src/runtime/signal_mips64x.go3
-rw-r--r--src/runtime/signal_mipsx.go3
-rw-r--r--src/runtime/signal_ppc64x.go5
-rw-r--r--src/runtime/signal_riscv64.go3
-rw-r--r--src/runtime/signal_unix.go11
-rw-r--r--src/runtime/signal_windows.go13
-rw-r--r--src/runtime/slice.go7
-rw-r--r--src/runtime/stack.go13
-rw-r--r--src/runtime/string.go7
-rw-r--r--src/runtime/stubs.go2
-rw-r--r--src/runtime/stubs_arm64.go7
-rw-r--r--src/runtime/sys_darwin_arm64.go5
-rw-r--r--src/runtime/sys_linux_amd64.s23
-rw-r--r--src/runtime/sys_openbsd.go15
-rw-r--r--src/runtime/sys_openbsd1.go13
-rw-r--r--src/runtime/sys_openbsd2.go53
-rw-r--r--src/runtime/sys_openbsd3.go25
-rw-r--r--src/runtime/sys_openbsd_amd64.s2
-rw-r--r--src/runtime/sys_plan9_386.s4
-rw-r--r--src/runtime/sys_plan9_amd64.s4
-rw-r--r--src/runtime/sys_windows_386.s14
-rw-r--r--src/runtime/sys_windows_amd64.s12
-rw-r--r--src/runtime/sys_windows_arm.s12
-rw-r--r--src/runtime/sys_windows_arm64.s18
-rw-r--r--src/runtime/syscall_solaris.go10
-rw-r--r--src/runtime/syscall_windows.go2
-rw-r--r--src/runtime/time.go3
-rw-r--r--src/runtime/time_linux_amd64.s10
-rw-r--r--src/runtime/traceback.go35
-rw-r--r--src/runtime/type.go7
-rw-r--r--src/runtime/wincallback.go6
-rw-r--r--src/runtime/zcallback_windows.s2
-rw-r--r--src/runtime/zcallback_windows_arm.s2
-rw-r--r--src/runtime/zcallback_windows_arm64.s2
-rw-r--r--test/codegen/arithmetic.go4
-rw-r--r--test/codegen/clobberdead.go7
-rw-r--r--test/complit1.go14
-rw-r--r--test/ddd1.go10
-rw-r--r--test/escape2.go44
-rw-r--r--test/escape2n.go44
-rw-r--r--test/escape5.go8
-rw-r--r--test/escape_array.go16
-rw-r--r--test/escape_calls.go2
-rw-r--r--test/escape_closure.go8
-rw-r--r--test/escape_param.go10
-rw-r--r--test/escape_runtime_atomic.go4
-rw-r--r--test/escape_slice.go4
-rw-r--r--test/escape_struct_return.go4
-rw-r--r--test/escape_unsafe.go10
-rw-r--r--test/fixedbugs/bug195.go2
-rw-r--r--test/fixedbugs/bug248.dir/bug2.go4
-rw-r--r--test/fixedbugs/bug345.dir/main.go4
-rw-r--r--test/fixedbugs/bug460.dir/b.go10
-rw-r--r--test/fixedbugs/issue10975.go2
-rw-r--r--test/fixedbugs/issue11614.go2
-rw-r--r--test/fixedbugs/issue12006.go6
-rw-r--r--test/fixedbugs/issue12588.go6
-rw-r--r--test/fixedbugs/issue14999.go4
-rw-r--r--test/fixedbugs/issue24651a.go2
-rw-r--r--test/fixedbugs/issue24651b.go4
-rw-r--r--test/fixedbugs/issue30898.go2
-rw-r--r--test/fixedbugs/issue42284.dir/a.go2
-rw-r--r--test/fixedbugs/issue44432.go4
-rw-r--r--test/fixedbugs/issue46556.go16
-rw-r--r--test/fixedbugs/issue4909b.go2
-rw-r--r--test/inline_big.go2
-rw-r--r--test/inline_variadic.go2
-rw-r--r--test/live.go8
-rw-r--r--test/live_regabi.go2
-rw-r--r--test/run.go388
-rw-r--r--test/typeparam/absdiff.go19
-rw-r--r--test/typeparam/absdiffimp.dir/a.go75
-rw-r--r--test/typeparam/absdiffimp.dir/main.go29
-rw-r--r--test/typeparam/absdiffimp.go7
-rw-r--r--test/typeparam/adder.go10
-rw-r--r--test/typeparam/aliasimp.dir/a.go9
-rw-r--r--test/typeparam/aliasimp.dir/main.go38
-rw-r--r--test/typeparam/aliasimp.go7
-rw-r--r--test/typeparam/chansimp.dir/a.go232
-rw-r--r--test/typeparam/chansimp.dir/main.go189
-rw-r--r--test/typeparam/chansimp.go7
-rw-r--r--test/typeparam/combine.go28
-rw-r--r--test/typeparam/cons.go46
-rw-r--r--test/typeparam/dictionaryCapture-noinline.go126
-rw-r--r--test/typeparam/dictionaryCapture.go126
-rw-r--r--test/typeparam/double.go2
-rw-r--r--test/typeparam/fact.go8
-rw-r--r--test/typeparam/factimp.dir/a.go12
-rw-r--r--test/typeparam/factimp.dir/main.go26
-rw-r--r--test/typeparam/factimp.go7
-rw-r--r--test/typeparam/ifaceconv.go58
-rw-r--r--test/typeparam/index.go8
-rw-r--r--test/typeparam/issue45817.go25
-rw-r--r--test/typeparam/issue46472.go20
-rw-r--r--test/typeparam/list.go14
-rw-r--r--test/typeparam/list2.go9
-rw-r--r--test/typeparam/listimp.dir/a.go53
-rw-r--r--test/typeparam/listimp.dir/main.go52
-rw-r--r--test/typeparam/listimp.go7
-rw-r--r--test/typeparam/listimp2.dir/a.go298
-rw-r--r--test/typeparam/listimp2.dir/main.go316
-rw-r--r--test/typeparam/listimp2.go7
-rw-r--r--test/typeparam/lockable.go16
-rw-r--r--test/typeparam/mapimp.dir/a.go15
-rw-r--r--test/typeparam/mapimp.dir/main.go28
-rw-r--r--test/typeparam/mapimp.go7
-rw-r--r--test/typeparam/mapsimp.dir/a.go108
-rw-r--r--test/typeparam/mapsimp.dir/main.go156
-rw-r--r--test/typeparam/mapsimp.go7
-rw-r--r--test/typeparam/min.go11
-rw-r--r--test/typeparam/mincheck.dir/a.go16
-rw-r--r--test/typeparam/mincheck.dir/main.go38
-rw-r--r--test/typeparam/mincheck.go7
-rw-r--r--test/typeparam/minimp.dir/a.go16
-rw-r--r--test/typeparam/minimp.dir/main.go38
-rw-r--r--test/typeparam/minimp.go7
-rw-r--r--test/typeparam/mutualimp.dir/a.go11
-rw-r--r--test/typeparam/mutualimp.dir/b.go12
-rw-r--r--test/typeparam/mutualimp.go7
-rw-r--r--test/typeparam/ordered.go8
-rw-r--r--test/typeparam/orderedmap.go8
-rw-r--r--test/typeparam/orderedmapsimp.dir/a.go226
-rw-r--r--test/typeparam/orderedmapsimp.dir/main.go64
-rw-r--r--test/typeparam/orderedmapsimp.go7
-rw-r--r--test/typeparam/pair.go1
-rw-r--r--test/typeparam/pairimp.dir/a.go10
-rw-r--r--test/typeparam/pairimp.dir/main.go27
-rw-r--r--test/typeparam/pairimp.go7
-rw-r--r--test/typeparam/setsimp.dir/a.go128
-rw-r--r--test/typeparam/setsimp.dir/main.go156
-rw-r--r--test/typeparam/setsimp.go7
-rw-r--r--test/typeparam/settable.go10
-rw-r--r--test/typeparam/sliceimp.dir/a.go141
-rw-r--r--test/typeparam/sliceimp.dir/main.go179
-rw-r--r--test/typeparam/sliceimp.go7
-rw-r--r--test/typeparam/slices.go12
-rw-r--r--test/typeparam/smallest.go14
-rw-r--r--test/typeparam/smoketest.go2
-rw-r--r--test/typeparam/stringable.go8
-rw-r--r--test/typeparam/stringerimp.dir/a.go16
-rw-r--r--test/typeparam/stringerimp.dir/main.go38
-rw-r--r--test/typeparam/stringerimp.go7
-rw-r--r--test/typeparam/struct.go34
-rw-r--r--test/typeparam/sum.go16
-rw-r--r--test/typeparam/valimp.dir/a.go32
-rw-r--r--test/typeparam/valimp.dir/main.go56
-rw-r--r--test/typeparam/valimp.go7
-rw-r--r--test/typeparam/value.go6
425 files changed, 12265 insertions, 6006 deletions
diff --git a/codereview.cfg b/codereview.cfg
index 77a74f108e..1f58fdbeb2 100644
--- a/codereview.cfg
+++ b/codereview.cfg
@@ -1 +1,2 @@
-branch: master
+branch: dev.typeparams
+parent-branch: master
diff --git a/src/cmd/compile/abi-internal.md b/src/cmd/compile/abi-internal.md
index 1ae3c2538f..7aed7efe97 100644
--- a/src/cmd/compile/abi-internal.md
+++ b/src/cmd/compile/abi-internal.md
@@ -505,6 +505,128 @@ control bits specified by the ELF AMD64 ABI.
The x87 floating-point control word is not used by Go on amd64.
+### arm64 architecture
+
+The arm64 architecture uses R0 – R15 for integer arguments and results.
+
+It uses F0 – F15 for floating-point arguments and results.
+
+*Rationale*: 16 integer registers and 16 floating-point registers are
+more than enough for passing arguments and results for practically all
+functions (see Appendix). While there are more registers available,
+using more registers provides little benefit. Additionally, it will add
+overhead on code paths where the number of arguments is not statically
+known (e.g. reflect call), and will consume more stack space when there
+is only limited stack space available to fit in the nosplit limit.
+
+Registers R16 and R17 are permanent scratch registers. They are also
+used as scratch registers by the linker (Go linker and external
+linker) in trampolines.
+
+Register R18 is reserved and never used. It is reserved for the OS
+on some platforms (e.g. macOS).
+
+Registers R19 – R25 are permanent scratch registers. In addition,
+R27 is a permanent scratch register used by the assembler when
+expanding instructions.
+
+Floating-point registers F16 – F31 are also permanent scratch
+registers.
+
+Special-purpose registers are as follows:
+
+| Register | Call meaning | Return meaning | Body meaning |
+| --- | --- | --- | --- |
+| RSP | Stack pointer | Same | Same |
+| R30 | Link register | Same | Scratch (non-leaf functions) |
+| R29 | Frame pointer | Same | Same |
+| R28 | Current goroutine | Same | Same |
+| R27 | Scratch | Scratch | Scratch |
+| R26 | Closure context pointer | Scratch | Scratch |
+| R18 | Reserved (not used) | Same | Same |
+| ZR | Zero value | Same | Same |
+
+*Rationale*: These register meanings are compatible with Go’s
+stack-based calling convention.
+
+*Rationale*: The link register, R30, holds the function return
+address at the function entry. For functions that have frames
+(including most non-leaf functions), R30 is saved to stack in the
+function prologue and restored in the epilogue. Within the function
+body, R30 can be used as a scratch register.
+
+*Implementation note*: Registers with fixed meaning at calls but not
+in function bodies must be initialized by "injected" calls such as
+signal-based panics.
+
+#### Stack layout
+
+The stack pointer, RSP, grows down and is always aligned to 16 bytes.
+
+*Rationale*: The arm64 architecture requires the stack pointer to be
+16-byte aligned.
+
+A function's stack frame, after the frame is created, is laid out as
+follows:
+
+ +------------------------------+
+ | ... locals ... |
+ | ... outgoing arguments ... |
+ | return PC | ← RSP points to
+ | frame pointer on entry |
+ +------------------------------+ ↓ lower addresses
+
+The "return PC" is loaded to the link register, R30, as part of the
+arm64 `CALL` operation.
+
+On entry, a function subtracts from RSP to open its stack frame, and
+saves the values of R30 and R29 at the bottom of the frame.
+Specifically, R30 is saved at 0(RSP) and R29 is saved at -8(RSP),
+after RSP is updated.
+
+A leaf function that does not require any stack space may omit the
+saved R30 and R29.
+
+The Go ABI's use of R29 as a frame pointer register is compatible with the
+arm64 architecture's requirements, so that Go can interoperate with platform
+debuggers and profilers.
+
+This stack layout is used by both register-based (ABIInternal) and
+stack-based (ABI0) calling conventions.
+
+#### Flags
+
+The arithmetic status flags (NZCV) are treated like scratch registers
+and not preserved across calls.
+All other bits in PSTATE are system flags and are not modified by Go.
+
+The floating-point status register (FPSR) is treated like a scratch
+register and is not preserved across calls.
+
+At calls, the floating-point control register (FPCR) bits are always
+set as follows:
+
+| Flag | Bit | Value | Meaning |
+| --- | --- | --- | --- |
+| DN | 25 | 0 | Propagate NaN operands |
+| FZ | 24 | 0 | Do not flush to zero |
+| RC | 23/22 | 0 (RN) | Round to nearest, choose even if tied |
+| IDE | 15 | 0 | Denormal operations trap disabled |
+| IXE | 12 | 0 | Inexact trap disabled |
+| UFE | 11 | 0 | Underflow trap disabled |
+| OFE | 10 | 0 | Overflow trap disabled |
+| DZE | 9 | 0 | Divide-by-zero trap disabled |
+| IOE | 8 | 0 | Invalid operations trap disabled |
+| NEP | 2 | 0 | Scalar operations do not affect higher elements in vector registers |
+| AH | 1 | 0 | No alternate handling of de-normal inputs |
+| FIZ | 0 | 0 | Do not zero de-normals |
+
+*Rationale*: Having a fixed FPCR control configuration allows Go
+functions to use floating-point and vector (SIMD) operations without
+modifying or saving the FPCR.
+Functions are allowed to modify it between calls (as long as they
+restore it), but as of this writing Go code never does.
+
## Future directions
### Spill path improvements
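
[Editorial note, not part of the commit: the register-assignment rule documented above — integer arguments and results take R0, R1, … and floating-point arguments and results take F0, F1, …, with the two classes counted independently — can be illustrated with a minimal sketch. The signature in main is assumed purely for illustration; the compiler's real assignment algorithm also handles aggregates, the 16-register limits, and stack spilling, which this sketch ignores.]

```go
package main

import "fmt"

// assign models the simplified arm64 ABIInternal rule described above:
// integer values take R0, R1, ... and floating-point values take F0, F1, ...,
// with each register class counted independently.
func assign(kinds []string) []string {
	regs := make([]string, len(kinds))
	nextInt, nextFP := 0, 0
	for i, k := range kinds {
		if k == "float" {
			regs[i] = fmt.Sprintf("F%d", nextFP)
			nextFP++
		} else {
			regs[i] = fmt.Sprintf("R%d", nextInt)
			nextInt++
		}
	}
	return regs
}

func main() {
	// Hypothetical signature: func(a, b int64, x float64, c int64) (int64, float64)
	fmt.Println("args:   ", assign([]string{"int", "int", "float", "int"})) // [R0 R1 F0 R2]
	fmt.Println("results:", assign([]string{"int", "float"}))               // [R0 F0]
}
```
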
diff --git a/src/cmd/compile/internal/amd64/galign.go b/src/cmd/compile/internal/amd64/galign.go
index 2785aa0336..3b13e123a7 100644
--- a/src/cmd/compile/internal/amd64/galign.go
+++ b/src/cmd/compile/internal/amd64/galign.go
@@ -23,6 +23,6 @@ func Init(arch *ssagen.ArchInfo) {
arch.SSAMarkMoves = ssaMarkMoves
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
- arch.LoadRegResults = loadRegResults
+ arch.LoadRegResult = loadRegResult
arch.SpillArgReg = spillArgReg
}
diff --git a/src/cmd/compile/internal/amd64/ggen.go b/src/cmd/compile/internal/amd64/ggen.go
index 1484ad5404..b8dce81a92 100644
--- a/src/cmd/compile/internal/amd64/ggen.go
+++ b/src/cmd/compile/internal/amd64/ggen.go
@@ -57,7 +57,6 @@ func dzDI(b int64) int64 {
func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
const (
r13 = 1 << iota // if R13 is already zeroed.
- x15 // if X15 is already zeroed. Note: in new ABI, X15 is always zero.
)
if cnt == 0 {
@@ -85,11 +84,6 @@ func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.
}
p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R13, 0, obj.TYPE_MEM, x86.REG_SP, off)
} else if !isPlan9 && cnt <= int64(8*types.RegSize) {
- if !buildcfg.Experiment.RegabiG && *state&x15 == 0 {
- p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_REG, x86.REG_X15, 0)
- *state |= x15
- }
-
for i := int64(0); i < cnt/16; i++ {
p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
}
@@ -98,10 +92,6 @@ func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.
p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
}
} else if !isPlan9 && (cnt <= int64(128*types.RegSize)) {
- if !buildcfg.Experiment.RegabiG && *state&x15 == 0 {
- p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_REG, x86.REG_X15, 0)
- *state |= x15
- }
// Save DI to r12. With the amd64 Go register abi, DI can contain
// an incoming parameter, whereas R12 is always scratch.
p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_DI, 0, obj.TYPE_REG, x86.REG_R12, 0)
diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index ca5f36e775..30dba057d0 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -823,7 +823,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Reg = v.Args[0].Reg()
ssagen.AddAux2(&p.To, v, sc.Off64())
case ssa.OpAMD64MOVOstorezero:
- if !buildcfg.Experiment.RegabiG || s.ABI != obj.ABIInternal {
+ if s.ABI != obj.ABIInternal {
// zero X15 manually
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
}
@@ -914,7 +914,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64DUFFZERO:
- if !buildcfg.Experiment.RegabiG || s.ABI != obj.ABIInternal {
+ if s.ABI != obj.ABIInternal {
// zero X15 manually
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
}
@@ -997,22 +997,26 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// Closure pointer is DX.
ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpAMD64LoweredGetG:
- if buildcfg.Experiment.RegabiG && s.ABI == obj.ABIInternal {
+ if s.ABI == obj.ABIInternal {
v.Fatalf("LoweredGetG should not appear in ABIInternal")
}
r := v.Reg()
getgFromTLS(s, r)
case ssa.OpAMD64CALLstatic:
- if buildcfg.Experiment.RegabiG && s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal {
+ if s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal {
// zeroing X15 when entering ABIInternal from ABI0
- opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ if buildcfg.GOOS != "plan9" { // do not use SSE on Plan 9
+ opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ }
// set G register from TLS
getgFromTLS(s, x86.REG_R14)
}
s.Call(v)
- if buildcfg.Experiment.RegabiG && s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 {
+ if s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 {
// zeroing X15 when entering ABIInternal from ABI0
- opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ if buildcfg.GOOS != "plan9" { // do not use SSE on Plan 9
+ opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ }
// set G register from TLS
getgFromTLS(s, x86.REG_R14)
}
@@ -1304,9 +1308,11 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
case ssa.BlockRet:
s.Prog(obj.ARET)
case ssa.BlockRetJmp:
- if buildcfg.Experiment.RegabiG && s.ABI == obj.ABI0 && b.Aux.(*obj.LSym).ABI() == obj.ABIInternal {
+ if s.ABI == obj.ABI0 && b.Aux.(*obj.LSym).ABI() == obj.ABIInternal {
// zeroing X15 when entering ABIInternal from ABI0
- opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ if buildcfg.GOOS != "plan9" { // do not use SSE on Plan 9
+ opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ }
// set G register from TLS
getgFromTLS(s, x86.REG_R14)
}
@@ -1348,20 +1354,15 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
}
}
-func loadRegResults(s *ssagen.State, f *ssa.Func) {
- for _, o := range f.OwnAux.ABIInfo().OutParams() {
- n := o.Name.(*ir.Name)
- rts, offs := o.RegisterTypesAndOffsets()
- for i := range o.Registers {
- p := s.Prog(loadByType(rts[i]))
- p.From.Type = obj.TYPE_MEM
- p.From.Name = obj.NAME_AUTO
- p.From.Sym = n.Linksym()
- p.From.Offset = n.FrameOffset() + offs[i]
- p.To.Type = obj.TYPE_REG
- p.To.Reg = ssa.ObjRegForAbiReg(o.Registers[i], f.Config)
- }
- }
+func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p := s.Prog(loadByType(t))
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_AUTO
+ p.From.Sym = n.Linksym()
+ p.From.Offset = n.FrameOffset() + off
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = reg
+ return p
}
func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
diff --git a/src/cmd/compile/internal/arm64/galign.go b/src/cmd/compile/internal/arm64/galign.go
index d3db37e16f..2a61b9dd99 100644
--- a/src/cmd/compile/internal/arm64/galign.go
+++ b/src/cmd/compile/internal/arm64/galign.go
@@ -23,4 +23,6 @@ func Init(arch *ssagen.ArchInfo) {
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
+ arch.LoadRegResult = loadRegResult
+ arch.SpillArgReg = spillArgReg
}
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
index 0c997bc4b3..c3319f9491 100644
--- a/src/cmd/compile/internal/arm64/ssa.go
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -10,6 +10,7 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
+ "cmd/compile/internal/objw"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
@@ -161,6 +162,18 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
ssagen.AddrAuto(&p.To, v)
+ case ssa.OpArgIntReg, ssa.OpArgFloatReg:
+ // The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill
+ // The loop only runs once.
+ for _, a := range v.Block.Func.RegArgs {
+ // Pass the spill/unspill information along to the assembler, offset by size of
+ // the saved LR slot.
+ addr := ssagen.SpillSlotAddr(a, arm64.REGSP, base.Ctxt.FixedFrameSize())
+ s.FuncInfo().AddSpill(
+ obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type), Spill: storeByType(a.Type)})
+ }
+ v.Block.Func.RegArgs = nil
+ ssagen.CheckArgReg(v)
case ssa.OpARM64ADD,
ssa.OpARM64SUB,
ssa.OpARM64AND,
@@ -1101,8 +1114,34 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
v.Fatalf("FlagConstant op should never make it to codegen %v", v.LongString())
case ssa.OpARM64InvertFlags:
v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
- case ssa.OpClobber, ssa.OpClobberReg:
- // TODO: implement for clobberdead experiment. Nop is ok for now.
+ case ssa.OpClobber:
+ // MOVW $0xdeaddead, REGTMP
+ // MOVW REGTMP, (slot)
+ // MOVW REGTMP, 4(slot)
+ p := s.Prog(arm64.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0xdeaddead
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REGTMP
+ p = s.Prog(arm64.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm64.REGTMP
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arm64.REGSP
+ ssagen.AddAux(&p.To, v)
+ p = s.Prog(arm64.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm64.REGTMP
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arm64.REGSP
+ ssagen.AddAux2(&p.To, v, v.AuxInt+4)
+ case ssa.OpClobberReg:
+ x := uint64(0xdeaddeaddeaddead)
+ p := s.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(x)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
default:
v.Fatalf("genValue not implemented: %s", v.LongString())
}
@@ -1266,3 +1305,22 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
b.Fatalf("branch not implemented: %s", b.LongString())
}
}
+
+func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p := s.Prog(loadByType(t))
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_AUTO
+ p.From.Sym = n.Linksym()
+ p.From.Offset = n.FrameOffset() + off
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = reg
+ return p
+}
+
+func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
+ p.To.Name = obj.NAME_PARAM
+ p.To.Sym = n.Linksym()
+ p.Pos = p.Pos.WithNotStmt()
+ return p
+}
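
[Editorial note, not part of the commit: the new OpClobber case above implements the clobberdead debugging mode on arm64 by overwriting a dead 8-byte stack slot with the recognizable pattern 0xdeaddead, using two 32-bit stores at offsets 0 and 4. The sketch below models the resulting memory contents in plain Go, with the slot represented as an 8-byte array.]

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// clobberSlot models what the OpClobber lowering emits for one 8-byte slot:
// two 32-bit stores of 0xdeaddead, at offset 0 and offset 4.
func clobberSlot(slot *[8]byte) {
	binary.LittleEndian.PutUint32(slot[0:4], 0xdeaddead)
	binary.LittleEndian.PutUint32(slot[4:8], 0xdeaddead)
}

func main() {
	var slot [8]byte
	clobberSlot(&slot)
	fmt.Printf("% x\n", slot) // ad de ad de ad de ad de
}
```
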
diff --git a/src/cmd/compile/internal/typecheck/mapfile_mmap.go b/src/cmd/compile/internal/base/mapfile_mmap.go
index 298b385bcb..c1616db8e9 100644
--- a/src/cmd/compile/internal/typecheck/mapfile_mmap.go
+++ b/src/cmd/compile/internal/base/mapfile_mmap.go
@@ -5,7 +5,7 @@
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd
// +build darwin dragonfly freebsd linux netbsd openbsd
-package typecheck
+package base
import (
"os"
@@ -19,7 +19,7 @@ import (
// mapFile returns length bytes from the file starting at the
// specified offset as a string.
-func mapFile(f *os.File, offset, length int64) (string, error) {
+func MapFile(f *os.File, offset, length int64) (string, error) {
// POSIX mmap: "The implementation may require that off is a
// multiple of the page size."
x := offset & int64(os.Getpagesize()-1)
diff --git a/src/cmd/compile/internal/typecheck/mapfile_read.go b/src/cmd/compile/internal/base/mapfile_read.go
index 9637ab97ab..01796a9bab 100644
--- a/src/cmd/compile/internal/typecheck/mapfile_read.go
+++ b/src/cmd/compile/internal/base/mapfile_read.go
@@ -5,14 +5,14 @@
//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
-package typecheck
+package base
import (
"io"
"os"
)
-func mapFile(f *os.File, offset, length int64) (string, error) {
+func MapFile(f *os.File, offset, length int64) (string, error) {
buf := make([]byte, length)
_, err := io.ReadFull(io.NewSectionReader(f, offset, length), buf)
if err != nil {
diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go
index 3ac7ff1ebe..842b0f4a7e 100644
--- a/src/cmd/compile/internal/escape/escape.go
+++ b/src/cmd/compile/internal/escape/escape.go
@@ -669,6 +669,13 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) {
k = e.spill(k, n)
}
e.expr(k.note(n, "interface-converted"), n.X)
+ case ir.OEFACE:
+ n := n.(*ir.BinaryExpr)
+ // Note: n.X is not needed because it can never point to memory that might escape.
+ e.expr(k, n.Y)
+ case ir.OIDATA:
+ n := n.(*ir.UnaryExpr)
+ e.expr(k, n.X)
case ir.OSLICE2ARRPTR:
// the slice pointer flows directly to the result
n := n.(*ir.ConvExpr)
diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go
index 2137f1d196..9bf3c7240a 100644
--- a/src/cmd/compile/internal/gc/export.go
+++ b/src/cmd/compile/internal/gc/export.go
@@ -5,46 +5,16 @@
package gc
import (
+ "fmt"
+ "go/constant"
+
"cmd/compile/internal/base"
- "cmd/compile/internal/inline"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/bio"
- "fmt"
- "go/constant"
)
-func exportf(bout *bio.Writer, format string, args ...interface{}) {
- fmt.Fprintf(bout, format, args...)
- if base.Debug.Export != 0 {
- fmt.Printf(format, args...)
- }
-}
-
-func dumpexport(bout *bio.Writer) {
- p := &exporter{marked: make(map[*types.Type]bool)}
- for _, n := range typecheck.Target.Exports {
- // Must catch it here rather than Export(), because the type can be
- // not fully set (still TFORW) when Export() is called.
- if n.Type() != nil && n.Type().HasTParam() {
- base.Fatalf("Cannot (yet) export a generic type: %v", n)
- }
- p.markObject(n)
- }
-
- // The linker also looks for the $$ marker - use char after $$ to distinguish format.
- exportf(bout, "\n$$B\n") // indicate binary export format
- off := bout.Offset()
- typecheck.WriteExports(bout.Writer)
- size := bout.Offset() - off
- exportf(bout, "\n$$\n")
-
- if base.Debug.Export != 0 {
- fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, size)
- }
-}
-
func dumpasmhdr() {
b, err := bio.Create(base.Flag.AsmHdr)
if err != nil {
@@ -79,83 +49,3 @@ func dumpasmhdr() {
b.Close()
}
-
-type exporter struct {
- marked map[*types.Type]bool // types already seen by markType
-}
-
-// markObject visits a reachable object.
-func (p *exporter) markObject(n ir.Node) {
- if n.Op() == ir.ONAME {
- n := n.(*ir.Name)
- if n.Class == ir.PFUNC {
- inline.Inline_Flood(n, typecheck.Export)
- }
- }
-
- p.markType(n.Type())
-}
-
-// markType recursively visits types reachable from t to identify
-// functions whose inline bodies may be needed.
-func (p *exporter) markType(t *types.Type) {
- if p.marked[t] {
- return
- }
- p.marked[t] = true
-
- // If this is a named type, mark all of its associated
- // methods. Skip interface types because t.Methods contains
- // only their unexpanded method set (i.e., exclusive of
- // interface embeddings), and the switch statement below
- // handles their full method set.
- if t.Sym() != nil && t.Kind() != types.TINTER {
- for _, m := range t.Methods().Slice() {
- if types.IsExported(m.Sym.Name) {
- p.markObject(ir.AsNode(m.Nname))
- }
- }
- }
-
- // Recursively mark any types that can be produced given a
- // value of type t: dereferencing a pointer; indexing or
- // iterating over an array, slice, or map; receiving from a
- // channel; accessing a struct field or interface method; or
- // calling a function.
- //
- // Notably, we don't mark function parameter types, because
- // the user already needs some way to construct values of
- // those types.
- switch t.Kind() {
- case types.TPTR, types.TARRAY, types.TSLICE:
- p.markType(t.Elem())
-
- case types.TCHAN:
- if t.ChanDir().CanRecv() {
- p.markType(t.Elem())
- }
-
- case types.TMAP:
- p.markType(t.Key())
- p.markType(t.Elem())
-
- case types.TSTRUCT:
- for _, f := range t.FieldSlice() {
- if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
- p.markType(f.Type)
- }
- }
-
- case types.TFUNC:
- for _, f := range t.Results().FieldSlice() {
- p.markType(f.Type)
- }
-
- case types.TINTER:
- for _, f := range t.AllMethods().Slice() {
- if types.IsExported(f.Sym.Name) {
- p.markType(f.Type)
- }
- }
- }
-}
diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go
index 55a0ab7da7..8a2ff75583 100644
--- a/src/cmd/compile/internal/gc/obj.go
+++ b/src/cmd/compile/internal/gc/obj.go
@@ -7,6 +7,7 @@ package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/noder"
"cmd/compile/internal/objw"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/staticdata"
@@ -103,7 +104,7 @@ func finishArchiveEntry(bout *bio.Writer, start int64, name string) {
func dumpCompilerObj(bout *bio.Writer) {
printObjHeader(bout)
- dumpexport(bout)
+ noder.WriteExports(bout)
}
func dumpdata() {
diff --git a/src/cmd/compile/internal/importer/exportdata.go b/src/cmd/compile/internal/importer/exportdata.go
index 3925a64314..6a672be9c1 100644
--- a/src/cmd/compile/internal/importer/exportdata.go
+++ b/src/cmd/compile/internal/importer/exportdata.go
@@ -1,4 +1,3 @@
-// UNREVIEWED
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/src/cmd/compile/internal/importer/gcimporter.go b/src/cmd/compile/internal/importer/gcimporter.go
index feb18cf2c9..ff40be65bb 100644
--- a/src/cmd/compile/internal/importer/gcimporter.go
+++ b/src/cmd/compile/internal/importer/gcimporter.go
@@ -1,4 +1,3 @@
-// UNREVIEWED
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -156,7 +155,7 @@ func Import(packages map[string]*types2.Package, path, srcDir string, lookup fun
// binary export format starts with a 'c', 'd', or 'v'
// (from "version"). Select appropriate importer.
if len(data) > 0 && data[0] == 'i' {
- _, pkg, err = iImportData(packages, data[1:], id)
+ pkg, err = ImportData(packages, string(data[1:]), id)
} else {
err = fmt.Errorf("import %q: old binary export format no longer supported (recompile library)", path)
}
diff --git a/src/cmd/compile/internal/importer/gcimporter_test.go b/src/cmd/compile/internal/importer/gcimporter_test.go
index 7fb8fed59c..44c5e06cd6 100644
--- a/src/cmd/compile/internal/importer/gcimporter_test.go
+++ b/src/cmd/compile/internal/importer/gcimporter_test.go
@@ -1,4 +1,3 @@
-// UNREVIEWED
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -10,7 +9,6 @@ import (
"cmd/compile/internal/types2"
"fmt"
"internal/testenv"
- "io/ioutil"
"os"
"os/exec"
"path/filepath"
@@ -64,7 +62,7 @@ const maxTime = 30 * time.Second
func testDir(t *testing.T, dir string, endTime time.Time) (nimports int) {
dirname := filepath.Join(runtime.GOROOT(), "pkg", runtime.GOOS+"_"+runtime.GOARCH, dir)
- list, err := ioutil.ReadDir(dirname)
+ list, err := os.ReadDir(dirname)
if err != nil {
t.Fatalf("testDir(%s): %s", dirname, err)
}
@@ -92,7 +90,7 @@ func testDir(t *testing.T, dir string, endTime time.Time) (nimports int) {
}
func mktmpdir(t *testing.T) string {
- tmpdir, err := ioutil.TempDir("", "gcimporter_test")
+ tmpdir, err := os.MkdirTemp("", "gcimporter_test")
if err != nil {
t.Fatal("mktmpdir:", err)
}
@@ -142,7 +140,7 @@ func TestVersionHandling(t *testing.T) {
}
const dir = "./testdata/versions"
- list, err := ioutil.ReadDir(dir)
+ list, err := os.ReadDir(dir)
if err != nil {
t.Fatal(err)
}
@@ -195,7 +193,7 @@ func TestVersionHandling(t *testing.T) {
// create file with corrupted export data
// 1) read file
- data, err := ioutil.ReadFile(filepath.Join(dir, name))
+ data, err := os.ReadFile(filepath.Join(dir, name))
if err != nil {
t.Fatal(err)
}
@@ -212,7 +210,7 @@ func TestVersionHandling(t *testing.T) {
// 4) write the file
pkgpath += "_corrupted"
filename := filepath.Join(corruptdir, pkgpath) + ".a"
- ioutil.WriteFile(filename, data, 0666)
+ os.WriteFile(filename, data, 0666)
// test that importing the corrupted file results in an error
_, err = Import(make(map[string]*types2.Package), pkgpath, corruptdir, nil)
@@ -261,8 +259,7 @@ var importedObjectTests = []struct {
{"io.Reader", "type Reader interface{Read(p []byte) (n int, err error)}"},
{"io.ReadWriter", "type ReadWriter interface{Reader; Writer}"},
{"go/ast.Node", "type Node interface{End() go/token.Pos; Pos() go/token.Pos}"},
- // go/types.Type has grown much larger - excluded for now
- // {"go/types.Type", "type Type interface{String() string; Underlying() Type}"},
+ {"go/types.Type", "type Type interface{String() string; Underlying() Type}"},
}
func TestImportedTypes(t *testing.T) {
@@ -457,17 +454,17 @@ func TestIssue13898(t *testing.T) {
t.Fatal("go/types not found")
}
- // look for go/types2.Object type
+ // look for go/types.Object type
obj := lookupObj(t, goTypesPkg.Scope(), "Object")
typ, ok := obj.Type().(*types2.Named)
if !ok {
- t.Fatalf("go/types2.Object type is %v; wanted named type", typ)
+ t.Fatalf("go/types.Object type is %v; wanted named type", typ)
}
- // lookup go/types2.Object.Pkg method
+ // lookup go/types.Object.Pkg method
m, index, indirect := types2.LookupFieldOrMethod(typ, false, nil, "Pkg")
if m == nil {
- t.Fatalf("go/types2.Object.Pkg not found (index = %v, indirect = %v)", index, indirect)
+ t.Fatalf("go/types.Object.Pkg not found (index = %v, indirect = %v)", index, indirect)
}
// the method must belong to go/types
diff --git a/src/cmd/compile/internal/importer/iimport.go b/src/cmd/compile/internal/importer/iimport.go
index 8ab0b7b989..14e64891b8 100644
--- a/src/cmd/compile/internal/importer/iimport.go
+++ b/src/cmd/compile/internal/importer/iimport.go
@@ -1,4 +1,3 @@
-// UNREVIEWED
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -9,7 +8,6 @@
package importer
import (
- "bytes"
"cmd/compile/internal/syntax"
"cmd/compile/internal/types2"
"encoding/binary"
@@ -19,10 +17,11 @@ import (
"io"
"math/big"
"sort"
+ "strings"
)
type intReader struct {
- *bytes.Reader
+ *strings.Reader
path string
}
@@ -42,6 +41,21 @@ func (r *intReader) uint64() uint64 {
return i
}
+// Keep this in sync with constants in iexport.go.
+const (
+ iexportVersionGo1_11 = 0
+ iexportVersionPosCol = 1
+ iexportVersionGenerics = 2
+
+ // Start of the unstable series of versions, remove "+ n" before release.
+ iexportVersionCurrent = iexportVersionGenerics + 1
+)
+
+type ident struct {
+ pkg string
+ name string
+}
+
const predeclReserved = 32
type itag uint64
@@ -57,6 +71,9 @@ const (
signatureType
structType
interfaceType
+ typeParamType
+ instType
+ unionType
)
const io_SeekCurrent = 1 // io.SeekCurrent (not defined in Go 1.4)
@@ -65,8 +82,8 @@ const io_SeekCurrent = 1 // io.SeekCurrent (not defined in Go 1.4)
// and returns the number of bytes consumed and a reference to the package.
// If the export data version is not recognized or the format is otherwise
// compromised, an error is returned.
-func iImportData(imports map[string]*types2.Package, data []byte, path string) (_ int, pkg *types2.Package, err error) {
- const currentVersion = 1
+func ImportData(imports map[string]*types2.Package, data, path string) (pkg *types2.Package, err error) {
+ const currentVersion = iexportVersionCurrent
version := int64(-1)
defer func() {
if e := recover(); e != nil {
@@ -78,13 +95,17 @@ func iImportData(imports map[string]*types2.Package, data []byte, path string) (
}
}()
- r := &intReader{bytes.NewReader(data), path}
+ r := &intReader{strings.NewReader(data), path}
version = int64(r.uint64())
switch version {
- case currentVersion, 0:
+ case currentVersion, iexportVersionPosCol, iexportVersionGo1_11:
default:
- errorf("unknown iexport format version %d", version)
+ if version > iexportVersionGenerics {
+ errorf("unstable iexport format version %d, just rebuild compiler and std library", version)
+ } else {
+ errorf("unknown iexport format version %d", version)
+ }
}
sLen := int64(r.uint64())
@@ -96,16 +117,20 @@ func iImportData(imports map[string]*types2.Package, data []byte, path string) (
r.Seek(sLen+dLen, io_SeekCurrent)
p := iimporter{
- ipath: path,
- version: int(version),
+ exportVersion: version,
+ ipath: path,
+ version: int(version),
- stringData: stringData,
- stringCache: make(map[uint64]string),
- pkgCache: make(map[uint64]*types2.Package),
+ stringData: stringData,
+ pkgCache: make(map[uint64]*types2.Package),
+ posBaseCache: make(map[uint64]*syntax.PosBase),
declData: declData,
pkgIndex: make(map[*types2.Package]map[string]uint64),
typCache: make(map[uint64]types2.Type),
+ // Separate map for typeparams, keyed by their package and unique
+ // name (name with subscript).
+ tparamIndex: make(map[ident]types2.Type),
}
for i, pt := range predeclared {
@@ -117,17 +142,22 @@ func iImportData(imports map[string]*types2.Package, data []byte, path string) (
pkgPathOff := r.uint64()
pkgPath := p.stringAt(pkgPathOff)
pkgName := p.stringAt(r.uint64())
- _ = r.uint64() // package height; unused by go/types
+ pkgHeight := int(r.uint64())
if pkgPath == "" {
pkgPath = path
}
pkg := imports[pkgPath]
if pkg == nil {
- pkg = types2.NewPackage(pkgPath, pkgName)
+ pkg = types2.NewPackageHeight(pkgPath, pkgName, pkgHeight)
imports[pkgPath] = pkg
- } else if pkg.Name() != pkgName {
- errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
+ } else {
+ if pkg.Name() != pkgName {
+ errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
+ }
+ if pkg.Height() != pkgHeight {
+ errorf("conflicting heights %v and %v for package %q", pkg.Height(), pkgHeight, path)
+ }
}
p.pkgCache[pkgPathOff] = pkg
@@ -165,21 +195,22 @@ func iImportData(imports map[string]*types2.Package, data []byte, path string) (
// package was imported completely and without errors
localpkg.MarkComplete()
- consumed, _ := r.Seek(0, io_SeekCurrent)
- return int(consumed), localpkg, nil
+ return localpkg, nil
}
type iimporter struct {
- ipath string
- version int
+ exportVersion int64
+ ipath string
+ version int
- stringData []byte
- stringCache map[uint64]string
- pkgCache map[uint64]*types2.Package
+ stringData string
+ pkgCache map[uint64]*types2.Package
+ posBaseCache map[uint64]*syntax.PosBase
- declData []byte
- pkgIndex map[*types2.Package]map[string]uint64
- typCache map[uint64]types2.Type
+ declData string
+ pkgIndex map[*types2.Package]map[string]uint64
+ typCache map[uint64]types2.Type
+ tparamIndex map[ident]types2.Type
interfaceList []*types2.Interface
}
@@ -199,24 +230,21 @@ func (p *iimporter) doDecl(pkg *types2.Package, name string) {
// Reader.Reset is not available in Go 1.4.
// Use bytes.NewReader for now.
// r.declReader.Reset(p.declData[off:])
- r.declReader = *bytes.NewReader(p.declData[off:])
+ r.declReader = *strings.NewReader(p.declData[off:])
r.obj(name)
}
func (p *iimporter) stringAt(off uint64) string {
- if s, ok := p.stringCache[off]; ok {
- return s
- }
+ var x [binary.MaxVarintLen64]byte
+ n := copy(x[:], p.stringData[off:])
- slen, n := binary.Uvarint(p.stringData[off:])
+ slen, n := binary.Uvarint(x[:n])
if n <= 0 {
errorf("varint failed")
}
spos := off + uint64(n)
- s := string(p.stringData[spos : spos+slen])
- p.stringCache[off] = s
- return s
+ return p.stringData[spos : spos+slen]
}
func (p *iimporter) pkgAt(off uint64) *types2.Package {
@@ -228,6 +256,16 @@ func (p *iimporter) pkgAt(off uint64) *types2.Package {
return nil
}
+func (p *iimporter) posBaseAt(off uint64) *syntax.PosBase {
+ if posBase, ok := p.posBaseCache[off]; ok {
+ return posBase
+ }
+ filename := p.stringAt(off)
+ posBase := syntax.NewFileBase(filename)
+ p.posBaseCache[off] = posBase
+ return posBase
+}
+
func (p *iimporter) typAt(off uint64, base *types2.Named) types2.Type {
if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) {
return t
@@ -241,7 +279,7 @@ func (p *iimporter) typAt(off uint64, base *types2.Named) types2.Type {
// Reader.Reset is not available in Go 1.4.
// Use bytes.NewReader for now.
// r.declReader.Reset(p.declData[off-predeclReserved:])
- r.declReader = *bytes.NewReader(p.declData[off-predeclReserved:])
+ r.declReader = *strings.NewReader(p.declData[off-predeclReserved:])
t := r.doType(base)
if base == nil || !isInterface(t) {
@@ -251,12 +289,12 @@ func (p *iimporter) typAt(off uint64, base *types2.Named) types2.Type {
}
type importReader struct {
- p *iimporter
- declReader bytes.Reader
- currPkg *types2.Package
- prevFile string
- prevLine int64
- prevColumn int64
+ p *iimporter
+ declReader strings.Reader
+ currPkg *types2.Package
+ prevPosBase *syntax.PosBase
+ prevLine int64
+ prevColumn int64
}
func (r *importReader) obj(name string) {
@@ -275,15 +313,26 @@ func (r *importReader) obj(name string) {
r.declare(types2.NewConst(pos, r.currPkg, name, typ, val))
case 'F':
+ var tparams []*types2.TypeName
+ if r.p.exportVersion >= iexportVersionGenerics {
+ tparams = r.tparamList()
+ }
sig := r.signature(nil)
+ sig.SetTParams(tparams)
r.declare(types2.NewFunc(pos, r.currPkg, name, sig))
case 'T':
+ var tparams []*types2.TypeName
+ if r.p.exportVersion >= iexportVersionGenerics {
+ tparams = r.tparamList()
+ }
+
// Types can be recursive. We need to setup a stub
// declaration before recursing.
obj := types2.NewTypeName(pos, r.currPkg, name, nil)
named := types2.NewNamed(obj, nil, nil)
+ named.SetTParams(tparams)
r.declare(obj)
underlying := r.p.typAt(r.uint64(), named).Underlying()
@@ -296,10 +345,44 @@ func (r *importReader) obj(name string) {
recv := r.param()
msig := r.signature(recv)
+ // If the receiver has any targs, set those as the
+ // rparams of the method (since those are the
+ // typeparams being used in the method sig/body).
+ targs := baseType(msig.Recv().Type()).TArgs()
+ if len(targs) > 0 {
+ rparams := make([]*types2.TypeName, len(targs))
+ for i, targ := range targs {
+ rparams[i] = types2.AsTypeParam(targ).Obj()
+ }
+ msig.SetRParams(rparams)
+ }
+
named.AddMethod(types2.NewFunc(mpos, r.currPkg, mname, msig))
}
}
+ case 'P':
+ // We need to "declare" a typeparam in order to have a name that
+ // can be referenced recursively (if needed) in the type param's
+ // bound.
+ if r.p.exportVersion < iexportVersionGenerics {
+ errorf("unexpected type param type")
+ }
+ index := int(r.int64())
+ name0, sub := parseSubscript(name)
+ tn := types2.NewTypeName(pos, r.currPkg, name0, nil)
+ t := (*types2.Checker)(nil).NewTypeParam(tn, index, nil)
+ if sub == 0 {
+ errorf("missing subscript")
+ }
+ t.SetId(sub)
+ // To handle recursive references to the typeparam within its
+ // bound, save the partial type in tparamIndex before reading the bounds.
+ id := ident{r.currPkg.Name(), name}
+ r.p.tparamIndex[id] = t
+
+ t.SetBound(r.typ())
+
case 'V':
typ := r.typ()
@@ -439,12 +522,11 @@ func (r *importReader) pos() syntax.Pos {
r.posv0()
}
- if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 {
+ if (r.prevPosBase == nil || r.prevPosBase.Filename() == "") && r.prevLine == 0 && r.prevColumn == 0 {
return syntax.Pos{}
}
- // TODO(gri) fix this
- // return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn))
- return syntax.Pos{}
+
+ return syntax.MakePos(r.prevPosBase, uint(r.prevLine), uint(r.prevColumn))
}
func (r *importReader) posv0() {
@@ -454,7 +536,7 @@ func (r *importReader) posv0() {
} else if l := r.int64(); l == -1 {
r.prevLine += deltaNewFile
} else {
- r.prevFile = r.string()
+ r.prevPosBase = r.posBase()
r.prevLine = l
}
}
@@ -466,7 +548,7 @@ func (r *importReader) posv1() {
delta = r.int64()
r.prevLine += delta >> 1
if delta&1 != 0 {
- r.prevFile = r.string()
+ r.prevPosBase = r.posBase()
}
}
}
@@ -480,8 +562,9 @@ func isInterface(t types2.Type) bool {
return ok
}
-func (r *importReader) pkg() *types2.Package { return r.p.pkgAt(r.uint64()) }
-func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
+func (r *importReader) pkg() *types2.Package { return r.p.pkgAt(r.uint64()) }
+func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
+func (r *importReader) posBase() *syntax.PosBase { return r.p.posBaseAt(r.uint64()) }
func (r *importReader) doType(base *types2.Named) types2.Type {
switch k := r.kind(); k {
@@ -554,6 +637,49 @@ func (r *importReader) doType(base *types2.Named) types2.Type {
typ := types2.NewInterfaceType(methods, embeddeds)
r.p.interfaceList = append(r.p.interfaceList, typ)
return typ
+
+ case typeParamType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ errorf("unexpected type param type")
+ }
+ pkg, name := r.qualifiedIdent()
+ id := ident{pkg.Name(), name}
+ if t, ok := r.p.tparamIndex[id]; ok {
+ // We're already in the process of importing this typeparam.
+ return t
+ }
+ // Otherwise, import the definition of the typeparam now.
+ r.p.doDecl(pkg, name)
+ return r.p.tparamIndex[id]
+
+ case instType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ errorf("unexpected instantiation type")
+ }
+ pos := r.pos()
+ len := r.uint64()
+ targs := make([]types2.Type, len)
+ for i := range targs {
+ targs[i] = r.typ()
+ }
+ baseType := r.typ()
+ // The imported instantiated type doesn't include any methods, so
+ // we must always use the methods of the base (orig) type.
+ t := types2.Instantiate(pos, baseType, targs)
+ return t
+
+ case unionType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ errorf("unexpected instantiation type")
+ }
+ nt := int(r.uint64())
+ terms := make([]types2.Type, nt)
+ tildes := make([]bool, nt)
+ for i := range terms {
+ terms[i] = r.typ()
+ tildes[i] = r.bool()
+ }
+ return types2.NewUnion(terms, tildes)
}
}
@@ -568,6 +694,19 @@ func (r *importReader) signature(recv *types2.Var) *types2.Signature {
return types2.NewSignature(recv, params, results, variadic)
}
+func (r *importReader) tparamList() []*types2.TypeName {
+ n := r.uint64()
+ if n == 0 {
+ return nil
+ }
+ xs := make([]*types2.TypeName, n)
+ for i := range xs {
+ typ := r.typ()
+ xs[i] = types2.AsTypeParam(typ).Obj()
+ }
+ return xs
+}
+
func (r *importReader) paramList() *types2.Tuple {
xs := make([]*types2.Var, r.uint64())
for i := range xs {
@@ -610,3 +749,33 @@ func (r *importReader) byte() byte {
}
return x
}
+
+func baseType(typ types2.Type) *types2.Named {
+ // pointer receivers are never types2.Named types
+ if p, _ := typ.(*types2.Pointer); p != nil {
+ typ = p.Elem()
+ }
+ // receiver base types are always (possibly generic) types2.Named types
+ n, _ := typ.(*types2.Named)
+ return n
+}
+
+func parseSubscript(name string) (string, uint64) {
+ // Extract the subscript value from the type param name. We export
+ // and import the subscript value, so that all type params have
+ // unique names.
+ sub := uint64(0)
+ startsub := -1
+ for i, r := range name {
+ if '₀' <= r && r < '₀'+10 {
+ if startsub == -1 {
+ startsub = i
+ }
+ sub = sub*10 + uint64(r-'₀')
+ }
+ }
+ if startsub >= 0 {
+ name = name[:startsub]
+ }
+ return name, sub
+}
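
For reference, a minimal standalone sketch of the subscript decoding performed by parseSubscript above. It reuses the logic from this hunk so it can be run outside the compiler; the input "T₁₂" is illustrative only.

```go
package main

import "fmt"

// parseSubscript mirrors the helper added above: it strips the Unicode
// subscript digits ('₀'..'₉') that the exporter appends to type-parameter
// names and returns the base name plus the decoded subscript value.
func parseSubscript(name string) (string, uint64) {
	sub := uint64(0)
	startsub := -1
	for i, r := range name {
		if '₀' <= r && r < '₀'+10 {
			if startsub == -1 {
				startsub = i
			}
			sub = sub*10 + uint64(r-'₀')
		}
	}
	if startsub >= 0 {
		name = name[:startsub]
	}
	return name, sub
}

func main() {
	name, sub := parseSubscript("T₁₂")
	fmt.Println(name, sub) // T 12
}
```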
diff --git a/src/cmd/compile/internal/importer/support.go b/src/cmd/compile/internal/importer/support.go
index 40b9c7c958..3d1f77afcd 100644
--- a/src/cmd/compile/internal/importer/support.go
+++ b/src/cmd/compile/internal/importer/support.go
@@ -1,4 +1,3 @@
-// UNREVIEWED
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go
index d6b4ced4e1..0620191bbf 100644
--- a/src/cmd/compile/internal/inline/inl.go
+++ b/src/cmd/compile/internal/inline/inl.go
@@ -179,6 +179,8 @@ func CanInline(fn *ir.Func) {
Cost: inlineMaxBudget - visitor.budget,
Dcl: pruneUnusedAutos(n.Defn.(*ir.Func).Dcl, &visitor),
Body: inlcopylist(fn.Body),
+
+ CanDelayResults: canDelayResults(fn),
}
if base.Flag.LowerM > 1 {
@@ -191,60 +193,36 @@ func CanInline(fn *ir.Func) {
}
}
-// Inline_Flood marks n's inline body for export and recursively ensures
-// all called functions are marked too.
-func Inline_Flood(n *ir.Name, exportsym func(*ir.Name)) {
- if n == nil {
- return
- }
- if n.Op() != ir.ONAME || n.Class != ir.PFUNC {
- base.Fatalf("Inline_Flood: unexpected %v, %v, %v", n, n.Op(), n.Class)
- }
- fn := n.Func
- if fn == nil {
- base.Fatalf("Inline_Flood: missing Func on %v", n)
- }
- if fn.Inl == nil {
- return
- }
-
- if fn.ExportInline() {
- return
- }
- fn.SetExportInline(true)
-
- typecheck.ImportedBody(fn)
-
- var doFlood func(n ir.Node)
- doFlood = func(n ir.Node) {
- switch n.Op() {
- case ir.OMETHEXPR, ir.ODOTMETH:
- Inline_Flood(ir.MethodExprName(n), exportsym)
+// canDelayResults reports whether inlined calls to fn can delay
+// declaring the result parameter until the "return" statement.
+func canDelayResults(fn *ir.Func) bool {
+ // We can delay declaring+initializing result parameters if:
+ // (1) there's exactly one "return" statement in the inlined function;
+ // (2) it's not an empty return statement (#44355); and
+ // (3) the result parameters aren't named.
- case ir.ONAME:
- n := n.(*ir.Name)
- switch n.Class {
- case ir.PFUNC:
- Inline_Flood(n, exportsym)
- exportsym(n)
- case ir.PEXTERN:
- exportsym(n)
+ nreturns := 0
+ ir.VisitList(fn.Body, func(n ir.Node) {
+ if n, ok := n.(*ir.ReturnStmt); ok {
+ nreturns++
+ if len(n.Results) == 0 {
+ nreturns++ // empty return statement (case 2)
}
+ }
+ })
- case ir.OCALLPART:
- // Okay, because we don't yet inline indirect
- // calls to method values.
- case ir.OCLOSURE:
- // VisitList doesn't visit closure bodies, so force a
- // recursive call to VisitList on the body of the closure.
- ir.VisitList(n.(*ir.ClosureExpr).Func.Body, doFlood)
+ if nreturns != 1 {
+ return false // not exactly one return statement (case 1)
+ }
+
+ // temporaries for return values.
+ for _, param := range fn.Type().Results().FieldSlice() {
+ if sym := types.OrigSym(param.Sym); sym != nil && !sym.IsBlank() {
+ return false // found a named result parameter (case 3)
}
}
- // Recursively identify all referenced functions for
- // reexport. We want to include even non-called functions,
- // because after inlining they might be callable.
- ir.VisitList(ir.Nodes(fn.Inl.Body), doFlood)
+ return true
}
// hairyVisitor visits a function body to determine its inlining
@@ -740,6 +718,11 @@ var inlgen int
// when producing output for debugging the compiler itself.
var SSADumpInline = func(*ir.Func) {}
+// NewInline allows the inliner implementation to be overridden.
+// If it returns nil, the legacy inliner will handle this call
+// instead.
+var NewInline = func(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr { return nil }
+
// If n is a call node (OCALLFUNC or OCALLMETH), and fn is an ONAME node for a
// function with an inlinable body, return an OINLCALL node that can replace n.
// The returned node's Ninit has the parameter assignments, the Nbody is the
@@ -793,30 +776,67 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
defer func() {
inlMap[fn] = false
}()
- if base.Debug.TypecheckInl == 0 {
- typecheck.ImportedBody(fn)
+
+ typecheck.FixVariadicCall(n)
+
+ parent := base.Ctxt.PosTable.Pos(n.Pos()).Base().InliningIndex()
+
+ sym := fn.Linksym()
+ inlIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), sym)
+
+ if base.Flag.GenDwarfInl > 0 {
+ if !sym.WasInlined() {
+ base.Ctxt.DwFixups.SetPrecursorFunc(sym, fn)
+ sym.Set(obj.AttrWasInlined, true)
+ }
}
- // We have a function node, and it has an inlineable body.
- if base.Flag.LowerM > 1 {
- fmt.Printf("%v: inlining call to %v %v { %v }\n", ir.Line(n), fn.Sym(), fn.Type(), ir.Nodes(fn.Inl.Body))
- } else if base.Flag.LowerM != 0 {
+ if base.Flag.LowerM != 0 {
fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn)
}
if base.Flag.LowerM > 2 {
fmt.Printf("%v: Before inlining: %+v\n", ir.Line(n), n)
}
+ res := NewInline(n, fn, inlIndex)
+ if res == nil {
+ res = oldInline(n, fn, inlIndex)
+ }
+
+ // transitive inlining
+ // might be nice to do this before exporting the body,
+ // but can't emit the body with inlining expanded.
+ // instead we emit the things that the body needs
+ // and each use must redo the inlining.
+ // luckily these are small.
+ ir.EditChildren(res, edit)
+
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v: After inlining %+v\n\n", ir.Line(res), res)
+ }
+
+ return res
+}
+
+// oldInline creates an InlinedCallExpr to replace the given call
+// expression. fn is the callee function to be inlined. inlIndex is
+// the inlining tree position index, for use with src.NewInliningBase
+// when rewriting positions.
+func oldInline(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr {
+ if base.Debug.TypecheckInl == 0 {
+ typecheck.ImportedBody(fn)
+ }
+
SSADumpInline(fn)
- ninit := n.Init()
+ ninit := call.Init()
// For normal function calls, the function callee expression
// may contain side effects (e.g., added by addinit during
// inlconv2expr or inlconv2list). Make sure to preserve these,
// if necessary (#42703).
- if n.Op() == ir.OCALLFUNC {
- callee := n.X
+ if call.Op() == ir.OCALLFUNC {
+ callee := call.X
for callee.Op() == ir.OCONVNOP {
conv := callee.(*ir.ConvExpr)
ninit.Append(ir.TakeInit(conv)...)
@@ -854,25 +874,6 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
}
// We can delay declaring+initializing result parameters if:
- // (1) there's exactly one "return" statement in the inlined function;
- // (2) it's not an empty return statement (#44355); and
- // (3) the result parameters aren't named.
- delayretvars := true
-
- nreturns := 0
- ir.VisitList(ir.Nodes(fn.Inl.Body), func(n ir.Node) {
- if n, ok := n.(*ir.ReturnStmt); ok {
- nreturns++
- if len(n.Results) == 0 {
- delayretvars = false // empty return statement (case 2)
- }
- }
- })
-
- if nreturns != 1 {
- delayretvars = false // not exactly one return statement (case 1)
- }
-
// temporaries for return values.
var retvars []ir.Node
for i, t := range fn.Type().Results().Fields().Slice() {
@@ -882,7 +883,6 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
m = inlvar(n)
m = typecheck.Expr(m).(*ir.Name)
inlvars[n] = m
- delayretvars = false // found a named result parameter (case 3)
} else {
// anonymous return values, synthesize names for use in assignment that replaces return
m = retvar(t, i)
@@ -905,61 +905,27 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
// Assign arguments to the parameters' temp names.
as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
as.Def = true
- if n.Op() == ir.OCALLMETH {
- sel := n.X.(*ir.SelectorExpr)
+ if call.Op() == ir.OCALLMETH {
+ sel := call.X.(*ir.SelectorExpr)
if sel.X == nil {
- base.Fatalf("method call without receiver: %+v", n)
+ base.Fatalf("method call without receiver: %+v", call)
}
as.Rhs.Append(sel.X)
}
- as.Rhs.Append(n.Args...)
-
- // For non-dotted calls to variadic functions, we assign the
- // variadic parameter's temp name separately.
- var vas *ir.AssignStmt
+ as.Rhs.Append(call.Args...)
if recv := fn.Type().Recv(); recv != nil {
as.Lhs.Append(inlParam(recv, as, inlvars))
}
for _, param := range fn.Type().Params().Fields().Slice() {
- // For ordinary parameters or variadic parameters in
- // dotted calls, just add the variable to the
- // assignment list, and we're done.
- if !param.IsDDD() || n.IsDDD {
- as.Lhs.Append(inlParam(param, as, inlvars))
- continue
- }
-
- // Otherwise, we need to collect the remaining values
- // to pass as a slice.
-
- x := len(as.Lhs)
- for len(as.Lhs) < len(as.Rhs) {
- as.Lhs.Append(argvar(param.Type, len(as.Lhs)))
- }
- varargs := as.Lhs[x:]
-
- vas = ir.NewAssignStmt(base.Pos, nil, nil)
- vas.X = inlParam(param, vas, inlvars)
- if len(varargs) == 0 {
- vas.Y = typecheck.NodNil()
- vas.Y.SetType(param.Type)
- } else {
- lit := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(param.Type), nil)
- lit.List = varargs
- vas.Y = lit
- }
+ as.Lhs.Append(inlParam(param, as, inlvars))
}
if len(as.Rhs) != 0 {
ninit.Append(typecheck.Stmt(as))
}
- if vas != nil {
- ninit.Append(typecheck.Stmt(vas))
- }
-
- if !delayretvars {
+ if !fn.Inl.CanDelayResults {
// Zero the return parameters.
for _, n := range retvars {
ninit.Append(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name)))
@@ -972,40 +938,21 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
inlgen++
- parent := -1
- if b := base.Ctxt.PosTable.Pos(n.Pos()).Base(); b != nil {
- parent = b.InliningIndex()
- }
-
- sym := fn.Linksym()
- newIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), sym)
-
// Add an inline mark just before the inlined body.
// This mark is inline in the code so that it's a reasonable spot
// to put a breakpoint. Not sure if that's really necessary or not
// (in which case it could go at the end of the function instead).
// Note issue 28603.
- inlMark := ir.NewInlineMarkStmt(base.Pos, types.BADWIDTH)
- inlMark.SetPos(n.Pos().WithIsStmt())
- inlMark.Index = int64(newIndex)
- ninit.Append(inlMark)
-
- if base.Flag.GenDwarfInl > 0 {
- if !sym.WasInlined() {
- base.Ctxt.DwFixups.SetPrecursorFunc(sym, fn)
- sym.Set(obj.AttrWasInlined, true)
- }
- }
+ ninit.Append(ir.NewInlineMarkStmt(call.Pos().WithIsStmt(), int64(inlIndex)))
subst := inlsubst{
- retlabel: retlabel,
- retvars: retvars,
- delayretvars: delayretvars,
- inlvars: inlvars,
- defnMarker: ir.NilExpr{},
- bases: make(map[*src.PosBase]*src.PosBase),
- newInlIndex: newIndex,
- fn: fn,
+ retlabel: retlabel,
+ retvars: retvars,
+ inlvars: inlvars,
+ defnMarker: ir.NilExpr{},
+ bases: make(map[*src.PosBase]*src.PosBase),
+ newInlIndex: inlIndex,
+ fn: fn,
}
subst.edit = subst.node
@@ -1026,26 +973,11 @@ func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]b
//dumplist("ninit post", ninit);
- call := ir.NewInlinedCallExpr(base.Pos, nil, nil)
- *call.PtrInit() = ninit
- call.Body = body
- call.ReturnVars = retvars
- call.SetType(n.Type())
- call.SetTypecheck(1)
-
- // transitive inlining
- // might be nice to do this before exporting the body,
- // but can't emit the body with inlining expanded.
- // instead we emit the things that the body needs
- // and each use must redo the inlining.
- // luckily these are small.
- ir.EditChildren(call, edit)
-
- if base.Flag.LowerM > 2 {
- fmt.Printf("%v: After inlining %+v\n\n", ir.Line(call), call)
- }
-
- return call
+ res := ir.NewInlinedCallExpr(base.Pos, body, retvars)
+ res.SetInit(ninit)
+ res.SetType(call.Type())
+ res.SetTypecheck(1)
+ return res
}
// Every time we expand a function we generate a new set of tmpnames,
@@ -1060,6 +992,7 @@ func inlvar(var_ *ir.Name) *ir.Name {
n.SetType(var_.Type())
n.Class = ir.PAUTO
n.SetUsed(true)
+ n.SetAutoTemp(var_.AutoTemp())
n.Curfn = ir.CurFunc // the calling function, not the called one
n.SetAddrtaken(var_.Addrtaken())
@@ -1078,18 +1011,6 @@ func retvar(t *types.Field, i int) *ir.Name {
return n
}
-// Synthesize a variable to store the inlined function's arguments
-// when they come from a multiple return call.
-func argvar(t *types.Type, i int) ir.Node {
- n := typecheck.NewName(typecheck.LookupNum("~arg", i))
- n.SetType(t.Elem())
- n.Class = ir.PAUTO
- n.SetUsed(true)
- n.Curfn = ir.CurFunc // the calling function, not the called one
- ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n)
- return n
-}
-
// The inlsubst type implements the actual inlining of a single
// function call.
type inlsubst struct {
@@ -1099,10 +1020,6 @@ type inlsubst struct {
// Temporary result variables.
retvars []ir.Node
- // Whether result variables should be initialized at the
- // "return" statement.
- delayretvars bool
-
inlvars map[*ir.Name]*ir.Name
// defnMarker is used to mark a Node for reassignment.
// inlsubst.clovar set this during creating new ONAME.
@@ -1157,17 +1074,21 @@ func (subst *inlsubst) fields(oldt *types.Type) []*types.Field {
// clovar creates a new ONAME node for a local variable or param of a closure
// inside a function being inlined.
func (subst *inlsubst) clovar(n *ir.Name) *ir.Name {
- // TODO(danscales): want to get rid of this shallow copy, with code like the
- // following, but it is hard to copy all the necessary flags in a maintainable way.
- // m := ir.NewNameAt(n.Pos(), n.Sym())
- // m.Class = n.Class
- // m.SetType(n.Type())
- // m.SetTypecheck(1)
- //if n.IsClosureVar() {
- // m.SetIsClosureVar(true)
- //}
- m := &ir.Name{}
- *m = *n
+ m := ir.NewNameAt(n.Pos(), n.Sym())
+ m.Class = n.Class
+ m.SetType(n.Type())
+ m.SetTypecheck(1)
+ if n.IsClosureVar() {
+ m.SetIsClosureVar(true)
+ }
+ if n.Addrtaken() {
+ m.SetAddrtaken(true)
+ }
+ if n.Used() {
+ m.SetUsed(true)
+ }
+ m.Defn = n.Defn
+
m.Curfn = subst.newclofn
switch defn := n.Defn.(type) {
@@ -1222,8 +1143,6 @@ func (subst *inlsubst) clovar(n *ir.Name) *ir.Name {
// closure does the necessary substitions for a ClosureExpr n and returns the new
// closure node.
func (subst *inlsubst) closure(n *ir.ClosureExpr) ir.Node {
- m := ir.Copy(n)
-
// Prior to the subst edit, set a flag in the inlsubst to
// indicated that we don't want to update the source positions in
// the new closure. If we do this, it will appear that the closure
@@ -1231,29 +1150,16 @@ func (subst *inlsubst) closure(n *ir.ClosureExpr) ir.Node {
// issue #46234 for more details.
defer func(prev bool) { subst.noPosUpdate = prev }(subst.noPosUpdate)
subst.noPosUpdate = true
- ir.EditChildren(m, subst.edit)
//fmt.Printf("Inlining func %v with closure into %v\n", subst.fn, ir.FuncName(ir.CurFunc))
- // The following is similar to funcLit
oldfn := n.Func
- newfn := ir.NewFunc(oldfn.Pos())
- // These three lines are not strictly necessary, but just to be clear
- // that new function needs to redo typechecking and inlinability.
- newfn.SetTypecheck(0)
- newfn.SetInlinabilityChecked(false)
- newfn.Inl = nil
- newfn.SetIsHiddenClosure(true)
- newfn.Nname = ir.NewNameAt(n.Pos(), ir.BlankNode.Sym())
- newfn.Nname.Func = newfn
+ newfn := ir.NewClosureFunc(oldfn.Pos(), true)
+
// Ntype can be nil for -G=3 mode.
if oldfn.Nname.Ntype != nil {
newfn.Nname.Ntype = subst.node(oldfn.Nname.Ntype).(ir.Ntype)
}
- newfn.Nname.Defn = newfn
-
- m.(*ir.ClosureExpr).Func = newfn
- newfn.OClosure = m.(*ir.ClosureExpr)
if subst.newclofn != nil {
//fmt.Printf("Inlining a closure with a nested closure\n")
@@ -1303,13 +1209,13 @@ func (subst *inlsubst) closure(n *ir.ClosureExpr) ir.Node {
// Actually create the named function for the closure, now that
// the closure is inlined in a specific function.
- m.SetTypecheck(0)
+ newclo := newfn.OClosure
+ newclo.SetInit(subst.list(n.Init()))
if oldfn.ClosureCalled() {
- typecheck.Callee(m)
+ return typecheck.Callee(newclo)
} else {
- typecheck.Expr(m)
+ return typecheck.Expr(newclo)
}
- return m
}
// node recursively copies a node from the saved pristine body of the
@@ -1391,7 +1297,7 @@ func (subst *inlsubst) node(n ir.Node) ir.Node {
}
as.Rhs = subst.list(n.Results)
- if subst.delayretvars {
+ if subst.fn.Inl.CanDelayResults {
for _, n := range as.Lhs {
as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name)))
n.Name().Defn = as
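
As a quick illustration of the three conditions that canDelayResults checks (exactly one return, the return is not empty, and the result parameters are unnamed), here is ordinary Go source, not compiler code, annotated with which bodies qualify:

```go
package example

// Illustrative only: which function bodies satisfy the conditions
// checked by canDelayResults above.

func delayable(x int) int { return x + 1 } // one non-empty return, unnamed result: delayable

func named(x int) (r int) { return x + 1 } // named result parameter: not delayable (case 3)

func empty() (r int) { return } // empty return statement: not delayable (case 2)

func twoReturns(x int) int { // more than one return statement: not delayable (case 1)
	if x > 0 {
		return 1
	}
	return 0
}
```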
diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go
index f70645f079..779793b2f2 100644
--- a/src/cmd/compile/internal/ir/expr.go
+++ b/src/cmd/compile/internal/ir/expr.go
@@ -192,8 +192,10 @@ type ClosureExpr struct {
miniExpr
Func *Func `mknode:"-"`
Prealloc *Name
+ IsGoWrap bool // whether this is a wrapper closure of a go statement
}
+// Deprecated: Use NewClosureFunc instead.
func NewClosureExpr(pos src.XPos, fn *Func) *ClosureExpr {
n := &ClosureExpr{Func: fn}
n.op = OCLOSURE
@@ -323,20 +325,18 @@ func NewKeyExpr(pos src.XPos, key, value Node) *KeyExpr {
// A StructKeyExpr is an Field: Value composite literal key.
type StructKeyExpr struct {
miniExpr
- Field *types.Sym
- Value Node
- Offset int64
+ Field *types.Field
+ Value Node
}
-func NewStructKeyExpr(pos src.XPos, field *types.Sym, value Node) *StructKeyExpr {
+func NewStructKeyExpr(pos src.XPos, field *types.Field, value Node) *StructKeyExpr {
n := &StructKeyExpr{Field: field, Value: value}
n.pos = pos
n.op = OSTRUCTKEY
- n.Offset = types.BADWIDTH
return n
}
-func (n *StructKeyExpr) Sym() *types.Sym { return n.Field }
+func (n *StructKeyExpr) Sym() *types.Sym { return n.Field.Sym }
// An InlinedCallExpr is an inlined function call.
type InlinedCallExpr struct {
@@ -448,6 +448,20 @@ func (n *ParenExpr) SetOTYPE(t *types.Type) {
t.SetNod(n)
}
+// A RawOrigExpr represents an arbitrary Go expression as a string value.
+// When printed in diagnostics, the string value is written out exactly as-is.
+type RawOrigExpr struct {
+ miniExpr
+ Raw string
+}
+
+func NewRawOrigExpr(pos src.XPos, op Op, raw string) *RawOrigExpr {
+ n := &RawOrigExpr{Raw: raw}
+ n.pos = pos
+ n.op = op
+ return n
+}
+
// A ResultExpr represents a direct access to a result.
type ResultExpr struct {
miniExpr
@@ -494,8 +508,13 @@ func NewNameOffsetExpr(pos src.XPos, name *Name, offset int64, typ *types.Type)
// A SelectorExpr is a selector expression X.Sel.
type SelectorExpr struct {
miniExpr
- X Node
- Sel *types.Sym
+ X Node
+ // Sel is the name of the field or method being selected, without (in the
+ // case of methods) any preceding type specifier. If the field/method is
+ // exported, then the Sym uses the local package regardless of the package
+ // of the containing type.
+ Sel *types.Sym
+ // The actual selected field - may not be filled in until typechecking.
Selection *types.Field
Prealloc *Name // preallocated storage for OCALLPART, if any
}
diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go
index f2ae0f7606..d9cc5f109f 100644
--- a/src/cmd/compile/internal/ir/fmt.go
+++ b/src/cmd/compile/internal/ir/fmt.go
@@ -567,6 +567,11 @@ func exprFmt(n Node, s fmt.State, prec int) {
return
}
+ if n, ok := n.(*RawOrigExpr); ok {
+ fmt.Fprint(s, n.Raw)
+ return
+ }
+
switch n.Op() {
case OPAREN:
n := n.(*ParenExpr)
@@ -1114,16 +1119,21 @@ func dumpNodeHeader(w io.Writer, n Node) {
}
if n.Pos().IsKnown() {
- pfx := ""
+ fmt.Fprint(w, " # ")
switch n.Pos().IsStmt() {
case src.PosNotStmt:
- pfx = "_" // "-" would be confusing
+ fmt.Fprint(w, "_") // "-" would be confusing
case src.PosIsStmt:
- pfx = "+"
+ fmt.Fprint(w, "+")
+ }
+ for i, pos := range base.Ctxt.AllPos(n.Pos(), nil) {
+ if i > 0 {
+ fmt.Fprint(w, ",")
+ }
+ // TODO(mdempsky): Print line pragma details too.
+ file := filepath.Base(pos.Filename())
+ fmt.Fprintf(w, "%s:%d:%d", file, pos.Line(), pos.Col())
}
- pos := base.Ctxt.PosTable.Pos(n.Pos())
- file := filepath.Base(pos.Filename())
- fmt.Fprintf(w, " # %s%s:%d", pfx, file, pos.Line())
}
}
diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go
index 20fe965711..6480becc93 100644
--- a/src/cmd/compile/internal/ir/func.go
+++ b/src/cmd/compile/internal/ir/func.go
@@ -9,6 +9,7 @@ import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
+ "fmt"
)
// A Func corresponds to a single function in a Go program
@@ -166,6 +167,11 @@ type Inline struct {
// another package is imported.
Dcl []*Name
Body []Node
+
+ // CanDelayResults reports whether it's safe for the inliner to delay
+ // initializing the result parameters until immediately before the
+ // "return" statement.
+ CanDelayResults bool
}
// A Mark represents a scope boundary.
@@ -279,7 +285,7 @@ func FuncSymName(s *types.Sym) string {
// MarkFunc marks a node as a function.
func MarkFunc(n *Name) {
if n.Op() != ONAME || n.Class != Pxxx {
- base.Fatalf("expected ONAME/Pxxx node, got %v", n)
+ base.FatalfAt(n.Pos(), "expected ONAME/Pxxx node, got %v (%v/%v)", n, n.Op(), n.Class)
}
n.Class = PFUNC
@@ -296,8 +302,8 @@ func ClosureDebugRuntimeCheck(clo *ClosureExpr) {
base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func.ClosureVars)
}
}
- if base.Flag.CompilingRuntime && clo.Esc() == EscHeap {
- base.ErrorfAt(clo.Pos(), "heap-allocated closure, not allowed in runtime")
+ if base.Flag.CompilingRuntime && clo.Esc() == EscHeap && !clo.IsGoWrap {
+ base.ErrorfAt(clo.Pos(), "heap-allocated closure %s, not allowed in runtime", FuncName(clo.Func))
}
}
@@ -306,3 +312,109 @@ func ClosureDebugRuntimeCheck(clo *ClosureExpr) {
func IsTrivialClosure(clo *ClosureExpr) bool {
return len(clo.Func.ClosureVars) == 0
}
+
+// globClosgen is like Func.Closgen, but for the global scope.
+var globClosgen int32
+
+// closureName generates a new unique name for a closure within outerfn.
+func closureName(outerfn *Func) *types.Sym {
+ pkg := types.LocalPkg
+ outer := "glob."
+ prefix := "func"
+ gen := &globClosgen
+
+ if outerfn != nil {
+ if outerfn.OClosure != nil {
+ prefix = ""
+ }
+
+ pkg = outerfn.Sym().Pkg
+ outer = FuncName(outerfn)
+
+ // There may be multiple functions named "_". In those
+ // cases, we can't use their individual Closgens as it
+ // would lead to name clashes.
+ if !IsBlank(outerfn.Nname) {
+ gen = &outerfn.Closgen
+ }
+ }
+
+ *gen++
+ return pkg.Lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen))
+}
+
+// NewClosureFunc creates a new Func to represent a function literal.
+// If hidden is true, then the closure is marked hidden (i.e., as a
+// function literal contained within another function, rather than a
+// package-scope variable initialization expression).
+func NewClosureFunc(pos src.XPos, hidden bool) *Func {
+ fn := NewFunc(pos)
+ fn.SetIsHiddenClosure(hidden)
+
+ fn.Nname = NewNameAt(pos, BlankNode.Sym())
+ fn.Nname.Func = fn
+ fn.Nname.Defn = fn
+
+ fn.OClosure = NewClosureExpr(pos, fn)
+
+ return fn
+}
+
+// NameClosure generates a unique name for the given function literal,
+// which must have appeared within outerfn.
+func NameClosure(clo *ClosureExpr, outerfn *Func) {
+ fn := clo.Func
+ if fn.IsHiddenClosure() != (outerfn != nil) {
+ base.FatalfAt(clo.Pos(), "closure naming inconsistency: hidden %v, but outer %v", fn.IsHiddenClosure(), outerfn)
+ }
+
+ name := fn.Nname
+ if !IsBlank(name) {
+ base.FatalfAt(clo.Pos(), "closure already named: %v", name)
+ }
+
+ name.SetSym(closureName(outerfn))
+ MarkFunc(name)
+}
+
+// UseClosure checks that the given function literal has been set up
+// correctly, and then returns it as an expression.
+// It must be called after clo.Func.ClosureVars has been set.
+func UseClosure(clo *ClosureExpr, pkg *Package) Node {
+ fn := clo.Func
+ name := fn.Nname
+
+ if IsBlank(name) {
+ base.FatalfAt(fn.Pos(), "unnamed closure func: %v", fn)
+ }
+ // Caution: clo.Typecheck() is still 0 when UseClosure is called by
+ // tcClosure.
+ if fn.Typecheck() != 1 || name.Typecheck() != 1 {
+ base.FatalfAt(fn.Pos(), "missed typecheck: %v", fn)
+ }
+ if clo.Type() == nil || name.Type() == nil {
+ base.FatalfAt(fn.Pos(), "missing types: %v", fn)
+ }
+ if !types.Identical(clo.Type(), name.Type()) {
+ base.FatalfAt(fn.Pos(), "mismatched types: %v", fn)
+ }
+
+ if base.Flag.W > 1 {
+ s := fmt.Sprintf("new closure func: %v", fn)
+ Dump(s, fn)
+ }
+
+ if pkg != nil {
+ pkg.Decls = append(pkg.Decls, fn)
+ }
+
+ if false && IsTrivialClosure(clo) {
+ // TODO(mdempsky): Investigate if we can/should optimize this
+ // case. walkClosure already handles it later, but it could be
+ // useful to recognize earlier (e.g., it might allow multiple
+ // inlined calls to a function to share a common trivial closure
+ // func, rather than cloning it for each inlined call).
+ }
+
+ return clo
+}
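
A standalone sketch of the naming scheme implemented by closureName above. The helper below only reproduces the format string; the printed names match what the scheme yields for package-scope, in-function, and nested closures.

```go
package main

import "fmt"

// closureSym mirrors closureName's fmt.Sprintf("%s.%s%d", ...): a closure is
// named "<outer>.<prefix><n>". At package scope outer is "glob." and prefix
// is "func" (yielding "glob..func1"); inside a function Foo the name is
// "Foo.func1"; a closure nested inside another closure drops the "func"
// prefix, yielding names like "Foo.func1.1".
func closureSym(outer, prefix string, gen int) string {
	return fmt.Sprintf("%s.%s%d", outer, prefix, gen)
}

func main() {
	fmt.Println(closureSym("glob.", "func", 1)) // glob..func1
	fmt.Println(closureSym("Foo", "func", 1))   // Foo.func1
	fmt.Println(closureSym("Foo.func1", "", 1)) // Foo.func1.1
}
```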
diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go
index af559cc082..9191eeb1d6 100644
--- a/src/cmd/compile/internal/ir/node.go
+++ b/src/cmd/compile/internal/ir/node.go
@@ -563,7 +563,7 @@ func OuterValue(n Node) Node {
for {
switch nn := n; nn.Op() {
case OXDOT:
- base.Fatalf("OXDOT in walk")
+ base.FatalfAt(n.Pos(), "OXDOT in walk: %v", n)
case ODOT:
nn := nn.(*SelectorExpr)
n = nn.X
diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go
index 22855d7163..9a4858d037 100644
--- a/src/cmd/compile/internal/ir/node_gen.go
+++ b/src/cmd/compile/internal/ir/node_gen.go
@@ -947,6 +947,22 @@ func (n *RangeStmt) editChildren(edit func(Node) Node) {
}
}
+func (n *RawOrigExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *RawOrigExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *RawOrigExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ return false
+}
+func (n *RawOrigExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+
func (n *ResultExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *ResultExpr) copy() Node {
c := *n
diff --git a/src/cmd/compile/internal/ir/type.go b/src/cmd/compile/internal/ir/type.go
index a903ea8cd4..431468375a 100644
--- a/src/cmd/compile/internal/ir/type.go
+++ b/src/cmd/compile/internal/ir/type.go
@@ -300,11 +300,22 @@ func (n *typeNode) CanBeNtype() {}
// TypeNode returns the Node representing the type t.
func TypeNode(t *types.Type) Ntype {
+ return TypeNodeAt(src.NoXPos, t)
+}
+
+// TypeNodeAt is like TypeNode, but allows specifying the position
+// information if a new OTYPE needs to be constructed.
+//
+// Deprecated: Use TypeNode instead. For typical use, the position for
+// an anonymous OTYPE node should not matter. However, TypeNodeAt is
+// available for use with toolstash -cmp to refactor existing code
+// that is sensitive to OTYPE position.
+func TypeNodeAt(pos src.XPos, t *types.Type) Ntype {
if n := t.Obj(); n != nil {
if n.Type() != t {
base.Fatalf("type skew: %v has type %v, but expected %v", n, n.Type(), t)
}
return n.(Ntype)
}
- return newTypeNode(src.NoXPos, t)
+ return newTypeNode(pos, t)
}
diff --git a/src/cmd/compile/internal/ir/val.go b/src/cmd/compile/internal/ir/val.go
index 03c320e205..bfe7d2bb43 100644
--- a/src/cmd/compile/internal/ir/val.go
+++ b/src/cmd/compile/internal/ir/val.go
@@ -66,7 +66,7 @@ func Float64Val(v constant.Value) float64 {
func AssertValidTypeForConst(t *types.Type, v constant.Value) {
if !ValidTypeForConst(t, v) {
- base.Fatalf("%v does not represent %v", t, v)
+ base.Fatalf("%v (%v) does not represent %v (%v)", t, t.Kind(), v, v.Kind())
}
}
diff --git a/src/cmd/compile/internal/logopt/logopt_test.go b/src/cmd/compile/internal/logopt/logopt_test.go
index 71976174b0..41a11b0c70 100644
--- a/src/cmd/compile/internal/logopt/logopt_test.go
+++ b/src/cmd/compile/internal/logopt/logopt_test.go
@@ -209,7 +209,7 @@ func s15a8(x *[15]int64) [15]int64 {
want(t, slogged, `{"range":{"start":{"line":11,"character":6},"end":{"line":11,"character":6}},"severity":3,"code":"isInBounds","source":"go compiler","message":""}`)
want(t, slogged, `{"range":{"start":{"line":7,"character":6},"end":{"line":7,"character":6}},"severity":3,"code":"canInlineFunction","source":"go compiler","message":"cost: 35"}`)
// escape analysis explanation
- want(t, slogged, `{"range":{"start":{"line":7,"character":13},"end":{"line":7,"character":13}},"severity":3,"code":"leak","source":"go compiler","message":"parameter z leaks to ~r2 with derefs=0",`+
+ want(t, slogged, `{"range":{"start":{"line":7,"character":13},"end":{"line":7,"character":13}},"severity":3,"code":"leak","source":"go compiler","message":"parameter z leaks to ~r0 with derefs=0",`+
`"relatedInformation":[`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: flow: y = z:"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from y := z (assign-pair)"},`+
@@ -220,7 +220,7 @@ func s15a8(x *[15]int64) [15]int64 {
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from \u0026y.b (address-of)"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":9},"end":{"line":4,"character":9}}},"message":"inlineLoc"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from ~R0 = \u0026y.b (assign-pair)"},`+
- `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: flow: ~r2 = ~R0:"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: flow: ~r0 = ~R0:"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: from return (*int)(~R0) (return)"}]}`)
})
}
diff --git a/src/cmd/compile/internal/noder/decl.go b/src/cmd/compile/internal/noder/decl.go
index 4ca2eb4740..96abbe66ae 100644
--- a/src/cmd/compile/internal/noder/decl.go
+++ b/src/cmd/compile/internal/noder/decl.go
@@ -41,20 +41,27 @@ func (g *irgen) decls(decls []syntax.Decl) []ir.Node {
}
func (g *irgen) importDecl(p *noder, decl *syntax.ImportDecl) {
- // TODO(mdempsky): Merge with gcimports so we don't have to import
- // packages twice.
-
g.pragmaFlags(decl.Pragma, 0)
- ipkg := importfile(decl)
- if ipkg == ir.Pkgs.Unsafe {
+ // Get the imported package's path, as resolved already by types2
+ // and gcimporter. This is the same path as would be computed by
+ // parseImportPath.
+ switch pkgNameOf(g.info, decl).Imported().Path() {
+ case "unsafe":
p.importedUnsafe = true
- }
- if ipkg.Path == "embed" {
+ case "embed":
p.importedEmbed = true
}
}
+// pkgNameOf returns the PkgName associated with the given ImportDecl.
+func pkgNameOf(info *types2.Info, decl *syntax.ImportDecl) *types2.PkgName {
+ if name := decl.LocalPkgName; name != nil {
+ return info.Defs[name].(*types2.PkgName)
+ }
+ return info.Implicits[decl].(*types2.PkgName)
+}
+
func (g *irgen) constDecl(out *ir.Nodes, decl *syntax.ConstDecl) {
g.pragmaFlags(decl.Pragma, 0)
@@ -96,6 +103,16 @@ func (g *irgen) funcDecl(out *ir.Nodes, decl *syntax.FuncDecl) {
}
g.funcBody(fn, decl.Recv, decl.Type, decl.Body)
+ if fn.Type().HasTParam() && fn.Body != nil {
+ // Set pointers to the dcls/body of a generic function/method in
+ // the Inl struct, so it is marked for export, is available for
+ // stenciling, and works with Inline_Flood().
+ fn.Inl = &ir.Inline{
+ Cost: 1,
+ Dcl: fn.Dcl,
+ Body: fn.Body,
+ }
+ }
out.Append(fn)
}
@@ -104,13 +121,7 @@ func (g *irgen) typeDecl(out *ir.Nodes, decl *syntax.TypeDecl) {
if decl.Alias {
name, _ := g.def(decl.Name)
g.pragmaFlags(decl.Pragma, 0)
-
- // TODO(mdempsky): This matches how typecheckdef marks aliases for
- // export, but this won't generalize to exporting function-scoped
- // type aliases. We should maybe just use n.Alias() instead.
- if ir.CurFunc == nil {
- name.Sym().Def = ir.TypeNode(name.Type())
- }
+ assert(name.Alias()) // should be set by irgen.obj
out.Append(ir.NewDecl(g.pos(decl), ir.ODCLTYPE, name))
return
@@ -154,11 +165,15 @@ func (g *irgen) typeDecl(out *ir.Nodes, decl *syntax.TypeDecl) {
// [mdempsky: Subtleties like these are why I always vehemently
// object to new type pragmas.]
ntyp.SetUnderlying(g.typeExpr(decl.Type))
- if len(decl.TParamList) > 0 {
- // Set HasTParam if there are any tparams, even if no tparams are
- // used in the type itself (e.g., if it is an empty struct, or no
- // fields in the struct use the tparam).
- ntyp.SetHasTParam(true)
+
+ tparams := otyp.(*types2.Named).TParams()
+ if len(tparams) > 0 {
+ rparams := make([]*types.Type, len(tparams))
+ for i := range rparams {
+ rparams[i] = g.typ(tparams[i].Type())
+ }
+ // This will set hasTParam flag if any rparams are not concrete types.
+ ntyp.SetRParams(rparams)
}
types.ResumeCheckSize()
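
Since types2 mirrors the exported go/types API, the Defs/Implicits lookup that pkgNameOf performs can be sketched with go/types. This is an illustrative analogue, not the compiler's code:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

// An import with an explicit local name records its *types.PkgName in
// Info.Defs (keyed by the name identifier), while an unnamed import records
// it in Info.Implicits (keyed by the ImportSpec) -- the same split that
// pkgNameOf handles for types2.
func main() {
	fset := token.NewFileSet()
	src := `package p; import f "fmt"; var _ = f.Println`
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}

	info := &types.Info{
		Defs:      make(map[*ast.Ident]types.Object),
		Implicits: make(map[ast.Node]types.Object),
	}
	conf := types.Config{Importer: importer.Default()}
	if _, err := conf.Check("p", fset, []*ast.File{file}, info); err != nil {
		panic(err)
	}

	imp := file.Imports[0]
	var pkgName *types.PkgName
	if imp.Name != nil {
		pkgName = info.Defs[imp.Name].(*types.PkgName)
	} else {
		pkgName = info.Implicits[imp].(*types.PkgName)
	}
	fmt.Println(pkgName.Imported().Path()) // fmt
}
```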
diff --git a/src/cmd/compile/internal/noder/export.go b/src/cmd/compile/internal/noder/export.go
new file mode 100644
index 0000000000..1a296e22c8
--- /dev/null
+++ b/src/cmd/compile/internal/noder/export.go
@@ -0,0 +1,65 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/typecheck"
+ "cmd/internal/bio"
+)
+
+// writeNewExportFunc is a hook that can be added to append extra
+// export data after the normal export data section. It allows
+// experimenting with new export data format designs without requiring
+// immediate support in the go/internal or x/tools importers.
+var writeNewExportFunc func(out io.Writer)
+
+func WriteExports(out *bio.Writer) {
+ // When unified IR exports are enable, we simply append it to the
+ // end of the normal export data (with compiler extensions
+ // disabled), and write an extra header giving its size.
+ //
+ // If the compiler sees this header, it knows to read the new data
+ // instead; meanwhile the go/types importers will silently ignore it
+ // and continue processing the old export instead.
+ //
+ // This allows us to experiment with changes to the new export data
+ // format without needing to update the go/internal/gcimporter or
+ // (worse) x/tools/go/gcexportdata.
+
+ useNewExport := writeNewExportFunc != nil
+
+ var old, new bytes.Buffer
+
+ typecheck.WriteExports(&old, !useNewExport)
+
+ if useNewExport {
+ writeNewExportFunc(&new)
+ }
+
+ oldLen := old.Len()
+ newLen := new.Len()
+
+ if useNewExport {
+ fmt.Fprintf(out, "\nnewexportsize %v\n", newLen)
+ }
+
+ // The linker also looks for the $$ marker - use char after $$ to distinguish format.
+ out.WriteString("\n$$B\n") // indicate binary export format
+ io.Copy(out, &old)
+ out.WriteString("\n$$\n")
+ io.Copy(out, &new)
+
+ if base.Debug.Export != 0 {
+ fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, oldLen)
+ if useNewExport {
+ fmt.Printf("BenchmarkNewExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, newLen)
+ }
+ }
+}
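
A runnable sketch of the layout WriteExports produces when the unified IR hook is installed: a "newexportsize" header line, the legacy export data between the "$$B" and "$$" markers, then the new payload after the closing marker. The payload strings below are placeholders.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

func main() {
	var old, new bytes.Buffer
	old.WriteString("<legacy indexed export data>") // placeholder contents
	new.WriteString("<unified IR export data>")     // placeholder contents

	out := os.Stdout
	fmt.Fprintf(out, "\nnewexportsize %v\n", new.Len())
	io.WriteString(out, "\n$$B\n") // indicate binary export format
	io.Copy(out, &old)
	io.WriteString(out, "\n$$\n") // end of legacy export data
	io.Copy(out, &new)
}
```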
diff --git a/src/cmd/compile/internal/noder/expr.go b/src/cmd/compile/internal/noder/expr.go
index c7695ed920..98dc504ee9 100644
--- a/src/cmd/compile/internal/noder/expr.go
+++ b/src/cmd/compile/internal/noder/expr.go
@@ -5,6 +5,8 @@
package noder
import (
+ "fmt"
+
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
@@ -15,6 +17,8 @@ import (
)
func (g *irgen) expr(expr syntax.Expr) ir.Node {
+ expr = unparen(expr) // skip parens; unneeded after parse+typecheck
+
if expr == nil {
return nil
}
@@ -67,7 +71,9 @@ func (g *irgen) expr(expr syntax.Expr) ir.Node {
// Constant expression.
if tv.Value != nil {
- return Const(g.pos(expr), g.typ(typ), tv.Value)
+ typ := g.typ(typ)
+ value := FixValue(typ, tv.Value)
+ return OrigConst(g.pos(expr), typ, value, constExprOp(expr), syntax.String(expr))
}
n := g.expr0(typ, expr)
@@ -105,11 +111,11 @@ func (g *irgen) expr0(typ types2.Type, expr syntax.Expr) ir.Node {
// The key for the Inferred map is the CallExpr (if inferring
// types required the function arguments) or the IndexExpr below
// (if types could be inferred without the function arguments).
- if inferred, ok := g.info.Inferred[expr]; ok && len(inferred.Targs) > 0 {
+ if inferred, ok := g.info.Inferred[expr]; ok && len(inferred.TArgs) > 0 {
// This is the case where inferring types required the
// types of the function arguments.
- targs := make([]ir.Node, len(inferred.Targs))
- for i, targ := range inferred.Targs {
+ targs := make([]ir.Node, len(inferred.TArgs))
+ for i, targ := range inferred.TArgs {
targs[i] = ir.TypeNode(g.typ(targ))
}
if fun.Op() == ir.OFUNCINST {
@@ -131,12 +137,12 @@ func (g *irgen) expr0(typ types2.Type, expr syntax.Expr) ir.Node {
case *syntax.IndexExpr:
var targs []ir.Node
- if inferred, ok := g.info.Inferred[expr]; ok && len(inferred.Targs) > 0 {
+ if inferred, ok := g.info.Inferred[expr]; ok && len(inferred.TArgs) > 0 {
// This is the partial type inference case where the types
// can be inferred from other type arguments without using
// the types of the function arguments.
- targs = make([]ir.Node, len(inferred.Targs))
- for i, targ := range inferred.Targs {
+ targs = make([]ir.Node, len(inferred.TArgs))
+ for i, targ := range inferred.TArgs {
targs[i] = ir.TypeNode(g.typ(targ))
}
} else if _, ok := expr.Index.(*syntax.ListExpr); ok {
@@ -161,9 +167,6 @@ func (g *irgen) expr0(typ types2.Type, expr syntax.Expr) ir.Node {
typed(g.typ(typ), n)
return n
- case *syntax.ParenExpr:
- return g.expr(expr.X) // skip parens; unneeded after parse+typecheck
-
case *syntax.SelectorExpr:
// Qualified identifier.
if name, ok := expr.X.(*syntax.Name); ok {
@@ -264,9 +267,13 @@ func (g *irgen) selectorExpr(pos src.XPos, typ types2.Type, expr *syntax.Selecto
// instantiated for this method call.
// selinfo.Recv() is the instantiated type
recvType2 = recvType2Base
- // method is the generic method associated with the gen type
- method := g.obj(types2.AsNamed(recvType2).Method(last))
- n = ir.NewSelectorExpr(pos, ir.OCALLPART, x, method.Sym())
+ recvTypeSym := g.pkg(method2.Pkg()).Lookup(recvType2.(*types2.Named).Obj().Name())
+ recvType := recvTypeSym.Def.(*ir.Name).Type()
+ // method is the generic method associated with
+ // the base generic type. The instantiated type may not
+ // have method bodies filled in, if it was imported.
+ method := recvType.Methods().Index(last).Nname.(*ir.Name)
+ n = ir.NewSelectorExpr(pos, ir.OCALLPART, x, typecheck.Lookup(expr.Sel.Value))
n.(*ir.SelectorExpr).Selection = types.NewField(pos, method.Sym(), method.Type())
n.(*ir.SelectorExpr).Selection.Nname = method
typed(method.Type(), n)
@@ -313,13 +320,17 @@ func getTargs(selinfo *types2.Selection) []types2.Type {
}
func (g *irgen) exprList(expr syntax.Expr) []ir.Node {
+ return g.exprs(unpackListExpr(expr))
+}
+
+func unpackListExpr(expr syntax.Expr) []syntax.Expr {
switch expr := expr.(type) {
case nil:
return nil
case *syntax.ListExpr:
- return g.exprs(expr.ElemList)
+ return expr.ElemList
default:
- return []ir.Node{g.expr(expr)}
+ return []syntax.Expr{expr}
}
}
@@ -344,11 +355,13 @@ func (g *irgen) compLit(typ types2.Type, lit *syntax.CompositeLit) ir.Node {
for i, elem := range lit.ElemList {
switch elem := elem.(type) {
case *syntax.KeyValueExpr:
+ var key ir.Node
if isStruct {
- exprs[i] = ir.NewStructKeyExpr(g.pos(elem), g.name(elem.Key.(*syntax.Name)), g.expr(elem.Value))
+ key = ir.NewIdent(g.pos(elem.Key), g.name(elem.Key.(*syntax.Name)))
} else {
- exprs[i] = ir.NewKeyExpr(g.pos(elem), g.expr(elem.Key), g.expr(elem.Value))
+ key = g.expr(elem.Key)
}
+ exprs[i] = ir.NewKeyExpr(g.pos(elem), key, g.expr(elem.Value))
default:
exprs[i] = g.expr(elem)
}
@@ -360,19 +373,13 @@ func (g *irgen) compLit(typ types2.Type, lit *syntax.CompositeLit) ir.Node {
}
func (g *irgen) funcLit(typ2 types2.Type, expr *syntax.FuncLit) ir.Node {
- fn := ir.NewFunc(g.pos(expr))
- fn.SetIsHiddenClosure(ir.CurFunc != nil)
+ fn := ir.NewClosureFunc(g.pos(expr), ir.CurFunc != nil)
+ ir.NameClosure(fn.OClosure, ir.CurFunc)
- fn.Nname = ir.NewNameAt(g.pos(expr), typecheck.ClosureName(ir.CurFunc))
- ir.MarkFunc(fn.Nname)
typ := g.typ(typ2)
- fn.Nname.Func = fn
- fn.Nname.Defn = fn
typed(typ, fn.Nname)
- fn.SetTypecheck(1)
-
- fn.OClosure = ir.NewClosureExpr(g.pos(expr), fn)
typed(typ, fn.OClosure)
+ fn.SetTypecheck(1)
g.funcBody(fn, nil, expr.Type, expr.Body)
@@ -386,9 +393,7 @@ func (g *irgen) funcLit(typ2 types2.Type, expr *syntax.FuncLit) ir.Node {
cv.SetWalkdef(1)
}
- g.target.Decls = append(g.target.Decls, fn)
-
- return fn.OClosure
+ return ir.UseClosure(fn.OClosure, g.target)
}
func (g *irgen) typeExpr(typ syntax.Expr) *types.Type {
@@ -398,3 +403,35 @@ func (g *irgen) typeExpr(typ syntax.Expr) *types.Type {
}
return n.Type()
}
+
+// constExprOp returns an ir.Op that represents the outermost
+// operation of the given constant expression. It's intended for use
+// with ir.RawOrigExpr.
+func constExprOp(expr syntax.Expr) ir.Op {
+ switch expr := expr.(type) {
+ default:
+ panic(fmt.Sprintf("%s: unexpected expression: %T", expr.Pos(), expr))
+
+ case *syntax.BasicLit:
+ return ir.OLITERAL
+ case *syntax.Name, *syntax.SelectorExpr:
+ return ir.ONAME
+ case *syntax.CallExpr:
+ return ir.OCALL
+ case *syntax.Operation:
+ if expr.Y == nil {
+ return unOps[expr.Op]
+ }
+ return binOps[expr.Op]
+ }
+}
+
+func unparen(expr syntax.Expr) syntax.Expr {
+ for {
+ paren, ok := expr.(*syntax.ParenExpr)
+ if !ok {
+ return expr
+ }
+ expr = paren.X
+ }
+}
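
Because cmd/compile/internal/syntax cannot be imported outside the compiler, here is the same unparen idea sketched against go/ast: peel ParenExpr wrappers until a non-paren expression remains.

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
)

// unparen strips any number of enclosing parentheses, as in the syntax
// helper added above, but for go/ast expressions.
func unparen(e ast.Expr) ast.Expr {
	for {
		p, ok := e.(*ast.ParenExpr)
		if !ok {
			return e
		}
		e = p.X
	}
}

func main() {
	e, err := parser.ParseExpr("((x + 1))")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", unparen(e)) // *ast.BinaryExpr
}
```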
diff --git a/src/cmd/compile/internal/noder/helpers.go b/src/cmd/compile/internal/noder/helpers.go
index 9da0e49300..456df312a6 100644
--- a/src/cmd/compile/internal/noder/helpers.go
+++ b/src/cmd/compile/internal/noder/helpers.go
@@ -43,6 +43,32 @@ func Const(pos src.XPos, typ *types.Type, val constant.Value) ir.Node {
return typed(typ, ir.NewBasicLit(pos, val))
}
+func OrigConst(pos src.XPos, typ *types.Type, val constant.Value, op ir.Op, raw string) ir.Node {
+ orig := ir.NewRawOrigExpr(pos, op, raw)
+ return ir.NewConstExpr(val, typed(typ, orig))
+}
+
+// FixValue returns val after converting and truncating it as
+// appropriate for typ.
+func FixValue(typ *types.Type, val constant.Value) constant.Value {
+ assert(typ.Kind() != types.TFORW)
+ switch {
+ case typ.IsInteger():
+ val = constant.ToInt(val)
+ case typ.IsFloat():
+ val = constant.ToFloat(val)
+ case typ.IsComplex():
+ val = constant.ToComplex(val)
+ }
+ if !typ.IsUntyped() {
+ val = typecheck.DefaultLit(ir.NewBasicLit(src.NoXPos, val), typ).Val()
+ }
+ if !typ.IsTypeParam() {
+ ir.AssertValidTypeForConst(typ, val)
+ }
+ return val
+}
+
func Nil(pos src.XPos, typ *types.Type) ir.Node {
return typed(typ, ir.NewNilExpr(pos))
}
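
A minimal sketch of the conversion step in FixValue using only go/constant. The subsequent size truncation via typecheck.DefaultLit is compiler-internal and not reproduced here.

```go
package main

import (
	"fmt"
	"go/constant"
)

// For an integer-typed destination, FixValue converts an untyped float
// constant such as 2.0 into an integer constant with constant.ToInt.
func main() {
	f := constant.MakeFloat64(2.0)
	i := constant.ToInt(f)
	fmt.Println(f.Kind(), "->", i.Kind()) // Float -> Int
}
```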
diff --git a/src/cmd/compile/internal/noder/import.go b/src/cmd/compile/internal/noder/import.go
index 701e9001c8..48f0e48028 100644
--- a/src/cmd/compile/internal/noder/import.go
+++ b/src/cmd/compile/internal/noder/import.go
@@ -8,7 +8,6 @@ import (
"errors"
"fmt"
"internal/buildcfg"
- "io"
"os"
pathpkg "path"
"runtime"
@@ -32,8 +31,24 @@ import (
"cmd/internal/src"
)
-// Temporary import helper to get type2-based type-checking going.
+// haveLegacyImports records whether we've imported any packages
+// without a new export data section. This is useful for experimenting
+// with new export data format designs, when you need to support
+// existing tests that manually compile files with inconsistent
+// compiler flags.
+var haveLegacyImports = false
+
+// newReadImportFunc is an extension hook for experimenting with new
+// export data formats. If a new export data payload was written out
+// for an imported package by overloading writeNewExportFunc, then
+// that payload will be mapped into memory and passed to
+// newReadImportFunc.
+var newReadImportFunc = func(data string, pkg1 *types.Pkg, check *types2.Checker, packages map[string]*types2.Package) (pkg2 *types2.Package, err error) {
+ panic("unexpected new export data payload")
+}
+
type gcimports struct {
+ check *types2.Checker
packages map[string]*types2.Package
}
@@ -46,13 +61,8 @@ func (m *gcimports) ImportFrom(path, srcDir string, mode types2.ImportMode) (*ty
panic("mode must be 0")
}
- path, err := resolveImportPath(path)
- if err != nil {
- return nil, err
- }
-
- lookup := func(path string) (io.ReadCloser, error) { return openPackage(path) }
- return importer.Import(m.packages, path, srcDir, lookup)
+ _, pkg, err := readImportFile(path, typecheck.Target, m.check, m.packages)
+ return pkg, err
}
func isDriveLetter(b byte) bool {
@@ -175,160 +185,242 @@ func resolveImportPath(path string) (string, error) {
return path, nil
}
-// TODO(mdempsky): Return an error instead.
func importfile(decl *syntax.ImportDecl) *types.Pkg {
- if decl.Path.Kind != syntax.StringLit {
- base.Errorf("import path must be a string")
+ path, err := parseImportPath(decl.Path)
+ if err != nil {
+ base.Errorf("%s", err)
return nil
}
- path, err := strconv.Unquote(decl.Path.Value)
+ pkg, _, err := readImportFile(path, typecheck.Target, nil, nil)
if err != nil {
- base.Errorf("import path must be a string")
+ base.Errorf("%s", err)
return nil
}
+ if pkg != ir.Pkgs.Unsafe && pkg.Height >= myheight {
+ myheight = pkg.Height + 1
+ }
+ return pkg
+}
+
+func parseImportPath(pathLit *syntax.BasicLit) (string, error) {
+ if pathLit.Kind != syntax.StringLit {
+ return "", errors.New("import path must be a string")
+ }
+
+ path, err := strconv.Unquote(pathLit.Value)
+ if err != nil {
+ return "", errors.New("import path must be a string")
+ }
+
if err := checkImportPath(path, false); err != nil {
- base.Errorf("%s", err.Error())
- return nil
+ return "", err
}
+ return path, err
+}
+
+// readImportFile reads the import file for the given package path and
+// returns its types.Pkg representation. If packages is non-nil, the
+// types2.Package representation is also returned.
+func readImportFile(path string, target *ir.Package, check *types2.Checker, packages map[string]*types2.Package) (pkg1 *types.Pkg, pkg2 *types2.Package, err error) {
path, err = resolveImportPath(path)
if err != nil {
- base.Errorf("%s", err)
- return nil
+ return
+ }
+
+ if path == "unsafe" {
+ pkg1, pkg2 = ir.Pkgs.Unsafe, types2.Unsafe
+
+ // TODO(mdempsky): Investigate if this actually matters. Why would
+ // the linker or runtime care whether a package imported unsafe?
+ if !pkg1.Direct {
+ pkg1.Direct = true
+ target.Imports = append(target.Imports, pkg1)
+ }
+
+ return
}
- importpkg := types.NewPkg(path, "")
- if importpkg.Direct {
- return importpkg // already fully loaded
+ pkg1 = types.NewPkg(path, "")
+ if packages != nil {
+ pkg2 = packages[path]
+ assert(pkg1.Direct == (pkg2 != nil && pkg2.Complete()))
}
- importpkg.Direct = true
- typecheck.Target.Imports = append(typecheck.Target.Imports, importpkg)
- if path == "unsafe" {
- return importpkg // initialized with universe
+ if pkg1.Direct {
+ return
}
+ pkg1.Direct = true
+ target.Imports = append(target.Imports, pkg1)
f, err := openPackage(path)
if err != nil {
- base.Errorf("could not import %q: %v", path, err)
- base.ErrorExit()
+ return
}
- imp := bio.NewReader(f)
- defer imp.Close()
- file := f.Name()
+ defer f.Close()
- // check object header
- p, err := imp.ReadString('\n')
+ r, end, newsize, err := findExportData(f)
if err != nil {
- base.Errorf("import %s: reading input: %v", file, err)
- base.ErrorExit()
+ return
}
- if p == "!<arch>\n" { // package archive
- // package export block should be first
- sz := archive.ReadHeader(imp.Reader, "__.PKGDEF")
- if sz <= 0 {
- base.Errorf("import %s: not a package file", file)
- base.ErrorExit()
- }
- p, err = imp.ReadString('\n')
+ if base.Debug.Export != 0 {
+ fmt.Printf("importing %s (%s)\n", path, f.Name())
+ }
+
+ if newsize != 0 {
+ // We have unified IR data. Map it, and feed it to the importers.
+ end -= newsize
+ var data string
+ data, err = base.MapFile(r.File(), end, newsize)
if err != nil {
- base.Errorf("import %s: reading input: %v", file, err)
- base.ErrorExit()
+ return
}
- }
- if !strings.HasPrefix(p, "go object ") {
- base.Errorf("import %s: not a go object file: %s", file, p)
- base.ErrorExit()
- }
- q := objabi.HeaderString()
- if p != q {
- base.Errorf("import %s: object is [%s] expected [%s]", file, p, q)
- base.ErrorExit()
- }
+ pkg2, err = newReadImportFunc(data, pkg1, check, packages)
+ } else {
+ // We only have old data. Oh well, fall back to the legacy importers.
+ haveLegacyImports = true
- // process header lines
- for {
- p, err = imp.ReadString('\n')
+ var c byte
+ switch c, err = r.ReadByte(); {
+ case err != nil:
+ return
+
+ case c != 'i':
+ // Indexed format is distinguished by an 'i' byte,
+ // whereas previous export formats started with 'c', 'd', or 'v'.
+ err = fmt.Errorf("unexpected package format byte: %v", c)
+ return
+ }
+
+ pos := r.Offset()
+
+ // Map string (and data) section into memory as a single large
+ // string. This reduces heap fragmentation and allows
+ // returning individual substrings very efficiently.
+ var data string
+ data, err = base.MapFile(r.File(), pos, end-pos)
if err != nil {
- base.Errorf("import %s: reading input: %v", file, err)
- base.ErrorExit()
+ return
}
- if p == "\n" {
- break // header ends with blank line
+
+ typecheck.ReadImports(pkg1, data)
+
+ if packages != nil {
+ pkg2, err = importer.ImportData(packages, data, path)
+ if err != nil {
+ return
+ }
}
}
- // Expect $$B\n to signal binary import format.
+ err = addFingerprint(path, f, end)
+ return
+}
+
+// findExportData returns a *bio.Reader positioned at the start of the
+// binary export data section, and a file offset for where to stop
+// reading.
+func findExportData(f *os.File) (r *bio.Reader, end, newsize int64, err error) {
+ r = bio.NewReader(f)
+
+ // check object header
+ line, err := r.ReadString('\n')
+ if err != nil {
+ return
+ }
- // look for $$
- var c byte
- for {
- c, err = imp.ReadByte()
+ if line == "!<arch>\n" { // package archive
+ // package export block should be first
+ sz := int64(archive.ReadHeader(r.Reader, "__.PKGDEF"))
+ if sz <= 0 {
+ err = errors.New("not a package file")
+ return
+ }
+ end = r.Offset() + sz
+ line, err = r.ReadString('\n')
if err != nil {
- break
+ return
}
- if c == '$' {
- c, err = imp.ReadByte()
- if c == '$' || err != nil {
- break
- }
+ } else {
+ // Not an archive; provide end of file instead.
+ // TODO(mdempsky): I don't think this happens anymore.
+ var fi os.FileInfo
+ fi, err = f.Stat()
+ if err != nil {
+ return
}
+ end = fi.Size()
}
- // get character after $$
- if err == nil {
- c, _ = imp.ReadByte()
+ if !strings.HasPrefix(line, "go object ") {
+ err = fmt.Errorf("not a go object file: %s", line)
+ return
+ }
+ if expect := objabi.HeaderString(); line != expect {
+ err = fmt.Errorf("object is [%s] expected [%s]", line, expect)
+ return
}
- var fingerprint goobj.FingerprintType
- switch c {
- case '\n':
- base.Errorf("cannot import %s: old export format no longer supported (recompile library)", path)
- return nil
-
- case 'B':
- if base.Debug.Export != 0 {
- fmt.Printf("importing %s (%s)\n", path, file)
+ // process header lines
+ for !strings.HasPrefix(line, "$$") {
+ if strings.HasPrefix(line, "newexportsize ") {
+ fields := strings.Fields(line)
+ newsize, err = strconv.ParseInt(fields[1], 10, 64)
+ if err != nil {
+ return
+ }
}
- imp.ReadByte() // skip \n after $$B
- c, err = imp.ReadByte()
+ line, err = r.ReadString('\n')
if err != nil {
- base.Errorf("import %s: reading input: %v", file, err)
- base.ErrorExit()
+ return
}
+ }
- // Indexed format is distinguished by an 'i' byte,
- // whereas previous export formats started with 'c', 'd', or 'v'.
- if c != 'i' {
- base.Errorf("import %s: unexpected package format byte: %v", file, c)
- base.ErrorExit()
- }
- fingerprint = typecheck.ReadImports(importpkg, imp)
+ // Expect $$B\n to signal binary import format.
+ if line != "$$B\n" {
+ err = errors.New("old export format no longer supported (recompile library)")
+ return
+ }
+
+ return
+}
+
+// addFingerprint reads the linker fingerprint included at the end of
+// the export data.
+func addFingerprint(path string, f *os.File, end int64) error {
+ const eom = "\n$$\n"
+ var fingerprint goobj.FingerprintType
+
+ var buf [len(fingerprint) + len(eom)]byte
+ if _, err := f.ReadAt(buf[:], end-int64(len(buf))); err != nil {
+ return err
+ }
- default:
- base.Errorf("no import in %q", path)
- base.ErrorExit()
+ // Caller should have given us the end position of the export data,
+ // which should end with the "\n$$\n" marker. As a consistency check
+ // that we're reading at the right offset, make sure we
+ // found the marker.
+ if s := string(buf[len(fingerprint):]); s != eom {
+ return fmt.Errorf("expected $$ marker, but found %q", s)
}
+ copy(fingerprint[:], buf[:])
+
// assume files move (get installed) so don't record the full path
if base.Flag.Cfg.PackageFile != nil {
// If using a packageFile map, assume path_ can be recorded directly.
base.Ctxt.AddImport(path, fingerprint)
} else {
// For file "/Users/foo/go/pkg/darwin_amd64/math.a" record "math.a".
+ file := f.Name()
base.Ctxt.AddImport(file[len(file)-len(path)-len(".a"):], fingerprint)
}
-
- if importpkg.Height >= myheight {
- myheight = importpkg.Height + 1
- }
-
- return importpkg
+ return nil
}
// The linker uses the magic symbol prefixes "go." and "type."
@@ -431,7 +523,7 @@ func clearImports() {
s.Def = nil
continue
}
- if types.IsDotAlias(s) {
+ if s.Def != nil && s.Def.Sym() != s {
// throw away top-level name left over
// from previous import . "x"
// We'll report errors after type checking in CheckDotImports.
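
A standalone sketch of the trailing-bytes check addFingerprint performs: the bytes immediately before the export-data end offset must be the linker fingerprint (modelled here as [8]byte, standing in for goobj.FingerprintType) followed by the "\n$$\n" end-of-module marker. The payload bytes are placeholders.

```go
package main

import (
	"bytes"
	"fmt"
)

// readFingerprint mirrors the consistency check in addFingerprint above.
func readFingerprint(data []byte, end int64) ([8]byte, error) {
	const eom = "\n$$\n"
	var fingerprint [8]byte

	buf := data[end-int64(len(fingerprint)+len(eom)) : end]
	if s := string(buf[len(fingerprint):]); s != eom {
		return fingerprint, fmt.Errorf("expected $$ marker, but found %q", s)
	}
	copy(fingerprint[:], buf)
	return fingerprint, nil
}

func main() {
	tail := append(bytes.Repeat([]byte{0xAB}, 8), "\n$$\n"...)
	data := append([]byte("<export data>"), tail...)
	fp, err := readFingerprint(data, int64(len(data)))
	fmt.Println(fp, err) // [171 171 171 171 171 171 171 171] <nil>
}
```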
diff --git a/src/cmd/compile/internal/noder/irgen.go b/src/cmd/compile/internal/noder/irgen.go
index 3e0d3285ab..aac8b5e641 100644
--- a/src/cmd/compile/internal/noder/irgen.go
+++ b/src/cmd/compile/internal/noder/irgen.go
@@ -18,9 +18,9 @@ import (
"cmd/internal/src"
)
-// check2 type checks a Go package using types2, and then generates IR
-// using the results.
-func check2(noders []*noder) {
+// checkFiles configures and runs the types2 checker on the given
+// parsed source files and then returns the result.
+func checkFiles(noders []*noder) (posMap, *types2.Package, *types2.Info) {
if base.SyntaxErrors() != 0 {
base.ErrorExit()
}
@@ -34,20 +34,22 @@ func check2(noders []*noder) {
}
// typechecking
+ importer := gcimports{
+ packages: make(map[string]*types2.Package),
+ }
conf := types2.Config{
GoVersion: base.Flag.Lang,
IgnoreLabels: true, // parser already checked via syntax.CheckBranches mode
CompilerErrorMessages: true, // use error strings matching existing compiler errors
+ AllowTypeLists: true, // remove this line once all tests use type set syntax
Error: func(err error) {
terr := err.(types2.Error)
base.ErrorfAt(m.makeXPos(terr.Pos), "%s", terr.Msg)
},
- Importer: &gcimports{
- packages: make(map[string]*types2.Package),
- },
- Sizes: &gcSizes{},
+ Importer: &importer,
+ Sizes: &gcSizes{},
}
- info := types2.Info{
+ info := &types2.Info{
Types: make(map[syntax.Expr]types2.TypeAndValue),
Defs: make(map[*syntax.Name]types2.Object),
Uses: make(map[*syntax.Name]types2.Object),
@@ -57,12 +59,24 @@ func check2(noders []*noder) {
Inferred: make(map[syntax.Expr]types2.Inferred),
// expand as needed
}
- pkg, err := conf.Check(base.Ctxt.Pkgpath, files, &info)
- files = nil
+
+ pkg := types2.NewPackage(base.Ctxt.Pkgpath, "")
+ importer.check = types2.NewChecker(&conf, pkg, info)
+ err := importer.check.Files(files)
+
base.ExitIfErrors()
if err != nil {
base.FatalfAt(src.NoXPos, "conf.Check error: %v", err)
}
+
+ return m, pkg, info
+}
+
+// check2 type checks a Go package using types2, and then generates IR
+// using the results.
+func check2(noders []*noder) {
+ m, pkg, info := checkFiles(noders)
+
if base.Flag.G < 2 {
os.Exit(0)
}
@@ -70,7 +84,7 @@ func check2(noders []*noder) {
g := irgen{
target: typecheck.Target,
self: pkg,
- info: &info,
+ info: info,
posMap: m,
objs: make(map[types2.Object]*ir.Name),
typs: make(map[types2.Type]*types.Type),
@@ -94,10 +108,13 @@ type irgen struct {
// Fully-instantiated generic types whose methods should be instantiated
instTypeList []*types.Type
+
+ dnum int // for generating unique dictionary variables
}
func (g *irgen) generate(noders []*noder) {
types.LocalPkg.Name = g.self.Name()
+ types.LocalPkg.Height = g.self.Height()
typecheck.TypecheckAllowed = true
// Prevent size calculations until we set the underlying type
@@ -132,7 +149,6 @@ Outer:
}
}
}
- types.LocalPkg.Height = myheight
// 2. Process all package-block type declarations. As with imports,
// we need to make sure all types are properly instantiated before
@@ -184,9 +200,9 @@ Outer:
// Create any needed stencils of generic functions
g.stencil()
- // For now, remove all generic functions from g.target.Decl, since they
- // have been used for stenciling, but don't compile. TODO: We will
- // eventually export any exportable generic functions.
+ // Remove all generic functions from g.target.Decl, since they have been
+ // used for stenciling, but don't compile. Generic functions will already
+ // have been marked for export as appropriate.
j := 0
for i, decl := range g.target.Decls {
if decl.Op() != ir.ODCLFUNC || !decl.Type().HasTParam() {
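The loop above that drops uncompiled generic declarations from g.target.Decls uses the standard in-place slice filtering idiom, with a write index j trailing the read index. A minimal sketch of that idiom on a plain string slice, with a hypothetical keep predicate:

package sketch

// filterInPlace keeps only the elements for which keep returns true,
// reusing the backing array and truncating the slice at the end.
func filterInPlace(decls []string, keep func(string) bool) []string {
	j := 0
	for _, d := range decls {
		if keep(d) {
			decls[j] = d
			j++
		}
	}
	return decls[:j]
}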
diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go
index 5fcad096c2..d417edcbd5 100644
--- a/src/cmd/compile/internal/noder/noder.go
+++ b/src/cmd/compile/internal/noder/noder.go
@@ -5,6 +5,7 @@
package noder
import (
+ "errors"
"fmt"
"go/constant"
"go/token"
@@ -109,25 +110,35 @@ func LoadPackage(filenames []string) {
// We also defer type alias declarations until phase 2
// to avoid cycles like #18640.
// TODO(gri) Remove this again once we have a fix for #25838.
-
- // Don't use range--typecheck can add closures to Target.Decls.
- base.Timer.Start("fe", "typecheck", "top1")
- for i := 0; i < len(typecheck.Target.Decls); i++ {
- n := typecheck.Target.Decls[i]
- if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.(*ir.Decl).X.Alias()) {
- typecheck.Target.Decls[i] = typecheck.Stmt(n)
- }
- }
-
+ //
// Phase 2: Variable assignments.
// To check interface assignments, depends on phase 1.
// Don't use range--typecheck can add closures to Target.Decls.
- base.Timer.Start("fe", "typecheck", "top2")
- for i := 0; i < len(typecheck.Target.Decls); i++ {
- n := typecheck.Target.Decls[i]
- if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).X.Alias() {
- typecheck.Target.Decls[i] = typecheck.Stmt(n)
+ for phase, name := range []string{"top1", "top2"} {
+ base.Timer.Start("fe", "typecheck", name)
+ for i := 0; i < len(typecheck.Target.Decls); i++ {
+ n := typecheck.Target.Decls[i]
+ op := n.Op()
+
+ // Closure function declarations are typechecked as part of the
+ // closure expression.
+ if fn, ok := n.(*ir.Func); ok && fn.OClosure != nil {
+ continue
+ }
+
+ // We don't actually add ir.ODCL nodes to Target.Decls. Make sure of that.
+ if op == ir.ODCL {
+ base.FatalfAt(n.Pos(), "unexpected top declaration: %v", op)
+ }
+
+ // Identify declarations that should be deferred to the second
+ // iteration.
+ late := op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).X.Alias()
+
+ if late == (phase == 1) {
+ typecheck.Target.Decls[i] = typecheck.Stmt(n)
+ }
}
}
@@ -136,16 +147,15 @@ func LoadPackage(filenames []string) {
base.Timer.Start("fe", "typecheck", "func")
var fcount int64
for i := 0; i < len(typecheck.Target.Decls); i++ {
- n := typecheck.Target.Decls[i]
- if n.Op() == ir.ODCLFUNC {
+ if fn, ok := typecheck.Target.Decls[i].(*ir.Func); ok {
if base.Flag.W > 1 {
- s := fmt.Sprintf("\nbefore typecheck %v", n)
- ir.Dump(s, n)
+ s := fmt.Sprintf("\nbefore typecheck %v", fn)
+ ir.Dump(s, fn)
}
- typecheck.FuncBody(n.(*ir.Func))
+ typecheck.FuncBody(fn)
if base.Flag.W > 1 {
- s := fmt.Sprintf("\nafter typecheck %v", n)
- ir.Dump(s, n)
+ s := fmt.Sprintf("\nafter typecheck %v", fn)
+ ir.Dump(s, fn)
}
fcount++
}
@@ -449,7 +459,7 @@ func (p *noder) varDecl(decl *syntax.VarDecl) []ir.Node {
type constState struct {
group *syntax.Group
typ ir.Ntype
- values []ir.Node
+ values syntax.Expr
iota int64
}
@@ -467,16 +477,15 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node {
names := p.declNames(ir.OLITERAL, decl.NameList)
typ := p.typeExprOrNil(decl.Type)
- var values []ir.Node
if decl.Values != nil {
- values = p.exprList(decl.Values)
- cs.typ, cs.values = typ, values
+ cs.typ, cs.values = typ, decl.Values
} else {
if typ != nil {
base.Errorf("const declaration cannot have type without expression")
}
- typ, values = cs.typ, cs.values
+ typ = cs.typ
}
+ values := p.exprList(cs.values)
nn := make([]ir.Node, 0, len(names))
for i, n := range names {
@@ -484,10 +493,16 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node {
base.Errorf("missing value in const declaration")
break
}
+
v := values[i]
if decl.Values == nil {
- v = ir.DeepCopy(n.Pos(), v)
+ ir.Visit(v, func(v ir.Node) {
+ if ir.HasUniquePos(v) {
+ v.SetPos(n.Pos())
+ }
+ })
}
+
typecheck.Declare(n, typecheck.DeclContext)
n.Ntype = typ
@@ -625,6 +640,9 @@ func (p *noder) params(params []*syntax.Field, dddOk bool) []*ir.Field {
for i, param := range params {
p.setlineno(param)
nodes = append(nodes, p.param(param, dddOk, i+1 == len(params)))
+ if i > 0 && params[i].Type == params[i-1].Type {
+ nodes[i].Ntype = nodes[i-1].Ntype
+ }
}
return nodes
}
@@ -914,6 +932,9 @@ func (p *noder) structType(expr *syntax.StructType) ir.Node {
} else {
n = ir.NewField(p.pos(field), p.name(field.Name), p.typeExpr(field.Type), nil)
}
+ if i > 0 && expr.FieldList[i].Type == expr.FieldList[i-1].Type {
+ n.Ntype = l[i-1].Ntype
+ }
if i < len(expr.TagList) && expr.TagList[i] != nil {
n.Note = constant.StringVal(p.basicLit(expr.TagList[i]))
}
@@ -977,6 +998,8 @@ func (p *noder) packname(expr syntax.Expr) *types.Sym {
}
func (p *noder) embedded(typ syntax.Expr) *ir.Field {
+ pos := p.pos(syntax.StartPos(typ))
+
op, isStar := typ.(*syntax.Operation)
if isStar {
if op.Op != syntax.Mul || op.Y != nil {
@@ -986,11 +1009,11 @@ func (p *noder) embedded(typ syntax.Expr) *ir.Field {
}
sym := p.packname(typ)
- n := ir.NewField(p.pos(typ), typecheck.Lookup(sym.Name), importName(sym).(ir.Ntype), nil)
+ n := ir.NewField(pos, typecheck.Lookup(sym.Name), importName(sym).(ir.Ntype), nil)
n.Embedded = true
if isStar {
- n.Ntype = ir.NewStarExpr(p.pos(op), n.Ntype)
+ n.Ntype = ir.NewStarExpr(pos, n.Ntype)
}
return n
}
@@ -1780,24 +1803,14 @@ func fakeRecv() *ir.Field {
}
func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node {
- xtype := p.typeExpr(expr.Type)
-
- fn := ir.NewFunc(p.pos(expr))
- fn.SetIsHiddenClosure(ir.CurFunc != nil)
-
- fn.Nname = ir.NewNameAt(p.pos(expr), ir.BlankNode.Sym()) // filled in by tcClosure
- fn.Nname.Func = fn
- fn.Nname.Ntype = xtype
- fn.Nname.Defn = fn
-
- clo := ir.NewClosureExpr(p.pos(expr), fn)
- fn.OClosure = clo
+ fn := ir.NewClosureFunc(p.pos(expr), ir.CurFunc != nil)
+ fn.Nname.Ntype = p.typeExpr(expr.Type)
p.funcBody(fn, expr.Body)
ir.FinishCaptureNames(base.Pos, ir.CurFunc, fn)
- return clo
+ return fn.OClosure
}
// A function named init is a special case.
@@ -1841,33 +1854,14 @@ func oldname(s *types.Sym) ir.Node {
}
func varEmbed(makeXPos func(syntax.Pos) src.XPos, name *ir.Name, decl *syntax.VarDecl, pragma *pragmas, haveEmbed bool) {
- if pragma.Embeds == nil {
- return
- }
-
pragmaEmbeds := pragma.Embeds
pragma.Embeds = nil
- pos := makeXPos(pragmaEmbeds[0].Pos)
-
- if !haveEmbed {
- base.ErrorfAt(pos, "go:embed only allowed in Go files that import \"embed\"")
- return
- }
- if len(decl.NameList) > 1 {
- base.ErrorfAt(pos, "go:embed cannot apply to multiple vars")
- return
- }
- if decl.Values != nil {
- base.ErrorfAt(pos, "go:embed cannot apply to var with initializer")
- return
- }
- if decl.Type == nil {
- // Should not happen, since Values == nil now.
- base.ErrorfAt(pos, "go:embed cannot apply to var without type")
+ if len(pragmaEmbeds) == 0 {
return
}
- if typecheck.DeclContext != ir.PEXTERN {
- base.ErrorfAt(pos, "go:embed cannot apply to var inside func")
+
+ if err := checkEmbed(decl, haveEmbed, typecheck.DeclContext != ir.PEXTERN); err != nil {
+ base.ErrorfAt(makeXPos(pragmaEmbeds[0].Pos), "%s", err)
return
}
@@ -1878,3 +1872,24 @@ func varEmbed(makeXPos func(syntax.Pos) src.XPos, name *ir.Name, decl *syntax.Va
typecheck.Target.Embeds = append(typecheck.Target.Embeds, name)
name.Embed = &embeds
}
+
+func checkEmbed(decl *syntax.VarDecl, haveEmbed, withinFunc bool) error {
+ switch {
+ case !haveEmbed:
+ return errors.New("go:embed only allowed in Go files that import \"embed\"")
+ case len(decl.NameList) > 1:
+ return errors.New("go:embed cannot apply to multiple vars")
+ case decl.Values != nil:
+ return errors.New("go:embed cannot apply to var with initializer")
+ case decl.Type == nil:
+ // Should not happen, since Values == nil now.
+ return errors.New("go:embed cannot apply to var without type")
+ case withinFunc:
+ return errors.New("go:embed cannot apply to var inside func")
+ case !types.AllowsGoVersion(types.LocalPkg, 1, 16):
+ return fmt.Errorf("go:embed requires go1.16 or later (-lang was set to %s; check go.mod)", base.Flag.Lang)
+
+ default:
+ return nil
+ }
+}
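The merged typecheck loop above selects work per pass with the comparison late == (phase == 1): early declarations run in pass 0, late ones (assignments and type aliases) in pass 1. A tiny sketch of just that selection logic, with placeholder declaration kinds:

package main

import "fmt"

func main() {
	decls := []string{"type T int", "var x = f()", "func f() int", "type A = T"}
	// Stand-in for the compiler's "assignment or type alias" test.
	isLate := func(d string) bool { return d == "var x = f()" || d == "type A = T" }

	for phase, name := range []string{"top1", "top2"} {
		fmt.Println("phase", name)
		for _, d := range decls {
			late := isLate(d)
			if late == (phase == 1) { // early decls in pass 0, late decls in pass 1
				fmt.Println("  typecheck:", d)
			}
		}
	}
}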
diff --git a/src/cmd/compile/internal/noder/object.go b/src/cmd/compile/internal/noder/object.go
index 82cce1ace0..581a3652ec 100644
--- a/src/cmd/compile/internal/noder/object.go
+++ b/src/cmd/compile/internal/noder/object.go
@@ -49,6 +49,11 @@ func (g *irgen) obj(obj types2.Object) *ir.Name {
// For imported objects, we use iimport directly instead of mapping
// the types2 representation.
if obj.Pkg() != g.self {
+ if sig, ok := obj.Type().(*types2.Signature); ok && sig.Recv() != nil {
+ // We can't import a method by name - must import the type
+ // and access the method from it.
+ base.FatalfAt(g.pos(obj), "tried to import a method directly")
+ }
sym := g.sym(obj)
if sym.Def != nil {
return sym.Def.(*ir.Name)
@@ -101,25 +106,28 @@ func (g *irgen) obj(obj types2.Object) *ir.Name {
case *types2.TypeName:
if obj.IsAlias() {
name = g.objCommon(pos, ir.OTYPE, g.sym(obj), class, g.typ(obj.Type()))
+ name.SetAlias(true)
} else {
name = ir.NewDeclNameAt(pos, ir.OTYPE, g.sym(obj))
g.objFinish(name, class, types.NewNamed(name))
}
case *types2.Var:
- var sym *types.Sym
- if class == ir.PPARAMOUT {
+ sym := g.sym(obj)
+ if class == ir.PPARAMOUT && (sym == nil || sym.IsBlank()) {
// Backend needs names for result parameters,
// even if they're anonymous or blank.
- switch obj.Name() {
- case "":
- sym = typecheck.LookupNum("~r", len(ir.CurFunc.Dcl)) // 'r' for "result"
- case "_":
- sym = typecheck.LookupNum("~b", len(ir.CurFunc.Dcl)) // 'b' for "blank"
+ nresults := 0
+ for _, n := range ir.CurFunc.Dcl {
+ if n.Class == ir.PPARAMOUT {
+ nresults++
+ }
+ }
+ if sym == nil {
+ sym = typecheck.LookupNum("~r", nresults) // 'r' for "result"
+ } else {
+ sym = typecheck.LookupNum("~b", nresults) // 'b' for "blank"
}
- }
- if sym == nil {
- sym = g.sym(obj)
}
name = g.objCommon(pos, ir.ONAME, sym, class, g.typ(obj.Type()))
@@ -164,9 +172,8 @@ func (g *irgen) objFinish(name *ir.Name, class ir.Class, typ *types.Type) {
break // methods are exported with their receiver type
}
if types.IsExported(sym.Name) {
- if name.Class == ir.PFUNC && name.Type().NumTParams() > 0 {
- base.FatalfAt(name.Pos(), "Cannot export a generic function (yet): %v", name)
- }
+ // Generic functions can be marked for export here, even
+ // though they will not be compiled until instantiated.
typecheck.Export(name)
}
if base.Flag.AsmHdr != "" && !name.Sym().Asm() {
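The PPARAMOUT branch above synthesizes names for anonymous and blank result parameters by counting the output parameters already declared in the function. A simplified sketch of that naming scheme, with the surrounding types reduced to plain values (the ~r/~b prefixes follow the diff):

package sketch

import "fmt"

type param struct {
	name  string
	isOut bool
}

// resultName returns the synthesized name for a result parameter whose
// source name is sym ("" for anonymous, "_" for blank), numbering it by
// how many result parameters have been declared so far.
func resultName(declared []param, sym string) string {
	n := 0
	for _, p := range declared {
		if p.isOut {
			n++
		}
	}
	if sym == "" {
		return fmt.Sprintf("~r%d", n) // 'r' for "result"
	}
	return fmt.Sprintf("~b%d", n) // 'b' for "blank"
}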
diff --git a/src/cmd/compile/internal/noder/stencil.go b/src/cmd/compile/internal/noder/stencil.go
index 3ebc8dff6d..8b53671dbe 100644
--- a/src/cmd/compile/internal/noder/stencil.go
+++ b/src/cmd/compile/internal/noder/stencil.go
@@ -8,18 +8,16 @@
package noder
import (
- "bytes"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
- "strings"
+ "go/constant"
)
-// For catching problems as we add more features
-// TODO(danscales): remove assertions or replace with base.FatalfAt()
func assert(p bool) {
if !p {
panic("assertion failed")
@@ -72,56 +70,95 @@ func (g *irgen) stencil() {
// instantiated function if it hasn't been created yet, and change
// to calling that function directly.
modified := false
- foundFuncInst := false
+ closureRequired := false
ir.Visit(decl, func(n ir.Node) {
if n.Op() == ir.OFUNCINST {
- // We found a function instantiation that is not
- // immediately called.
- foundFuncInst = true
+ // generic F, not immediately called
+ closureRequired = true
}
- if n.Op() != ir.OCALL || n.(*ir.CallExpr).X.Op() != ir.OFUNCINST {
- return
+ if n.Op() == ir.OMETHEXPR && len(n.(*ir.SelectorExpr).X.Type().RParams()) > 0 {
+ // T.M, where T is a generic type, not immediately called
+ closureRequired = true
}
- // We have found a function call using a generic function
- // instantiation.
- call := n.(*ir.CallExpr)
- inst := call.X.(*ir.InstExpr)
- st := g.getInstantiationForNode(inst)
- // Replace the OFUNCINST with a direct reference to the
- // new stenciled function
- call.X = st.Nname
- if inst.X.Op() == ir.OCALLPART {
- // When we create an instantiation of a method
- // call, we make it a function. So, move the
- // receiver to be the first arg of the function
- // call.
- withRecv := make([]ir.Node, len(call.Args)+1)
- dot := inst.X.(*ir.SelectorExpr)
- withRecv[0] = dot.X
- copy(withRecv[1:], call.Args)
- call.Args = withRecv
+ if n.Op() == ir.OCALL && n.(*ir.CallExpr).X.Op() == ir.OFUNCINST {
+ // We have found a function call using a generic function
+ // instantiation.
+ call := n.(*ir.CallExpr)
+ inst := call.X.(*ir.InstExpr)
+ st := g.getInstantiationForNode(inst)
+ // Replace the OFUNCINST with a direct reference to the
+ // new stenciled function
+ call.X = st.Nname
+ if inst.X.Op() == ir.OCALLPART {
+ // When we create an instantiation of a method
+ // call, we make it a function. So, move the
+ // receiver to be the first arg of the function
+ // call.
+ call.Args.Prepend(inst.X.(*ir.SelectorExpr).X)
+ }
+ // Add dictionary to argument list.
+ dict := reflectdata.GetDictionaryForInstantiation(inst)
+ call.Args.Prepend(dict)
+ // Transform the Call now, which changes OCALL
+ // to OCALLFUNC and does typecheckaste/assignconvfn.
+ transformCall(call)
+ modified = true
+ }
+ if n.Op() == ir.OCALLMETH && n.(*ir.CallExpr).X.Op() == ir.ODOTMETH && len(deref(n.(*ir.CallExpr).X.Type().Recv().Type).RParams()) > 0 {
+ // Method call on a generic type, which was instantiated by stenciling.
+ // Method calls on explicitly instantiated types will have an OFUNCINST
+ // and are handled above.
+ call := n.(*ir.CallExpr)
+ meth := call.X.(*ir.SelectorExpr)
+ targs := deref(meth.Type().Recv().Type).RParams()
+
+ t := meth.X.Type()
+ baseSym := deref(t).OrigSym
+ baseType := baseSym.Def.(*ir.Name).Type()
+ var gf *ir.Name
+ for _, m := range baseType.Methods().Slice() {
+ if meth.Sel == m.Sym {
+ gf = m.Nname.(*ir.Name)
+ break
+ }
+ }
+
+ st := g.getInstantiation(gf, targs, true)
+ call.SetOp(ir.OCALL)
+ call.X = st.Nname
+ dict := reflectdata.GetDictionaryForMethod(gf, targs)
+ call.Args.Prepend(dict, meth.X)
+ // Transform the Call now, which changes OCALL
+ // to OCALLFUNC and does typecheckaste/assignconvfn.
+ transformCall(call)
+ modified = true
}
- // Transform the Call now, which changes OCALL
- // to OCALLFUNC and does typecheckaste/assignconvfn.
- transformCall(call)
- modified = true
})
- // If we found an OFUNCINST without a corresponding call in the
- // above decl, then traverse the nodes of decl again (with
+ // If we found a reference to a generic instantiation that wasn't an
+ // immediate call, then traverse the nodes of decl again (with
// EditChildren rather than Visit), where we actually change the
- // OFUNCINST node to an ONAME for the instantiated function.
+ // reference to the instantiation to a closure that captures the
+ // dictionary, then does a direct call.
// EditChildren is more expensive than Visit, so we only do this
- // in the infrequent case of an OFUNCINSt without a corresponding
+ // in the infrequent case of an OFUNCINST without a corresponding
// call.
- if foundFuncInst {
+ if closureRequired {
var edit func(ir.Node) ir.Node
+ var outer *ir.Func
+ if f, ok := decl.(*ir.Func); ok {
+ outer = f
+ }
edit = func(x ir.Node) ir.Node {
- if x.Op() == ir.OFUNCINST {
- st := g.getInstantiationForNode(x.(*ir.InstExpr))
- return st.Nname
- }
ir.EditChildren(x, edit)
+ switch {
+ case x.Op() == ir.OFUNCINST:
+ // TODO: only set outer!=nil if this instantiation uses
+ // a type parameter from outer. See comment in buildClosure.
+ return g.buildClosure(outer, x)
+ case x.Op() == ir.OMETHEXPR && len(deref(x.(*ir.SelectorExpr).X.Type()).RParams()) > 0: // TODO: test for ptr-to-method case
+ return g.buildClosure(outer, x)
+ }
return x
}
edit(decl)
@@ -139,56 +176,305 @@ func (g *irgen) stencil() {
}
+// buildClosure makes a closure to implement x, an OFUNCINST or OMETHEXPR
+// of generic type. outer is the containing function (or nil if closure is
+// in a global assignment instead of a function).
+func (g *irgen) buildClosure(outer *ir.Func, x ir.Node) ir.Node {
+ pos := x.Pos()
+ var target *ir.Func // target instantiated function/method
+ var dictValue ir.Node // dictionary to use
+ var rcvrValue ir.Node // receiver, if a method value
+ typ := x.Type() // type of the closure
+ if x.Op() == ir.OFUNCINST {
+ inst := x.(*ir.InstExpr)
+
+ // Type arguments we're instantiating with.
+ targs := typecheck.TypesOf(inst.Targs)
+
+ // Find the generic function/method.
+ var gf *ir.Name
+ if inst.X.Op() == ir.ONAME {
+ // Instantiating a generic function call.
+ gf = inst.X.(*ir.Name)
+ } else if inst.X.Op() == ir.OCALLPART {
+ // Instantiating a method value x.M.
+ se := inst.X.(*ir.SelectorExpr)
+ rcvrValue = se.X
+ gf = se.Selection.Nname.(*ir.Name)
+ } else {
+ panic("unhandled")
+ }
+
+ // target is the instantiated function we're trying to call.
+ // For functions, the target expects a dictionary as its first argument.
+ // For method values, the target expects a dictionary and the receiver
+ // as its first two arguments.
+ target = g.getInstantiation(gf, targs, rcvrValue != nil)
+
+ // The value to use for the dictionary argument.
+ if rcvrValue == nil {
+ dictValue = reflectdata.GetDictionaryForFunc(gf, targs)
+ } else {
+ dictValue = reflectdata.GetDictionaryForMethod(gf, targs)
+ }
+ } else { // ir.OMETHEXPR
+ // Method expression T.M where T is a generic type.
+ // TODO: Is (*T).M right?
+ se := x.(*ir.SelectorExpr)
+ targs := se.X.Type().RParams()
+ if len(targs) == 0 {
+ if se.X.Type().IsPtr() {
+ targs = se.X.Type().Elem().RParams()
+ if len(targs) == 0 {
+ panic("bad")
+ }
+ }
+ }
+ t := se.X.Type()
+ baseSym := t.OrigSym
+ baseType := baseSym.Def.(*ir.Name).Type()
+ var gf *ir.Name
+ for _, m := range baseType.Methods().Slice() {
+ if se.Sel == m.Sym {
+ gf = m.Nname.(*ir.Name)
+ break
+ }
+ }
+ target = g.getInstantiation(gf, targs, true)
+ dictValue = reflectdata.GetDictionaryForMethod(gf, targs)
+ }
+
+ // Build a closure to implement a function instantiation.
+ //
+ // func f[T any] (int, int) (int, int) { ...whatever... }
+ //
+ // Then any reference to f[int] not directly called gets rewritten to
+ //
+ // .dictN := ... dictionary to use ...
+ // func(a0, a1 int) (r0, r1 int) {
+ // return .inst.f[int](.dictN, a0, a1)
+ // }
+ //
+ // Similarly for method expressions,
+ //
+ // type g[T any] ....
+ // func (rcvr g[T]) f(a0, a1 int) (r0, r1 int) { ... }
+ //
+ // Any reference to g[int].f not directly called gets rewritten to
+ //
+ // .dictN := ... dictionary to use ...
+ // func(rcvr g[int], a0, a1 int) (r0, r1 int) {
+ // return .inst.g[int].f(.dictN, rcvr, a0, a1)
+ // }
+ //
+ // Also method values
+ //
+ // var x g[int]
+ //
+ // Any reference to x.f not directly called gets rewritten to
+ //
+ // .dictN := ... dictionary to use ...
+ // x2 := x
+ // func(a0, a1 int) (r0, r1 int) {
+ // return .inst.g[int].f(.dictN, x2, a0, a1)
+ // }
+
+ // Make a new internal function.
+ fn := ir.NewClosureFunc(pos, outer != nil)
+ ir.NameClosure(fn.OClosure, outer)
+
+ // This is the dictionary we want to use.
+ // It may be a constant, or it may be a dictionary acquired from the outer function's dictionary.
+ // For the latter, dictVar is a variable in the outer function's scope, set to the subdictionary
+ // read from the outer function's dictionary.
+ var dictVar *ir.Name
+ var dictAssign *ir.AssignStmt
+ if outer != nil {
+ // Note: for now this is a compile-time constant, so we don't really need a closure
+ // to capture it (a wrapper function would work just as well). But eventually it
+ // will be a read of a subdictionary from the parent dictionary.
+ dictVar = ir.NewNameAt(pos, typecheck.LookupNum(".dict", g.dnum))
+ g.dnum++
+ dictVar.Class = ir.PAUTO
+ typed(types.Types[types.TUINTPTR], dictVar)
+ dictVar.Curfn = outer
+ dictAssign = ir.NewAssignStmt(pos, dictVar, dictValue)
+ dictAssign.SetTypecheck(1)
+ dictVar.Defn = dictAssign
+ outer.Dcl = append(outer.Dcl, dictVar)
+ }
+ // assign the receiver to a temporary.
+ var rcvrVar *ir.Name
+ var rcvrAssign ir.Node
+ if rcvrValue != nil {
+ rcvrVar = ir.NewNameAt(pos, typecheck.LookupNum(".rcvr", g.dnum))
+ g.dnum++
+ rcvrVar.Class = ir.PAUTO
+ typed(rcvrValue.Type(), rcvrVar)
+ rcvrVar.Curfn = outer
+ rcvrAssign = ir.NewAssignStmt(pos, rcvrVar, rcvrValue)
+ rcvrAssign.SetTypecheck(1)
+ rcvrVar.Defn = rcvrAssign
+ outer.Dcl = append(outer.Dcl, rcvrVar)
+ }
+
+ // Build formal argument and return lists.
+ var formalParams []*types.Field // arguments of closure
+ var formalResults []*types.Field // returns of closure
+ for i := 0; i < typ.NumParams(); i++ {
+ t := typ.Params().Field(i).Type
+ arg := ir.NewNameAt(pos, typecheck.LookupNum("a", i))
+ arg.Class = ir.PPARAM
+ typed(t, arg)
+ arg.Curfn = fn
+ fn.Dcl = append(fn.Dcl, arg)
+ f := types.NewField(pos, arg.Sym(), t)
+ f.Nname = arg
+ formalParams = append(formalParams, f)
+ }
+ for i := 0; i < typ.NumResults(); i++ {
+ t := typ.Results().Field(i).Type
+ result := ir.NewNameAt(pos, typecheck.LookupNum("r", i)) // TODO: names not needed?
+ result.Class = ir.PPARAMOUT
+ typed(t, result)
+ result.Curfn = fn
+ fn.Dcl = append(fn.Dcl, result)
+ f := types.NewField(pos, result.Sym(), t)
+ f.Nname = result
+ formalResults = append(formalResults, f)
+ }
+
+ // Build an internal function with the right signature.
+ closureType := types.NewSignature(x.Type().Pkg(), nil, nil, formalParams, formalResults)
+ typed(closureType, fn.Nname)
+ typed(x.Type(), fn.OClosure)
+ fn.SetTypecheck(1)
+
+ // Build body of closure. This involves just calling the wrapped function directly
+ // with the additional dictionary argument.
+
+ // First, figure out the dictionary argument.
+ var dict2Var ir.Node
+ if outer != nil {
+ // If there's an outer function, the dictionary value will be read from
+ // the dictionary of the outer function.
+ // TODO: only use a subdictionary if any of the instantiating types
+ // depend on the type params of the outer function.
+ dict2Var = ir.CaptureName(pos, fn, dictVar)
+ } else {
+ // No outer function, instantiating types are known concrete types.
+ dict2Var = dictValue
+ }
+ // Also capture the receiver variable.
+ var rcvr2Var *ir.Name
+ if rcvrValue != nil {
+ rcvr2Var = ir.CaptureName(pos, fn, rcvrVar)
+ }
+
+ // Build arguments to call inside the closure.
+ var args []ir.Node
+
+ // First the dictionary argument.
+ args = append(args, dict2Var)
+ // Then the receiver.
+ if rcvrValue != nil {
+ args = append(args, rcvr2Var)
+ }
+ // Then all the other arguments (including receiver for method expressions).
+ for i := 0; i < typ.NumParams(); i++ {
+ args = append(args, formalParams[i].Nname.(*ir.Name))
+ }
+
+ // Build call itself.
+ var innerCall ir.Node = ir.NewCallExpr(pos, ir.OCALL, target.Nname, args)
+ if len(formalResults) > 0 {
+ innerCall = ir.NewReturnStmt(pos, []ir.Node{innerCall})
+ }
+ // Finish building body of closure.
+ ir.CurFunc = fn
+ // TODO: set types directly here instead of using typecheck.Stmt
+ typecheck.Stmt(innerCall)
+ ir.CurFunc = nil
+ fn.Body = []ir.Node{innerCall}
+
+ // We're all done with the captured dictionary (and receiver, for method values).
+ ir.FinishCaptureNames(pos, outer, fn)
+
+ // Make a closure referencing our new internal function.
+ c := ir.UseClosure(fn.OClosure, g.target)
+ var init []ir.Node
+ if outer != nil {
+ init = append(init, dictAssign)
+ }
+ if rcvrValue != nil {
+ init = append(init, rcvrAssign)
+ }
+ return ir.InitExpr(init, c)
+}
+
// instantiateMethods instantiates all the methods of all fully-instantiated
// generic types that have been added to g.instTypeList.
func (g *irgen) instantiateMethods() {
for i := 0; i < len(g.instTypeList); i++ {
typ := g.instTypeList[i]
- // Get the base generic type by looking up the symbol of the
- // generic (uninstantiated) name.
- baseSym := typ.Sym().Pkg.Lookup(genericTypeName(typ.Sym()))
+ // Mark the runtime type as needed, since this ensures that the
+ // compiler emits the needed DWARF symbols, even when this
+ // instantiated type is in a different package from the local
+ // package.
+ typecheck.NeedRuntimeType(typ)
+ // Look up the method on the base generic type, since methods may
+ // not be set on imported instantiated types.
+ baseSym := typ.OrigSym
baseType := baseSym.Def.(*ir.Name).Type()
- for j, m := range typ.Methods().Slice() {
- name := m.Nname.(*ir.Name)
- targs := make([]ir.Node, len(typ.RParams()))
- for k, targ := range typ.RParams() {
- targs[k] = ir.TypeNode(targ)
- }
+ for j := range typ.Methods().Slice() {
baseNname := baseType.Methods().Slice()[j].Nname.(*ir.Name)
- name.Func = g.getInstantiation(baseNname, targs, true)
+ // Eagerly generate the instantiations that implement these methods.
+ // We don't use the instantiations here, just generate them (and any
+ // further instantiations those generate, etc.).
+ // Note that we don't set the Func for any methods on instantiated
+ // types. Their signatures don't match so that would be confusing.
+ // Direct method calls go directly to the instantiations, implemented above.
+ // Indirect method calls use wrappers generated in reflectcall. Those wrappers
+ // will use these instantiations if they are needed (for interface tables or reflection).
+ _ = g.getInstantiation(baseNname, typ.RParams(), true)
}
}
g.instTypeList = nil
}
-// genericSym returns the name of the base generic type for the type named by
-// sym. It simply returns the name obtained by removing everything after the
-// first bracket ("[").
-func genericTypeName(sym *types.Sym) string {
- return sym.Name[0:strings.Index(sym.Name, "[")]
-}
-
// getInstantiationForNode returns the function/method instantiation for a
// InstExpr node inst.
func (g *irgen) getInstantiationForNode(inst *ir.InstExpr) *ir.Func {
if meth, ok := inst.X.(*ir.SelectorExpr); ok {
- return g.getInstantiation(meth.Selection.Nname.(*ir.Name), inst.Targs, true)
+ return g.getInstantiation(meth.Selection.Nname.(*ir.Name), typecheck.TypesOf(inst.Targs), true)
} else {
- return g.getInstantiation(inst.X.(*ir.Name), inst.Targs, false)
+ return g.getInstantiation(inst.X.(*ir.Name), typecheck.TypesOf(inst.Targs), false)
}
}
// getInstantiation gets the instantiantion of the function or method nameNode
// with the type arguments targs. If the instantiated function is not already
// cached, then it calls genericSubst to create the new instantiation.
-func (g *irgen) getInstantiation(nameNode *ir.Name, targs []ir.Node, isMeth bool) *ir.Func {
- sym := makeInstName(nameNode.Sym(), targs, isMeth)
+func (g *irgen) getInstantiation(nameNode *ir.Name, targs []*types.Type, isMeth bool) *ir.Func {
+ if nameNode.Func.Body == nil && nameNode.Func.Inl != nil {
+ // If there is no body yet but Func.Inl exists, then we can
+ // import the whole generic body.
+ assert(nameNode.Func.Inl.Cost == 1 && nameNode.Sym().Pkg != types.LocalPkg)
+ typecheck.ImportBody(nameNode.Func)
+ assert(nameNode.Func.Inl.Body != nil)
+ nameNode.Func.Body = nameNode.Func.Inl.Body
+ nameNode.Func.Dcl = nameNode.Func.Inl.Dcl
+ }
+ sym := typecheck.MakeInstName(nameNode.Sym(), targs, isMeth)
st := g.target.Stencils[sym]
if st == nil {
// If instantiation doesn't exist yet, create it and add
// to the list of decls.
st = g.genericSubst(sym, nameNode, targs, isMeth)
+ // This ensures that the linker drops duplicates of this instantiation.
+ // All just works!
+ st.SetDupok(true)
g.target.Stencils[sym] = st
g.target.Decls = append(g.target.Decls, st)
if base.Flag.W > 1 {
@@ -198,56 +484,14 @@ func (g *irgen) getInstantiation(nameNode *ir.Name, targs []ir.Node, isMeth bool
return st
}
-// makeInstName makes the unique name for a stenciled generic function or method,
-// based on the name of the function fy=nsym and the targs. It replaces any
-// existing bracket type list in the name. makeInstName asserts that fnsym has
-// brackets in its name if and only if hasBrackets is true.
-// TODO(danscales): remove the assertions and the hasBrackets argument later.
-//
-// Names of declared generic functions have no brackets originally, so hasBrackets
-// should be false. Names of generic methods already have brackets, since the new
-// type parameter is specified in the generic type of the receiver (e.g. func
-// (func (v *value[T]).set(...) { ... } has the original name (*value[T]).set.
-//
-// The standard naming is something like: 'genFn[int,bool]' for functions and
-// '(*genType[int,bool]).methodName' for methods
-func makeInstName(fnsym *types.Sym, targs []ir.Node, hasBrackets bool) *types.Sym {
- b := bytes.NewBufferString("")
- name := fnsym.Name
- i := strings.Index(name, "[")
- assert(hasBrackets == (i >= 0))
- if i >= 0 {
- b.WriteString(name[0:i])
- } else {
- b.WriteString(name)
- }
- b.WriteString("[")
- for i, targ := range targs {
- if i > 0 {
- b.WriteString(",")
- }
- b.WriteString(targ.Type().String())
- }
- b.WriteString("]")
- if i >= 0 {
- i2 := strings.Index(name[i:], "]")
- assert(i2 >= 0)
- b.WriteString(name[i+i2+1:])
- }
- return typecheck.Lookup(b.String())
-}
-
// Struct containing info needed for doing the substitution as we create the
// instantiation of a generic function with specified type arguments.
type subster struct {
- g *irgen
- isMethod bool // If a method is being instantiated
- newf *ir.Func // Func node for the new stenciled function
- tparams []*types.Field
- targs []ir.Node
- // The substitution map from name nodes in the generic function to the
- // name nodes in the new stenciled function.
- vars map[*ir.Name]*ir.Name
+ g *irgen
+ isMethod bool // If a method is being instantiated
+ newf *ir.Func // Func node for the new stenciled function
+ ts typecheck.Tsubster
+ dictionary *ir.Name // Name of dictionary variable
}
// genericSubst returns a new function with name newsym. The function is an
@@ -256,19 +500,20 @@ type subster struct {
// function type where the receiver becomes the first parameter. Otherwise the
// instantiated method would still need to be transformed by later compiler
// phases.
-func (g *irgen) genericSubst(newsym *types.Sym, nameNode *ir.Name, targs []ir.Node, isMethod bool) *ir.Func {
- var tparams []*types.Field
+func (g *irgen) genericSubst(newsym *types.Sym, nameNode *ir.Name, targs []*types.Type, isMethod bool) *ir.Func {
+ var tparams []*types.Type
if isMethod {
// Get the type params from the method receiver (after skipping
// over any pointer)
recvType := nameNode.Type().Recv().Type
recvType = deref(recvType)
- tparams = make([]*types.Field, len(recvType.RParams()))
- for i, rparam := range recvType.RParams() {
- tparams[i] = types.NewField(src.NoXPos, nil, rparam)
- }
+ tparams = recvType.RParams()
} else {
- tparams = nameNode.Type().TParams().Fields().Slice()
+ fields := nameNode.Type().TParams().Fields().Slice()
+ tparams = make([]*types.Type, len(fields))
+ for i, f := range fields {
+ tparams[i] = f.Type
+ }
}
gf := nameNode.Func
// Pos of the instantiated function is same as the generic function
@@ -289,72 +534,172 @@ func (g *irgen) genericSubst(newsym *types.Sym, nameNode *ir.Name, targs []ir.No
g: g,
isMethod: isMethod,
newf: newf,
- tparams: tparams,
- targs: targs,
- vars: make(map[*ir.Name]*ir.Name),
+ ts: typecheck.Tsubster{
+ Tparams: tparams,
+ Targs: targs,
+ Vars: make(map[*ir.Name]*ir.Name),
+ },
}
- newf.Dcl = make([]*ir.Name, len(gf.Dcl))
- for i, n := range gf.Dcl {
- newf.Dcl[i] = subst.node(n).(*ir.Name)
- }
+ newf.Dcl = make([]*ir.Name, 0, len(gf.Dcl)+1)
- // Ugly: we have to insert the Name nodes of the parameters/results into
+ // Replace the types in the function signature.
+ // Ugly: also, we have to insert the Name nodes of the parameters/results into
// the function type. The current function type has no Nname fields set,
// because it came via conversion from the types2 type.
oldt := nameNode.Type()
// We also transform a generic method type to the corresponding
- // instantiated function type where the receiver is the first parameter.
+ // instantiated function type where the dictionary is the first parameter.
+ dictionarySym := newsym.Pkg.Lookup(".dict")
+ dictionaryType := types.Types[types.TUINTPTR]
+ dictionaryName := ir.NewNameAt(gf.Pos(), dictionarySym)
+ typed(dictionaryType, dictionaryName)
+ dictionaryName.Class = ir.PPARAM
+ dictionaryName.Curfn = newf
+ newf.Dcl = append(newf.Dcl, dictionaryName)
+ for _, n := range gf.Dcl {
+ if n.Sym().Name == ".dict" {
+ panic("already has dictionary")
+ }
+ newf.Dcl = append(newf.Dcl, subst.localvar(n))
+ }
+ dictionaryArg := types.NewField(gf.Pos(), dictionarySym, dictionaryType)
+ dictionaryArg.Nname = dictionaryName
+ subst.dictionary = dictionaryName
+ var args []*types.Field
+ args = append(args, dictionaryArg)
+ args = append(args, oldt.Recvs().FieldSlice()...)
+ args = append(args, oldt.Params().FieldSlice()...)
newt := types.NewSignature(oldt.Pkg(), nil, nil,
- subst.fields(ir.PPARAM, append(oldt.Recvs().FieldSlice(), oldt.Params().FieldSlice()...), newf.Dcl),
+ subst.fields(ir.PPARAM, args, newf.Dcl),
subst.fields(ir.PPARAMOUT, oldt.Results().FieldSlice(), newf.Dcl))
- newf.Nname.SetType(newt)
+ typed(newt, newf.Nname)
ir.MarkFunc(newf.Nname)
newf.SetTypecheck(1)
- newf.Nname.SetTypecheck(1)
// Make sure name/type of newf is set before substituting the body.
newf.Body = subst.list(gf.Body)
+
+ // Add code to check that the dictionary is correct.
+ newf.Body.Prepend(g.checkDictionary(dictionaryName, targs)...)
+
ir.CurFunc = savef
+ // Add any new, fully instantiated types seen during the substitution to
+ // g.instTypeList.
+ g.instTypeList = append(g.instTypeList, subst.ts.InstTypeList...)
return newf
}
-// node is like DeepCopy(), but creates distinct ONAME nodes, and also descends
-// into closures. It substitutes type arguments for type parameters in all the new
-// nodes.
+// localvar creates a new name node for the specified local variable and enters it
+// in subst.ts.Vars. It substitutes type arguments for type parameters in the type of
+// name as needed.
+func (subst *subster) localvar(name *ir.Name) *ir.Name {
+ m := ir.NewNameAt(name.Pos(), name.Sym())
+ if name.IsClosureVar() {
+ m.SetIsClosureVar(true)
+ }
+ m.SetType(subst.ts.Typ(name.Type()))
+ m.BuiltinOp = name.BuiltinOp
+ m.Curfn = subst.newf
+ m.Class = name.Class
+ assert(name.Class != ir.PEXTERN && name.Class != ir.PFUNC)
+ m.Func = name.Func
+ subst.ts.Vars[name] = m
+ m.SetTypecheck(1)
+ return m
+}
+
+// checkDictionary returns code that does runtime consistency checks
+// between the dictionary and the types it should contain.
+func (g *irgen) checkDictionary(name *ir.Name, targs []*types.Type) (code []ir.Node) {
+ if false {
+ return // checking turned off
+ }
+ // TODO: when moving to GCshape, this test will become harder. Call into
+ // runtime to check the expected shape is correct?
+ pos := name.Pos()
+ // Convert dictionary to *[N]uintptr
+ d := ir.NewConvExpr(pos, ir.OCONVNOP, types.Types[types.TUNSAFEPTR], name)
+ d.SetTypecheck(1)
+ d = ir.NewConvExpr(pos, ir.OCONVNOP, types.NewArray(types.Types[types.TUINTPTR], int64(len(targs))).PtrTo(), d)
+ d.SetTypecheck(1)
+
+ // Check that each type entry in the dictionary is correct.
+ for i, t := range targs {
+ want := reflectdata.TypePtr(t)
+ typed(types.Types[types.TUINTPTR], want)
+ deref := ir.NewStarExpr(pos, d)
+ typed(d.Type().Elem(), deref)
+ idx := ir.NewConstExpr(constant.MakeUint64(uint64(i)), name) // TODO: what to set orig to?
+ typed(types.Types[types.TUINTPTR], idx)
+ got := ir.NewIndexExpr(pos, deref, idx)
+ typed(types.Types[types.TUINTPTR], got)
+ cond := ir.NewBinaryExpr(pos, ir.ONE, want, got)
+ typed(types.Types[types.TBOOL], cond)
+ panicArg := ir.NewNilExpr(pos)
+ typed(types.NewInterface(types.LocalPkg, nil), panicArg)
+ then := ir.NewUnaryExpr(pos, ir.OPANIC, panicArg)
+ then.SetTypecheck(1)
+ x := ir.NewIfStmt(pos, cond, []ir.Node{then}, nil)
+ x.SetTypecheck(1)
+ code = append(code, x)
+ }
+ return
+}
+
+// getDictionaryType returns a *runtime._type from the dictionary corresponding to the input type.
+// The input type must be a type parameter (TODO: or a local derived type).
+func (subst *subster) getDictionaryType(pos src.XPos, t *types.Type) ir.Node {
+ tparams := subst.ts.Tparams
+ var i int
+ for i = 0; i < len(tparams); i++ {
+ if t == tparams[i] {
+ break
+ }
+ }
+ if i == len(tparams) {
+ base.Fatalf(fmt.Sprintf("couldn't find type param %+v", t))
+ }
+
+ // Convert dictionary to *[N]uintptr
+ // All entries in the dictionary are pointers. They all point to static data, though, so we
+ // treat them as uintptrs so the GC doesn't need to keep track of them.
+ d := ir.NewConvExpr(pos, ir.OCONVNOP, types.Types[types.TUNSAFEPTR], subst.dictionary)
+ d.SetTypecheck(1)
+ d = ir.NewConvExpr(pos, ir.OCONVNOP, types.NewArray(types.Types[types.TUINTPTR], int64(len(tparams))).PtrTo(), d)
+ d.SetTypecheck(1)
+
+ // Load entry i out of the dictionary.
+ deref := ir.NewStarExpr(pos, d)
+ typed(d.Type().Elem(), deref)
+ idx := ir.NewConstExpr(constant.MakeUint64(uint64(i)), subst.dictionary) // TODO: what to set orig to?
+ typed(types.Types[types.TUINTPTR], idx)
+ r := ir.NewIndexExpr(pos, deref, idx)
+ typed(types.Types[types.TUINT8].PtrTo(), r) // standard typing of a *runtime._type in the compiler is *byte
+ return r
+}
+
+// node is like DeepCopy(), but substitutes ONAME nodes based on subst.ts.Vars, and
+// also descends into closures. It substitutes type arguments for type parameters
+// in all the new nodes.
func (subst *subster) node(n ir.Node) ir.Node {
// Use closure to capture all state needed by the ir.EditChildren argument.
var edit func(ir.Node) ir.Node
edit = func(x ir.Node) ir.Node {
switch x.Op() {
case ir.OTYPE:
- return ir.TypeNode(subst.typ(x.Type()))
+ return ir.TypeNode(subst.ts.Typ(x.Type()))
case ir.ONAME:
- name := x.(*ir.Name)
- if v := subst.vars[name]; v != nil {
+ if v := subst.ts.Vars[x.(*ir.Name)]; v != nil {
return v
}
- m := ir.NewNameAt(name.Pos(), name.Sym())
- if name.IsClosureVar() {
- m.SetIsClosureVar(true)
- }
- t := x.Type()
- if t == nil {
- assert(name.BuiltinOp != 0)
- } else {
- newt := subst.typ(t)
- m.SetType(newt)
- }
- m.BuiltinOp = name.BuiltinOp
- m.Curfn = subst.newf
- m.Class = name.Class
- m.Func = name.Func
- subst.vars[name] = m
- m.SetTypecheck(1)
- return m
+ return x
+ case ir.ONONAME:
+ // This handles the identifier in a type switch guard
+ fallthrough
case ir.OLITERAL, ir.ONIL:
if x.Sym() != nil {
return x
@@ -374,50 +719,48 @@ func (subst *subster) node(n ir.Node) ir.Node {
base.Fatalf(fmt.Sprintf("Nil type for %v", x))
}
} else if x.Op() != ir.OCLOSURE {
- m.SetType(subst.typ(x.Type()))
+ m.SetType(subst.ts.Typ(x.Type()))
}
}
ir.EditChildren(m, edit)
- if x.Typecheck() == 3 {
- // These are nodes whose transforms were delayed until
- // their instantiated type was known.
- m.SetTypecheck(1)
- if typecheck.IsCmp(x.Op()) {
- transformCompare(m.(*ir.BinaryExpr))
- } else {
- switch x.Op() {
- case ir.OSLICE, ir.OSLICE3:
- transformSlice(m.(*ir.SliceExpr))
-
- case ir.OADD:
- m = transformAdd(m.(*ir.BinaryExpr))
-
- case ir.OINDEX:
- transformIndex(m.(*ir.IndexExpr))
-
- case ir.OAS2:
- as2 := m.(*ir.AssignListStmt)
- transformAssign(as2, as2.Lhs, as2.Rhs)
-
- case ir.OAS:
- as := m.(*ir.AssignStmt)
+ m.SetTypecheck(1)
+ if typecheck.IsCmp(x.Op()) {
+ transformCompare(m.(*ir.BinaryExpr))
+ } else {
+ switch x.Op() {
+ case ir.OSLICE, ir.OSLICE3:
+ transformSlice(m.(*ir.SliceExpr))
+
+ case ir.OADD:
+ m = transformAdd(m.(*ir.BinaryExpr))
+
+ case ir.OINDEX:
+ transformIndex(m.(*ir.IndexExpr))
+
+ case ir.OAS2:
+ as2 := m.(*ir.AssignListStmt)
+ transformAssign(as2, as2.Lhs, as2.Rhs)
+
+ case ir.OAS:
+ as := m.(*ir.AssignStmt)
+ if as.Y != nil {
+ // transformAssign doesn't handle the case
+ // of zeroing assignment of a dcl (rhs[0] is nil).
lhs, rhs := []ir.Node{as.X}, []ir.Node{as.Y}
transformAssign(as, lhs, rhs)
+ }
- case ir.OASOP:
- as := m.(*ir.AssignOpStmt)
- transformCheckAssign(as, as.X)
+ case ir.OASOP:
+ as := m.(*ir.AssignOpStmt)
+ transformCheckAssign(as, as.X)
- case ir.ORETURN:
- transformReturn(m.(*ir.ReturnStmt))
+ case ir.ORETURN:
+ transformReturn(m.(*ir.ReturnStmt))
- case ir.OSEND:
- transformSend(m.(*ir.SendStmt))
+ case ir.OSEND:
+ transformSend(m.(*ir.SendStmt))
- default:
- base.Fatalf("Unexpected node with Typecheck() == 3")
- }
}
}
@@ -506,24 +849,18 @@ func (subst *subster) node(n ir.Node) ir.Node {
}
case ir.OCLOSURE:
+ // We're going to create a new closure from scratch, so clear m
+ // to avoid using the ir.Copy by accident until we reassign it.
+ m = nil
+
x := x.(*ir.ClosureExpr)
// Need to duplicate x.Func.Nname, x.Func.Dcl, x.Func.ClosureVars, and
// x.Func.Body.
oldfn := x.Func
- newfn := ir.NewFunc(oldfn.Pos())
- if oldfn.ClosureCalled() {
- newfn.SetClosureCalled(true)
- }
- newfn.SetIsHiddenClosure(true)
- m.(*ir.ClosureExpr).Func = newfn
- // Closure name can already have brackets, if it derives
- // from a generic method
- newsym := makeInstName(oldfn.Nname.Sym(), subst.targs, subst.isMethod)
- newfn.Nname = ir.NewNameAt(oldfn.Nname.Pos(), newsym)
- newfn.Nname.Func = newfn
- newfn.Nname.Defn = newfn
- ir.MarkFunc(newfn.Nname)
- newfn.OClosure = m.(*ir.ClosureExpr)
+ newfn := ir.NewClosureFunc(oldfn.Pos(), subst.newf != nil)
+ ir.NameClosure(newfn.OClosure, subst.newf)
+
+ newfn.SetClosureCalled(oldfn.ClosureCalled())
saveNewf := subst.newf
ir.CurFunc = newfn
@@ -531,8 +868,8 @@ func (subst *subster) node(n ir.Node) ir.Node {
newfn.Dcl = subst.namelist(oldfn.Dcl)
newfn.ClosureVars = subst.namelist(oldfn.ClosureVars)
- typed(subst.typ(oldfn.Nname.Type()), newfn.Nname)
- typed(newfn.Nname.Type(), m)
+ typed(subst.ts.Typ(oldfn.Nname.Type()), newfn.Nname)
+ typed(newfn.Nname.Type(), newfn.OClosure)
newfn.SetTypecheck(1)
// Make sure type of closure function is set before doing body.
@@ -540,7 +877,36 @@ func (subst *subster) node(n ir.Node) ir.Node {
subst.newf = saveNewf
ir.CurFunc = saveNewf
- subst.g.target.Decls = append(subst.g.target.Decls, newfn)
+ m = ir.UseClosure(newfn.OClosure, subst.g.target)
+ m.(*ir.ClosureExpr).SetInit(subst.list(x.Init()))
+
+ case ir.OCONVIFACE:
+ x := x.(*ir.ConvExpr)
+ // TODO: handle converting from derived types. For now, just from naked
+ // type parameters.
+ if x.X.Type().IsTypeParam() {
+ // Load the actual runtime._type of the type parameter from the dictionary.
+ rt := subst.getDictionaryType(m.Pos(), x.X.Type())
+
+ // At this point, m is an interface type with a data word we want.
+ // But the type word represents a gcshape type, which we don't want.
+ // Replace with the instantiated type loaded from the dictionary.
+ m = ir.NewUnaryExpr(m.Pos(), ir.OIDATA, m)
+ typed(types.Types[types.TUNSAFEPTR], m)
+ m = ir.NewBinaryExpr(m.Pos(), ir.OEFACE, rt, m)
+ if !x.Type().IsEmptyInterface() {
+ // We just built an empty interface{}. Type it as such,
+ // then assert it to the required non-empty interface.
+ typed(types.NewInterface(types.LocalPkg, nil), m)
+ m = ir.NewTypeAssertExpr(m.Pos(), m, nil)
+ }
+ typed(x.Type(), m)
+ // TODO: we're throwing away the type word of the original version
+ // of m here (it would be OITAB(m)), which probably took some
+ // work to generate. Can we avoid generating it at all?
+ // (The linker will throw them away if not needed, so it would just
+ // save toolchain work, not binary size.)
+ }
}
return m
}
@@ -551,7 +917,7 @@ func (subst *subster) node(n ir.Node) ir.Node {
func (subst *subster) namelist(l []*ir.Name) []*ir.Name {
s := make([]*ir.Name, len(l))
for i, n := range l {
- s[i] = subst.node(n).(*ir.Name)
+ s[i] = subst.localvar(n)
if n.Defn != nil {
s[i].Defn = subst.node(n.Defn)
}
@@ -570,302 +936,6 @@ func (subst *subster) list(l []ir.Node) []ir.Node {
return s
}
-// tstruct substitutes type params in types of the fields of a structure type. For
-// each field, if Nname is set, tstruct also translates the Nname using
-// subst.vars, if Nname is in subst.vars. To always force the creation of a new
-// (top-level) struct, regardless of whether anything changed with the types or
-// names of the struct's fields, set force to true.
-func (subst *subster) tstruct(t *types.Type, force bool) *types.Type {
- if t.NumFields() == 0 {
- if t.HasTParam() {
- // For an empty struct, we need to return a new type,
- // since it may now be fully instantiated (HasTParam
- // becomes false).
- return types.NewStruct(t.Pkg(), nil)
- }
- return t
- }
- var newfields []*types.Field
- if force {
- newfields = make([]*types.Field, t.NumFields())
- }
- for i, f := range t.Fields().Slice() {
- t2 := subst.typ(f.Type)
- if (t2 != f.Type || f.Nname != nil) && newfields == nil {
- newfields = make([]*types.Field, t.NumFields())
- for j := 0; j < i; j++ {
- newfields[j] = t.Field(j)
- }
- }
- if newfields != nil {
- // TODO(danscales): make sure this works for the field
- // names of embedded types (which should keep the name of
- // the type param, not the instantiated type).
- newfields[i] = types.NewField(f.Pos, f.Sym, t2)
- if f.Nname != nil {
- // f.Nname may not be in subst.vars[] if this is
- // a function name or a function instantiation type
- // that we are translating
- v := subst.vars[f.Nname.(*ir.Name)]
- // Be careful not to put a nil var into Nname,
- // since Nname is an interface, so it would be a
- // non-nil interface.
- if v != nil {
- newfields[i].Nname = v
- }
- }
- }
- }
- if newfields != nil {
- return types.NewStruct(t.Pkg(), newfields)
- }
- return t
-
-}
-
-// tinter substitutes type params in types of the methods of an interface type.
-func (subst *subster) tinter(t *types.Type) *types.Type {
- if t.Methods().Len() == 0 {
- return t
- }
- var newfields []*types.Field
- for i, f := range t.Methods().Slice() {
- t2 := subst.typ(f.Type)
- if (t2 != f.Type || f.Nname != nil) && newfields == nil {
- newfields = make([]*types.Field, t.Methods().Len())
- for j := 0; j < i; j++ {
- newfields[j] = t.Methods().Index(j)
- }
- }
- if newfields != nil {
- newfields[i] = types.NewField(f.Pos, f.Sym, t2)
- }
- }
- if newfields != nil {
- return types.NewInterface(t.Pkg(), newfields)
- }
- return t
-}
-
-// instTypeName creates a name for an instantiated type, based on the name of the
-// generic type and the type args
-func instTypeName(name string, targs []*types.Type) string {
- b := bytes.NewBufferString(name)
- b.WriteByte('[')
- for i, targ := range targs {
- if i > 0 {
- b.WriteByte(',')
- }
- b.WriteString(targ.String())
- }
- b.WriteByte(']')
- return b.String()
-}
-
-// typ computes the type obtained by substituting any type parameter in t with the
-// corresponding type argument in subst. If t contains no type parameters, the
-// result is t; otherwise the result is a new type. It deals with recursive types
-// by using TFORW types and finding partially or fully created types via sym.Def.
-func (subst *subster) typ(t *types.Type) *types.Type {
- if !t.HasTParam() && t.Kind() != types.TFUNC {
- // Note: function types need to be copied regardless, as the
- // types of closures may contain declarations that need
- // to be copied. See #45738.
- return t
- }
-
- if t.Kind() == types.TTYPEPARAM {
- for i, tp := range subst.tparams {
- if tp.Type == t {
- return subst.targs[i].Type()
- }
- }
- // If t is a simple typeparam T, then t has the name/symbol 'T'
- // and t.Underlying() == t.
- //
- // However, consider the type definition: 'type P[T any] T'. We
- // might use this definition so we can have a variant of type T
- // that we can add new methods to. Suppose t is a reference to
- // P[T]. t has the name 'P[T]', but its kind is TTYPEPARAM,
- // because P[T] is defined as T. If we look at t.Underlying(), it
- // is different, because the name of t.Underlying() is 'T' rather
- // than 'P[T]'. But the kind of t.Underlying() is also TTYPEPARAM.
- // In this case, we do the needed recursive substitution in the
- // case statement below.
- if t.Underlying() == t {
- // t is a simple typeparam that didn't match anything in tparam
- return t
- }
- // t is a more complex typeparam (e.g. P[T], as above, whose
- // definition is just T).
- assert(t.Sym() != nil)
- }
-
- var newsym *types.Sym
- var neededTargs []*types.Type
- var forw *types.Type
-
- if t.Sym() != nil {
- // Translate the type params for this type according to
- // the tparam/targs mapping from subst.
- neededTargs = make([]*types.Type, len(t.RParams()))
- for i, rparam := range t.RParams() {
- neededTargs[i] = subst.typ(rparam)
- }
- // For a named (defined) type, we have to change the name of the
- // type as well. We do this first, so we can look up if we've
- // already seen this type during this substitution or other
- // definitions/substitutions.
- genName := genericTypeName(t.Sym())
- newsym = t.Sym().Pkg.Lookup(instTypeName(genName, neededTargs))
- if newsym.Def != nil {
- // We've already created this instantiated defined type.
- return newsym.Def.Type()
- }
-
- // In order to deal with recursive generic types, create a TFORW
- // type initially and set the Def field of its sym, so it can be
- // found if this type appears recursively within the type.
- forw = newIncompleteNamedType(t.Pos(), newsym)
- //println("Creating new type by sub", newsym.Name, forw.HasTParam())
- forw.SetRParams(neededTargs)
- }
-
- var newt *types.Type
-
- switch t.Kind() {
- case types.TTYPEPARAM:
- if t.Sym() == newsym {
- // The substitution did not change the type.
- return t
- }
- // Substitute the underlying typeparam (e.g. T in P[T], see
- // the example describing type P[T] above).
- newt = subst.typ(t.Underlying())
- assert(newt != t)
-
- case types.TARRAY:
- elem := t.Elem()
- newelem := subst.typ(elem)
- if newelem != elem {
- newt = types.NewArray(newelem, t.NumElem())
- }
-
- case types.TPTR:
- elem := t.Elem()
- newelem := subst.typ(elem)
- if newelem != elem {
- newt = types.NewPtr(newelem)
- }
-
- case types.TSLICE:
- elem := t.Elem()
- newelem := subst.typ(elem)
- if newelem != elem {
- newt = types.NewSlice(newelem)
- }
-
- case types.TSTRUCT:
- newt = subst.tstruct(t, false)
- if newt == t {
- newt = nil
- }
-
- case types.TFUNC:
- newrecvs := subst.tstruct(t.Recvs(), false)
- newparams := subst.tstruct(t.Params(), false)
- newresults := subst.tstruct(t.Results(), false)
- if newrecvs != t.Recvs() || newparams != t.Params() || newresults != t.Results() {
- // If any types have changed, then the all the fields of
- // of recv, params, and results must be copied, because they have
- // offset fields that are dependent, and so must have an
- // independent copy for each new signature.
- var newrecv *types.Field
- if newrecvs.NumFields() > 0 {
- if newrecvs == t.Recvs() {
- newrecvs = subst.tstruct(t.Recvs(), true)
- }
- newrecv = newrecvs.Field(0)
- }
- if newparams == t.Params() {
- newparams = subst.tstruct(t.Params(), true)
- }
- if newresults == t.Results() {
- newresults = subst.tstruct(t.Results(), true)
- }
- newt = types.NewSignature(t.Pkg(), newrecv, t.TParams().FieldSlice(), newparams.FieldSlice(), newresults.FieldSlice())
- }
-
- case types.TINTER:
- newt = subst.tinter(t)
- if newt == t {
- newt = nil
- }
-
- case types.TMAP:
- newkey := subst.typ(t.Key())
- newval := subst.typ(t.Elem())
- if newkey != t.Key() || newval != t.Elem() {
- newt = types.NewMap(newkey, newval)
- }
-
- case types.TCHAN:
- elem := t.Elem()
- newelem := subst.typ(elem)
- if newelem != elem {
- newt = types.NewChan(newelem, t.ChanDir())
- if !newt.HasTParam() {
- // TODO(danscales): not sure why I have to do this
- // only for channels.....
- types.CheckSize(newt)
- }
- }
- }
- if newt == nil {
- // Even though there were typeparams in the type, there may be no
- // change if this is a function type for a function call (which will
- // have its own tparams/targs in the function instantiation).
- return t
- }
-
- if t.Sym() == nil {
- // Not a named type, so there was no forwarding type and there are
- // no methods to substitute.
- assert(t.Methods().Len() == 0)
- return newt
- }
-
- forw.SetUnderlying(newt)
- newt = forw
-
- if t.Kind() != types.TINTER && t.Methods().Len() > 0 {
- // Fill in the method info for the new type.
- var newfields []*types.Field
- newfields = make([]*types.Field, t.Methods().Len())
- for i, f := range t.Methods().Slice() {
- t2 := subst.typ(f.Type)
- oldsym := f.Nname.Sym()
- newsym := makeInstName(oldsym, subst.targs, true)
- var nname *ir.Name
- if newsym.Def != nil {
- nname = newsym.Def.(*ir.Name)
- } else {
- nname = ir.NewNameAt(f.Pos, newsym)
- nname.SetType(t2)
- newsym.Def = nname
- }
- newfields[i] = types.NewField(f.Pos, f.Sym, t2)
- newfields[i].Nname = nname
- }
- newt.Methods().Set(newfields)
- if !newt.HasTParam() {
- // Generate all the methods for a new fully-instantiated type.
- subst.g.instTypeList = append(subst.g.instTypeList, newt)
- }
- }
- return newt
-}
-
// fields sets the Nname field for the Field nodes inside a type signature, based
// on the corresponding in/out parameters in dcl. It depends on the in and out
// parameters being in order in dcl.
@@ -885,12 +955,15 @@ func (subst *subster) fields(class ir.Class, oldfields []*types.Field, dcl []*ir
newfields := make([]*types.Field, len(oldfields))
for j := range oldfields {
newfields[j] = oldfields[j].Copy()
- newfields[j].Type = subst.typ(oldfields[j].Type)
- // A param field will be missing from dcl if its name is
+ newfields[j].Type = subst.ts.Typ(oldfields[j].Type)
+ // A PPARAM field will be missing from dcl if its name is
// unspecified or specified as "_". So, we compare the dcl sym
- // with the field sym. If they don't match, this dcl (if there is
- // one left) must apply to a later field.
- if i < len(dcl) && dcl[i].Sym() == oldfields[j].Sym {
+ // with the field sym (or sym of the field's Nname node). (Unnamed
+ // results still have a name like ~r2 in their Nname node.) If
+ // they don't match, this dcl (if there is one left) must apply to
+ // a later field.
+ if i < len(dcl) && (dcl[i].Sym() == oldfields[j].Sym ||
+ (oldfields[j].Nname != nil && dcl[i].Sym() == oldfields[j].Nname.Sym())) {
newfields[j].Nname = dcl[i]
i++
}
@@ -905,13 +978,3 @@ func deref(t *types.Type) *types.Type {
}
return t
}
-
-// newIncompleteNamedType returns a TFORW type t with name specified by sym, such
-// that t.nod and sym.Def are set correctly.
-func newIncompleteNamedType(pos src.XPos, sym *types.Sym) *types.Type {
- name := ir.NewDeclNameAt(pos, ir.OTYPE, sym)
- forw := types.NewNamed(name)
- name.SetType(forw)
- sym.Def = name
- return forw
-}
diff --git a/src/cmd/compile/internal/noder/transform.go b/src/cmd/compile/internal/noder/transform.go
index 2859089e69..946d335f07 100644
--- a/src/cmd/compile/internal/noder/transform.go
+++ b/src/cmd/compile/internal/noder/transform.go
@@ -340,12 +340,12 @@ assignOK:
}
}
-// Corresponds to typecheck.typecheckargs.
+// Corresponds to, but is slightly more general than, typecheck.typecheckargs.
func transformArgs(n ir.InitNode) {
var list []ir.Node
switch n := n.(type) {
default:
- base.Fatalf("typecheckargs %+v", n.Op())
+ base.Fatalf("transformArgs %+v", n.Op())
case *ir.CallExpr:
list = n.Args
if n.IsDDD {
@@ -354,25 +354,31 @@ func transformArgs(n ir.InitNode) {
case *ir.ReturnStmt:
list = n.Results
}
- if len(list) != 1 {
- return
- }
- t := list[0].Type()
- if t == nil || !t.IsFuncArgStruct() {
+ // Look to see if we have any multi-return functions as arguments.
+ extra := 0
+ for _, arg := range list {
+ t := arg.Type()
+ if t.IsFuncArgStruct() {
+ num := t.Fields().Len()
+ if num <= 1 {
+ base.Fatalf("multi-return type with only %d parts", num)
+ }
+ extra += num - 1
+ }
+ }
+ // If not, nothing to do.
+ if extra == 0 {
return
}
- // Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...).
+ // Rewrite f(..., g(), ...) into t1, ..., tN = g(); f(..., t1, ..., tN, ...).
// Save n as n.Orig for fmt.go.
if ir.Orig(n) == n {
n.(ir.OrigNode).SetOrig(ir.SepCopy(n))
}
- as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
- as.Rhs.Append(list...)
-
// If we're outside of function context, then this call will
// be executed during the generated init function. However,
// init.go hasn't yet created it. Instead, associate the
@@ -382,27 +388,42 @@ func transformArgs(n ir.InitNode) {
if static {
ir.CurFunc = typecheck.InitTodoFunc
}
- list = nil
- for _, f := range t.FieldSlice() {
- t := typecheck.Temp(f.Type)
- as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, t))
- as.Lhs.Append(t)
- list = append(list, t)
+
+ // Expand multi-return function calls.
+ // The spec only allows a multi-return function as an argument
+ // if it is the only argument. This code must handle calls to
+ // stenciled generic functions, which have extra arguments
+ // (like the dictionary), so it must handle a slightly more
+ // general case, like f(n, g()) where g is multi-return.
+ newList := make([]ir.Node, 0, len(list)+extra)
+ for _, arg := range list {
+ t := arg.Type()
+ if t.IsFuncArgStruct() {
+ as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, []ir.Node{arg})
+ for _, f := range t.FieldSlice() {
+ t := typecheck.Temp(f.Type)
+ as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, t))
+ as.Lhs.Append(t)
+ newList = append(newList, t)
+ }
+ transformAssign(as, as.Lhs, as.Rhs)
+ as.SetTypecheck(1)
+ n.PtrInit().Append(as)
+ } else {
+ newList = append(newList, arg)
+ }
}
+
if static {
ir.CurFunc = nil
}
switch n := n.(type) {
case *ir.CallExpr:
- n.Args = list
+ n.Args = newList
case *ir.ReturnStmt:
- n.Results = list
+ n.Results = newList
}
-
- transformAssign(as, as.Lhs, as.Rhs)
- as.SetTypecheck(1)
- n.PtrInit().Append(as)
}
// assignconvfn converts node n for assignment to type t. Corresponds to
@@ -416,7 +437,10 @@ func assignconvfn(n ir.Node, t *types.Type) ir.Node {
return n
}
- op, _ := typecheck.Assignop(n.Type(), t)
+ op, why := typecheck.Assignop(n.Type(), t)
+ if op == ir.OXXX {
+ base.Fatalf("found illegal assignment %+v -> %+v; %s", n.Type(), t, why)
+ }
r := ir.NewConvExpr(base.Pos, op, t, n)
r.SetTypecheck(1)
@@ -562,6 +586,11 @@ func transformDot(n *ir.SelectorExpr, isCall bool) ir.Node {
if (n.Op() == ir.ODOTINTER || n.Op() == ir.ODOTMETH) && !isCall {
n.SetOp(ir.OCALLPART)
+ if len(n.X.Type().RParams()) > 0 || n.X.Type().IsPtr() && len(n.X.Type().Elem().RParams()) > 0 {
+ // TODO: MethodValueWrapper needed for generics?
+ // Or did we successfully desugar all that at stencil time?
+ return n
+ }
n.SetType(typecheck.MethodValueWrapper(n).Type())
}
return n
@@ -911,9 +940,7 @@ func transformCompLit(n *ir.CompLitExpr) (res ir.Node) {
f := t.Field(i)
n1 = assignconvfn(n1, f.Type)
- sk := ir.NewStructKeyExpr(base.Pos, f.Sym, n1)
- sk.Offset = f.Offset
- ls[i] = sk
+ ls[i] = ir.NewStructKeyExpr(base.Pos, f, n1)
}
assert(len(ls) >= t.NumFields())
} else {
@@ -922,33 +949,26 @@ func transformCompLit(n *ir.CompLitExpr) (res ir.Node) {
for i, l := range ls {
ir.SetPos(l)
- if l.Op() == ir.OKEY {
- kv := l.(*ir.KeyExpr)
- key := kv.Key
-
- // Sym might have resolved to name in other top-level
- // package, because of import dot. Redirect to correct sym
- // before we do the lookup.
- s := key.Sym()
- if id, ok := key.(*ir.Ident); ok && typecheck.DotImportRefs[id] != nil {
- s = typecheck.Lookup(s.Name)
- }
-
- // An OXDOT uses the Sym field to hold
- // the field to the right of the dot,
- // so s will be non-nil, but an OXDOT
- // is never a valid struct literal key.
- assert(!(s == nil || s.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || s.IsBlank()))
+ kv := l.(*ir.KeyExpr)
+ key := kv.Key
- l = ir.NewStructKeyExpr(l.Pos(), s, kv.Value)
- ls[i] = l
+ // Sym might have resolved to name in other top-level
+ // package, because of import dot. Redirect to correct sym
+ // before we do the lookup.
+ s := key.Sym()
+ if id, ok := key.(*ir.Ident); ok && typecheck.DotImportRefs[id] != nil {
+ s = typecheck.Lookup(s.Name)
}
- assert(l.Op() == ir.OSTRUCTKEY)
- l := l.(*ir.StructKeyExpr)
+ // An OXDOT uses the Sym field to hold
+ // the field to the right of the dot,
+ // so s will be non-nil, but an OXDOT
+ // is never a valid struct literal key.
+ assert(!(s == nil || s.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || s.IsBlank()))
- f := typecheck.Lookdot1(nil, l.Field, t, t.Fields(), 0)
- l.Offset = f.Offset
+ f := typecheck.Lookdot1(nil, s, t, t.Fields(), 0)
+ l := ir.NewStructKeyExpr(l.Pos(), f, kv.Value)
+ ls[i] = l
l.Value = assignconvfn(l.Value, f.Type)
}
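
The transformArgs rewrite above corresponds roughly to the following source-level expansion. It is only a sketch: f, g, and the temporaries are illustrative names, and the extra leading argument stands in for a stenciling dictionary.

package main

import "fmt"

// g is a multi-value function appearing as a call argument.
func g() (int, int) { return 1, 2 }

// f takes an extra leading argument (standing in for a dictionary)
// plus the two values produced by g.
func f(dict uintptr, a, b int) { fmt.Println(dict, a, b) }

func main() {
	// The IR form f(0, g()) is not legal source Go (a multi-value call
	// must be the sole argument), so transformArgs expands it into
	// assignments to temporaries followed by an ordinary call:
	t1, t2 := g()
	f(0, t1, t2)
}
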
diff --git a/src/cmd/compile/internal/noder/types.go b/src/cmd/compile/internal/noder/types.go
index 8680559a41..b37793b2d0 100644
--- a/src/cmd/compile/internal/noder/types.go
+++ b/src/cmd/compile/internal/noder/types.go
@@ -68,8 +68,10 @@ func instTypeName2(name string, targs []types2.Type) string {
if i > 0 {
b.WriteByte(',')
}
+ // Include package names for all types, including typeparams, to
+ // make sure type arguments are uniquely specified.
tname := types2.TypeString(targ,
- func(*types2.Package) string { return "" })
+ func(pkg *types2.Package) string { return pkg.Name() })
if strings.Index(tname, ", ") >= 0 {
// types2.TypeString puts spaces after a comma in a type
// list, but we don't want spaces in our actual type names
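
The qualifier change in instTypeName2 matters when type arguments from different packages share a name. Below is a standalone sketch of the effect using the exported go/types API (the internal types2 call has the same shape); the package paths and names are made up for illustration.

package main

import (
	"fmt"
	"go/types"
)

func main() {
	// Two distinct packages that each declare a type named T.
	p1 := types.NewPackage("example.com/a", "a")
	p2 := types.NewPackage("example.com/b", "b")
	t1 := types.NewNamed(types.NewTypeName(0, p1, "T", nil), types.Typ[types.Int], nil)
	t2 := types.NewNamed(types.NewTypeName(0, p2, "T", nil), types.Typ[types.String], nil)

	// With an empty qualifier both print as "T", so instantiation names
	// like G[T] would collide across packages.
	unqualified := func(*types.Package) string { return "" }
	fmt.Println(types.TypeString(t1, unqualified), types.TypeString(t2, unqualified)) // T T

	// Qualifying with the package name keeps them distinct, mirroring the change above.
	byName := func(p *types.Package) string { return p.Name() }
	fmt.Println(types.TypeString(t1, byName), types.TypeString(t2, byName)) // a.T b.T
}
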
@@ -89,50 +91,49 @@ func (g *irgen) typ0(typ types2.Type) *types.Type {
case *types2.Basic:
return g.basic(typ)
case *types2.Named:
- if typ.TParams() != nil {
+ // If tparams is set, but targs is not, typ is a base generic
+ // type. typ appears as part of the source type of an alias,
+ // since that is the only use of a generic type that doesn't
+ // involve instantiation. We just translate the named type in the
+ // normal way below using g.obj().
+ if typ.TParams() != nil && typ.TArgs() != nil {
// typ is an instantiation of a defined (named) generic type.
// This instantiation should also be a defined (named) type.
// types2 gives us the substituted type in t.Underlying()
// The substituted type may or may not still have type
// params. We might, for example, be substituting one type
// param for another type param.
-
- if typ.TArgs() == nil {
- base.Fatalf("In typ0, Targs should be set if TParams is set")
- }
-
- // When converted to types.Type, typ must have a name,
- // based on the names of the type arguments. We need a
- // name to deal with recursive generic types (and it also
- // looks better when printing types).
+ //
+ // When converted to types.Type, typ has a unique name,
+ // based on the names of the type arguments.
instName := instTypeName2(typ.Obj().Name(), typ.TArgs())
s := g.pkg(typ.Obj().Pkg()).Lookup(instName)
if s.Def != nil {
- // We have already encountered this instantiation,
- // so use the type we previously created, since there
+ // We have already encountered this instantiation.
+ // Use the type we previously created, since there
// must be exactly one instance of a defined type.
return s.Def.Type()
}
// Create a forwarding type first and put it in the g.typs
- // map, in order to deal with recursive generic types.
- // Fully set up the extra ntyp information (Def, RParams,
- // which may set HasTParam) before translating the
- // underlying type itself, so we handle recursion
- // correctly, including via method signatures.
- ntyp := newIncompleteNamedType(g.pos(typ.Obj().Pos()), s)
+ // map, in order to deal with recursive generic types
+ // (including via method signatures). Set up the extra
+ // ntyp information (Def, RParams, which may set
+ // HasTParam) before translating the underlying type
+ // itself, so we handle recursion correctly.
+ ntyp := typecheck.NewIncompleteNamedType(g.pos(typ.Obj().Pos()), s)
g.typs[typ] = ntyp
// If ntyp still has type params, then we must be
// referencing something like 'value[T2]', as when
- // specifying the generic receiver of a method,
- // where value was defined as "type value[T any]
- // ...". Save the type args, which will now be the
- // new type of the current type.
+ // specifying the generic receiver of a method, where
+ // value was defined as "type value[T any] ...". Save the
+ // type args, which will now be the new typeparams of the
+ // current type.
//
// If ntyp does not have type params, we are saving the
- // concrete types used to instantiate this type. We'll use
- // these when instantiating the methods of the
+ // non-generic types used to instantiate this type. We'll
+ // use these when instantiating the methods of the
// instantiated type.
rparams := make([]*types.Type, len(typ.TArgs()))
for i, targ := range typ.TArgs() {
@@ -143,6 +144,8 @@ func (g *irgen) typ0(typ types2.Type) *types.Type {
ntyp.SetUnderlying(g.typ1(typ.Underlying()))
g.fillinMethods(typ, ntyp)
+ // Save the symbol for the base generic type.
+ ntyp.OrigSym = g.pkg(typ.Obj().Pkg()).Lookup(typ.Obj().Name())
return ntyp
}
obj := g.obj(typ.Obj())
@@ -183,11 +186,16 @@ func (g *irgen) typ0(typ types2.Type) *types.Type {
for i := range embeddeds {
// TODO(mdempsky): Get embedding position.
e := typ.EmbeddedType(i)
- if t := types2.AsInterface(e); t != nil && t.IsComparable() {
- // Ignore predefined type 'comparable', since it
- // doesn't resolve and it doesn't have any
- // relevant methods.
- continue
+
+ // With Go 1.18, an embedded element can be any type, not
+ // just an interface.
+ if t := types2.AsInterface(e); t != nil {
+ if t.IsComparable() {
+ // Ignore predefined type 'comparable', since it
+ // doesn't resolve and it doesn't have any
+ // relevant methods.
+ continue
+ }
}
embeddeds[j] = types.NewField(src.NoXPos, nil, g.typ1(e))
j++
@@ -204,20 +212,39 @@ func (g *irgen) typ0(typ types2.Type) *types.Type {
return types.NewInterface(g.tpkg(typ), append(embeddeds, methods...))
case *types2.TypeParam:
- tp := types.NewTypeParam(g.tpkg(typ))
// Save the name of the type parameter in the sym of the type.
// Include the types2 subscript in the sym name
- sym := g.pkg(typ.Obj().Pkg()).Lookup(types2.TypeString(typ, func(*types2.Package) string { return "" }))
- tp.SetSym(sym)
+ pkg := g.tpkg(typ)
+ sym := pkg.Lookup(types2.TypeString(typ, func(*types2.Package) string { return "" }))
+ if sym.Def != nil {
+ // Make sure we use the same type param type for the same
+ // name, whether it is created during types1-import or
+ // this types2-to-types1 translation.
+ return sym.Def.Type()
+ }
+ tp := types.NewTypeParam(sym, typ.Index())
+ nname := ir.NewDeclNameAt(g.pos(typ.Obj().Pos()), ir.OTYPE, sym)
+ sym.Def = nname
+ nname.SetType(tp)
+ tp.SetNod(nname)
// Set g.typs[typ] in case the bound methods reference typ.
g.typs[typ] = tp
- // TODO(danscales): we don't currently need to use the bounds
- // anywhere, so eventually we can probably remove.
bound := g.typ1(typ.Bound())
- *tp.Methods() = *bound.Methods()
+ tp.SetBound(bound)
return tp
+ case *types2.Union:
+ nt := typ.NumTerms()
+ tlist := make([]*types.Type, nt)
+ tildes := make([]bool, nt)
+ for i := range tlist {
+ term, tilde := typ.Term(i)
+ tlist[i] = g.typ1(term)
+ tildes[i] = tilde
+ }
+ return types.NewUnion(tlist, tildes)
+
case *types2.Tuple:
// Tuples are used for the type of a function call (i.e. the
// return value of the function).
@@ -243,20 +270,28 @@ func (g *irgen) typ0(typ types2.Type) *types.Type {
// and for actually generating the methods for instantiated types.
func (g *irgen) fillinMethods(typ *types2.Named, ntyp *types.Type) {
if typ.NumMethods() != 0 {
- targs := make([]ir.Node, len(typ.TArgs()))
+ targs := make([]*types.Type, len(typ.TArgs()))
for i, targ := range typ.TArgs() {
- targs[i] = ir.TypeNode(g.typ1(targ))
+ targs[i] = g.typ1(targ)
}
methods := make([]*types.Field, typ.NumMethods())
for i := range methods {
m := typ.Method(i)
- meth := g.obj(m)
recvType := types2.AsSignature(m.Type()).Recv().Type()
ptr := types2.AsPointer(recvType)
if ptr != nil {
recvType = ptr.Elem()
}
+ var meth *ir.Name
+ if m.Pkg() != g.self {
+ // Imported methods cannot be loaded by name (what
+ // g.obj() does) - they must be loaded via their
+ // type.
+ meth = g.obj(recvType.(*types2.Named).Obj()).Type().Methods().Index(i).Nname.(*ir.Name)
+ } else {
+ meth = g.obj(m)
+ }
if recvType != types2.Type(typ) {
// Unfortunately, meth is the type of the method of the
// generic type, so we have to do a substitution to get
@@ -276,18 +311,21 @@ func (g *irgen) fillinMethods(typ *types2.Named, ntyp *types.Type) {
} else {
meth2 = ir.NewNameAt(meth.Pos(), newsym)
rparams := types2.AsSignature(m.Type()).RParams()
- tparams := make([]*types.Field, len(rparams))
+ tparams := make([]*types.Type, len(rparams))
for i, rparam := range rparams {
- tparams[i] = types.NewField(src.NoXPos, nil, g.typ1(rparam.Type()))
+ tparams[i] = g.typ1(rparam.Type())
}
assert(len(tparams) == len(targs))
- subst := &subster{
- g: g,
- tparams: tparams,
- targs: targs,
+ ts := typecheck.Tsubster{
+ Tparams: tparams,
+ Targs: targs,
}
// Do the substitution of the type
- meth2.SetType(subst.typ(meth.Type()))
+ meth2.SetType(ts.Typ(meth.Type()))
+ // Add any new fully instantiated types
+ // seen during the substitution to
+ // g.instTypeList.
+ g.instTypeList = append(g.instTypeList, ts.InstTypeList...)
newsym.Def = meth2
}
meth = meth2
@@ -346,7 +384,7 @@ func (g *irgen) selector(obj types2.Object) *types.Sym {
return pkg.Lookup(name)
}
-// tpkg returns the package that a function, interface, or struct type
+// tpkg returns the package that a function, interface, struct, or typeparam type
// expression appeared in.
//
// Caveat: For the degenerate types "func()", "interface{}", and
@@ -356,36 +394,39 @@ func (g *irgen) selector(obj types2.Object) *types.Sym {
// particular types is because go/types does *not* report it for
// them. So in practice this limitation is probably moot.
func (g *irgen) tpkg(typ types2.Type) *types.Pkg {
- anyObj := func() types2.Object {
- switch typ := typ.(type) {
- case *types2.Signature:
- if recv := typ.Recv(); recv != nil {
- return recv
- }
- if params := typ.Params(); params.Len() > 0 {
- return params.At(0)
- }
- if results := typ.Results(); results.Len() > 0 {
- return results.At(0)
- }
- case *types2.Struct:
- if typ.NumFields() > 0 {
- return typ.Field(0)
- }
- case *types2.Interface:
- if typ.NumExplicitMethods() > 0 {
- return typ.ExplicitMethod(0)
- }
- }
- return nil
- }
-
- if obj := anyObj(); obj != nil {
+ if obj := anyObj(typ); obj != nil {
return g.pkg(obj.Pkg())
}
return types.LocalPkg
}
+// anyObj returns some object accessible from typ, if any.
+func anyObj(typ types2.Type) types2.Object {
+ switch typ := typ.(type) {
+ case *types2.Signature:
+ if recv := typ.Recv(); recv != nil {
+ return recv
+ }
+ if params := typ.Params(); params.Len() > 0 {
+ return params.At(0)
+ }
+ if results := typ.Results(); results.Len() > 0 {
+ return results.At(0)
+ }
+ case *types2.Struct:
+ if typ.NumFields() > 0 {
+ return typ.Field(0)
+ }
+ case *types2.Interface:
+ if typ.NumExplicitMethods() > 0 {
+ return typ.ExplicitMethod(0)
+ }
+ case *types2.TypeParam:
+ return typ.Obj()
+ }
+ return nil
+}
+
func (g *irgen) basic(typ *types2.Basic) *types.Type {
switch typ.Name() {
case "byte":
diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go
index e07294be0f..f16034ea70 100644
--- a/src/cmd/compile/internal/reflectdata/reflect.go
+++ b/src/cmd/compile/internal/reflectdata/reflect.go
@@ -321,13 +321,6 @@ func methods(t *types.Type) []*typeSig {
}
typecheck.CalcMethods(mt)
- // type stored in interface word
- it := t
-
- if !types.IsDirectIface(it) {
- it = types.NewPtr(t)
- }
-
// make list of methods for t,
// generating code if necessary.
var ms []*typeSig
@@ -355,8 +348,8 @@ func methods(t *types.Type) []*typeSig {
sig := &typeSig{
name: f.Sym,
- isym: methodWrapper(it, f),
- tsym: methodWrapper(t, f),
+ isym: methodWrapper(t, f, true),
+ tsym: methodWrapper(t, f, false),
type_: typecheck.NewMethodType(f.Type, t),
mtype: typecheck.NewMethodType(f.Type, nil),
}
@@ -394,7 +387,7 @@ func imethods(t *types.Type) []*typeSig {
// IfaceType.Method is not in the reflect data.
// Generate the method body, so that compiled
// code can refer to it.
- methodWrapper(t, f)
+ methodWrapper(t, f, false)
}
return methods
@@ -951,8 +944,12 @@ func writeType(t *types.Type) *obj.LSym {
}
if base.Ctxt.Pkgpath != "runtime" || (tbase != types.Types[tbase.Kind()] && tbase != types.ByteType && tbase != types.RuneType && tbase != types.ErrorType) { // int, float, etc
- // named types from other files are defined only by those files
- if tbase.Sym() != nil && tbase.Sym().Pkg != types.LocalPkg {
+ // Named types from other files are defined only by those files.
+ // However, as an exception, we can write out instantiated types
+ // in the local package, even if they may be marked as part of
+ // another package (the package of their base generic type).
+ if tbase.Sym() != nil && tbase.Sym().Pkg != types.LocalPkg &&
+ !tbase.IsFullyInstantiated() {
if i := typecheck.BaseTypeIndex(t); i >= 0 {
lsym.Pkg = tbase.Sym().Pkg.Prefix
lsym.SymIdx = int32(i)
@@ -1761,7 +1758,28 @@ func CollectPTabs() {
//
// rcvr - U
// method - M func (t T)(), a TFIELD type struct
-func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym {
+//
+// Also wraps methods on instantiated generic types for use in itab entries.
+// For an instantiated generic type G[int], we generate wrappers like:
+// G[int] pointer shaped:
+// func (x G[int]) f(arg) {
+// .inst.G[int].f(dictionary, x, arg)
+// }
+// G[int] not pointer shaped:
+// func (x *G[int]) f(arg) {
+// .inst.G[int].f(dictionary, *x, arg)
+// }
+// These wrappers are always fully stenciled.
+func methodWrapper(rcvr *types.Type, method *types.Field, forItab bool) *obj.LSym {
+ orig := rcvr
+ if forItab && !types.IsDirectIface(rcvr) {
+ rcvr = rcvr.PtrTo()
+ }
+ generic := false
+ if !rcvr.IsInterface() && len(rcvr.RParams()) > 0 || rcvr.IsPtr() && len(rcvr.Elem().RParams()) > 0 { // TODO: right detection?
+ // TODO: check that we do the right thing when rcvr.IsInterface().
+ generic = true
+ }
newnam := ir.MethodSym(rcvr, method.Sym)
lsym := newnam.Linksym()
if newnam.Siggen() {
@@ -1769,19 +1787,24 @@ func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym {
}
newnam.SetSiggen(true)
- if types.Identical(rcvr, method.Type.Recv().Type) {
+ if !generic && types.Identical(rcvr, method.Type.Recv().Type) {
return lsym
}
- // Only generate (*T).M wrappers for T.M in T's own package.
+ // Only generate (*T).M wrappers for T.M in T's own package, except for
+ // instantiated methods.
if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type &&
- rcvr.Elem().Sym() != nil && rcvr.Elem().Sym().Pkg != types.LocalPkg {
+ rcvr.Elem().Sym() != nil && rcvr.Elem().Sym().Pkg != types.LocalPkg &&
+ !rcvr.Elem().IsFullyInstantiated() {
return lsym
}
// Only generate I.M wrappers for I in I's own package
- // but keep doing it for error.Error (was issue #29304).
- if rcvr.IsInterface() && rcvr.Sym() != nil && rcvr.Sym().Pkg != types.LocalPkg && rcvr != types.ErrorType {
+ // but keep doing it for error.Error (was issue #29304)
+ // and methods of instantiated interfaces.
+ if rcvr.IsInterface() && rcvr != types.ErrorType &&
+ rcvr.Sym() != nil && rcvr.Sym().Pkg != types.LocalPkg &&
+ !rcvr.IsFullyInstantiated() {
return lsym
}
@@ -1802,9 +1825,10 @@ func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym {
nthis := ir.AsNode(tfn.Type().Recv().Nname)
methodrcvr := method.Type.Recv().Type
+ indirect := rcvr.IsPtr() && rcvr.Elem() == methodrcvr
// generate nil pointer check for better error
- if rcvr.IsPtr() && rcvr.Elem() == methodrcvr {
+ if indirect {
// generating wrapper from *T to T.
n := ir.NewIfStmt(base.Pos, nil, nil, nil)
n.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, nthis, typecheck.NodNil())
@@ -1826,7 +1850,7 @@ func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym {
// Disable tailcall for RegabiArgs for now. The IR does not connect the
// arguments with the OTAILCALL node, and the arguments are not marshaled
// correctly.
- if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) && !(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) && !buildcfg.Experiment.RegabiArgs {
+ if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) && !(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) && !buildcfg.Experiment.RegabiArgs && !generic {
// generate tail call: adjust pointer receiver and jump to embedded method.
left := dot.X // skip final .M
if !left.Type().IsPtr() {
@@ -1837,8 +1861,44 @@ func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym {
fn.Body.Append(ir.NewTailCallStmt(base.Pos, method.Nname.(*ir.Name)))
} else {
fn.SetWrapper(true) // ignore frame for panic+recover matching
- call := ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil)
- call.Args = ir.ParamNames(tfn.Type())
+ var call *ir.CallExpr
+ if generic {
+ var args []ir.Node
+ var targs []*types.Type
+ if rcvr.IsPtr() { // TODO: correct condition?
+ targs = rcvr.Elem().RParams()
+ } else {
+ targs = rcvr.RParams()
+ }
+ if strings.HasPrefix(ir.MethodSym(orig, method.Sym).Name, ".inst.") {
+ fmt.Printf("%s\n", ir.MethodSym(orig, method.Sym).Name)
+ panic("multiple .inst.")
+ }
+ args = append(args, getDictionary(".inst."+ir.MethodSym(orig, method.Sym).Name, targs)) // TODO: remove .inst.
+ if indirect {
+ args = append(args, ir.NewStarExpr(base.Pos, nthis))
+ } else {
+ args = append(args, nthis)
+ }
+ args = append(args, ir.ParamNames(tfn.Type())...)
+
+ // TODO: Once we enter the gcshape world, we'll need a way to look up
+ // the stenciled implementation to use for this concrete type. Essentially,
+ // erase the concrete types and replace them with gc shape representatives.
+ sym := typecheck.MakeInstName(ir.MethodSym(methodrcvr, method.Sym), targs, true)
+ if sym.Def == nil {
+ // Currently we make sure that we have all the instantiations
+ // we need by generating them all in ../noder/stencil.go:instantiateMethods
+ // TODO: maybe there's a better, more incremental way to generate
+ // only the instantiations we need?
+ base.Fatalf("instantiation %s not found", sym.Name)
+ }
+ target := ir.AsNode(sym.Def)
+ call = ir.NewCallExpr(base.Pos, ir.OCALL, target, args)
+ } else {
+ call = ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil)
+ call.Args = ir.ParamNames(tfn.Type())
+ }
call.IsDDD = tfn.Type().IsVariadic()
if method.Type.NumResults() > 0 {
ret := ir.NewReturnStmt(base.Pos, nil)
@@ -1903,3 +1963,71 @@ func MarkUsedIfaceMethod(n *ir.CallExpr) {
r.Add = InterfaceMethodOffset(ityp, midx)
r.Type = objabi.R_USEIFACEMETHOD
}
+
+// GetDictionaryForInstantiation returns the dictionary that should be used for invoking
+// the concrete instantiation described by inst.
+func GetDictionaryForInstantiation(inst *ir.InstExpr) ir.Node {
+ targs := typecheck.TypesOf(inst.Targs)
+ if meth, ok := inst.X.(*ir.SelectorExpr); ok {
+ return GetDictionaryForMethod(meth.Selection.Nname.(*ir.Name), targs)
+ }
+ return GetDictionaryForFunc(inst.X.(*ir.Name), targs)
+}
+
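+// GetDictionaryForFunc returns the dictionary to use for calling the
+// instantiation of generic function fn with the type arguments targs.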
+func GetDictionaryForFunc(fn *ir.Name, targs []*types.Type) ir.Node {
+ return getDictionary(typecheck.MakeInstName(fn.Sym(), targs, false).Name, targs)
+}
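+
+// GetDictionaryForMethod returns the dictionary to use for calling the
+// instantiation of generic method meth with the type arguments targs.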
+func GetDictionaryForMethod(meth *ir.Name, targs []*types.Type) ir.Node {
+ return getDictionary(typecheck.MakeInstName(meth.Sym(), targs, true).Name, targs)
+}
+
+// getDictionary returns the dictionary for the given named generic function
+// or method, with the given type arguments.
+// TODO: pass a reference to the generic function instead? We might need
+// that to look up protodictionaries.
+func getDictionary(name string, targs []*types.Type) ir.Node {
+ if len(targs) == 0 {
+ base.Fatalf("%s should have type arguments", name)
+ }
+
+ // The dictionary for this instantiation is named after the function
+ // and concrete types it is instantiated with.
+ // TODO: decouple this naming from the instantiation naming. The instantiation
+ // naming will be based on GC shapes, this naming must be fully stenciled.
+ if !strings.HasPrefix(name, ".inst.") {
+ base.Fatalf("%s should start with .inst.", name)
+ }
+ name = ".dict." + name[6:]
+
+ // Get a symbol representing the dictionary.
+ sym := typecheck.Lookup(name)
+
+ // Initialize the dictionary, if we haven't yet already.
+ if lsym := sym.Linksym(); len(lsym.P) == 0 {
+ off := 0
+ // Emit an entry for each concrete type.
+ for _, t := range targs {
+ s := TypeLinksym(t)
+ off = objw.SymPtr(lsym, off, s, 0)
+ }
+ // TODO: subdictionaries
+ objw.Global(lsym, int32(off), obj.DUPOK|obj.RODATA)
+ }
+
+ // Make a node referencing the dictionary symbol.
+ n := typecheck.NewName(sym)
+ n.SetType(types.Types[types.TUINTPTR]) // should probably be [...]uintptr, but doesn't really matter
+ n.SetTypecheck(1)
+ n.Class = ir.PEXTERN
+ sym.Def = n
+
+ // Return the address of the dictionary.
+ np := typecheck.NodAddr(n)
+ // Note: treat dictionary pointers as uintptrs, so they aren't pointers
+ // with respect to GC. That saves on stack scanning work, write barriers, etc.
+ // We can get away with it because dictionaries are global variables.
+ // TODO: use a cast, or is typing directly ok?
+ np.SetType(types.Types[types.TUINTPTR])
+ np.SetTypecheck(1)
+ return np
+}
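
The dictionary that getDictionary emits is one word per type argument, each holding a pointer to that type's descriptor, stored as a uintptr so the GC does not scan it. The following run-time model of that layout is illustrative only: reflect type pointers stand in for the TypeLinksym symbols the compiler writes into the static ".dict." data.

package main

import (
	"fmt"
	"reflect"
)

// makeDict builds the same shape as a ".dict.<instantiation>" symbol:
// a flat array of words, one per type argument, pointing at descriptors.
func makeDict(targs ...reflect.Type) []uintptr {
	dict := make([]uintptr, len(targs))
	for i, t := range targs {
		// A reflect.Type carries a *rtype pointer; that pointer stands in
		// for the compiler's type descriptor symbol here.
		dict[i] = reflect.ValueOf(t).Pointer()
	}
	return dict
}

func main() {
	d := makeDict(reflect.TypeOf(0), reflect.TypeOf(""))
	fmt.Printf("dictionary for a G[int, string] instantiation: %d entries\n", len(d))
}
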
diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go
index a8393a1999..61c65f9e54 100644
--- a/src/cmd/compile/internal/ssa/config.go
+++ b/src/cmd/compile/internal/ssa/config.go
@@ -196,7 +196,7 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config
c.floatParamRegs = paramFloatRegAMD64
c.FPReg = framepointerRegAMD64
c.LinkReg = linkRegAMD64
- c.hasGReg = buildcfg.Experiment.RegabiG
+ c.hasGReg = true
case "386":
c.PtrSize = 4
c.RegSize = 4
@@ -228,6 +228,8 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config
c.registers = registersARM64[:]
c.gpRegMask = gpRegMaskARM64
c.fpRegMask = fpRegMaskARM64
+ c.intParamRegs = paramIntRegARM64
+ c.floatParamRegs = paramFloatRegARM64
c.FPReg = framepointerRegARM64
c.LinkReg = linkRegARM64
c.hasGReg = true
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index 4cd00732fc..45c0238317 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -460,7 +460,7 @@
(IsInBounds idx len) => (SETB (CMPQ idx len))
(IsSliceInBounds idx len) => (SETBE (CMPQ idx len))
(NilCheck ...) => (LoweredNilCheck ...)
-(GetG mem) && !(buildcfg.Experiment.RegabiG && v.Block.Func.OwnAux.Fn.ABI() == obj.ABIInternal) => (LoweredGetG mem) // only lower in old ABI. in new ABI we have a G register.
+(GetG mem) && v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal => (LoweredGetG mem) // only lower in old ABI. in new ABI we have a G register.
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules
index 62699f290c..530e48bcb2 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules
@@ -2868,3 +2868,12 @@
&& isInlinableMemmove(dst, src, sz, config)
&& clobber(s1, s2, s3, call)
=> (Move [sz] dst src mem)
+
+// Match post-lowering calls, register version.
+(SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem))
+ && sz >= 0
+ && isSameCall(sym, "runtime.memmove")
+ && call.Uses == 1
+ && isInlinableMemmove(dst, src, sz, config)
+ && clobber(call)
+ => (Move [sz] dst src mem)
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
index 18a5666b40..5de0b5f020 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
@@ -482,9 +482,9 @@ func init() {
{name: "CSETM", argLength: 1, reg: readflags, asm: "CSETM", aux: "CCop"}, // auxint(flags) ? -1 : 0
// function calls
- {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
- {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R26"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
- {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+ {name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{gpsp, buildReg("R26"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, last arg=mem, auxint=argsize, returns mem
// pseudo-ops
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem.
@@ -759,15 +759,17 @@ func init() {
}
archs = append(archs, arch{
- name: "ARM64",
- pkg: "cmd/internal/obj/arm64",
- genfile: "../../arm64/ssa.go",
- ops: ops,
- blocks: blocks,
- regnames: regNamesARM64,
- gpregmask: gp,
- fpregmask: fp,
- framepointerreg: -1, // not used
- linkreg: int8(num["R30"]),
+ name: "ARM64",
+ pkg: "cmd/internal/obj/arm64",
+ genfile: "../../arm64/ssa.go",
+ ops: ops,
+ blocks: blocks,
+ regnames: regNamesARM64,
+ ParamIntRegNames: "R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15",
+ ParamFloatRegNames: "F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15",
+ gpregmask: gp,
+ fpregmask: fp,
+ framepointerreg: -1, // not used
+ linkreg: int8(num["R30"]),
})
}
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index 1c37fbe0db..df15c2edda 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -20664,7 +20664,7 @@ var opcodeTable = [...]opInfo{
{
name: "CALLstatic",
auxType: auxCallOff,
- argLen: 1,
+ argLen: -1,
clobberFlags: true,
call: true,
reg: regInfo{
@@ -20674,7 +20674,7 @@ var opcodeTable = [...]opInfo{
{
name: "CALLclosure",
auxType: auxCallOff,
- argLen: 3,
+ argLen: -1,
clobberFlags: true,
call: true,
reg: regInfo{
@@ -20688,7 +20688,7 @@ var opcodeTable = [...]opInfo{
{
name: "CALLinter",
auxType: auxCallOff,
- argLen: 2,
+ argLen: -1,
clobberFlags: true,
call: true,
reg: regInfo{
@@ -36400,8 +36400,8 @@ var registersARM64 = [...]Register{
{62, arm64.REG_F31, -1, "F31"},
{63, 0, -1, "SB"},
}
-var paramIntRegARM64 = []int8(nil)
-var paramFloatRegARM64 = []int8(nil)
+var paramIntRegARM64 = []int8{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
+var paramFloatRegARM64 = []int8{31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46}
var gpRegMaskARM64 = regMask(670826495)
var fpRegMaskARM64 = regMask(9223372034707292160)
var specialRegMaskARM64 = regMask(0)
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index 5045ba7351..89d32c0657 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -3,7 +3,6 @@
package ssa
-import "internal/buildcfg"
import "math"
import "cmd/internal/obj"
import "cmd/compile/internal/types"
@@ -29339,11 +29338,11 @@ func rewriteValueAMD64_OpFloor(v *Value) bool {
func rewriteValueAMD64_OpGetG(v *Value) bool {
v_0 := v.Args[0]
// match: (GetG mem)
- // cond: !(buildcfg.Experiment.RegabiG && v.Block.Func.OwnAux.Fn.ABI() == obj.ABIInternal)
+ // cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal
// result: (LoweredGetG mem)
for {
mem := v_0
- if !(!(buildcfg.Experiment.RegabiG && v.Block.Func.OwnAux.Fn.ABI() == obj.ABIInternal)) {
+ if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) {
break
}
v.reset(OpAMD64LoweredGetG)
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index 3cdc4d36cb..f7840c5503 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -25997,7 +25997,7 @@ func rewriteValueARM64_OpSelectN(v *Value) bool {
break
}
call := v_0
- if call.Op != OpARM64CALLstatic {
+ if call.Op != OpARM64CALLstatic || len(call.Args) != 1 {
break
}
sym := auxToCall(call.Aux)
@@ -26031,6 +26031,34 @@ func rewriteValueARM64_OpSelectN(v *Value) bool {
v.AddArg3(dst, src, mem)
return true
}
+ // match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem))
+ // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)
+ // result: (Move [sz] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpARM64CALLstatic || len(call.Args) != 4 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ mem := call.Args[3]
+ dst := call.Args[0]
+ src := call.Args[1]
+ call_2 := call.Args[2]
+ if call_2.Op != OpARM64MOVDconst {
+ break
+ }
+ sz := auxIntToInt64(call_2.AuxInt)
+ if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(sz)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
return false
}
func rewriteValueARM64_OpSlicemask(v *Value) bool {
diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go
index 4e3e5e75e3..c5130b2ee5 100644
--- a/src/cmd/compile/internal/ssa/schedule.go
+++ b/src/cmd/compile/internal/ssa/schedule.go
@@ -220,7 +220,7 @@ func schedule(f *Func) {
// unless they are phi values (which must be first).
// OpArg also goes first -- if it is stack it register allocates
// to a LoadReg, if it is register it is from the beginning anyway.
- if c.Op == OpPhi || c.Op == OpArg {
+ if score[c.ID] == ScorePhi || score[c.ID] == ScoreArg {
continue
}
score[c.ID] = ScoreControl
diff --git a/src/cmd/compile/internal/ssagen/arch.go b/src/cmd/compile/internal/ssagen/arch.go
index 7215f42c05..957fb3e84a 100644
--- a/src/cmd/compile/internal/ssagen/arch.go
+++ b/src/cmd/compile/internal/ssagen/arch.go
@@ -42,10 +42,10 @@ type ArchInfo struct {
// for all values in the block before SSAGenBlock.
SSAGenBlock func(s *State, b, next *ssa.Block)
- // LoadRegResults emits instructions that loads register-assigned results
- // into registers. They are already in memory (PPARAMOUT nodes).
- // Used in open-coded defer return path.
- LoadRegResults func(s *State, f *ssa.Func)
+ // LoadRegResult emits instructions that load the register-assigned result
+ // at n+off (n is a PPARAMOUT node) into register reg. The result is
+ // already in memory. Used in the open-coded defer return path.
+ LoadRegResult func(s *State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog
// SpillArgReg emits instructions that spill reg to n+off.
SpillArgReg func(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog
diff --git a/src/cmd/compile/internal/ssagen/pgen.go b/src/cmd/compile/internal/ssagen/pgen.go
index 62567535d7..93157bfa11 100644
--- a/src/cmd/compile/internal/ssagen/pgen.go
+++ b/src/cmd/compile/internal/ssagen/pgen.go
@@ -114,7 +114,10 @@ func (s *ssafn) AllocFrame(f *ssa.Func) {
}
}
- sort.Sort(byStackVar(fn.Dcl))
+ // Use sort.Stable instead of sort.Sort so stack layout (and thus
+ // compiler output) is less sensitive to frontend changes that
+ // introduce or remove unused variables.
+ sort.Stable(byStackVar(fn.Dcl))
// Reassign stack offsets of the locals that are used.
lastHasPtr := false
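
A minimal illustration of the sort.Stable choice above: with equal sort keys, a stable sort preserves the existing (declaration) order, so adding or removing an unrelated variable cannot reshuffle the others. The types and names below are illustrative, not compiler code.

package main

import (
	"fmt"
	"sort"
)

// vars models stack variables ordered by descending size, loosely like byStackVar.
type vars struct {
	names []string
	sizes []int
}

func (v vars) Len() int           { return len(v.names) }
func (v vars) Less(i, j int) bool { return v.sizes[i] > v.sizes[j] }
func (v vars) Swap(i, j int) {
	v.names[i], v.names[j] = v.names[j], v.names[i]
	v.sizes[i], v.sizes[j] = v.sizes[j], v.sizes[i]
}

func main() {
	v := vars{names: []string{"a", "b", "c", "d"}, sizes: []int{8, 8, 8, 8}}
	sort.Stable(v)
	fmt.Println(v.names) // [a b c d]: equal-sized vars keep their original order
}
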
diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
index 004e084f72..0fbb39cfbb 100644
--- a/src/cmd/compile/internal/ssagen/ssa.go
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -324,66 +324,21 @@ func dvarint(x *obj.LSym, off int, v int64) int {
// for stack variables are specified as the number of bytes below varp (pointer to the
// top of the local variables) for their starting address. The format is:
//
-// - Max total argument size among all the defers
// - Offset of the deferBits variable
// - Number of defers in the function
// - Information about each defer call, in reverse order of appearance in the function:
-// - Total argument size of the call
// - Offset of the closure value to call
-// - Number of arguments (including interface receiver or method receiver as first arg)
-// - Information about each argument
-// - Offset of the stored defer argument in this function's frame
-// - Size of the argument
-// - Offset of where argument should be placed in the args frame when making call
func (s *state) emitOpenDeferInfo() {
x := base.Ctxt.Lookup(s.curfn.LSym.Name + ".opendefer")
s.curfn.LSym.Func().OpenCodedDeferInfo = x
off := 0
-
- // Compute maxargsize (max size of arguments for all defers)
- // first, so we can output it first to the funcdata
- var maxargsize int64
- for i := len(s.openDefers) - 1; i >= 0; i-- {
- r := s.openDefers[i]
- argsize := r.n.X.Type().ArgWidth() // TODO register args: but maybe use of abi0 will make this easy
- if argsize > maxargsize {
- maxargsize = argsize
- }
- }
- off = dvarint(x, off, maxargsize)
off = dvarint(x, off, -s.deferBitsTemp.FrameOffset())
off = dvarint(x, off, int64(len(s.openDefers)))
// Write in reverse-order, for ease of running in that order at runtime
for i := len(s.openDefers) - 1; i >= 0; i-- {
r := s.openDefers[i]
- off = dvarint(x, off, r.n.X.Type().ArgWidth())
off = dvarint(x, off, -r.closureNode.FrameOffset())
- numArgs := len(r.argNodes)
- if r.rcvrNode != nil {
- // If there's an interface receiver, treat/place it as the first
- // arg. (If there is a method receiver, it's already included as
- // first arg in r.argNodes.)
- numArgs++
- }
- off = dvarint(x, off, int64(numArgs))
- argAdjust := 0 // presence of receiver offsets the parameter count.
- if r.rcvrNode != nil {
- off = dvarint(x, off, -okOffset(r.rcvrNode.FrameOffset()))
- off = dvarint(x, off, s.config.PtrSize)
- off = dvarint(x, off, 0) // This is okay because defer records use ABI0 (for now)
- argAdjust++
- }
-
- // TODO(register args) assume abi0 for this?
- ab := s.f.ABI0
- pri := ab.ABIAnalyzeFuncType(r.n.X.Type().FuncType())
- for j, arg := range r.argNodes {
- f := getParam(r.n, j)
- off = dvarint(x, off, -okOffset(arg.FrameOffset()))
- off = dvarint(x, off, f.Type.Size())
- off = dvarint(x, off, okOffset(pri.InParam(j+argAdjust).FrameOffset(pri)))
- }
}
}
@@ -650,7 +605,6 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
// it mimics the behavior of the former ABI (everything stored) and because it's not 100%
// clear if naming conventions are respected in autogenerated code.
// TODO figure out exactly what's unused, don't spill it. Make liveness fine-grained, also.
- // TODO non-amd64 architectures have link registers etc that may require adjustment here.
for _, p := range params.InParams() {
typs, offs := p.RegisterTypesAndOffsets()
for i, t := range typs {
@@ -865,16 +819,6 @@ type openDeferInfo struct {
// function, method, or interface call, to store a closure that panic
// processing can use for this defer.
closureNode *ir.Name
- // If defer call is interface call, the address of the argtmp where the
- // receiver is stored
- rcvr *ssa.Value
- // The node representing the argtmp where the receiver is stored
- rcvrNode *ir.Name
- // The addresses of the argtmps where the evaluated arguments of the defer
- // function call are stored.
- argVals []*ssa.Value
- // The nodes representing the argtmps where the args of the defer are stored
- argNodes []*ir.Name
}
type state struct {
@@ -4687,17 +4631,14 @@ func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value {
return args
}
-// openDeferRecord adds code to evaluate and store the args for an open-code defer
+// openDeferRecord adds code to evaluate and store the function for an open-coded defer
// call, and records info about the defer, so we can generate proper code on the
// exit paths. n is the sub-node of the defer node that is the actual function
-// call. We will also record funcdata information on where the args are stored
+// call. We will also record funcdata information on where the function is stored
// (as well as the deferBits variable), and this will enable us to run the proper
// defer calls during panics.
func (s *state) openDeferRecord(n *ir.CallExpr) {
- var args []*ssa.Value
- var argNodes []*ir.Name
-
- if buildcfg.Experiment.RegabiDefer && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) {
+ if len(n.Args) != 0 || n.Op() != ir.OCALLFUNC || n.X.Type().NumResults() != 0 {
s.Fatalf("defer call with arguments or results: %v", n)
}
@@ -4705,48 +4646,20 @@ func (s *state) openDeferRecord(n *ir.CallExpr) {
n: n,
}
fn := n.X
- if n.Op() == ir.OCALLFUNC {
- // We must always store the function value in a stack slot for the
- // runtime panic code to use. But in the defer exit code, we will
- // call the function directly if it is a static function.
- closureVal := s.expr(fn)
- closure := s.openDeferSave(nil, fn.Type(), closureVal)
- opendefer.closureNode = closure.Aux.(*ir.Name)
- if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC) {
- opendefer.closure = closure
- }
- } else if n.Op() == ir.OCALLMETH {
- base.Fatalf("OCALLMETH missed by walkCall")
- } else {
- if fn.Op() != ir.ODOTINTER {
- base.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
- }
- fn := fn.(*ir.SelectorExpr)
- closure, rcvr := s.getClosureAndRcvr(fn)
- opendefer.closure = s.openDeferSave(nil, closure.Type, closure)
- // Important to get the receiver type correct, so it is recognized
- // as a pointer for GC purposes.
- opendefer.rcvr = s.openDeferSave(nil, fn.Type().Recv().Type, rcvr)
- opendefer.closureNode = opendefer.closure.Aux.(*ir.Name)
- opendefer.rcvrNode = opendefer.rcvr.Aux.(*ir.Name)
- }
- for _, argn := range n.Args {
- var v *ssa.Value
- if TypeOK(argn.Type()) {
- v = s.openDeferSave(nil, argn.Type(), s.expr(argn))
- } else {
- v = s.openDeferSave(argn, argn.Type(), nil)
- }
- args = append(args, v)
- argNodes = append(argNodes, v.Aux.(*ir.Name))
+ // We must always store the function value in a stack slot for the
+ // runtime panic code to use. But in the defer exit code, we will
+ // call the function directly if it is a static function.
+ closureVal := s.expr(fn)
+ closure := s.openDeferSave(fn.Type(), closureVal)
+ opendefer.closureNode = closure.Aux.(*ir.Name)
+ if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC) {
+ opendefer.closure = closure
}
- opendefer.argVals = args
- opendefer.argNodes = argNodes
index := len(s.openDefers)
s.openDefers = append(s.openDefers, opendefer)
// Update deferBits only after evaluation and storage to stack of
- // args/receiver/interface is successful.
+ // the function is successful.
bitvalue := s.constInt8(types.Types[types.TUINT8], 1<<uint(index))
newDeferBits := s.newValue2(ssa.OpOr8, types.Types[types.TUINT8], s.variable(deferBitsVar, types.Types[types.TUINT8]), bitvalue)
s.vars[deferBitsVar] = newDeferBits
@@ -4755,57 +4668,47 @@ func (s *state) openDeferRecord(n *ir.CallExpr) {
// openDeferSave generates SSA nodes to store a value (with type t) for an
// open-coded defer at an explicit autotmp location on the stack, so it can be
-// reloaded and used for the appropriate call on exit. If type t is SSAable, then
-// val must be non-nil (and n should be nil) and val is the value to be stored. If
-// type t is non-SSAable, then n must be non-nil (and val should be nil) and n is
-// evaluated (via s.addr() below) to get the value that is to be stored. The
-// function returns an SSA value representing a pointer to the autotmp location.
-func (s *state) openDeferSave(n ir.Node, t *types.Type, val *ssa.Value) *ssa.Value {
- canSSA := TypeOK(t)
- var pos src.XPos
- if canSSA {
- pos = val.Pos
- } else {
- pos = n.Pos()
+// reloaded and used for the appropriate call on exit. Type t must be a function type
+// (therefore SSAable). val is the value to be stored. The function returns an SSA
+// value representing a pointer to the autotmp location.
+func (s *state) openDeferSave(t *types.Type, val *ssa.Value) *ssa.Value {
+ if !TypeOK(t) {
+ s.Fatalf("openDeferSave of non-SSA-able type %v val=%v", t, val)
+ }
+ if !t.HasPointers() {
+ s.Fatalf("openDeferSave of pointerless type %v val=%v", t, val)
}
- argTemp := typecheck.TempAt(pos.WithNotStmt(), s.curfn, t)
- argTemp.SetOpenDeferSlot(true)
- var addrArgTemp *ssa.Value
- // Use OpVarLive to make sure stack slots for the args, etc. are not
- // removed by dead-store elimination
+ pos := val.Pos
+ temp := typecheck.TempAt(pos.WithNotStmt(), s.curfn, t)
+ temp.SetOpenDeferSlot(true)
+ var addrTemp *ssa.Value
+ // Use OpVarLive to make sure the stack slot for the closure is not
+ // removed by dead-store elimination.
if s.curBlock.ID != s.f.Entry.ID {
- // Force the argtmp storing this defer function/receiver/arg to be
- // declared in the entry block, so that it will be live for the
- // defer exit code (which will actually access it only if the
- // associated defer call has been activated).
- s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarDef, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar])
- s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarLive, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar])
- addrArgTemp = s.f.Entry.NewValue2A(src.NoXPos, ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.defvars[s.f.Entry.ID][memVar])
+ // Force the tmp storing this defer function to be declared in the entry
+ // block, so that it will be live for the defer exit code (which will
+ // actually access it only if the associated defer call has been activated).
+ s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarDef, types.TypeMem, temp, s.defvars[s.f.Entry.ID][memVar])
+ s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarLive, types.TypeMem, temp, s.defvars[s.f.Entry.ID][memVar])
+ addrTemp = s.f.Entry.NewValue2A(src.NoXPos, ssa.OpLocalAddr, types.NewPtr(temp.Type()), temp, s.sp, s.defvars[s.f.Entry.ID][memVar])
} else {
// Special case if we're still in the entry block. We can't use
// the above code, since s.defvars[s.f.Entry.ID] isn't defined
// until we end the entry block with s.endBlock().
- s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, argTemp, s.mem(), false)
- s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argTemp, s.mem(), false)
- addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.mem(), false)
- }
- if t.HasPointers() {
- // Since we may use this argTemp during exit depending on the
- // deferBits, we must define it unconditionally on entry.
- // Therefore, we must make sure it is zeroed out in the entry
- // block if it contains pointers, else GC may wrongly follow an
- // uninitialized pointer value.
- argTemp.SetNeedzero(true)
- }
- if !canSSA {
- a := s.addr(n)
- s.move(t, addrArgTemp, a)
- return addrArgTemp
- }
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, temp, s.mem(), false)
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, temp, s.mem(), false)
+ addrTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(temp.Type()), temp, s.sp, s.mem(), false)
+ }
+ // Since we may use this temp during exit depending on the
+ // deferBits, we must define it unconditionally on entry.
+ // Therefore, we must make sure it is zeroed out in the entry
+ // block if it contains pointers, else GC may wrongly follow an
+ // uninitialized pointer value.
+ temp.SetNeedzero(true)
// We are storing to the stack, hence we can avoid the full checks in
// storeType() (no write barrier) and do a simple store().
- s.store(t, addrArgTemp, val)
- return addrArgTemp
+ s.store(t, addrTemp, val)
+ return addrTemp
}
// openDeferExit generates SSA for processing all the open coded defers at exit.
@@ -4849,45 +4752,26 @@ func (s *state) openDeferExit() {
s.vars[deferBitsVar] = maskedval
// Generate code to call the function call of the defer, using the
- // closure/receiver/args that were stored in argtmps at the point
- // of the defer statement.
+ // closure that was stored in an argtmp at the point of the defer
+ // statement.
fn := r.n.X
stksize := fn.Type().ArgWidth()
- var ACArgs []*types.Type
- var ACResults []*types.Type
var callArgs []*ssa.Value
- if r.rcvr != nil {
- // rcvr in case of OCALLINTER
- v := s.load(r.rcvr.Type.Elem(), r.rcvr)
- ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
- callArgs = append(callArgs, v)
- }
- for j, argAddrVal := range r.argVals {
- f := getParam(r.n, j)
- ACArgs = append(ACArgs, f.Type)
- var a *ssa.Value
- if !TypeOK(f.Type) {
- a = s.newValue2(ssa.OpDereference, f.Type, argAddrVal, s.mem())
- } else {
- a = s.load(f.Type, argAddrVal)
- }
- callArgs = append(callArgs, a)
- }
var call *ssa.Value
if r.closure != nil {
v := s.load(r.closure.Type.Elem(), r.closure)
s.maybeNilCheckClosure(v, callDefer)
codeptr := s.rawLoad(types.Types[types.TUINTPTR], v)
- aux := ssa.ClosureAuxCall(s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
+ aux := ssa.ClosureAuxCall(s.f.ABIDefault.ABIAnalyzeTypes(nil, nil, nil))
call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v)
} else {
- aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
+ aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), s.f.ABIDefault.ABIAnalyzeTypes(nil, nil, nil))
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
}
callArgs = append(callArgs, s.mem())
call.AddArgs(callArgs...)
call.AuxInt = stksize
- s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
+ s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, 0, call)
// Make sure that the stack slots with pointers are kept live
// through the call (which is a pre-emption point). Also, we will
// use the first call of the last defer exit to compute liveness
@@ -4895,16 +4779,6 @@ func (s *state) openDeferExit() {
if r.closureNode != nil {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
}
- if r.rcvrNode != nil {
- if r.rcvrNode.Type().HasPointers() {
- s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false)
- }
- }
- for _, argNode := range r.argNodes {
- if argNode.Type().HasPointers() {
- s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false)
- }
- }
s.endBlock()
s.startBlock(bEnd)
@@ -4952,7 +4826,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
}
}
- if buildcfg.Experiment.RegabiDefer && k != callNormal && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) {
+ if k != callNormal && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) {
s.Fatalf("go/defer call with arguments: %v", n)
}
@@ -5023,51 +4897,31 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
var call *ssa.Value
if k == callDeferStack {
// Make a defer struct d on the stack.
- t := deferstruct(stksize)
+ if stksize != 0 {
+ s.Fatalf("deferprocStack with non-zero stack size %d: %v", stksize, n)
+ }
+
+ t := deferstruct()
d := typecheck.TempAt(n.Pos(), s.curfn, t)
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
addr := s.addr(d)
- // Must match reflect.go:deferstruct and src/runtime/runtime2.go:_defer.
- // 0: siz
- s.store(types.Types[types.TUINT32],
- s.newValue1I(ssa.OpOffPtr, types.Types[types.TUINT32].PtrTo(), t.FieldOff(0), addr),
- s.constInt32(types.Types[types.TUINT32], int32(stksize)))
- // 1: started, set in deferprocStack
- // 2: heap, set in deferprocStack
- // 3: openDefer
- // 4: sp, set in deferprocStack
- // 5: pc, set in deferprocStack
- // 6: fn
+ // Must match deferstruct() below and src/runtime/runtime2.go:_defer.
+ // 0: started, set in deferprocStack
+ // 1: heap, set in deferprocStack
+ // 2: openDefer
+ // 3: sp, set in deferprocStack
+ // 4: pc, set in deferprocStack
+ // 5: fn
s.store(closure.Type,
- s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(6), addr),
+ s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(5), addr),
closure)
- // 7: panic, set in deferprocStack
- // 8: link, set in deferprocStack
- // 9: framepc
- // 10: varp
- // 11: fd
-
- // Then, store all the arguments of the defer call.
- ft := fn.Type()
- off := t.FieldOff(12) // TODO register args: be sure this isn't a hardcoded param stack offset.
- args := n.Args
-
- // Set receiver (for interface calls). Always a pointer.
- if rcvr != nil {
- p := s.newValue1I(ssa.OpOffPtr, ft.Recv().Type.PtrTo(), off, addr)
- s.store(types.Types[types.TUINTPTR], p, rcvr)
- }
- // Set receiver (for method calls).
- if n.Op() == ir.OCALLMETH {
- base.Fatalf("OCALLMETH missed by walkCall")
- }
- // Set other args.
- for _, f := range ft.Params().Fields().Slice() {
- s.storeArgWithBase(args[0], f.Type, addr, off+abi.FieldOffsetOf(f))
- args = args[1:]
- }
+ // 6: panic, set in deferprocStack
+ // 7: link, set in deferprocStack
+ // 8: fd
+ // 9: varp
+ // 10: framepc
// Call runtime.deferprocStack with pointer to _defer record.
ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
@@ -5075,27 +4929,18 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
callArgs = append(callArgs, addr, s.mem())
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...)
- if stksize < int64(types.PtrSize) {
- // We need room for both the call to deferprocStack and the call to
- // the deferred function.
- // TODO(register args) Revisit this if/when we pass args in registers.
- stksize = int64(types.PtrSize)
- }
- call.AuxInt = stksize
+ call.AuxInt = int64(types.PtrSize) // deferprocStack takes a *_defer arg
} else {
// Store arguments to stack, including defer/go arguments and receiver for method calls.
// These are written in SP-offset order.
argStart := base.Ctxt.FixedFrameSize()
// Defer/go args.
if k != callNormal {
- // Write argsize and closure (args to newproc/deferproc).
- argsize := s.constInt32(types.Types[types.TUINT32], int32(stksize))
- ACArgs = append(ACArgs, types.Types[types.TUINT32]) // not argExtra
- callArgs = append(callArgs, argsize)
- ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
+ // Write closure (arg to newproc/deferproc).
+ ACArgs = append(ACArgs, types.Types[types.TUINTPTR]) // not argExtra
callArgs = append(callArgs, closure)
- stksize += 2 * int64(types.PtrSize)
- argStart += 2 * int64(types.PtrSize)
+ stksize += int64(types.PtrSize)
+ argStart += int64(types.PtrSize)
}
// Set receiver (for interface calls).
@@ -6931,8 +6776,12 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
// recovers a panic, it will return to caller with right results.
// The results are already in memory, because they are not SSA'd
// when the function has defers (see canSSAName).
- if f.OwnAux.ABIInfo().OutRegistersUsed() != 0 {
- Arch.LoadRegResults(&s, f)
+ for _, o := range f.OwnAux.ABIInfo().OutParams() {
+ n := o.Name.(*ir.Name)
+ rts, offs := o.RegisterTypesAndOffsets()
+ for i := range o.Registers {
+ Arch.LoadRegResult(&s, f, rts[i], ssa.ObjRegForAbiReg(o.Registers[i], f.Config), n, offs[i])
+ }
}
pp.Prog(obj.ARET)
@@ -7686,9 +7535,8 @@ func max8(a, b int8) int8 {
return b
}
-// deferstruct makes a runtime._defer structure, with additional space for
-// stksize bytes of args.
-func deferstruct(stksize int64) *types.Type {
+// deferstruct makes a runtime._defer structure.
+func deferstruct() *types.Type {
makefield := func(name string, typ *types.Type) *types.Field {
// Unlike the global makefield function, this one needs to set Pkg
// because these types might be compared (in SSA CSE sorting).
@@ -7696,13 +7544,9 @@ func deferstruct(stksize int64) *types.Type {
sym := &types.Sym{Name: name, Pkg: types.LocalPkg}
return types.NewField(src.NoXPos, sym, typ)
}
- argtype := types.NewArray(types.Types[types.TUINT8], stksize)
- argtype.Width = stksize
- argtype.Align = 1
// These fields must match the ones in runtime/runtime2.go:_defer and
- // cmd/compile/internal/gc/ssa.go:(*state).call.
+ // (*state).call above.
fields := []*types.Field{
- makefield("siz", types.Types[types.TUINT32]),
makefield("started", types.Types[types.TBOOL]),
makefield("heap", types.Types[types.TBOOL]),
makefield("openDefer", types.Types[types.TBOOL]),
@@ -7714,10 +7558,9 @@ func deferstruct(stksize int64) *types.Type {
makefield("fn", types.Types[types.TUINTPTR]),
makefield("_panic", types.Types[types.TUINTPTR]),
makefield("link", types.Types[types.TUINTPTR]),
- makefield("framepc", types.Types[types.TUINTPTR]),
- makefield("varp", types.Types[types.TUINTPTR]),
makefield("fd", types.Types[types.TUINTPTR]),
- makefield("args", argtype),
+ makefield("varp", types.Types[types.TUINTPTR]),
+ makefield("framepc", types.Types[types.TUINTPTR]),
}
// build struct holding the above fields
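
For orientation, here is a stand-alone sketch of the field order that (*state).call and deferstruct() now agree on, following the offset comments above (0: started .. 10: framepc). Pointer-shaped fields are shown as uintptr, as in deferstruct(); this is illustrative only, not the runtime's actual _defer declaration.

package main

import (
	"fmt"
	"unsafe"
)

// deferRecord mirrors the field order produced by deferstruct() above
// (started .. framepc), after the removal of siz and args.
type deferRecord struct {
	started   bool
	heap      bool
	openDefer bool
	sp        uintptr // set in deferprocStack
	pc        uintptr // set in deferprocStack
	fn        uintptr // field index 5, matching the t.FieldOff(5) store above
	_panic    uintptr
	link      uintptr
	fd        uintptr
	varp      uintptr
	framepc   uintptr
}

func main() {
	var d deferRecord
	fmt.Println("offset of fn:", unsafe.Offsetof(d.fn)) // 24 on 64-bit layouts
}
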
diff --git a/src/cmd/compile/internal/staticdata/embed.go b/src/cmd/compile/internal/staticdata/embed.go
index 8936c4f5b4..0730d346b2 100644
--- a/src/cmd/compile/internal/staticdata/embed.go
+++ b/src/cmd/compile/internal/staticdata/embed.go
@@ -108,13 +108,6 @@ func WriteEmbed(v *ir.Name) {
// TODO(mdempsky): User errors should be reported by the frontend.
commentPos := (*v.Embed)[0].Pos
- if !types.AllowsGoVersion(types.LocalPkg, 1, 16) {
- prevPos := base.Pos
- base.Pos = commentPos
- base.ErrorfVers("go1.16", "go:embed")
- base.Pos = prevPos
- return
- }
if base.Flag.Cfg.Embed.Patterns == nil {
base.ErrorfAt(commentPos, "invalid go:embed: build system did not supply embed configuration")
return
diff --git a/src/cmd/compile/internal/staticinit/sched.go b/src/cmd/compile/internal/staticinit/sched.go
index 0c97b6de74..9329a46989 100644
--- a/src/cmd/compile/internal/staticinit/sched.go
+++ b/src/cmd/compile/internal/staticinit/sched.go
@@ -403,10 +403,10 @@ func (s *Schedule) initplan(n ir.Node) {
base.Fatalf("initplan structlit")
}
a := a.(*ir.StructKeyExpr)
- if a.Field.IsBlank() {
+ if a.Sym().IsBlank() {
continue
}
- s.addvalue(p, a.Offset, a.Value)
+ s.addvalue(p, a.Field.Offset, a.Value)
}
case ir.OMAPLIT:
diff --git a/src/cmd/compile/internal/syntax/parser.go b/src/cmd/compile/internal/syntax/parser.go
index e7b8840b33..503dea7fae 100644
--- a/src/cmd/compile/internal/syntax/parser.go
+++ b/src/cmd/compile/internal/syntax/parser.go
@@ -1100,7 +1100,7 @@ loop:
complit_ok = true
}
case *IndexExpr:
- if p.xnest >= 0 {
+ if p.xnest >= 0 && !isValue(t) {
// x is possibly a composite literal type
complit_ok = true
}
@@ -1127,6 +1127,21 @@ loop:
return x
}
+// isValue reports whether x syntactically must be a value (and not a type) expression.
+func isValue(x Expr) bool {
+ switch x := x.(type) {
+ case *BasicLit, *CompositeLit, *FuncLit, *SliceExpr, *AssertExpr, *TypeSwitchGuard, *CallExpr:
+ return true
+ case *Operation:
+ return x.Op != Mul || x.Y != nil // *T may be a type
+ case *ParenExpr:
+ return isValue(x.X)
+ case *IndexExpr:
+ return isValue(x.X) || isValue(x.Index)
+ }
+ return false
+}
+
// Element = Expression | LiteralValue .
func (p *parser) bare_complitexpr() Expr {
if trace {
@@ -1443,6 +1458,18 @@ func (p *parser) interfaceType() *InterfaceType {
}
return false
}
+
+ default:
+ if p.mode&AllowGenerics != 0 {
+ pos := p.pos()
+ if t := p.typeOrNil(); t != nil {
+ f := new(Field)
+ f.pos = pos
+ f.Type = t
+ typ.MethodList = append(typ.MethodList, p.embeddedElem(f))
+ return false
+ }
+ }
}
if p.mode&AllowGenerics != 0 {
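
The isValue check added above exists because, with generics, an index expression followed by "{" is ambiguous: it may open a composite literal of an instantiated type, or it may simply index a value. A minimal sketch of the two readings (hypothetical code, not part of the patch):

package main

type Box[T any] struct{ v T }

func main() {
	// Box[int] names a type, so a following "{...}" is a composite literal.
	b := Box[int]{v: 1}

	// s[0] indexes a value; isValue reports the operand as a value, so the
	// parser does not try to read a composite literal after it.
	s := []int{1, 2, 3}
	if s[0] == b.v {
		println("equal")
	}
}
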
diff --git a/src/cmd/compile/internal/syntax/testdata/interface.go2 b/src/cmd/compile/internal/syntax/testdata/interface.go2
index a817327a43..b399d75148 100644
--- a/src/cmd/compile/internal/syntax/testdata/interface.go2
+++ b/src/cmd/compile/internal/syntax/testdata/interface.go2
@@ -25,7 +25,6 @@ type _ interface {
~int | ~string
}
-
type _ interface {
m()
~int
@@ -34,3 +33,48 @@ type _ interface {
~int | ~string
type bool, int, float64
}
+
+type _ interface {
+ int
+ []byte
+ [10]int
+ struct{}
+ *int
+ func()
+ interface{}
+ map[string]int
+ chan T
+ chan<- T
+ <-chan T
+ T[int]
+}
+
+type _ interface {
+ int | string
+ []byte | string
+ [10]int | string
+ struct{} | string
+ *int | string
+ func() | string
+ interface{} | string
+ map[string]int | string
+ chan T | string
+ chan<- T | string
+ <-chan T | string
+ T[int] | string
+}
+
+type _ interface {
+ ~int | string
+ ~[]byte | string
+ ~[10]int | string
+ ~struct{} | string
+ ~*int | string
+ ~func() | string
+ ~interface{} | string
+ ~map[string]int | string
+ ~chan T | string
+ ~chan<- T | string
+ ~<-chan T | string
+ ~T[int] | string
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/issue46558.src b/src/cmd/compile/internal/syntax/testdata/issue46558.src
new file mode 100644
index 0000000000..a22b600825
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue46558.src
@@ -0,0 +1,14 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func F(s string) {
+ switch s[0] {
+ case 'a':
+ case s[2] { // ERROR unexpected {
+ case 'b':
+ }
+ }
+} // ERROR non-declaration statement
diff --git a/src/cmd/compile/internal/test/inl_test.go b/src/cmd/compile/internal/test/inl_test.go
index 6f100033cf..bbdbe0c37c 100644
--- a/src/cmd/compile/internal/test/inl_test.go
+++ b/src/cmd/compile/internal/test/inl_test.go
@@ -42,13 +42,10 @@ func TestIntendedInlining(t *testing.T) {
"bucketMask",
"bucketShift",
"chanbuf",
- "deferArgs",
- "deferclass",
"evacuated",
"fastlog2",
"fastrand",
"float64bits",
- "funcPC",
"getArgInfoFast",
"getm",
"getMCache",
@@ -65,7 +62,6 @@ func TestIntendedInlining(t *testing.T) {
"subtract1",
"subtractb",
"tophash",
- "totaldefersize",
"(*bmap).keys",
"(*bmap).overflow",
"(*waitq).enqueue",
diff --git a/src/cmd/compile/internal/typecheck/crawler.go b/src/cmd/compile/internal/typecheck/crawler.go
new file mode 100644
index 0000000000..c78a604a8d
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/crawler.go
@@ -0,0 +1,179 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+)
+
+// crawlExports crawls the type/object graph rooted at the given list
+// of exported objects. Any functions that are found to be potentially
+// callable by importers are marked with ExportInline so that
+// iexport.go knows to re-export their inline body.
+func crawlExports(exports []*ir.Name) {
+ p := crawler{marked: make(map[*types.Type]bool)}
+ for _, n := range exports {
+ p.markObject(n)
+ }
+}
+
+type crawler struct {
+ marked map[*types.Type]bool // types already seen by markType
+}
+
+// markObject visits a reachable object.
+func (p *crawler) markObject(n *ir.Name) {
+ if n.Op() == ir.ONAME && n.Class == ir.PFUNC {
+ p.markInlBody(n)
+ }
+
+ p.markType(n.Type())
+}
+
+// markType recursively visits types reachable from t to identify
+// functions whose inline bodies may be needed.
+func (p *crawler) markType(t *types.Type) {
+ if t.IsInstantiatedGeneric() {
+ // Re-instantiated types don't add anything new, so don't follow them.
+ return
+ }
+ if p.marked[t] {
+ return
+ }
+ p.marked[t] = true
+
+ // If this is a named type, mark all of its associated
+ // methods. Skip interface types because t.Methods contains
+ // only their unexpanded method set (i.e., exclusive of
+ // interface embeddings), and the switch statement below
+ // handles their full method set.
+ if t.Sym() != nil && t.Kind() != types.TINTER {
+ for _, m := range t.Methods().Slice() {
+ if types.IsExported(m.Sym.Name) {
+ p.markObject(m.Nname.(*ir.Name))
+ }
+ }
+ }
+
+ // Recursively mark any types that can be produced given a
+ // value of type t: dereferencing a pointer; indexing or
+ // iterating over an array, slice, or map; receiving from a
+ // channel; accessing a struct field or interface method; or
+ // calling a function.
+ //
+ // Notably, we don't mark function parameter types, because
+ // the user already needs some way to construct values of
+ // those types.
+ switch t.Kind() {
+ case types.TPTR, types.TARRAY, types.TSLICE:
+ p.markType(t.Elem())
+
+ case types.TCHAN:
+ if t.ChanDir().CanRecv() {
+ p.markType(t.Elem())
+ }
+
+ case types.TMAP:
+ p.markType(t.Key())
+ p.markType(t.Elem())
+
+ case types.TSTRUCT:
+ for _, f := range t.FieldSlice() {
+ if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
+ p.markType(f.Type)
+ }
+ }
+
+ case types.TFUNC:
+ for _, f := range t.Results().FieldSlice() {
+ p.markType(f.Type)
+ }
+
+ case types.TINTER:
+ // TODO(danscales) - will have to deal with the types in interface
+ // elements here when implemented in types2 and represented in types1.
+ for _, f := range t.AllMethods().Slice() {
+ if types.IsExported(f.Sym.Name) {
+ p.markType(f.Type)
+ }
+ }
+
+ case types.TTYPEPARAM:
+ // No other type that needs to be followed.
+ }
+}
+
+// markInlBody marks n's inline body for export and recursively
+// ensures all called functions are marked too.
+func (p *crawler) markInlBody(n *ir.Name) {
+ if n == nil {
+ return
+ }
+ if n.Op() != ir.ONAME || n.Class != ir.PFUNC {
+ base.Fatalf("markInlBody: unexpected %v, %v, %v", n, n.Op(), n.Class)
+ }
+ fn := n.Func
+ if fn == nil {
+ base.Fatalf("markInlBody: missing Func on %v", n)
+ }
+ if fn.Inl == nil {
+ return
+ }
+
+ if fn.ExportInline() {
+ return
+ }
+ fn.SetExportInline(true)
+
+ ImportedBody(fn)
+
+ var doFlood func(n ir.Node)
+ doFlood = func(n ir.Node) {
+ switch n.Op() {
+ case ir.OMETHEXPR, ir.ODOTMETH:
+ p.markInlBody(ir.MethodExprName(n))
+
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ switch n.Class {
+ case ir.PFUNC:
+ p.markInlBody(n)
+ Export(n)
+ case ir.PEXTERN:
+ Export(n)
+ }
+ p.checkGenericType(n.Type())
+ case ir.OTYPE:
+ p.checkGenericType(n.Type())
+ case ir.OCALLPART:
+ // Okay, because we don't yet inline indirect
+ // calls to method values.
+ case ir.OCLOSURE:
+ // VisitList doesn't visit closure bodies, so force a
+ // recursive call to VisitList on the body of the closure.
+ ir.VisitList(n.(*ir.ClosureExpr).Func.Body, doFlood)
+ }
+ }
+
+ // Recursively identify all referenced functions for
+ // reexport. We want to include even non-called functions,
+ // because after inlining they might be callable.
+ ir.VisitList(fn.Inl.Body, doFlood)
+}
+
+// checkGenericType ensures that we call markType() on any base generic type
+// that is written to the export file (even if not explicitly marked for
+// export), so its methods will be available for inlining if needed.
+func (p *crawler) checkGenericType(t *types.Type) {
+ if t != nil && t.HasTParam() {
+ if t.OrigSym != nil {
+ // Convert to the base generic type.
+ t = t.OrigSym.Def.Type()
+ }
+ p.markType(t)
+ }
+}
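
A small hypothetical package illustrating the asymmetry encoded in markType above: result types of exported functions are followed (so their exported methods can have inline bodies re-exported), while parameter types are not:

package a

// T is reachable from F's result type, so the crawler marks it and its
// exported method M; importers calling a.F().M() may then inline M.
type T struct{ n int }

func (t T) M() int { return t.n }

// U appears in G's signature only as a parameter; markType's TFUNC case
// follows result types, not parameter types, since callers must already
// be able to construct a U.
type U struct{ m int }

func F() T  { return T{n: 1} }
func G(u U) {}
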
diff --git a/src/cmd/compile/internal/typecheck/dcl.go b/src/cmd/compile/internal/typecheck/dcl.go
index f3058d8811..5f8b8b3d41 100644
--- a/src/cmd/compile/internal/typecheck/dcl.go
+++ b/src/cmd/compile/internal/typecheck/dcl.go
@@ -353,12 +353,10 @@ func funcargs(nt *ir.FuncType) {
}
// declare the out arguments.
- gen := len(nt.Params)
- for _, n := range nt.Results {
+ for i, n := range nt.Results {
if n.Sym == nil {
// Name so that escape analysis can track it. ~r stands for 'result'.
- n.Sym = LookupNum("~r", gen)
- gen++
+ n.Sym = LookupNum("~r", i)
}
if n.Sym.IsBlank() {
// Give it a name so we can assign to it during return. ~b stands for 'blank'.
@@ -367,8 +365,7 @@ func funcargs(nt *ir.FuncType) {
// func g() int
// f is allowed to use a plain 'return' with no arguments, while g is not.
// So the two cases must be distinguished.
- n.Sym = LookupNum("~b", gen)
- gen++
+ n.Sym = LookupNum("~b", i)
}
funcarg(n, ir.PPARAMOUT)
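
A concrete effect of the renumbering above (the ~r/~b names are compiler-internal and only appear in dumps and diagnostics; the example is hypothetical):

package p

// With this change the results are named ~r0 and ~r1 internally; previously
// numbering continued after the parameters, giving ~r2 and ~r3.
func f(a, b string) (int, error) {
	return len(a) + len(b), nil
}
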
diff --git a/src/cmd/compile/internal/typecheck/expr.go b/src/cmd/compile/internal/typecheck/expr.go
index 24d141e8a2..d52f011072 100644
--- a/src/cmd/compile/internal/typecheck/expr.go
+++ b/src/cmd/compile/internal/typecheck/expr.go
@@ -311,14 +311,23 @@ func tcCompLit(n *ir.CompLitExpr) (res ir.Node) {
f := t.Field(i)
s := f.Sym
- if s != nil && !types.IsExported(s.Name) && s.Pkg != types.LocalPkg {
- base.Errorf("implicit assignment of unexported field '%s' in %v literal", s.Name, t)
+
+ // Do the test for assigning to unexported fields.
+ // But if this is an instantiated function, then
+ // the function has already been typechecked. In
+ // that case, don't do the test, since it can fail
+ // for the closure structs created in
+ // walkClosure(), because the instantiated
+ // function is compiled as if in the source
+ // package of the generic function.
+ if !(ir.CurFunc != nil && strings.Index(ir.CurFunc.Nname.Sym().Name, "[") >= 0) {
+ if s != nil && !types.IsExported(s.Name) && s.Pkg != types.LocalPkg {
+ base.Errorf("implicit assignment of unexported field '%s' in %v literal", s.Name, t)
+ }
}
// No pushtype allowed here. Must name fields for that.
n1 = AssignConv(n1, f.Type, "field value")
- sk := ir.NewStructKeyExpr(base.Pos, f.Sym, n1)
- sk.Offset = f.Offset
- ls[i] = sk
+ ls[i] = ir.NewStructKeyExpr(base.Pos, f, n1)
}
if len(ls) < t.NumFields() {
base.Errorf("too few values in %v", n)
@@ -328,77 +337,33 @@ func tcCompLit(n *ir.CompLitExpr) (res ir.Node) {
// keyed list
ls := n.List
- for i, l := range ls {
- ir.SetPos(l)
-
- if l.Op() == ir.OKEY {
- kv := l.(*ir.KeyExpr)
- key := kv.Key
-
- // Sym might have resolved to name in other top-level
- // package, because of import dot. Redirect to correct sym
- // before we do the lookup.
- s := key.Sym()
- if id, ok := key.(*ir.Ident); ok && DotImportRefs[id] != nil {
- s = Lookup(s.Name)
- }
-
- // An OXDOT uses the Sym field to hold
- // the field to the right of the dot,
- // so s will be non-nil, but an OXDOT
- // is never a valid struct literal key.
- if s == nil || s.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || s.IsBlank() {
- base.Errorf("invalid field name %v in struct initializer", key)
- continue
- }
-
- l = ir.NewStructKeyExpr(l.Pos(), s, kv.Value)
- ls[i] = l
- }
-
- if l.Op() != ir.OSTRUCTKEY {
- if !errored {
- base.Errorf("mixture of field:value and value initializers")
- errored = true
- }
- ls[i] = Expr(ls[i])
- continue
- }
- l := l.(*ir.StructKeyExpr)
-
- f := Lookdot1(nil, l.Field, t, t.Fields(), 0)
- if f == nil {
- if ci := Lookdot1(nil, l.Field, t, t.Fields(), 2); ci != nil { // Case-insensitive lookup.
- if visible(ci.Sym) {
- base.Errorf("unknown field '%v' in struct literal of type %v (but does have %v)", l.Field, t, ci.Sym)
- } else if nonexported(l.Field) && l.Field.Name == ci.Sym.Name { // Ensure exactness before the suggestion.
- base.Errorf("cannot refer to unexported field '%v' in struct literal of type %v", l.Field, t)
- } else {
- base.Errorf("unknown field '%v' in struct literal of type %v", l.Field, t)
+ for i, n := range ls {
+ ir.SetPos(n)
+
+ sk, ok := n.(*ir.StructKeyExpr)
+ if !ok {
+ kv, ok := n.(*ir.KeyExpr)
+ if !ok {
+ if !errored {
+ base.Errorf("mixture of field:value and value initializers")
+ errored = true
}
+ ls[i] = Expr(n)
continue
}
- var f *types.Field
- p, _ := dotpath(l.Field, t, &f, true)
- if p == nil || f.IsMethod() {
- base.Errorf("unknown field '%v' in struct literal of type %v", l.Field, t)
+
+ sk = tcStructLitKey(t, kv)
+ if sk == nil {
continue
}
- // dotpath returns the parent embedded types in reverse order.
- var ep []string
- for ei := len(p) - 1; ei >= 0; ei-- {
- ep = append(ep, p[ei].field.Sym.Name)
- }
- ep = append(ep, l.Field.Name)
- base.Errorf("cannot use promoted field %v in struct literal of type %v", strings.Join(ep, "."), t)
- continue
+
+ fielddup(sk.Sym().Name, hash)
}
- fielddup(f.Sym.Name, hash)
- l.Offset = f.Offset
// No pushtype allowed here. Tried and rejected.
- l.Value = Expr(l.Value)
- l.Value = AssignConv(l.Value, f.Type, "field value")
+ sk.Value = Expr(sk.Value)
+ sk.Value = AssignConv(sk.Value, sk.Field.Type, "field value")
+ ls[i] = sk
}
}
@@ -409,6 +374,60 @@ func tcCompLit(n *ir.CompLitExpr) (res ir.Node) {
return n
}
+// tcStructLitKey typechecks an OKEY node that appeared within a
+// struct literal.
+func tcStructLitKey(typ *types.Type, kv *ir.KeyExpr) *ir.StructKeyExpr {
+ key := kv.Key
+
+ // Sym might have resolved to name in other top-level
+ // package, because of import dot. Redirect to correct sym
+ // before we do the lookup.
+ sym := key.Sym()
+ if id, ok := key.(*ir.Ident); ok && DotImportRefs[id] != nil {
+ sym = Lookup(sym.Name)
+ }
+
+ // An OXDOT uses the Sym field to hold
+ // the field to the right of the dot,
+ // so sym will be non-nil, but an OXDOT
+ // is never a valid struct literal key.
+ if sym == nil || sym.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || sym.IsBlank() {
+ base.Errorf("invalid field name %v in struct initializer", key)
+ return nil
+ }
+
+ if f := Lookdot1(nil, sym, typ, typ.Fields(), 0); f != nil {
+ return ir.NewStructKeyExpr(kv.Pos(), f, kv.Value)
+ }
+
+ if ci := Lookdot1(nil, sym, typ, typ.Fields(), 2); ci != nil { // Case-insensitive lookup.
+ if visible(ci.Sym) {
+ base.Errorf("unknown field '%v' in struct literal of type %v (but does have %v)", sym, typ, ci.Sym)
+ } else if nonexported(sym) && sym.Name == ci.Sym.Name { // Ensure exactness before the suggestion.
+ base.Errorf("cannot refer to unexported field '%v' in struct literal of type %v", sym, typ)
+ } else {
+ base.Errorf("unknown field '%v' in struct literal of type %v", sym, typ)
+ }
+ return nil
+ }
+
+ var f *types.Field
+ p, _ := dotpath(sym, typ, &f, true)
+ if p == nil || f.IsMethod() {
+ base.Errorf("unknown field '%v' in struct literal of type %v", sym, typ)
+ return nil
+ }
+
+ // dotpath returns the parent embedded types in reverse order.
+ var ep []string
+ for ei := len(p) - 1; ei >= 0; ei-- {
+ ep = append(ep, p[ei].field.Sym.Name)
+ }
+ ep = append(ep, sym.Name)
+ base.Errorf("cannot use promoted field %v in struct literal of type %v", strings.Join(ep, "."), typ)
+ return nil
+}
+
// tcConv typechecks an OCONV node.
func tcConv(n *ir.ConvExpr) ir.Node {
types.CheckSize(n.Type()) // ensure width is calculated for backend
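
The promoted-field branch at the end of tcStructLitKey corresponds to ordinary Go semantics for keyed struct literals; a minimal example of what it rejects and the accepted spelling:

package p

type Inner struct{ X int }
type Outer struct{ Inner }

// OK: initialize through the embedded field explicitly.
var ok = Outer{Inner: Inner{X: 1}}

// Not allowed: X is a promoted field. If uncommented, this hits the
// "cannot use promoted field Inner.X in struct literal of type Outer" error.
// var bad = Outer{X: 1}
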
diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go
index a6dfbbf569..bd21977f26 100644
--- a/src/cmd/compile/internal/typecheck/func.go
+++ b/src/cmd/compile/internal/typecheck/func.go
@@ -8,6 +8,7 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
+ "cmd/internal/src"
"fmt"
"go/constant"
@@ -15,21 +16,21 @@ import (
)
// package all the arguments that match a ... T parameter into a []T.
-func MakeDotArgs(typ *types.Type, args []ir.Node) ir.Node {
+func MakeDotArgs(pos src.XPos, typ *types.Type, args []ir.Node) ir.Node {
var n ir.Node
if len(args) == 0 {
- n = NodNil()
+ n = ir.NewNilExpr(pos)
n.SetType(typ)
} else {
- lit := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ), nil)
- lit.List.Append(args...)
+ args = append([]ir.Node(nil), args...)
+ lit := ir.NewCompLitExpr(pos, ir.OCOMPLIT, ir.TypeNode(typ), args)
lit.SetImplicit(true)
n = lit
}
n = Expr(n)
if n.Type() == nil {
- base.Fatalf("mkdotargslice: typecheck failed")
+ base.FatalfAt(pos, "mkdotargslice: typecheck failed")
}
return n
}
@@ -47,7 +48,7 @@ func FixVariadicCall(call *ir.CallExpr) {
args := call.Args
extra := args[vi:]
- slice := MakeDotArgs(vt, extra)
+ slice := MakeDotArgs(call.Pos(), vt, extra)
for i := range extra {
extra[i] = nil // allow GC
}
@@ -73,8 +74,25 @@ func ClosureType(clo *ir.ClosureExpr) *types.Type {
// The information appears in the binary in the form of type descriptors;
// the struct is unnamed so that closures in multiple packages with the
// same struct type can share the descriptor.
+
+ // Make sure the .F field is in the same package as the rest of the
+ // fields. This deals with closures in instantiated functions, which are
+ // compiled as if from the source package of the generic function.
+ var pkg *types.Pkg
+ if len(clo.Func.ClosureVars) == 0 {
+ pkg = types.LocalPkg
+ } else {
+ for _, v := range clo.Func.ClosureVars {
+ if pkg == nil {
+ pkg = v.Sym().Pkg
+ } else if pkg != v.Sym().Pkg {
+ base.Fatalf("Closure variables from multiple packages")
+ }
+ }
+ }
+
fields := []*types.Field{
- types.NewField(base.Pos, Lookup(".F"), types.Types[types.TUINTPTR]),
+ types.NewField(base.Pos, pkg.Lookup(".F"), types.Types[types.TUINTPTR]),
}
for _, v := range clo.Func.ClosureVars {
typ := v.Type()
@@ -181,35 +199,6 @@ func fnpkg(fn *ir.Name) *types.Pkg {
return fn.Sym().Pkg
}
-// ClosureName generates a new unique name for a closure within
-// outerfunc.
-func ClosureName(outerfunc *ir.Func) *types.Sym {
- outer := "glob."
- prefix := "func"
- gen := &globClosgen
-
- if outerfunc != nil {
- if outerfunc.OClosure != nil {
- prefix = ""
- }
-
- outer = ir.FuncName(outerfunc)
-
- // There may be multiple functions named "_". In those
- // cases, we can't use their individual Closgens as it
- // would lead to name clashes.
- if !ir.IsBlank(outerfunc.Nname) {
- gen = &outerfunc.Closgen
- }
- }
-
- *gen++
- return Lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen))
-}
-
-// globClosgen is like Func.Closgen, but for the global scope.
-var globClosgen int32
-
// MethodValueWrapper returns the DCLFUNC node representing the
// wrapper function (*-fm) needed for the given method value. If the
// wrapper function hasn't already been created yet, it's created and
@@ -294,8 +283,20 @@ func MethodValueWrapper(dot *ir.SelectorExpr) *ir.Func {
// function associated with the closure.
// TODO: This creation of the named function should probably really be done in a
// separate pass from type-checking.
-func tcClosure(clo *ir.ClosureExpr, top int) {
+func tcClosure(clo *ir.ClosureExpr, top int) ir.Node {
fn := clo.Func
+
+ // We used to allow IR builders to typecheck the underlying Func
+ // themselves, but that led to too much variety and inconsistency
+ // around who's responsible for naming the function, typechecking
+ // it, or adding it to Target.Decls.
+ //
+ // It's now all or nothing. Callers are still allowed to do these
+ // themselves, but then they assume responsibility for all of them.
+ if fn.Typecheck() == 1 {
+ base.FatalfAt(fn.Pos(), "underlying closure func already typechecked: %v", fn)
+ }
+
// Set current associated iota value, so iota can be used inside
// function in ConstSpec, see issue #22344
if x := getIotaValue(); x >= 0 {
@@ -304,30 +305,14 @@ func tcClosure(clo *ir.ClosureExpr, top int) {
fn.SetClosureCalled(top&ctxCallee != 0)
- // Do not typecheck fn twice, otherwise, we will end up pushing
- // fn to Target.Decls multiple times, causing InitLSym called twice.
- // See #30709
- if fn.Typecheck() == 1 {
- clo.SetType(fn.Type())
- return
- }
-
- // Don't give a name and add to Target.Decls if we are typechecking an inlined
- // body in ImportedBody(), since we only want to create the named function
- // when the closure is actually inlined (and then we force a typecheck
- // explicitly in (*inlsubst).node()).
- if !inTypeCheckInl {
- fn.Nname.SetSym(ClosureName(ir.CurFunc))
- ir.MarkFunc(fn.Nname)
- }
+ ir.NameClosure(clo, ir.CurFunc)
Func(fn)
- clo.SetType(fn.Type())
// Type check the body now, but only if we're inside a function.
// At top level (in a variable initialization: curfn==nil) we're not
// ready to type check code yet; we'll check it later, because the
// underlying closure function we create is added to Target.Decls.
- if ir.CurFunc != nil && clo.Type() != nil {
+ if ir.CurFunc != nil {
oldfn := ir.CurFunc
ir.CurFunc = fn
Stmts(fn.Body)
@@ -353,14 +338,17 @@ func tcClosure(clo *ir.ClosureExpr, top int) {
}
fn.ClosureVars = fn.ClosureVars[:out]
- if base.Flag.W > 1 {
- s := fmt.Sprintf("New closure func: %s", ir.FuncName(fn))
- ir.Dump(s, fn)
- }
- if !inTypeCheckInl {
- // Add function to Target.Decls once only when we give it a name
- Target.Decls = append(Target.Decls, fn)
+ clo.SetType(fn.Type())
+
+ target := Target
+ if inTypeCheckInl {
+ // We're typechecking an imported function, so it's not actually
+ // part of Target. Skip adding it to Target.Decls so we don't
+ // compile it again.
+ target = nil
}
+
+ return ir.UseClosure(clo, target)
}
// type check function definition
diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go
index 64d68ef625..10d4bd6e7e 100644
--- a/src/cmd/compile/internal/typecheck/iexport.go
+++ b/src/cmd/compile/internal/typecheck/iexport.go
@@ -173,6 +173,8 @@
// }
//
//
+// TODO(danscales): fill in doc for 'type TypeParamType' and 'type InstType'
+//
// type Signature struct {
// Params []Param
// Results []Param
@@ -202,7 +204,6 @@
package typecheck
import (
- "bufio"
"bytes"
"crypto/md5"
"encoding/binary"
@@ -221,9 +222,17 @@ import (
)
// Current indexed export format version. Increase with each format change.
-// 1: added column details to Pos
// 0: Go1.11 encoding
-const iexportVersion = 1
+// 1: added column details to Pos
+// 2: added information for generic function/types (currently unstable)
+const (
+ iexportVersionGo1_11 = 0
+ iexportVersionPosCol = 1
+ iexportVersionGenerics = 2
+
+ // Start of the unstable series of versions, remove "+ n" before release.
+ iexportVersionCurrent = iexportVersionGenerics + 1
+)
// predeclReserved is the number of type offsets reserved for types
// implicitly declared in the universe block.
@@ -244,6 +253,9 @@ const (
signatureType
structType
interfaceType
+ typeParamType
+ instType
+ unionType
)
const (
@@ -251,13 +263,22 @@ const (
magic = 0x6742937dc293105
)
-func WriteExports(out *bufio.Writer) {
+// WriteExports writes the indexed export format to out. If extensions
+// is true, then the compiler-only extensions are included.
+func WriteExports(out io.Writer, extensions bool) {
+ if extensions {
+ // If we're exporting inline bodies, invoke the crawler to mark
+ // which bodies to include.
+ crawlExports(Target.Exports)
+ }
+
p := iexporter{
allPkgs: map[*types.Pkg]bool{},
stringIndex: map[string]uint64{},
declIndex: map[*types.Sym]uint64{},
inlineIndex: map[*types.Sym]uint64{},
typIndex: map[*types.Type]uint64{},
+ extensions: extensions,
}
for i, pt := range predeclared() {
@@ -293,7 +314,12 @@ func WriteExports(out *bufio.Writer) {
// Assemble header.
var hdr intWriter
hdr.WriteByte('i')
- hdr.uint64(iexportVersion)
+ if base.Flag.G > 0 {
+ hdr.uint64(iexportVersionCurrent)
+ } else {
+ // Use old export format if doing -G=0 (no generics)
+ hdr.uint64(iexportVersionPosCol)
+ }
hdr.uint64(uint64(p.strings.Len()))
hdr.uint64(dataLen)
@@ -379,6 +405,8 @@ type iexporter struct {
declIndex map[*types.Sym]uint64
inlineIndex map[*types.Sym]uint64
typIndex map[*types.Type]uint64
+
+ extensions bool
}
// stringOff returns the offset of s within the string section.
@@ -449,7 +477,9 @@ func (p *iexporter) doDecl(n *ir.Name) {
w.tag('V')
w.pos(n.Pos())
w.typ(n.Type())
- w.varExt(n)
+ if w.p.extensions {
+ w.varExt(n)
+ }
case ir.PFUNC:
if ir.IsMethod(n) {
@@ -459,8 +489,19 @@ func (p *iexporter) doDecl(n *ir.Name) {
// Function.
w.tag('F')
w.pos(n.Pos())
+ // The tparam list of the function type is the
+ // declaration of the type params. So, write out the type
+ // params right now. Then those type params will be
+ // referenced via their type offset (via typOff) in all
+ // other places in the signature and function that they
+ // are used.
+ if base.Flag.G > 0 {
+ w.tparamList(n.Type().TParams().FieldSlice())
+ }
w.signature(n.Type())
- w.funcExt(n)
+ if w.p.extensions {
+ w.funcExt(n)
+ }
default:
base.Fatalf("unexpected class: %v, %v", n, n.Class)
@@ -476,10 +517,26 @@ func (p *iexporter) doDecl(n *ir.Name) {
w.tag('C')
w.pos(n.Pos())
w.value(n.Type(), n.Val())
- w.constExt(n)
+ if w.p.extensions {
+ w.constExt(n)
+ }
case ir.OTYPE:
- if types.IsDotAlias(n.Sym()) {
+ if n.Type().IsTypeParam() && n.Type().Underlying() == n.Type() {
+ // Even though it has local scope, a typeparam requires a
+ // declaration via its package and unique name, because it
+ // may be referenced within its type bound during its own
+ // definition.
+ w.tag('P')
+ // A typeparam has a name, and has a type bound rather
+ // than an underlying type.
+ w.pos(n.Pos())
+ w.int64(int64(n.Type().Index()))
+ w.typ(n.Type().Bound())
+ break
+ }
+
+ if n.Alias() {
// Alias.
w.tag('A')
w.pos(n.Pos())
@@ -491,6 +548,11 @@ func (p *iexporter) doDecl(n *ir.Name) {
w.tag('T')
w.pos(n.Pos())
+ if base.Flag.G > 0 {
+ // Export type parameters, if any, needed for this type
+ w.typeList(n.Type().RParams())
+ }
+
underlying := n.Type().Underlying()
if underlying == types.ErrorType.Underlying() {
// For "type T error", use error as the
@@ -505,22 +567,29 @@ func (p *iexporter) doDecl(n *ir.Name) {
t := n.Type()
if t.IsInterface() {
- w.typeExt(t)
+ if w.p.extensions {
+ w.typeExt(t)
+ }
break
}
- ms := t.Methods()
- w.uint64(uint64(ms.Len()))
- for _, m := range ms.Slice() {
+ // Sort methods, for consistency with types2.
+ methods := append([]*types.Field(nil), t.Methods().Slice()...)
+ sort.Sort(types.MethodsByName(methods))
+
+ w.uint64(uint64(len(methods)))
+ for _, m := range methods {
w.pos(m.Pos)
w.selector(m.Sym)
w.param(m.Type.Recv())
w.signature(m.Type)
}
- w.typeExt(t)
- for _, m := range ms.Slice() {
- w.methExt(m)
+ if w.p.extensions {
+ w.typeExt(t)
+ for _, m := range methods {
+ w.methExt(m)
+ }
}
default:
@@ -803,8 +872,46 @@ func (w *exportWriter) startType(k itag) {
}
func (w *exportWriter) doTyp(t *types.Type) {
- if t.Sym() != nil {
- if t.Sym().Pkg == types.BuiltinPkg || t.Sym().Pkg == ir.Pkgs.Unsafe {
+ s := t.Sym()
+ if s != nil && t.OrigSym != nil {
+ assert(base.Flag.G > 0)
+ // This is an instantiated type - could be a re-instantiation like
+ // Value[T2] or a full instantiation like Value[int].
+ if strings.Index(s.Name, "[") < 0 {
+ base.Fatalf("incorrect name for instantiated type")
+ }
+ w.startType(instType)
+ w.pos(t.Pos())
+ // Export the type arguments for the instantiated type. The
+ // instantiated type could be in a method header (e.g. "func (v
+ // *Value[T2]) set (...) { ... }"), so the type args are "new"
+ // typeparams. Or the instantiated type could be in a
+ // function/method body, so the type args are either concrete
+ // types or existing typeparams from the function/method header.
+ w.typeList(t.RParams())
+ // Export a reference to the base type.
+ baseType := t.OrigSym.Def.(*ir.Name).Type()
+ w.typ(baseType)
+ return
+ }
+
+ // The 't.Underlying() == t' check is to confirm this is a base typeparam
+ // type, rather than a defined type with typeparam underlying type, like:
+ // type orderedAbs[T any] T
+ if t.IsTypeParam() && t.Underlying() == t {
+ assert(base.Flag.G > 0)
+ if s.Pkg == types.BuiltinPkg || s.Pkg == ir.Pkgs.Unsafe {
+ base.Fatalf("builtin type missing from typIndex: %v", t)
+ }
+ // Write out the first use of a type param as a qualified ident.
+ // This will force a "declaration" of the type param.
+ w.startType(typeParamType)
+ w.qualifiedIdent(t.Obj().(*ir.Name))
+ return
+ }
+
+ if s != nil {
+ if s.Pkg == types.BuiltinPkg || s.Pkg == ir.Pkgs.Unsafe {
base.Fatalf("builtin type missing from typIndex: %v", t)
}
@@ -865,6 +972,12 @@ func (w *exportWriter) doTyp(t *types.Type) {
}
}
+ // Sort methods and embedded types, for consistency with types2.
+ // Note: embedded types may be anonymous, and types2 sorts them
+ // with sort.Stable too.
+ sort.Sort(types.MethodsByName(methods))
+ sort.Stable(types.EmbeddedsByName(embeddeds))
+
w.startType(interfaceType)
w.setPkg(t.Pkg(), true)
@@ -881,6 +994,19 @@ func (w *exportWriter) doTyp(t *types.Type) {
w.signature(f.Type)
}
+ case types.TUNION:
+ assert(base.Flag.G > 0)
+ // TODO(danscales): possibly put out the tilde bools in more
+ // compact form.
+ w.startType(unionType)
+ nt := t.NumTerms()
+ w.uint64(uint64(nt))
+ for i := 0; i < nt; i++ {
+ t, b := t.Term(i)
+ w.typ(t)
+ w.bool(b)
+ }
+
default:
base.Fatalf("unexpected type: %v", t)
}
@@ -906,6 +1032,23 @@ func (w *exportWriter) signature(t *types.Type) {
}
}
+func (w *exportWriter) typeList(ts []*types.Type) {
+ w.uint64(uint64(len(ts)))
+ for _, rparam := range ts {
+ w.typ(rparam)
+ }
+}
+
+func (w *exportWriter) tparamList(fs []*types.Field) {
+ w.uint64(uint64(len(fs)))
+ for _, f := range fs {
+ if !f.Type.IsTypeParam() {
+ base.Fatalf("unexpected non-typeparam")
+ }
+ w.typ(f.Type)
+ }
+}
+
func (w *exportWriter) paramList(fs []*types.Field) {
w.uint64(uint64(len(fs)))
for _, f := range fs {
@@ -948,26 +1091,50 @@ func constTypeOf(typ *types.Type) constant.Kind {
}
func (w *exportWriter) value(typ *types.Type, v constant.Value) {
- ir.AssertValidTypeForConst(typ, v)
w.typ(typ)
+ var kind constant.Kind
+ var valType *types.Type
+
+ if typ.IsTypeParam() {
+ // A constant will have a TYPEPARAM type if it appears in a place
+ // where it must match that typeparam type (e.g. in a binary
+ // operation with a variable of that typeparam type). If so, then
+ // we must write out its actual constant kind as well, so its
+ // constant val can be read in properly during import.
+ kind = v.Kind()
+ w.int64(int64(kind))
+
+ switch kind {
+ case constant.Int:
+ valType = types.Types[types.TINT64]
+ case constant.Float:
+ valType = types.Types[types.TFLOAT64]
+ case constant.Complex:
+ valType = types.Types[types.TCOMPLEX128]
+ }
+ } else {
+ ir.AssertValidTypeForConst(typ, v)
+ kind = constTypeOf(typ)
+ valType = typ
+ }
- // Each type has only one admissible constant representation,
- // so we could type switch directly on v.U here. However,
- // switching on the type increases symmetry with import logic
- // and provides a useful consistency check.
+ // Each type has only one admissible constant representation, so we could
+ // type switch directly on v.Kind() here. However, switching on the type
+ // (in the non-typeparam case) increases symmetry with import logic and
+ // provides a useful consistency check.
- switch constTypeOf(typ) {
+ switch kind {
case constant.Bool:
w.bool(constant.BoolVal(v))
case constant.String:
w.string(constant.StringVal(v))
case constant.Int:
- w.mpint(v, typ)
+ w.mpint(v, valType)
case constant.Float:
- w.mpfloat(v, typ)
+ w.mpfloat(v, valType)
case constant.Complex:
- w.mpfloat(constant.Real(v), typ)
- w.mpfloat(constant.Imag(v), typ)
+ w.mpfloat(constant.Real(v), valType)
+ w.mpfloat(constant.Imag(v), valType)
}
}
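
The typeparam branch added to value() covers constants that take on a type parameter's type; a small hypothetical example of where that happens:

package p

// In x * 2, the untyped constant 2 is converted to the type parameter type T,
// which is the case the exporter now records with an explicit constant kind.
func Double[T ~int | ~int64](x T) T {
	return x * 2
}
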
@@ -1185,10 +1352,14 @@ func (w *exportWriter) funcExt(n *ir.Name) {
}
}
- // Inline body.
+ // Write out inline body or body of a generic function/method.
+ if n.Type().HasTParam() && n.Func.Body != nil && n.Func.Inl == nil {
+ base.FatalfAt(n.Pos(), "generic function is not marked inlineable")
+ }
if n.Func.Inl != nil {
w.uint64(1 + uint64(n.Func.Inl.Cost))
- if n.Func.ExportInline() {
+ w.bool(n.Func.Inl.CanDelayResults)
+ if n.Func.ExportInline() || n.Type().HasTParam() {
w.p.doInline(n)
}
@@ -1432,7 +1603,12 @@ func (w *exportWriter) commList(cases []*ir.CommClause) {
w.uint64(uint64(len(cases)))
for _, cas := range cases {
w.pos(cas.Pos())
- w.node(cas.Comm)
+ defaultCase := cas.Comm == nil
+ w.bool(defaultCase)
+ if !defaultCase {
+ // Only call w.node for a non-default case (cas.Comm is non-nil).
+ w.node(cas.Comm)
+ }
w.stmtList(cas.Body)
}
}
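
The select-case change above can be read as: emit a bool flagging the default case, and emit the communication statement only when one exists. A stand-alone sketch of that framing (illustrative, not the exporter's real API):

package main

import "fmt"

// encodeComm mirrors the scheme above: a leading flag marks the default
// case, and the communication clause is written only when present.
func encodeComm(comm string) []string {
	if comm == "" {
		return []string{"default=true"}
	}
	return []string{"default=false", comm}
}

func main() {
	fmt.Println(encodeComm(""))          // [default=true]
	fmt.Println(encodeComm("v := <-ch")) // [default=false v := <-ch]
}
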
@@ -1469,7 +1645,11 @@ func (w *exportWriter) expr(n ir.Node) {
case ir.OLITERAL:
w.op(ir.OLITERAL)
- w.pos(n.Pos())
+ if ir.HasUniquePos(n) {
+ w.pos(n.Pos())
+ } else {
+ w.pos(src.NoXPos)
+ }
w.value(n.Type(), n.Val())
case ir.ONAME:
@@ -1488,6 +1668,16 @@ func (w *exportWriter) expr(n ir.Node) {
// We don't need a type here, as the type will be provided at the
// declaration of n.
w.op(ir.ONAME)
+
+ // This handles the case where we haven't yet transformed a call
+ // to a builtin, so we must write out the builtin as a name in the
+ // builtin package.
+ isBuiltin := n.BuiltinOp != ir.OXXX
+ w.bool(isBuiltin)
+ if isBuiltin {
+ w.string(n.Sym().Name)
+ break
+ }
w.localName(n)
// case OPACK, ONONAME:
@@ -1588,9 +1778,8 @@ func (w *exportWriter) expr(n ir.Node) {
case ir.OXDOT, ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH, ir.OCALLPART, ir.OMETHEXPR:
n := n.(*ir.SelectorExpr)
if go117ExportTypes {
- if n.Op() == ir.OXDOT {
- base.Fatalf("shouldn't encounter XDOT in new exporter")
- }
+ // For go117ExportTypes, we usually see all ops except
+ // OXDOT, but we can see OXDOT for generic functions.
w.op(n.Op())
} else {
w.op(ir.OXDOT)
@@ -1604,7 +1793,8 @@ func (w *exportWriter) expr(n ir.Node) {
w.exoticField(n.Selection)
}
// n.Selection is not required for OMETHEXPR, ODOTMETH, and OCALLPART. It will
- // be reconstructed during import.
+ // be reconstructed during import. n.Selection is computed during
+ // transformDot() for OXDOT.
}
case ir.ODOTTYPE, ir.ODOTTYPE2:
@@ -1629,7 +1819,7 @@ func (w *exportWriter) expr(n ir.Node) {
w.expr(n.X)
w.expr(n.Index)
if go117ExportTypes {
- w.typ(n.Type())
+ w.exoticType(n.Type())
if n.Op() == ir.OINDEXMAP {
w.bool(n.Assigned)
}
@@ -1759,6 +1949,14 @@ func (w *exportWriter) expr(n ir.Node) {
w.op(ir.OEND)
}
+ case ir.OLINKSYMOFFSET:
+ n := n.(*ir.LinksymOffsetExpr)
+ w.op(ir.OLINKSYMOFFSET)
+ w.pos(n.Pos())
+ w.string(n.Linksym.Name)
+ w.uint64(uint64(n.Offset_))
+ w.typ(n.Type())
+
// unary expressions
case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.ORECV:
n := n.(*ir.UnaryExpr)
@@ -1829,6 +2027,26 @@ func (w *exportWriter) expr(n ir.Node) {
// if exporting, DCLCONST should just be removed as its usage
// has already been replaced with literals
+ case ir.OFUNCINST:
+ n := n.(*ir.InstExpr)
+ w.op(ir.OFUNCINST)
+ w.pos(n.Pos())
+ w.expr(n.X)
+ w.uint64(uint64(len(n.Targs)))
+ for _, targ := range n.Targs {
+ w.typ(targ.Type())
+ }
+ if go117ExportTypes {
+ w.typ(n.Type())
+ }
+
+ case ir.OSELRECV2:
+ n := n.(*ir.AssignListStmt)
+ w.op(ir.OSELRECV2)
+ w.pos(n.Pos())
+ w.exprList(n.Lhs)
+ w.exprList(n.Rhs)
+
default:
base.Fatalf("cannot export %v (%d) node\n"+
"\t==> please file an issue and assign to gri@", n.Op(), int(n.Op()))
@@ -1864,11 +2082,8 @@ func (w *exportWriter) fieldList(list ir.Nodes) {
for _, n := range list {
n := n.(*ir.StructKeyExpr)
w.pos(n.Pos())
- w.selector(n.Field)
+ w.exoticField(n.Field)
w.expr(n.Value)
- if go117ExportTypes {
- w.uint64(uint64(n.Offset))
- }
}
}
@@ -1903,7 +2118,7 @@ func (w *exportWriter) localIdent(s *types.Sym) {
}
// TODO(mdempsky): Fix autotmp hack.
- if i := strings.LastIndex(name, "."); i >= 0 && !strings.HasPrefix(name, ".autotmp_") {
+ if i := strings.LastIndex(name, "."); i >= 0 && !strings.HasPrefix(name, ".autotmp_") && !strings.HasPrefix(name, ".dict") { // TODO: just use autotmp names for dictionaries?
base.Fatalf("unexpected dot in identifier: %v", name)
}
diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go
index a5ddbb5a74..81f8ea05d9 100644
--- a/src/cmd/compile/internal/typecheck/iimport.go
+++ b/src/cmd/compile/internal/typecheck/iimport.go
@@ -8,10 +8,10 @@
package typecheck
import (
+ "bytes"
"encoding/binary"
"fmt"
"go/constant"
- "io"
"math/big"
"os"
"strings"
@@ -19,8 +19,6 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
- "cmd/internal/bio"
- "cmd/internal/goobj"
"cmd/internal/obj"
"cmd/internal/src"
)
@@ -94,7 +92,7 @@ func importReaderFor(sym *types.Sym, importers map[*types.Sym]iimporterAndOffset
}
type intReader struct {
- *bio.Reader
+ *strings.Reader
pkg *types.Pkg
}
@@ -116,33 +114,34 @@ func (r *intReader) uint64() uint64 {
return i
}
-func ReadImports(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) {
- ird := &intReader{in, pkg}
+func ReadImports(pkg *types.Pkg, data string) {
+ ird := &intReader{strings.NewReader(data), pkg}
version := ird.uint64()
- if version != iexportVersion {
- base.Errorf("import %q: unknown export format version %d", pkg.Path, version)
+ switch version {
+ case iexportVersionCurrent, iexportVersionPosCol, iexportVersionGo1_11:
+ default:
+ if version > iexportVersionGenerics {
+ base.Errorf("import %q: unstable export format version %d, just recompile", pkg.Path, version)
+ } else {
+ base.Errorf("import %q: unknown export format version %d", pkg.Path, version)
+ }
base.ErrorExit()
}
- sLen := ird.uint64()
- dLen := ird.uint64()
-
- // Map string (and data) section into memory as a single large
- // string. This reduces heap fragmentation and allows
- // returning individual substrings very efficiently.
- data, err := mapFile(in.File(), in.Offset(), int64(sLen+dLen))
- if err != nil {
- base.Errorf("import %q: mapping input: %v", pkg.Path, err)
- base.ErrorExit()
- }
- stringData := data[:sLen]
- declData := data[sLen:]
+ sLen := int64(ird.uint64())
+ dLen := int64(ird.uint64())
- in.MustSeek(int64(sLen+dLen), os.SEEK_CUR)
+ // TODO(mdempsky): Replace os.SEEK_CUR with io.SeekCurrent after
+ // #44505 is fixed.
+ whence, _ := ird.Seek(0, os.SEEK_CUR)
+ stringData := data[whence : whence+sLen]
+ declData := data[whence+sLen : whence+sLen+dLen]
+ ird.Seek(sLen+dLen, os.SEEK_CUR)
p := &iimporter{
- ipkg: pkg,
+ exportVersion: version,
+ ipkg: pkg,
pkgCache: map[uint64]*types.Pkg{},
posBaseCache: map[uint64]*src.PosBase{},
@@ -200,18 +199,11 @@ func ReadImports(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintT
}
}
}
-
- // Fingerprint.
- _, err = io.ReadFull(in, fingerprint[:])
- if err != nil {
- base.Errorf("import %s: error reading fingerprint", pkg.Path)
- base.ErrorExit()
- }
- return fingerprint
}
type iimporter struct {
- ipkg *types.Pkg
+ exportVersion uint64
+ ipkg *types.Pkg
pkgCache map[uint64]*types.Pkg
posBaseCache map[uint64]*src.PosBase
@@ -313,26 +305,42 @@ func (r *importReader) doDecl(sym *types.Sym) *ir.Name {
return n
case 'F':
- typ := r.signature(nil)
+ var tparams []*types.Field
+ if r.p.exportVersion >= iexportVersionGenerics {
+ tparams = r.tparamList()
+ }
+ typ := r.signature(nil, tparams)
n := importfunc(r.p.ipkg, pos, sym, typ)
r.funcExt(n)
return n
case 'T':
+ var rparams []*types.Type
+ if r.p.exportVersion >= iexportVersionGenerics {
+ rparams = r.typeList()
+ }
+
// Types can be recursive. We need to setup a stub
// declaration before recursing.
n := importtype(r.p.ipkg, pos, sym)
t := n.Type()
+ if rparams != nil {
+ t.SetRParams(rparams)
+ }
// We also need to defer width calculations until
// after the underlying type has been assigned.
types.DeferCheckSize()
+ deferDoInst()
underlying := r.typ()
t.SetUnderlying(underlying)
- types.ResumeCheckSize()
if underlying.IsInterface() {
+ // Finish up all type instantiations and CheckSize calls
+ // now that a top-level type is fully constructed.
+ resumeDoInst()
+ types.ResumeCheckSize()
r.typeExt(t)
return n
}
@@ -342,7 +350,7 @@ func (r *importReader) doDecl(sym *types.Sym) *ir.Name {
mpos := r.pos()
msym := r.selector()
recv := r.param()
- mtyp := r.signature(recv)
+ mtyp := r.signature(recv, nil)
// MethodSym already marked m.Sym as a function.
m := ir.NewNameAt(mpos, ir.MethodSym(recv.Type, msym))
@@ -358,12 +366,38 @@ func (r *importReader) doDecl(sym *types.Sym) *ir.Name {
}
t.Methods().Set(ms)
+ // Finish up all instantiations and CheckSize calls now
+ // that a top-level type is fully constructed.
+ resumeDoInst()
+ types.ResumeCheckSize()
+
r.typeExt(t)
for _, m := range ms {
r.methExt(m)
}
return n
+ case 'P':
+ if r.p.exportVersion < iexportVersionGenerics {
+ base.Fatalf("unexpected type param type")
+ }
+ if sym.Def != nil {
+ // Make sure we use the same type param type for the same
+ // name, whether it is created during types1-import or
+ // this types2-to-types1 translation.
+ return sym.Def.(*ir.Name)
+ }
+ index := int(r.int64())
+ t := types.NewTypeParam(sym, index)
+ // Nname needed to save the pos.
+ nname := ir.NewDeclNameAt(pos, ir.OTYPE, sym)
+ sym.Def = nname
+ nname.SetType(t)
+ t.SetNod(nname)
+
+ t.SetBound(r.typ())
+ return nname
+
case 'V':
typ := r.typ()
@@ -378,19 +412,39 @@ func (r *importReader) doDecl(sym *types.Sym) *ir.Name {
}
func (p *importReader) value(typ *types.Type) constant.Value {
- switch constTypeOf(typ) {
+ var kind constant.Kind
+ var valType *types.Type
+
+ if typ.IsTypeParam() {
+ // If a constant had a typeparam type, then we wrote out its
+ // actual constant kind as well.
+ kind = constant.Kind(p.int64())
+ switch kind {
+ case constant.Int:
+ valType = types.Types[types.TINT64]
+ case constant.Float:
+ valType = types.Types[types.TFLOAT64]
+ case constant.Complex:
+ valType = types.Types[types.TCOMPLEX128]
+ }
+ } else {
+ kind = constTypeOf(typ)
+ valType = typ
+ }
+
+ switch kind {
case constant.Bool:
return constant.MakeBool(p.bool())
case constant.String:
return constant.MakeString(p.string())
case constant.Int:
var i big.Int
- p.mpint(&i, typ)
+ p.mpint(&i, valType)
return constant.Make(&i)
case constant.Float:
- return p.float(typ)
+ return p.float(valType)
case constant.Complex:
- return makeComplex(p.float(typ), p.float(typ))
+ return makeComplex(p.float(valType), p.float(valType))
}
base.Fatalf("unexpected value type: %v", typ)
@@ -503,7 +557,12 @@ func (r *importReader) pos() src.XPos {
}
func (r *importReader) typ() *types.Type {
- return r.p.typAt(r.uint64())
+ // If this is a top-level type call, defer type instantiations until the
+ // type is fully constructed.
+ deferDoInst()
+ t := r.p.typAt(r.uint64())
+ resumeDoInst()
+ return t
}
func (r *importReader) exoticType() *types.Type {
@@ -641,7 +700,13 @@ func (p *iimporter) typAt(off uint64) *types.Type {
// are pushed to compile queue, then draining from the queue for compiling.
// During this process, the size calculation is disabled, so it is not safe for
// calculating size during SSA generation anymore. See issue #44732.
- types.CheckSize(t)
+ //
+ // No need to calc sizes for re-instantiated generic types, and
+ // they are not necessarily resolved until the top-level type is
+ // defined (because of recursive types).
+ if t.OrigSym == nil || !t.HasTParam() {
+ types.CheckSize(t)
+ }
p.typCache[off] = t
}
return t
@@ -680,7 +745,7 @@ func (r *importReader) typ1() *types.Type {
case signatureType:
r.setPkg()
- return r.signature(nil)
+ return r.signature(nil, nil)
case structType:
r.setPkg()
@@ -718,16 +783,64 @@ func (r *importReader) typ1() *types.Type {
for i := range methods {
pos := r.pos()
sym := r.selector()
- typ := r.signature(fakeRecvField())
+ typ := r.signature(fakeRecvField(), nil)
methods[i] = types.NewField(pos, sym, typ)
}
+ if len(embeddeds)+len(methods) == 0 {
+ return types.Types[types.TINTER]
+ }
+
t := types.NewInterface(r.currPkg, append(embeddeds, methods...))
// Ensure we expand the interface in the frontend (#25055).
types.CheckSize(t)
return t
+
+ case typeParamType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ base.Fatalf("unexpected type param type")
+ }
+ // Similar to code for defined types, since we "declared"
+ // typeparams to deal with recursion (typeparam is used within its
+ // own type bound).
+ ident := r.qualifiedIdent()
+ if ident.Sym().Def != nil {
+ return ident.Sym().Def.(*ir.Name).Type()
+ }
+ n := expandDecl(ident)
+ if n.Op() != ir.OTYPE {
+ base.Fatalf("expected OTYPE, got %v: %v, %v", n.Op(), n.Sym(), n)
+ }
+ return n.Type()
+
+ case instType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ base.Fatalf("unexpected instantiation type")
+ }
+ pos := r.pos()
+ len := r.uint64()
+ targs := make([]*types.Type, len)
+ for i := range targs {
+ targs[i] = r.typ()
+ }
+ baseType := r.typ()
+ t := Instantiate(pos, baseType, targs)
+ return t
+
+ case unionType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ base.Fatalf("unexpected union type")
+ }
+ nt := int(r.uint64())
+ terms := make([]*types.Type, nt)
+ tildes := make([]bool, nt)
+ for i := range terms {
+ terms[i] = r.typ()
+ tildes[i] = r.bool()
+ }
+ return types.NewUnion(terms, tildes)
}
}
@@ -735,13 +848,38 @@ func (r *importReader) kind() itag {
return itag(r.uint64())
}
-func (r *importReader) signature(recv *types.Field) *types.Type {
+func (r *importReader) signature(recv *types.Field, tparams []*types.Field) *types.Type {
params := r.paramList()
results := r.paramList()
if n := len(params); n > 0 {
params[n-1].SetIsDDD(r.bool())
}
- return types.NewSignature(r.currPkg, recv, nil, params, results)
+ return types.NewSignature(r.currPkg, recv, tparams, params, results)
+}
+
+func (r *importReader) typeList() []*types.Type {
+ n := r.uint64()
+ if n == 0 {
+ return nil
+ }
+ ts := make([]*types.Type, n)
+ for i := range ts {
+ ts[i] = r.typ()
+ }
+ return ts
+}
+
+func (r *importReader) tparamList() []*types.Field {
+ n := r.uint64()
+ if n == 0 {
+ return nil
+ }
+ fs := make([]*types.Field, n)
+ for i := range fs {
+ typ := r.typ()
+ fs[i] = types.NewField(typ.Pos(), typ.Sym(), typ)
+ }
+ return fs
}
func (r *importReader) paramList() []*types.Field {
@@ -809,7 +947,9 @@ func (r *importReader) funcExt(n *ir.Name) {
n.Func.ABI = obj.ABI(r.uint64())
- n.SetPragma(ir.PragmaFlag(r.uint64()))
+ // Make sure the //go:noinline pragma is imported (so stenciled functions
+ // have the same noinline status as the corresponding generic function).
+ n.Func.Pragma = ir.PragmaFlag(r.uint64())
// Escape analysis.
for _, fs := range &types.RecvsParams {
@@ -821,7 +961,8 @@ func (r *importReader) funcExt(n *ir.Name) {
// Inline body.
if u := r.uint64(); u > 0 {
n.Func.Inl = &ir.Inline{
- Cost: int32(u - 1),
+ Cost: int32(u - 1),
+ CanDelayResults: r.bool(),
}
n.Func.Endlineno = r.pos()
}
@@ -852,7 +993,13 @@ func (r *importReader) symIdx(s *types.Sym) {
func (r *importReader) typeExt(t *types.Type) {
t.SetNotInHeap(r.bool())
- i, pi := r.int64(), r.int64()
+ SetBaseTypeIndex(t, r.int64(), r.int64())
+}
+
+func SetBaseTypeIndex(t *types.Type, i, pi int64) {
+ if t.Obj() == nil {
+ base.Fatalf("SetBaseTypeIndex on non-defined type %v", t)
+ }
if i != -1 && pi != -1 {
typeSymIdx[t] = [2]int64{i, pi}
}
@@ -860,6 +1007,7 @@ func (r *importReader) typeExt(t *types.Type) {
// Map imported type T to the index of type descriptor symbols of T and *T,
// so we can use index to reference the symbol.
+// TODO(mdempsky): Store this information directly in the Type's Name.
var typeSymIdx = make(map[*types.Type][2]int64)
func BaseTypeIndex(t *types.Type) int64 {
@@ -936,6 +1084,10 @@ func (r *importReader) funcBody(fn *ir.Func) {
fn.Inl.Body = body
r.curfn = outerfn
+ if base.Flag.W >= 3 {
+ fmt.Printf("Imported for %v", fn)
+ ir.DumpList("", fn.Inl.Body)
+ }
}
func (r *importReader) readNames(fn *ir.Func) []*ir.Name {
@@ -1032,7 +1184,13 @@ func (r *importReader) caseList(switchExpr ir.Node) []*ir.CaseClause {
func (r *importReader) commList() []*ir.CommClause {
cases := make([]*ir.CommClause, r.uint64())
for i := range cases {
- cases[i] = ir.NewCommStmt(r.pos(), r.node(), r.stmtList())
+ pos := r.pos()
+ defaultCase := r.bool()
+ var comm ir.Node
+ if !defaultCase {
+ comm = r.node()
+ }
+ cases[i] = ir.NewCommStmt(pos, comm, r.stmtList())
}
return cases
}
@@ -1095,6 +1253,10 @@ func (r *importReader) node() ir.Node {
return n
case ir.ONAME:
+ isBuiltin := r.bool()
+ if isBuiltin {
+ return types.BuiltinPkg.Lookup(r.string()).Def.(*ir.Name)
+ }
return r.localName()
// case OPACK, ONONAME:
@@ -1117,16 +1279,11 @@ func (r *importReader) node() ir.Node {
case ir.OCLOSURE:
//println("Importing CLOSURE")
pos := r.pos()
- typ := r.signature(nil)
+ typ := r.signature(nil, nil)
// All the remaining code below is similar to (*noder).funcLit(), but
// with Dcls and ClosureVars lists already set up
- fn := ir.NewFunc(pos)
- fn.SetIsHiddenClosure(true)
- fn.Nname = ir.NewNameAt(pos, ir.BlankNode.Sym())
- fn.Nname.Func = fn
- fn.Nname.Ntype = ir.TypeNode(typ)
- fn.Nname.Defn = fn
+ fn := ir.NewClosureFunc(pos, true)
fn.Nname.SetType(typ)
cvars := make([]*ir.Name, r.int64())
@@ -1159,12 +1316,10 @@ func (r *importReader) node() ir.Node {
ir.FinishCaptureNames(pos, r.curfn, fn)
- clo := ir.NewClosureExpr(pos, fn)
- fn.OClosure = clo
+ clo := fn.OClosure
if go117ExportTypes {
clo.SetType(typ)
}
-
return clo
case ir.OSTRUCTLIT:
@@ -1202,35 +1357,32 @@ func (r *importReader) node() ir.Node {
// case OSTRUCTKEY:
// unreachable - handled in case OSTRUCTLIT by elemList
- case ir.OXDOT:
- // see parser.new_dotname
- if go117ExportTypes {
- base.Fatalf("shouldn't encounter XDOT in new importer")
- }
- return ir.NewSelectorExpr(r.pos(), ir.OXDOT, r.expr(), r.exoticSelector())
-
- case ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH, ir.OCALLPART, ir.OMETHEXPR:
- if !go117ExportTypes {
- // unreachable - mapped to case OXDOT by exporter
+ case ir.OXDOT, ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH, ir.OCALLPART, ir.OMETHEXPR:
+ // For !go117ExportTypes, we should only see OXDOT.
+ // For go117ExportTypes, we usually see all the other ops, but can see
+ // OXDOT for generic functions.
+ if op != ir.OXDOT && !go117ExportTypes {
goto error
}
pos := r.pos()
expr := r.expr()
sel := r.exoticSelector()
n := ir.NewSelectorExpr(pos, op, expr, sel)
- n.SetType(r.exoticType())
- switch op {
- case ir.ODOT, ir.ODOTPTR, ir.ODOTINTER:
- n.Selection = r.exoticField()
- case ir.ODOTMETH, ir.OCALLPART, ir.OMETHEXPR:
- // These require a Lookup to link to the correct declaration.
- rcvrType := expr.Type()
- typ := n.Type()
- n.Selection = Lookdot(n, rcvrType, 1)
- if op == ir.OCALLPART || op == ir.OMETHEXPR {
- // Lookdot clobbers the opcode and type, undo that.
- n.SetOp(op)
- n.SetType(typ)
+ if go117ExportTypes {
+ n.SetType(r.exoticType())
+ switch op {
+ case ir.ODOT, ir.ODOTPTR, ir.ODOTINTER:
+ n.Selection = r.exoticField()
+ case ir.ODOTMETH, ir.OCALLPART, ir.OMETHEXPR:
+ // These require a Lookup to link to the correct declaration.
+ rcvrType := expr.Type()
+ typ := n.Type()
+ n.Selection = Lookdot(n, rcvrType, 1)
+ if op == ir.OCALLPART || op == ir.OMETHEXPR {
+ // Lookdot clobbers the opcode and type, undo that.
+ n.SetOp(op)
+ n.SetType(typ)
+ }
}
}
return n
@@ -1247,7 +1399,7 @@ func (r *importReader) node() ir.Node {
n := ir.NewIndexExpr(r.pos(), r.expr(), r.expr())
if go117ExportTypes {
n.SetOp(op)
- n.SetType(r.typ())
+ n.SetType(r.exoticType())
if op == ir.OINDEXMAP {
n.Assigned = r.bool()
}
@@ -1343,6 +1495,13 @@ func (r *importReader) node() ir.Node {
n.Args.Append(r.exprList()...)
return n
+ case ir.OLINKSYMOFFSET:
+ pos := r.pos()
+ name := r.string()
+ off := r.uint64()
+ typ := r.typ()
+ return ir.NewLinksymOffsetExpr(pos, Lookup(name).Linksym(), int64(off), typ)
+
// unary expressions
case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.ORECV:
n := ir.NewUnaryExpr(r.pos(), op, r.expr())
@@ -1496,6 +1655,26 @@ func (r *importReader) node() ir.Node {
case ir.OEND:
return nil
+ case ir.OFUNCINST:
+ pos := r.pos()
+ x := r.expr()
+ ntargs := r.uint64()
+ var targs []ir.Node
+ if ntargs > 0 {
+ targs = make([]ir.Node, ntargs)
+ for i := range targs {
+ targs[i] = ir.TypeNode(r.typ())
+ }
+ }
+ n := ir.NewInstExpr(pos, ir.OFUNCINST, x, targs)
+ if go117ExportTypes {
+ n.SetType(r.typ())
+ }
+ return n
+
+ case ir.OSELRECV2:
+ return ir.NewAssignListStmt(r.pos(), ir.OSELRECV2, r.exprList(), r.exprList())
+
default:
base.Fatalf("cannot import %v (%d) node\n"+
"\t==> please file an issue and assign to gri@", op, int(op))
@@ -1517,11 +1696,7 @@ func (r *importReader) op() ir.Op {
func (r *importReader) fieldList() []ir.Node {
list := make([]ir.Node, r.uint64())
for i := range list {
- x := ir.NewStructKeyExpr(r.pos(), r.selector(), r.expr())
- if go117ExportTypes {
- x.Offset = int64(r.uint64())
- }
- list[i] = x
+ list[i] = ir.NewStructKeyExpr(r.pos(), r.exoticField(), r.expr())
}
return list
}
@@ -1544,3 +1719,134 @@ func builtinCall(pos src.XPos, op ir.Op) *ir.CallExpr {
}
return ir.NewCallExpr(pos, ir.OCALL, ir.NewIdent(base.Pos, types.BuiltinPkg.Lookup(ir.OpNames[op])), nil)
}
+
+// InstTypeName creates a name for an instantiated type, based on the name of the
+// generic type and the type args.
+func InstTypeName(name string, targs []*types.Type) string {
+ b := bytes.NewBufferString(name)
+ b.WriteByte('[')
+ for i, targ := range targs {
+ if i > 0 {
+ b.WriteByte(',')
+ }
+ // targ.String() does not include the package name for the local
+ // package, so add it explicitly to make sure type arguments (including
+ // type params) are uniquely specified.
+ if targ.Sym() != nil && targ.Sym().Pkg == types.LocalPkg {
+ b.WriteString(targ.Sym().Pkg.Name)
+ b.WriteByte('.')
+ }
+ b.WriteString(targ.String())
+ }
+ b.WriteByte(']')
+ return b.String()
+}
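
For illustration only (not part of this patch): the bracketed naming scheme above, mimicked with plain strings standing in for *types.Type, so the resulting instantiation names are easy to see.

package main

import (
	"fmt"
	"strings"
)

// instTypeName mirrors the naming scheme above, with plain strings
// standing in for *types.Type.
func instTypeName(name string, targs []string) string {
	return name + "[" + strings.Join(targs, ",") + "]"
}

func main() {
	// A generic Pair instantiated with int and a local type param T
	// (the local package name is kept so the name stays unique).
	fmt.Println(instTypeName("Pair", []string{"int", "main.T"})) // Pair[int,main.T]
}
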
+
+// NewIncompleteNamedType returns a TFORW type t with name specified by sym, such
+// that t.nod and sym.Def are set correctly.
+func NewIncompleteNamedType(pos src.XPos, sym *types.Sym) *types.Type {
+ name := ir.NewDeclNameAt(pos, ir.OTYPE, sym)
+ forw := types.NewNamed(name)
+ name.SetType(forw)
+ sym.Def = name
+ return forw
+}
+
+// Instantiate creates a new named type which is the instantiation of the base
+// named generic type, with the specified type args.
+func Instantiate(pos src.XPos, baseType *types.Type, targs []*types.Type) *types.Type {
+ baseSym := baseType.Sym()
+ if strings.Index(baseSym.Name, "[") >= 0 {
+ base.Fatalf("arg to Instantiate is not a base generic type")
+ }
+ name := InstTypeName(baseSym.Name, targs)
+ instSym := baseSym.Pkg.Lookup(name)
+ if instSym.Def != nil {
+ // May match existing type from previous import or
+ // types2-to-types1 conversion, or from in-progress instantiation
+ // in the current type import stack.
+ return instSym.Def.Type()
+ }
+
+ t := NewIncompleteNamedType(baseType.Pos(), instSym)
+ t.SetRParams(targs)
+ t.OrigSym = baseSym
+
+ // baseType may still be TFORW or its methods may not be fully filled in
+ // (since we are in the middle of importing it). So, delay call to
+ // substInstType until we get back up to the top of the current top-most
+ // type import.
+ deferredInstStack = append(deferredInstStack, t)
+
+ return t
+}
+
+var deferredInstStack []*types.Type
+var deferInst int
+
+// deferDoInst defers substitution on instantiated types until we are at the
+// top-most defined type, so the base types are fully defined.
+func deferDoInst() {
+ deferInst++
+}
+
+func resumeDoInst() {
+ if deferInst == 1 {
+ for len(deferredInstStack) > 0 {
+ t := deferredInstStack[0]
+ deferredInstStack = deferredInstStack[1:]
+ substInstType(t, t.OrigSym.Def.(*ir.Name).Type(), t.RParams())
+ }
+ }
+ deferInst--
+}
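
A minimal sketch (hypothetical names, not compiler code) of the defer/resume pattern used by deferDoInst/resumeDoInst: work queued at any nesting depth is only drained by the outermost resume, when every base definition is complete.

package main

import "fmt"

var (
	pending []string // queued work (stand-in for deferredInstStack)
	depth   int      // nesting counter (stand-in for deferInst)
)

func deferWork() { depth++ }

func queue(item string) { pending = append(pending, item) }

func resumeWork() {
	if depth == 1 {
		// Only the outermost resume drains the queue, so every base
		// definition seen at deeper levels is complete by now.
		for len(pending) > 0 {
			item := pending[0]
			pending = pending[1:]
			fmt.Println("completing", item)
		}
	}
	depth--
}

func main() {
	deferWork()
	deferWork()
	queue("List[int]")
	resumeWork() // inner level: nothing is drained yet
	queue("Pair[int,string]")
	resumeWork() // outermost level: both items are completed
}
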
+
+// doInst creates a new instantiation type (which will be added to
+// deferredInstStack for completion later) for an incomplete type encountered
+// during a type substitution for an instantiation. This is needed for
+// instantiations of mutually recursive types.
+func doInst(t *types.Type) *types.Type {
+ return Instantiate(t.Pos(), t.OrigSym.Def.(*ir.Name).Type(), t.RParams())
+}
+
+// substInstType completes the instantiation of a generic type by doing a
+// substitution on the underlying type itself and any methods. t is the
+// instantiation being created, baseType is the base generic type, and targs are
+// the type arguments that baseType is being instantiated with.
+func substInstType(t *types.Type, baseType *types.Type, targs []*types.Type) {
+ subst := Tsubster{
+ Tparams: baseType.RParams(),
+ Targs: targs,
+ SubstForwFunc: doInst,
+ }
+ t.SetUnderlying(subst.Typ(baseType.Underlying()))
+
+ newfields := make([]*types.Field, baseType.Methods().Len())
+ for i, f := range baseType.Methods().Slice() {
+ recvType := f.Type.Recv().Type
+ if recvType.IsPtr() {
+ recvType = recvType.Elem()
+ }
+ // Substitute in the method using the type params used in the
+ // method (not the type params in the definition of the generic type).
+ subst := Tsubster{
+ Tparams: recvType.RParams(),
+ Targs: targs,
+ SubstForwFunc: doInst,
+ }
+ t2 := subst.Typ(f.Type)
+ oldsym := f.Nname.Sym()
+ newsym := MakeInstName(oldsym, targs, true)
+ var nname *ir.Name
+ if newsym.Def != nil {
+ nname = newsym.Def.(*ir.Name)
+ } else {
+ nname = ir.NewNameAt(f.Pos, newsym)
+ nname.SetType(t2)
+ newsym.Def = nname
+ }
+ newfields[i] = types.NewField(f.Pos, f.Sym, t2)
+ newfields[i].Nname = nname
+ }
+ t.Methods().Set(newfields)
+}
diff --git a/src/cmd/compile/internal/typecheck/subr.go b/src/cmd/compile/internal/typecheck/subr.go
index 9ee7a94b1f..0e306eaea8 100644
--- a/src/cmd/compile/internal/typecheck/subr.go
+++ b/src/cmd/compile/internal/typecheck/subr.go
@@ -5,6 +5,7 @@
package typecheck
import (
+ "bytes"
"fmt"
"sort"
"strconv"
@@ -722,13 +723,23 @@ func ifacelookdot(s *types.Sym, t *types.Type, ignorecase bool) (m *types.Field,
return m, followptr
}
+// implements reports whether t implements the interface iface. t can be
+// an interface, a type parameter, or a concrete type. If implements returns
+// false, it stores a method of iface that is not implemented in *m. If the
+// method name matches but the type is wrong, it additionally stores the type
+// of the method (on t) in *samename.
func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool {
t0 := t
if t == nil {
return false
}
- if t.IsInterface() {
+ if t.IsInterface() || t.IsTypeParam() {
+ if t.IsTypeParam() {
+ // A typeparam satisfies an interface if its type bound
+ // has all the methods of that interface.
+ t = t.Bound()
+ }
i := 0
tms := t.AllMethods().Slice()
for _, im := range iface.AllMethods().Slice() {
@@ -874,3 +885,403 @@ var slist []symlink
type symlink struct {
field *types.Field
}
+
+// TypesOf converts a list of nodes to a list
+// of types of those nodes.
+func TypesOf(x []ir.Node) []*types.Type {
+ r := make([]*types.Type, len(x))
+ for i, n := range x {
+ r[i] = n.Type()
+ }
+ return r
+}
+
+// MakeInstName makes the unique name for a stenciled generic function or method,
+// based on the name of the function fnsym and the targs. It replaces any
+// existing bracket type list in the name. MakeInstName asserts that fnsym has
+// brackets in its name if and only if hasBrackets is true.
+//
+// Names of declared generic functions have no brackets originally, so hasBrackets
+// should be false. Names of generic methods already have brackets, since the new
+// type parameter is specified in the generic type of the receiver (e.g.
+// func (v *value[T]) set(...) { ... } has the original name (*value[T]).set).
+//
+// The standard naming is something like: 'genFn[int,bool]' for functions and
+// '(*genType[int,bool]).methodName' for methods.
+func MakeInstName(fnsym *types.Sym, targs []*types.Type, hasBrackets bool) *types.Sym {
+ b := bytes.NewBufferString("")
+
+ // Determine if the type args are concrete types or new typeparams.
+ hasTParam := false
+ for _, targ := range targs {
+ if hasTParam {
+ assert(targ.HasTParam())
+ } else if targ.HasTParam() {
+ hasTParam = true
+ }
+ }
+
+ // Marker to distinguish generic instantiations from fully stenciled wrapper functions.
+ // Once we move to GC shape implementations, this prefix will not be necessary as the
+ // GC shape naming will distinguish them.
+ // e.g. f[8bytenonpointer] vs. f[int].
+ // For now, we use .inst.f[int] vs. f[int].
+ if !hasTParam {
+ b.WriteString(".inst.")
+ }
+
+ name := fnsym.Name
+ i := strings.Index(name, "[")
+ assert(hasBrackets == (i >= 0))
+ if i >= 0 {
+ b.WriteString(name[0:i])
+ } else {
+ b.WriteString(name)
+ }
+ b.WriteString("[")
+ for i, targ := range targs {
+ if i > 0 {
+ b.WriteString(",")
+ }
+ // targ.String() does not include the package name for the local
+ // package, so add it explicitly for uniqueness.
+ if targ.Sym() != nil && targ.Sym().Pkg == types.LocalPkg {
+ b.WriteString(targ.Sym().Pkg.Name)
+ b.WriteByte('.')
+ }
+ b.WriteString(targ.String())
+ }
+ b.WriteString("]")
+ if i >= 0 {
+ i2 := strings.LastIndex(name[i:], "]")
+ assert(i2 >= 0)
+ b.WriteString(name[i+i2+1:])
+ }
+ if strings.HasPrefix(b.String(), ".inst..inst.") {
+ panic(fmt.Sprintf("multiple .inst. prefix in %s", b.String()))
+ }
+ return fnsym.Pkg.Lookup(b.String())
+}
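
Again for illustration only (assumed behavior, not part of the patch): roughly what the renaming above produces, mimicked with plain strings standing in for symbols.

package main

import (
	"fmt"
	"strings"
)

// makeInstName mirrors the renaming above: replace any existing [...] type
// list with the new type args, and add the ".inst." prefix when all type
// args are concrete.
func makeInstName(name string, targs []string, concrete bool) string {
	out := ""
	if concrete {
		out = ".inst."
	}
	if i := strings.Index(name, "["); i >= 0 {
		j := strings.LastIndex(name, "]")
		out += name[:i] + "[" + strings.Join(targs, ",") + "]" + name[j+1:]
	} else {
		out += name + "[" + strings.Join(targs, ",") + "]"
	}
	return out
}

func main() {
	fmt.Println(makeInstName("genFn", []string{"int", "bool"}, true))   // .inst.genFn[int,bool]
	fmt.Println(makeInstName("(*value[T]).set", []string{"int"}, true)) // .inst.(*value[int]).set
}
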
+
+func assert(p bool) {
+ if !p {
+ panic("assertion failed")
+ }
+}
+
+// General type substituter, for replacing typeparams with type args.
+type Tsubster struct {
+ Tparams []*types.Type
+ Targs []*types.Type
+ // If non-nil, the substitution map from name nodes in the generic function to the
+ // name nodes in the new stenciled function.
+ Vars map[*ir.Name]*ir.Name
+ // New fully-instantiated generic types whose methods should be instantiated.
+ InstTypeList []*types.Type
+ // If non-nil, function to substitute an incomplete (TFORW) type.
+ SubstForwFunc func(*types.Type) *types.Type
+}
+
+// Typ computes the type obtained by substituting any type parameter in t with the
+// corresponding type argument in subst. If t contains no type parameters, the
+// result is t; otherwise the result is a new type. It deals with recursive types
+// by using TFORW types and finding partially or fully created types via sym.Def.
+func (ts *Tsubster) Typ(t *types.Type) *types.Type {
+ if !t.HasTParam() && t.Kind() != types.TFUNC {
+ // Note: function types need to be copied regardless, as the
+ // types of closures may contain declarations that need
+ // to be copied. See #45738.
+ return t
+ }
+
+ if t.IsTypeParam() {
+ for i, tp := range ts.Tparams {
+ if tp == t {
+ return ts.Targs[i]
+ }
+ }
+ // If t is a simple typeparam T, then t has the name/symbol 'T'
+ // and t.Underlying() == t.
+ //
+ // However, consider the type definition: 'type P[T any] T'. We
+ // might use this definition so we can have a variant of type T
+ // that we can add new methods to. Suppose t is a reference to
+ // P[T]. t has the name 'P[T]', but its kind is TTYPEPARAM,
+ // because P[T] is defined as T. If we look at t.Underlying(), it
+ // is different, because the name of t.Underlying() is 'T' rather
+ // than 'P[T]'. But the kind of t.Underlying() is also TTYPEPARAM.
+ // In this case, we do the needed recursive substitution in the
+ // case statement below.
+ if t.Underlying() == t {
+ // t is a simple typeparam that didn't match anything in tparam
+ return t
+ }
+ // t is a more complex typeparam (e.g. P[T], as above, whose
+ // definition is just T).
+ assert(t.Sym() != nil)
+ }
+
+ var newsym *types.Sym
+ var neededTargs []*types.Type
+ var forw *types.Type
+
+ if t.Sym() != nil {
+ // Translate the type params for this type according to
+ // the tparam/targs mapping from subst.
+ neededTargs = make([]*types.Type, len(t.RParams()))
+ for i, rparam := range t.RParams() {
+ neededTargs[i] = ts.Typ(rparam)
+ }
+ // For a named (defined) type, we have to change the name of the
+ // type as well. We do this first, so we can look up if we've
+ // already seen this type during this substitution or other
+ // definitions/substitutions.
+ genName := genericTypeName(t.Sym())
+ newsym = t.Sym().Pkg.Lookup(InstTypeName(genName, neededTargs))
+ if newsym.Def != nil {
+ // We've already created this instantiated defined type.
+ return newsym.Def.Type()
+ }
+
+ // In order to deal with recursive generic types, create a TFORW
+ // type initially and set the Def field of its sym, so it can be
+ // found if this type appears recursively within the type.
+ forw = NewIncompleteNamedType(t.Pos(), newsym)
+ //println("Creating new type by sub", newsym.Name, forw.HasTParam())
+ forw.SetRParams(neededTargs)
+ // Copy the OrigSym from the re-instantiated type (which is the sym of
+ // the base generic type).
+ assert(t.OrigSym != nil)
+ forw.OrigSym = t.OrigSym
+ }
+
+ var newt *types.Type
+
+ switch t.Kind() {
+ case types.TTYPEPARAM:
+ if t.Sym() == newsym {
+ // The substitution did not change the type.
+ return t
+ }
+ // Substitute the underlying typeparam (e.g. T in P[T], see
+ // the example describing type P[T] above).
+ newt = ts.Typ(t.Underlying())
+ assert(newt != t)
+
+ case types.TARRAY:
+ elem := t.Elem()
+ newelem := ts.Typ(elem)
+ if newelem != elem {
+ newt = types.NewArray(newelem, t.NumElem())
+ }
+
+ case types.TPTR:
+ elem := t.Elem()
+ newelem := ts.Typ(elem)
+ if newelem != elem {
+ newt = types.NewPtr(newelem)
+ }
+
+ case types.TSLICE:
+ elem := t.Elem()
+ newelem := ts.Typ(elem)
+ if newelem != elem {
+ newt = types.NewSlice(newelem)
+ }
+
+ case types.TSTRUCT:
+ newt = ts.tstruct(t, false)
+ if newt == t {
+ newt = nil
+ }
+
+ case types.TFUNC:
+ newrecvs := ts.tstruct(t.Recvs(), false)
+ newparams := ts.tstruct(t.Params(), false)
+ newresults := ts.tstruct(t.Results(), false)
+ if newrecvs != t.Recvs() || newparams != t.Params() || newresults != t.Results() {
+ // If any types have changed, then all the fields of
+ // recv, params, and results must be copied, because they have
+ // offset fields that are dependent, and so must have an
+ // independent copy for each new signature.
+ var newrecv *types.Field
+ if newrecvs.NumFields() > 0 {
+ if newrecvs == t.Recvs() {
+ newrecvs = ts.tstruct(t.Recvs(), true)
+ }
+ newrecv = newrecvs.Field(0)
+ }
+ if newparams == t.Params() {
+ newparams = ts.tstruct(t.Params(), true)
+ }
+ if newresults == t.Results() {
+ newresults = ts.tstruct(t.Results(), true)
+ }
+ newt = types.NewSignature(t.Pkg(), newrecv, t.TParams().FieldSlice(), newparams.FieldSlice(), newresults.FieldSlice())
+ }
+
+ case types.TINTER:
+ newt = ts.tinter(t)
+ if newt == t {
+ newt = nil
+ }
+
+ case types.TMAP:
+ newkey := ts.Typ(t.Key())
+ newval := ts.Typ(t.Elem())
+ if newkey != t.Key() || newval != t.Elem() {
+ newt = types.NewMap(newkey, newval)
+ }
+
+ case types.TCHAN:
+ elem := t.Elem()
+ newelem := ts.Typ(elem)
+ if newelem != elem {
+ newt = types.NewChan(newelem, t.ChanDir())
+ if !newt.HasTParam() {
+ // TODO(danscales): not sure why I have to do this
+ // only for channels.....
+ types.CheckSize(newt)
+ }
+ }
+ case types.TFORW:
+ if ts.SubstForwFunc != nil {
+ newt = ts.SubstForwFunc(t)
+ } else {
+ assert(false)
+ }
+ }
+ if newt == nil {
+ // Even though there were typeparams in the type, there may be no
+ // change if this is a function type for a function call (which will
+ // have its own tparams/targs in the function instantiation).
+ return t
+ }
+
+ if t.Sym() == nil {
+ // Not a named type, so there was no forwarding type and there are
+ // no methods to substitute.
+ assert(t.Methods().Len() == 0)
+ return newt
+ }
+
+ forw.SetUnderlying(newt)
+ newt = forw
+
+ if t.Kind() != types.TINTER && t.Methods().Len() > 0 {
+ // Fill in the method info for the new type.
+ var newfields []*types.Field
+ newfields = make([]*types.Field, t.Methods().Len())
+ for i, f := range t.Methods().Slice() {
+ t2 := ts.Typ(f.Type)
+ oldsym := f.Nname.Sym()
+ newsym := MakeInstName(oldsym, ts.Targs, true)
+ var nname *ir.Name
+ if newsym.Def != nil {
+ nname = newsym.Def.(*ir.Name)
+ } else {
+ nname = ir.NewNameAt(f.Pos, newsym)
+ nname.SetType(t2)
+ newsym.Def = nname
+ }
+ newfields[i] = types.NewField(f.Pos, f.Sym, t2)
+ newfields[i].Nname = nname
+ }
+ newt.Methods().Set(newfields)
+ if !newt.HasTParam() {
+ // Generate all the methods for a new fully-instantiated type.
+ ts.InstTypeList = append(ts.InstTypeList, newt)
+ }
+ }
+ return newt
+}
+
+// tstruct substitutes type params in types of the fields of a structure type. For
+// each field, tstruct copies the Nname, and translates it if Nname is in
+// ts.Vars. To always force the creation of a new (top-level) struct,
+// regardless of whether anything changed with the types or names of the struct's
+// fields, set force to true.
+func (ts *Tsubster) tstruct(t *types.Type, force bool) *types.Type {
+ if t.NumFields() == 0 {
+ if t.HasTParam() {
+ // For an empty struct, we need to return a new type,
+ // since it may now be fully instantiated (HasTParam
+ // becomes false).
+ return types.NewStruct(t.Pkg(), nil)
+ }
+ return t
+ }
+ var newfields []*types.Field
+ if force {
+ newfields = make([]*types.Field, t.NumFields())
+ }
+ for i, f := range t.Fields().Slice() {
+ t2 := ts.Typ(f.Type)
+ if (t2 != f.Type || f.Nname != nil) && newfields == nil {
+ newfields = make([]*types.Field, t.NumFields())
+ for j := 0; j < i; j++ {
+ newfields[j] = t.Field(j)
+ }
+ }
+ if newfields != nil {
+ // TODO(danscales): make sure this works for the field
+ // names of embedded types (which should keep the name of
+ // the type param, not the instantiated type).
+ newfields[i] = types.NewField(f.Pos, f.Sym, t2)
+ if f.Nname != nil && ts.Vars != nil {
+ v := ts.Vars[f.Nname.(*ir.Name)]
+ if v != nil {
+ // This is the case where we are
+ // translating the type of the function we
+ // are substituting, so its dcls are in
+ // the subst.ts.vars table, and we want to
+ // change to reference the new dcl.
+ newfields[i].Nname = v
+ } else {
+ // This is the case where we are
+ // translating the type of a function
+ // reference inside the function we are
+ // substituting, so we leave the Nname
+ // value as is.
+ newfields[i].Nname = f.Nname
+ }
+ }
+ }
+ }
+ if newfields != nil {
+ return types.NewStruct(t.Pkg(), newfields)
+ }
+ return t
+
+}
+
+// tinter substitutes type params in types of the methods of an interface type.
+func (ts *Tsubster) tinter(t *types.Type) *types.Type {
+ if t.Methods().Len() == 0 {
+ return t
+ }
+ var newfields []*types.Field
+ for i, f := range t.Methods().Slice() {
+ t2 := ts.Typ(f.Type)
+ if (t2 != f.Type || f.Nname != nil) && newfields == nil {
+ newfields = make([]*types.Field, t.Methods().Len())
+ for j := 0; j < i; j++ {
+ newfields[j] = t.Methods().Index(j)
+ }
+ }
+ if newfields != nil {
+ newfields[i] = types.NewField(f.Pos, f.Sym, t2)
+ }
+ }
+ if newfields != nil {
+ return types.NewInterface(t.Pkg(), newfields)
+ }
+ return t
+}
+
+// genericTypeName returns the name of the base generic type for the type named by
+// sym. It simply returns the name obtained by removing everything after the
+// first bracket ("[").
+func genericTypeName(sym *types.Sym) string {
+ return sym.Name[0:strings.Index(sym.Name, "[")]
+}
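
A quick illustration (assumed behavior based on the code above): stripping everything from the first bracket recovers the base generic name.

package main

import (
	"fmt"
	"strings"
)

// baseName mirrors genericTypeName: keep everything before the first '['.
func baseName(instName string) string {
	return instName[:strings.Index(instName, "[")]
}

func main() {
	fmt.Println(baseName("List[int]"))       // List
	fmt.Println(baseName("Map[string,int]")) // Map
}
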
diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go
index bf52941b2c..8454b8d5b3 100644
--- a/src/cmd/compile/internal/typecheck/typecheck.go
+++ b/src/cmd/compile/internal/typecheck/typecheck.go
@@ -787,11 +787,7 @@ func typecheck1(n ir.Node, top int) ir.Node {
case ir.OCLOSURE:
n := n.(*ir.ClosureExpr)
- tcClosure(n, top)
- if n.Type() == nil {
- return n
- }
- return n
+ return tcClosure(n, top)
case ir.OITAB:
n := n.(*ir.UnaryExpr)
@@ -1906,11 +1902,6 @@ func typecheckdef(n *ir.Name) {
n.SetDiag(true)
goto ret
}
- // For package-level type aliases, set n.Sym.Def so we can identify
- // it as a type alias during export. See also #31959.
- if n.Curfn == nil {
- n.Sym().Def = n.Ntype
- }
}
break
}
diff --git a/src/cmd/compile/internal/types/fmt.go b/src/cmd/compile/internal/types/fmt.go
index b538ea8054..b4d1f6c8bb 100644
--- a/src/cmd/compile/internal/types/fmt.go
+++ b/src/cmd/compile/internal/types/fmt.go
@@ -319,31 +319,34 @@ func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type
// Unless the 'L' flag was specified, if the type has a name, just print that name.
if verb != 'L' && t.Sym() != nil && t != Types[t.Kind()] {
- switch mode {
- case fmtTypeID, fmtTypeIDName:
- if verb == 'S' {
- if t.Vargen != 0 {
- sconv2(b, t.Sym(), 'S', mode)
- fmt.Fprintf(b, "·%d", t.Vargen)
- return
- }
- sconv2(b, t.Sym(), 'S', mode)
- return
- }
+ // Default to 'v' if verb is invalid.
+ if verb != 'S' {
+ verb = 'v'
+ }
- if mode == fmtTypeIDName {
- sconv2(b, t.Sym(), 'v', fmtTypeIDName)
- return
+ // In unified IR, function-scope defined types will have a ·N
+ // suffix embedded directly in their Name. Trim this off for
+ // non-fmtTypeID modes.
+ sym := t.Sym()
+ if mode != fmtTypeID {
+ i := len(sym.Name)
+ for i > 0 && sym.Name[i-1] >= '0' && sym.Name[i-1] <= '9' {
+ i--
}
-
- if t.Sym().Pkg == LocalPkg && t.Vargen != 0 {
- sconv2(b, t.Sym(), 'v', mode)
- fmt.Fprintf(b, "·%d", t.Vargen)
- return
+ const dot = "·"
+ if i >= len(dot) && sym.Name[i-len(dot):i] == dot {
+ sym = &Sym{Pkg: sym.Pkg, Name: sym.Name[:i-len(dot)]}
}
}
-
- sconv2(b, t.Sym(), 'v', mode)
+ sconv2(b, sym, verb, mode)
+
+ // TODO(mdempsky): Investigate including Vargen in fmtTypeIDName
+ // output too. It seems like it should, but that mode is currently
+ // used in string representation used by reflection, which is
+ // user-visible and doesn't expect this.
+ if mode == fmtTypeID && t.Vargen != 0 {
+ fmt.Fprintf(b, "·%d", t.Vargen)
+ }
return
}
diff --git a/src/cmd/compile/internal/types/kind_string.go b/src/cmd/compile/internal/types/kind_string.go
index ae24a58b92..3e6a8bc064 100644
--- a/src/cmd/compile/internal/types/kind_string.go
+++ b/src/cmd/compile/internal/types/kind_string.go
@@ -38,20 +38,21 @@ func _() {
_ = x[TSTRING-27]
_ = x[TUNSAFEPTR-28]
_ = x[TTYPEPARAM-29]
- _ = x[TIDEAL-30]
- _ = x[TNIL-31]
- _ = x[TBLANK-32]
- _ = x[TFUNCARGS-33]
- _ = x[TCHANARGS-34]
- _ = x[TSSA-35]
- _ = x[TTUPLE-36]
- _ = x[TRESULTS-37]
- _ = x[NTYPE-38]
+ _ = x[TUNION-30]
+ _ = x[TIDEAL-31]
+ _ = x[TNIL-32]
+ _ = x[TBLANK-33]
+ _ = x[TFUNCARGS-34]
+ _ = x[TCHANARGS-35]
+ _ = x[TSSA-36]
+ _ = x[TTUPLE-37]
+ _ = x[TRESULTS-38]
+ _ = x[NTYPE-39]
}
-const _Kind_name = "xxxINT8UINT8INT16UINT16INT32UINT32INT64UINT64INTUINTUINTPTRCOMPLEX64COMPLEX128FLOAT32FLOAT64BOOLPTRFUNCSLICEARRAYSTRUCTCHANMAPINTERFORWANYSTRINGUNSAFEPTRTYPEPARAMIDEALNILBLANKFUNCARGSCHANARGSSSATUPLERESULTSNTYPE"
+const _Kind_name = "xxxINT8UINT8INT16UINT16INT32UINT32INT64UINT64INTUINTUINTPTRCOMPLEX64COMPLEX128FLOAT32FLOAT64BOOLPTRFUNCSLICEARRAYSTRUCTCHANMAPINTERFORWANYSTRINGUNSAFEPTRTYPEPARAMUNIONIDEALNILBLANKFUNCARGSCHANARGSSSATUPLERESULTSNTYPE"
-var _Kind_index = [...]uint8{0, 3, 7, 12, 17, 23, 28, 34, 39, 45, 48, 52, 59, 68, 78, 85, 92, 96, 99, 103, 108, 113, 119, 123, 126, 131, 135, 138, 144, 153, 162, 167, 170, 175, 183, 191, 194, 199, 206, 211}
+var _Kind_index = [...]uint8{0, 3, 7, 12, 17, 23, 28, 34, 39, 45, 48, 52, 59, 68, 78, 85, 92, 96, 99, 103, 108, 113, 119, 123, 126, 131, 135, 138, 144, 153, 162, 167, 172, 175, 180, 188, 196, 199, 204, 211, 216}
func (i Kind) String() string {
if i >= Kind(len(_Kind_index)-1) {
diff --git a/src/cmd/compile/internal/types/pkg.go b/src/cmd/compile/internal/types/pkg.go
index a6d2e2007b..f63a357f0d 100644
--- a/src/cmd/compile/internal/types/pkg.go
+++ b/src/cmd/compile/internal/types/pkg.go
@@ -137,7 +137,3 @@ func CleanroomDo(f func()) {
f()
pkgMap = saved
}
-
-func IsDotAlias(sym *Sym) bool {
- return sym.Def != nil && sym.Def.Sym() != sym
-}
diff --git a/src/cmd/compile/internal/types/size.go b/src/cmd/compile/internal/types/size.go
index f0e695ab96..f5a74f83b3 100644
--- a/src/cmd/compile/internal/types/size.go
+++ b/src/cmd/compile/internal/types/size.go
@@ -90,6 +90,26 @@ func expandiface(t *Type) {
methods = append(methods, m)
}
+ {
+ methods := t.Methods().Slice()
+ sort.SliceStable(methods, func(i, j int) bool {
+ mi, mj := methods[i], methods[j]
+
+ // Sort embedded types by type name (if any).
+ if mi.Sym == nil && mj.Sym == nil {
+ return mi.Type.Sym().Less(mj.Type.Sym())
+ }
+
+ // Sort methods before embedded types.
+ if mi.Sym == nil || mj.Sym == nil {
+ return mi.Sym != nil
+ }
+
+ // Sort methods by symbol name.
+ return mi.Sym.Less(mj.Sym)
+ })
+ }
+
for _, m := range t.Methods().Slice() {
if m.Sym == nil {
continue
@@ -104,8 +124,14 @@ func expandiface(t *Type) {
continue
}
+ if m.Type.IsUnion() {
+ continue
+ }
+
+ // Once we go to 1.18, embedded types can be anything, but
+ // for now, only interfaces and unions are allowed.
if !m.Type.IsInterface() {
- base.ErrorfAt(m.Pos, "interface contains embedded non-interface %v", m.Type)
+ base.ErrorfAt(m.Pos, "interface contains embedded non-interface, non-union %v", m.Type)
m.SetBroke(true)
t.SetBroke(true)
// Add to fields so that error messages
@@ -120,10 +146,15 @@ func expandiface(t *Type) {
// (including broken ones, if any) and add to t's
// method set.
for _, t1 := range m.Type.AllMethods().Slice() {
- // Use m.Pos rather than t1.Pos to preserve embedding position.
f := NewField(m.Pos, t1.Sym, t1.Type)
addMethod(f, false)
+
+ // Clear position after typechecking, for consistency with types2.
+ f.Pos = src.NoXPos
}
+
+ // Clear position after typechecking, for consistency with types2.
+ m.Pos = src.NoXPos
}
sort.Sort(MethodsByName(methods))
@@ -405,6 +436,12 @@ func CalcSize(t *Type) {
t.Align = uint8(PtrSize)
expandiface(t)
+ case TUNION:
+ // Always part of an interface for now, so size/align don't matter.
+ // Pretend a union is represented like an interface.
+ w = 2 * int64(PtrSize)
+ t.Align = uint8(PtrSize)
+
case TCHAN: // implemented as pointer
w = int64(PtrSize)
diff --git a/src/cmd/compile/internal/types/sizeof_test.go b/src/cmd/compile/internal/types/sizeof_test.go
index 7028938742..7349e52a73 100644
--- a/src/cmd/compile/internal/types/sizeof_test.go
+++ b/src/cmd/compile/internal/types/sizeof_test.go
@@ -21,7 +21,7 @@ func TestSizeof(t *testing.T) {
_64bit uintptr // size on 64bit platforms
}{
{Sym{}, 44, 72},
- {Type{}, 60, 104},
+ {Type{}, 64, 112},
{Map{}, 20, 40},
{Forward{}, 20, 32},
{Func{}, 28, 48},
diff --git a/src/cmd/compile/internal/types/sort.go b/src/cmd/compile/internal/types/sort.go
index dc59b06415..765c070cd9 100644
--- a/src/cmd/compile/internal/types/sort.go
+++ b/src/cmd/compile/internal/types/sort.go
@@ -4,11 +4,16 @@
package types
-// MethodsByName sorts methods by symbol.
+// MethodsByName sorts methods by name.
type MethodsByName []*Field
-func (x MethodsByName) Len() int { return len(x) }
+func (x MethodsByName) Len() int { return len(x) }
+func (x MethodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x MethodsByName) Less(i, j int) bool { return x[i].Sym.Less(x[j].Sym) }
-func (x MethodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+// EmbeddedsByName sorts embedded types by name.
+type EmbeddedsByName []*Field
-func (x MethodsByName) Less(i, j int) bool { return x[i].Sym.Less(x[j].Sym) }
+func (x EmbeddedsByName) Len() int { return len(x) }
+func (x EmbeddedsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x EmbeddedsByName) Less(i, j int) bool { return x[i].Type.Sym().Less(x[j].Type.Sym()) }
diff --git a/src/cmd/compile/internal/types/sym.go b/src/cmd/compile/internal/types/sym.go
index 534cf7e237..fb642f52f8 100644
--- a/src/cmd/compile/internal/types/sym.go
+++ b/src/cmd/compile/internal/types/sym.go
@@ -110,6 +110,14 @@ func (a *Sym) Less(b *Sym) bool {
return false
}
+ // Nil before non-nil.
+ if a == nil {
+ return true
+ }
+ if b == nil {
+ return false
+ }
+
// Exported symbols before non-exported.
ea := IsExported(a.Name)
eb := IsExported(b.Name)
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
index 1a9aa6916a..075009d6a3 100644
--- a/src/cmd/compile/internal/types/type.go
+++ b/src/cmd/compile/internal/types/type.go
@@ -8,6 +8,7 @@ import (
"cmd/compile/internal/base"
"cmd/internal/src"
"fmt"
+ "strings"
"sync"
)
@@ -73,6 +74,7 @@ const (
TSTRING
TUNSAFEPTR
TTYPEPARAM
+ TUNION
// pseudo-types for literals
TIDEAL // untyped numeric constants
@@ -151,7 +153,7 @@ type Type struct {
// TARRAY: *Array
// TSLICE: Slice
// TSSA: string
- // TTYPEPARAM: *Interface (though we may not need to store/use the Interface info)
+ // TTYPEPARAM: *Typeparam
Extra interface{}
// Width is the width of this Type in bytes.
@@ -182,12 +184,19 @@ type Type struct {
flags bitset8
// For defined (named) generic types, a pointer to the list of type params
- // (in order) of this type that need to be instantiated. For
- // fully-instantiated generic types, this is the targs used to instantiate
- // them (which are used when generating the corresponding instantiated
- // methods). rparams is only set for named types that are generic or are
- // fully-instantiated from a generic type, and is otherwise set to nil.
+ // (in order) of this type that need to be instantiated. For instantiated
+ // generic types, this is the targs used to instantiate them. These targs
+ // may be typeparams (for re-instantiated types such as Value[T2]) or
+ // concrete types (for fully instantiated types such as Value[int]).
+ // rparams is only set for named types that are generic or are fully
+ // instantiated from a generic type, and is otherwise set to nil.
+ // TODO(danscales): choose a better name.
rparams *[]*Type
+
+ // For an instantiated generic type, the symbol for the base generic type.
+ // This backpointer is useful, because the base type is the type that has
+ // the method bodies.
+ OrigSym *Sym
}
func (*Type) CanBeAnSSAAux() {}
@@ -213,7 +222,9 @@ func (t *Type) SetBroke(b bool) { t.flags.set(typeBroke, b) }
func (t *Type) SetNoalg(b bool) { t.flags.set(typeNoalg, b) }
func (t *Type) SetDeferwidth(b bool) { t.flags.set(typeDeferwidth, b) }
func (t *Type) SetRecur(b bool) { t.flags.set(typeRecur, b) }
-func (t *Type) SetHasTParam(b bool) { t.flags.set(typeHasTParam, b) }
+
+// Generic types should never have alg functions.
+func (t *Type) SetHasTParam(b bool) { t.flags.set(typeHasTParam, b); t.flags.set(typeNoalg, b) }
// Kind returns the kind of type t.
func (t *Type) Kind() Kind { return t.kind }
@@ -269,6 +280,26 @@ func (t *Type) SetRParams(rparams []*Type) {
}
}
+// IsBaseGeneric reports whether t is a base generic type (one that has not been
+// reinstantiated with other type params and is not fully instantiated).
+func (t *Type) IsBaseGeneric() bool {
+ return len(t.RParams()) > 0 && strings.Index(t.Sym().Name, "[") < 0
+}
+
+// IsInstantiatedGeneric reports whether t is a generic type that has been
+// reinstantiated with new typeparams (i.e. is not fully instantiated).
+func (t *Type) IsInstantiatedGeneric() bool {
+ return len(t.RParams()) > 0 && strings.Index(t.Sym().Name, "[") >= 0 &&
+ t.HasTParam()
+}
+
+// IsFullyInstantiated reports whether t is a fully instantiated generic type; i.e. an
+// instantiated generic type where all type arguments are non-generic or fully
+// instantiated generic types.
+func (t *Type) IsFullyInstantiated() bool {
+ return len(t.RParams()) > 0 && !t.HasTParam()
+}
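
An illustrative toy (hypothetical names, not compiler code) of how the three predicates above classify a type from its symbol name, its RParams, and its HasTParam flag.

package main

import (
	"fmt"
	"strings"
)

// toyType is a stand-in for *types.Type, with just enough state to show
// how base, reinstantiated, and fully instantiated generic types differ.
type toyType struct {
	name      string // symbol name
	nRParams  int    // number of RParams
	hasTParam bool   // HasTParam flag
}

func (t toyType) isBaseGeneric() bool {
	return t.nRParams > 0 && !strings.Contains(t.name, "[")
}

func (t toyType) isInstantiatedGeneric() bool {
	return t.nRParams > 0 && strings.Contains(t.name, "[") && t.hasTParam
}

func (t toyType) isFullyInstantiated() bool {
	return t.nRParams > 0 && !t.hasTParam
}

func main() {
	fmt.Println(toyType{"Value", 1, true}.isBaseGeneric())             // true: declared generic base type
	fmt.Println(toyType{"Value[T2]", 1, true}.isInstantiatedGeneric()) // true: reinstantiated with a new type param
	fmt.Println(toyType{"Value[int]", 1, false}.isFullyInstantiated()) // true: all type args are concrete
}
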
+
// NoPkg is a nil *Pkg value for clarity.
// It's intended for use when constructing types that aren't exported
// and thus don't need to be associated with any package.
@@ -377,6 +408,18 @@ type Interface struct {
pkg *Pkg
}
+// Typeparam contains Type fields specific to typeparam types.
+type Typeparam struct {
+ index int // type parameter index in source order, starting at 0
+ bound *Type
+}
+
+// Union contains Type fields specific to union types.
+type Union struct {
+ terms []*Type
+ tildes []bool // whether terms[i] is of form ~T
+}
+
// Ptr contains Type fields specific to pointer types.
type Ptr struct {
Elem *Type // element type
@@ -558,7 +601,9 @@ func New(et Kind) *Type {
case TRESULTS:
t.Extra = new(Results)
case TTYPEPARAM:
- t.Extra = new(Interface)
+ t.Extra = new(Typeparam)
+ case TUNION:
+ t.Extra = new(Union)
}
return t
}
@@ -825,6 +870,8 @@ func (t *Type) copy() *Type {
case TARRAY:
x := *t.Extra.(*Array)
nt.Extra = &x
+ case TTYPEPARAM:
+ base.Fatalf("typeparam types cannot be copied")
case TTUPLE, TSSA, TRESULTS:
base.Fatalf("ssa types cannot be copied")
}
@@ -925,7 +972,7 @@ func (t *Type) FuncArgs() *Type {
return t.Extra.(FuncArgs).T
}
-// IsFuncArgStruct reports whether t is a struct representing function parameters.
+// IsFuncArgStruct reports whether t is a struct representing function parameters or results.
func (t *Type) IsFuncArgStruct() bool {
return t.kind == TSTRUCT && t.Extra.(*Struct).Funarg != FunargNone
}
@@ -1436,6 +1483,14 @@ func (t *Type) IsInterface() bool {
return t.kind == TINTER
}
+func (t *Type) IsUnion() bool {
+ return t.kind == TUNION
+}
+
+func (t *Type) IsTypeParam() bool {
+ return t.kind == TTYPEPARAM
+}
+
// IsEmptyInterface reports whether t is an empty interface type.
func (t *Type) IsEmptyInterface() bool {
return t.IsInterface() && t.AllMethods().Len() == 0
@@ -1766,14 +1821,60 @@ func NewInterface(pkg *Pkg, methods []*Field) *Type {
return t
}
-// NewTypeParam returns a new type param.
-func NewTypeParam(pkg *Pkg) *Type {
+// NewTypeParam returns a new type param with the specified sym (package and name)
+// and specified index within the typeparam list.
+func NewTypeParam(sym *Sym, index int) *Type {
t := New(TTYPEPARAM)
- t.Extra.(*Interface).pkg = pkg
+ t.sym = sym
+ t.Extra.(*Typeparam).index = index
t.SetHasTParam(true)
return t
}
+// Index returns the index of the type param within its param list.
+func (t *Type) Index() int {
+ t.wantEtype(TTYPEPARAM)
+ return t.Extra.(*Typeparam).index
+}
+
+// SetBound sets the bound of a typeparam.
+func (t *Type) SetBound(bound *Type) {
+ t.wantEtype(TTYPEPARAM)
+ t.Extra.(*Typeparam).bound = bound
+}
+
+// Bound returns the bound of a typeparam.
+func (t *Type) Bound() *Type {
+ t.wantEtype(TTYPEPARAM)
+ return t.Extra.(*Typeparam).bound
+}
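
An illustrative toy (not compiler code) of how the bound stored above is used: a type parameter satisfies an interface when its bound carries all of the interface's methods.

package main

import "fmt"

// iface and typeParam are hypothetical stand-ins for interface and
// typeparam types, keeping only method names.
type iface struct{ methods []string }

type typeParam struct {
	name  string
	bound iface
}

func hasMethod(set []string, m string) bool {
	for _, s := range set {
		if s == m {
			return true
		}
	}
	return false
}

// satisfies checks every method of want against the bound of tp.
func satisfies(tp typeParam, want iface) bool {
	for _, m := range want.methods {
		if !hasMethod(tp.bound.methods, m) {
			return false
		}
	}
	return true
}

func main() {
	stringer := iface{methods: []string{"String"}}
	tp := typeParam{name: "T", bound: iface{methods: []string{"Len", "String"}}}
	fmt.Println(satisfies(tp, stringer)) // true: the bound carries String
}
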
+
+// NewUnion returns a new union with the specified set of terms (types). If
+// tildes[i] is true, then terms[i] represents ~T, rather than just T.
+func NewUnion(terms []*Type, tildes []bool) *Type {
+ t := New(TUNION)
+ if len(terms) != len(tildes) {
+ base.Fatalf("Mismatched terms and tildes for NewUnion")
+ }
+ t.Extra.(*Union).terms = terms
+ t.Extra.(*Union).tildes = tildes
+ return t
+}
+
+// NumTerms returns the number of terms in a union type.
+func (t *Type) NumTerms() int {
+ t.wantEtype(TUNION)
+ return len(t.Extra.(*Union).terms)
+}
+
+// Term returns the ith term of a union type as (term, tilde). If tilde is true, term
+// represents ~T, rather than just T.
+func (t *Type) Term(i int) (*Type, bool) {
+ t.wantEtype(TUNION)
+ u := t.Extra.(*Union)
+ return u.terms[i], u.tildes[i]
+}
+
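
A small sketch (illustrative only, plain strings in place of *Type) of how the parallel terms/tildes slices above encode a union such as ~int | string.

package main

import "fmt"

// union stores a term list and a parallel slice recording which terms
// were written with a leading '~'.
type union struct {
	terms  []string
	tildes []bool
}

func (u union) String() string {
	s := ""
	for i, t := range u.terms {
		if i > 0 {
			s += " | "
		}
		if u.tildes[i] {
			s += "~"
		}
		s += t
	}
	return s
}

func main() {
	u := union{terms: []string{"int", "string"}, tildes: []bool{true, false}}
	fmt.Println(u) // ~int | string
}
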
const BOGUS_FUNARG_OFFSET = -1000000000
func unzeroFieldOffsets(f []*Field) {
diff --git a/src/cmd/compile/internal/types2/api.go b/src/cmd/compile/internal/types2/api.go
index 2939dcc0bd..4f7f35e61b 100644
--- a/src/cmd/compile/internal/types2/api.go
+++ b/src/cmd/compile/internal/types2/api.go
@@ -125,6 +125,12 @@ type Config struct {
// TODO(gri) Consolidate error messages and remove this flag.
CompilerErrorMessages bool
+ // If AllowTypeLists is set, the type list syntax is permitted
+ // in an interface in addition to the type set syntax.
+ // TODO(gri) Remove once type lists are no longer supported by
+ // the parser.
+ AllowTypeLists bool
+
// If go115UsesCgo is set, the type checker expects the
// _cgo_gotypes.go file generated by running cmd/cgo to be
// provided as a package source file. Qualified identifiers
@@ -355,7 +361,7 @@ func (tv TypeAndValue) HasOk() bool {
// Inferred reports the inferred type arguments and signature
// for a parameterized function call that uses type inference.
type Inferred struct {
- Targs []Type
+ TArgs []Type
Sig *Signature
}
diff --git a/src/cmd/compile/internal/types2/api_test.go b/src/cmd/compile/internal/types2/api_test.go
index 873390c1e9..c7f3e490aa 100644
--- a/src/cmd/compile/internal/types2/api_test.go
+++ b/src/cmd/compile/internal/types2/api_test.go
@@ -329,27 +329,26 @@ func TestTypesInfo(t *testing.T) {
{brokenPkg + `x5; func _() { var x map[string][...]int; x = map[string][...]int{"": {1,2,3}} }`, `x`, `map[string]invalid type`},
// parameterized functions
- {genericPkg + `p0; func f[T any](T); var _ = f[int]`, `f`, `func[T₁ interface{}](T₁)`},
+ {genericPkg + `p0; func f[T any](T); var _ = f[int]`, `f`, `func[generic_p0.T₁ interface{}](generic_p0.T₁)`},
{genericPkg + `p1; func f[T any](T); var _ = f[int]`, `f[int]`, `func(int)`},
- {genericPkg + `p2; func f[T any](T); func _() { f(42) }`, `f`, `func[T₁ interface{}](T₁)`},
+ {genericPkg + `p2; func f[T any](T); func _() { f(42) }`, `f`, `func[generic_p2.T₁ interface{}](generic_p2.T₁)`},
{genericPkg + `p3; func f[T any](T); func _() { f(42) }`, `f(42)`, `()`},
// type parameters
{genericPkg + `t0; type t[] int; var _ t`, `t`, `generic_t0.t`}, // t[] is a syntax error that is ignored in this test in favor of t
- {genericPkg + `t1; type t[P any] int; var _ t[int]`, `t`, `generic_t1.t[P₁ interface{}]`},
- {genericPkg + `t2; type t[P interface{}] int; var _ t[int]`, `t`, `generic_t2.t[P₁ interface{}]`},
- {genericPkg + `t3; type t[P, Q interface{}] int; var _ t[int, int]`, `t`, `generic_t3.t[P₁, Q₂ interface{}]`},
- {brokenPkg + `t4; type t[P, Q interface{ m() }] int; var _ t[int, int]`, `t`, `broken_t4.t[P₁, Q₂ interface{m()}]`},
+ {genericPkg + `t1; type t[P any] int; var _ t[int]`, `t`, `generic_t1.t[generic_t1.P₁ interface{}]`},
+ {genericPkg + `t2; type t[P interface{}] int; var _ t[int]`, `t`, `generic_t2.t[generic_t2.P₁ interface{}]`},
+ {genericPkg + `t3; type t[P, Q interface{}] int; var _ t[int, int]`, `t`, `generic_t3.t[generic_t3.P₁, generic_t3.Q₂ interface{}]`},
+ {brokenPkg + `t4; type t[P, Q interface{ m() }] int; var _ t[int, int]`, `t`, `broken_t4.t[broken_t4.P₁, broken_t4.Q₂ interface{m()}]`},
// instantiated types must be sanitized
{genericPkg + `g0; type t[P any] int; var x struct{ f t[int] }; var _ = x.f`, `x.f`, `generic_g0.t[int]`},
// issue 45096
- {genericPkg + `issue45096; func _[T interface{ type int8, int16, int32 }](x T) { _ = x < 0 }`, `0`, `T₁`},
+ {genericPkg + `issue45096; func _[T interface{ ~int8 | ~int16 | ~int32 }](x T) { _ = x < 0 }`, `0`, `generic_issue45096.T₁`},
}
for _, test := range tests {
- ResetId() // avoid renumbering of type parameter ids when adding tests
info := Info{Types: make(map[syntax.Expr]TypeAndValue)}
var name string
if strings.HasPrefix(test.src, brokenPkg) {
@@ -455,38 +454,38 @@ func TestInferredInfo(t *testing.T) {
// `func(float64)`,
// },
- {genericPkg + `s1; func f[T any, P interface{type *T}](x T); func _(x string) { f(x) }`,
+ {genericPkg + `s1; func f[T any, P interface{~*T}](x T); func _(x string) { f(x) }`,
`f`,
[]string{`string`, `*string`},
`func(x string)`,
},
- {genericPkg + `s2; func f[T any, P interface{type *T}](x []T); func _(x []int) { f(x) }`,
+ {genericPkg + `s2; func f[T any, P interface{~*T}](x []T); func _(x []int) { f(x) }`,
`f`,
[]string{`int`, `*int`},
`func(x []int)`,
},
- {genericPkg + `s3; type C[T any] interface{type chan<- T}; func f[T any, P C[T]](x []T); func _(x []int) { f(x) }`,
+ {genericPkg + `s3; type C[T any] interface{~chan<- T}; func f[T any, P C[T]](x []T); func _(x []int) { f(x) }`,
`f`,
[]string{`int`, `chan<- int`},
`func(x []int)`,
},
- {genericPkg + `s4; type C[T any] interface{type chan<- T}; func f[T any, P C[T], Q C[[]*P]](x []T); func _(x []int) { f(x) }`,
+ {genericPkg + `s4; type C[T any] interface{~chan<- T}; func f[T any, P C[T], Q C[[]*P]](x []T); func _(x []int) { f(x) }`,
`f`,
[]string{`int`, `chan<- int`, `chan<- []*chan<- int`},
`func(x []int)`,
},
- {genericPkg + `t1; func f[T any, P interface{type *T}]() T; func _() { _ = f[string] }`,
+ {genericPkg + `t1; func f[T any, P interface{~*T}]() T; func _() { _ = f[string] }`,
`f`,
[]string{`string`, `*string`},
`func() string`,
},
- {genericPkg + `t2; type C[T any] interface{type chan<- T}; func f[T any, P C[T]]() []T; func _() { _ = f[int] }`,
+ {genericPkg + `t2; type C[T any] interface{~chan<- T}; func f[T any, P C[T]]() []T; func _() { _ = f[int] }`,
`f`,
[]string{`int`, `chan<- int`},
`func() []int`,
},
- {genericPkg + `t3; type C[T any] interface{type chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T; func _() { _ = f[int] }`,
+ {genericPkg + `t3; type C[T any] interface{~chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T; func _() { _ = f[int] }`,
`f`,
[]string{`int`, `chan<- int`, `chan<- []*chan<- int`},
`func() []int`,
@@ -515,7 +514,7 @@ func TestInferredInfo(t *testing.T) {
panic(fmt.Sprintf("unexpected call expression type %T", call))
}
if syntax.String(fun) == test.fun {
- targs = inf.Targs
+ targs = inf.TArgs
sig = inf.Sig
break
}
diff --git a/src/cmd/compile/internal/types2/builtins.go b/src/cmd/compile/internal/types2/builtins.go
index f90e06f226..8f2d849ef5 100644
--- a/src/cmd/compile/internal/types2/builtins.go
+++ b/src/cmd/compile/internal/types2/builtins.go
@@ -178,9 +178,9 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
mode = value
}
- case *Sum:
- if t.is(func(t Type) bool {
- switch t := under(t).(type) {
+ case *Union:
+ if t.underIs(func(t Type) bool {
+ switch t := t.(type) {
case *Basic:
if isString(t) && id == _Len {
return true
@@ -460,8 +460,8 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
m = 2
case *Map, *Chan:
m = 1
- case *Sum:
- return t.is(valid)
+ case *Union:
+ return t.underIs(valid)
default:
return false
}
@@ -760,9 +760,11 @@ func (check *Checker) applyTypeFunc(f func(Type) Type, x Type) Type {
// Test if t satisfies the requirements for the argument
// type and collect possible result types at the same time.
var rtypes []Type
- if !tp.Bound().is(func(x Type) bool {
- if r := f(x); r != nil {
+ var tildes []bool
+ if !tp.Bound().is(func(typ Type, tilde bool) bool {
+ if r := f(typ); r != nil {
rtypes = append(rtypes, r)
+ tildes = append(tildes, tilde)
return true
}
return false
@@ -775,11 +777,13 @@ func (check *Checker) applyTypeFunc(f func(Type) Type, x Type) Type {
// uses of real() where the result is used to
// define type and initialize a variable?
- // construct a suitable new type parameter
- tpar := NewTypeName(nopos, nil /* = Universe pkg */, "<type parameter>", nil)
+ // Construct a suitable new type parameter for the sum type. The
+ // type param is placed in the current package so export/import
+ // works as expected.
+ tpar := NewTypeName(nopos, check.pkg, "<type parameter>", nil)
ptyp := check.NewTypeParam(tpar, 0, &emptyInterface) // assigns type to tpar as a side-effect
- tsum := NewSum(rtypes)
- ptyp.bound = &Interface{types: tsum, allMethods: markComplete, allTypes: tsum}
+ tsum := newUnion(rtypes, tildes)
+ ptyp.bound = &Interface{allMethods: markComplete, allTypes: tsum}
return ptyp
}
diff --git a/src/cmd/compile/internal/types2/call.go b/src/cmd/compile/internal/types2/call.go
index 6d149340b2..8c717cd1e5 100644
--- a/src/cmd/compile/internal/types2/call.go
+++ b/src/cmd/compile/internal/types2/call.go
@@ -576,17 +576,37 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr) {
check.recordSelection(e, MethodExpr, x.typ, m, index, indirect)
- // the receiver type becomes the type of the first function
- // argument of the method expression's function type
- var params []*Var
sig := m.typ.(*Signature)
+ if sig.recv == nil {
+ check.error(e, "illegal cycle in method declaration")
+ goto Error
+ }
+
+ // The receiver type becomes the type of the first function
+ // argument of the method expression's function type.
+ var params []*Var
if sig.params != nil {
params = sig.params.vars
}
+ // Be consistent about named/unnamed parameters. This is not needed
+ // for type-checking, but the newly constructed signature may appear
+ // in an error message and then have mixed named/unnamed parameters.
+ // (An alternative would be to not print parameter names in errors,
+ // but it's useful to see them; this is cheap and method expressions
+ // are rare.)
+ name := ""
+ if len(params) > 0 && params[0].name != "" {
+ // name needed
+ name = sig.recv.name
+ if name == "" {
+ name = "_"
+ }
+ }
+ params = append([]*Var{NewVar(sig.recv.pos, sig.recv.pkg, name, x.typ)}, params...)
x.mode = value
x.typ = &Signature{
tparams: sig.tparams,
- params: NewTuple(append([]*Var{NewVar(nopos, check.pkg, "_", x.typ)}, params...)...),
+ params: NewTuple(params...),
results: sig.results,
variadic: sig.variadic,
}
diff --git a/src/cmd/compile/internal/types2/check.go b/src/cmd/compile/internal/types2/check.go
index 8d6cd1edab..5d3c2c8ad2 100644
--- a/src/cmd/compile/internal/types2/check.go
+++ b/src/cmd/compile/internal/types2/check.go
@@ -71,7 +71,7 @@ type importKey struct {
// A dotImportKey describes a dot-imported object in the given scope.
type dotImportKey struct {
scope *Scope
- obj Object
+ name string
}
// A Checker maintains the state of the type checker.
@@ -83,6 +83,7 @@ type Checker struct {
pkg *Package
*Info
version version // accepted language version
+ nextID uint64 // unique Id for type parameters (first valid Id is 1)
objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info
impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package
posMap map[*Interface][]syntax.Pos // maps interface types to lists of embedded interface positions
diff --git a/src/cmd/compile/internal/types2/decl.go b/src/cmd/compile/internal/types2/decl.go
index 1333e4c0ec..00b4ef7010 100644
--- a/src/cmd/compile/internal/types2/decl.go
+++ b/src/cmd/compile/internal/types2/decl.go
@@ -522,28 +522,59 @@ func (check *Checker) varDecl(obj *Var, lhs []*Var, typ, init syntax.Expr) {
// is detected, the result is Typ[Invalid]. If a cycle is detected and
// n0.check != nil, the cycle is reported.
func (n0 *Named) under() Type {
- u := n0.underlying
- if u == nil {
- return Typ[Invalid]
+ u := n0.Underlying()
+
+ if u == Typ[Invalid] {
+ return u
}
// If the underlying type of a defined type is not a defined
- // type, then that is the desired underlying type.
+ // (incl. instance) type, then that is the desired underlying
+ // type.
+ switch u.(type) {
+ case nil:
+ return Typ[Invalid]
+ default:
+ // common case
+ return u
+ case *Named, *instance:
+ // handled below
+ }
+
+ if n0.check == nil {
+ panic("internal error: Named.check == nil but type is incomplete")
+ }
+
+ // Invariant: after this point n0 as well as any named types in its
+ // underlying chain should be set up when this function exits.
+ check := n0.check
+
+ // If we can't expand u at this point, it is invalid.
n := asNamed(u)
if n == nil {
- return u // common case
+ n0.underlying = Typ[Invalid]
+ return n0.underlying
}
// Otherwise, follow the forward chain.
seen := map[*Named]int{n0: 0}
path := []Object{n0.obj}
for {
- u = n.underlying
+ u = n.Underlying()
if u == nil {
u = Typ[Invalid]
break
}
- n1 := asNamed(u)
+ var n1 *Named
+ switch u1 := u.(type) {
+ case *Named:
+ n1 = u1
+ case *instance:
+ n1, _ = u1.expand().(*Named)
+ if n1 == nil {
+ u = Typ[Invalid]
+ }
+ }
if n1 == nil {
break // end of chain
}
@@ -554,11 +585,7 @@ func (n0 *Named) under() Type {
if i, ok := seen[n]; ok {
// cycle
- // TODO(gri) revert this to a method on Checker. Having a possibly
- // nil Checker on Named and TypeParam is too subtle.
- if n0.check != nil {
- n0.check.cycleError(path[i:])
- }
+ check.cycleError(path[i:])
u = Typ[Invalid]
break
}
@@ -568,8 +595,8 @@ func (n0 *Named) under() Type {
// We should never have to update the underlying type of an imported type;
// those underlying types should have been resolved during the import.
// Also, doing so would lead to a race condition (was issue #31749).
- // Do this check always, not just in debug more (it's cheap).
- if n0.check != nil && n.obj.pkg != n0.check.pkg {
+ // Do this check always, not just in debug mode (it's cheap).
+ if n.obj.pkg != check.pkg {
panic("internal error: imported type with unresolved underlying type")
}
n.underlying = u
@@ -737,7 +764,7 @@ func (check *Checker) collectMethods(obj *TypeName) {
// and field names must be distinct."
base := asNamed(obj.typ) // shouldn't fail but be conservative
if base != nil {
- if t, _ := base.underlying.(*Struct); t != nil {
+ if t, _ := base.Underlying().(*Struct); t != nil {
for _, fld := range t.fields {
if fld.name != "_" {
assert(mset.insert(fld) == nil)
@@ -779,6 +806,7 @@ func (check *Checker) collectMethods(obj *TypeName) {
}
if base != nil {
+ base.expand() // TODO(mdempsky): Probably unnecessary.
base.methods = append(base.methods, m)
}
}
diff --git a/src/cmd/compile/internal/types2/expr.go b/src/cmd/compile/internal/types2/expr.go
index 23b79656bb..b223387f18 100644
--- a/src/cmd/compile/internal/types2/expr.go
+++ b/src/cmd/compile/internal/types2/expr.go
@@ -723,8 +723,8 @@ func (check *Checker) implicitTypeAndValue(x *operand, target Type) (Type, const
default:
return nil, nil, _InvalidUntypedConversion
}
- case *Sum:
- ok := t.is(func(t Type) bool {
+ case *Union:
+ ok := t.underIs(func(t Type) bool {
target, _, _ := check.implicitTypeAndValue(x, t)
return target != nil
})
diff --git a/src/cmd/compile/internal/types2/index.go b/src/cmd/compile/internal/types2/index.go
index c94017a8fb..47e0853a3b 100644
--- a/src/cmd/compile/internal/types2/index.go
+++ b/src/cmd/compile/internal/types2/index.go
@@ -91,15 +91,15 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo
x.expr = e
return
- case *Sum:
- // A sum type can be indexed if all of the sum's types
+ case *Union:
+ // A union type can be indexed if all of the union's terms
// support indexing and have the same index and element
- // type. Special rules apply for maps in the sum type.
+ // type. Special rules apply for maps in the union type.
var tkey, telem Type // key is for map types only
- nmaps := 0 // number of map types in sum type
- if typ.is(func(t Type) bool {
+ nmaps := 0 // number of map types in union type
+ if typ.underIs(func(t Type) bool {
var e Type
- switch t := under(t).(type) {
+ switch t := t.(type) {
case *Basic:
if isString(t) {
e = universeByte
@@ -113,7 +113,7 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo
case *Slice:
e = t.elem
case *Map:
- // If there are multiple maps in the sum type,
+ // If there are multiple maps in the union type,
// they must have identical key types.
// TODO(gri) We may be able to relax this rule
// but it becomes complicated very quickly.
@@ -126,7 +126,7 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo
case *TypeParam:
check.errorf(x, "type of %s contains a type parameter - cannot index (implementation restriction)", x)
case *instance:
- panic("unimplemented")
+ unimplemented()
}
if e == nil || telem != nil && !Identical(e, telem) {
return false
@@ -148,7 +148,7 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo
// ok to continue even if indexing failed - map element type is known
// If there are only maps, we are done.
- if nmaps == len(typ.types) {
+ if nmaps == typ.NumTerms() {
x.mode = mapindex
x.typ = telem
x.expr = e
@@ -246,7 +246,7 @@ func (check *Checker) sliceExpr(x *operand, e *syntax.SliceExpr) {
valid = true
// x.typ doesn't change
- case *Sum, *TypeParam:
+ case *Union, *TypeParam:
check.error(x, "generic slice expressions not yet implemented")
x.mode = invalid
return
diff --git a/src/cmd/compile/internal/types2/infer.go b/src/cmd/compile/internal/types2/infer.go
index f37d7f6477..63cd63aacc 100644
--- a/src/cmd/compile/internal/types2/infer.go
+++ b/src/cmd/compile/internal/types2/infer.go
@@ -307,7 +307,7 @@ func (w *tpWalker) isParameterized(typ Type) (res bool) {
}
}
- case *Sum:
+ case *Union:
return w.isParameterizedList(t.types)
case *Signature:
@@ -328,7 +328,7 @@ func (w *tpWalker) isParameterized(typ Type) (res bool) {
return true
}
}
- return w.isParameterizedList(unpack(t.allTypes))
+ return w.isParameterized(t.allTypes)
}
return t.iterate(func(t *Interface) bool {
@@ -337,7 +337,7 @@ func (w *tpWalker) isParameterized(typ Type) (res bool) {
return true
}
}
- return w.isParameterizedList(unpack(t.types))
+ return w.isParameterizedList(t.embeddeds)
}, nil)
case *Map:
@@ -477,11 +477,14 @@ func (check *Checker) inferB(tparams []*TypeName, targs []Type, report bool) (ty
func (check *Checker) structuralType(constraint Type) Type {
if iface, _ := under(constraint).(*Interface); iface != nil {
check.completeInterface(nopos, iface)
- types := unpack(iface.allTypes)
- if len(types) == 1 {
- return types[0]
+ if u, _ := iface.allTypes.(*Union); u != nil {
+ if u.NumTerms() == 1 {
+ // TODO(gri) do we need to respect tilde?
+ return u.types[0]
+ }
+ return nil
}
- return nil
+ return iface.allTypes
}
- return constraint
+ return nil
}
diff --git a/src/cmd/compile/internal/types2/instantiate.go b/src/cmd/compile/internal/types2/instantiate.go
index 0df52e851c..85c897a909 100644
--- a/src/cmd/compile/internal/types2/instantiate.go
+++ b/src/cmd/compile/internal/types2/instantiate.go
@@ -23,7 +23,7 @@ func Instantiate(pos syntax.Pos, typ Type, targs []Type) (res Type) {
var tparams []*TypeName
switch t := typ.(type) {
case *Named:
- tparams = t.tparams
+ tparams = t.TParams()
case *Signature:
tparams = t.tparams
defer func() {
@@ -61,3 +61,19 @@ func Instantiate(pos syntax.Pos, typ Type, targs []Type) (res Type) {
smap := makeSubstMap(tparams, targs)
return (*Checker)(nil).subst(pos, typ, smap)
}
+
+// InstantiateLazy is like Instantiate, but avoids actually
+// instantiating the type until needed.
+func (check *Checker) InstantiateLazy(pos syntax.Pos, typ Type, targs []Type) (res Type) {
+ base := asNamed(typ)
+ if base == nil {
+ panic(fmt.Sprintf("%v: cannot instantiate %v", pos, typ))
+ }
+
+ return &instance{
+ check: check,
+ pos: pos,
+ base: base,
+ targs: targs,
+ }
+}
diff --git a/src/cmd/compile/internal/types2/interface.go b/src/cmd/compile/internal/types2/interface.go
new file mode 100644
index 0000000000..c79026f00d
--- /dev/null
+++ b/src/cmd/compile/internal/types2/interface.go
@@ -0,0 +1,318 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "sort"
+)
+
+func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType, def *Named) {
+ var tlist []syntax.Expr // types collected from all type lists
+ var tname *syntax.Name // most recent "type" name
+
+ for _, f := range iface.MethodList {
+ if f.Name == nil {
+ // We have an embedded type; possibly a union of types.
+ ityp.embeddeds = append(ityp.embeddeds, parseUnion(check, flattenUnion(nil, f.Type)))
+ check.posMap[ityp] = append(check.posMap[ityp], f.Type.Pos())
+ continue
+ }
+ // f.Name != nil
+
+ // We have a method with name f.Name, or a type of a type list (f.Name.Value == "type").
+ name := f.Name.Value
+ if name == "_" {
+ if check.conf.CompilerErrorMessages {
+ check.error(f.Name, "methods must have a unique non-blank name")
+ } else {
+ check.error(f.Name, "invalid method name _")
+ }
+ continue // ignore
+ }
+
+ // TODO(gri) Remove type list handling once the parser doesn't accept type lists anymore.
+ if name == "type" {
+ // Report an error for the first type list per interface
+ // if we don't allow type lists, but continue.
+ if !check.conf.AllowTypeLists && tlist == nil {
+ check.softErrorf(f.Name, "use generalized embedding syntax instead of a type list")
+ }
+ // For now, collect all type list entries as if it
+ // were a single union, where each union element is
+ // of the form ~T.
+ op := new(syntax.Operation)
+ // We should also set the position (but there is no setter);
+ // we don't care because this code will eventually go away.
+ op.Op = syntax.Tilde
+ op.X = f.Type
+ tlist = append(tlist, op)
+ // Report an error if we have multiple type lists in an
+ // interface, but only if they are permitted in the first place.
+ if check.conf.AllowTypeLists && tname != nil && tname != f.Name {
+ check.error(f.Name, "cannot have multiple type lists in an interface")
+ }
+ tname = f.Name
+ continue
+ }
+
+ typ := check.typ(f.Type)
+ sig, _ := typ.(*Signature)
+ if sig == nil {
+ if typ != Typ[Invalid] {
+ check.errorf(f.Type, invalidAST+"%s is not a method signature", typ)
+ }
+ continue // ignore
+ }
+
+ // Always type-check method type parameters but complain if they are not enabled.
+ // (This extra check is needed here because interface method signatures don't have
+ // a receiver specification.)
+ if sig.tparams != nil && !acceptMethodTypeParams {
+ check.error(f.Type, "methods cannot have type parameters")
+ }
+
+ // use named receiver type if available (for better error messages)
+ var recvTyp Type = ityp
+ if def != nil {
+ recvTyp = def
+ }
+ sig.recv = NewVar(f.Name.Pos(), check.pkg, "", recvTyp)
+
+ m := NewFunc(f.Name.Pos(), check.pkg, name, sig)
+ check.recordDef(f.Name, m)
+ ityp.methods = append(ityp.methods, m)
+ }
+
+ // If we saw a type list, add it like an embedded union.
+ if tlist != nil {
+ ityp.embeddeds = append(ityp.embeddeds, parseUnion(check, tlist))
+ // Types T in a type list are added as ~T expressions but we don't
+ // have the position of the '~'. Use the first type position instead.
+ check.posMap[ityp] = append(check.posMap[ityp], tlist[0].(*syntax.Operation).X.Pos())
+ }
+
+ if len(ityp.methods) == 0 && len(ityp.embeddeds) == 0 {
+ // empty interface
+ ityp.allMethods = markComplete
+ return
+ }
+
+ // sort for API stability
+ sortMethods(ityp.methods)
+ sortTypes(ityp.embeddeds)
+
+ check.later(func() { check.completeInterface(iface.Pos(), ityp) })
+}
+
+func flattenUnion(list []syntax.Expr, x syntax.Expr) []syntax.Expr {
+ if o, _ := x.(*syntax.Operation); o != nil && o.Op == syntax.Or {
+ list = flattenUnion(list, o.X)
+ x = o.Y
+ }
+ return append(list, x)
+}
+
+func (check *Checker) completeInterface(pos syntax.Pos, ityp *Interface) {
+ if ityp.allMethods != nil {
+ return
+ }
+
+ // completeInterface may be called via the LookupFieldOrMethod,
+ // MissingMethod, Identical, or IdenticalIgnoreTags external API
+ // in which case check will be nil. In this case, type-checking
+ // must be finished and all interfaces should have been completed.
+ if check == nil {
+ panic("internal error: incomplete interface")
+ }
+
+ completeInterface(check, pos, ityp)
+}
+
+// completeInterface may be called with check == nil.
+func completeInterface(check *Checker, pos syntax.Pos, ityp *Interface) {
+ assert(ityp.allMethods == nil)
+
+ if check != nil && check.conf.Trace {
+ // Types don't generally have position information.
+ // If we don't have a valid pos provided, try to use
+ // one close enough.
+ if !pos.IsKnown() && len(ityp.methods) > 0 {
+ pos = ityp.methods[0].pos
+ }
+
+ check.trace(pos, "complete %s", ityp)
+ check.indent++
+ defer func() {
+ check.indent--
+ check.trace(pos, "=> %s (methods = %v, types = %v)", ityp, ityp.allMethods, ityp.allTypes)
+ }()
+ }
+
+ // An infinitely expanding interface (due to a cycle) is detected
+ // elsewhere (Checker.validType), so here we simply assume we only
+ // have valid interfaces. Mark the interface as complete to avoid
+ // infinite recursion if the validType check occurs later for some
+ // reason.
+ ityp.allMethods = markComplete
+
+ // Methods of embedded interfaces are collected unchanged; i.e., the identity
+ // of a method I.m's Func Object of an interface I is the same as that of
+ // the method m in an interface that embeds interface I. On the other hand,
+ // if a method is embedded via multiple overlapping embedded interfaces, we
+ // don't provide a guarantee which "original m" got chosen for the embedding
+ // interface. See also issue #34421.
+ //
+ // If we don't care to provide this identity guarantee anymore, instead of
+ // reusing the original method in embeddings, we can clone the method's Func
+ // Object and give it the position of a corresponding embedded interface. Then
+ // we can get rid of the mpos map below and simply use the cloned method's
+ // position.
+
+ var todo []*Func
+ var seen objset
+ var methods []*Func
+ mpos := make(map[*Func]syntax.Pos) // method specification or method embedding position, for good error messages
+ addMethod := func(pos syntax.Pos, m *Func, explicit bool) {
+ switch other := seen.insert(m); {
+ case other == nil:
+ methods = append(methods, m)
+ mpos[m] = pos
+ case explicit:
+ if check == nil {
+ panic(fmt.Sprintf("%s: duplicate method %s", m.pos, m.name))
+ }
+ // check != nil
+ var err error_
+ err.errorf(pos, "duplicate method %s", m.name)
+ err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
+ check.report(&err)
+ default:
+ // We have a duplicate method name in an embedded (not explicitly declared) method.
+ // Check method signatures after all types are computed (issue #33656).
+ // If we're pre-go1.14 (overlapping embeddings are not permitted), report that
+ // error here as well (even though we could do it eagerly) because it's the same
+ // error message.
+ if check == nil {
+ // check method signatures after all locally embedded interfaces are computed
+ todo = append(todo, m, other.(*Func))
+ break
+ }
+ // check != nil
+ check.later(func() {
+ if !check.allowVersion(m.pkg, 1, 14) || !check.identical(m.typ, other.Type()) {
+ var err error_
+ err.errorf(pos, "duplicate method %s", m.name)
+ err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
+ check.report(&err)
+ }
+ })
+ }
+ }
+
+ for _, m := range ityp.methods {
+ addMethod(m.pos, m, true)
+ }
+
+ // collect embedded elements
+ var allTypes Type
+ var posList []syntax.Pos
+ if check != nil {
+ posList = check.posMap[ityp]
+ }
+ for i, typ := range ityp.embeddeds {
+ var pos syntax.Pos // embedding position
+ if posList != nil {
+ pos = posList[i]
+ }
+ var types Type
+ switch t := under(typ).(type) {
+ case *Interface:
+ if t.allMethods == nil {
+ completeInterface(check, pos, t)
+ }
+ for _, m := range t.allMethods {
+ addMethod(pos, m, false) // use embedding position pos rather than m.pos
+ }
+ types = t.allTypes
+ case *Union:
+ // TODO(gri) combine with default case once we have
+ // converted all tests to new notation and we
+ // can report an error when we don't have an
+ // interface before go1.18.
+ types = typ
+ case *TypeParam:
+ if check != nil && !check.allowVersion(check.pkg, 1, 18) {
+ check.errorf(pos, "%s is a type parameter, not an interface", typ)
+ continue
+ }
+ types = typ
+ default:
+ if typ == Typ[Invalid] {
+ continue
+ }
+ if check != nil && !check.allowVersion(check.pkg, 1, 18) {
+ check.errorf(pos, "%s is not an interface", typ)
+ continue
+ }
+ types = typ
+ }
+ allTypes = intersect(allTypes, types)
+ }
+
+ // process todo's (this only happens if check == nil)
+ for i := 0; i < len(todo); i += 2 {
+ m := todo[i]
+ other := todo[i+1]
+ if !Identical(m.typ, other.typ) {
+ panic(fmt.Sprintf("%s: duplicate method %s", m.pos, m.name))
+ }
+ }
+
+ if methods != nil {
+ sortMethods(methods)
+ ityp.allMethods = methods
+ }
+ ityp.allTypes = allTypes
+}
+
+func sortTypes(list []Type) {
+ sort.Stable(byUniqueTypeName(list))
+}
+
+// byUniqueTypeName named type lists can be sorted by their unique type names.
+type byUniqueTypeName []Type
+
+func (a byUniqueTypeName) Len() int { return len(a) }
+func (a byUniqueTypeName) Less(i, j int) bool { return sortObj(a[i]).less(sortObj(a[j])) }
+func (a byUniqueTypeName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+func sortObj(t Type) *object {
+ if named := asNamed(t); named != nil {
+ return &named.obj.object
+ }
+ return nil
+}
+
+func sortMethods(list []*Func) {
+ sort.Sort(byUniqueMethodName(list))
+}
+
+func assertSortedMethods(list []*Func) {
+ if !debug {
+ panic("internal error: assertSortedMethods called outside debug mode")
+ }
+ if !sort.IsSorted(byUniqueMethodName(list)) {
+ panic("internal error: methods not sorted")
+ }
+}
+
+// byUniqueMethodName method lists can be sorted by their unique method names.
+type byUniqueMethodName []*Func
+
+func (a byUniqueMethodName) Len() int { return len(a) }
+func (a byUniqueMethodName) Less(i, j int) bool { return a[i].less(&a[j].object) }
+func (a byUniqueMethodName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
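As a point of reference for the interfaceType logic above, here is a minimal sketch (hypothetical constraint name Numeric) of the legacy type-list form and the generalized union form it is collected as; the legacy spelling is only accepted while check.conf.AllowTypeLists is set:

package p

// Legacy type-list form; interfaceType gathers the entries into the
// single embedded union ~int | ~float64:
//
//	type Numeric interface {
//		type int, float64
//	}

// Equivalent generalized embedding form:
type Numeric interface {
	~int | ~float64
}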
diff --git a/src/cmd/compile/internal/types2/labels.go b/src/cmd/compile/internal/types2/labels.go
index d3206988b5..6f02e2fc96 100644
--- a/src/cmd/compile/internal/types2/labels.go
+++ b/src/cmd/compile/internal/types2/labels.go
@@ -32,7 +32,8 @@ func (check *Checker) labels(body *syntax.BlockStmt) {
}
// spec: "It is illegal to define a label that is never used."
- for _, obj := range all.elems {
+ for name, obj := range all.elems {
+ obj = resolve(name, obj)
if lbl := obj.(*Label); !lbl.used {
check.softErrorf(lbl.pos, "label %s declared but not used", lbl.name)
}
diff --git a/src/cmd/compile/internal/types2/lookup.go b/src/cmd/compile/internal/types2/lookup.go
index 78299502e9..93ed620449 100644
--- a/src/cmd/compile/internal/types2/lookup.go
+++ b/src/cmd/compile/internal/types2/lookup.go
@@ -54,7 +54,7 @@ func (check *Checker) lookupFieldOrMethod(T Type, addressable bool, pkg *Package
// pointer type but discard the result if it is a method since we would
// not have found it for T (see also issue 8590).
if t := asNamed(T); t != nil {
- if p, _ := t.underlying.(*Pointer); p != nil {
+ if p, _ := t.Underlying().(*Pointer); p != nil {
obj, index, indirect = check.rawLookupFieldOrMethod(p, false, pkg, name)
if _, ok := obj.(*Func); ok {
return nil, nil, false
@@ -126,6 +126,7 @@ func (check *Checker) rawLookupFieldOrMethod(T Type, addressable bool, pkg *Pack
seen[named] = true
// look for a matching attached method
+ named.expand()
if i, m := lookupMethod(named.methods, pkg, name); m != nil {
// potential match
// caution: method may not have a proper signature yet
@@ -333,6 +334,9 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
if len(ftyp.tparams) != len(mtyp.tparams) {
return m, f
}
+ if !acceptMethodTypeParams && len(ftyp.tparams) > 0 {
+ panic("internal error: method with type parameters")
+ }
// If the methods have type parameters we don't care whether they
// are the same or not, as long as they match up. Use unification
@@ -386,6 +390,9 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
if len(ftyp.tparams) != len(mtyp.tparams) {
return m, f
}
+ if !acceptMethodTypeParams && len(ftyp.tparams) > 0 {
+ panic("internal error: method with type parameters")
+ }
// If V is an (instantiated) generic type, its methods are still
// parameterized using the original (declaration) receiver type
@@ -394,7 +401,7 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
// In order to compare the signatures, substitute the receiver
// type parameters of ftyp with V's instantiation type arguments.
// This lazily instantiates the signature of method f.
- if Vn != nil && len(Vn.tparams) > 0 {
+ if Vn != nil && len(Vn.TParams()) > 0 {
// Be careful: The number of type arguments may not match
// the number of receiver parameters. If so, an error was
// reported earlier but the length discrepancy is still
@@ -413,7 +420,20 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
// TODO(gri) is this always correct? what about type bounds?
// (Alternative is to rename/subst type parameters and compare.)
u := newUnifier(check, true)
- u.x.init(ftyp.tparams)
+ if len(ftyp.tparams) > 0 {
+ // We reach here only if we accept method type parameters.
+ // In this case, unification must consider any receiver
+ // and method type parameters as "free" type parameters.
+ assert(acceptMethodTypeParams)
+ // We don't have a test case for this at the moment since
+ // we can't parse method type parameters. Keeping the
+ // unimplemented call so that we test this code if we
+ // enable method type parameters.
+ unimplemented()
+ u.x.init(append(ftyp.rparams, ftyp.tparams...))
+ } else {
+ u.x.init(ftyp.rparams)
+ }
if !u.unify(ftyp, mtyp) {
return m, f
}
diff --git a/src/cmd/compile/internal/types2/object.go b/src/cmd/compile/internal/types2/object.go
index 844bc34b6a..82297ff17f 100644
--- a/src/cmd/compile/internal/types2/object.go
+++ b/src/cmd/compile/internal/types2/object.go
@@ -186,6 +186,45 @@ func (obj *object) sameId(pkg *Package, name string) bool {
return pkg.path == obj.pkg.path
}
+// less reports whether object a is ordered before object b.
+//
+// Objects are ordered nil before non-nil, exported before
+// non-exported, then by name, and finally (for non-exported
+// names) by package height and path.
+func (a *object) less(b *object) bool {
+ if a == b {
+ return false
+ }
+
+ // Nil before non-nil.
+ if a == nil {
+ return true
+ }
+ if b == nil {
+ return false
+ }
+
+ // Exported functions before non-exported.
+ ea := isExported(a.name)
+ eb := isExported(b.name)
+ if ea != eb {
+ return ea
+ }
+
+ // Order by name and then (for non-exported names) by package.
+ if a.name != b.name {
+ return a.name < b.name
+ }
+ if !ea {
+ if a.pkg.height != b.pkg.height {
+ return a.pkg.height < b.pkg.height
+ }
+ return a.pkg.path < b.pkg.path
+ }
+
+ return false
+}
+
// A PkgName represents an imported Go package.
// PkgNames don't have a type.
type PkgName struct {
@@ -237,6 +276,14 @@ func NewTypeName(pos syntax.Pos, pkg *Package, name string, typ Type) *TypeName
return &TypeName{object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}}
}
+// NewTypeNameLazy returns a new defined type like NewTypeName, but it
+// lazily calls resolve to finish constructing the Named object.
+func NewTypeNameLazy(pos syntax.Pos, pkg *Package, name string, resolve func(named *Named) (tparams []*TypeName, underlying Type, methods []*Func)) *TypeName {
+ obj := NewTypeName(pos, pkg, name, nil)
+ NewNamed(obj, nil, nil).resolve = resolve
+ return obj
+}
+
// IsAlias reports whether obj is an alias name for a type.
func (obj *TypeName) IsAlias() bool {
switch t := obj.typ.(type) {
@@ -329,36 +376,6 @@ func (obj *Func) FullName() string {
// Scope returns the scope of the function's body block.
func (obj *Func) Scope() *Scope { return obj.typ.(*Signature).scope }
-// Less reports whether function a is ordered before function b.
-//
-// Functions are ordered exported before non-exported, then by name,
-// and finally (for non-exported functions) by package path.
-//
-// TODO(gri) The compiler also sorts by package height before package
-// path for non-exported names.
-func (a *Func) less(b *Func) bool {
- if a == b {
- return false
- }
-
- // Exported functions before non-exported.
- ea := isExported(a.name)
- eb := isExported(b.name)
- if ea != eb {
- return ea
- }
-
- // Order by name and then (for non-exported names) by package.
- if a.name != b.name {
- return a.name < b.name
- }
- if !ea {
- return a.pkg.path < b.pkg.path
- }
-
- return false
-}
-
func (*Func) isDependency() {} // a function may be a dependency of an initialization expression
// A Label represents a declared label.
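The ordering implemented by the new (*object).less above can be pictured with plain values; a self-contained sketch (made-up names and heights, with the nil-before-non-nil rule omitted):

package main

import (
	"fmt"
	"sort"
)

// objKey mirrors the fields (*object).less consults.
type objKey struct {
	exported bool
	name     string
	height   int    // package height, used for non-exported names only
	path     string // package path, used for non-exported names only
}

func less(a, b objKey) bool {
	if a.exported != b.exported {
		return a.exported // exported names sort first
	}
	if a.name != b.name {
		return a.name < b.name
	}
	if !a.exported {
		if a.height != b.height {
			return a.height < b.height
		}
		return a.path < b.path
	}
	return false
}

func main() {
	keys := []objKey{
		{false, "foo", 2, "example.com/b"},
		{true, "Bar", 0, ""},
		{false, "foo", 1, "example.com/a"},
	}
	sort.Slice(keys, func(i, j int) bool { return less(keys[i], keys[j]) })
	fmt.Println(keys) // Bar first, then the two foo entries by (height, path)
}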
diff --git a/src/cmd/compile/internal/types2/operand.go b/src/cmd/compile/internal/types2/operand.go
index 455d8b5dd1..fdc6ec52aa 100644
--- a/src/cmd/compile/internal/types2/operand.go
+++ b/src/cmd/compile/internal/types2/operand.go
@@ -248,6 +248,12 @@ func (x *operand) assignableTo(check *Checker, T Type, reason *string) (bool, er
V := x.typ
+ const debugAssignableTo = false
+ if debugAssignableTo && check != nil {
+ check.dump("V = %s", V)
+ check.dump("T = %s", T)
+ }
+
// x's type is identical to T
if check.identical(V, T) {
return true, 0
@@ -256,11 +262,20 @@ func (x *operand) assignableTo(check *Checker, T Type, reason *string) (bool, er
Vu := optype(V)
Tu := optype(T)
+ if debugAssignableTo && check != nil {
+ check.dump("Vu = %s", Vu)
+ check.dump("Tu = %s", Tu)
+ }
+
// x is an untyped value representable by a value of type T.
if isUntyped(Vu) {
- if t, ok := Tu.(*Sum); ok {
- return t.is(func(t Type) bool {
+ if t, ok := Tu.(*Union); ok {
+ return t.is(func(t Type, tilde bool) bool {
// TODO(gri) this could probably be more efficient
+ if tilde {
+ // TODO(gri) We need to check assignability
+ // for the underlying type of x.
+ }
ok, _ := x.assignableTo(check, t, reason)
return ok
}), _IncompatibleAssign
diff --git a/src/cmd/compile/internal/types2/package.go b/src/cmd/compile/internal/types2/package.go
index 31b1e71787..8044e7e6a7 100644
--- a/src/cmd/compile/internal/types2/package.go
+++ b/src/cmd/compile/internal/types2/package.go
@@ -13,8 +13,9 @@ type Package struct {
path string
name string
scope *Scope
- complete bool
imports []*Package
+ height int
+ complete bool
fake bool // scope lookup errors are silently dropped if package is fake (internal use only)
cgo bool // uses of this package will be rewritten into uses of declarations from _cgo_gotypes.go
}
@@ -22,8 +23,14 @@ type Package struct {
// NewPackage returns a new Package for the given package path and name.
// The package is not complete and contains no explicit imports.
func NewPackage(path, name string) *Package {
+ return NewPackageHeight(path, name, 0)
+}
+
+// NewPackageHeight is like NewPackage, but allows specifying the
+// package's height.
+func NewPackageHeight(path, name string, height int) *Package {
scope := NewScope(Universe, nopos, nopos, fmt.Sprintf("package %q", path))
- return &Package{path: path, name: name, scope: scope}
+ return &Package{path: path, name: name, scope: scope, height: height}
}
// Path returns the package path.
@@ -32,13 +39,22 @@ func (pkg *Package) Path() string { return pkg.path }
// Name returns the package name.
func (pkg *Package) Name() string { return pkg.name }
+// Height returns the package height.
+func (pkg *Package) Height() int { return pkg.height }
+
// SetName sets the package name.
func (pkg *Package) SetName(name string) { pkg.name = name }
// Scope returns the (complete or incomplete) package scope
// holding the objects declared at package level (TypeNames,
// Consts, Vars, and Funcs).
-func (pkg *Package) Scope() *Scope { return pkg.scope }
+// For a nil pkg receiver, Scope returns the Universe scope.
+func (pkg *Package) Scope() *Scope {
+ if pkg != nil {
+ return pkg.scope
+ }
+ return Universe
+}
// A package is complete if its scope contains (at least) all
// exported objects; otherwise it is incomplete.
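As a rough sketch of what the new height field means (the helper below is made up; it mirrors the Checker.collectObjects change further down, where imports of unsafe are ignored):

package main

import "fmt"

// packageHeight derives a package's height from the heights of its
// imports: one more than the tallest non-unsafe import, 0 for leaves.
func packageHeight(importHeights []int, isUnsafe []bool) int {
	h := 0
	for i, ih := range importHeights {
		if isUnsafe[i] {
			continue // unsafe does not contribute to height
		}
		if ih+1 > h {
			h = ih + 1
		}
	}
	return h
}

func main() {
	// Imports: unsafe (ignored) plus packages of heights 0 and 2 => height 3.
	fmt.Println(packageHeight([]int{0, 0, 2}, []bool{true, false, false}))
}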
diff --git a/src/cmd/compile/internal/types2/predicates.go b/src/cmd/compile/internal/types2/predicates.go
index ae186a0b5d..66de249044 100644
--- a/src/cmd/compile/internal/types2/predicates.go
+++ b/src/cmd/compile/internal/types2/predicates.go
@@ -21,15 +21,15 @@ func isNamed(typ Type) bool {
func isGeneric(typ Type) bool {
// A parameterized type is only instantiated if it doesn't have an instantiation already.
named, _ := typ.(*Named)
- return named != nil && named.obj != nil && named.tparams != nil && named.targs == nil
+ return named != nil && named.obj != nil && named.TParams() != nil && named.targs == nil
}
func is(typ Type, what BasicInfo) bool {
switch t := optype(typ).(type) {
case *Basic:
return t.info&what != 0
- case *Sum:
- return t.is(func(typ Type) bool { return is(typ, what) })
+ case *Union:
+ return t.underIs(func(t Type) bool { return is(t, what) })
}
return false
}
@@ -97,9 +97,9 @@ func comparable(T Type, seen map[Type]bool) bool {
seen[T] = true
// If T is a type parameter not constrained by any type
- // list (i.e., it's underlying type is the top type),
+ // list (i.e., its operational type is the top type),
// T is comparable if it has the == method. Otherwise,
- // the underlying type "wins". For instance
+ // the operational type "wins". For instance
//
// interface{ comparable; type []byte }
//
@@ -124,11 +124,10 @@ func comparable(T Type, seen map[Type]bool) bool {
return true
case *Array:
return comparable(t.elem, seen)
- case *Sum:
- pred := func(t Type) bool {
+ case *Union:
+ return t.underIs(func(t Type) bool {
return comparable(t, seen)
- }
- return t.is(pred)
+ })
case *TypeParam:
return t.Bound().IsComparable()
}
@@ -142,8 +141,8 @@ func hasNil(typ Type) bool {
return t.kind == UnsafePointer
case *Slice, *Pointer, *Signature, *Interface, *Map, *Chan:
return true
- case *Sum:
- return t.is(hasNil)
+ case *Union:
+ return t.underIs(hasNil)
}
return false
}
@@ -261,21 +260,20 @@ func (check *Checker) identical0(x, y Type, cmpTags bool, p *ifacePair) bool {
check.identical0(x.results, y.results, cmpTags, p)
}
- case *Sum:
- // Two sum types are identical if they contain the same types.
- // (Sum types always consist of at least two types. Also, the
- // the set (list) of types in a sum type consists of unique
- // types - each type appears exactly once. Thus, two sum types
+ case *Union:
+ // Two union types are identical if they contain the same terms.
+ // The set (list) of types in a union type consists of unique
+ // types - each type appears exactly once. Thus, two union types
must contain the same number of types to have a chance of
// being equal.
- if y, ok := y.(*Sum); ok && len(x.types) == len(y.types) {
+ if y, ok := y.(*Union); ok && x.NumTerms() == y.NumTerms() {
// Every type in x.types must be in y.types.
// Quadratic algorithm, but probably good enough for now.
// TODO(gri) we need a fast quick type ID/hash for all types.
L:
- for _, x := range x.types {
- for _, y := range y.types {
- if Identical(x, y) {
+ for i, xt := range x.types {
+ for j, yt := range y.types {
+ if Identical(xt, yt) && x.tilde[i] == y.tilde[j] {
continue L // x is in y.types
}
}
@@ -372,10 +370,9 @@ func (check *Checker) identical0(x, y Type, cmpTags bool, p *ifacePair) bool {
// case *instance:
// unreachable since types are expanded
- case *bottom, *top:
- // Either both types are theBottom, or both are theTop in which
- // case the initial x == y check will have caught them. Otherwise
- // they are not identical.
+ case *top:
+ // Either both types are theTop in which case the initial x == y check
+ // will have caught them. Otherwise they are not identical.
case nil:
// avoid a crash in case of nil type
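The identical0 change above makes the ~ marker part of a union term's identity. Illustratively (hypothetical constraint names; the comments describe the Identical predicate, nothing is checked at run time):

package p

type A interface{ ~int | string }
type B interface{ string | ~int } // identical to A: same terms, tildes included, order ignored
type C interface{ int | string }  // not identical to A: int vs ~int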
diff --git a/src/cmd/compile/internal/types2/resolver.go b/src/cmd/compile/internal/types2/resolver.go
index fa30650bd4..018a20cfb2 100644
--- a/src/cmd/compile/internal/types2/resolver.go
+++ b/src/cmd/compile/internal/types2/resolver.go
@@ -196,6 +196,7 @@ func (check *Checker) importPackage(pos syntax.Pos, path, dir string) *Package {
// methods with receiver base type names.
func (check *Checker) collectObjects() {
pkg := check.pkg
+ pkg.height = 0
// pkgImports is the set of packages already imported by any package file seen
// so far. Used to avoid duplicate entries in pkg.imports. Allocate and populate
@@ -253,6 +254,15 @@ func (check *Checker) collectObjects() {
continue
}
+ if imp == Unsafe {
+ // typecheck ignores imports of package unsafe for
+ // calculating height.
+ // TODO(mdempsky): Revisit this. This seems fine, but I
+ // don't remember explicitly considering this case.
+ } else if h := imp.height + 1; h > pkg.height {
+ pkg.height = h
+ }
+
// local name overrides imported package name
name := imp.name
if s.LocalPkgName != nil {
@@ -265,7 +275,7 @@ func (check *Checker) collectObjects() {
}
if name == "init" {
- check.error(s.LocalPkgName, "cannot import package as init - init must be a func")
+ check.error(s, "cannot import package as init - init must be a func")
continue
}
@@ -298,22 +308,26 @@ func (check *Checker) collectObjects() {
check.dotImportMap = make(map[dotImportKey]*PkgName)
}
// merge imported scope with file scope
- for _, obj := range imp.scope.elems {
+ for name, obj := range imp.scope.elems {
+ // Note: Avoid eager resolve(name, obj) here, so we only
+ // resolve dot-imported objects as needed.
+
// A package scope may contain non-exported objects,
// do not import them!
- if obj.Exported() {
+ if isExported(name) {
// declare dot-imported object
// (Do not use check.declare because it modifies the object
// via Object.setScopePos, which leads to a race condition;
// the object may be imported into more than one file scope
// concurrently. See issue #32154.)
- if alt := fileScope.Insert(obj); alt != nil {
+ if alt := fileScope.Lookup(name); alt != nil {
var err error_
- err.errorf(s.LocalPkgName, "%s redeclared in this block", obj.Name())
+ err.errorf(s.LocalPkgName, "%s redeclared in this block", alt.Name())
err.recordAltDecl(alt)
check.report(&err)
} else {
- check.dotImportMap[dotImportKey{fileScope, obj}] = pkgName
+ fileScope.insert(name, obj)
+ check.dotImportMap[dotImportKey{fileScope, name}] = pkgName
}
}
}
@@ -459,8 +473,9 @@ func (check *Checker) collectObjects() {
// verify that objects in package and file scopes have different names
for _, scope := range fileScopes {
- for _, obj := range scope.elems {
- if alt := pkg.scope.Lookup(obj.Name()); alt != nil {
+ for name, obj := range scope.elems {
+ if alt := pkg.scope.Lookup(name); alt != nil {
+ obj = resolve(name, obj)
var err error_
if pkg, ok := obj.(*PkgName); ok {
err.errorf(alt, "%s already declared through import of %s", alt.Name(), pkg.Imported())
diff --git a/src/cmd/compile/internal/types2/sanitize.go b/src/cmd/compile/internal/types2/sanitize.go
index 64a2dedc7d..406b46e574 100644
--- a/src/cmd/compile/internal/types2/sanitize.go
+++ b/src/cmd/compile/internal/types2/sanitize.go
@@ -26,9 +26,9 @@ func sanitizeInfo(info *Info) {
for e, inf := range info.Inferred {
changed := false
- for i, targ := range inf.Targs {
+ for i, targ := range inf.TArgs {
if typ := s.typ(targ); typ != targ {
- inf.Targs[i] = typ
+ inf.TArgs[i] = typ
changed = true
}
}
@@ -77,7 +77,7 @@ func (s sanitizer) typ(typ Type) Type {
s[typ] = typ
switch t := typ.(type) {
- case *Basic, *bottom, *top:
+ case *Basic, *top:
// nothing to do
case *Array:
@@ -106,14 +106,11 @@ func (s sanitizer) typ(typ Type) Type {
s.tuple(t.params)
s.tuple(t.results)
- case *Sum:
+ case *Union:
s.typeList(t.types)
case *Interface:
s.funcList(t.methods)
- if types := s.typ(t.types); types != t.types {
- t.types = types
- }
s.typeList(t.embeddeds)
s.funcList(t.allMethods)
if allTypes := s.typ(t.allTypes); allTypes != t.allTypes {
@@ -134,6 +131,10 @@ func (s sanitizer) typ(typ Type) Type {
}
case *Named:
+ if debug && t.check != nil {
+ panic("internal error: Named.check != nil")
+ }
+ t.expand()
if orig := s.typ(t.fromRHS); orig != t.fromRHS {
t.fromRHS = orig
}
@@ -153,7 +154,7 @@ func (s sanitizer) typ(typ Type) Type {
s[t] = typ
default:
- panic("unimplemented")
+ unimplemented()
}
return typ
diff --git a/src/cmd/compile/internal/types2/scope.go b/src/cmd/compile/internal/types2/scope.go
index ade0a79b31..2f1814a631 100644
--- a/src/cmd/compile/internal/types2/scope.go
+++ b/src/cmd/compile/internal/types2/scope.go
@@ -13,6 +13,7 @@ import (
"io"
"sort"
"strings"
+ "sync"
)
// A Scope maintains a set of objects and links to its containing
@@ -66,7 +67,7 @@ func (s *Scope) Child(i int) *Scope { return s.children[i] }
// Lookup returns the object in scope s with the given name if such an
// object exists; otherwise the result is nil.
func (s *Scope) Lookup(name string) Object {
- return s.elems[name]
+ return resolve(name, s.elems[name])
}
// LookupParent follows the parent chain of scopes starting with s until
@@ -81,7 +82,7 @@ func (s *Scope) Lookup(name string) Object {
// whose scope is the scope of the package that exported them.
func (s *Scope) LookupParent(name string, pos syntax.Pos) (*Scope, Object) {
for ; s != nil; s = s.parent {
- if obj := s.elems[name]; obj != nil && (!pos.IsKnown() || obj.scopePos().Cmp(pos) <= 0) {
+ if obj := s.Lookup(name); obj != nil && (!pos.IsKnown() || obj.scopePos().Cmp(pos) <= 0) {
return s, obj
}
}
@@ -95,19 +96,38 @@ func (s *Scope) LookupParent(name string, pos syntax.Pos) (*Scope, Object) {
// if not already set, and returns nil.
func (s *Scope) Insert(obj Object) Object {
name := obj.Name()
- if alt := s.elems[name]; alt != nil {
+ if alt := s.Lookup(name); alt != nil {
return alt
}
- if s.elems == nil {
- s.elems = make(map[string]Object)
- }
- s.elems[name] = obj
+ s.insert(name, obj)
if obj.Parent() == nil {
obj.setParent(s)
}
return nil
}
+// InsertLazy is like Insert, but allows deferring construction of the
+// inserted object until it's accessed with Lookup. The Object
+// returned by resolve must have the same name as given to InsertLazy.
+// If s already contains an alternative object with the same name,
+// InsertLazy leaves s unchanged and returns false. Otherwise it
+// records the binding and returns true. The object's parent scope
+// will be set to s after resolve is called.
+func (s *Scope) InsertLazy(name string, resolve func() Object) bool {
+ if s.elems[name] != nil {
+ return false
+ }
+ s.insert(name, &lazyObject{parent: s, resolve: resolve})
+ return true
+}
+
+func (s *Scope) insert(name string, obj Object) {
+ if s.elems == nil {
+ s.elems = make(map[string]Object)
+ }
+ s.elems[name] = obj
+}
+
// Squash merges s with its parent scope p by adding all
// objects of s to p, adding all children of s to the
// children of p, and removing s from p's children.
@@ -117,7 +137,8 @@ func (s *Scope) Insert(obj Object) Object {
func (s *Scope) Squash(err func(obj, alt Object)) {
p := s.parent
assert(p != nil)
- for _, obj := range s.elems {
+ for name, obj := range s.elems {
+ obj = resolve(name, obj)
obj.setParent(nil)
if alt := p.Insert(obj); alt != nil {
err(obj, alt)
@@ -196,7 +217,7 @@ func (s *Scope) WriteTo(w io.Writer, n int, recurse bool) {
indn1 := indn + ind
for _, name := range s.Names() {
- fmt.Fprintf(w, "%s%s\n", indn1, s.elems[name])
+ fmt.Fprintf(w, "%s%s\n", indn1, s.Lookup(name))
}
if recurse {
@@ -214,3 +235,57 @@ func (s *Scope) String() string {
s.WriteTo(&buf, 0, false)
return buf.String()
}
+
+// A lazyObject represents an imported Object that has not been fully
+// resolved yet by its importer.
+type lazyObject struct {
+ parent *Scope
+ resolve func() Object
+ obj Object
+ once sync.Once
+}
+
+// resolve returns the Object represented by obj, resolving lazy
+// objects as appropriate.
+func resolve(name string, obj Object) Object {
+ if lazy, ok := obj.(*lazyObject); ok {
+ lazy.once.Do(func() {
+ obj := lazy.resolve()
+
+ if _, ok := obj.(*lazyObject); ok {
+ panic("recursive lazy object")
+ }
+ if obj.Name() != name {
+ panic("lazy object has unexpected name")
+ }
+
+ if obj.Parent() == nil {
+ obj.setParent(lazy.parent)
+ }
+ lazy.obj = obj
+ })
+
+ obj = lazy.obj
+ }
+ return obj
+}
+
+// stub implementations so *lazyObject implements Object and we can
+// store them directly into Scope.elems.
+func (*lazyObject) Parent() *Scope { panic("unreachable") }
+func (*lazyObject) Pos() syntax.Pos { panic("unreachable") }
+func (*lazyObject) Pkg() *Package { panic("unreachable") }
+func (*lazyObject) Name() string { panic("unreachable") }
+func (*lazyObject) Type() Type { panic("unreachable") }
+func (*lazyObject) Exported() bool { panic("unreachable") }
+func (*lazyObject) Id() string { panic("unreachable") }
+func (*lazyObject) String() string { panic("unreachable") }
+func (*lazyObject) order() uint32 { panic("unreachable") }
+func (*lazyObject) color() color { panic("unreachable") }
+func (*lazyObject) setType(Type) { panic("unreachable") }
+func (*lazyObject) setOrder(uint32) { panic("unreachable") }
+func (*lazyObject) setColor(color color) { panic("unreachable") }
+func (*lazyObject) setParent(*Scope) { panic("unreachable") }
+func (*lazyObject) sameId(pkg *Package, name string) bool { panic("unreachable") }
+func (*lazyObject) scopePos() syntax.Pos { panic("unreachable") }
+func (*lazyObject) setScopePos(pos syntax.Pos) { panic("unreachable") }
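The lazyObject/InsertLazy machinery above is, at its core, a sync.Once around a resolve callback. A self-contained sketch of that pattern with ordinary strings standing in for Objects (names made up):

package main

import (
	"fmt"
	"sync"
)

// lazyEntry defers construction of a value until its first lookup,
// the way lazyObject defers construction of an imported Object.
type lazyEntry struct {
	once    sync.Once
	resolve func() string
	val     string
}

func (e *lazyEntry) get() string {
	e.once.Do(func() { e.val = e.resolve() }) // resolve runs at most once
	return e.val
}

func main() {
	e := &lazyEntry{resolve: func() string {
		fmt.Println("resolving...") // printed only on the first get
		return "value"
	}}
	fmt.Println(e.get())
	fmt.Println(e.get()) // cached; resolve is not called again
}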
diff --git a/src/cmd/compile/internal/types2/signature.go b/src/cmd/compile/internal/types2/signature.go
new file mode 100644
index 0000000000..a7edc5ac03
--- /dev/null
+++ b/src/cmd/compile/internal/types2/signature.go
@@ -0,0 +1,314 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+)
+
+// Disabled by default, but enabled when running tests (via types_test.go).
+var acceptMethodTypeParams bool
+
+// funcType type-checks a function or method type.
+func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams []*syntax.Field, ftyp *syntax.FuncType) {
+ check.openScope(ftyp, "function")
+ check.scope.isFunc = true
+ check.recordScope(ftyp, check.scope)
+ sig.scope = check.scope
+ defer check.closeScope()
+
+ var recvTyp syntax.Expr // rewritten receiver type; valid if != nil
+ if recvPar != nil {
+ // collect generic receiver type parameters, if any
+ // - a receiver type parameter is like any other type parameter, except that it is declared implicitly
+ // - the receiver specification acts as local declaration for its type parameters, which may be blank
+ _, rname, rparams := check.unpackRecv(recvPar.Type, true)
+ if len(rparams) > 0 {
+ // Blank identifiers don't get declared and regular type-checking of the instantiated
+ // parameterized receiver type expression fails in Checker.collectParams for the receiver.
+ // Identify blank type parameters and substitute each with a unique new identifier named
+ // "n_" (where n is the parameter index) and which cannot conflict with any user-defined
+ // name.
+ var smap map[*syntax.Name]*syntax.Name // substitution map from "_" to "n_" identifiers
+ for i, p := range rparams {
+ if p.Value == "_" {
+ new := *p
+ new.Value = fmt.Sprintf("%d_", i)
+ rparams[i] = &new // use n_ identifier instead of _ so it can be looked up
+ if smap == nil {
+ smap = make(map[*syntax.Name]*syntax.Name)
+ }
+ smap[p] = &new
+ }
+ }
+ if smap != nil {
+ // blank identifiers were found => use rewritten receiver type
+ recvTyp = isubst(recvPar.Type, smap)
+ }
+ // TODO(gri) rework declareTypeParams
+ sig.rparams = nil
+ for _, rparam := range rparams {
+ sig.rparams = check.declareTypeParam(sig.rparams, rparam)
+ }
+ // determine receiver type to get its type parameters
+ // and the respective type parameter bounds
+ var recvTParams []*TypeName
+ if rname != nil {
+ // recv should be a Named type (otherwise an error is reported elsewhere)
+ // Also: Don't report an error via genericType since it will be reported
+ // again when we type-check the signature.
+ // TODO(gri) maybe the receiver should be marked as invalid instead?
+ if recv := asNamed(check.genericType(rname, false)); recv != nil {
+ recvTParams = recv.TParams()
+ }
+ }
+ // provide type parameter bounds
+ // - only do this if we have the right number (otherwise an error is reported elsewhere)
+ if len(sig.rparams) == len(recvTParams) {
+ // We have a list of *TypeNames but we need a list of Types.
+ list := make([]Type, len(sig.rparams))
+ for i, t := range sig.rparams {
+ list[i] = t.typ
+ }
+ smap := makeSubstMap(recvTParams, list)
+ for i, tname := range sig.rparams {
+ bound := recvTParams[i].typ.(*TypeParam).bound
+ // bound is (possibly) parameterized in the context of the
+ // receiver type declaration. Substitute parameters for the
+ // current context.
+ // TODO(gri) should we assume now that bounds always exist?
+ // (no bound == empty interface)
+ if bound != nil {
+ bound = check.subst(tname.pos, bound, smap)
+ tname.typ.(*TypeParam).bound = bound
+ }
+ }
+ }
+ }
+ }
+
+ if tparams != nil {
+ sig.tparams = check.collectTypeParams(tparams)
+ // Always type-check method type parameters but complain if they are not enabled.
+ // (A separate check is needed when type-checking interface method signatures because
+ // they don't have a receiver specification.)
+ if recvPar != nil && !acceptMethodTypeParams {
+ check.error(ftyp, "methods cannot have type parameters")
+ }
+ }
+
+ // Value (non-type) parameters' scope starts in the function body. Use a temporary scope for their
+ // declarations and then squash that scope into the parent scope (and report any redeclarations at
+ // that time).
+ scope := NewScope(check.scope, nopos, nopos, "function body (temp. scope)")
+ var recvList []*Var // TODO(gri) remove the need for making a list here
+ if recvPar != nil {
+ recvList, _ = check.collectParams(scope, []*syntax.Field{recvPar}, recvTyp, false) // use rewritten receiver type, if any
+ }
+ params, variadic := check.collectParams(scope, ftyp.ParamList, nil, true)
+ results, _ := check.collectParams(scope, ftyp.ResultList, nil, false)
+ scope.Squash(func(obj, alt Object) {
+ var err error_
+ err.errorf(obj, "%s redeclared in this block", obj.Name())
+ err.recordAltDecl(alt)
+ check.report(&err)
+ })
+
+ if recvPar != nil {
+ // recv parameter list present (may be empty)
+ // spec: "The receiver is specified via an extra parameter section preceding the
+ // method name. That parameter section must declare a single parameter, the receiver."
+ var recv *Var
+ switch len(recvList) {
+ case 0:
+ // error reported by resolver
+ recv = NewParam(nopos, nil, "", Typ[Invalid]) // ignore recv below
+ default:
+ // more than one receiver
+ check.error(recvList[len(recvList)-1].Pos(), "method must have exactly one receiver")
+ fallthrough // continue with first receiver
+ case 1:
+ recv = recvList[0]
+ }
+
+ // TODO(gri) We should delay rtyp expansion to when we actually need the
+ // receiver; thus all checks here should be delayed to later.
+ rtyp, _ := deref(recv.typ)
+ rtyp = expand(rtyp)
+
+ // spec: "The receiver type must be of the form T or *T where T is a type name."
+ // (ignore invalid types - error was reported before)
+ if t := rtyp; t != Typ[Invalid] {
+ var err string
+ if T := asNamed(t); T != nil {
+ // spec: "The type denoted by T is called the receiver base type; it must not
+ // be a pointer or interface type and it must be declared in the same package
+ // as the method."
+ if T.obj.pkg != check.pkg {
+ err = "type not defined in this package"
+ if check.conf.CompilerErrorMessages {
+ check.errorf(recv.pos, "cannot define new methods on non-local type %s", recv.typ)
+ err = ""
+ }
+ } else {
+ switch u := optype(T).(type) {
+ case *Basic:
+ // unsafe.Pointer is treated like a regular pointer
+ if u.kind == UnsafePointer {
+ err = "unsafe.Pointer"
+ }
+ case *Pointer, *Interface:
+ err = "pointer or interface type"
+ }
+ }
+ } else if T := asBasic(t); T != nil {
+ err = "basic or unnamed type"
+ if check.conf.CompilerErrorMessages {
+ check.errorf(recv.pos, "cannot define new methods on non-local type %s", recv.typ)
+ err = ""
+ }
+ } else {
+ check.errorf(recv.pos, "invalid receiver type %s", recv.typ)
+ }
+ if err != "" {
+ check.errorf(recv.pos, "invalid receiver type %s (%s)", recv.typ, err)
+ // ok to continue
+ }
+ }
+ sig.recv = recv
+ }
+
+ sig.params = NewTuple(params...)
+ sig.results = NewTuple(results...)
+ sig.variadic = variadic
+}
+
+// collectParams declares the parameters of list in scope and returns the corresponding
+// variable list. If type0 != nil, it is used instead of the first type in list.
+func (check *Checker) collectParams(scope *Scope, list []*syntax.Field, type0 syntax.Expr, variadicOk bool) (params []*Var, variadic bool) {
+ if list == nil {
+ return
+ }
+
+ var named, anonymous bool
+
+ var typ Type
+ var prev syntax.Expr
+ for i, field := range list {
+ ftype := field.Type
+ // type-check type of grouped fields only once
+ if ftype != prev {
+ prev = ftype
+ if i == 0 && type0 != nil {
+ ftype = type0
+ }
+ if t, _ := ftype.(*syntax.DotsType); t != nil {
+ ftype = t.Elem
+ if variadicOk && i == len(list)-1 {
+ variadic = true
+ } else {
+ check.softErrorf(t, "can only use ... with final parameter in list")
+ // ignore ... and continue
+ }
+ }
+ typ = check.varType(ftype)
+ }
+ // The parser ensures that f.Tag is nil and we don't
+ // care if a constructed AST contains a non-nil tag.
+ if field.Name != nil {
+ // named parameter
+ name := field.Name.Value
+ if name == "" {
+ check.error(field.Name, invalidAST+"anonymous parameter")
+ // ok to continue
+ }
+ par := NewParam(field.Name.Pos(), check.pkg, name, typ)
+ check.declare(scope, field.Name, par, scope.pos)
+ params = append(params, par)
+ named = true
+ } else {
+ // anonymous parameter
+ par := NewParam(field.Pos(), check.pkg, "", typ)
+ check.recordImplicit(field, par)
+ params = append(params, par)
+ anonymous = true
+ }
+ }
+
+ if named && anonymous {
+ check.error(list[0], invalidAST+"list contains both named and anonymous parameters")
+ // ok to continue
+ }
+
+ // For a variadic function, change the last parameter's type from T to []T.
+ // Since we type-checked T rather than ...T, we also need to retro-actively
+ // record the type for ...T.
+ if variadic {
+ last := params[len(params)-1]
+ last.typ = &Slice{elem: last.typ}
+ check.recordTypeAndValue(list[len(list)-1].Type, typexpr, last.typ, nil)
+ }
+
+ return
+}
+
+// isubst returns an x with identifiers substituted per the substitution map smap.
+// isubst only handles the case of (valid) method receiver type expressions correctly.
+func isubst(x syntax.Expr, smap map[*syntax.Name]*syntax.Name) syntax.Expr {
+ switch n := x.(type) {
+ case *syntax.Name:
+ if alt := smap[n]; alt != nil {
+ return alt
+ }
+ // case *syntax.StarExpr:
+ // X := isubst(n.X, smap)
+ // if X != n.X {
+ // new := *n
+ // new.X = X
+ // return &new
+ // }
+ case *syntax.Operation:
+ if n.Op == syntax.Mul && n.Y == nil {
+ X := isubst(n.X, smap)
+ if X != n.X {
+ new := *n
+ new.X = X
+ return &new
+ }
+ }
+ case *syntax.IndexExpr:
+ Index := isubst(n.Index, smap)
+ if Index != n.Index {
+ new := *n
+ new.Index = Index
+ return &new
+ }
+ case *syntax.ListExpr:
+ var elems []syntax.Expr
+ for i, elem := range n.ElemList {
+ new := isubst(elem, smap)
+ if new != elem {
+ if elems == nil {
+ elems = make([]syntax.Expr, len(n.ElemList))
+ copy(elems, n.ElemList)
+ }
+ elems[i] = new
+ }
+ }
+ if elems != nil {
+ new := *n
+ new.ElemList = elems
+ return &new
+ }
+ case *syntax.ParenExpr:
+ return isubst(n.X, smap) // no need to keep parentheses
+ default:
+ // Other receiver type expressions are invalid.
+ // It's fine to ignore those here as they will
+ // be checked elsewhere.
+ }
+ return x
+}
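To see what the blank-identifier rewriting in funcType/isubst above is for, a small example (hypothetical type and method names) of a method receiver that declares a blank type parameter; internally the blank name is replaced by an index-based name such as "0_" so collectParams can still look it up:

package main

import "fmt"

type Pair[A, B any] struct {
	First  A
	Second B
}

// The blank receiver type parameter is rewritten internally (e.g. to "0_")
// before the signature is type-checked.
func (p Pair[_, B]) SecondValue() B { return p.Second }

func main() {
	p := Pair[int, string]{First: 1, Second: "two"}
	fmt.Println(p.SecondValue()) // two
}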
diff --git a/src/cmd/compile/internal/types2/sizeof_test.go b/src/cmd/compile/internal/types2/sizeof_test.go
index 236feb0404..3cb162764c 100644
--- a/src/cmd/compile/internal/types2/sizeof_test.go
+++ b/src/cmd/compile/internal/types2/sizeof_test.go
@@ -27,14 +27,13 @@ func TestSizeof(t *testing.T) {
{Pointer{}, 8, 16},
{Tuple{}, 12, 24},
{Signature{}, 44, 88},
- {Sum{}, 12, 24},
- {Interface{}, 60, 120},
+ {Union{}, 24, 48},
+ {Interface{}, 52, 104},
{Map{}, 16, 32},
{Chan{}, 12, 24},
- {Named{}, 68, 136},
+ {Named{}, 84, 160},
{TypeParam{}, 28, 48},
{instance{}, 52, 96},
- {bottom{}, 0, 0},
{top{}, 0, 0},
// Objects
diff --git a/src/cmd/compile/internal/types2/sizes.go b/src/cmd/compile/internal/types2/sizes.go
index aa0fbf40fc..cb789598e5 100644
--- a/src/cmd/compile/internal/types2/sizes.go
+++ b/src/cmd/compile/internal/types2/sizes.go
@@ -148,8 +148,8 @@ func (s *StdSizes) Sizeof(T Type) int64 {
}
offsets := s.Offsetsof(t.fields)
return offsets[n-1] + s.Sizeof(t.fields[n-1].typ)
- case *Sum:
- panic("Sizeof unimplemented for type sum")
+ case *Union:
+ panic("Sizeof unimplemented for union")
case *Interface:
return s.WordSize * 2
}
diff --git a/src/cmd/compile/internal/types2/stmt.go b/src/cmd/compile/internal/types2/stmt.go
index c3e646c80c..ab66432126 100644
--- a/src/cmd/compile/internal/types2/stmt.go
+++ b/src/cmd/compile/internal/types2/stmt.go
@@ -64,7 +64,8 @@ func (check *Checker) funcBody(decl *declInfo, name string, sig *Signature, body
func (check *Checker) usage(scope *Scope) {
var unused []*Var
- for _, elem := range scope.elems {
+ for name, elem := range scope.elems {
+ elem = resolve(name, elem)
if v, _ := elem.(*Var); v != nil && !v.used {
unused = append(unused, v)
}
@@ -912,12 +913,12 @@ func rangeKeyVal(typ Type, wantKey, wantVal bool) (Type, Type, string) {
msg = "receive from send-only channel"
}
return typ.elem, Typ[Invalid], msg
- case *Sum:
+ case *Union:
first := true
var key, val Type
var msg string
- typ.is(func(t Type) bool {
- k, v, m := rangeKeyVal(under(t), wantKey, wantVal)
+ typ.underIs(func(t Type) bool {
+ k, v, m := rangeKeyVal(t, wantKey, wantVal)
if k == nil || m != "" {
key, val, msg = k, v, m
return false
diff --git a/src/cmd/compile/internal/types2/struct.go b/src/cmd/compile/internal/types2/struct.go
new file mode 100644
index 0000000000..302b9886f4
--- /dev/null
+++ b/src/cmd/compile/internal/types2/struct.go
@@ -0,0 +1,165 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "strconv"
+)
+
+func (check *Checker) structType(styp *Struct, e *syntax.StructType) {
+ if e.FieldList == nil {
+ return
+ }
+
+ // struct fields and tags
+ var fields []*Var
+ var tags []string
+
+ // for double-declaration checks
+ var fset objset
+
+ // current field typ and tag
+ var typ Type
+ var tag string
+ add := func(ident *syntax.Name, embedded bool, pos syntax.Pos) {
+ if tag != "" && tags == nil {
+ tags = make([]string, len(fields))
+ }
+ if tags != nil {
+ tags = append(tags, tag)
+ }
+
+ name := ident.Value
+ fld := NewField(pos, check.pkg, name, typ, embedded)
+ // spec: "Within a struct, non-blank field names must be unique."
+ if name == "_" || check.declareInSet(&fset, pos, fld) {
+ fields = append(fields, fld)
+ check.recordDef(ident, fld)
+ }
+ }
+
+ // addInvalid adds an embedded field of invalid type to the struct for
+ // fields with errors; this keeps the number of struct fields in sync
+ // with the source as long as the fields are _ or have different names
+ // (issue #25627).
+ addInvalid := func(ident *syntax.Name, pos syntax.Pos) {
+ typ = Typ[Invalid]
+ tag = ""
+ add(ident, true, pos)
+ }
+
+ var prev syntax.Expr
+ for i, f := range e.FieldList {
+ // Fields declared syntactically with the same type (e.g.: a, b, c T)
+ // share the same type expression. Only check type if it's a new type.
+ if i == 0 || f.Type != prev {
+ typ = check.varType(f.Type)
+ prev = f.Type
+ }
+ tag = ""
+ if i < len(e.TagList) {
+ tag = check.tag(e.TagList[i])
+ }
+ if f.Name != nil {
+ // named field
+ add(f.Name, false, f.Name.Pos())
+ } else {
+ // embedded field
+ // spec: "An embedded type must be specified as a type name T or as a
+ // pointer to a non-interface type name *T, and T itself may not be a
+ // pointer type."
+ pos := syntax.StartPos(f.Type)
+ name := embeddedFieldIdent(f.Type)
+ if name == nil {
+ check.errorf(pos, "invalid embedded field type %s", f.Type)
+ name = &syntax.Name{Value: "_"} // TODO(gri) need to set position to pos
+ addInvalid(name, pos)
+ continue
+ }
+ add(name, true, pos)
+
+ // Because we have a name, typ must be of the form T or *T, where T is the name
+ // of a (named or alias) type, and t (= deref(typ)) must be the type of T.
+ // We must delay this check to the end because we don't want to instantiate
+ // (via under(t)) a possibly incomplete type.
+ embeddedTyp := typ // for closure below
+ embeddedPos := pos
+ check.later(func() {
+ t, isPtr := deref(embeddedTyp)
+ switch t := optype(t).(type) {
+ case *Basic:
+ if t == Typ[Invalid] {
+ // error was reported before
+ return
+ }
+ // unsafe.Pointer is treated like a regular pointer
+ if t.kind == UnsafePointer {
+ check.error(embeddedPos, "embedded field type cannot be unsafe.Pointer")
+ }
+ case *Pointer:
+ check.error(embeddedPos, "embedded field type cannot be a pointer")
+ case *Interface:
+ if isPtr {
+ check.error(embeddedPos, "embedded field type cannot be a pointer to an interface")
+ }
+ }
+ })
+ }
+ }
+
+ styp.fields = fields
+ styp.tags = tags
+}
+
+func embeddedFieldIdent(e syntax.Expr) *syntax.Name {
+ switch e := e.(type) {
+ case *syntax.Name:
+ return e
+ case *syntax.Operation:
+ if base := ptrBase(e); base != nil {
+ // *T is valid, but **T is not
+ if op, _ := base.(*syntax.Operation); op == nil || ptrBase(op) == nil {
+ return embeddedFieldIdent(e.X)
+ }
+ }
+ case *syntax.SelectorExpr:
+ return e.Sel
+ case *syntax.IndexExpr:
+ return embeddedFieldIdent(e.X)
+ }
+ return nil // invalid embedded field
+}
+
+func (check *Checker) declareInSet(oset *objset, pos syntax.Pos, obj Object) bool {
+ if alt := oset.insert(obj); alt != nil {
+ var err error_
+ err.errorf(pos, "%s redeclared", obj.Name())
+ err.recordAltDecl(alt)
+ check.report(&err)
+ return false
+ }
+ return true
+}
+
+func (check *Checker) tag(t *syntax.BasicLit) string {
+ // If t.Bad, an error was reported during parsing.
+ if t != nil && !t.Bad {
+ if t.Kind == syntax.StringLit {
+ if val, err := strconv.Unquote(t.Value); err == nil {
+ return val
+ }
+ }
+ check.errorf(t, invalidAST+"incorrect tag syntax: %q", t.Value)
+ }
+ return ""
+}
+
+func ptrBase(x *syntax.Operation) syntax.Expr {
+ if x.Op == syntax.Mul && x.Y == nil {
+ return x.X
+ }
+ return nil
+}
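A small example (made-up types) of the embedded-field shapes structType accepts, with the cases rejected by the deferred checks noted in comments:

package main

import "fmt"

type Base struct{ N int }
type Other struct{ S string }

type S struct {
	Base   // ok: embedded type name T
	*Other // ok: pointer to a non-interface type name *T
	// Embedding unsafe.Pointer, a defined pointer type, or a pointer to an
	// interface would be reported by the deferred checks above.
}

func main() {
	s := S{Base: Base{N: 1}, Other: &Other{S: "x"}}
	fmt.Println(s.N, s.S) // promoted fields: 1 x
}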
diff --git a/src/cmd/compile/internal/types2/subst.go b/src/cmd/compile/internal/types2/subst.go
index c8e428c183..dd8dd74161 100644
--- a/src/cmd/compile/internal/types2/subst.go
+++ b/src/cmd/compile/internal/types2/subst.go
@@ -76,7 +76,7 @@ func (check *Checker) instantiate(pos syntax.Pos, typ Type, targs []Type, poslis
var tparams []*TypeName
switch t := typ.(type) {
case *Named:
- tparams = t.tparams
+ tparams = t.TParams()
case *Signature:
tparams = t.tparams
defer func() {
@@ -119,90 +119,99 @@ func (check *Checker) instantiate(pos syntax.Pos, typ Type, targs []Type, poslis
// check bounds
for i, tname := range tparams {
- tpar := tname.typ.(*TypeParam)
- iface := tpar.Bound()
- if iface.Empty() {
- continue // no type bound
- }
-
- targ := targs[i]
-
// best position for error reporting
pos := pos
if i < len(poslist) {
pos = poslist[i]
}
-
- // The type parameter bound is parameterized with the same type parameters
- // as the instantiated type; before we can use it for bounds checking we
- // need to instantiate it with the type arguments with which we instantiate
- // the parameterized type.
- iface = check.subst(pos, iface, smap).(*Interface)
-
- // targ must implement iface (methods)
- // - check only if we have methods
- check.completeInterface(nopos, iface)
- if len(iface.allMethods) > 0 {
- // If the type argument is a pointer to a type parameter, the type argument's
- // method set is empty.
- // TODO(gri) is this what we want? (spec question)
- if base, isPtr := deref(targ); isPtr && asTypeParam(base) != nil {
- check.errorf(pos, "%s has no methods", targ)
- break
- }
- if m, wrong := check.missingMethod(targ, iface, true); m != nil {
- // TODO(gri) needs to print updated name to avoid major confusion in error message!
- // (print warning for now)
- // Old warning:
- // check.softErrorf(pos, "%s does not satisfy %s (warning: name not updated) = %s (missing method %s)", targ, tpar.bound, iface, m)
- if m.name == "==" {
- // We don't want to report "missing method ==".
- check.softErrorf(pos, "%s does not satisfy comparable", targ)
- } else if wrong != nil {
- // TODO(gri) This can still report uninstantiated types which makes the error message
- // more difficult to read then necessary.
- check.softErrorf(pos,
- "%s does not satisfy %s: wrong method signature\n\tgot %s\n\twant %s",
- targ, tpar.bound, wrong, m,
- )
- } else {
- check.softErrorf(pos, "%s does not satisfy %s (missing method %s)", targ, tpar.bound, m.name)
- }
- break
- }
+ // stop checking bounds after the first failure
+ if !check.satisfies(pos, targs[i], tname.typ.(*TypeParam), smap) {
+ break
}
+ }
- // targ's underlying type must also be one of the interface types listed, if any
- if iface.allTypes == nil {
- continue // nothing to do
- }
+ return check.subst(pos, typ, smap)
+}
- // If targ is itself a type parameter, each of its possible types, but at least one, must be in the
- // list of iface types (i.e., the targ type list must be a non-empty subset of the iface types).
- if targ := asTypeParam(targ); targ != nil {
- targBound := targ.Bound()
- if targBound.allTypes == nil {
- check.softErrorf(pos, "%s does not satisfy %s (%s has no type constraints)", targ, tpar.bound, targ)
- break
- }
- for _, t := range unpack(targBound.allTypes) {
- if !iface.isSatisfiedBy(t) {
- // TODO(gri) match this error message with the one below (or vice versa)
- check.softErrorf(pos, "%s does not satisfy %s (%s type constraint %s not found in %s)", targ, tpar.bound, targ, t, iface.allTypes)
- break
- }
+// satisfies reports whether the type argument targ satisfies the constraint of type parameter
+// tpar (after any of its type parameters have been substituted through smap).
+// A suitable error is reported if the result is false.
+func (check *Checker) satisfies(pos syntax.Pos, targ Type, tpar *TypeParam, smap *substMap) bool {
+ iface := tpar.Bound()
+ if iface.Empty() {
+ return true // no type bound
+ }
+
+ // The type parameter bound is parameterized with the same type parameters
+ // as the instantiated type; before we can use it for bounds checking we
+ // need to instantiate it with the type arguments with which we instantiate
+ // the parameterized type.
+ iface = check.subst(pos, iface, smap).(*Interface)
+
+ // targ must implement iface (methods)
+ // - check only if we have methods
+ check.completeInterface(nopos, iface)
+ if len(iface.allMethods) > 0 {
+ // If the type argument is a pointer to a type parameter, the type argument's
+ // method set is empty.
+ // TODO(gri) is this what we want? (spec question)
+ if base, isPtr := deref(targ); isPtr && asTypeParam(base) != nil {
+ check.errorf(pos, "%s has no methods", targ)
+ return false
+ }
+ if m, wrong := check.missingMethod(targ, iface, true); m != nil {
+ // TODO(gri) needs to print updated name to avoid major confusion in error message!
+ // (print warning for now)
+ // Old warning:
+ // check.softErrorf(pos, "%s does not satisfy %s (warning: name not updated) = %s (missing method %s)", targ, tpar.bound, iface, m)
+ if m.name == "==" {
+ // We don't want to report "missing method ==".
+ check.softErrorf(pos, "%s does not satisfy comparable", targ)
+ } else if wrong != nil {
+ // TODO(gri) This can still report uninstantiated types which makes the error message
+ // more difficult to read than necessary.
+ check.softErrorf(pos,
+ "%s does not satisfy %s: wrong method signature\n\tgot %s\n\twant %s",
+ targ, tpar.bound, wrong, m,
+ )
+ } else {
+ check.softErrorf(pos, "%s does not satisfy %s (missing method %s)", targ, tpar.bound, m.name)
}
- break
+ return false
}
+ }
- // Otherwise, targ's type or underlying type must also be one of the interface types listed, if any.
- if !iface.isSatisfiedBy(targ) {
- check.softErrorf(pos, "%s does not satisfy %s (%s not found in %s)", targ, tpar.bound, under(targ), iface.allTypes)
- break
+ // targ's underlying type must also be one of the interface types listed, if any
+ if iface.allTypes == nil {
+ return true // nothing to do
+ }
+
+ // If targ is itself a type parameter, each of its possible types, but at least one, must be in the
+ // list of iface types (i.e., the targ type list must be a non-empty subset of the iface types).
+ if targ := asTypeParam(targ); targ != nil {
+ targBound := targ.Bound()
+ if targBound.allTypes == nil {
+ check.softErrorf(pos, "%s does not satisfy %s (%s has no type constraints)", targ, tpar.bound, targ)
+ return false
}
+ return iface.is(func(typ Type, tilde bool) bool {
+ // TODO(gri) incorporate tilde information!
+ if !iface.isSatisfiedBy(typ) {
+ // TODO(gri) match this error message with the one below (or vice versa)
+ check.softErrorf(pos, "%s does not satisfy %s (%s type constraint %s not found in %s)", targ, tpar.bound, targ, typ, iface.allTypes)
+ return false
+ }
+ return true
+ })
}
- return check.subst(pos, typ, smap)
+ // Otherwise, targ's type or underlying type must also be one of the interface types listed, if any.
+ if !iface.isSatisfiedBy(targ) {
+ check.softErrorf(pos, "%s does not satisfy %s (%s not found in %s)", targ, tpar.bound, targ, iface.allTypes)
+ return false
+ }
+
+ return true
}
// subst returns the type typ with its type parameters tpars replaced by
@@ -241,7 +250,7 @@ func (subst *subster) typ(typ Type) Type {
// Call typOrNil if it's possible that typ is nil.
panic("nil typ")
- case *Basic, *bottom, *top:
+ case *Basic, *top:
// nothing to do
case *Array:
@@ -290,24 +299,20 @@ func (subst *subster) typ(typ Type) Type {
}
}
- case *Sum:
+ case *Union:
types, copied := subst.typeList(t.types)
if copied {
- // Don't do it manually, with a Sum literal: the new
- // types list may not be unique and NewSum may remove
- // duplicates.
- return NewSum(types)
+ // TODO(gri) Remove duplicates that may have crept in after substitution
+ // (unlikely but possible). This matters for the Identical
+ // predicate on unions.
+ return newUnion(types, t.tilde)
}
case *Interface:
methods, mcopied := subst.funcList(t.methods)
- types := t.types
- if t.types != nil {
- types = subst.typ(t.types)
- }
embeddeds, ecopied := subst.typeList(t.embeddeds)
- if mcopied || types != t.types || ecopied {
- iface := &Interface{methods: methods, types: types, embeddeds: embeddeds}
+ if mcopied || ecopied {
+ iface := &Interface{methods: methods, embeddeds: embeddeds}
if subst.check == nil {
panic("internal error: cannot instantiate interfaces yet")
}
@@ -342,7 +347,7 @@ func (subst *subster) typ(typ Type) Type {
}
}
- if t.tparams == nil {
+ if t.TParams() == nil {
dump(">>> %s is not parameterized", t)
return t // type is not parameterized
}
@@ -352,7 +357,7 @@ func (subst *subster) typ(typ Type) Type {
if len(t.targs) > 0 {
// already instantiated
dump(">>> %s already instantiated", t)
- assert(len(t.targs) == len(t.tparams))
+ assert(len(t.targs) == len(t.TParams()))
// For each (existing) type argument targ, determine if it needs
// to be substituted; i.e., if it is or contains a type parameter
// that has a type argument for it.
@@ -362,7 +367,7 @@ func (subst *subster) typ(typ Type) Type {
if new_targ != targ {
dump(">>> substituted %d targ %s => %s", i, targ, new_targ)
if new_targs == nil {
- new_targs = make([]Type, len(t.tparams))
+ new_targs = make([]Type, len(t.TParams()))
copy(new_targs, t.targs)
}
new_targs[i] = new_targ
@@ -392,7 +397,7 @@ func (subst *subster) typ(typ Type) Type {
// create a new named type and populate caches to avoid endless recursion
tname := NewTypeName(subst.pos, t.obj.pkg, t.obj.name, nil)
- named := subst.check.newNamed(tname, t, t.underlying, t.tparams, t.methods) // method signatures are updated lazily
+ named := subst.check.newNamed(tname, t, t.Underlying(), t.TParams(), t.methods) // method signatures are updated lazily
named.targs = new_targs
if subst.check != nil {
subst.check.typMap[h] = named
@@ -401,7 +406,7 @@ func (subst *subster) typ(typ Type) Type {
// do the substitution
dump(">>> subst %s with %s (new: %s)", t.underlying, subst.smap, new_targs)
- named.underlying = subst.typOrNil(t.underlying)
+ named.underlying = subst.typOrNil(t.Underlying())
named.fromRHS = named.underlying // for cycle detection (Checker.validType)
return named
diff --git a/src/cmd/compile/internal/types2/testdata/check/builtins.go2 b/src/cmd/compile/internal/types2/testdata/check/builtins.go2
index 3918d836b5..5bb67efec9 100644
--- a/src/cmd/compile/internal/types2/testdata/check/builtins.go2
+++ b/src/cmd/compile/internal/types2/testdata/check/builtins.go2
@@ -7,19 +7,19 @@
package builtins
type Bmc interface {
- type map[rune]string, chan int
+ ~map[rune]string | ~chan int
}
type Bms interface {
- type map[string]int, []int
+ ~map[string]int | ~[]int
}
type Bcs interface {
- type chan bool, []float64
+ ~chan bool | ~[]float64
}
type Bss interface {
- type []int, []string
+ ~[]int | ~[]string
}
func _[T any] () {
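For reference, the same kind of constraint written in the old type-list style these tests are migrating away from and in the new union/tilde style (illustrative names, not part of the patch):

// Illustrative sketch; Number and Sum are assumed names.
// Old prototype syntax (being removed):
//     type Number interface { type int, float64 }
// New union syntax used throughout the updated tests:
type Number interface{ ~int | ~float64 }

func Sum[T Number](xs []T) T {
	var s T
	for _, x := range xs {
		s += x
	}
	return s
}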
diff --git a/src/cmd/compile/internal/types2/testdata/check/decls0.src b/src/cmd/compile/internal/types2/testdata/check/decls0.src
index e78d8867e0..f051a4f2ac 100644
--- a/src/cmd/compile/internal/types2/testdata/check/decls0.src
+++ b/src/cmd/compile/internal/types2/testdata/check/decls0.src
@@ -4,7 +4,7 @@
// type declarations
-package decls0
+package go1_17 // don't permit non-interface elements in interfaces
import "unsafe"
@@ -185,10 +185,10 @@ func f2(x *f2 /* ERROR "not a type" */ ) {}
func f3() (x f3 /* ERROR "not a type" */ ) { return }
func f4() (x *f4 /* ERROR "not a type" */ ) { return }
-func (S0) m1(x S0 /* ERROR value .* is not a type */ .m1) {}
-func (S0) m2(x *S0 /* ERROR value .* is not a type */ .m2) {}
-func (S0) m3() (x S0 /* ERROR value .* is not a type */ .m3) { return }
-func (S0) m4() (x *S0 /* ERROR value .* is not a type */ .m4) { return }
+func (S0) m1(x S0 /* ERROR illegal cycle in method declaration */ .m1) {}
+func (S0) m2(x *S0 /* ERROR illegal cycle in method declaration */ .m2) {}
+func (S0) m3() (x S0 /* ERROR illegal cycle in method declaration */ .m3) { return }
+func (S0) m4() (x *S0 /* ERROR illegal cycle in method declaration */ .m4) { return }
// interfaces may not have any blank methods
type BlankI interface {
diff --git a/src/cmd/compile/internal/types2/testdata/check/issues.go2 b/src/cmd/compile/internal/types2/testdata/check/issues.go2
index 1c73b5da92..59dd4ae465 100644
--- a/src/cmd/compile/internal/types2/testdata/check/issues.go2
+++ b/src/cmd/compile/internal/types2/testdata/check/issues.go2
@@ -57,7 +57,7 @@ func _() {
// type with a type list constraint, all of the type argument's types in its
// bound, but at least one (!), must be in the type list of the bound of the
// corresponding parameterized type's type parameter.
-type T1[P interface{type uint}] struct{}
+type T1[P interface{~uint}] struct{}
func _[P any]() {
_ = T1[P /* ERROR P has no type constraints */ ]{}
@@ -65,7 +65,7 @@ func _[P any]() {
// This is the original (simplified) program causing the same issue.
type Unsigned interface {
- type uint
+ ~uint
}
type T2[U Unsigned] struct {
@@ -156,7 +156,7 @@ type inf2[T any] struct{ inf2 /* ERROR illegal cycle */ [T] }
// predicate disjunction in the implementation was wrong because if a type list
// contains both an integer and a floating-point type, the type parameter is
// neither an integer nor a floating-point number.
-func convert[T1, T2 interface{type int, uint, float32}](v T1) T2 {
+func convert[T1, T2 interface{~int | ~uint | ~float32}](v T1) T2 {
return T2(v)
}
@@ -168,12 +168,12 @@ func _() {
// both numeric, or both strings. The implementation had the same problem
// with this check as the conversion issue above (issue #39623).
-func issue39623[T interface{type int, string}](x, y T) T {
+func issue39623[T interface{~int | ~string}](x, y T) T {
return x + y
}
// Simplified, from https://go2goplay.golang.org/p/efS6x6s-9NI:
-func Sum[T interface{type int, string}](s []T) (sum T) {
+func Sum[T interface{~int | ~string}](s []T) (sum T) {
for _, v := range s {
sum += v
}
@@ -182,19 +182,19 @@ func Sum[T interface{type int, string}](s []T) (sum T) {
// Assignability of an unnamed pointer type to a type parameter that
// has a matching underlying type.
-func _[T interface{}, PT interface{type *T}] (x T) PT {
+func _[T interface{}, PT interface{~*T}] (x T) PT {
return &x
}
// Indexing of generic types containing type parameters in their type list:
-func at[T interface{ type []E }, E interface{}](x T, i int) E {
+func at[T interface{ ~[]E }, E interface{}](x T, i int) E {
return x[i]
}
// A generic type inside a function acts like a named type. Its underlying
// type is itself, its "operational type" is defined by the type list in
// the type bound, if any.
-func _[T interface{type int}](x T) {
+func _[T interface{~int}](x T) {
type myint int
var _ int = int(x)
var _ T = 42
@@ -203,24 +203,24 @@ func _[T interface{type int}](x T) {
// Indexing a generic type with an array type bound checks length.
// (Example by mdempsky@.)
-func _[T interface { type [10]int }](x T) {
+func _[T interface { ~[10]int }](x T) {
_ = x[9] // ok
_ = x[20 /* ERROR out of bounds */ ]
}
// Pointer indirection of a generic type.
-func _[T interface{ type *int }](p T) int {
+func _[T interface{ ~*int }](p T) int {
return *p
}
// Channel sends and receives on generic types.
-func _[T interface{ type chan int }](ch T) int {
+func _[T interface{ ~chan int }](ch T) int {
ch <- 0
return <- ch
}
// Calling of a generic variable.
-func _[T interface{ type func() }](f T) {
+func _[T interface{ ~func() }](f T) {
f()
go f()
}
@@ -232,9 +232,9 @@ func _[T interface{ type func() }](f T) {
// type parameter that was substituted with a defined type.
// Test case from an (originally) failing example.
-type sliceOf[E any] interface{ type []E }
+type sliceOf[E any] interface{ ~[]E }
-func append[T interface{}, S sliceOf[T], T2 interface{ type T }](s S, t ...T2) S
+func append[T interface{}, S sliceOf[T], T2 interface{ T }](s S, t ...T2) S
var f func()
var cancelSlice []context.CancelFunc
diff --git a/src/cmd/compile/internal/types2/testdata/check/issues.src b/src/cmd/compile/internal/types2/testdata/check/issues.src
index 21aa208cc7..60d23b3c3b 100644
--- a/src/cmd/compile/internal/types2/testdata/check/issues.src
+++ b/src/cmd/compile/internal/types2/testdata/check/issues.src
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package issues
+package go1_17 // don't permit non-interface elements in interfaces
import (
"fmt"
diff --git a/src/cmd/compile/internal/types2/testdata/check/linalg.go2 b/src/cmd/compile/internal/types2/testdata/check/linalg.go2
index 0d27603a58..efc090a1d1 100644
--- a/src/cmd/compile/internal/types2/testdata/check/linalg.go2
+++ b/src/cmd/compile/internal/types2/testdata/check/linalg.go2
@@ -9,10 +9,10 @@ import "math"
// Numeric is a type bound that matches any numeric type.
// It would likely be in a constraints package in the standard library.
type Numeric interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64,
- complex64, complex128
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~complex64 | ~complex128
}
func DotProduct[T Numeric](s1, s2 []T) T {
@@ -42,14 +42,14 @@ func AbsDifference[T NumericAbs[T]](a, b T) T {
// OrderedNumeric is a type bound that matches numeric types that support the < operator.
type OrderedNumeric interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64
}
// Complex is a type bound that matches the two complex types, which do not have a < operator.
type Complex interface {
- type complex64, complex128
+ ~complex64 | ~complex128
}
// OrderedAbs is a helper type that defines an Abs method for
diff --git a/src/cmd/compile/internal/types2/testdata/check/tinference.go2 b/src/cmd/compile/internal/types2/testdata/check/tinference.go2
index a53fde0a2a..2fdb39ca7a 100644
--- a/src/cmd/compile/internal/types2/testdata/check/tinference.go2
+++ b/src/cmd/compile/internal/types2/testdata/check/tinference.go2
@@ -8,28 +8,28 @@ import "strconv"
type any interface{}
-func f0[A any, B interface{type C}, C interface{type D}, D interface{type A}](A, B, C, D)
+func f0[A any, B interface{~C}, C interface{~D}, D interface{~A}](A, B, C, D)
func _() {
f := f0[string]
f("a", "b", "c", "d")
f0("a", "b", "c", "d")
}
-func f1[A any, B interface{type A}](A, B)
+func f1[A any, B interface{~A}](A, B)
func _() {
f := f1[int]
f(int(0), int(0))
f1(int(0), int(0))
}
-func f2[A any, B interface{type []A}](A, B)
+func f2[A any, B interface{~[]A}](A, B)
func _() {
f := f2[byte]
f(byte(0), []byte{})
f2(byte(0), []byte{})
}
-func f3[A any, B interface{type C}, C interface{type *A}](A, B, C)
+func f3[A any, B interface{~C}, C interface{~*A}](A, B, C)
func _() {
f := f3[int]
var x int
@@ -37,7 +37,7 @@ func _() {
f3(x, &x, &x)
}
-func f4[A any, B interface{type []C}, C interface{type *A}](A, B, C)
+func f4[A any, B interface{~[]C}, C interface{~*A}](A, B, C)
func _() {
f := f4[int]
var x int
@@ -45,14 +45,14 @@ func _() {
f4(x, []*int{}, &x)
}
-func f5[A interface{type struct{b B; c C}}, B any, C interface{type *B}](x B) A
+func f5[A interface{~struct{b B; c C}}, B any, C interface{~*B}](x B) A
func _() {
x := f5(1.2)
var _ float64 = x.b
var _ float64 = *x.c
}
-func f6[A any, B interface{type struct{f []A}}](B) A
+func f6[A any, B interface{~struct{f []A}}](B) A
func _() {
x := f6(struct{f []string}{})
var _ string = x
@@ -60,11 +60,11 @@ func _() {
// TODO(gri) Need to flag invalid recursive constraints. At the
// moment these cause infinite recursions and stack overflow.
-// func f7[A interface{type B}, B interface{type A}]()
+// func f7[A interface{type B}, B interface{~A}]()
// More realistic examples
-func Double[S interface{ type []E }, E interface{ type int, int8, int16, int32, int64 }](s S) S {
+func Double[S interface{ ~[]E }, E interface{ ~int | ~int8 | ~int16 | ~int32 | ~int64 }](s S) S {
r := make(S, len(s))
for i, v := range s {
r[i] = v + v
@@ -80,7 +80,7 @@ var _ = Double(MySlice{1})
type Setter[B any] interface {
Set(string)
- type *B
+ ~*B
}
func FromStrings[T interface{}, PT Setter[T]](s []string) []T {
diff --git a/src/cmd/compile/internal/types2/testdata/check/typeinst2.go2 b/src/cmd/compile/internal/types2/testdata/check/typeinst2.go2
index 6e2104a515..14d8f0ea8c 100644
--- a/src/cmd/compile/internal/types2/testdata/check/typeinst2.go2
+++ b/src/cmd/compile/internal/types2/testdata/check/typeinst2.go2
@@ -148,15 +148,15 @@ func _[T any](r R2[T, int], p *R2[string, T]) {
p.pm()
}
-// An interface can (explicitly) declare at most one type list.
+// It is ok to have multiple embedded unions.
type _ interface {
m0()
- type int, string, bool
- type /* ERROR multiple type lists */ float32, float64
+ ~int | ~string | ~bool
+ ~float32 | ~float64
m1()
m2()
- type /* ERROR multiple type lists */ complex64, complex128
- type /* ERROR multiple type lists */ rune
+ ~complex64 | ~complex128
+ ~rune
}
// Interface type lists may contain each type at most once.
@@ -164,23 +164,24 @@ type _ interface {
// for them to be all in a single list, and we report the error
// as well.)
type _ interface {
- type int, int /* ERROR duplicate type int */
- type /* ERROR multiple type lists */ int /* ERROR duplicate type int */
+ ~int|~int /* ERROR duplicate term int */
+ ~int|int /* ERROR duplicate term int */
+ int|int /* ERROR duplicate term int */
}
type _ interface {
- type struct{f int}, struct{g int}, struct /* ERROR duplicate type */ {f int}
+ ~struct{f int} | ~struct{g int} | ~struct /* ERROR duplicate term */ {f int}
}
// Interface type lists can contain any type, incl. *Named types.
// Verify that we use the underlying type to compute the operational type.
type MyInt int
-func add1[T interface{type MyInt}](x T) T {
+func add1[T interface{MyInt}](x T) T {
return x + 1
}
type MyString string
-func double[T interface{type MyInt, MyString}](x T) T {
+func double[T interface{MyInt|MyString}](x T) T {
return x + x
}
@@ -189,15 +190,15 @@ func double[T interface{type MyInt, MyString}](x T) T {
// type lists.
type E0 interface {
- type int, bool, string
+ ~int | ~bool | ~string
}
type E1 interface {
- type int, float64, string
+ ~int | ~float64 | ~string
}
type E2 interface {
- type float64
+ ~float64
}
type I0 interface {
@@ -246,7 +247,7 @@ var _ = f12[float64]
type I0_ interface {
E0
- type int
+ ~int
}
func f0_[T I0_]()
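A small illustration of the distinction the MyInt/MyString tests above rely on (assumed example, not from the patch): without a tilde, only the named type itself is in the type set; with ~int, any type whose underlying type is int would be accepted.

// Illustrative sketch; names are assumed, not from the patch.
package p

type MyInt int

func add1[T interface{ MyInt }](x T) T { return x + 1 }

var _ = add1(MyInt(41))  // ok: MyInt is the only type in the set
// var _ = add1[int](42) // error: int does not satisfy interface{ MyInt }; interface{ ~int } would allow it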
diff --git a/src/cmd/compile/internal/types2/testdata/check/typeparams.go2 b/src/cmd/compile/internal/types2/testdata/check/typeparams.go2
index badda01105..4074ef17ea 100644
--- a/src/cmd/compile/internal/types2/testdata/check/typeparams.go2
+++ b/src/cmd/compile/internal/types2/testdata/check/typeparams.go2
@@ -52,22 +52,22 @@ func swapswap[A, B any](a A, b B) (A, B) {
type F[A, B any] func(A, B) (B, A)
-func min[T interface{ type int }](x, y T) T {
+func min[T interface{ ~int }](x, y T) T {
if x < y {
return x
}
return y
}
-func _[T interface{type int, float32}](x, y T) bool { return x < y }
+func _[T interface{~int | ~float32}](x, y T) bool { return x < y }
func _[T any](x, y T) bool { return x /* ERROR cannot compare */ < y }
-func _[T interface{type int, float32, bool}](x, y T) bool { return x /* ERROR cannot compare */ < y }
+func _[T interface{~int | ~float32 | ~bool}](x, y T) bool { return x /* ERROR cannot compare */ < y }
func _[T C1[T]](x, y T) bool { return x /* ERROR cannot compare */ < y }
func _[T C2[T]](x, y T) bool { return x < y }
type C1[T any] interface{}
-type C2[T any] interface{ type int, float32 }
+type C2[T any] interface{ ~int | ~float32 }
func new[T any]() *T {
var x T
@@ -95,48 +95,48 @@ var _ = f3[int, rune, bool](1, struct{x rune}{}, nil)
// indexing
func _[T any] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
-func _[T interface{ type int }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
-func _[T interface{ type string }] (x T, i int) { _ = x[i] }
-func _[T interface{ type []int }] (x T, i int) { _ = x[i] }
-func _[T interface{ type [10]int, *[20]int, map[int]int }] (x T, i int) { _ = x[i] }
-func _[T interface{ type string, []byte }] (x T, i int) { _ = x[i] }
-func _[T interface{ type []int, [1]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
-func _[T interface{ type string, []rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~int }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~string }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[]int }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[10]int | ~*[20]int | ~map[int]int }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~string | ~[]byte }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[]int | ~[1]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~string | ~[]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
// indexing with various combinations of map types in type lists (see issue #42616)
-func _[T interface{ type []E, map[int]E }, E any](x T, i int) { _ = x[i] }
-func _[T interface{ type []E }, E any](x T, i int) { _ = &x[i] }
-func _[T interface{ type map[int]E }, E any](x T, i int) { _, _ = x[i] } // comma-ok permitted
-func _[T interface{ type []E, map[int]E }, E any](x T, i int) { _ = &x /* ERROR cannot take address */ [i] }
-func _[T interface{ type []E, map[int]E, map[uint]E }, E any](x T, i int) { _ = x /* ERROR cannot index */ [i] } // different map element types
-func _[T interface{ type []E, map[string]E }, E any](x T, i int) { _ = x[i /* ERROR cannot use i */ ] }
+func _[T interface{ ~[]E | ~map[int]E }, E any](x T, i int) { _ = x[i] }
+func _[T interface{ ~[]E }, E any](x T, i int) { _ = &x[i] }
+func _[T interface{ ~map[int]E }, E any](x T, i int) { _, _ = x[i] } // comma-ok permitted
+func _[T interface{ ~[]E | ~map[int]E }, E any](x T, i int) { _ = &x /* ERROR cannot take address */ [i] }
+func _[T interface{ ~[]E | ~map[int]E | ~map[uint]E }, E any](x T, i int) { _ = x /* ERROR cannot index */ [i] } // different map element types
+func _[T interface{ ~[]E | ~map[string]E }, E any](x T, i int) { _ = x[i /* ERROR cannot use i */ ] }
// slicing
// TODO(gri) implement this
-func _[T interface{ type string }] (x T, i, j, k int) { _ = x /* ERROR invalid operation */ [i:j:k] }
+func _[T interface{ ~string }] (x T, i, j, k int) { _ = x /* ERROR invalid operation */ [i:j:k] }
// len/cap built-ins
func _[T any](x T) { _ = len(x /* ERROR invalid argument */ ) }
-func _[T interface{ type int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string, []byte, int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string }](x T) { _ = len(x) }
-func _[T interface{ type [10]int }](x T) { _ = len(x) }
-func _[T interface{ type []byte }](x T) { _ = len(x) }
-func _[T interface{ type map[int]int }](x T) { _ = len(x) }
-func _[T interface{ type chan int }](x T) { _ = len(x) }
-func _[T interface{ type string, []byte, chan int }](x T) { _ = len(x) }
+func _[T interface{ ~int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string | ~[]byte | ~int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string }](x T) { _ = len(x) }
+func _[T interface{ ~[10]int }](x T) { _ = len(x) }
+func _[T interface{ ~[]byte }](x T) { _ = len(x) }
+func _[T interface{ ~map[int]int }](x T) { _ = len(x) }
+func _[T interface{ ~chan int }](x T) { _ = len(x) }
+func _[T interface{ ~string | ~[]byte | ~chan int }](x T) { _ = len(x) }
func _[T any](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string, []byte, int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type [10]int }](x T) { _ = cap(x) }
-func _[T interface{ type []byte }](x T) { _ = cap(x) }
-func _[T interface{ type map[int]int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type chan int }](x T) { _ = cap(x) }
-func _[T interface{ type []byte, chan int }](x T) { _ = cap(x) }
+func _[T interface{ ~int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string | ~[]byte | ~int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~[10]int }](x T) { _ = cap(x) }
+func _[T interface{ ~[]byte }](x T) { _ = cap(x) }
+func _[T interface{ ~map[int]int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~chan int }](x T) { _ = cap(x) }
+func _[T interface{ ~[]byte | ~chan int }](x T) { _ = cap(x) }
// range iteration
@@ -144,7 +144,7 @@ func _[T interface{}](x T) {
for range x /* ERROR cannot range */ {}
}
-func _[T interface{ type string, []string }](x T) {
+func _[T interface{ ~string | ~[]string }](x T) {
for range x {}
for i := range x { _ = i }
for i, _ := range x { _ = i }
@@ -156,23 +156,23 @@ func _[T interface{ type string, []string }](x T) {
}
-func _[T interface{ type string, []rune, map[int]rune }](x T) {
+func _[T interface{ ~string | ~[]rune | ~map[int]rune }](x T) {
for _, e := range x { _ = e }
for i, e := range x { _ = i; _ = e }
}
-func _[T interface{ type string, []rune, map[string]rune }](x T) {
+func _[T interface{ ~string | ~[]rune | ~map[string]rune }](x T) {
for _, e := range x { _ = e }
for i, e := range x /* ERROR must have the same key type */ { _ = e }
}
-func _[T interface{ type string, chan int }](x T) {
+func _[T interface{ ~string | ~chan int }](x T) {
for range x {}
for i := range x { _ = i }
for i, _ := range x { _ = i } // TODO(gri) should get an error here: channels only return one value
}
-func _[T interface{ type string, chan<-int }](x T) {
+func _[T interface{ ~string | ~chan<-int }](x T) {
for i := range x /* ERROR send-only channel */ { _ = i }
}
@@ -400,7 +400,7 @@ func _[T any](x T) {
}
}
-func _[T interface{type int}](x T) {
+func _[T interface{~int}](x T) {
_ = x /* ERROR not an interface */ .(int)
switch x /* ERROR not an interface */ .(type) {
}
diff --git a/src/cmd/compile/internal/types2/testdata/examples/constraints.go2 b/src/cmd/compile/internal/types2/testdata/examples/constraints.go2
new file mode 100644
index 0000000000..d9805fe694
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/examples/constraints.go2
@@ -0,0 +1,60 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file shows some examples of generic constraint interfaces.
+
+package p
+
+type (
+ // Type lists are processed as unions but an error is reported.
+ // TODO(gri) remove this once the parser doesn't accept type lists anymore.
+ _ interface{
+ type /* ERROR use generalized embedding syntax instead of a type list */ int
+ }
+ _ interface{
+ type /* ERROR use generalized embedding syntax instead of a type list */ int
+ type float32
+ }
+)
+
+type (
+ // Arbitrary types may be embedded like interfaces.
+ _ interface{int}
+ _ interface{~int}
+
+ // Types may be combined into a union.
+ _ interface{int|~string}
+
+ // Union terms must be unique independent of whether they are ~ or not.
+ _ interface{int|int /* ERROR duplicate term int */ }
+ _ interface{int|~ /* ERROR duplicate term int */ int }
+ _ interface{~int|~ /* ERROR duplicate term int */ int }
+
+ // For now we do not permit interfaces with ~ or in unions.
+ _ interface{~ /* ERROR cannot use interface */ interface{}}
+ _ interface{int|interface /* ERROR cannot use interface */ {}}
+)
+
+type (
+ // Tilde is not permitted on defined types or interfaces.
+ foo int
+ bar interface{}
+ _ interface{foo}
+ _ interface{~ /* ERROR invalid use of ~ */ foo }
+ _ interface{~ /* ERROR invalid use of ~ */ bar }
+)
+
+// Multiple embedded union elements are intersected. The order in which they
+// appear in the interface doesn't matter since intersection is a symmetric
+// operation.
+
+type myInt1 int
+type myInt2 int
+
+func _[T interface{ myInt1|myInt2; ~int }]() T { return T(0) }
+func _[T interface{ ~int; myInt1|myInt2 }]() T { return T(0) }
+
+// Here the intersections are empty - there's no type that's in the type set of T.
+func _[T interface{ myInt1|myInt2; int }]() T { return T(0 /* ERROR cannot convert */ ) }
+func _[T interface{ int; myInt1|myInt2 }]() T { return T(0 /* ERROR cannot convert */ ) }
diff --git a/src/cmd/compile/internal/types2/testdata/examples/functions.go2 b/src/cmd/compile/internal/types2/testdata/examples/functions.go2
index 0c2a408f02..154d09f528 100644
--- a/src/cmd/compile/internal/types2/testdata/examples/functions.go2
+++ b/src/cmd/compile/internal/types2/testdata/examples/functions.go2
@@ -98,7 +98,7 @@ func g2b[P, Q any](x P, y Q) {
// Here's an example of a recursive function call with variadic
// arguments and type inference inferring the type parameter of
// the caller (i.e., itself).
-func max[T interface{ type int }](x ...T) T {
+func max[T interface{ ~int }](x ...T) T {
var x0 T
if len(x) > 0 {
x0 = x[0]
diff --git a/src/cmd/compile/internal/types2/testdata/examples/inference.go2 b/src/cmd/compile/internal/types2/testdata/examples/inference.go2
index b47ce75805..75d47d2c9b 100644
--- a/src/cmd/compile/internal/types2/testdata/examples/inference.go2
+++ b/src/cmd/compile/internal/types2/testdata/examples/inference.go2
@@ -7,7 +7,7 @@
package p
type Ordered interface {
- type int, float64, string
+ ~int|~float64|~string
}
func min[T Ordered](x, y T) T
@@ -54,7 +54,7 @@ func _() {
mixed[int, string](1.1 /* ERROR cannot use 1.1 */ , "", false)
}
-func related1[Slice interface{type []Elem}, Elem any](s Slice, e Elem)
+func related1[Slice interface{~[]Elem}, Elem any](s Slice, e Elem)
func _() {
// related1 can be called with explicit instantiation.
@@ -78,7 +78,7 @@ func _() {
related1(si, "foo" /* ERROR cannot use "foo" */ )
}
-func related2[Elem any, Slice interface{type []Elem}](e Elem, s Slice)
+func related2[Elem any, Slice interface{~[]Elem}](e Elem, s Slice)
func _() {
// related2 can be called with explicit instantiation.
diff --git a/src/cmd/compile/internal/types2/testdata/examples/types.go2 b/src/cmd/compile/internal/types2/testdata/examples/types.go2
index a7825ed2d9..66e7a7b90e 100644
--- a/src/cmd/compile/internal/types2/testdata/examples/types.go2
+++ b/src/cmd/compile/internal/types2/testdata/examples/types.go2
@@ -159,7 +159,7 @@ type _ struct {
// are type parameters. As with ordinary type definitions, the
// types underlying properties are "inherited" but the methods
// are not.
-func _[T interface{ m(); type int }]() {
+func _[T interface{ m(); ~int }]() {
type L T
var x L
@@ -232,11 +232,11 @@ func _[A Adder[A], B Adder[B], C Adder[A]]() {
// The type of variables (incl. parameters and return values) cannot
// be an interface with type constraints or be/embed comparable.
type I interface {
- type int
+ ~int
}
var (
- _ interface /* ERROR contains type constraints */ {type int}
+ _ interface /* ERROR contains type constraints */ {~int}
_ I /* ERROR contains type constraints */
)
@@ -267,7 +267,7 @@ func _() {
// (If a type list contains just a single const type, we could
// allow it, but such type lists don't make much sense in the
// first place.)
-func _[T interface { type int, float64 }]() {
+func _[T interface{~int|~float64}]() {
// not valid
const _ = T /* ERROR not constant */ (0)
const _ T /* ERROR invalid constant type T */ = 1
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39634.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39634.go2
index 2c1299feb0..6d002f5d2f 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39634.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39634.go2
@@ -31,13 +31,13 @@ type x7[A any] struct{ foo7 }
func main7() { var _ foo7 = x7[int]{} }
// crash 8
-type foo8[A any] interface { type A }
+type foo8[A any] interface { ~A }
func bar8[A foo8[A]](a A) {}
func main8() {}
// crash 9
-type foo9[A any] interface { type foo9 /* ERROR interface contains type constraints */ [A] }
-func _() { var _ = new(foo9 /* ERROR interface contains type constraints */ [int]) }
+type foo9[A any] interface { foo9 /* ERROR illegal cycle */ [A] }
+func _() { var _ = new(foo9 /* ERROR illegal cycle */ [int]) }
// crash 12
var u /* ERROR cycle */ , i [func /* ERROR used as value */ /* ERROR used as value */ (u, c /* ERROR undeclared */ /* ERROR undeclared */ ) {}(0, len /* ERROR must be called */ /* ERROR must be called */ )]c /* ERROR undeclared */ /* ERROR undeclared */
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39680.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39680.go2
index 9bc26f3546..01eadd2dbf 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39680.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39680.go2
@@ -7,13 +7,13 @@ package p
import "fmt"
// Minimal test case.
-func _[T interface{type T}](x T) T{
+func _[T interface{~T}](x T) T{
return x
}
// Test case from issue.
type constr[T any] interface {
- type T
+ ~T
}
func Print[T constr[T]](s []T) {
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39693.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39693.go2
index 316ab1982e..301c13be41 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39693.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39693.go2
@@ -4,11 +4,20 @@
package p
-type Number interface {
- int /* ERROR int is not an interface */
- float64 /* ERROR float64 is not an interface */
+type Number1 interface {
+ // embedding non-interface types is permitted
+ int
+ float64
}
-func Add[T Number](a, b T) T {
+func Add1[T Number1](a, b T) T {
return a /* ERROR not defined */ + b
}
+
+type Number2 interface {
+ int|float64
+}
+
+func Add2[T Number2](a, b T) T {
+ return a + b
+}
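A self-contained illustration of the behavior this test pins down (assumed names, Go 1.18 type-set semantics): listing elements on separate interface lines intersects them, while an explicit union produces the set of both types, so only the union supports +.

// Illustrative sketch; Both, Either, and add are assumed names.
package p

type Both interface { // elements on separate lines are intersected: no type is both int and float64
	int
	float64
}

type Either interface { // a union: the type set is {int, float64}
	int | float64
}

func add[T Either](a, b T) T { return a + b } // ok: every type in the set supports +

// func bad[T Both](a, b T) T { return a + b } // Both has an empty type set; no type argument can satisfy it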
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39699.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39699.go2
index 75491e7e26..72f83997c2 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39699.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39699.go2
@@ -8,7 +8,7 @@ type T0 interface{
}
type T1 interface{
- type int
+ ~int
}
type T2 interface{
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39711.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39711.go2
index df621a4c17..85eb0a78fe 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39711.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39711.go2
@@ -7,5 +7,7 @@ package p
// Do not report a duplicate type error for this type list.
// (Check types after interfaces have been completed.)
type _ interface {
- type interface{ Error() string }, interface{ String() string }
+ // TODO(gri) Once we have full type sets we can enable this again.
+ // For now we don't permit interfaces in type lists.
+ // type interface{ Error() string }, interface{ String() string }
}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39723.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39723.go2
index 55464e6b77..367b3f1360 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39723.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39723.go2
@@ -6,4 +6,4 @@ package p
// A constraint must be an interface; it cannot
// be a type parameter, for instance.
-func _[A interface{ type interface{} }, B A /* ERROR not an interface */ ]()
+func _[A interface{ ~int }, B A /* ERROR not an interface */ ]()
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39755.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39755.go2
index b7ab68818e..257b73a2fb 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39755.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39755.go2
@@ -4,14 +4,14 @@
package p
-func _[T interface{type map[string]int}](x T) {
+func _[T interface{~map[string]int}](x T) {
_ = x == nil
}
// simplified test case from issue
type PathParamsConstraint interface {
- type map[string]string, []struct{key, value string}
+ ~map[string]string | ~[]struct{key, value string}
}
type PathParams[T PathParamsConstraint] struct {
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39948.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39948.go2
index c2b460902c..6372397ed9 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39948.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39948.go2
@@ -2,7 +2,13 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package p
+// TODO(gri) Eventually, once we disallow type lists, we need to
+// adjust this code: for 1.17 we don't accept type parameters,
+// and for 1.18 this code is valid.
+// Leaving for now so we can see that existing errors
+// are being reported.
+
+package go1_17 // don't permit non-interface elements in interfaces
type T[P any] interface{
P // ERROR P is a type parameter, not an interface
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue41124.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue41124.go2
index 61f766bcbd..ab535049dd 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue41124.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue41124.go2
@@ -7,7 +7,7 @@ package p
// Test case from issue.
type Nat interface {
- type Zero, Succ
+ Zero|Succ
}
type Zero struct{}
@@ -22,7 +22,7 @@ type I1 interface {
}
type I2 interface {
- type int
+ ~int
}
type I3 interface {
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue42758.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue42758.go2
index 698cb8a16b..bf0031f5d2 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue42758.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue42758.go2
@@ -17,7 +17,7 @@ func _[T any](x interface{}){
}
type constraint interface {
- type int
+ ~int
}
func _[T constraint](x interface{}){
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45548.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45548.go2
index b1e42497e8..b8ba0ad4a7 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45548.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45548.go2
@@ -4,7 +4,7 @@
package p
-func f[F interface{type *Q}, G interface{type *R}, Q, R any](q Q, r R) {}
+func f[F interface{~*Q}, G interface{~*R}, Q, R any](q Q, r R) {}
func _() {
f[*float64, *int](1, 2)
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45635.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45635.go2
index 65662cdc76..e9b57ae8f1 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45635.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45635.go2
@@ -13,7 +13,7 @@ type N[T any] struct{}
var _ N[] /* ERROR expecting type */
type I interface {
- type map[int]int, []int
+ ~map[int]int | ~[]int
}
func _[T I](i, j int) {
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45985.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45985.go2
index 7678e348ef..f25b9d2b26 100644
--- a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45985.go2
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45985.go2
@@ -5,7 +5,7 @@
package issue45985
// TODO(gri): this error should be on app[int] below.
-func app[S /* ERROR "type S = S does not match" */ interface{ type []T }, T any](s S, e T) S {
+func app[S /* ERROR "type S = S does not match" */ interface{ ~[]T }, T any](s S, e T) S {
return append(s, e)
}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46275.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46275.go2
new file mode 100644
index 0000000000..f41ae26e4b
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46275.go2
@@ -0,0 +1,26 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue46275
+
+type N[T any] struct {
+ *N[T]
+ t T
+}
+
+func (n *N[T]) Elem() T {
+ return n.t
+}
+
+type I interface {
+ Elem() string
+}
+
+func _() {
+ var n1 *N[string]
+ var _ I = n1
+ type NS N[string]
+ var n2 *NS
+ var _ I = n2
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46583.src b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46583.src
new file mode 100644
index 0000000000..da1f1ffbba
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46583.src
@@ -0,0 +1,28 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T1 struct{}
+func (t T1) m(int) {}
+var f1 func(T1)
+
+type T2 struct{}
+func (t T2) m(x int) {}
+var f2 func(T2)
+
+type T3 struct{}
+func (T3) m(int) {}
+var f3 func(T3)
+
+type T4 struct{}
+func (T4) m(x int) {}
+var f4 func(T4)
+
+func _() {
+ f1 = T1 /* ERROR func\(T1, int\) */ .m
+ f2 = T2 /* ERROR func\(t T2, x int\) */ .m
+ f3 = T3 /* ERROR func\(T3, int\) */ .m
+ f4 = T4 /* ERROR func\(_ T4, x int\) */ .m
+}
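The new test above checks the error text printed for method expressions. Illustratively (assumed example, standard Go semantics): a method expression T.m has type func(T, params...), with the receiver as the first parameter, so it is not assignable to func(T).

// Illustrative sketch; S and m are assumed names.
package p

type S struct{}

func (S) m(int) {}

var _ func(S, int) = S.m // ok: the receiver becomes the first parameter
// var _ func(S) = S.m   // error: S.m has type func(S, int)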
diff --git a/src/cmd/compile/internal/types2/type.go b/src/cmd/compile/internal/types2/type.go
index e6c260ff67..10cb651d0c 100644
--- a/src/cmd/compile/internal/types2/type.go
+++ b/src/cmd/compile/internal/types2/type.go
@@ -6,7 +6,7 @@ package types2
import (
"cmd/compile/internal/syntax"
- "fmt"
+ "sync"
"sync/atomic"
)
@@ -250,6 +250,9 @@ func (s *Signature) RParams() []*TypeName { return s.rparams }
// SetTParams sets the type parameters of signature s.
func (s *Signature) SetTParams(tparams []*TypeName) { s.tparams = tparams }
+// SetRParams sets the receiver type params of signature s.
+func (s *Signature) SetRParams(rparams []*TypeName) { s.rparams = rparams }
+
// Params returns the parameters of signature s, or nil.
func (s *Signature) Params() *Tuple { return s.params }
@@ -259,57 +262,9 @@ func (s *Signature) Results() *Tuple { return s.results }
// Variadic reports whether the signature s is variadic.
func (s *Signature) Variadic() bool { return s.variadic }
-// A Sum represents a set of possible types.
-// Sums are currently used to represent type lists of interfaces
-// and thus the underlying types of type parameters; they are not
-// first class types of Go.
-type Sum struct {
- types []Type // types are unique
-}
-
-// NewSum returns a new Sum type consisting of the provided
-// types if there are more than one. If there is exactly one
-// type, it returns that type. If the list of types is empty
-// the result is nil.
-func NewSum(types []Type) Type {
- if len(types) == 0 {
- return nil
- }
-
- // What should happen if types contains a sum type?
- // Do we flatten the types list? For now we check
- // and panic. This should not be possible for the
- // current use case of type lists.
- // TODO(gri) Come up with the rules for sum types.
- for _, t := range types {
- if _, ok := t.(*Sum); ok {
- panic("sum type contains sum type - unimplemented")
- }
- }
-
- if len(types) == 1 {
- return types[0]
- }
- return &Sum{types: types}
-}
-
-// is reports whether all types in t satisfy pred.
-func (s *Sum) is(pred func(Type) bool) bool {
- if s == nil {
- return false
- }
- for _, t := range s.types {
- if !pred(t) {
- return false
- }
- }
- return true
-}
-
// An Interface represents an interface type.
type Interface struct {
methods []*Func // ordered list of explicitly declared methods
- types Type // (possibly a Sum) type declared with a type list (TODO(gri) need better field name)
embeddeds []Type // ordered list of explicitly embedded types
allMethods []*Func // ordered list of methods declared with or embedded in this interface (TODO(gri): replace with mset)
@@ -318,29 +273,17 @@ type Interface struct {
obj Object // type declaration defining this interface; or nil (for better error messages)
}
-// unpack unpacks a type into a list of types.
-// TODO(gri) Try to eliminate the need for this function.
-func unpack(typ Type) []Type {
- if typ == nil {
- return nil
- }
- if sum := asSum(typ); sum != nil {
- return sum.types
- }
- return []Type{typ}
-}
-
-// is reports whether interface t represents types that all satisfy pred.
-func (t *Interface) is(pred func(Type) bool) bool {
- if t.allTypes == nil {
+// is reports whether interface t represents types that all satisfy f.
+func (t *Interface) is(f func(Type, bool) bool) bool {
+ switch t := t.allTypes.(type) {
+ case nil, *top:
+ // TODO(gri) should settle on top or nil to represent this case
return false // we must have at least one type! (was bug)
+ case *Union:
+ return t.is(func(typ Type, tilde bool) bool { return f(typ, tilde) })
+ default:
+ return f(t, false)
}
- for _, t := range unpack(t.allTypes) {
- if !pred(t) {
- return false
- }
- }
- return true
}
// emptyInterface represents the empty (completed) interface
@@ -367,9 +310,6 @@ func NewInterface(methods []*Func, embeddeds []*Named) *Interface {
}
// NewInterfaceType returns a new (incomplete) interface for the given methods and embedded types.
-// Each embedded type must have an underlying type of interface type (this property is not
-// verified for defined types, which may be in the process of being set up and which don't
-// have a valid underlying type yet).
// NewInterfaceType takes ownership of the provided methods and may modify their types by setting
// missing receivers. To compute the method set of the interface, Complete must be called.
func NewInterfaceType(methods []*Func, embeddeds []Type) *Interface {
@@ -385,16 +325,6 @@ func NewInterfaceType(methods []*Func, embeddeds []Type) *Interface {
}
}
- // All embedded types should be interfaces; however, defined types
- // may not yet be fully resolved. Only verify that non-defined types
- // are interfaces. This matches the behavior of the code before the
- // fix for #25301 (issue #25596).
- for _, t := range embeddeds {
- if _, ok := t.(*Named); !ok && !IsInterface(t) {
- panic("embedded type is not an interface")
- }
- }
-
// sort for API stability
sortMethods(methods)
sortTypes(embeddeds)
@@ -425,79 +355,40 @@ func (t *Interface) EmbeddedType(i int) Type { return t.embeddeds[i] }
// NumMethods returns the total number of methods of interface t.
// The interface must have been completed.
-func (t *Interface) NumMethods() int { t.assertCompleteness(); return len(t.allMethods) }
-
-func (t *Interface) assertCompleteness() {
- if t.allMethods == nil {
- panic("interface is incomplete")
- }
-}
+func (t *Interface) NumMethods() int { t.Complete(); return len(t.allMethods) }
// Method returns the i'th method of interface t for 0 <= i < t.NumMethods().
// The methods are ordered by their unique Id.
// The interface must have been completed.
-func (t *Interface) Method(i int) *Func { t.assertCompleteness(); return t.allMethods[i] }
+func (t *Interface) Method(i int) *Func { t.Complete(); return t.allMethods[i] }
// Empty reports whether t is the empty interface.
func (t *Interface) Empty() bool {
- if t.allMethods != nil {
- // interface is complete - quick test
- // A non-nil allTypes may still be empty and represents the bottom type.
- return len(t.allMethods) == 0 && t.allTypes == nil
- }
- return !t.iterate(func(t *Interface) bool {
- return len(t.methods) > 0 || t.types != nil
- }, nil)
+ t.Complete()
+ return len(t.allMethods) == 0 && t.allTypes == nil
}
// HasTypeList reports whether interface t has a type list, possibly from an embedded type.
func (t *Interface) HasTypeList() bool {
- if t.allMethods != nil {
- // interface is complete - quick test
- return t.allTypes != nil
- }
-
- return t.iterate(func(t *Interface) bool {
- return t.types != nil
- }, nil)
+ t.Complete()
+ return t.allTypes != nil
}
// IsComparable reports whether interface t is or embeds the predeclared interface "comparable".
func (t *Interface) IsComparable() bool {
- if t.allMethods != nil {
- // interface is complete - quick test
- _, m := lookupMethod(t.allMethods, nil, "==")
- return m != nil
- }
-
- return t.iterate(func(t *Interface) bool {
- _, m := lookupMethod(t.methods, nil, "==")
- return m != nil
- }, nil)
+ t.Complete()
+ _, m := lookupMethod(t.allMethods, nil, "==")
+ return m != nil
}
// IsConstraint reports t.HasTypeList() || t.IsComparable().
func (t *Interface) IsConstraint() bool {
- if t.allMethods != nil {
- // interface is complete - quick test
- if t.allTypes != nil {
- return true
- }
- _, m := lookupMethod(t.allMethods, nil, "==")
- return m != nil
- }
-
- return t.iterate(func(t *Interface) bool {
- if t.types != nil {
- return true
- }
- _, m := lookupMethod(t.methods, nil, "==")
- return m != nil
- }, nil)
+ return t.HasTypeList() || t.IsComparable()
}
// iterate calls f with t and then with any embedded interface of t, recursively, until f returns true.
// iterate reports whether any call to f returned true.
+// TODO(gri) This is now only used by infer.go - see if we can eliminate it.
func (t *Interface) iterate(f func(*Interface) bool, seen map[*Interface]bool) bool {
if f(t) {
return true
@@ -528,11 +419,15 @@ func (t *Interface) iterate(f func(*Interface) bool, seen map[*Interface]bool) b
// "implements" predicate.
func (t *Interface) isSatisfiedBy(typ Type) bool {
t.Complete()
- if t.allTypes == nil {
- return true
+ switch t := t.allTypes.(type) {
+ case nil:
+ return true // no type restrictions
+ case *Union:
+ r, _ := t.intersect(typ, false)
+ return r != nil
+ default:
+ return Identical(t, typ)
}
- types := unpack(t.allTypes)
- return includes(types, typ) || includes(types, under(typ))
}
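A user-level view of the satisfaction rule rewritten above (illustrative, assuming Go 1.18 type sets): a tilde term matches any type whose underlying type matches, while a plain term matches only that exact type.

// Illustrative sketch; C, f, myInt, and myString are assumed names.
package p

type C interface{ ~int | string }

type myInt int
type myString string

func f[T C](T) {}

func _() {
	f(myInt(0)) // ok: underlying type int matches the ~int term
	f("hello")  // ok: string matches the exact string term
	// f(myString("x")) // error: the string term has no ~, so only string itself is accepted
}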
// Complete computes the interface's method set. It must be called by users of
@@ -541,64 +436,9 @@ func (t *Interface) isSatisfiedBy(typ Type) bool {
// form other types. The interface must not contain duplicate methods or a
// panic occurs. Complete returns the receiver.
func (t *Interface) Complete() *Interface {
- // TODO(gri) consolidate this method with Checker.completeInterface
- if t.allMethods != nil {
- return t
- }
-
- t.allMethods = markComplete // avoid infinite recursion
-
- var todo []*Func
- var methods []*Func
- var seen objset
- addMethod := func(m *Func, explicit bool) {
- switch other := seen.insert(m); {
- case other == nil:
- methods = append(methods, m)
- case explicit:
- panic("duplicate method " + m.name)
- default:
- // check method signatures after all locally embedded interfaces are computed
- todo = append(todo, m, other.(*Func))
- }
- }
-
- for _, m := range t.methods {
- addMethod(m, true)
- }
-
- allTypes := t.types
-
- for _, typ := range t.embeddeds {
- utyp := under(typ)
- etyp := asInterface(utyp)
- if etyp == nil {
- if utyp != Typ[Invalid] {
- panic(fmt.Sprintf("%s is not an interface", typ))
- }
- continue
- }
- etyp.Complete()
- for _, m := range etyp.allMethods {
- addMethod(m, false)
- }
- allTypes = intersect(allTypes, etyp.allTypes)
- }
-
- for i := 0; i < len(todo); i += 2 {
- m := todo[i]
- other := todo[i+1]
- if !Identical(m.typ, other.typ) {
- panic("duplicate method " + m.name)
- }
- }
-
- if methods != nil {
- sortMethods(methods)
- t.allMethods = methods
+ if t.allMethods == nil {
+ completeInterface(nil, nopos, t)
}
- t.allTypes = allTypes
-
return t
}
@@ -649,7 +489,7 @@ func (c *Chan) Elem() Type { return c.elem }
// A Named represents a named (defined) type.
type Named struct {
- check *Checker // for Named.under implementation
+ check *Checker // for Named.under implementation; nilled once under has been called
info typeInfo // for cycle detection
obj *TypeName // corresponding declared object
orig *Named // original, uninstantiated type
@@ -658,6 +498,9 @@ type Named struct {
tparams []*TypeName // type parameters, or nil
targs []Type // type arguments (after instantiation), or nil
methods []*Func // methods declared for this type (not the method set of this type); signatures are type-checked lazily
+
+ resolve func(*Named) ([]*TypeName, Type, []*Func)
+ once sync.Once
}
// NewNamed returns a new named type for the given type name, underlying type, and associated methods.
@@ -670,6 +513,35 @@ func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
return (*Checker)(nil).newNamed(obj, nil, underlying, nil, methods)
}
+func (t *Named) expand() *Named {
+ if t.resolve == nil {
+ return t
+ }
+
+ t.once.Do(func() {
+ // TODO(mdempsky): Since we're passing t to resolve anyway
+ // (necessary because types2 expects the receiver type for methods
+ // on defined interface types to be the Named rather than the
+ // underlying Interface), maybe it should just handle calling
+ // SetTParams, SetUnderlying, and AddMethod instead? Those
+ // methods would need to support reentrant calls though. It would
+ // also make the API more future-proof towards further extensions
+ // (like SetTParams).
+
+ tparams, underlying, methods := t.resolve(t)
+
+ switch underlying.(type) {
+ case nil, *Named:
+ panic("invalid underlying type")
+ }
+
+ t.tparams = tparams
+ t.underlying = underlying
+ t.methods = methods
+ })
+ return t
+}
+
// newNamed is like NewNamed but with a *Checker receiver and additional orig argument.
func (check *Checker) newNamed(obj *TypeName, orig *Named, underlying Type, tparams []*TypeName, methods []*Func) *Named {
typ := &Named{check: check, obj: obj, orig: orig, fromRHS: underlying, underlying: underlying, tparams: tparams, methods: methods}
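A minimal, self-contained sketch of the lazy-resolution pattern introduced here (assumed field and type names; not the actual types2 code): resolution is deferred until the first accessor call and runs at most once under sync.Once.

// Illustrative sketch; lazyNamed and its fields are assumed names.
package main

import (
	"fmt"
	"sync"
)

type lazyNamed struct {
	once       sync.Once
	resolve    func() (underlying string, methods []string) // nil if there is nothing to resolve
	underlying string
	methods    []string
}

func (t *lazyNamed) expand() *lazyNamed {
	if t.resolve == nil {
		return t
	}
	t.once.Do(func() {
		t.underlying, t.methods = t.resolve()
	})
	return t
}

func (t *lazyNamed) Underlying() string { return t.expand().underlying }

func main() {
	n := &lazyNamed{resolve: func() (string, []string) {
		fmt.Println("resolving") // printed only once
		return "struct{}", []string{"M"}
	}}
	fmt.Println(n.Underlying(), n.Underlying()) // struct{} struct{}
}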
@@ -679,6 +551,23 @@ func (check *Checker) newNamed(obj *TypeName, orig *Named, underlying Type, tpar
if obj.typ == nil {
obj.typ = typ
}
+ // Ensure that typ is always expanded, at which point the check field can be
+ // nilled out.
+ //
+ // Note that currently we cannot nil out check inside typ.under(), because
+ // it's possible that typ is expanded multiple times.
+ //
+ // TODO(gri): clean this up so that under is the only function mutating
+ // named types.
+ if check != nil {
+ check.later(func() {
+ switch typ.under().(type) {
+ case *Named, *instance:
+ panic("internal error: unexpanded underlying type")
+ }
+ typ.check = nil
+ })
+ }
return typ
}
@@ -694,10 +583,10 @@ func (t *Named) Orig() *Named { return t.orig }
// TParams returns the type parameters of the named type t, or nil.
// The result is non-nil for an (originally) parameterized type even if it is instantiated.
-func (t *Named) TParams() []*TypeName { return t.tparams }
+func (t *Named) TParams() []*TypeName { return t.expand().tparams }
// SetTParams sets the type parameters of the named type t.
-func (t *Named) SetTParams(tparams []*TypeName) { t.tparams = tparams }
+func (t *Named) SetTParams(tparams []*TypeName) { t.expand().tparams = tparams }
// TArgs returns the type arguments after instantiation of the named type t, or nil if not instantiated.
func (t *Named) TArgs() []Type { return t.targs }
@@ -706,10 +595,10 @@ func (t *Named) TArgs() []Type { return t.targs }
func (t *Named) SetTArgs(args []Type) { t.targs = args }
// NumMethods returns the number of explicit methods whose receiver is named type t.
-func (t *Named) NumMethods() int { return len(t.methods) }
+func (t *Named) NumMethods() int { return len(t.expand().methods) }
// Method returns the i'th method of named type t for 0 <= i < t.NumMethods().
-func (t *Named) Method(i int) *Func { return t.methods[i] }
+func (t *Named) Method(i int) *Func { return t.expand().methods[i] }
// SetUnderlying sets the underlying type and marks t as complete.
func (t *Named) SetUnderlying(underlying Type) {
@@ -719,11 +608,12 @@ func (t *Named) SetUnderlying(underlying Type) {
if _, ok := underlying.(*Named); ok {
panic("types2.Named.SetUnderlying: underlying type must not be *Named")
}
- t.underlying = underlying
+ t.expand().underlying = underlying
}
// AddMethod adds method m unless it is already in the method list.
func (t *Named) AddMethod(m *Func) {
+ t.expand()
if i, _ := lookupMethod(t.methods, m.pkg, m.name); i < 0 {
t.methods = append(t.methods, m)
}
@@ -732,11 +622,11 @@ func (t *Named) AddMethod(m *Func) {
// Note: This is a uint32 rather than a uint64 because the
// respective 64 bit atomic instructions are not available
// on all platforms.
-var lastId uint32
+var lastID uint32
-// nextId returns a value increasing monotonically by 1 with
+// nextID returns a value increasing monotonically by 1 with
// each call, starting with 1. It may be called concurrently.
-func nextId() uint64 { return uint64(atomic.AddUint32(&lastId, 1)) }
+func nextID() uint64 { return uint64(atomic.AddUint32(&lastID, 1)) }
// A TypeParam represents a type parameter type.
type TypeParam struct {
@@ -750,16 +640,32 @@ type TypeParam struct {
// Obj returns the type name for the type parameter t.
func (t *TypeParam) Obj() *TypeName { return t.obj }
-// NewTypeParam returns a new TypeParam.
+// NewTypeParam returns a new TypeParam. bound can be nil (and set later).
func (check *Checker) NewTypeParam(obj *TypeName, index int, bound Type) *TypeParam {
- assert(bound != nil)
- typ := &TypeParam{check: check, id: nextId(), obj: obj, index: index, bound: bound}
+ // Always increment lastID, even if it is not used.
+ id := nextID()
+ if check != nil {
+ check.nextID++
+ id = check.nextID
+ }
+ typ := &TypeParam{check: check, id: id, obj: obj, index: index, bound: bound}
if obj.typ == nil {
obj.typ = typ
}
return typ
}
+// Index returns the index of the type param within its param list.
+func (t *TypeParam) Index() int {
+ return t.index
+}
+
+// SetId sets the unique id of a type param. Should only be used for type params
+// in imported generic types.
+func (t *TypeParam) SetId(id uint64) {
+ t.id = id
+}
+
func (t *TypeParam) Bound() *Interface {
iface := asInterface(t.bound)
// use the type bound position if we have one
@@ -772,6 +678,13 @@ func (t *TypeParam) Bound() *Interface {
return iface
}
+func (t *TypeParam) SetBound(bound Type) {
+ if bound == nil {
+ panic("types2.TypeParam.SetBound: bound must not be nil")
+ }
+ t.bound = bound
+}
+
// optype returns a type's operational type. Except for
// type parameters, the operational type is the same
// as the underlying type (as returned by under). For
@@ -787,9 +700,16 @@ func optype(typ Type) Type {
// for a type parameter list of the form:
// (type T interface { type T }).
// See also issue #39680.
- if u := t.Bound().allTypes; u != nil && u != typ {
- // u != typ and u is a type parameter => under(u) != typ, so this is ok
- return under(u)
+ if a := t.Bound().allTypes; a != nil {
+ // If we have a union with a single entry, ignore
+ // any tilde because under(~t) == under(t).
+ if u, _ := a.(*Union); u != nil && u.NumTerms() == 1 {
+ a = u.types[0]
+ }
+ if a != typ {
+ // a != typ and a is a type parameter => under(a) != typ, so this is ok
+ return under(a)
+ }
}
return theTop
}
@@ -844,20 +764,11 @@ var expandf func(Type) Type
func init() { expandf = expand }
-// bottom represents the bottom of the type lattice.
-// It is the underlying type of a type parameter that
-// cannot be satisfied by any type, usually because
-// the intersection of type constraints left nothing).
-type bottom struct{}
-
-// theBottom is the singleton bottom type.
-var theBottom = &bottom{}
-
// top represents the top of the type lattice.
// It is the underlying type of a type parameter that
// can be satisfied by any type (ignoring methods),
-// usually because the type constraint has no type
-// list.
+// because its type constraint contains no restrictions
+// besides methods.
type top struct{}
// theTop is the singleton top type.
@@ -871,14 +782,12 @@ func (t *Struct) Underlying() Type { return t }
func (t *Pointer) Underlying() Type { return t }
func (t *Tuple) Underlying() Type { return t }
func (t *Signature) Underlying() Type { return t }
-func (t *Sum) Underlying() Type { return t }
func (t *Interface) Underlying() Type { return t }
func (t *Map) Underlying() Type { return t }
func (t *Chan) Underlying() Type { return t }
-func (t *Named) Underlying() Type { return t.underlying }
+func (t *Named) Underlying() Type { return t.expand().underlying }
func (t *TypeParam) Underlying() Type { return t }
func (t *instance) Underlying() Type { return t }
-func (t *bottom) Underlying() Type { return t }
func (t *top) Underlying() Type { return t }
// Type-specific implementations of String.
@@ -889,14 +798,12 @@ func (t *Struct) String() string { return TypeString(t, nil) }
func (t *Pointer) String() string { return TypeString(t, nil) }
func (t *Tuple) String() string { return TypeString(t, nil) }
func (t *Signature) String() string { return TypeString(t, nil) }
-func (t *Sum) String() string { return TypeString(t, nil) }
func (t *Interface) String() string { return TypeString(t, nil) }
func (t *Map) String() string { return TypeString(t, nil) }
func (t *Chan) String() string { return TypeString(t, nil) }
func (t *Named) String() string { return TypeString(t, nil) }
func (t *TypeParam) String() string { return TypeString(t, nil) }
func (t *instance) String() string { return TypeString(t, nil) }
-func (t *bottom) String() string { return TypeString(t, nil) }
func (t *top) String() string { return TypeString(t, nil) }
// under returns the true expanded underlying type.
@@ -904,7 +811,7 @@ func (t *top) String() string { return TypeString(t, nil) }
// under must only be called when a type is known
// to be fully set up.
func under(t Type) Type {
- // TODO(gri) is this correct for *Sum?
+ // TODO(gri) is this correct for *Union?
if n := asNamed(t); n != nil {
return n.under()
}
@@ -951,11 +858,6 @@ func asSignature(t Type) *Signature {
return op
}
-func asSum(t Type) *Sum {
- op, _ := optype(t).(*Sum)
- return op
-}
-
func asInterface(t Type) *Interface {
op, _ := optype(t).(*Interface)
return op
@@ -991,3 +893,4 @@ func AsPointer(t Type) *Pointer { return asPointer(t) }
func AsNamed(t Type) *Named { return asNamed(t) }
func AsSignature(t Type) *Signature { return asSignature(t) }
func AsInterface(t Type) *Interface { return asInterface(t) }
+func AsTypeParam(t Type) *TypeParam { return asTypeParam(t) }
diff --git a/src/cmd/compile/internal/types2/types_test.go b/src/cmd/compile/internal/types2/types_test.go
index 096402148d..1525844f2d 100644
--- a/src/cmd/compile/internal/types2/types_test.go
+++ b/src/cmd/compile/internal/types2/types_test.go
@@ -4,14 +4,9 @@
package types2
-import "sync/atomic"
-
func init() {
acceptMethodTypeParams = true
}
-// Upon calling ResetId, nextId starts with 1 again.
-// It may be called concurrently. This is only needed
-// for tests where we may want to have a consistent
-// numbering for each individual test case.
-func ResetId() { atomic.StoreUint32(&lastId, 0) }
+// Debug is set if types2 is built with debug mode enabled.
+const Debug = debug
diff --git a/src/cmd/compile/internal/types2/typestring.go b/src/cmd/compile/internal/types2/typestring.go
index 40016697b7..07ed510d11 100644
--- a/src/cmd/compile/internal/types2/typestring.go
+++ b/src/cmd/compile/internal/types2/typestring.go
@@ -157,12 +157,19 @@ func writeType(buf *bytes.Buffer, typ Type, qf Qualifier, visited []Type) {
buf.WriteString("func")
writeSignature(buf, t, qf, visited)
- case *Sum:
- for i, t := range t.types {
+ case *Union:
+ if t.IsEmpty() {
+ buf.WriteString("⊥")
+ break
+ }
+ for i, e := range t.types {
if i > 0 {
- buf.WriteString(", ")
+ buf.WriteString("|")
}
- writeType(buf, t, qf, visited)
+ if t.tilde[i] {
+ buf.WriteByte('~')
+ }
+ writeType(buf, e, qf, visited)
}
case *Interface:
@@ -207,14 +214,6 @@ func writeType(buf *bytes.Buffer, typ Type, qf Qualifier, visited []Type) {
writeSignature(buf, m.typ.(*Signature), qf, visited)
empty = false
}
- if !empty && t.types != nil {
- buf.WriteString("; ")
- }
- if t.types != nil {
- buf.WriteString("type ")
- writeType(buf, t.types, qf, visited)
- empty = false
- }
if !empty && len(t.embeddeds) > 0 {
buf.WriteString("; ")
}
@@ -226,7 +225,7 @@ func writeType(buf *bytes.Buffer, typ Type, qf Qualifier, visited []Type) {
empty = false
}
}
- if t.allMethods == nil || len(t.methods) > len(t.allMethods) {
+ if debug && (t.allMethods == nil || len(t.methods) > len(t.allMethods)) {
if !empty {
buf.WriteByte(' ')
}
@@ -273,14 +272,21 @@ func writeType(buf *bytes.Buffer, typ Type, qf Qualifier, visited []Type) {
buf.WriteByte('[')
writeTypeList(buf, t.targs, qf, visited)
buf.WriteByte(']')
- } else if t.tparams != nil {
+ } else if t.TParams() != nil {
// parameterized type
- writeTParamList(buf, t.tparams, qf, visited)
+ writeTParamList(buf, t.TParams(), qf, visited)
}
case *TypeParam:
s := "?"
if t.obj != nil {
+ // Optionally write out package for typeparams (like Named).
+ // TODO(danscales): this is required for import/export, so
+ // we maybe need a separate function that won't be changed
+ // for debugging purposes.
+ if t.obj.pkg != nil {
+ writePackage(buf, t.obj.pkg, qf)
+ }
s = t.obj.name
}
buf.WriteString(s + subscript(t.id))
@@ -292,14 +298,12 @@ func writeType(buf *bytes.Buffer, typ Type, qf Qualifier, visited []Type) {
writeTypeList(buf, t.targs, qf, visited)
buf.WriteByte(']')
- case *bottom:
- buf.WriteString("⊥")
-
case *top:
buf.WriteString("⊤")
default:
// For externally defined implementations of Type.
+ // Note: In this case cycles won't be caught.
buf.WriteString(t.String())
}
}
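
The new *Union case above prints terms separated by "|", writes a "~" before approximation terms, and renders the empty union as "⊥". Below is a stand-alone sketch that mirrors just those formatting rules; it is an illustration, not the internal types2 code, and the term type is a simplified stand-in. The typestring_test.go change just below exercises the same notation in place of the old comma-separated type lists.

	package main

	import (
		"fmt"
		"strings"
	)

	// term is a simplified stand-in for one union term: a type and a ~ flag.
	type term struct {
		typ   string
		tilde bool
	}

	// formatUnion mirrors the writeType rules above: "|"-separated terms,
	// a "~" prefix for approximation terms, and "⊥" for the empty union.
	func formatUnion(terms []term) string {
		if len(terms) == 0 {
			return "⊥"
		}
		var b strings.Builder
		for i, t := range terms {
			if i > 0 {
				b.WriteString("|")
			}
			if t.tilde {
				b.WriteByte('~')
			}
			b.WriteString(t.typ)
		}
		return b.String()
	}

	func main() {
		fmt.Println(formatUnion(nil)) // ⊥
		fmt.Println(formatUnion([]term{
			{"int", false}, {"float32", true}, {"complex128", true},
		})) // int|~float32|~complex128
	}
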
diff --git a/src/cmd/compile/internal/types2/typestring_test.go b/src/cmd/compile/internal/types2/typestring_test.go
index d98e9a5ade..88103b81b1 100644
--- a/src/cmd/compile/internal/types2/typestring_test.go
+++ b/src/cmd/compile/internal/types2/typestring_test.go
@@ -91,7 +91,8 @@ var independentTestTypes = []testEntry{
dup("interface{}"),
dup("interface{m()}"),
dup(`interface{String() string; m(int) float32}`),
- dup(`interface{type int, float32, complex128}`),
+ dup("interface{int|float32|complex128}"),
+ dup("interface{int|~float32|~complex128}"),
// maps
dup("map[string]int"),
@@ -138,6 +139,10 @@ func TestTypeString(t *testing.T) {
var nopos syntax.Pos
func TestIncompleteInterfaces(t *testing.T) {
+ if !Debug {
+ t.Skip("requires type checker to be compiled with debug = true")
+ }
+
sig := NewSignature(nil, nil, nil, false)
m := NewFunc(nopos, nil, "m", sig)
for _, test := range []struct {
diff --git a/src/cmd/compile/internal/types2/typexpr.go b/src/cmd/compile/internal/types2/typexpr.go
index e64d804c30..583bb464b2 100644
--- a/src/cmd/compile/internal/types2/typexpr.go
+++ b/src/cmd/compile/internal/types2/typexpr.go
@@ -10,14 +10,9 @@ import (
"cmd/compile/internal/syntax"
"fmt"
"go/constant"
- "sort"
- "strconv"
"strings"
)
-// Disabled by default, but enabled when running tests (via types_test.go).
-var acceptMethodTypeParams bool
-
// ident type-checks identifier e and initializes x with the value or type of e.
// If an error occurred, x.mode is set to invalid.
// For the meaning of def, see Checker.definedType, below.
@@ -63,7 +58,7 @@ func (check *Checker) ident(x *operand, e *syntax.Name, def *Named, wantType boo
// If so, mark the respective package as used.
// (This code is only needed for dot-imports. Without them,
// we only have to mark variables, see *Var case below).
- if pkgName := check.dotImportMap[dotImportKey{scope, obj}]; pkgName != nil {
+ if pkgName := check.dotImportMap[dotImportKey{scope, obj.Name()}]; pkgName != nil {
pkgName.used = true
}
@@ -198,238 +193,6 @@ func (check *Checker) genericType(e syntax.Expr, reportErr bool) Type {
return typ
}
-// isubst returns an x with identifiers substituted per the substitution map smap.
-// isubst only handles the case of (valid) method receiver type expressions correctly.
-func isubst(x syntax.Expr, smap map[*syntax.Name]*syntax.Name) syntax.Expr {
- switch n := x.(type) {
- case *syntax.Name:
- if alt := smap[n]; alt != nil {
- return alt
- }
- // case *syntax.StarExpr:
- // X := isubst(n.X, smap)
- // if X != n.X {
- // new := *n
- // new.X = X
- // return &new
- // }
- case *syntax.Operation:
- if n.Op == syntax.Mul && n.Y == nil {
- X := isubst(n.X, smap)
- if X != n.X {
- new := *n
- new.X = X
- return &new
- }
- }
- case *syntax.IndexExpr:
- Index := isubst(n.Index, smap)
- if Index != n.Index {
- new := *n
- new.Index = Index
- return &new
- }
- case *syntax.ListExpr:
- var elems []syntax.Expr
- for i, elem := range n.ElemList {
- new := isubst(elem, smap)
- if new != elem {
- if elems == nil {
- elems = make([]syntax.Expr, len(n.ElemList))
- copy(elems, n.ElemList)
- }
- elems[i] = new
- }
- }
- if elems != nil {
- new := *n
- new.ElemList = elems
- return &new
- }
- case *syntax.ParenExpr:
- return isubst(n.X, smap) // no need to keep parentheses
- default:
- // Other receiver type expressions are invalid.
- // It's fine to ignore those here as they will
- // be checked elsewhere.
- }
- return x
-}
-
-// funcType type-checks a function or method type.
-func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams []*syntax.Field, ftyp *syntax.FuncType) {
- check.openScope(ftyp, "function")
- check.scope.isFunc = true
- check.recordScope(ftyp, check.scope)
- sig.scope = check.scope
- defer check.closeScope()
-
- var recvTyp syntax.Expr // rewritten receiver type; valid if != nil
- if recvPar != nil {
- // collect generic receiver type parameters, if any
- // - a receiver type parameter is like any other type parameter, except that it is declared implicitly
- // - the receiver specification acts as local declaration for its type parameters, which may be blank
- _, rname, rparams := check.unpackRecv(recvPar.Type, true)
- if len(rparams) > 0 {
- // Blank identifiers don't get declared and regular type-checking of the instantiated
- // parameterized receiver type expression fails in Checker.collectParams of receiver.
- // Identify blank type parameters and substitute each with a unique new identifier named
- // "n_" (where n is the parameter index) and which cannot conflict with any user-defined
- // name.
- var smap map[*syntax.Name]*syntax.Name // substitution map from "_" to "!n" identifiers
- for i, p := range rparams {
- if p.Value == "_" {
- new := *p
- new.Value = fmt.Sprintf("%d_", i)
- rparams[i] = &new // use n_ identifier instead of _ so it can be looked up
- if smap == nil {
- smap = make(map[*syntax.Name]*syntax.Name)
- }
- smap[p] = &new
- }
- }
- if smap != nil {
- // blank identifiers were found => use rewritten receiver type
- recvTyp = isubst(recvPar.Type, smap)
- }
- // TODO(gri) rework declareTypeParams
- sig.rparams = nil
- for _, rparam := range rparams {
- sig.rparams = check.declareTypeParam(sig.rparams, rparam)
- }
- // determine receiver type to get its type parameters
- // and the respective type parameter bounds
- var recvTParams []*TypeName
- if rname != nil {
- // recv should be a Named type (otherwise an error is reported elsewhere)
- // Also: Don't report an error via genericType since it will be reported
- // again when we type-check the signature.
- // TODO(gri) maybe the receiver should be marked as invalid instead?
- if recv := asNamed(check.genericType(rname, false)); recv != nil {
- recvTParams = recv.tparams
- }
- }
- // provide type parameter bounds
- // - only do this if we have the right number (otherwise an error is reported elsewhere)
- if len(sig.rparams) == len(recvTParams) {
- // We have a list of *TypeNames but we need a list of Types.
- list := make([]Type, len(sig.rparams))
- for i, t := range sig.rparams {
- list[i] = t.typ
- }
- smap := makeSubstMap(recvTParams, list)
- for i, tname := range sig.rparams {
- bound := recvTParams[i].typ.(*TypeParam).bound
- // bound is (possibly) parameterized in the context of the
- // receiver type declaration. Substitute parameters for the
- // current context.
- // TODO(gri) should we assume now that bounds always exist?
- // (no bound == empty interface)
- if bound != nil {
- bound = check.subst(tname.pos, bound, smap)
- tname.typ.(*TypeParam).bound = bound
- }
- }
- }
- }
- }
-
- if tparams != nil {
- sig.tparams = check.collectTypeParams(tparams)
- // Always type-check method type parameters but complain if they are not enabled.
- // (A separate check is needed when type-checking interface method signatures because
- // they don't have a receiver specification.)
- if recvPar != nil && !acceptMethodTypeParams {
- check.error(ftyp, "methods cannot have type parameters")
- }
- }
-
- // Value (non-type) parameters' scope starts in the function body. Use a temporary scope for their
- // declarations and then squash that scope into the parent scope (and report any redeclarations at
- // that time).
- scope := NewScope(check.scope, nopos, nopos, "function body (temp. scope)")
- var recvList []*Var // TODO(gri) remove the need for making a list here
- if recvPar != nil {
- recvList, _ = check.collectParams(scope, []*syntax.Field{recvPar}, recvTyp, false) // use rewritten receiver type, if any
- }
- params, variadic := check.collectParams(scope, ftyp.ParamList, nil, true)
- results, _ := check.collectParams(scope, ftyp.ResultList, nil, false)
- scope.Squash(func(obj, alt Object) {
- var err error_
- err.errorf(obj, "%s redeclared in this block", obj.Name())
- err.recordAltDecl(alt)
- check.report(&err)
- })
-
- if recvPar != nil {
- // recv parameter list present (may be empty)
- // spec: "The receiver is specified via an extra parameter section preceding the
- // method name. That parameter section must declare a single parameter, the receiver."
- var recv *Var
- switch len(recvList) {
- case 0:
- // error reported by resolver
- recv = NewParam(nopos, nil, "", Typ[Invalid]) // ignore recv below
- default:
- // more than one receiver
- check.error(recvList[len(recvList)-1].Pos(), "method must have exactly one receiver")
- fallthrough // continue with first receiver
- case 1:
- recv = recvList[0]
- }
-
- // TODO(gri) We should delay rtyp expansion to when we actually need the
- // receiver; thus all checks here should be delayed to later.
- rtyp, _ := deref(recv.typ)
- rtyp = expand(rtyp)
-
- // spec: "The receiver type must be of the form T or *T where T is a type name."
- // (ignore invalid types - error was reported before)
- if t := rtyp; t != Typ[Invalid] {
- var err string
- if T := asNamed(t); T != nil {
- // spec: "The type denoted by T is called the receiver base type; it must not
- // be a pointer or interface type and it must be declared in the same package
- // as the method."
- if T.obj.pkg != check.pkg {
- err = "type not defined in this package"
- if check.conf.CompilerErrorMessages {
- check.errorf(recv.pos, "cannot define new methods on non-local type %s", recv.typ)
- err = ""
- }
- } else {
- switch u := optype(T).(type) {
- case *Basic:
- // unsafe.Pointer is treated like a regular pointer
- if u.kind == UnsafePointer {
- err = "unsafe.Pointer"
- }
- case *Pointer, *Interface:
- err = "pointer or interface type"
- }
- }
- } else if T := asBasic(t); T != nil {
- err = "basic or unnamed type"
- if check.conf.CompilerErrorMessages {
- check.errorf(recv.pos, "cannot define new methods on non-local type %s", recv.typ)
- err = ""
- }
- } else {
- check.errorf(recv.pos, "invalid receiver type %s", recv.typ)
- }
- if err != "" {
- check.errorf(recv.pos, "invalid receiver type %s (%s)", recv.typ, err)
- // ok to continue
- }
- }
- sig.recv = recv
- }
-
- sig.params = NewTuple(params...)
- sig.results = NewTuple(results...)
- sig.variadic = variadic
-}
-
// goTypeName returns the Go type name for typ and
// removes any occurrences of "types2." from that name.
func goTypeName(typ Type) string {
@@ -732,537 +495,3 @@ func (check *Checker) typeList(list []syntax.Expr) []Type {
}
return res
}
-
-// collectParams declares the parameters of list in scope and returns the corresponding
-// variable list. If type0 != nil, it is used instead of the first type in list.
-func (check *Checker) collectParams(scope *Scope, list []*syntax.Field, type0 syntax.Expr, variadicOk bool) (params []*Var, variadic bool) {
- if list == nil {
- return
- }
-
- var named, anonymous bool
-
- var typ Type
- var prev syntax.Expr
- for i, field := range list {
- ftype := field.Type
- // type-check type of grouped fields only once
- if ftype != prev {
- prev = ftype
- if i == 0 && type0 != nil {
- ftype = type0
- }
- if t, _ := ftype.(*syntax.DotsType); t != nil {
- ftype = t.Elem
- if variadicOk && i == len(list)-1 {
- variadic = true
- } else {
- check.softErrorf(t, "can only use ... with final parameter in list")
- // ignore ... and continue
- }
- }
- typ = check.varType(ftype)
- }
- // The parser ensures that f.Tag is nil and we don't
- // care if a constructed AST contains a non-nil tag.
- if field.Name != nil {
- // named parameter
- name := field.Name.Value
- if name == "" {
- check.error(field.Name, invalidAST+"anonymous parameter")
- // ok to continue
- }
- par := NewParam(field.Name.Pos(), check.pkg, name, typ)
- check.declare(scope, field.Name, par, scope.pos)
- params = append(params, par)
- named = true
- } else {
- // anonymous parameter
- par := NewParam(ftype.Pos(), check.pkg, "", typ)
- check.recordImplicit(field, par)
- params = append(params, par)
- anonymous = true
- }
- }
-
- if named && anonymous {
- check.error(list[0], invalidAST+"list contains both named and anonymous parameters")
- // ok to continue
- }
-
- // For a variadic function, change the last parameter's type from T to []T.
- // Since we type-checked T rather than ...T, we also need to retro-actively
- // record the type for ...T.
- if variadic {
- last := params[len(params)-1]
- last.typ = &Slice{elem: last.typ}
- check.recordTypeAndValue(list[len(list)-1].Type, typexpr, last.typ, nil)
- }
-
- return
-}
-
-func (check *Checker) declareInSet(oset *objset, pos syntax.Pos, obj Object) bool {
- if alt := oset.insert(obj); alt != nil {
- var err error_
- err.errorf(pos, "%s redeclared", obj.Name())
- err.recordAltDecl(alt)
- check.report(&err)
- return false
- }
- return true
-}
-
-func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType, def *Named) {
- var tname *syntax.Name // most recent "type" name
- var types []syntax.Expr
- for _, f := range iface.MethodList {
- if f.Name != nil {
- // We have a method with name f.Name, or a type
- // of a type list (f.Name.Value == "type").
- name := f.Name.Value
- if name == "_" {
- if check.conf.CompilerErrorMessages {
- check.error(f.Name, "methods must have a unique non-blank name")
- } else {
- check.error(f.Name, "invalid method name _")
- }
- continue // ignore
- }
-
- if name == "type" {
- // Always collect all type list entries, even from
- // different type lists, under the assumption that
- // the author intended to include all types.
- types = append(types, f.Type)
- if tname != nil && tname != f.Name {
- check.error(f.Name, "cannot have multiple type lists in an interface")
- }
- tname = f.Name
- continue
- }
-
- typ := check.typ(f.Type)
- sig, _ := typ.(*Signature)
- if sig == nil {
- if typ != Typ[Invalid] {
- check.errorf(f.Type, invalidAST+"%s is not a method signature", typ)
- }
- continue // ignore
- }
-
- // Always type-check method type parameters but complain if they are not enabled.
- // (This extra check is needed here because interface method signatures don't have
- // a receiver specification.)
- if sig.tparams != nil && !acceptMethodTypeParams {
- check.error(f.Type, "methods cannot have type parameters")
- }
-
- // use named receiver type if available (for better error messages)
- var recvTyp Type = ityp
- if def != nil {
- recvTyp = def
- }
- sig.recv = NewVar(f.Name.Pos(), check.pkg, "", recvTyp)
-
- m := NewFunc(f.Name.Pos(), check.pkg, name, sig)
- check.recordDef(f.Name, m)
- ityp.methods = append(ityp.methods, m)
- } else {
- // We have an embedded type. completeInterface will
- // eventually verify that we have an interface.
- ityp.embeddeds = append(ityp.embeddeds, check.typ(f.Type))
- check.posMap[ityp] = append(check.posMap[ityp], f.Type.Pos())
- }
- }
-
- // type constraints
- ityp.types = NewSum(check.collectTypeConstraints(iface.Pos(), types))
-
- if len(ityp.methods) == 0 && ityp.types == nil && len(ityp.embeddeds) == 0 {
- // empty interface
- ityp.allMethods = markComplete
- return
- }
-
- // sort for API stability
- sortMethods(ityp.methods)
- sortTypes(ityp.embeddeds)
-
- check.later(func() { check.completeInterface(iface.Pos(), ityp) })
-}
-
-func (check *Checker) completeInterface(pos syntax.Pos, ityp *Interface) {
- if ityp.allMethods != nil {
- return
- }
-
- // completeInterface may be called via the LookupFieldOrMethod,
- // MissingMethod, Identical, or IdenticalIgnoreTags external API
- // in which case check will be nil. In this case, type-checking
- // must be finished and all interfaces should have been completed.
- if check == nil {
- panic("internal error: incomplete interface")
- }
-
- if check.conf.Trace {
- // Types don't generally have position information.
- // If we don't have a valid pos provided, try to use
- // one close enough.
- if !pos.IsKnown() && len(ityp.methods) > 0 {
- pos = ityp.methods[0].pos
- }
-
- check.trace(pos, "complete %s", ityp)
- check.indent++
- defer func() {
- check.indent--
- check.trace(pos, "=> %s (methods = %v, types = %v)", ityp, ityp.allMethods, ityp.allTypes)
- }()
- }
-
- // An infinitely expanding interface (due to a cycle) is detected
- // elsewhere (Checker.validType), so here we simply assume we only
- // have valid interfaces. Mark the interface as complete to avoid
- // infinite recursion if the validType check occurs later for some
- // reason.
- ityp.allMethods = markComplete
-
- // Methods of embedded interfaces are collected unchanged; i.e., the identity
- // of a method I.m's Func Object of an interface I is the same as that of
- // the method m in an interface that embeds interface I. On the other hand,
- // if a method is embedded via multiple overlapping embedded interfaces, we
- // don't provide a guarantee which "original m" got chosen for the embedding
- // interface. See also issue #34421.
- //
- // If we don't care to provide this identity guarantee anymore, instead of
- // reusing the original method in embeddings, we can clone the method's Func
- // Object and give it the position of a corresponding embedded interface. Then
- // we can get rid of the mpos map below and simply use the cloned method's
- // position.
-
- var seen objset
- var methods []*Func
- mpos := make(map[*Func]syntax.Pos) // method specification or method embedding position, for good error messages
- addMethod := func(pos syntax.Pos, m *Func, explicit bool) {
- switch other := seen.insert(m); {
- case other == nil:
- methods = append(methods, m)
- mpos[m] = pos
- case explicit:
- var err error_
- err.errorf(pos, "duplicate method %s", m.name)
- err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
- check.report(&err)
- default:
- // We have a duplicate method name in an embedded (not explicitly declared) method.
- // Check method signatures after all types are computed (issue #33656).
- // If we're pre-go1.14 (overlapping embeddings are not permitted), report that
- // error here as well (even though we could do it eagerly) because it's the same
- // error message.
- check.later(func() {
- if !check.allowVersion(m.pkg, 1, 14) || !check.identical(m.typ, other.Type()) {
- var err error_
- err.errorf(pos, "duplicate method %s", m.name)
- err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
- check.report(&err)
- }
- })
- }
- }
-
- for _, m := range ityp.methods {
- addMethod(m.pos, m, true)
- }
-
- // collect types
- allTypes := ityp.types
-
- posList := check.posMap[ityp]
- for i, typ := range ityp.embeddeds {
- pos := posList[i] // embedding position
- utyp := under(typ)
- etyp := asInterface(utyp)
- if etyp == nil {
- if utyp != Typ[Invalid] {
- var format string
- if _, ok := utyp.(*TypeParam); ok {
- format = "%s is a type parameter, not an interface"
- } else {
- format = "%s is not an interface"
- }
- check.errorf(pos, format, typ)
- }
- continue
- }
- check.completeInterface(pos, etyp)
- for _, m := range etyp.allMethods {
- addMethod(pos, m, false) // use embedding position pos rather than m.pos
- }
- allTypes = intersect(allTypes, etyp.allTypes)
- }
-
- if methods != nil {
- sortMethods(methods)
- ityp.allMethods = methods
- }
- ityp.allTypes = allTypes
-}
-
-// intersect computes the intersection of the types x and y.
-// Note: A incomming nil type stands for the top type. A top
-// type result is returned as nil.
-func intersect(x, y Type) (r Type) {
- defer func() {
- if r == theTop {
- r = nil
- }
- }()
-
- switch {
- case x == theBottom || y == theBottom:
- return theBottom
- case x == nil || x == theTop:
- return y
- case y == nil || x == theTop:
- return x
- }
-
- xtypes := unpack(x)
- ytypes := unpack(y)
- // Compute the list rtypes which includes only
- // types that are in both xtypes and ytypes.
- // Quadratic algorithm, but good enough for now.
- // TODO(gri) fix this
- var rtypes []Type
- for _, x := range xtypes {
- if includes(ytypes, x) {
- rtypes = append(rtypes, x)
- }
- }
-
- if rtypes == nil {
- return theBottom
- }
- return NewSum(rtypes)
-}
-
-func sortTypes(list []Type) {
- sort.Stable(byUniqueTypeName(list))
-}
-
-// byUniqueTypeName named type lists can be sorted by their unique type names.
-type byUniqueTypeName []Type
-
-func (a byUniqueTypeName) Len() int { return len(a) }
-func (a byUniqueTypeName) Less(i, j int) bool { return sortName(a[i]) < sortName(a[j]) }
-func (a byUniqueTypeName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-func sortName(t Type) string {
- if named := asNamed(t); named != nil {
- return named.obj.Id()
- }
- return ""
-}
-
-func sortMethods(list []*Func) {
- sort.Sort(byUniqueMethodName(list))
-}
-
-func assertSortedMethods(list []*Func) {
- if !debug {
- panic("internal error: assertSortedMethods called outside debug mode")
- }
- if !sort.IsSorted(byUniqueMethodName(list)) {
- panic("internal error: methods not sorted")
- }
-}
-
-// byUniqueMethodName method lists can be sorted by their unique method names.
-type byUniqueMethodName []*Func
-
-func (a byUniqueMethodName) Len() int { return len(a) }
-func (a byUniqueMethodName) Less(i, j int) bool { return a[i].less(a[j]) }
-func (a byUniqueMethodName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-func (check *Checker) tag(t *syntax.BasicLit) string {
- // If t.Bad, an error was reported during parsing.
- if t != nil && !t.Bad {
- if t.Kind == syntax.StringLit {
- if val, err := strconv.Unquote(t.Value); err == nil {
- return val
- }
- }
- check.errorf(t, invalidAST+"incorrect tag syntax: %q", t.Value)
- }
- return ""
-}
-
-func (check *Checker) structType(styp *Struct, e *syntax.StructType) {
- if e.FieldList == nil {
- return
- }
-
- // struct fields and tags
- var fields []*Var
- var tags []string
-
- // for double-declaration checks
- var fset objset
-
- // current field typ and tag
- var typ Type
- var tag string
- add := func(ident *syntax.Name, embedded bool, pos syntax.Pos) {
- if tag != "" && tags == nil {
- tags = make([]string, len(fields))
- }
- if tags != nil {
- tags = append(tags, tag)
- }
-
- name := ident.Value
- fld := NewField(pos, check.pkg, name, typ, embedded)
- // spec: "Within a struct, non-blank field names must be unique."
- if name == "_" || check.declareInSet(&fset, pos, fld) {
- fields = append(fields, fld)
- check.recordDef(ident, fld)
- }
- }
-
- // addInvalid adds an embedded field of invalid type to the struct for
- // fields with errors; this keeps the number of struct fields in sync
- // with the source as long as the fields are _ or have different names
- // (issue #25627).
- addInvalid := func(ident *syntax.Name, pos syntax.Pos) {
- typ = Typ[Invalid]
- tag = ""
- add(ident, true, pos)
- }
-
- var prev syntax.Expr
- for i, f := range e.FieldList {
- // Fields declared syntactically with the same type (e.g.: a, b, c T)
- // share the same type expression. Only check type if it's a new type.
- if i == 0 || f.Type != prev {
- typ = check.varType(f.Type)
- prev = f.Type
- }
- tag = ""
- if i < len(e.TagList) {
- tag = check.tag(e.TagList[i])
- }
- if f.Name != nil {
- // named field
- add(f.Name, false, f.Name.Pos())
- } else {
- // embedded field
- // spec: "An embedded type must be specified as a type name T or as a
- // pointer to a non-interface type name *T, and T itself may not be a
- // pointer type."
- pos := syntax.StartPos(f.Type)
- name := embeddedFieldIdent(f.Type)
- if name == nil {
- check.errorf(pos, "invalid embedded field type %s", f.Type)
- name = &syntax.Name{Value: "_"} // TODO(gri) need to set position to pos
- addInvalid(name, pos)
- continue
- }
- add(name, true, pos)
-
- // Because we have a name, typ must be of the form T or *T, where T is the name
- // of a (named or alias) type, and t (= deref(typ)) must be the type of T.
- // We must delay this check to the end because we don't want to instantiate
- // (via under(t)) a possibly incomplete type.
- embeddedTyp := typ // for closure below
- embeddedPos := pos
- check.later(func() {
- t, isPtr := deref(embeddedTyp)
- switch t := optype(t).(type) {
- case *Basic:
- if t == Typ[Invalid] {
- // error was reported before
- return
- }
- // unsafe.Pointer is treated like a regular pointer
- if t.kind == UnsafePointer {
- check.error(embeddedPos, "embedded field type cannot be unsafe.Pointer")
- }
- case *Pointer:
- check.error(embeddedPos, "embedded field type cannot be a pointer")
- case *Interface:
- if isPtr {
- check.error(embeddedPos, "embedded field type cannot be a pointer to an interface")
- }
- }
- })
- }
- }
-
- styp.fields = fields
- styp.tags = tags
-}
-
-func embeddedFieldIdent(e syntax.Expr) *syntax.Name {
- switch e := e.(type) {
- case *syntax.Name:
- return e
- case *syntax.Operation:
- if base := ptrBase(e); base != nil {
- // *T is valid, but **T is not
- if op, _ := base.(*syntax.Operation); op == nil || ptrBase(op) == nil {
- return embeddedFieldIdent(e.X)
- }
- }
- case *syntax.SelectorExpr:
- return e.Sel
- case *syntax.IndexExpr:
- return embeddedFieldIdent(e.X)
- }
- return nil // invalid embedded field
-}
-
-func (check *Checker) collectTypeConstraints(pos syntax.Pos, types []syntax.Expr) []Type {
- list := make([]Type, 0, len(types)) // assume all types are correct
- for _, texpr := range types {
- if texpr == nil {
- check.error(pos, invalidAST+"missing type constraint")
- continue
- }
- list = append(list, check.varType(texpr))
- }
-
- // Ensure that each type is only present once in the type list. Types may be
- // interfaces, which may not be complete yet. It's ok to do this check at the
- // end because it's not a requirement for correctness of the code.
- // Note: This is a quadratic algorithm, but type lists tend to be short.
- check.later(func() {
- for i, t := range list {
- if t := asInterface(t); t != nil {
- check.completeInterface(types[i].Pos(), t)
- }
- if includes(list[:i], t) {
- check.softErrorf(types[i], "duplicate type %s in type list", t)
- }
- }
- })
-
- return list
-}
-
-// includes reports whether typ is in list
-func includes(list []Type, typ Type) bool {
- for _, e := range list {
- if Identical(typ, e) {
- return true
- }
- }
- return false
-}
-
-func ptrBase(x *syntax.Operation) syntax.Expr {
- if x.Op == syntax.Mul && x.Y == nil {
- return x.X
- }
- return nil
-}
diff --git a/src/cmd/compile/internal/types2/unify.go b/src/cmd/compile/internal/types2/unify.go
index e1832bbb2a..e5983dd40c 100644
--- a/src/cmd/compile/internal/types2/unify.go
+++ b/src/cmd/compile/internal/types2/unify.go
@@ -352,9 +352,9 @@ func (u *unifier) nify(x, y Type, p *ifacePair) bool {
u.nify(x.results, y.results, p)
}
- case *Sum:
- // This should not happen with the current internal use of sum types.
- panic("type inference across sum types not implemented")
+ case *Union:
+ // This should not happen with the current internal use of union types.
+ panic("type inference across union types not implemented")
case *Interface:
// Two interface types are identical if they have the same set of methods with
diff --git a/src/cmd/compile/internal/types2/union.go b/src/cmd/compile/internal/types2/union.go
new file mode 100644
index 0000000000..30570b5e80
--- /dev/null
+++ b/src/cmd/compile/internal/types2/union.go
@@ -0,0 +1,236 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import "cmd/compile/internal/syntax"
+
+// ----------------------------------------------------------------------------
+// API
+
+// A Union represents a union of terms.
+// A term is a type with a ~ (tilde) flag.
+type Union struct {
+ types []Type // types are unique
+ tilde []bool // if tilde[i] is set, terms[i] is of the form ~T
+}
+
+// NewUnion returns a new Union type with the given terms (types[i], tilde[i]).
+// The lengths of both arguments must match. An empty union represents the set
+// of no types.
+func NewUnion(types []Type, tilde []bool) *Union { return newUnion(types, tilde) }
+
+func (u *Union) IsEmpty() bool { return len(u.types) == 0 }
+func (u *Union) NumTerms() int { return len(u.types) }
+func (u *Union) Term(i int) (Type, bool) { return u.types[i], u.tilde[i] }
+
+func (u *Union) Underlying() Type { return u }
+func (u *Union) String() string { return TypeString(u, nil) }
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+var emptyUnion = new(Union)
+
+func newUnion(types []Type, tilde []bool) *Union {
+ assert(len(types) == len(tilde))
+ if len(types) == 0 {
+ return emptyUnion
+ }
+ t := new(Union)
+ t.types = types
+ t.tilde = tilde
+ return t
+}
+
+// is reports whether f returned true for all terms (type, tilde) of u.
+func (u *Union) is(f func(Type, bool) bool) bool {
+ if u.IsEmpty() {
+ return false
+ }
+ for i, t := range u.types {
+ if !f(t, u.tilde[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// underIs reports whether f returned true for the underlying types of all terms of u.
+func (u *Union) underIs(f func(Type) bool) bool {
+ if u.IsEmpty() {
+ return false
+ }
+ for _, t := range u.types {
+ if !f(under(t)) {
+ return false
+ }
+ }
+ return true
+}
+
+func parseUnion(check *Checker, tlist []syntax.Expr) Type {
+ var types []Type
+ var tilde []bool
+ for _, x := range tlist {
+ t, d := parseTilde(check, x)
+ if len(tlist) == 1 && !d {
+ return t // single type
+ }
+ types = append(types, t)
+ tilde = append(tilde, d)
+ }
+
+ // Ensure that each type is only present once in the type list.
+ // It's ok to do this check at the end because it's not a requirement
+ // for correctness of the code.
+ // Note: This is a quadratic algorithm, but unions tend to be short.
+ check.later(func() {
+ for i, t := range types {
+ t := expand(t)
+ if t == Typ[Invalid] {
+ continue
+ }
+
+ x := tlist[i]
+ pos := syntax.StartPos(x)
+ // We may not know the position of x if it was a typechecker-
+ // introduced ~T type of a type list entry T. Use the position
+ // of T instead.
+ // TODO(gri) remove this test once we don't support type lists anymore
+ if !pos.IsKnown() {
+ if op, _ := x.(*syntax.Operation); op != nil {
+ pos = syntax.StartPos(op.X)
+ }
+ }
+
+ u := under(t)
+ if tilde[i] && !Identical(u, t) {
+ check.errorf(x, "invalid use of ~ (underlying type of %s is %s)", t, u)
+ continue // don't report another error for t
+ }
+ if _, ok := u.(*Interface); ok {
+ // A single type with a ~ is a single-term union.
+ check.errorf(pos, "cannot use interface %s with ~ or inside a union (implementation restriction)", t)
+ continue // don't report another error for t
+ }
+
+ // Complain about duplicate entries a|a, but also a|~a, and ~a|~a.
+ // TODO(gri) We should also exclude myint|~int since myint is included in ~int.
+ if includes(types[:i], t) {
+ // TODO(gri) this currently doesn't print the ~ if present
+ check.softErrorf(pos, "duplicate term %s in union element", t)
+ }
+ }
+ })
+
+ return newUnion(types, tilde)
+}
+
+func parseTilde(check *Checker, x syntax.Expr) (Type, bool) {
+ tilde := false
+ if op, _ := x.(*syntax.Operation); op != nil && op.Op == syntax.Tilde {
+ x = op.X
+ tilde = true
+ }
+ return check.anyType(x), tilde
+}
+
+// intersect computes the intersection of the types x and y.
+// A nil type stands for the set of all types; an empty union
+// stands for the set of no types.
+func intersect(x, y Type) (r Type) {
+ // If one of the types is nil (no restrictions)
+ // the result is the other type.
+ switch {
+ case x == nil:
+ return y
+ case y == nil:
+ return x
+ }
+
+ // Compute the terms which are in both x and y.
+ // TODO(gri) This is not correct as it may not always compute
+ // the "largest" intersection. For instance, for
+ // x = myInt|~int, y = ~int
+ // we get the result myInt but we should get ~int.
+ xu, _ := x.(*Union)
+ yu, _ := y.(*Union)
+ switch {
+ case xu != nil && yu != nil:
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ var types []Type
+ var tilde []bool
+ for j, y := range yu.types {
+ yt := yu.tilde[j]
+ if r, rt := xu.intersect(y, yt); r != nil {
+ // Terms x[i] and y[j] match: Select the one that
+ // is not a ~t because that is the intersection
+ // type. If both are ~t, they are identical:
+ // T ∩ T = T
+ // T ∩ ~t = T
+ // ~t ∩ T = T
+ // ~t ∩ ~t = ~t
+ types = append(types, r)
+ tilde = append(tilde, rt)
+ }
+ }
+ return newUnion(types, tilde)
+
+ case xu != nil:
+ if r, _ := xu.intersect(y, false); r != nil {
+ return y
+ }
+
+ case yu != nil:
+ if r, _ := yu.intersect(x, false); r != nil {
+ return x
+ }
+
+ default: // xu == nil && yu == nil
+ if Identical(x, y) {
+ return x
+ }
+ }
+
+ return emptyUnion
+}
+
+// includes reports whether typ is in list.
+func includes(list []Type, typ Type) bool {
+ for _, e := range list {
+ if Identical(typ, e) {
+ return true
+ }
+ }
+ return false
+}
+
+// intersect computes the intersection of the union u and term (y, yt)
+// and returns the intersection term, if any. Otherwise the result is
+// (nil, false).
+func (u *Union) intersect(y Type, yt bool) (Type, bool) {
+ under_y := under(y)
+ for i, x := range u.types {
+ xt := u.tilde[i]
+ // determine which types xx, yy to compare
+ xx := x
+ if yt {
+ xx = under(x)
+ }
+ yy := y
+ if xt {
+ yy = under_y
+ }
+ if Identical(xx, yy) {
+ // T ∩ T = T
+ // T ∩ ~t = T
+ // ~t ∩ T = T
+ // ~t ∩ ~t = ~t
+ return xx, xt && yt
+ }
+ }
+ return nil, false
+}
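
The matching rule in Union.intersect above reduces to a small table: two terms intersect when their types are identical once a ~ on either side allows comparing underlying types, and the resulting term keeps the ~ only when both inputs had one (T ∩ T = T, T ∩ ~t = T, ~t ∩ T = T, ~t ∩ ~t = ~t). The following stand-alone sketch models that rule with terms as (type, underlying, tilde) triples of strings; the names and helper are illustrative, not the types2 API.

	package main

	import "fmt"

	// term models one union term: a type name, its underlying type,
	// and whether the term is written with a leading ~.
	type term struct {
		typ   string // e.g. "myInt"
		under string // e.g. "int"
		tilde bool
	}

	// intersectTerm mirrors the rule in Union.intersect above: compare
	// underlying types where a ~ permits it, and keep the ~ in the result
	// only when both terms had one.
	func intersectTerm(x, y term) (term, bool) {
		xx := x.typ
		if y.tilde {
			xx = x.under
		}
		yy := y.typ
		if x.tilde {
			yy = y.under
		}
		if xx != yy {
			return term{}, false
		}
		return term{typ: xx, under: x.under, tilde: x.tilde && y.tilde}, true
	}

	func main() {
		T := term{typ: "int", under: "int"}                   // int
		approx := term{typ: "int", under: "int", tilde: true} // ~int

		if r, ok := intersectTerm(T, approx); ok {
			fmt.Printf("int ∩ ~int  = %s (tilde=%v)\n", r.typ, r.tilde) // int (tilde=false)
		}
		if r, ok := intersectTerm(approx, approx); ok {
			fmt.Printf("~int ∩ ~int = %s (tilde=%v)\n", r.typ, r.tilde) // int (tilde=true)
		}
		if _, ok := intersectTerm(T, term{typ: "string", under: "string"}); !ok {
			fmt.Println("int ∩ string: no common term")
		}
	}
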
diff --git a/src/cmd/compile/internal/walk/closure.go b/src/cmd/compile/internal/walk/closure.go
index 2194e1c5b0..feda3c3b4f 100644
--- a/src/cmd/compile/internal/walk/closure.go
+++ b/src/cmd/compile/internal/walk/closure.go
@@ -122,6 +122,9 @@ func walkClosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node {
clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ), nil)
clos.SetEsc(clo.Esc())
clos.List = append([]ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, clofn.Nname)}, closureArgs(clo)...)
+ for i, value := range clos.List {
+ clos.List[i] = ir.NewStructKeyExpr(base.Pos, typ.Field(i), value)
+ }
addr := typecheck.NodAddr(clos)
addr.SetEsc(clo.Esc())
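
walkClosure lowers a closure to a composite literal of a struct whose first field is the code pointer (the OCFUNC node) and whose remaining fields are the captured variables; the loop added above turns each positional element of that literal into a keyed ir.StructKeyExpr, which the updated fixedlit case in complit.go below reads back via r.Sym(). A rough source-level analogy of the two literal shapes follows; the struct and its field names are purely illustrative, not the compiler's generated type.

	package main

	import "fmt"

	// closureHdr is an illustrative stand-in for the struct the compiler
	// builds for a closure value: a code pointer followed by captures.
	type closureHdr struct {
		F uintptr // code pointer
		x *int    // captured variable
	}

	func main() {
		captured := 42

		// Positional elements (what the old lowering resembled):
		a := closureHdr{0, &captured}

		// Keyed elements (what the new lowering emits, one per field):
		b := closureHdr{F: 0, x: &captured}

		fmt.Println(*a.x, *b.x) // 42 42
	}
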
diff --git a/src/cmd/compile/internal/walk/complit.go b/src/cmd/compile/internal/walk/complit.go
index abd920d646..6c6b4982a0 100644
--- a/src/cmd/compile/internal/walk/complit.go
+++ b/src/cmd/compile/internal/walk/complit.go
@@ -218,11 +218,11 @@ func fixedlit(ctxt initContext, kind initKind, n *ir.CompLitExpr, var_ ir.Node,
case ir.OSTRUCTLIT:
splitnode = func(rn ir.Node) (ir.Node, ir.Node) {
r := rn.(*ir.StructKeyExpr)
- if r.Field.IsBlank() || isBlank {
+ if r.Sym().IsBlank() || isBlank {
return ir.BlankNode, r.Value
}
ir.SetPos(r)
- return ir.NewSelectorExpr(base.Pos, ir.ODOT, var_, r.Field), r.Value
+ return ir.NewSelectorExpr(base.Pos, ir.ODOT, var_, r.Sym()), r.Value
}
default:
base.Fatalf("fixedlit bad op: %v", n.Op())
diff --git a/src/cmd/compile/internal/walk/convert.go b/src/cmd/compile/internal/walk/convert.go
index 26e17a126f..5297332f6b 100644
--- a/src/cmd/compile/internal/walk/convert.go
+++ b/src/cmd/compile/internal/walk/convert.go
@@ -499,7 +499,7 @@ func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
cheap := cheapExpr(n, init)
- slice := typecheck.MakeDotArgs(types.NewSlice(types.Types[types.TUNSAFEPTR]), originals)
+ slice := typecheck.MakeDotArgs(base.Pos, types.NewSlice(types.Types[types.TUNSAFEPTR]), originals)
slice.SetEsc(ir.EscNone)
init.Append(mkcall("checkptrArithmetic", nil, init, typecheck.ConvNop(cheap, types.Types[types.TUNSAFEPTR]), slice))
diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go
index b733d3a29f..845bf03657 100644
--- a/src/cmd/compile/internal/walk/order.go
+++ b/src/cmd/compile/internal/walk/order.go
@@ -7,7 +7,6 @@ package walk
import (
"fmt"
"go/constant"
- "internal/buildcfg"
"cmd/compile/internal/base"
"cmd/compile/internal/escape"
@@ -790,9 +789,7 @@ func (o *orderState) stmt(n ir.Node) {
n.Call = walkRecover(n.Call.(*ir.CallExpr), &init)
o.stmtList(init)
}
- if buildcfg.Experiment.RegabiDefer {
- o.wrapGoDefer(n)
- }
+ o.wrapGoDefer(n)
o.out = append(o.out, n)
o.cleanTemp(t)
@@ -1570,8 +1567,9 @@ func (o *orderState) wrapGoDefer(n *ir.GoDeferStmt) {
// only in-register results?
if len(callArgs) == 0 && call.Op() == ir.OCALLFUNC && callX.Type().NumResults() == 0 {
if c, ok := call.(*ir.CallExpr); ok && callX != nil && callX.Op() == ir.OCLOSURE {
- cloFunc := callX.(*ir.ClosureExpr).Func
- cloFunc.SetClosureCalled(false)
+ clo := callX.(*ir.ClosureExpr)
+ clo.Func.SetClosureCalled(false)
+ clo.IsGoWrap = true
c.PreserveClosure = true
}
return
@@ -1706,14 +1704,8 @@ func (o *orderState) wrapGoDefer(n *ir.GoDeferStmt) {
}
// Create a new no-argument function that we'll hand off to defer.
- var noFuncArgs []*ir.Field
- noargst := ir.NewFuncType(base.Pos, nil, noFuncArgs, nil)
- wrapGoDefer_prgen++
- outerfn := ir.CurFunc
- wrapname := fmt.Sprintf("%v·dwrap·%d", outerfn, wrapGoDefer_prgen)
- sym := types.LocalPkg.Lookup(wrapname)
- fn := typecheck.DeclFunc(sym, noargst)
- fn.SetIsHiddenClosure(true)
+ fn := ir.NewClosureFunc(base.Pos, true)
+ fn.Nname.SetType(types.NewSignature(types.LocalPkg, nil, nil, nil, nil))
fn.SetWrapper(true)
// helper for capturing reference to a var declared in an outer scope.
@@ -1743,7 +1735,6 @@ func (o *orderState) wrapGoDefer(n *ir.GoDeferStmt) {
if methSelectorExpr != nil {
methSelectorExpr.X = capName(callX.Pos(), fn, methSelectorExpr.X.(*ir.Name))
}
- ir.FinishCaptureNames(n.Pos(), outerfn, fn)
// This flags a builtin as opposed to a regular call.
irregular := (call.Op() != ir.OCALLFUNC &&
@@ -1757,32 +1748,18 @@ func (o *orderState) wrapGoDefer(n *ir.GoDeferStmt) {
}
newcall := mkNewCall(call.Pos(), op, callX, newCallArgs)
- // Type-check the result.
- if !irregular {
- typecheck.Call(newcall.(*ir.CallExpr))
- } else {
- typecheck.Stmt(newcall)
- }
-
// Finalize body, register function on the main decls list.
fn.Body = []ir.Node{newcall}
- typecheck.FinishFuncBody()
- typecheck.Func(fn)
- typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
+ ir.FinishCaptureNames(n.Pos(), ir.CurFunc, fn)
// Create closure expr
- clo := ir.NewClosureExpr(n.Pos(), fn)
- fn.OClosure = clo
- clo.SetType(fn.Type())
+ clo := typecheck.Expr(fn.OClosure).(*ir.ClosureExpr)
// Set escape properties for closure.
if n.Op() == ir.OGO {
- // For "go", assume that the closure is going to escape
- // (with an exception for the runtime, which doesn't
- // permit heap-allocated closures).
- if base.Ctxt.Pkgpath != "runtime" {
- clo.SetEsc(ir.EscHeap)
- }
+ // For "go", assume that the closure is going to escape.
+ clo.SetEsc(ir.EscHeap)
+ clo.IsGoWrap = true
} else {
// For defer, just use whatever result escape analysis
// has determined for the defer.
@@ -1793,7 +1770,7 @@ func (o *orderState) wrapGoDefer(n *ir.GoDeferStmt) {
}
// Create new top level call to closure over argless function.
- topcall := ir.NewCallExpr(n.Pos(), ir.OCALL, clo, []ir.Node{})
+ topcall := ir.NewCallExpr(n.Pos(), ir.OCALL, clo, nil)
typecheck.Call(topcall)
// Tag the call to insure that directClosureCall doesn't undo our work.
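
wrapGoDefer rewrites each go and defer statement so that the deferred or spawned call takes no arguments: the call arguments (and the method receiver, if any) are copied into fresh temporaries, and the original call is wrapped in an argument-less closure, now built with ir.NewClosureFunc and type-checked as an expression rather than declared as a named hidden function. At the source level the transformation corresponds roughly to the following conceptual illustration; the compiler performs it on IR, not source.

	package main

	import "fmt"

	func report(label string, v int) { fmt.Println(label, v) }

	func main() {
		v := 1

		// What the programmer writes: the argument is evaluated here,
		// at the defer statement.
		defer report("direct ", v)

		// Roughly what the wrapping produces: copy the argument into a
		// fresh variable and defer an argument-less closure over it.
		// The observable behavior is identical.
		v1 := v
		defer func() { report("wrapped", v1) }()

		v = 2 // affects neither deferred call
	}

Both deferred calls print 1, since in either form the argument value is fixed when the defer statement executes.
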
diff --git a/src/cmd/internal/goobj/builtinlist.go b/src/cmd/internal/goobj/builtinlist.go
index 9f248137da..608c0d7222 100644
--- a/src/cmd/internal/goobj/builtinlist.go
+++ b/src/cmd/internal/goobj/builtinlist.go
@@ -33,6 +33,7 @@ var builtins = [...]struct {
{"runtime.goPanicSlice3BU", 1},
{"runtime.goPanicSlice3C", 1},
{"runtime.goPanicSlice3CU", 1},
+ {"runtime.goPanicSliceConvert", 1},
{"runtime.printbool", 1},
{"runtime.printfloat", 1},
{"runtime.printint", 1},
@@ -129,6 +130,8 @@ var builtins = [...]struct {
{"runtime.makeslice64", 1},
{"runtime.makeslicecopy", 1},
{"runtime.growslice", 1},
+ {"runtime.unsafeslice", 1},
+ {"runtime.unsafeslice64", 1},
{"runtime.memmove", 1},
{"runtime.memclrNoHeapPointers", 1},
{"runtime.memclrHasPointers", 1},
@@ -203,7 +206,9 @@ var builtins = [...]struct {
{"runtime.newproc", 1},
{"runtime.panicoverflow", 1},
{"runtime.sigpanic", 1},
- {"runtime.gcWriteBarrier", 0},
+ {"runtime.gcWriteBarrier", 1},
+ {"runtime.duffzero", 1},
+ {"runtime.duffcopy", 1},
{"runtime.morestack", 0},
{"runtime.morestackc", 0},
{"runtime.morestack_noctxt", 0},
diff --git a/src/cmd/internal/goobj/mkbuiltin.go b/src/cmd/internal/goobj/mkbuiltin.go
index 18b969586c..c9995fcede 100644
--- a/src/cmd/internal/goobj/mkbuiltin.go
+++ b/src/cmd/internal/goobj/mkbuiltin.go
@@ -151,7 +151,9 @@ var fextras = [...]extra{
{"sigpanic", 1},
// compiler backend inserted calls
- {"gcWriteBarrier", 0}, // asm function, ABI0
+ {"gcWriteBarrier", 1},
+ {"duffzero", 1},
+ {"duffcopy", 1},
// assembler backend inserted calls
{"morestack", 0}, // asm function, ABI0
diff --git a/src/cmd/internal/obj/arm64/obj7.go b/src/cmd/internal/obj/arm64/obj7.go
index e41fb3bb75..31b7c43245 100644
--- a/src/cmd/internal/obj/arm64/obj7.go
+++ b/src/cmd/internal/obj/arm64/obj7.go
@@ -52,7 +52,7 @@ var complements = []obj.As{
}
func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
- // MOV g_stackguard(g), R1
+ // MOV g_stackguard(g), RT1
p = obj.Appendp(p, c.newprog)
p.As = AMOVD
@@ -63,7 +63,7 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p.From.Offset = 3 * int64(c.ctxt.Arch.PtrSize) // G.stackguard1
}
p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_R1
+ p.To.Reg = REGRT1
// Mark the stack bound check and morestack call async nonpreemptible.
// If we get preempted here, when resumed the preemption request is
@@ -74,25 +74,25 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
q := (*obj.Prog)(nil)
if framesize <= objabi.StackSmall {
// small stack: SP < stackguard
- // MOV SP, R2
- // CMP stackguard, R2
+ // MOV SP, RT2
+ // CMP stackguard, RT2
p = obj.Appendp(p, c.newprog)
p.As = AMOVD
p.From.Type = obj.TYPE_REG
p.From.Reg = REGSP
p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_R2
+ p.To.Reg = REGRT2
p = obj.Appendp(p, c.newprog)
p.As = ACMP
p.From.Type = obj.TYPE_REG
- p.From.Reg = REG_R1
- p.Reg = REG_R2
+ p.From.Reg = REGRT1
+ p.Reg = REGRT2
} else if framesize <= objabi.StackBig {
// large stack: SP-framesize < stackguard-StackSmall
- // SUB $(framesize-StackSmall), SP, R2
- // CMP stackguard, R2
+ // SUB $(framesize-StackSmall), SP, RT2
+ // CMP stackguard, RT2
p = obj.Appendp(p, c.newprog)
p.As = ASUB
@@ -100,13 +100,13 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p.From.Offset = int64(framesize) - objabi.StackSmall
p.Reg = REGSP
p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_R2
+ p.To.Reg = REGRT2
p = obj.Appendp(p, c.newprog)
p.As = ACMP
p.From.Type = obj.TYPE_REG
- p.From.Reg = REG_R1
- p.Reg = REG_R2
+ p.From.Reg = REGRT1
+ p.Reg = REGRT2
} else {
// Such a large stack we need to protect against underflow.
// The runtime guarantees SP > objabi.StackBig, but
@@ -115,10 +115,10 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
// stack guard to incorrectly succeed. We explicitly
// guard against underflow.
//
- // SUBS $(framesize-StackSmall), SP, R2
+ // SUBS $(framesize-StackSmall), SP, RT2
// // On underflow, jump to morestack
// BLO label_of_call_to_morestack
- // CMP stackguard, R2
+ // CMP stackguard, RT2
p = obj.Appendp(p, c.newprog)
p.As = ASUBS
@@ -126,7 +126,7 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p.From.Offset = int64(framesize) - objabi.StackSmall
p.Reg = REGSP
p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_R2
+ p.To.Reg = REGRT2
p = obj.Appendp(p, c.newprog)
q = p
@@ -136,8 +136,8 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p = obj.Appendp(p, c.newprog)
p.As = ACMP
p.From.Type = obj.TYPE_REG
- p.From.Reg = REG_R1
- p.Reg = REG_R2
+ p.From.Reg = REGRT1
+ p.Reg = REGRT2
}
// BLS do-morestack
@@ -161,17 +161,20 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
pcdata := c.ctxt.EmitEntryStackMap(c.cursym, spfix, c.newprog)
pcdata = c.ctxt.StartUnsafePoint(pcdata, c.newprog)
+ if q != nil {
+ q.To.SetTarget(pcdata)
+ }
+ bls.To.SetTarget(pcdata)
+
+ spill := c.cursym.Func().SpillRegisterArgs(pcdata, c.newprog)
+
// MOV LR, R3
- movlr := obj.Appendp(pcdata, c.newprog)
+ movlr := obj.Appendp(spill, c.newprog)
movlr.As = AMOVD
movlr.From.Type = obj.TYPE_REG
movlr.From.Reg = REGLINK
movlr.To.Type = obj.TYPE_REG
movlr.To.Reg = REG_R3
- if q != nil {
- q.To.SetTarget(movlr)
- }
- bls.To.SetTarget(movlr)
debug := movlr
if false {
@@ -196,7 +199,8 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
}
call.To.Sym = c.ctxt.Lookup(morestack)
- pcdata = c.ctxt.EndUnsafePoint(call, c.newprog, -1)
+ unspill := c.cursym.Func().UnspillRegisterArgs(call, c.newprog)
+ pcdata = c.ctxt.EndUnsafePoint(unspill, c.newprog, -1)
// B start
jmp := obj.Appendp(pcdata, c.newprog)
@@ -321,9 +325,9 @@ func (c *ctxt7) rewriteToUseGot(p *obj.Prog) {
// CALL REGTMP
var sym *obj.LSym
if p.As == obj.ADUFFZERO {
- sym = c.ctxt.Lookup("runtime.duffzero")
+ sym = c.ctxt.LookupABI("runtime.duffzero", obj.ABIInternal)
} else {
- sym = c.ctxt.Lookup("runtime.duffcopy")
+ sym = c.ctxt.LookupABI("runtime.duffcopy", obj.ABIInternal)
}
offset := p.To.Offset
p.As = AMOVD
@@ -631,38 +635,38 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
if c.cursym.Func().Text.From.Sym.Wrapper() {
// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
//
- // MOV g_panic(g), R1
+ // MOV g_panic(g), RT1
// CBNZ checkargp
// end:
// NOP
// ... function body ...
// checkargp:
- // MOV panic_argp(R1), R2
- // ADD $(autosize+8), RSP, R3
- // CMP R2, R3
+ // MOV panic_argp(RT1), RT2
+ // ADD $(autosize+8), RSP, R20
+ // CMP RT2, R20
// BNE end
- // ADD $8, RSP, R4
- // MOVD R4, panic_argp(R1)
+ // ADD $8, RSP, R20
+ // MOVD R20, panic_argp(RT1)
// B end
//
// The NOP is needed to give the jumps somewhere to land.
// It is a liblink NOP, not an ARM64 NOP: it encodes to 0 instruction bytes.
q = q1
- // MOV g_panic(g), R1
+ // MOV g_panic(g), RT1
q = obj.Appendp(q, c.newprog)
q.As = AMOVD
q.From.Type = obj.TYPE_MEM
q.From.Reg = REGG
q.From.Offset = 4 * int64(c.ctxt.Arch.PtrSize) // G.panic
q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R1
+ q.To.Reg = REGRT1
- // CBNZ R1, checkargp
+ // CBNZ RT1, checkargp
cbnz := obj.Appendp(q, c.newprog)
cbnz.As = ACBNZ
cbnz.From.Type = obj.TYPE_REG
- cbnz.From.Reg = REG_R1
+ cbnz.From.Reg = REGRT1
cbnz.To.Type = obj.TYPE_BRANCH
// Empty branch target at the top of the function body
@@ -674,33 +678,33 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
for last = end; last.Link != nil; last = last.Link {
}
- // MOV panic_argp(R1), R2
+ // MOV panic_argp(RT1), RT2
mov := obj.Appendp(last, c.newprog)
mov.As = AMOVD
mov.From.Type = obj.TYPE_MEM
- mov.From.Reg = REG_R1
+ mov.From.Reg = REGRT1
mov.From.Offset = 0 // Panic.argp
mov.To.Type = obj.TYPE_REG
- mov.To.Reg = REG_R2
+ mov.To.Reg = REGRT2
// CBNZ branches to the MOV above
cbnz.To.SetTarget(mov)
- // ADD $(autosize+8), SP, R3
+ // ADD $(autosize+8), SP, R20
q = obj.Appendp(mov, c.newprog)
q.As = AADD
q.From.Type = obj.TYPE_CONST
q.From.Offset = int64(c.autosize) + 8
q.Reg = REGSP
q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R3
+ q.To.Reg = REG_R20
- // CMP R2, R3
+ // CMP RT2, R20
q = obj.Appendp(q, c.newprog)
q.As = ACMP
q.From.Type = obj.TYPE_REG
- q.From.Reg = REG_R2
- q.Reg = REG_R3
+ q.From.Reg = REGRT2
+ q.Reg = REG_R20
// BNE end
q = obj.Appendp(q, c.newprog)
@@ -708,22 +712,22 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
q.To.Type = obj.TYPE_BRANCH
q.To.SetTarget(end)
- // ADD $8, SP, R4
+ // ADD $8, SP, R20
q = obj.Appendp(q, c.newprog)
q.As = AADD
q.From.Type = obj.TYPE_CONST
q.From.Offset = 8
q.Reg = REGSP
q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R4
+ q.To.Reg = REG_R20
- // MOV R4, panic_argp(R1)
+ // MOV R20, panic_argp(RT1)
q = obj.Appendp(q, c.newprog)
q.As = AMOVD
q.From.Type = obj.TYPE_REG
- q.From.Reg = REG_R4
+ q.From.Reg = REG_R20
q.To.Type = obj.TYPE_MEM
- q.To.Reg = REG_R1
+ q.To.Reg = REGRT1
q.To.Offset = 0 // Panic.argp
// B end
diff --git a/src/cmd/internal/obj/x86/obj6.go b/src/cmd/internal/obj/x86/obj6.go
index e2732d53e3..183ca2ebe9 100644
--- a/src/cmd/internal/obj/x86/obj6.go
+++ b/src/cmd/internal/obj/x86/obj6.go
@@ -35,7 +35,6 @@ import (
"cmd/internal/objabi"
"cmd/internal/src"
"cmd/internal/sys"
- "internal/buildcfg"
"log"
"math"
"path"
@@ -647,13 +646,12 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
var regg int16
if !p.From.Sym.NoSplit() || p.From.Sym.Wrapper() {
- if ctxt.Arch.Family == sys.AMD64 && buildcfg.Experiment.RegabiG && cursym.ABI() == obj.ABIInternal {
+ if ctxt.Arch.Family == sys.AMD64 && cursym.ABI() == obj.ABIInternal {
regg = REGG // use the g register directly in ABIInternal
} else {
p = obj.Appendp(p, newprog)
regg = REG_CX
if ctxt.Arch.Family == sys.AMD64 {
- // Using this register means that stacksplit works w/ //go:registerparams even when !buildcfg.Experiment.RegabiG
regg = REGG // == REG_R14
}
p = load_g(ctxt, p, newprog, regg) // load g into regg
diff --git a/src/cmd/internal/objabi/funcid.go b/src/cmd/internal/objabi/funcid.go
index 93ebd7be94..d881cdd061 100644
--- a/src/cmd/internal/objabi/funcid.go
+++ b/src/cmd/internal/objabi/funcid.go
@@ -74,7 +74,6 @@ var funcIDs = map[string]FuncID{
// Don't show in call stack but otherwise not special.
"deferreturn": FuncID_wrapper,
"runOpenDeferFrame": FuncID_wrapper,
- "reflectcallSave": FuncID_wrapper,
"deferCallSave": FuncID_wrapper,
}
diff --git a/src/cmd/link/internal/ld/dwarf_test.go b/src/cmd/link/internal/ld/dwarf_test.go
index 2f59c2fe0a..543dd5caac 100644
--- a/src/cmd/link/internal/ld/dwarf_test.go
+++ b/src/cmd/link/internal/ld/dwarf_test.go
@@ -101,8 +101,11 @@ func gobuild(t *testing.T, dir string, testfile string, gcflags string) *builtFi
}
cmd := exec.Command(testenv.GoToolPath(t), "build", gcflags, "-o", dst, src)
- if b, err := cmd.CombinedOutput(); err != nil {
- t.Logf("build: %s\n", b)
+ b, err := cmd.CombinedOutput()
+ if len(b) != 0 {
+ t.Logf("## build output:\n%s", b)
+ }
+ if err != nil {
t.Fatalf("build error: %v", err)
}
diff --git a/src/cmd/link/internal/ld/symtab.go b/src/cmd/link/internal/ld/symtab.go
index 00f557875a..1f5e333cfd 100644
--- a/src/cmd/link/internal/ld/symtab.go
+++ b/src/cmd/link/internal/ld/symtab.go
@@ -300,6 +300,7 @@ func putplan9sym(ctxt *Link, ldr *loader.Loader, s loader.Sym, char SymbolType)
ctxt.Out.Write8(uint8(t + 0x80)) /* 0x80 is variable length */
name := ldr.SymName(s)
+ name = mangleABIName(ctxt, ldr, s, name)
ctxt.Out.WriteString(name)
ctxt.Out.Write8(0)
diff --git a/src/cmd/link/link_test.go b/src/cmd/link/link_test.go
index 7230054bed..fed9c7bc3f 100644
--- a/src/cmd/link/link_test.go
+++ b/src/cmd/link/link_test.go
@@ -545,14 +545,13 @@ const testFuncAlignSrc = `
package main
import (
"fmt"
- "reflect"
)
func alignPc()
+var alignPcFnAddr uintptr
func main() {
- addr := reflect.ValueOf(alignPc).Pointer()
- if (addr % 512) != 0 {
- fmt.Printf("expected 512 bytes alignment, got %v\n", addr)
+ if alignPcFnAddr % 512 != 0 {
+ fmt.Printf("expected 512 bytes alignment, got %v\n", alignPcFnAddr)
} else {
fmt.Printf("PASS")
}
@@ -567,6 +566,9 @@ TEXT ·alignPc(SB),NOSPLIT, $0-0
PCALIGN $512
MOVD $3, R1
RET
+
+GLOBL ·alignPcFnAddr(SB),RODATA,$8
+DATA ·alignPcFnAddr(SB)/8,$·alignPc(SB)
`
// TestFuncAlign verifies that the address of a function can be aligned
diff --git a/src/go/constant/kind_string.go b/src/go/constant/kind_string.go
new file mode 100644
index 0000000000..700332511d
--- /dev/null
+++ b/src/go/constant/kind_string.go
@@ -0,0 +1,28 @@
+// Code generated by "stringer -type Kind"; DO NOT EDIT.
+
+package constant
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[Unknown-0]
+ _ = x[Bool-1]
+ _ = x[String-2]
+ _ = x[Int-3]
+ _ = x[Float-4]
+ _ = x[Complex-5]
+}
+
+const _Kind_name = "UnknownBoolStringIntFloatComplex"
+
+var _Kind_index = [...]uint8{0, 7, 11, 17, 20, 25, 32}
+
+func (i Kind) String() string {
+ if i < 0 || i >= Kind(len(_Kind_index)-1) {
+ return "Kind(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Kind_name[_Kind_index[i]:_Kind_index[i+1]]
+}
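
The generated String method slices the packed _Kind_name string using the offsets in _Kind_index, so each Kind value prints as its identifier and out-of-range values fall back to "Kind(n)". With this file in place (in Go versions that include it), a Kind formats directly:

	package main

	import (
		"fmt"
		"go/constant"
	)

	func main() {
		v := constant.MakeInt64(42)
		fmt.Println(v.Kind())          // Int
		fmt.Println(constant.Float)    // Float
		fmt.Println(constant.Kind(42)) // Kind(42)
	}
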
diff --git a/src/go/constant/value.go b/src/go/constant/value.go
index 78cb3f896f..014e873100 100644
--- a/src/go/constant/value.go
+++ b/src/go/constant/value.go
@@ -24,6 +24,8 @@ import (
"unicode/utf8"
)
+//go:generate stringer -type Kind
+
// Kind specifies the kind of value represented by a Value.
type Kind int
diff --git a/src/go/internal/gcimporter/gcimporter_test.go b/src/go/internal/gcimporter/gcimporter_test.go
index 3c76aafde3..286b8a6347 100644
--- a/src/go/internal/gcimporter/gcimporter_test.go
+++ b/src/go/internal/gcimporter/gcimporter_test.go
@@ -138,6 +138,7 @@ func TestVersionHandling(t *testing.T) {
skipSpecialPlatforms(t)
// This package only handles gc export data.
+ // Disable test until we put in the new export version.
if runtime.Compiler != "gc" {
t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
}
diff --git a/src/go/internal/gcimporter/iimport.go b/src/go/internal/gcimporter/iimport.go
index 76d47d08f1..b300860e94 100644
--- a/src/go/internal/gcimporter/iimport.go
+++ b/src/go/internal/gcimporter/iimport.go
@@ -41,6 +41,16 @@ func (r *intReader) uint64() uint64 {
return i
}
+// Keep this in sync with constants in iexport.go.
+const (
+ iexportVersionGo1_11 = 0
+ iexportVersionPosCol = 1
+ iexportVersionGenerics = 2
+
+ // Start of the unstable series of versions, remove "+ n" before release.
+ iexportVersionCurrent = iexportVersionGenerics + 1
+)
+
const predeclReserved = 32
type itag uint64
@@ -56,6 +66,8 @@ const (
signatureType
structType
interfaceType
+ typeParamType
+ instType
)
// iImportData imports a package from the serialized package data
@@ -63,7 +75,7 @@ const (
// If the export data version is not recognized or the format is otherwise
// compromised, an error is returned.
func iImportData(fset *token.FileSet, imports map[string]*types.Package, dataReader *bufio.Reader, path string) (pkg *types.Package, err error) {
- const currentVersion = 1
+ const currentVersion = iexportVersionCurrent
version := int64(-1)
defer func() {
if e := recover(); e != nil {
@@ -79,9 +91,13 @@ func iImportData(fset *token.FileSet, imports map[string]*types.Package, dataRea
version = int64(r.uint64())
switch version {
- case currentVersion, 0:
+ case currentVersion, iexportVersionPosCol, iexportVersionGo1_11:
default:
- errorf("unknown iexport format version %d", version)
+ if version > iexportVersionGenerics {
+ errorf("unstable iexport format version %d, just rebuild compiler and std library", version)
+ } else {
+ errorf("unknown iexport format version %d", version)
+ }
}
sLen := int64(r.uint64())
@@ -95,8 +111,9 @@ func iImportData(fset *token.FileSet, imports map[string]*types.Package, dataRea
declData := data[sLen:]
p := iimporter{
- ipath: path,
- version: int(version),
+ exportVersion: version,
+ ipath: path,
+ version: int(version),
stringData: stringData,
stringCache: make(map[uint64]string),
@@ -172,8 +189,9 @@ func iImportData(fset *token.FileSet, imports map[string]*types.Package, dataRea
}
type iimporter struct {
- ipath string
- version int
+ exportVersion int64
+ ipath string
+ version int
stringData []byte
stringCache map[uint64]string
@@ -272,11 +290,24 @@ func (r *importReader) obj(name string) {
r.declare(types.NewConst(pos, r.currPkg, name, typ, val))
case 'F':
+ if r.p.exportVersion >= iexportVersionGenerics {
+ numTparams := r.uint64()
+ if numTparams > 0 {
+ errorf("unexpected tparam")
+ }
+ }
sig := r.signature(nil)
r.declare(types.NewFunc(pos, r.currPkg, name, sig))
case 'T':
+ if r.p.exportVersion >= iexportVersionGenerics {
+ numTparams := r.uint64()
+ if numTparams > 0 {
+ errorf("unexpected tparam")
+ }
+ }
+
// Types can be recursive. We need to setup a stub
// declaration before recursing.
obj := types.NewTypeName(pos, r.currPkg, name, nil)
@@ -549,6 +580,14 @@ func (r *importReader) doType(base *types.Named) types.Type {
typ := types.NewInterfaceType(methods, embeddeds)
r.p.interfaceList = append(r.p.interfaceList, typ)
return typ
+
+ case typeParamType:
+ errorf("do not handle type param types yet")
+ return nil
+
+ case instType:
+ errorf("do not handle instantiated types yet")
+ return nil
}
}
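
The new version constants turn the importer's check into a three-way decision: accepted versions, unstable (too new) versions, and unknown ones. A small self-contained sketch of that gate, with stand-in constants mirroring the ones added above:

package main

import "fmt"

// Stand-ins for the iexport version constants introduced above.
const (
	versionGo1_11   = 0
	versionPosCol   = 1
	versionGenerics = 2
	versionCurrent  = versionGenerics + 1 // unstable until release
)

// checkVersion mirrors the switch in iImportData: known versions pass,
// a version newer than the generics version asks for a rebuild, and
// anything else is reported as unknown.
func checkVersion(v int64) error {
	switch v {
	case versionCurrent, versionPosCol, versionGo1_11:
		return nil
	default:
		if v > versionGenerics {
			return fmt.Errorf("unstable iexport format version %d, just rebuild compiler and std library", v)
		}
		return fmt.Errorf("unknown iexport format version %d", v)
	}
}

func main() {
	for _, v := range []int64{0, 1, 3, 4, -1} {
		fmt.Println(v, checkVersion(v))
	}
}
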
diff --git a/src/go/types/api.go b/src/go/types/api.go
index 8c0d9d22bf..30f8ded744 100644
--- a/src/go/types/api.go
+++ b/src/go/types/api.go
@@ -255,7 +255,7 @@ func (tv TypeAndValue) HasOk() bool {
// _Inferred reports the _Inferred type arguments and signature
// for a parameterized function call that uses type inference.
type _Inferred struct {
- Targs []Type
+ TArgs []Type
Sig *Signature
}
diff --git a/src/go/types/api_test.go b/src/go/types/api_test.go
index f37b91d5a4..5a2d4a4ca3 100644
--- a/src/go/types/api_test.go
+++ b/src/go/types/api_test.go
@@ -353,7 +353,6 @@ func TestTypesInfo(t *testing.T) {
}
for _, test := range tests {
- ResetId() // avoid renumbering of type parameter ids when adding tests
if strings.HasPrefix(test.src, genericPkg) && !typeparams.Enabled {
continue
}
diff --git a/src/go/types/api_typeparams.go b/src/go/types/api_typeparams.go
index ed744c4dba..ae2c5a7fd0 100644
--- a/src/go/types/api_typeparams.go
+++ b/src/go/types/api_typeparams.go
@@ -19,6 +19,11 @@ type (
func NewSum(types []Type) Type { return _NewSum(types) }
+// NewTypeParam returns a new TypeParam.
+func NewTypeParam(obj *TypeName, index int, bound Type) *TypeParam {
+ return (*Checker)(nil).newTypeParam(obj, index, bound)
+}
+
func (s *Signature) TParams() []*TypeName { return s._TParams() }
func (s *Signature) SetTParams(tparams []*TypeName) { s._SetTParams(tparams) }
diff --git a/src/go/types/api_typeparams_test.go b/src/go/types/api_typeparams_test.go
index 15c9bf09f9..517c58505b 100644
--- a/src/go/types/api_typeparams_test.go
+++ b/src/go/types/api_typeparams_test.go
@@ -109,7 +109,7 @@ func TestInferredInfo(t *testing.T) {
panic(fmt.Sprintf("unexpected call expression type %T", call))
}
if ExprString(fun) == test.fun {
- targs = inf.Targs
+ targs = inf.TArgs
sig = inf.Sig
break
}
diff --git a/src/go/types/call.go b/src/go/types/call.go
index 631ea426c6..3a04121e98 100644
--- a/src/go/types/call.go
+++ b/src/go/types/call.go
@@ -575,17 +575,38 @@ func (check *Checker) selector(x *operand, e *ast.SelectorExpr) {
check.recordSelection(e, MethodExpr, x.typ, m, index, indirect)
+ sig := m.typ.(*Signature)
+ if sig.recv == nil {
+ check.error(e, _InvalidDeclCycle, "illegal cycle in method declaration")
+ goto Error
+ }
+
// the receiver type becomes the type of the first function
// argument of the method expression's function type
var params []*Var
- sig := m.typ.(*Signature)
if sig.params != nil {
params = sig.params.vars
}
+ // Be consistent about named/unnamed parameters.
+ needName := true
+ for _, param := range params {
+ if param.Name() == "" {
+ needName = false
+ break
+ }
+ }
+ name := ""
+ if needName {
+ name = sig.recv.name
+ if name == "" {
+ name = "_"
+ }
+ }
+ params = append([]*Var{NewVar(sig.recv.pos, sig.recv.pkg, name, x.typ)}, params...)
x.mode = value
x.typ = &Signature{
tparams: sig.tparams,
- params: NewTuple(append([]*Var{NewVar(token.NoPos, check.pkg, "_", x.typ)}, params...)...),
+ params: NewTuple(params...),
results: sig.results,
variadic: sig.variadic,
}
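
The selector change above makes a method expression's signature carry the receiver as an ordinary first parameter, reusing the receiver's own name and position instead of a synthetic blank parameter. At the language level the resulting type is easy to observe; a minimal example (Point and Add are illustrative, not from this change):

package main

import "fmt"

type Point struct{ X, Y int }

func (p Point) Add(q Point) Point { return Point{p.X + q.X, p.Y + q.Y} }

func main() {
	// The method expression Point.Add has type func(Point, Point) Point:
	// the receiver becomes the first parameter of the function type.
	add := Point.Add
	fmt.Println(add(Point{1, 2}, Point{3, 4})) // {4 6}
}
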
diff --git a/src/go/types/check.go b/src/go/types/check.go
index a923c3c612..e82056e722 100644
--- a/src/go/types/check.go
+++ b/src/go/types/check.go
@@ -86,6 +86,7 @@ type Checker struct {
pkg *Package
*Info
version version // accepted language version
+ nextID uint64 // unique Id for type parameters (first valid Id is 1)
objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info
impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package
posMap map[*Interface][]token.Pos // maps interface types to lists of embedded interface positions
diff --git a/src/go/types/check_test.go b/src/go/types/check_test.go
index 6c3b630a1b..0926ac7431 100644
--- a/src/go/types/check_test.go
+++ b/src/go/types/check_test.go
@@ -202,7 +202,7 @@ func asGoVersion(s string) string {
return ""
}
-func checkFiles(t *testing.T, sizes Sizes, goVersion string, filenames []string, srcs [][]byte, manual bool) {
+func testFiles(t *testing.T, sizes Sizes, filenames []string, srcs [][]byte, manual bool) {
if len(filenames) == 0 {
t.Fatal("no source files")
}
@@ -225,6 +225,7 @@ func checkFiles(t *testing.T, sizes Sizes, goVersion string, filenames []string,
}
// if no Go version is given, consider the package name
+ goVersion := *goVersion
if goVersion == "" {
goVersion = asGoVersion(pkgName)
}
@@ -297,29 +298,48 @@ func checkFiles(t *testing.T, sizes Sizes, goVersion string, filenames []string,
}
}
-// TestManual is for manual testing of input files, provided as a list
-// of arguments after the test arguments (and a separating "--"). For
-// instance, to check the files foo.go and bar.go, use:
+// TestManual is for manual testing of a package - either provided
+// as a list of filenames belonging to the package, or a directory
+// name containing the package files - after the test arguments
+// (and a separating "--"). For instance, to test the package made
+// of the files foo.go and bar.go, use:
//
// go test -run Manual -- foo.go bar.go
//
-// Provide the -verify flag to verify errors against ERROR comments in
-// the input files rather than having a list of errors reported.
-// The accepted Go language version can be controlled with the -lang flag.
+// If no source arguments are provided, the file testdata/manual.go2
+// is used instead.
+// Provide the -verify flag to verify errors against ERROR comments
+// in the input files rather than having a list of errors reported.
+// The accepted Go language version can be controlled with the -lang
+// flag.
func TestManual(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
filenames := flag.Args()
if len(filenames) == 0 {
- return
+ filenames = []string{filepath.FromSlash("testdata/manual.go2")}
}
- testenv.MustHaveGoBuild(t)
+
+ info, err := os.Stat(filenames[0])
+ if err != nil {
+ t.Fatalf("TestManual: %v", err)
+ }
+
DefPredeclaredTestFuncs()
- testPkg(t, filenames, *goVersion, true)
+ if info.IsDir() {
+ if len(filenames) > 1 {
+ t.Fatal("TestManual: must have only one directory argument")
+ }
+ testDir(t, filenames[0], true)
+ } else {
+ testPkg(t, filenames, true)
+ }
}
func TestLongConstants(t *testing.T) {
format := "package longconst\n\nconst _ = %s\nconst _ = %s // ERROR excessively long constant"
src := fmt.Sprintf(format, strings.Repeat("1", 9999), strings.Repeat("1", 10001))
- checkFiles(t, nil, "", []string{"longconst.go"}, [][]byte{[]byte(src)}, false)
+ testFiles(t, nil, []string{"longconst.go"}, [][]byte{[]byte(src)}, false)
}
// TestIndexRepresentability tests that constant index operands must
@@ -327,7 +347,7 @@ func TestLongConstants(t *testing.T) {
// represent larger values.
func TestIndexRepresentability(t *testing.T) {
const src = "package index\n\nvar s []byte\nvar _ = s[int64 /* ERROR \"int64\\(1\\) << 40 \\(.*\\) overflows int\" */ (1) << 40]"
- checkFiles(t, &StdSizes{4, 4}, "", []string{"index.go"}, [][]byte{[]byte(src)}, false)
+ testFiles(t, &StdSizes{4, 4}, []string{"index.go"}, [][]byte{[]byte(src)}, false)
}
func TestIssue46453(t *testing.T) {
@@ -335,17 +355,17 @@ func TestIssue46453(t *testing.T) {
t.Skip("type params are enabled")
}
const src = "package p\ntype _ comparable // ERROR \"undeclared name: comparable\""
- checkFiles(t, nil, "", []string{"issue46453.go"}, [][]byte{[]byte(src)}, false)
+ testFiles(t, nil, []string{"issue46453.go"}, [][]byte{[]byte(src)}, false)
}
-func TestCheck(t *testing.T) { DefPredeclaredTestFuncs(); testDir(t, "check") }
-func TestExamples(t *testing.T) { testDir(t, "examples") }
-func TestFixedbugs(t *testing.T) { testDir(t, "fixedbugs") }
+func TestCheck(t *testing.T) { DefPredeclaredTestFuncs(); testDirFiles(t, "testdata/check", false) }
+func TestExamples(t *testing.T) { testDirFiles(t, "testdata/examples", false) }
+func TestFixedbugs(t *testing.T) { testDirFiles(t, "testdata/fixedbugs", false) }
-func testDir(t *testing.T, dir string) {
+func testDirFiles(t *testing.T, dir string, manual bool) {
testenv.MustHaveGoBuild(t)
+ dir = filepath.FromSlash(dir)
- dir = filepath.Join("testdata", dir)
fis, err := os.ReadDir(dir)
if err != nil {
t.Error(err)
@@ -355,28 +375,38 @@ func testDir(t *testing.T, dir string) {
for _, fi := range fis {
path := filepath.Join(dir, fi.Name())
- // if fi is a directory, its files make up a single package
- var filenames []string
+ // If fi is a directory, its files make up a single package.
if fi.IsDir() {
- fis, err := os.ReadDir(path)
- if err != nil {
- t.Error(err)
- continue
- }
- for _, fi := range fis {
- filenames = append(filenames, filepath.Join(path, fi.Name()))
- }
+ testDir(t, path, manual)
} else {
- filenames = []string{path}
+ t.Run(filepath.Base(path), func(t *testing.T) {
+ testPkg(t, []string{path}, manual)
+ })
}
- t.Run(filepath.Base(path), func(t *testing.T) {
- testPkg(t, filenames, "", false)
- })
}
}
+func testDir(t *testing.T, dir string, manual bool) {
+ testenv.MustHaveGoBuild(t)
+
+ fis, err := os.ReadDir(dir)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ var filenames []string
+ for _, fi := range fis {
+ filenames = append(filenames, filepath.Join(dir, fi.Name()))
+ }
+
+ t.Run(filepath.Base(dir), func(t *testing.T) {
+ testPkg(t, filenames, manual)
+ })
+}
+
// TODO(rFindley) reconcile the different test setup in go/types with types2.
-func testPkg(t *testing.T, filenames []string, goVersion string, manual bool) {
+func testPkg(t *testing.T, filenames []string, manual bool) {
srcs := make([][]byte, len(filenames))
for i, filename := range filenames {
src, err := os.ReadFile(filename)
@@ -385,5 +415,5 @@ func testPkg(t *testing.T, filenames []string, goVersion string, manual bool) {
}
srcs[i] = src
}
- checkFiles(t, nil, goVersion, filenames, srcs, manual)
+ testFiles(t, nil, filenames, srcs, manual)
}
diff --git a/src/go/types/decl.go b/src/go/types/decl.go
index 9211febc6d..12ee51b920 100644
--- a/src/go/types/decl.go
+++ b/src/go/types/decl.go
@@ -333,7 +333,7 @@ func (check *Checker) validType(typ Type, path []Object) typeInfo {
switch t.info {
case unknown:
t.info = marked
- t.info = check.validType(t.orig, append(path, t.obj)) // only types of current package added to path
+ t.info = check.validType(t.fromRHS, append(path, t.obj)) // only types of current package added to path
case marked:
// cycle detected
for i, tn := range path {
@@ -692,9 +692,8 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *ast.TypeSpec, def *Named) {
} else {
// defined type declaration
- named := check.newNamed(obj, nil, nil)
+ named := check.newNamed(obj, nil, nil, nil, nil)
def.setUnderlying(named)
- obj.typ = named // make sure recursive type declarations terminate
if tparams := typeparams.Get(tdecl); tparams != nil {
check.openScope(tdecl, "type parameters")
@@ -703,7 +702,7 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *ast.TypeSpec, def *Named) {
}
// determine underlying type of named
- named.orig = check.definedType(tdecl.Type, named)
+ named.fromRHS = check.definedType(tdecl.Type, named)
// The underlying type of named may be itself a named type that is
// incomplete:
@@ -718,7 +717,7 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *ast.TypeSpec, def *Named) {
// and which has as its underlying type the named type B.
// Determine the (final, unnamed) underlying type by resolving
// any forward chain.
- // TODO(gri) Investigate if we can just use named.origin here
+ // TODO(gri) Investigate if we can just use named.fromRHS here
// and rely on lazy computation of the underlying type.
named.underlying = under(named)
}
diff --git a/src/go/types/instantiate.go b/src/go/types/instantiate.go
new file mode 100644
index 0000000000..6f8c4983f4
--- /dev/null
+++ b/src/go/types/instantiate.go
@@ -0,0 +1,63 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "fmt"
+ "go/token"
+)
+
+// Instantiate instantiates the type typ with the given type arguments.
+// typ must be a *Named or a *Signature type, it must be generic, and
+// its number of type parameters must match the number of provided type
+// arguments. The result is a new, instantiated (not generic) type of
+// the same kind (either a *Named or a *Signature). The type arguments
+// are not checked against the constraints of the type parameters.
+// Any methods attached to a *Named are simply copied; they are not
+// instantiated.
+func Instantiate(pos token.Pos, typ Type, targs []Type) (res Type) {
+ // TODO(gri) This code is basically identical to the prolog
+ // in Checker.instantiate. Factor.
+ var tparams []*TypeName
+ switch t := typ.(type) {
+ case *Named:
+ tparams = t.tparams
+ case *Signature:
+ tparams = t.tparams
+ defer func() {
+ // If we had an unexpected failure somewhere don't panic below when
+ // asserting res.(*Signature). Check for *Signature in case Typ[Invalid]
+ // is returned.
+ if _, ok := res.(*Signature); !ok {
+ return
+ }
+ // If the signature doesn't use its type parameters, subst
+ // will not make a copy. In that case, make a copy now (so
+ // we can set tparams to nil w/o causing side-effects).
+ if t == res {
+ copy := *t
+ res = &copy
+ }
+ // After instantiating a generic signature, it is not generic
+ // anymore; we need to set tparams to nil.
+ res.(*Signature).tparams = nil
+ }()
+
+ default:
+ panic(fmt.Sprintf("%v: cannot instantiate %v", pos, typ))
+ }
+
+ // the number of supplied types must match the number of type parameters
+ if len(targs) != len(tparams) {
+ panic(fmt.Sprintf("%v: got %d arguments but %d type parameters", pos, len(targs), len(tparams)))
+ }
+
+ if len(tparams) == 0 {
+ return typ // nothing to do (minor optimization)
+ }
+
+ smap := makeSubstMap(tparams, targs)
+ return (*Checker)(nil).subst(pos, typ, smap)
+}
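
Instantiate's prolog is essentially: check arity, build a substitution map from type parameters to type arguments, then substitute. A toy analogue over plain strings (not the go/types representation, purely illustrative of the shape):

package main

import (
	"fmt"
	"strings"
)

// generic is a stand-in for a parameterized type: its type-parameter
// names plus a body template.
type generic struct {
	tparams []string
	body    string
}

// instantiate follows the same outline as Instantiate above: the number
// of arguments must match the number of type parameters, and with no
// parameters there is nothing to do.
func instantiate(g generic, targs []string) (string, error) {
	if len(targs) != len(g.tparams) {
		return "", fmt.Errorf("got %d arguments but %d type parameters", len(targs), len(g.tparams))
	}
	if len(g.tparams) == 0 {
		return g.body, nil
	}
	out := g.body
	for i, p := range g.tparams {
		out = strings.ReplaceAll(out, p, targs[i]) // naive textual substitution
	}
	return out, nil
}

func main() {
	list := generic{tparams: []string{"T"}, body: "struct{ head T; tail []T }"}
	s, _ := instantiate(list, []string{"int"})
	fmt.Println(s) // struct{ head int; tail []int }
}
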
diff --git a/src/go/types/interface.go b/src/go/types/interface.go
new file mode 100644
index 0000000000..fd3fe0ef91
--- /dev/null
+++ b/src/go/types/interface.go
@@ -0,0 +1,357 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "fmt"
+ "go/ast"
+ "go/internal/typeparams"
+ "go/token"
+ "sort"
+)
+
+func (check *Checker) interfaceType(ityp *Interface, iface *ast.InterfaceType, def *Named) {
+ var tlist *ast.Ident // "type" name of first entry in a type list declaration
+ var types []ast.Expr
+ for _, f := range iface.Methods.List {
+ if len(f.Names) > 0 {
+ // We have a method with name f.Names[0], or a type
+ // of a type list (name.Name == "type").
+ // (The parser ensures that there's only one method
+ // and we don't care if a constructed AST has more.)
+ name := f.Names[0]
+ if name.Name == "_" {
+ check.errorf(name, _BlankIfaceMethod, "invalid method name _")
+ continue // ignore
+ }
+
+ if name.Name == "type" {
+ // Always collect all type list entries, even from
+ // different type lists, under the assumption that
+ // the author intended to include all types.
+ types = append(types, f.Type)
+ if tlist != nil && tlist != name {
+ check.errorf(name, _Todo, "cannot have multiple type lists in an interface")
+ }
+ tlist = name
+ continue
+ }
+
+ typ := check.typ(f.Type)
+ sig, _ := typ.(*Signature)
+ if sig == nil {
+ if typ != Typ[Invalid] {
+ check.invalidAST(f.Type, "%s is not a method signature", typ)
+ }
+ continue // ignore
+ }
+
+ // Always type-check method type parameters but complain if they are not enabled.
+ // (This extra check is needed here because interface method signatures don't have
+ // a receiver specification.)
+ if sig.tparams != nil {
+ var at positioner = f.Type
+ if tparams := typeparams.Get(f.Type); tparams != nil {
+ at = tparams
+ }
+ check.errorf(at, _Todo, "methods cannot have type parameters")
+ }
+
+ // use named receiver type if available (for better error messages)
+ var recvTyp Type = ityp
+ if def != nil {
+ recvTyp = def
+ }
+ sig.recv = NewVar(name.Pos(), check.pkg, "", recvTyp)
+
+ m := NewFunc(name.Pos(), check.pkg, name.Name, sig)
+ check.recordDef(name, m)
+ ityp.methods = append(ityp.methods, m)
+ } else {
+ // We have an embedded type. completeInterface will
+ // eventually verify that we have an interface.
+ ityp.embeddeds = append(ityp.embeddeds, check.typ(f.Type))
+ check.posMap[ityp] = append(check.posMap[ityp], f.Type.Pos())
+ }
+ }
+
+ // type constraints
+ ityp.types = _NewSum(check.collectTypeConstraints(iface.Pos(), types))
+
+ if len(ityp.methods) == 0 && ityp.types == nil && len(ityp.embeddeds) == 0 {
+ // empty interface
+ ityp.allMethods = markComplete
+ return
+ }
+
+ // sort for API stability
+ sortMethods(ityp.methods)
+ sortTypes(ityp.embeddeds)
+
+ check.later(func() { check.completeInterface(iface.Pos(), ityp) })
+}
+
+func (check *Checker) collectTypeConstraints(pos token.Pos, types []ast.Expr) []Type {
+ list := make([]Type, 0, len(types)) // assume all types are correct
+ for _, texpr := range types {
+ if texpr == nil {
+ check.invalidAST(atPos(pos), "missing type constraint")
+ continue
+ }
+ list = append(list, check.varType(texpr))
+ }
+
+ // Ensure that each type is only present once in the type list. Types may be
+ // interfaces, which may not be complete yet. It's ok to do this check at the
+ // end because it's not a requirement for correctness of the code.
+ // Note: This is a quadratic algorithm, but type lists tend to be short.
+ check.later(func() {
+ for i, t := range list {
+ if t := asInterface(t); t != nil {
+ check.completeInterface(types[i].Pos(), t)
+ }
+ if includes(list[:i], t) {
+ check.softErrorf(types[i], _Todo, "duplicate type %s in type list", t)
+ }
+ }
+ })
+
+ return list
+}
+
+// includes reports whether typ is in list.
+func includes(list []Type, typ Type) bool {
+ for _, e := range list {
+ if Identical(typ, e) {
+ return true
+ }
+ }
+ return false
+}
+
+func (check *Checker) completeInterface(pos token.Pos, ityp *Interface) {
+ if ityp.allMethods != nil {
+ return
+ }
+
+ // completeInterface may be called via the LookupFieldOrMethod,
+ // MissingMethod, Identical, or IdenticalIgnoreTags external API
+ // in which case check will be nil. In this case, type-checking
+ // must be finished and all interfaces should have been completed.
+ if check == nil {
+ panic("internal error: incomplete interface")
+ }
+ completeInterface(check, pos, ityp)
+}
+
+func completeInterface(check *Checker, pos token.Pos, ityp *Interface) {
+ assert(ityp.allMethods == nil)
+
+ if check != nil && trace {
+ // Types don't generally have position information.
+ // If we don't have a valid pos provided, try to use
+ // one close enough.
+ if !pos.IsValid() && len(ityp.methods) > 0 {
+ pos = ityp.methods[0].pos
+ }
+
+ check.trace(pos, "complete %s", ityp)
+ check.indent++
+ defer func() {
+ check.indent--
+ check.trace(pos, "=> %s (methods = %v, types = %v)", ityp, ityp.allMethods, ityp.allTypes)
+ }()
+ }
+
+ // An infinitely expanding interface (due to a cycle) is detected
+ // elsewhere (Checker.validType), so here we simply assume we only
+ // have valid interfaces. Mark the interface as complete to avoid
+ // infinite recursion if the validType check occurs later for some
+ // reason.
+ ityp.allMethods = markComplete
+
+ // Methods of embedded interfaces are collected unchanged; i.e., the identity
+ // of a method I.m's Func Object of an interface I is the same as that of
+ // the method m in an interface that embeds interface I. On the other hand,
+ // if a method is embedded via multiple overlapping embedded interfaces, we
+ // don't provide a guarantee which "original m" got chosen for the embedding
+ // interface. See also issue #34421.
+ //
+ // If we don't care to provide this identity guarantee anymore, instead of
+ // reusing the original method in embeddings, we can clone the method's Func
+ // Object and give it the position of a corresponding embedded interface. Then
+ // we can get rid of the mpos map below and simply use the cloned method's
+ // position.
+
+ var todo []*Func
+ var seen objset
+ var methods []*Func
+ mpos := make(map[*Func]token.Pos) // method specification or method embedding position, for good error messages
+ addMethod := func(pos token.Pos, m *Func, explicit bool) {
+ switch other := seen.insert(m); {
+ case other == nil:
+ methods = append(methods, m)
+ mpos[m] = pos
+ case explicit:
+ if check == nil {
+ panic(fmt.Sprintf("%v: duplicate method %s", m.pos, m.name))
+ }
+ check.errorf(atPos(pos), _DuplicateDecl, "duplicate method %s", m.name)
+ check.errorf(atPos(mpos[other.(*Func)]), _DuplicateDecl, "\tother declaration of %s", m.name) // secondary error, \t indented
+ default:
+ // We have a duplicate method name in an embedded (not explicitly declared) method.
+ // Check method signatures after all types are computed (issue #33656).
+ // If we're pre-go1.14 (overlapping embeddings are not permitted), report that
+ // error here as well (even though we could do it eagerly) because it's the same
+ // error message.
+ if check == nil {
+ // check method signatures after all locally embedded interfaces are computed
+ todo = append(todo, m, other.(*Func))
+ break
+ }
+ check.later(func() {
+ if !check.allowVersion(m.pkg, 1, 14) || !check.identical(m.typ, other.Type()) {
+ check.errorf(atPos(pos), _DuplicateDecl, "duplicate method %s", m.name)
+ check.errorf(atPos(mpos[other.(*Func)]), _DuplicateDecl, "\tother declaration of %s", m.name) // secondary error, \t indented
+ }
+ })
+ }
+ }
+
+ for _, m := range ityp.methods {
+ addMethod(m.pos, m, true)
+ }
+
+ // collect types
+ allTypes := ityp.types
+
+ var posList []token.Pos
+ if check != nil {
+ posList = check.posMap[ityp]
+ }
+ for i, typ := range ityp.embeddeds {
+ var pos token.Pos // embedding position
+ if posList != nil {
+ pos = posList[i]
+ }
+ utyp := under(typ)
+ etyp := asInterface(utyp)
+ if etyp == nil {
+ if utyp != Typ[Invalid] {
+ var format string
+ if _, ok := utyp.(*_TypeParam); ok {
+ format = "%s is a type parameter, not an interface"
+ } else {
+ format = "%s is not an interface"
+ }
+ if check != nil {
+ // TODO: correct error code.
+ check.errorf(atPos(pos), _InvalidIfaceEmbed, format, typ)
+ } else {
+ panic(fmt.Sprintf(format, typ))
+ }
+ }
+ continue
+ }
+ if etyp.allMethods == nil {
+ completeInterface(check, pos, etyp)
+ }
+ for _, m := range etyp.allMethods {
+ addMethod(pos, m, false) // use embedding position pos rather than m.pos
+ }
+ allTypes = intersect(allTypes, etyp.allTypes)
+ }
+
+ // process todo's (this only happens if check == nil)
+ for i := 0; i < len(todo); i += 2 {
+ m := todo[i]
+ other := todo[i+1]
+ if !Identical(m.typ, other.typ) {
+ panic(fmt.Sprintf("%v: duplicate method %s", m.pos, m.name))
+ }
+ }
+
+ if methods != nil {
+ sort.Sort(byUniqueMethodName(methods))
+ ityp.allMethods = methods
+ }
+ ityp.allTypes = allTypes
+}
+
+// intersect computes the intersection of the types x and y.
+// Note: An incoming nil type stands for the top type. A top
+// type result is returned as nil.
+func intersect(x, y Type) (r Type) {
+ defer func() {
+ if r == theTop {
+ r = nil
+ }
+ }()
+
+ switch {
+ case x == theBottom || y == theBottom:
+ return theBottom
+ case x == nil || x == theTop:
+ return y
+ case y == nil || x == theTop:
+ return x
+ }
+
+ xtypes := unpackType(x)
+ ytypes := unpackType(y)
+ // Compute the list rtypes which includes only
+ // types that are in both xtypes and ytypes.
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix this
+ var rtypes []Type
+ for _, x := range xtypes {
+ if includes(ytypes, x) {
+ rtypes = append(rtypes, x)
+ }
+ }
+
+ if rtypes == nil {
+ return theBottom
+ }
+ return _NewSum(rtypes)
+}
+
+func sortTypes(list []Type) {
+ sort.Stable(byUniqueTypeName(list))
+}
+
+// byUniqueTypeName named type lists can be sorted by their unique type names.
+type byUniqueTypeName []Type
+
+func (a byUniqueTypeName) Len() int { return len(a) }
+func (a byUniqueTypeName) Less(i, j int) bool { return sortName(a[i]) < sortName(a[j]) }
+func (a byUniqueTypeName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+func sortName(t Type) string {
+ if named := asNamed(t); named != nil {
+ return named.obj.Id()
+ }
+ return ""
+}
+
+func sortMethods(list []*Func) {
+ sort.Sort(byUniqueMethodName(list))
+}
+
+func assertSortedMethods(list []*Func) {
+ if !debug {
+ panic("internal error: assertSortedMethods called outside debug mode")
+ }
+ if !sort.IsSorted(byUniqueMethodName(list)) {
+ panic("internal error: methods not sorted")
+ }
+}
+
+// byUniqueMethodName method lists can be sorted by their unique method names.
+type byUniqueMethodName []*Func
+
+func (a byUniqueMethodName) Len() int { return len(a) }
+func (a byUniqueMethodName) Less(i, j int) bool { return a[i].Id() < a[j].Id() }
+func (a byUniqueMethodName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
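
completeInterface folds the type lists of embedded interfaces together with intersect, where a nil list means "top" (no constraint) and an empty intersection collapses to the bottom type. A simplified analogue over string sets, shown only to illustrate the quadratic intersection:

package main

import "fmt"

// intersect mirrors the shape of the function above: nil stands for the
// top set (no constraint), and the result keeps only elements present in
// both inputs. Quadratic, like the original.
func intersect(x, y []string) []string {
	switch {
	case x == nil:
		return y
	case y == nil:
		return x
	}
	var r []string
	for _, t := range x {
		for _, u := range y {
			if t == u {
				r = append(r, t)
				break
			}
		}
	}
	return r
}

func main() {
	fmt.Println(intersect([]string{"int", "string", "bool"}, []string{"string", "bool", "float64"})) // [string bool]
	fmt.Println(intersect(nil, []string{"int"}))                                                     // [int]
}
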
diff --git a/src/go/types/lookup.go b/src/go/types/lookup.go
index 9c7bfd4bb9..3e89b6cc2b 100644
--- a/src/go/types/lookup.go
+++ b/src/go/types/lookup.go
@@ -327,11 +327,15 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
return m, f
}
+ // both methods must have the same number of type parameters
ftyp := f.typ.(*Signature)
mtyp := m.typ.(*Signature)
if len(ftyp.tparams) != len(mtyp.tparams) {
return m, f
}
+ if len(ftyp.tparams) > 0 {
+ panic("internal error: method with type parameters")
+ }
// If the methods have type parameters we don't care whether they
// are the same or not, as long as they match up. Use unification
@@ -385,6 +389,9 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
if len(ftyp.tparams) != len(mtyp.tparams) {
return m, f
}
+ if len(ftyp.tparams) > 0 {
+ panic("internal error: method with type parameters")
+ }
// If V is a (instantiated) generic type, its methods are still
// parameterized using the original (declaration) receiver type
@@ -412,7 +419,7 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
// TODO(gri) is this always correct? what about type bounds?
// (Alternative is to rename/subst type parameters and compare.)
u := newUnifier(check, true)
- u.x.init(ftyp.tparams)
+ u.x.init(ftyp.rparams)
if !u.unify(ftyp, mtyp) {
return m, f
}
diff --git a/src/go/types/resolver.go b/src/go/types/resolver.go
index 114647a2ff..4892218b75 100644
--- a/src/go/types/resolver.go
+++ b/src/go/types/resolver.go
@@ -276,7 +276,7 @@ func (check *Checker) collectObjects() {
}
if name == "init" {
- check.errorf(d.spec.Name, _InvalidInitDecl, "cannot import package as init - init must be a func")
+ check.errorf(d.spec, _InvalidInitDecl, "cannot import package as init - init must be a func")
return
}
diff --git a/src/go/types/sanitize.go b/src/go/types/sanitize.go
index 727ec173ea..88fc3f8377 100644
--- a/src/go/types/sanitize.go
+++ b/src/go/types/sanitize.go
@@ -27,9 +27,9 @@ func sanitizeInfo(info *Info) {
inferred := getInferred(info)
for e, inf := range inferred {
changed := false
- for i, targ := range inf.Targs {
+ for i, targ := range inf.TArgs {
if typ := s.typ(targ); typ != targ {
- inf.Targs[i] = typ
+ inf.TArgs[i] = typ
changed = true
}
}
@@ -138,8 +138,8 @@ func (s sanitizer) typ(typ Type) Type {
if debug && t.check != nil {
panic("internal error: Named.check != nil")
}
- if orig := s.typ(t.orig); orig != t.orig {
- t.orig = orig
+ if orig := s.typ(t.fromRHS); orig != t.fromRHS {
+ t.fromRHS = orig
}
if under := s.typ(t.underlying); under != t.underlying {
t.underlying = under
diff --git a/src/go/types/signature.go b/src/go/types/signature.go
new file mode 100644
index 0000000000..5489b493ba
--- /dev/null
+++ b/src/go/types/signature.go
@@ -0,0 +1,274 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "fmt"
+ "go/ast"
+ "go/internal/typeparams"
+ "go/token"
+)
+
+// funcType type-checks a function or method type.
+func (check *Checker) funcType(sig *Signature, recvPar *ast.FieldList, ftyp *ast.FuncType) {
+ check.openScope(ftyp, "function")
+ check.scope.isFunc = true
+ check.recordScope(ftyp, check.scope)
+ sig.scope = check.scope
+ defer check.closeScope()
+
+ var recvTyp ast.Expr // rewritten receiver type; valid if != nil
+ if recvPar != nil && len(recvPar.List) > 0 {
+ // collect generic receiver type parameters, if any
+ // - a receiver type parameter is like any other type parameter, except that it is declared implicitly
+ // - the receiver specification acts as local declaration for its type parameters, which may be blank
+ _, rname, rparams := check.unpackRecv(recvPar.List[0].Type, true)
+ if len(rparams) > 0 {
+ // Blank identifiers don't get declared and regular type-checking of the instantiated
+ // parameterized receiver type expression fails in Checker.collectParams of receiver.
+ // Identify blank type parameters and substitute each with a unique new identifier named
+ // "n_" (where n is the parameter index) and which cannot conflict with any user-defined
+ // name.
+ var smap map[*ast.Ident]*ast.Ident // substitution map from "_" to "n_" identifiers
+ for i, p := range rparams {
+ if p.Name == "_" {
+ new := *p
+ new.Name = fmt.Sprintf("%d_", i)
+ rparams[i] = &new // use n_ identifier instead of _ so it can be looked up
+ if smap == nil {
+ smap = make(map[*ast.Ident]*ast.Ident)
+ }
+ smap[p] = &new
+ }
+ }
+ if smap != nil {
+ // blank identifiers were found => use rewritten receiver type
+ recvTyp = isubst(recvPar.List[0].Type, smap)
+ }
+ sig.rparams = check.declareTypeParams(nil, rparams)
+ // determine receiver type to get its type parameters
+ // and the respective type parameter bounds
+ var recvTParams []*TypeName
+ if rname != nil {
+ // recv should be a Named type (otherwise an error is reported elsewhere)
+ // Also: Don't report an error via genericType since it will be reported
+ // again when we type-check the signature.
+ // TODO(gri) maybe the receiver should be marked as invalid instead?
+ if recv := asNamed(check.genericType(rname, false)); recv != nil {
+ recvTParams = recv.tparams
+ }
+ }
+ // provide type parameter bounds
+ // - only do this if we have the right number (otherwise an error is reported elsewhere)
+ if len(sig.rparams) == len(recvTParams) {
+ // We have a list of *TypeNames but we need a list of Types.
+ list := make([]Type, len(sig.rparams))
+ for i, t := range sig.rparams {
+ list[i] = t.typ
+ }
+ smap := makeSubstMap(recvTParams, list)
+ for i, tname := range sig.rparams {
+ bound := recvTParams[i].typ.(*_TypeParam).bound
+ // bound is (possibly) parameterized in the context of the
+ // receiver type declaration. Substitute parameters for the
+ // current context.
+ // TODO(gri) should we assume now that bounds always exist?
+ // (no bound == empty interface)
+ if bound != nil {
+ bound = check.subst(tname.pos, bound, smap)
+ tname.typ.(*_TypeParam).bound = bound
+ }
+ }
+ }
+ }
+ }
+
+ if tparams := typeparams.Get(ftyp); tparams != nil {
+ sig.tparams = check.collectTypeParams(tparams)
+ // Always type-check method type parameters but complain that they are not allowed.
+ // (A separate check is needed when type-checking interface method signatures because
+ // they don't have a receiver specification.)
+ if recvPar != nil {
+ check.errorf(tparams, _Todo, "methods cannot have type parameters")
+ }
+ }
+
+ // Value (non-type) parameters' scope starts in the function body. Use a temporary scope for their
+ // declarations and then squash that scope into the parent scope (and report any redeclarations at
+ // that time).
+ scope := NewScope(check.scope, token.NoPos, token.NoPos, "function body (temp. scope)")
+ recvList, _ := check.collectParams(scope, recvPar, recvTyp, false) // use rewritten receiver type, if any
+ params, variadic := check.collectParams(scope, ftyp.Params, nil, true)
+ results, _ := check.collectParams(scope, ftyp.Results, nil, false)
+ scope.squash(func(obj, alt Object) {
+ check.errorf(obj, _DuplicateDecl, "%s redeclared in this block", obj.Name())
+ check.reportAltDecl(alt)
+ })
+
+ if recvPar != nil {
+ // recv parameter list present (may be empty)
+ // spec: "The receiver is specified via an extra parameter section preceding the
+ // method name. That parameter section must declare a single parameter, the receiver."
+ var recv *Var
+ switch len(recvList) {
+ case 0:
+ // error reported by resolver
+ recv = NewParam(0, nil, "", Typ[Invalid]) // ignore recv below
+ default:
+ // more than one receiver
+ check.error(recvList[len(recvList)-1], _BadRecv, "method must have exactly one receiver")
+ fallthrough // continue with first receiver
+ case 1:
+ recv = recvList[0]
+ }
+
+ // TODO(gri) We should delay rtyp expansion to when we actually need the
+ // receiver; thus all checks here should be delayed to later.
+ rtyp, _ := deref(recv.typ)
+ rtyp = expand(rtyp)
+
+ // spec: "The receiver type must be of the form T or *T where T is a type name."
+ // (ignore invalid types - error was reported before)
+ if t := rtyp; t != Typ[Invalid] {
+ var err string
+ if T := asNamed(t); T != nil {
+ // spec: "The type denoted by T is called the receiver base type; it must not
+ // be a pointer or interface type and it must be declared in the same package
+ // as the method."
+ if T.obj.pkg != check.pkg {
+ err = "type not defined in this package"
+ } else {
+ switch u := optype(T).(type) {
+ case *Basic:
+ // unsafe.Pointer is treated like a regular pointer
+ if u.kind == UnsafePointer {
+ err = "unsafe.Pointer"
+ }
+ case *Pointer, *Interface:
+ err = "pointer or interface type"
+ }
+ }
+ } else {
+ err = "basic or unnamed type"
+ }
+ if err != "" {
+ check.errorf(recv, _InvalidRecv, "invalid receiver %s (%s)", recv.typ, err)
+ // ok to continue
+ }
+ }
+ sig.recv = recv
+ }
+
+ sig.params = NewTuple(params...)
+ sig.results = NewTuple(results...)
+ sig.variadic = variadic
+}
+
+// collectParams declares the parameters of list in scope and returns the corresponding
+// variable list. If type0 != nil, it is used instead of the first type in list.
+func (check *Checker) collectParams(scope *Scope, list *ast.FieldList, type0 ast.Expr, variadicOk bool) (params []*Var, variadic bool) {
+ if list == nil {
+ return
+ }
+
+ var named, anonymous bool
+ for i, field := range list.List {
+ ftype := field.Type
+ if i == 0 && type0 != nil {
+ ftype = type0
+ }
+ if t, _ := ftype.(*ast.Ellipsis); t != nil {
+ ftype = t.Elt
+ if variadicOk && i == len(list.List)-1 && len(field.Names) <= 1 {
+ variadic = true
+ } else {
+ check.softErrorf(t, _MisplacedDotDotDot, "can only use ... with final parameter in list")
+ // ignore ... and continue
+ }
+ }
+ typ := check.varType(ftype)
+ // The parser ensures that f.Tag is nil and we don't
+ // care if a constructed AST contains a non-nil tag.
+ if len(field.Names) > 0 {
+ // named parameter
+ for _, name := range field.Names {
+ if name.Name == "" {
+ check.invalidAST(name, "anonymous parameter")
+ // ok to continue
+ }
+ par := NewParam(name.Pos(), check.pkg, name.Name, typ)
+ check.declare(scope, name, par, scope.pos)
+ params = append(params, par)
+ }
+ named = true
+ } else {
+ // anonymous parameter
+ par := NewParam(ftype.Pos(), check.pkg, "", typ)
+ check.recordImplicit(field, par)
+ params = append(params, par)
+ anonymous = true
+ }
+ }
+
+ if named && anonymous {
+ check.invalidAST(list, "list contains both named and anonymous parameters")
+ // ok to continue
+ }
+
+ // For a variadic function, change the last parameter's type from T to []T.
+ // Since we type-checked T rather than ...T, we also need to retroactively
+ // record the type for ...T.
+ if variadic {
+ last := params[len(params)-1]
+ last.typ = &Slice{elem: last.typ}
+ check.recordTypeAndValue(list.List[len(list.List)-1].Type, typexpr, last.typ, nil)
+ }
+
+ return
+}
+
+// isubst returns an x with identifiers substituted per the substitution map smap.
+// isubst only handles the case of (valid) method receiver type expressions correctly.
+func isubst(x ast.Expr, smap map[*ast.Ident]*ast.Ident) ast.Expr {
+ switch n := x.(type) {
+ case *ast.Ident:
+ if alt := smap[n]; alt != nil {
+ return alt
+ }
+ case *ast.StarExpr:
+ X := isubst(n.X, smap)
+ if X != n.X {
+ new := *n
+ new.X = X
+ return &new
+ }
+ case *ast.IndexExpr:
+ elems := typeparams.UnpackExpr(n.Index)
+ var newElems []ast.Expr
+ for i, elem := range elems {
+ new := isubst(elem, smap)
+ if new != elem {
+ if newElems == nil {
+ newElems = make([]ast.Expr, len(elems))
+ copy(newElems, elems)
+ }
+ newElems[i] = new
+ }
+ }
+ if newElems != nil {
+ index := typeparams.PackExpr(newElems)
+ new := *n
+ new.Index = index
+ return &new
+ }
+ case *ast.ParenExpr:
+ return isubst(n.X, smap) // no need to keep parentheses
+ default:
+ // Other receiver type expressions are invalid.
+ // It's fine to ignore those here as they will
+ // be checked elsewhere.
+ }
+ return x
+}
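
collectParams rewrites a final ...T parameter's type to []T and records that type retroactively. That is the same []T a variadic parameter has inside the function body, which a short example makes concrete:

package main

import "fmt"

func sum(nums ...int) int {
	// Inside the function, nums has type []int, the slice type the
	// checker records for the ...int parameter.
	total := 0
	for _, n := range nums {
		total += n
	}
	return total
}

func main() {
	fmt.Println(sum(1, 2, 3))        // 6
	fmt.Println(sum([]int{4, 5}...)) // 9
}
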
diff --git a/src/go/types/sizeof_test.go b/src/go/types/sizeof_test.go
index 5a9d07ca41..3af9079a85 100644
--- a/src/go/types/sizeof_test.go
+++ b/src/go/types/sizeof_test.go
@@ -30,7 +30,7 @@ func TestSizeof(t *testing.T) {
{Interface{}, 60, 120},
{Map{}, 16, 32},
{Chan{}, 12, 24},
- {Named{}, 64, 128},
+ {Named{}, 68, 136},
{_TypeParam{}, 28, 48},
{instance{}, 44, 88},
{bottom{}, 0, 0},
diff --git a/src/go/types/struct.go b/src/go/types/struct.go
new file mode 100644
index 0000000000..1fec9ea527
--- /dev/null
+++ b/src/go/types/struct.go
@@ -0,0 +1,154 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "go/ast"
+ "go/token"
+ "strconv"
+)
+
+func (check *Checker) structType(styp *Struct, e *ast.StructType) {
+ list := e.Fields
+ if list == nil {
+ return
+ }
+
+ // struct fields and tags
+ var fields []*Var
+ var tags []string
+
+ // for double-declaration checks
+ var fset objset
+
+ // current field typ and tag
+ var typ Type
+ var tag string
+ add := func(ident *ast.Ident, embedded bool, pos token.Pos) {
+ if tag != "" && tags == nil {
+ tags = make([]string, len(fields))
+ }
+ if tags != nil {
+ tags = append(tags, tag)
+ }
+
+ name := ident.Name
+ fld := NewField(pos, check.pkg, name, typ, embedded)
+ // spec: "Within a struct, non-blank field names must be unique."
+ if name == "_" || check.declareInSet(&fset, pos, fld) {
+ fields = append(fields, fld)
+ check.recordDef(ident, fld)
+ }
+ }
+
+ // addInvalid adds an embedded field of invalid type to the struct for
+ // fields with errors; this keeps the number of struct fields in sync
+ // with the source as long as the fields are _ or have different names
+ // (issue #25627).
+ addInvalid := func(ident *ast.Ident, pos token.Pos) {
+ typ = Typ[Invalid]
+ tag = ""
+ add(ident, true, pos)
+ }
+
+ for _, f := range list.List {
+ typ = check.varType(f.Type)
+ tag = check.tag(f.Tag)
+ if len(f.Names) > 0 {
+ // named fields
+ for _, name := range f.Names {
+ add(name, false, name.Pos())
+ }
+ } else {
+ // embedded field
+ // spec: "An embedded type must be specified as a type name T or as a
+ // pointer to a non-interface type name *T, and T itself may not be a
+ // pointer type."
+ pos := f.Type.Pos()
+ name := embeddedFieldIdent(f.Type)
+ if name == nil {
+ // TODO(rFindley): using invalidAST here causes test failures (all
+ // errors should have codes). Clean this up.
+ check.errorf(f.Type, _Todo, "invalid AST: embedded field type %s has no name", f.Type)
+ name = ast.NewIdent("_")
+ name.NamePos = pos
+ addInvalid(name, pos)
+ continue
+ }
+ add(name, true, pos)
+
+ // Because we have a name, typ must be of the form T or *T, where T is the name
+ // of a (named or alias) type, and t (= deref(typ)) must be the type of T.
+ // We must delay this check to the end because we don't want to instantiate
+ // (via under(t)) a possibly incomplete type.
+
+ // for use in the closure below
+ embeddedTyp := typ
+ embeddedPos := f.Type
+
+ check.later(func() {
+ t, isPtr := deref(embeddedTyp)
+ switch t := optype(t).(type) {
+ case *Basic:
+ if t == Typ[Invalid] {
+ // error was reported before
+ return
+ }
+ // unsafe.Pointer is treated like a regular pointer
+ if t.kind == UnsafePointer {
+ check.errorf(embeddedPos, _InvalidPtrEmbed, "embedded field type cannot be unsafe.Pointer")
+ }
+ case *Pointer:
+ check.errorf(embeddedPos, _InvalidPtrEmbed, "embedded field type cannot be a pointer")
+ case *Interface:
+ if isPtr {
+ check.errorf(embeddedPos, _InvalidPtrEmbed, "embedded field type cannot be a pointer to an interface")
+ }
+ }
+ })
+ }
+ }
+
+ styp.fields = fields
+ styp.tags = tags
+}
+
+func embeddedFieldIdent(e ast.Expr) *ast.Ident {
+ switch e := e.(type) {
+ case *ast.Ident:
+ return e
+ case *ast.StarExpr:
+ // *T is valid, but **T is not
+ if _, ok := e.X.(*ast.StarExpr); !ok {
+ return embeddedFieldIdent(e.X)
+ }
+ case *ast.SelectorExpr:
+ return e.Sel
+ case *ast.IndexExpr:
+ return embeddedFieldIdent(e.X)
+ }
+ return nil // invalid embedded field
+}
+
+func (check *Checker) declareInSet(oset *objset, pos token.Pos, obj Object) bool {
+ if alt := oset.insert(obj); alt != nil {
+ check.errorf(atPos(pos), _DuplicateDecl, "%s redeclared", obj.Name())
+ check.reportAltDecl(alt)
+ return false
+ }
+ return true
+}
+
+func (check *Checker) tag(t *ast.BasicLit) string {
+ if t != nil {
+ if t.Kind == token.STRING {
+ if val, err := strconv.Unquote(t.Value); err == nil {
+ return val
+ }
+ }
+ check.invalidAST(t, "incorrect tag syntax: %q", t.Value)
+ }
+ return ""
+}
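
check.tag hands the tag literal, quotes and all, to strconv.Unquote, so both interpreted and raw (backquoted) tag strings come back as the plain tag value. A quick standalone check of that behavior:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// A struct tag reaches the checker as a string literal, quotes included;
	// Unquote recovers the tag value itself.
	for _, lit := range []string{"`json:\"name\"`", `"a b c"`} {
		val, err := strconv.Unquote(lit)
		fmt.Printf("%s -> %q (err %v)\n", lit, val, err)
	}
}
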
diff --git a/src/go/types/subst.go b/src/go/types/subst.go
index 931375f1f2..47b0c279db 100644
--- a/src/go/types/subst.go
+++ b/src/go/types/subst.go
@@ -121,91 +121,100 @@ func (check *Checker) instantiate(pos token.Pos, typ Type, targs []Type, poslist
// check bounds
for i, tname := range tparams {
- tpar := tname.typ.(*_TypeParam)
- iface := tpar.Bound()
- if iface.Empty() {
- continue // no type bound
- }
-
- targ := targs[i]
-
// best position for error reporting
pos := pos
if i < len(poslist) {
pos = poslist[i]
}
- // The type parameter bound is parameterized with the same type parameters
- // as the instantiated type; before we can use it for bounds checking we
- // need to instantiate it with the type arguments with which we instantiate
- // the parameterized type.
- iface = check.subst(pos, iface, smap).(*Interface)
-
- // targ must implement iface (methods)
- // - check only if we have methods
- check.completeInterface(token.NoPos, iface)
- if len(iface.allMethods) > 0 {
- // If the type argument is a pointer to a type parameter, the type argument's
- // method set is empty.
- // TODO(gri) is this what we want? (spec question)
- if base, isPtr := deref(targ); isPtr && asTypeParam(base) != nil {
- check.errorf(atPos(pos), 0, "%s has no methods", targ)
- break
- }
- if m, wrong := check.missingMethod(targ, iface, true); m != nil {
- // TODO(gri) needs to print updated name to avoid major confusion in error message!
- // (print warning for now)
- // Old warning:
- // check.softErrorf(pos, "%s does not satisfy %s (warning: name not updated) = %s (missing method %s)", targ, tpar.bound, iface, m)
- if m.name == "==" {
- // We don't want to report "missing method ==".
- check.softErrorf(atPos(pos), 0, "%s does not satisfy comparable", targ)
- } else if wrong != nil {
- // TODO(gri) This can still report uninstantiated types which makes the error message
- // more difficult to read then necessary.
- // TODO(rFindley) should this use parentheses rather than ':' for qualification?
- check.softErrorf(atPos(pos), _Todo,
- "%s does not satisfy %s: wrong method signature\n\tgot %s\n\twant %s",
- targ, tpar.bound, wrong, m,
- )
- } else {
- check.softErrorf(atPos(pos), 0, "%s does not satisfy %s (missing method %s)", targ, tpar.bound, m.name)
- }
- break
- }
+ // stop checking bounds after the first failure
+ if !check.satisfies(pos, targs[i], tname.typ.(*_TypeParam), smap) {
+ break
}
+ }
- // targ's underlying type must also be one of the interface types listed, if any
- if iface.allTypes == nil {
- continue // nothing to do
- }
+ return check.subst(pos, typ, smap)
+}
- // If targ is itself a type parameter, each of its possible types, but at least one, must be in the
- // list of iface types (i.e., the targ type list must be a non-empty subset of the iface types).
- if targ := asTypeParam(targ); targ != nil {
- targBound := targ.Bound()
- if targBound.allTypes == nil {
- check.softErrorf(atPos(pos), _Todo, "%s does not satisfy %s (%s has no type constraints)", targ, tpar.bound, targ)
- break
- }
- for _, t := range unpackType(targBound.allTypes) {
- if !iface.isSatisfiedBy(t) {
- // TODO(gri) match this error message with the one below (or vice versa)
- check.softErrorf(atPos(pos), 0, "%s does not satisfy %s (%s type constraint %s not found in %s)", targ, tpar.bound, targ, t, iface.allTypes)
- break
- }
+// satisfies reports whether the type argument targ satisfies the constraint of type
+// parameter tpar (after any of its type parameters have been substituted through smap).
+// A suitable error is reported if the result is false.
+func (check *Checker) satisfies(pos token.Pos, targ Type, tpar *_TypeParam, smap *substMap) bool {
+ iface := tpar.Bound()
+ if iface.Empty() {
+ return true // no type bound
+ }
+
+ // The type parameter bound is parameterized with the same type parameters
+ // as the instantiated type; before we can use it for bounds checking we
+ // need to instantiate it with the type arguments with which we instantiate
+ // the parameterized type.
+ iface = check.subst(pos, iface, smap).(*Interface)
+
+ // targ must implement iface (methods)
+ // - check only if we have methods
+ check.completeInterface(token.NoPos, iface)
+ if len(iface.allMethods) > 0 {
+ // If the type argument is a pointer to a type parameter, the type argument's
+ // method set is empty.
+ // TODO(gri) is this what we want? (spec question)
+ if base, isPtr := deref(targ); isPtr && asTypeParam(base) != nil {
+ check.errorf(atPos(pos), 0, "%s has no methods", targ)
+ return false
+ }
+ if m, wrong := check.missingMethod(targ, iface, true); m != nil {
+ // TODO(gri) needs to print updated name to avoid major confusion in error message!
+ // (print warning for now)
+ // Old warning:
+ // check.softErrorf(pos, "%s does not satisfy %s (warning: name not updated) = %s (missing method %s)", targ, tpar.bound, iface, m)
+ if m.name == "==" {
+ // We don't want to report "missing method ==".
+ check.softErrorf(atPos(pos), 0, "%s does not satisfy comparable", targ)
+ } else if wrong != nil {
+ // TODO(gri) This can still report uninstantiated types which makes the error message
+ // more difficult to read than necessary.
+ // TODO(rFindley) should this use parentheses rather than ':' for qualification?
+ check.softErrorf(atPos(pos), _Todo,
+ "%s does not satisfy %s: wrong method signature\n\tgot %s\n\twant %s",
+ targ, tpar.bound, wrong, m,
+ )
+ } else {
+ check.softErrorf(atPos(pos), 0, "%s does not satisfy %s (missing method %s)", targ, tpar.bound, m.name)
}
- break
+ return false
}
+ }
- // Otherwise, targ's type or underlying type must also be one of the interface types listed, if any.
- if !iface.isSatisfiedBy(targ) {
- check.softErrorf(atPos(pos), _Todo, "%s does not satisfy %s (%s or %s not found in %s)", targ, tpar.bound, targ, under(targ), iface.allTypes)
- break
+ // targ's underlying type must also be one of the interface types listed, if any
+ if iface.allTypes == nil {
+ return true // nothing to do
+ }
+
+ // If targ is itself a type parameter, each of its possible types, but at least one, must be in the
+ // list of iface types (i.e., the targ type list must be a non-empty subset of the iface types).
+ if targ := asTypeParam(targ); targ != nil {
+ targBound := targ.Bound()
+ if targBound.allTypes == nil {
+ check.softErrorf(atPos(pos), _Todo, "%s does not satisfy %s (%s has no type constraints)", targ, tpar.bound, targ)
+ return false
+ }
+ for _, t := range unpackType(targBound.allTypes) {
+ if !iface.isSatisfiedBy(t) {
+ // TODO(gri) match this error message with the one below (or vice versa)
+ check.softErrorf(atPos(pos), 0, "%s does not satisfy %s (%s type constraint %s not found in %s)", targ, tpar.bound, targ, t, iface.allTypes)
+ return false
+ }
}
+ return false
}
- return check.subst(pos, typ, smap)
+ // Otherwise, targ's type or underlying type must also be one of the interface types listed, if any.
+ if !iface.isSatisfiedBy(targ) {
+ check.softErrorf(atPos(pos), _Todo, "%s does not satisfy %s (%s not found in %s)", targ, tpar.bound, under(targ), iface.allTypes)
+ return false
+ }
+
+ return true
}
// subst returns the type typ with its type parameters tpars replaced by
@@ -311,6 +320,9 @@ func (subst *subster) typ(typ Type) Type {
embeddeds, ecopied := subst.typeList(t.embeddeds)
if mcopied || types != t.types || ecopied {
iface := &Interface{methods: methods, types: types, embeddeds: embeddeds}
+ if subst.check == nil {
+ panic("internal error: cannot instantiate interfaces yet")
+ }
subst.check.posMap[iface] = subst.check.posMap[t] // satisfy completeInterface requirement
subst.check.completeInterface(token.NoPos, iface)
return iface
@@ -330,12 +342,14 @@ func (subst *subster) typ(typ Type) Type {
}
case *Named:
- subst.check.indent++
- defer func() {
- subst.check.indent--
- }()
- dump := func(format string, args ...interface{}) {
- if trace {
+ // dump is for debugging
+ dump := func(string, ...interface{}) {}
+ if subst.check != nil && trace {
+ subst.check.indent++
+ defer func() {
+ subst.check.indent--
+ }()
+ dump = func(format string, args ...interface{}) {
subst.check.trace(subst.pos, format, args...)
}
}
@@ -381,24 +395,27 @@ func (subst *subster) typ(typ Type) Type {
// before creating a new named type, check if we have this one already
h := instantiatedHash(t, newTargs)
dump(">>> new type hash: %s", h)
- if named, found := subst.check.typMap[h]; found {
- dump(">>> found %s", named)
- subst.cache[t] = named
- return named
+ if subst.check != nil {
+ if named, found := subst.check.typMap[h]; found {
+ dump(">>> found %s", named)
+ subst.cache[t] = named
+ return named
+ }
}
// create a new named type and populate caches to avoid endless recursion
tname := NewTypeName(subst.pos, t.obj.pkg, t.obj.name, nil)
- named := subst.check.newNamed(tname, t.underlying, t.methods) // method signatures are updated lazily
- named.tparams = t.tparams // new type is still parameterized
+ named := subst.check.newNamed(tname, t, t.underlying, t.tparams, t.methods) // method signatures are updated lazily
named.targs = newTargs
- subst.check.typMap[h] = named
+ if subst.check != nil {
+ subst.check.typMap[h] = named
+ }
subst.cache[t] = named
// do the substitution
dump(">>> subst %s with %s (new: %s)", t.underlying, subst.smap, newTargs)
named.underlying = subst.typOrNil(t.underlying)
- named.orig = named.underlying // for cycle detection (Checker.validType)
+ named.fromRHS = named.underlying // for cycle detection (Checker.validType)
return named
diff --git a/src/go/types/testdata/check/decls0.src b/src/go/types/testdata/check/decls0.src
index 5ad8f53f65..09904bb303 100644
--- a/src/go/types/testdata/check/decls0.src
+++ b/src/go/types/testdata/check/decls0.src
@@ -187,10 +187,10 @@ func f4() (x *f4 /* ERROR "not a type" */ ) { return }
// TODO(#43215) this should be detected as a cycle error
func f5([unsafe.Sizeof(f5)]int) {}
-func (S0) m1 (x S0 /* ERROR value .* is not a type */ .m1) {}
-func (S0) m2 (x *S0 /* ERROR value .* is not a type */ .m2) {}
-func (S0) m3 () (x S0 /* ERROR value .* is not a type */ .m3) { return }
-func (S0) m4 () (x *S0 /* ERROR value .* is not a type */ .m4) { return }
+func (S0) m1 (x S0 /* ERROR illegal cycle in method declaration */ .m1) {}
+func (S0) m2 (x *S0 /* ERROR illegal cycle in method declaration */ .m2) {}
+func (S0) m3 () (x S0 /* ERROR illegal cycle in method declaration */ .m3) { return }
+func (S0) m4 () (x *S0 /* ERROR illegal cycle in method declaration */ .m4) { return }
// interfaces may not have any blank methods
type BlankI interface {
diff --git a/src/go/types/testdata/check/tmp.go2 b/src/go/types/testdata/check/tmp.go2
deleted file mode 100644
index dae78caff8..0000000000
--- a/src/go/types/testdata/check/tmp.go2
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file is meant as "dumping ground" for debugging code.
-
-package p
-
-// fun test case
-type C[P interface{m()}] P
-
-func (r C[P]) m() { r.m() }
-
-func f[T interface{m(); n()}](x T) {
- y := C[T](x)
- y.m()
-}
diff --git a/src/go/types/testdata/fixedbugs/issue46275.go2 b/src/go/types/testdata/fixedbugs/issue46275.go2
new file mode 100644
index 0000000000..0ebde31c8e
--- /dev/null
+++ b/src/go/types/testdata/fixedbugs/issue46275.go2
@@ -0,0 +1,27 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue46275
+
+type N[T any] struct {
+ *N[T]
+ t T
+}
+
+func (n *N[T]) Elem() T {
+ return n.t
+}
+
+type I interface {
+ Elem() string
+}
+
+func _() {
+ var n1 *N[string]
+ var _ I = n1
+ type NS N[string]
+ var n2 *NS
+ var _ I = n2
+}
+
diff --git a/src/go/types/testdata/manual.go2 b/src/go/types/testdata/manual.go2
new file mode 100644
index 0000000000..25e6f22f94
--- /dev/null
+++ b/src/go/types/testdata/manual.go2
@@ -0,0 +1,9 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is tested when running "go test -run Manual"
+// without source arguments. Use for one-off debugging.
+
+package p
+
diff --git a/src/go/types/type.go b/src/go/types/type.go
index 2660ce4408..4a39499905 100644
--- a/src/go/types/type.go
+++ b/src/go/types/type.go
@@ -5,7 +5,6 @@
package types
import (
- "fmt"
"go/token"
"sync/atomic"
)
@@ -422,79 +421,41 @@ func (t *Interface) EmbeddedType(i int) Type { return t.embeddeds[i] }
// NumMethods returns the total number of methods of interface t.
// The interface must have been completed.
-func (t *Interface) NumMethods() int { t.assertCompleteness(); return len(t.allMethods) }
-
-func (t *Interface) assertCompleteness() {
- if t.allMethods == nil {
- panic("interface is incomplete")
- }
-}
+func (t *Interface) NumMethods() int { t.Complete(); return len(t.allMethods) }
// Method returns the i'th method of interface t for 0 <= i < t.NumMethods().
// The methods are ordered by their unique Id.
// The interface must have been completed.
-func (t *Interface) Method(i int) *Func { t.assertCompleteness(); return t.allMethods[i] }
+func (t *Interface) Method(i int) *Func { t.Complete(); return t.allMethods[i] }
// Empty reports whether t is the empty interface.
func (t *Interface) Empty() bool {
- if t.allMethods != nil {
- // interface is complete - quick test
- // A non-nil allTypes may still be empty and represents the bottom type.
- return len(t.allMethods) == 0 && t.allTypes == nil
- }
- return !t.iterate(func(t *Interface) bool {
- return len(t.methods) > 0 || t.types != nil
- }, nil)
+ t.Complete()
+ // A non-nil allTypes may still have length 0 but represents the bottom type.
+ return len(t.allMethods) == 0 && t.allTypes == nil
}
// _HasTypeList reports whether interface t has a type list, possibly from an embedded type.
func (t *Interface) _HasTypeList() bool {
- if t.allMethods != nil {
- // interface is complete - quick test
- return t.allTypes != nil
- }
-
- return t.iterate(func(t *Interface) bool {
- return t.types != nil
- }, nil)
+ t.Complete()
+ return t.allTypes != nil
}
// _IsComparable reports whether interface t is or embeds the predeclared interface "comparable".
func (t *Interface) _IsComparable() bool {
- if t.allMethods != nil {
- // interface is complete - quick test
- _, m := lookupMethod(t.allMethods, nil, "==")
- return m != nil
- }
-
- return t.iterate(func(t *Interface) bool {
- _, m := lookupMethod(t.methods, nil, "==")
- return m != nil
- }, nil)
+ t.Complete()
+ _, m := lookupMethod(t.allMethods, nil, "==")
+ return m != nil
}
// _IsConstraint reports t.HasTypeList() || t.IsComparable().
func (t *Interface) _IsConstraint() bool {
- if t.allMethods != nil {
- // interface is complete - quick test
- if t.allTypes != nil {
- return true
- }
- _, m := lookupMethod(t.allMethods, nil, "==")
- return m != nil
- }
-
- return t.iterate(func(t *Interface) bool {
- if t.types != nil {
- return true
- }
- _, m := lookupMethod(t.methods, nil, "==")
- return m != nil
- }, nil)
+ return t._HasTypeList() || t._IsComparable()
}
// iterate calls f with t and then with any embedded interface of t, recursively, until f returns true.
// iterate reports whether any call to f returned true.
+// TODO(rfindley) This is now only used by infer.go - see if we can eliminate it.
func (t *Interface) iterate(f func(*Interface) bool, seen map[*Interface]bool) bool {
if f(t) {
return true
@@ -538,64 +499,9 @@ func (t *Interface) isSatisfiedBy(typ Type) bool {
// form other types. The interface must not contain duplicate methods or a
// panic occurs. Complete returns the receiver.
func (t *Interface) Complete() *Interface {
- // TODO(gri) consolidate this method with Checker.completeInterface
- if t.allMethods != nil {
- return t
- }
-
- t.allMethods = markComplete // avoid infinite recursion
-
- var todo []*Func
- var methods []*Func
- var seen objset
- addMethod := func(m *Func, explicit bool) {
- switch other := seen.insert(m); {
- case other == nil:
- methods = append(methods, m)
- case explicit:
- panic("duplicate method " + m.name)
- default:
- // check method signatures after all locally embedded interfaces are computed
- todo = append(todo, m, other.(*Func))
- }
- }
-
- for _, m := range t.methods {
- addMethod(m, true)
- }
-
- allTypes := t.types
-
- for _, typ := range t.embeddeds {
- utyp := under(typ)
- etyp := asInterface(utyp)
- if etyp == nil {
- if utyp != Typ[Invalid] {
- panic(fmt.Sprintf("%s is not an interface", typ))
- }
- continue
- }
- etyp.Complete()
- for _, m := range etyp.allMethods {
- addMethod(m, false)
- }
- allTypes = intersect(allTypes, etyp.allTypes)
- }
-
- for i := 0; i < len(todo); i += 2 {
- m := todo[i]
- other := todo[i+1]
- if !Identical(m.typ, other.typ) {
- panic("duplicate method " + m.name)
- }
- }
-
- if methods != nil {
- sortMethods(methods)
- t.allMethods = methods
+ if t.allMethods == nil {
+ completeInterface(nil, token.NoPos, t)
}
- t.allTypes = allTypes
-
return t
}
@@ -642,12 +548,15 @@ func (c *Chan) Dir() ChanDir { return c.dir }
// Elem returns the element type of channel c.
func (c *Chan) Elem() Type { return c.elem }
+// TODO(rfindley) Clean up Named struct below; specifically the fromRHS field (can we use underlying?).
+
// A Named represents a named (defined) type.
type Named struct {
check *Checker // for Named.under implementation; nilled once under has been called
info typeInfo // for cycle detection
obj *TypeName // corresponding declared object
- orig Type // type (on RHS of declaration) this *Named type is derived of (for cycle reporting)
+ orig *Named // original, uninstantiated type
+ fromRHS Type // type (on RHS of declaration) this *Named type is derived from (for cycle reporting)
underlying Type // possibly a *Named during setup; never a *Named once set up completely
tparams []*TypeName // type parameters, or nil
targs []Type // type arguments (after instantiation), or nil
@@ -661,11 +570,14 @@ func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
if _, ok := underlying.(*Named); ok {
panic("types.NewNamed: underlying type must not be *Named")
}
- return (*Checker)(nil).newNamed(obj, underlying, methods)
+ return (*Checker)(nil).newNamed(obj, nil, underlying, nil, methods)
}
-func (check *Checker) newNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
- typ := &Named{check: check, obj: obj, orig: underlying, underlying: underlying, methods: methods}
+func (check *Checker) newNamed(obj *TypeName, orig *Named, underlying Type, tparams []*TypeName, methods []*Func) *Named {
+ typ := &Named{check: check, obj: obj, orig: orig, fromRHS: underlying, underlying: underlying, tparams: tparams, methods: methods}
+ if typ.orig == nil {
+ typ.orig = typ
+ }
if obj.typ == nil {
obj.typ = typ
}
@@ -692,6 +604,10 @@ func (check *Checker) newNamed(obj *TypeName, underlying Type, methods []*Func)
// Obj returns the type name for the named type t.
func (t *Named) Obj() *TypeName { return t.obj }
+// _Orig returns the original generic type an instantiated type is derived from.
+// If t is not an instantiated type, the result is t.
+func (t *Named) _Orig() *Named { return t.orig }
+
// TODO(gri) Come up with a better representation and API to distinguish
// between parameterized instantiated and non-instantiated types.
@@ -699,10 +615,13 @@ func (t *Named) Obj() *TypeName { return t.obj }
// The result is non-nil for an (originally) parameterized type even if it is instantiated.
func (t *Named) _TParams() []*TypeName { return t.tparams }
+// _SetTParams sets the type parameters of the named type t.
+func (t *Named) _SetTParams(tparams []*TypeName) { t.tparams = tparams }
+
// _TArgs returns the type arguments after instantiation of the named type t, or nil if not instantiated.
func (t *Named) _TArgs() []Type { return t.targs }
-// _SetTArgs sets the type arguments of Named.
+// _SetTArgs sets the type arguments of the named type t.
func (t *Named) _SetTArgs(args []Type) { t.targs = args }
// NumMethods returns the number of explicit methods whose receiver is named type t.
@@ -732,25 +651,32 @@ func (t *Named) AddMethod(m *Func) {
// Note: This is a uint32 rather than a uint64 because the
// respective 64 bit atomic instructions are not available
// on all platforms.
-var lastId uint32
+var lastID uint32
-// nextId returns a value increasing monotonically by 1 with
+// nextID returns a value increasing monotonically by 1 with
// each call, starting with 1. It may be called concurrently.
-func nextId() uint64 { return uint64(atomic.AddUint32(&lastId, 1)) }
+func nextID() uint64 { return uint64(atomic.AddUint32(&lastID, 1)) }
// A _TypeParam represents a type parameter type.
type _TypeParam struct {
check *Checker // for lazy type bound completion
- id uint64 // unique id
+ id uint64 // unique id, for debugging only
obj *TypeName // corresponding type name
- index int // parameter index
+ index int // type parameter index in source order, starting at 0
bound Type // *Named or *Interface; underlying type is always *Interface
}
-// newTypeParam returns a new TypeParam.
func (check *Checker) newTypeParam(obj *TypeName, index int, bound Type) *_TypeParam {
assert(bound != nil)
- typ := &_TypeParam{check: check, id: nextId(), obj: obj, index: index, bound: bound}
+
+ // Always increment lastID, even if it is not used.
+ id := nextID()
+ if check != nil {
+ check.nextID++
+ id = check.nextID
+ }
+
+ typ := &_TypeParam{check: check, id: id, obj: obj, index: index, bound: bound}
if obj.typ == nil {
obj.typ = typ
}
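
Aside, not part of the patch: a minimal sketch of the exported go/types surface these type.go hunks touch. NumMethods, Method, and Empty now complete an interface lazily through Complete instead of panicking on an incomplete one, so explicit completion stays an idempotent no-op. Everything below uses only the public API.

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	// Build an interface with one explicit method, much like types_test.go does.
	sig := types.NewSignature(nil, nil, nil, false)
	m := types.NewFunc(token.NoPos, nil, "M", sig)
	iface := types.NewInterfaceType([]*types.Func{m}, nil)

	// Complete is idempotent; the accessors now call it on demand as well.
	iface.Complete()
	fmt.Println(iface.NumMethods(), iface.Empty()) // 1 false
}
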
diff --git a/src/go/types/types_test.go b/src/go/types/types_test.go
index 25cd996628..7990414f42 100644
--- a/src/go/types/types_test.go
+++ b/src/go/types/types_test.go
@@ -4,16 +4,11 @@
package types
-import "sync/atomic"
-
-// Upon calling ResetId, nextId starts with 1 again.
-// It may be called concurrently. This is only needed
-// for tests where we may want to have a consistent
-// numbering for each individual test case.
-func ResetId() { atomic.StoreUint32(&lastId, 0) }
-
// SetGoVersion sets the unexported goVersion field on config, so that tests
// which assert on behavior for older Go versions can set it.
func SetGoVersion(config *Config, goVersion string) {
config.goVersion = goVersion
}
+
+// Debug is set if go/types is built with debug mode enabled.
+const Debug = debug
diff --git a/src/go/types/typestring.go b/src/go/types/typestring.go
index fe27f0f276..ff93f3b3c3 100644
--- a/src/go/types/typestring.go
+++ b/src/go/types/typestring.go
@@ -227,7 +227,7 @@ func writeType(buf *bytes.Buffer, typ Type, qf Qualifier, visited []Type) {
empty = false
}
}
- if t.allMethods == nil || len(t.methods) > len(t.allMethods) {
+ if debug && (t.allMethods == nil || len(t.methods) > len(t.allMethods)) {
if !empty {
buf.WriteByte(' ')
}
diff --git a/src/go/types/typestring_test.go b/src/go/types/typestring_test.go
index b16529dc64..55ee4b987f 100644
--- a/src/go/types/typestring_test.go
+++ b/src/go/types/typestring_test.go
@@ -143,6 +143,10 @@ func TestTypeString(t *testing.T) {
}
func TestIncompleteInterfaces(t *testing.T) {
+ if !Debug {
+ t.Skip("requires type checker to be compiled with debug = true")
+ }
+
sig := NewSignature(nil, nil, nil, false)
m := NewFunc(token.NoPos, nil, "m", sig)
for _, test := range []struct {
diff --git a/src/go/types/typexpr.go b/src/go/types/typexpr.go
index 5185c33fcb..97df908ae9 100644
--- a/src/go/types/typexpr.go
+++ b/src/go/types/typexpr.go
@@ -12,8 +12,6 @@ import (
"go/constant"
"go/internal/typeparams"
"go/token"
- "sort"
- "strconv"
"strings"
)
@@ -194,205 +192,6 @@ func (check *Checker) genericType(e ast.Expr, reportErr bool) Type {
return typ
}
-// isubst returns an x with identifiers substituted per the substitution map smap.
-// isubst only handles the case of (valid) method receiver type expressions correctly.
-func isubst(x ast.Expr, smap map[*ast.Ident]*ast.Ident) ast.Expr {
- switch n := x.(type) {
- case *ast.Ident:
- if alt := smap[n]; alt != nil {
- return alt
- }
- case *ast.StarExpr:
- X := isubst(n.X, smap)
- if X != n.X {
- new := *n
- new.X = X
- return &new
- }
- case *ast.IndexExpr:
- elems := typeparams.UnpackExpr(n.Index)
- var newElems []ast.Expr
- for i, elem := range elems {
- new := isubst(elem, smap)
- if new != elem {
- if newElems == nil {
- newElems = make([]ast.Expr, len(elems))
- copy(newElems, elems)
- }
- newElems[i] = new
- }
- }
- if newElems != nil {
- index := typeparams.PackExpr(newElems)
- new := *n
- new.Index = index
- return &new
- }
- case *ast.ParenExpr:
- return isubst(n.X, smap) // no need to keep parentheses
- default:
- // Other receiver type expressions are invalid.
- // It's fine to ignore those here as they will
- // be checked elsewhere.
- }
- return x
-}
-
-// funcType type-checks a function or method type.
-func (check *Checker) funcType(sig *Signature, recvPar *ast.FieldList, ftyp *ast.FuncType) {
- check.openScope(ftyp, "function")
- check.scope.isFunc = true
- check.recordScope(ftyp, check.scope)
- sig.scope = check.scope
- defer check.closeScope()
-
- var recvTyp ast.Expr // rewritten receiver type; valid if != nil
- if recvPar != nil && len(recvPar.List) > 0 {
- // collect generic receiver type parameters, if any
- // - a receiver type parameter is like any other type parameter, except that it is declared implicitly
- // - the receiver specification acts as local declaration for its type parameters, which may be blank
- _, rname, rparams := check.unpackRecv(recvPar.List[0].Type, true)
- if len(rparams) > 0 {
- // Blank identifiers don't get declared and regular type-checking of the instantiated
- // parameterized receiver type expression fails in Checker.collectParams of receiver.
- // Identify blank type parameters and substitute each with a unique new identifier named
- // "n_" (where n is the parameter index) and which cannot conflict with any user-defined
- // name.
- var smap map[*ast.Ident]*ast.Ident // substitution map from "_" to "n_" identifiers
- for i, p := range rparams {
- if p.Name == "_" {
- new := *p
- new.Name = fmt.Sprintf("%d_", i)
- rparams[i] = &new // use n_ identifier instead of _ so it can be looked up
- if smap == nil {
- smap = make(map[*ast.Ident]*ast.Ident)
- }
- smap[p] = &new
- }
- }
- if smap != nil {
- // blank identifiers were found => use rewritten receiver type
- recvTyp = isubst(recvPar.List[0].Type, smap)
- }
- sig.rparams = check.declareTypeParams(nil, rparams)
- // determine receiver type to get its type parameters
- // and the respective type parameter bounds
- var recvTParams []*TypeName
- if rname != nil {
- // recv should be a Named type (otherwise an error is reported elsewhere)
- // Also: Don't report an error via genericType since it will be reported
- // again when we type-check the signature.
- // TODO(gri) maybe the receiver should be marked as invalid instead?
- if recv := asNamed(check.genericType(rname, false)); recv != nil {
- recvTParams = recv.tparams
- }
- }
- // provide type parameter bounds
- // - only do this if we have the right number (otherwise an error is reported elsewhere)
- if len(sig.rparams) == len(recvTParams) {
- // We have a list of *TypeNames but we need a list of Types.
- list := make([]Type, len(sig.rparams))
- for i, t := range sig.rparams {
- list[i] = t.typ
- }
- smap := makeSubstMap(recvTParams, list)
- for i, tname := range sig.rparams {
- bound := recvTParams[i].typ.(*_TypeParam).bound
- // bound is (possibly) parameterized in the context of the
- // receiver type declaration. Substitute parameters for the
- // current context.
- // TODO(gri) should we assume now that bounds always exist?
- // (no bound == empty interface)
- if bound != nil {
- bound = check.subst(tname.pos, bound, smap)
- tname.typ.(*_TypeParam).bound = bound
- }
- }
- }
- }
- }
-
- if tparams := typeparams.Get(ftyp); tparams != nil {
- sig.tparams = check.collectTypeParams(tparams)
- // Always type-check method type parameters but complain that they are not allowed.
- // (A separate check is needed when type-checking interface method signatures because
- // they don't have a receiver specification.)
- if recvPar != nil {
- check.errorf(tparams, _Todo, "methods cannot have type parameters")
- }
- }
-
- // Value (non-type) parameters' scope starts in the function body. Use a temporary scope for their
- // declarations and then squash that scope into the parent scope (and report any redeclarations at
- // that time).
- scope := NewScope(check.scope, token.NoPos, token.NoPos, "function body (temp. scope)")
- recvList, _ := check.collectParams(scope, recvPar, recvTyp, false) // use rewritten receiver type, if any
- params, variadic := check.collectParams(scope, ftyp.Params, nil, true)
- results, _ := check.collectParams(scope, ftyp.Results, nil, false)
- scope.squash(func(obj, alt Object) {
- check.errorf(obj, _DuplicateDecl, "%s redeclared in this block", obj.Name())
- check.reportAltDecl(alt)
- })
-
- if recvPar != nil {
- // recv parameter list present (may be empty)
- // spec: "The receiver is specified via an extra parameter section preceding the
- // method name. That parameter section must declare a single parameter, the receiver."
- var recv *Var
- switch len(recvList) {
- case 0:
- // error reported by resolver
- recv = NewParam(0, nil, "", Typ[Invalid]) // ignore recv below
- default:
- // more than one receiver
- check.error(recvList[len(recvList)-1], _BadRecv, "method must have exactly one receiver")
- fallthrough // continue with first receiver
- case 1:
- recv = recvList[0]
- }
-
- // TODO(gri) We should delay rtyp expansion to when we actually need the
- // receiver; thus all checks here should be delayed to later.
- rtyp, _ := deref(recv.typ)
- rtyp = expand(rtyp)
-
- // spec: "The receiver type must be of the form T or *T where T is a type name."
- // (ignore invalid types - error was reported before)
- if t := rtyp; t != Typ[Invalid] {
- var err string
- if T := asNamed(t); T != nil {
- // spec: "The type denoted by T is called the receiver base type; it must not
- // be a pointer or interface type and it must be declared in the same package
- // as the method."
- if T.obj.pkg != check.pkg {
- err = "type not defined in this package"
- } else {
- switch u := optype(T).(type) {
- case *Basic:
- // unsafe.Pointer is treated like a regular pointer
- if u.kind == UnsafePointer {
- err = "unsafe.Pointer"
- }
- case *Pointer, *Interface:
- err = "pointer or interface type"
- }
- }
- } else {
- err = "basic or unnamed type"
- }
- if err != "" {
- check.errorf(recv, _InvalidRecv, "invalid receiver %s (%s)", recv.typ, err)
- // ok to continue
- }
- }
- sig.recv = recv
- }
-
- sig.params = NewTuple(params...)
- sig.results = NewTuple(results...)
- sig.variadic = variadic
-}
-
// goTypeName returns the Go type name for typ and
// removes any occurrences of "types." from that name.
func goTypeName(typ Type) string {
@@ -685,518 +484,3 @@ func (check *Checker) typeList(list []ast.Expr) []Type {
}
return res
}
-
-// collectParams declares the parameters of list in scope and returns the corresponding
-// variable list. If type0 != nil, it is used instead of the first type in list.
-func (check *Checker) collectParams(scope *Scope, list *ast.FieldList, type0 ast.Expr, variadicOk bool) (params []*Var, variadic bool) {
- if list == nil {
- return
- }
-
- var named, anonymous bool
- for i, field := range list.List {
- ftype := field.Type
- if i == 0 && type0 != nil {
- ftype = type0
- }
- if t, _ := ftype.(*ast.Ellipsis); t != nil {
- ftype = t.Elt
- if variadicOk && i == len(list.List)-1 && len(field.Names) <= 1 {
- variadic = true
- } else {
- check.softErrorf(t, _MisplacedDotDotDot, "can only use ... with final parameter in list")
- // ignore ... and continue
- }
- }
- typ := check.varType(ftype)
- // The parser ensures that f.Tag is nil and we don't
- // care if a constructed AST contains a non-nil tag.
- if len(field.Names) > 0 {
- // named parameter
- for _, name := range field.Names {
- if name.Name == "" {
- check.invalidAST(name, "anonymous parameter")
- // ok to continue
- }
- par := NewParam(name.Pos(), check.pkg, name.Name, typ)
- check.declare(scope, name, par, scope.pos)
- params = append(params, par)
- }
- named = true
- } else {
- // anonymous parameter
- par := NewParam(ftype.Pos(), check.pkg, "", typ)
- check.recordImplicit(field, par)
- params = append(params, par)
- anonymous = true
- }
- }
-
- if named && anonymous {
- check.invalidAST(list, "list contains both named and anonymous parameters")
- // ok to continue
- }
-
- // For a variadic function, change the last parameter's type from T to []T.
- // Since we type-checked T rather than ...T, we also need to retro-actively
- // record the type for ...T.
- if variadic {
- last := params[len(params)-1]
- last.typ = &Slice{elem: last.typ}
- check.recordTypeAndValue(list.List[len(list.List)-1].Type, typexpr, last.typ, nil)
- }
-
- return
-}
-
-func (check *Checker) declareInSet(oset *objset, pos token.Pos, obj Object) bool {
- if alt := oset.insert(obj); alt != nil {
- check.errorf(atPos(pos), _DuplicateDecl, "%s redeclared", obj.Name())
- check.reportAltDecl(alt)
- return false
- }
- return true
-}
-
-func (check *Checker) interfaceType(ityp *Interface, iface *ast.InterfaceType, def *Named) {
- var tlist *ast.Ident // "type" name of first entry in a type list declaration
- var types []ast.Expr
- for _, f := range iface.Methods.List {
- if len(f.Names) > 0 {
- // We have a method with name f.Names[0], or a type
- // of a type list (name.Name == "type").
- // (The parser ensures that there's only one method
- // and we don't care if a constructed AST has more.)
- name := f.Names[0]
- if name.Name == "_" {
- check.errorf(name, _BlankIfaceMethod, "invalid method name _")
- continue // ignore
- }
-
- if name.Name == "type" {
- // Always collect all type list entries, even from
- // different type lists, under the assumption that
- // the author intended to include all types.
- types = append(types, f.Type)
- if tlist != nil && tlist != name {
- check.errorf(name, _Todo, "cannot have multiple type lists in an interface")
- }
- tlist = name
- continue
- }
-
- typ := check.typ(f.Type)
- sig, _ := typ.(*Signature)
- if sig == nil {
- if typ != Typ[Invalid] {
- check.invalidAST(f.Type, "%s is not a method signature", typ)
- }
- continue // ignore
- }
-
- // Always type-check method type parameters but complain if they are not enabled.
- // (This extra check is needed here because interface method signatures don't have
- // a receiver specification.)
- if sig.tparams != nil {
- var at positioner = f.Type
- if tparams := typeparams.Get(f.Type); tparams != nil {
- at = tparams
- }
- check.errorf(at, _Todo, "methods cannot have type parameters")
- }
-
- // use named receiver type if available (for better error messages)
- var recvTyp Type = ityp
- if def != nil {
- recvTyp = def
- }
- sig.recv = NewVar(name.Pos(), check.pkg, "", recvTyp)
-
- m := NewFunc(name.Pos(), check.pkg, name.Name, sig)
- check.recordDef(name, m)
- ityp.methods = append(ityp.methods, m)
- } else {
- // We have an embedded type. completeInterface will
- // eventually verify that we have an interface.
- ityp.embeddeds = append(ityp.embeddeds, check.typ(f.Type))
- check.posMap[ityp] = append(check.posMap[ityp], f.Type.Pos())
- }
- }
-
- // type constraints
- ityp.types = _NewSum(check.collectTypeConstraints(iface.Pos(), types))
-
- if len(ityp.methods) == 0 && ityp.types == nil && len(ityp.embeddeds) == 0 {
- // empty interface
- ityp.allMethods = markComplete
- return
- }
-
- // sort for API stability
- sortMethods(ityp.methods)
- sortTypes(ityp.embeddeds)
-
- check.later(func() { check.completeInterface(iface.Pos(), ityp) })
-}
-
-func (check *Checker) completeInterface(pos token.Pos, ityp *Interface) {
- if ityp.allMethods != nil {
- return
- }
-
- // completeInterface may be called via the LookupFieldOrMethod,
- // MissingMethod, Identical, or IdenticalIgnoreTags external API
- // in which case check will be nil. In this case, type-checking
- // must be finished and all interfaces should have been completed.
- if check == nil {
- panic("internal error: incomplete interface")
- }
-
- if trace {
- // Types don't generally have position information.
- // If we don't have a valid pos provided, try to use
- // one close enough.
- if !pos.IsValid() && len(ityp.methods) > 0 {
- pos = ityp.methods[0].pos
- }
-
- check.trace(pos, "complete %s", ityp)
- check.indent++
- defer func() {
- check.indent--
- check.trace(pos, "=> %s (methods = %v, types = %v)", ityp, ityp.allMethods, ityp.allTypes)
- }()
- }
-
- // An infinitely expanding interface (due to a cycle) is detected
- // elsewhere (Checker.validType), so here we simply assume we only
- // have valid interfaces. Mark the interface as complete to avoid
- // infinite recursion if the validType check occurs later for some
- // reason.
- ityp.allMethods = markComplete
-
- // Methods of embedded interfaces are collected unchanged; i.e., the identity
- // of a method I.m's Func Object of an interface I is the same as that of
- // the method m in an interface that embeds interface I. On the other hand,
- // if a method is embedded via multiple overlapping embedded interfaces, we
- // don't provide a guarantee which "original m" got chosen for the embedding
- // interface. See also issue #34421.
- //
- // If we don't care to provide this identity guarantee anymore, instead of
- // reusing the original method in embeddings, we can clone the method's Func
- // Object and give it the position of a corresponding embedded interface. Then
- // we can get rid of the mpos map below and simply use the cloned method's
- // position.
-
- var seen objset
- var methods []*Func
- mpos := make(map[*Func]token.Pos) // method specification or method embedding position, for good error messages
- addMethod := func(pos token.Pos, m *Func, explicit bool) {
- switch other := seen.insert(m); {
- case other == nil:
- methods = append(methods, m)
- mpos[m] = pos
- case explicit:
- check.errorf(atPos(pos), _DuplicateDecl, "duplicate method %s", m.name)
- check.errorf(atPos(mpos[other.(*Func)]), _DuplicateDecl, "\tother declaration of %s", m.name) // secondary error, \t indented
- default:
- // We have a duplicate method name in an embedded (not explicitly declared) method.
- // Check method signatures after all types are computed (issue #33656).
- // If we're pre-go1.14 (overlapping embeddings are not permitted), report that
- // error here as well (even though we could do it eagerly) because it's the same
- // error message.
- check.later(func() {
- if !check.allowVersion(m.pkg, 1, 14) || !check.identical(m.typ, other.Type()) {
- check.errorf(atPos(pos), _DuplicateDecl, "duplicate method %s", m.name)
- check.errorf(atPos(mpos[other.(*Func)]), _DuplicateDecl, "\tother declaration of %s", m.name) // secondary error, \t indented
- }
- })
- }
- }
-
- for _, m := range ityp.methods {
- addMethod(m.pos, m, true)
- }
-
- // collect types
- allTypes := ityp.types
-
- posList := check.posMap[ityp]
- for i, typ := range ityp.embeddeds {
- pos := posList[i] // embedding position
- utyp := under(typ)
- etyp := asInterface(utyp)
- if etyp == nil {
- if utyp != Typ[Invalid] {
- var format string
- if _, ok := utyp.(*_TypeParam); ok {
- format = "%s is a type parameter, not an interface"
- } else {
- format = "%s is not an interface"
- }
- // TODO: correct error code.
- check.errorf(atPos(pos), _InvalidIfaceEmbed, format, typ)
- }
- continue
- }
- check.completeInterface(pos, etyp)
- for _, m := range etyp.allMethods {
- addMethod(pos, m, false) // use embedding position pos rather than m.pos
- }
- allTypes = intersect(allTypes, etyp.allTypes)
- }
-
- if methods != nil {
- sort.Sort(byUniqueMethodName(methods))
- ityp.allMethods = methods
- }
- ityp.allTypes = allTypes
-}
-
-// intersect computes the intersection of the types x and y.
-// Note: A incomming nil type stands for the top type. A top
-// type result is returned as nil.
-func intersect(x, y Type) (r Type) {
- defer func() {
- if r == theTop {
- r = nil
- }
- }()
-
- switch {
- case x == theBottom || y == theBottom:
- return theBottom
- case x == nil || x == theTop:
- return y
- case y == nil || x == theTop:
- return x
- }
-
- xtypes := unpackType(x)
- ytypes := unpackType(y)
- // Compute the list rtypes which includes only
- // types that are in both xtypes and ytypes.
- // Quadratic algorithm, but good enough for now.
- // TODO(gri) fix this
- var rtypes []Type
- for _, x := range xtypes {
- if includes(ytypes, x) {
- rtypes = append(rtypes, x)
- }
- }
-
- if rtypes == nil {
- return theBottom
- }
- return _NewSum(rtypes)
-}
-
-func sortTypes(list []Type) {
- sort.Stable(byUniqueTypeName(list))
-}
-
-// byUniqueTypeName named type lists can be sorted by their unique type names.
-type byUniqueTypeName []Type
-
-func (a byUniqueTypeName) Len() int { return len(a) }
-func (a byUniqueTypeName) Less(i, j int) bool { return sortName(a[i]) < sortName(a[j]) }
-func (a byUniqueTypeName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-func sortName(t Type) string {
- if named := asNamed(t); named != nil {
- return named.obj.Id()
- }
- return ""
-}
-
-func sortMethods(list []*Func) {
- sort.Sort(byUniqueMethodName(list))
-}
-
-func assertSortedMethods(list []*Func) {
- if !debug {
- panic("internal error: assertSortedMethods called outside debug mode")
- }
- if !sort.IsSorted(byUniqueMethodName(list)) {
- panic("internal error: methods not sorted")
- }
-}
-
-// byUniqueMethodName method lists can be sorted by their unique method names.
-type byUniqueMethodName []*Func
-
-func (a byUniqueMethodName) Len() int { return len(a) }
-func (a byUniqueMethodName) Less(i, j int) bool { return a[i].Id() < a[j].Id() }
-func (a byUniqueMethodName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-func (check *Checker) tag(t *ast.BasicLit) string {
- if t != nil {
- if t.Kind == token.STRING {
- if val, err := strconv.Unquote(t.Value); err == nil {
- return val
- }
- }
- check.invalidAST(t, "incorrect tag syntax: %q", t.Value)
- }
- return ""
-}
-
-func (check *Checker) structType(styp *Struct, e *ast.StructType) {
- list := e.Fields
- if list == nil {
- return
- }
-
- // struct fields and tags
- var fields []*Var
- var tags []string
-
- // for double-declaration checks
- var fset objset
-
- // current field typ and tag
- var typ Type
- var tag string
- add := func(ident *ast.Ident, embedded bool, pos token.Pos) {
- if tag != "" && tags == nil {
- tags = make([]string, len(fields))
- }
- if tags != nil {
- tags = append(tags, tag)
- }
-
- name := ident.Name
- fld := NewField(pos, check.pkg, name, typ, embedded)
- // spec: "Within a struct, non-blank field names must be unique."
- if name == "_" || check.declareInSet(&fset, pos, fld) {
- fields = append(fields, fld)
- check.recordDef(ident, fld)
- }
- }
-
- // addInvalid adds an embedded field of invalid type to the struct for
- // fields with errors; this keeps the number of struct fields in sync
- // with the source as long as the fields are _ or have different names
- // (issue #25627).
- addInvalid := func(ident *ast.Ident, pos token.Pos) {
- typ = Typ[Invalid]
- tag = ""
- add(ident, true, pos)
- }
-
- for _, f := range list.List {
- typ = check.varType(f.Type)
- tag = check.tag(f.Tag)
- if len(f.Names) > 0 {
- // named fields
- for _, name := range f.Names {
- add(name, false, name.Pos())
- }
- } else {
- // embedded field
- // spec: "An embedded type must be specified as a type name T or as a
- // pointer to a non-interface type name *T, and T itself may not be a
- // pointer type."
- pos := f.Type.Pos()
- name := embeddedFieldIdent(f.Type)
- if name == nil {
- // TODO(rFindley): using invalidAST here causes test failures (all
- // errors should have codes). Clean this up.
- check.errorf(f.Type, _Todo, "invalid AST: embedded field type %s has no name", f.Type)
- name = ast.NewIdent("_")
- name.NamePos = pos
- addInvalid(name, pos)
- continue
- }
- add(name, true, pos)
-
- // Because we have a name, typ must be of the form T or *T, where T is the name
- // of a (named or alias) type, and t (= deref(typ)) must be the type of T.
- // We must delay this check to the end because we don't want to instantiate
- // (via under(t)) a possibly incomplete type.
-
- // for use in the closure below
- embeddedTyp := typ
- embeddedPos := f.Type
-
- check.later(func() {
- t, isPtr := deref(embeddedTyp)
- switch t := optype(t).(type) {
- case *Basic:
- if t == Typ[Invalid] {
- // error was reported before
- return
- }
- // unsafe.Pointer is treated like a regular pointer
- if t.kind == UnsafePointer {
- check.errorf(embeddedPos, _InvalidPtrEmbed, "embedded field type cannot be unsafe.Pointer")
- }
- case *Pointer:
- check.errorf(embeddedPos, _InvalidPtrEmbed, "embedded field type cannot be a pointer")
- case *Interface:
- if isPtr {
- check.errorf(embeddedPos, _InvalidPtrEmbed, "embedded field type cannot be a pointer to an interface")
- }
- }
- })
- }
- }
-
- styp.fields = fields
- styp.tags = tags
-}
-
-func embeddedFieldIdent(e ast.Expr) *ast.Ident {
- switch e := e.(type) {
- case *ast.Ident:
- return e
- case *ast.StarExpr:
- // *T is valid, but **T is not
- if _, ok := e.X.(*ast.StarExpr); !ok {
- return embeddedFieldIdent(e.X)
- }
- case *ast.SelectorExpr:
- return e.Sel
- case *ast.IndexExpr:
- return embeddedFieldIdent(e.X)
- }
- return nil // invalid embedded field
-}
-
-func (check *Checker) collectTypeConstraints(pos token.Pos, types []ast.Expr) []Type {
- list := make([]Type, 0, len(types)) // assume all types are correct
- for _, texpr := range types {
- if texpr == nil {
- check.invalidAST(atPos(pos), "missing type constraint")
- continue
- }
- list = append(list, check.varType(texpr))
- }
-
- // Ensure that each type is only present once in the type list. Types may be
- // interfaces, which may not be complete yet. It's ok to do this check at the
- // end because it's not a requirement for correctness of the code.
- // Note: This is a quadratic algorithm, but type lists tend to be short.
- check.later(func() {
- for i, t := range list {
- if t := asInterface(t); t != nil {
- check.completeInterface(types[i].Pos(), t)
- }
- if includes(list[:i], t) {
- check.softErrorf(types[i], _Todo, "duplicate type %s in type list", t)
- }
- }
- })
-
- return list
-}
-
-// includes reports whether typ is in list.
-func includes(list []Type, typ Type) bool {
- for _, e := range list {
- if Identical(typ, e) {
- return true
- }
- }
- return false
-}
diff --git a/src/internal/abi/abi_arm64.go b/src/internal/abi/abi_arm64.go
new file mode 100644
index 0000000000..7544d7506e
--- /dev/null
+++ b/src/internal/abi/abi_arm64.go
@@ -0,0 +1,20 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.regabireflect
+// +build goexperiment.regabireflect
+
+package abi
+
+const (
+ // See abi_generic.go.
+
+ // R0 - R15.
+ IntArgRegs = 16
+
+ // F0 - F15.
+ FloatArgRegs = 16
+
+ EffectiveFloatRegSize = 8
+)
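
Aside: a quick arithmetic check of what these constants imply for raw register argument space (assumed interpretation; the 392-byte abi.RegArgs figure quoted later in reflect/asm_arm64.s also carries pointer bookkeeping beyond the bare registers).

package main

import "fmt"

func main() {
	const intArgRegs = 16           // R0 - R15, 8 bytes each
	const floatArgRegs = 16         // F0 - F15
	const effectiveFloatRegSize = 8 // bytes of each float register actually used
	fmt.Println(intArgRegs*8 + floatArgRegs*effectiveFloatRegSize) // 256 bytes of register arguments
}
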
diff --git a/src/internal/buildcfg/exp.go b/src/internal/buildcfg/exp.go
index 2435a79dce..e78f987999 100644
--- a/src/internal/buildcfg/exp.go
+++ b/src/internal/buildcfg/exp.go
@@ -20,16 +20,15 @@ import (
// was built with.)
var Experiment goexperiment.Flags = parseExperiments()
-var regabiSupported = GOARCH == "amd64" && (GOOS == "android" || GOOS == "linux" || GOOS == "darwin" || GOOS == "windows")
+var regabiSupported = GOARCH == "amd64" || GOARCH == "arm64"
+var regabiDeveloping = false
// experimentBaseline specifies the experiment flags that are enabled by
// default in the current toolchain. This is, in effect, the "control"
// configuration and any variation from this is an experiment.
var experimentBaseline = goexperiment.Flags{
RegabiWrappers: regabiSupported,
- RegabiG: regabiSupported,
RegabiReflect: regabiSupported,
- RegabiDefer: regabiSupported,
RegabiArgs: regabiSupported,
}
@@ -67,9 +66,7 @@ func parseExperiments() goexperiment.Flags {
// do the right thing.
names["regabi"] = func(v bool) {
flags.RegabiWrappers = v
- flags.RegabiG = v
flags.RegabiReflect = v
- flags.RegabiDefer = v
flags.RegabiArgs = v
}
@@ -98,20 +95,19 @@ func parseExperiments() goexperiment.Flags {
}
}
- // regabi is only supported on amd64.
- if GOARCH != "amd64" {
+ // regabiwrappers is always enabled on amd64.
+ if GOARCH == "amd64" {
+ flags.RegabiWrappers = true
+ }
+ // regabi is only supported on amd64 and arm64.
+ if GOARCH != "amd64" && GOARCH != "arm64" {
flags.RegabiWrappers = false
- flags.RegabiG = false
flags.RegabiReflect = false
- flags.RegabiDefer = false
flags.RegabiArgs = false
}
// Check regabi dependencies.
- if flags.RegabiG && !flags.RegabiWrappers {
- Error = fmt.Errorf("GOEXPERIMENT regabig requires regabiwrappers")
- }
- if flags.RegabiArgs && !(flags.RegabiWrappers && flags.RegabiG && flags.RegabiReflect && flags.RegabiDefer) {
- Error = fmt.Errorf("GOEXPERIMENT regabiargs requires regabiwrappers,regabig,regabireflect,regabidefer")
+ if flags.RegabiArgs && !(flags.RegabiWrappers && flags.RegabiReflect) {
+ Error = fmt.Errorf("GOEXPERIMENT regabiargs requires regabiwrappers,regabireflect")
}
return flags
}
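
Aside: a self-contained sketch (illustrative names, not the buildcfg source) of the dependency rule the hunk above now enforces, with regabig and regabidefer folded into the baseline so that regabiargs only needs regabiwrappers and regabireflect.

package main

import (
	"fmt"
	"strings"
)

type regabiFlags struct{ wrappers, reflect, args bool }

// parse mimics the GOEXPERIMENT comma-list handling sketched above:
// "regabi" is an umbrella for the remaining flags, "no<name>" disables one.
func parse(goexperiment string) (f regabiFlags, err error) {
	for _, name := range strings.Split(goexperiment, ",") {
		on := !strings.HasPrefix(name, "no")
		name = strings.TrimPrefix(name, "no")
		switch name {
		case "regabiwrappers":
			f.wrappers = on
		case "regabireflect":
			f.reflect = on
		case "regabiargs":
			f.args = on
		case "regabi":
			f.wrappers, f.reflect, f.args = on, on, on
		}
	}
	if f.args && !(f.wrappers && f.reflect) {
		err = fmt.Errorf("GOEXPERIMENT regabiargs requires regabiwrappers,regabireflect")
	}
	return f, err
}

func main() {
	fmt.Println(parse("regabiargs"))                              // missing prerequisites
	fmt.Println(parse("regabiwrappers,regabireflect,regabiargs")) // ok
}
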
diff --git a/src/internal/bytealg/compare_arm64.s b/src/internal/bytealg/compare_arm64.s
index 56d56f241e..5a80207258 100644
--- a/src/internal/bytealg/compare_arm64.s
+++ b/src/internal/bytealg/compare_arm64.s
@@ -5,65 +5,88 @@
#include "go_asm.h"
#include "textflag.h"
-TEXT ·Compare(SB),NOSPLIT|NOFRAME,$0-56
- MOVD a_base+0(FP), R2
- MOVD a_len+8(FP), R0
- MOVD b_base+24(FP), R3
- MOVD b_len+32(FP), R1
+TEXT ·Compare<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-56
+#ifdef GOEXPERIMENT_regabiargs
+ // R0 = a_base (want in R0)
+ // R1 = a_len (want in R1)
+ // R2 = a_cap (unused)
+ // R3 = b_base (want in R2)
+ // R4 = b_len (want in R3)
+ // R5 = b_cap (unused)
+ MOVD R3, R2
+ MOVD R4, R3
+#else
+ MOVD a_base+0(FP), R0
+ MOVD a_len+8(FP), R1
+ MOVD b_base+24(FP), R2
+ MOVD b_len+32(FP), R3
MOVD $ret+48(FP), R7
+#endif
B cmpbody<>(SB)
-TEXT runtime·cmpstring(SB),NOSPLIT|NOFRAME,$0-40
- MOVD a_base+0(FP), R2
- MOVD a_len+8(FP), R0
- MOVD b_base+16(FP), R3
- MOVD b_len+24(FP), R1
+TEXT runtime·cmpstring<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-40
+#ifdef GOEXPERIMENT_regabiargs
+ // R0 = a_base
+ // R1 = a_len
+ // R2 = b_base
+ // R3 = b_len
+#else
+ MOVD a_base+0(FP), R0
+ MOVD a_len+8(FP), R1
+ MOVD b_base+16(FP), R2
+ MOVD b_len+24(FP), R3
MOVD $ret+32(FP), R7
+#endif
B cmpbody<>(SB)
// On entry:
-// R0 is the length of a
-// R1 is the length of b
-// R2 points to the start of a
-// R3 points to the start of b
+// R0 points to the start of a
+// R1 is the length of a
+// R2 points to the start of b
+// R3 is the length of b
+#ifndef GOEXPERIMENT_regabiargs
// R7 points to return value (-1/0/1 will be written here)
+#endif
//
// On exit:
+#ifdef GOEXPERIMENT_regabiargs
+// R0 is the result
+#endif
// R4, R5, R6, R8, R9 and R10 are clobbered
TEXT cmpbody<>(SB),NOSPLIT|NOFRAME,$0-0
- CMP R2, R3
+ CMP R0, R2
BEQ samebytes // same starting pointers; compare lengths
- CMP R0, R1
- CSEL LT, R1, R0, R6 // R6 is min(R0, R1)
+ CMP R1, R3
+ CSEL LT, R3, R1, R6 // R6 is min(R1, R3)
CBZ R6, samebytes
BIC $0xf, R6, R10
CBZ R10, small // length < 16
- ADD R2, R10 // end of chunk16
+ ADD R0, R10 // end of chunk16
// length >= 16
chunk16_loop:
- LDP.P 16(R2), (R4, R8)
- LDP.P 16(R3), (R5, R9)
+ LDP.P 16(R0), (R4, R8)
+ LDP.P 16(R2), (R5, R9)
CMP R4, R5
BNE cmp
CMP R8, R9
BNE cmpnext
- CMP R10, R2
+ CMP R10, R0
BNE chunk16_loop
AND $0xf, R6, R6
CBZ R6, samebytes
SUBS $8, R6
BLT tail
// the length of tail > 8 bytes
- MOVD.P 8(R2), R4
- MOVD.P 8(R3), R5
+ MOVD.P 8(R0), R4
+ MOVD.P 8(R2), R5
CMP R4, R5
BNE cmp
SUB $8, R6
// compare last 8 bytes
tail:
- MOVD (R2)(R6), R4
- MOVD (R3)(R6), R5
+ MOVD (R0)(R6), R4
+ MOVD (R2)(R6), R5
CMP R4, R5
BEQ samebytes
cmp:
@@ -71,52 +94,56 @@ cmp:
REV R5, R5
CMP R4, R5
ret:
- MOVD $1, R4
- CNEG HI, R4, R4
- MOVD R4, (R7)
+ MOVD $1, R0
+ CNEG HI, R0, R0
+#ifndef GOEXPERIMENT_regabiargs
+ MOVD R0, (R7)
+#endif
RET
small:
TBZ $3, R6, lt_8
- MOVD (R2), R4
- MOVD (R3), R5
+ MOVD (R0), R4
+ MOVD (R2), R5
CMP R4, R5
BNE cmp
SUBS $8, R6
BEQ samebytes
+ ADD $8, R0
ADD $8, R2
- ADD $8, R3
SUB $8, R6
B tail
lt_8:
TBZ $2, R6, lt_4
- MOVWU (R2), R4
- MOVWU (R3), R5
+ MOVWU (R0), R4
+ MOVWU (R2), R5
CMPW R4, R5
BNE cmp
SUBS $4, R6
BEQ samebytes
+ ADD $4, R0
ADD $4, R2
- ADD $4, R3
lt_4:
TBZ $1, R6, lt_2
- MOVHU (R2), R4
- MOVHU (R3), R5
+ MOVHU (R0), R4
+ MOVHU (R2), R5
CMPW R4, R5
BNE cmp
+ ADD $2, R0
ADD $2, R2
- ADD $2, R3
lt_2:
TBZ $0, R6, samebytes
one:
- MOVBU (R2), R4
- MOVBU (R3), R5
+ MOVBU (R0), R4
+ MOVBU (R2), R5
CMPW R4, R5
BNE ret
samebytes:
- CMP R1, R0
- CSET NE, R4
- CNEG LO, R4, R4
- MOVD R4, (R7)
+ CMP R3, R1
+ CSET NE, R0
+ CNEG LO, R0, R0
+#ifndef GOEXPERIMENT_regabiargs
+ MOVD R0, (R7)
+#endif
RET
cmpnext:
REV R8, R4
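
Aside: the Go-level contract that cmpbody implements, independent of whether the result travels back in R0 (regabiargs) or through the pointer in R7; a tiny runnable reminder of the -1/0/+1 convention computed in the ret and samebytes blocks above.

package main

import (
	"bytes"
	"fmt"
)

func main() {
	fmt.Println(bytes.Compare([]byte("abc"), []byte("abd"))) // -1
	fmt.Println(bytes.Compare([]byte("abc"), []byte("abc"))) // 0
	fmt.Println(bytes.Compare([]byte("abd"), []byte("abc"))) // 1
}
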
diff --git a/src/internal/bytealg/equal_arm64.s b/src/internal/bytealg/equal_arm64.s
index 01aa7b7b7a..cf5cf54e59 100644
--- a/src/internal/bytealg/equal_arm64.s
+++ b/src/internal/bytealg/equal_arm64.s
@@ -6,58 +6,70 @@
#include "textflag.h"
// memequal(a, b unsafe.Pointer, size uintptr) bool
-TEXT runtime·memequal(SB),NOSPLIT|NOFRAME,$0-25
- MOVD size+16(FP), R1
+TEXT runtime·memequal<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-25
+#ifndef GOEXPERIMENT_regabiargs
+ MOVD size+16(FP), R2
+#endif
// short path to handle 0-byte case
- CBZ R1, equal
+ CBZ R2, equal
+#ifndef GOEXPERIMENT_regabiargs
MOVD a+0(FP), R0
- MOVD b+8(FP), R2
+ MOVD b+8(FP), R1
MOVD $ret+24(FP), R8
+#endif
B memeqbody<>(SB)
equal:
MOVD $1, R0
+#ifndef GOEXPERIMENT_regabiargs
MOVB R0, ret+24(FP)
+#endif
RET
// memequal_varlen(a, b unsafe.Pointer) bool
-TEXT runtime·memequal_varlen(SB),NOSPLIT,$40-17
- MOVD a+0(FP), R3
- MOVD b+8(FP), R4
- CMP R3, R4
+TEXT runtime·memequal_varlen<ABIInternal>(SB),NOSPLIT,$0-17
+#ifndef GOEXPERIMENT_regabiargs
+ MOVD a+0(FP), R0
+ MOVD b+8(FP), R1
+#endif
+ CMP R0, R1
BEQ eq
- MOVD 8(R26), R5 // compiler stores size at offset 8 in the closure
- CBZ R5, eq
- MOVD R3, 8(RSP)
- MOVD R4, 16(RSP)
- MOVD R5, 24(RSP)
- BL runtime·memequal(SB)
- MOVBU 32(RSP), R3
- MOVB R3, ret+16(FP)
- RET
+ MOVD 8(R26), R2 // compiler stores size at offset 8 in the closure
+ CBZ R2, eq
+#ifndef GOEXPERIMENT_regabiargs
+ MOVD $ret+16(FP), R8
+#endif
+ B memeqbody<>(SB)
eq:
- MOVD $1, R3
- MOVB R3, ret+16(FP)
+ MOVD $1, R0
+#ifndef GOEXPERIMENT_regabiargs
+ MOVB R0, ret+16(FP)
+#endif
RET
// input:
// R0: pointer a
-// R1: data len
-// R2: pointer b
+// R1: pointer b
+// R2: data len
+#ifdef GOEXPERIMENT_regabiargs
+// at return: result in R0
+#else
// R8: address to put result
+#endif
+
TEXT memeqbody<>(SB),NOSPLIT,$0
- CMP $1, R1
+ CMP $1, R2
// handle 1-byte special case for better performance
BEQ one
- CMP $16, R1
+ CMP $16, R2
// handle specially if length < 16
BLO tail
- BIC $0x3f, R1, R3
+ BIC $0x3f, R2, R3
CBZ R3, chunk16
// work with 64-byte chunks
ADD R3, R0, R6 // end of chunks
chunk64_loop:
VLD1.P (R0), [V0.D2, V1.D2, V2.D2, V3.D2]
- VLD1.P (R2), [V4.D2, V5.D2, V6.D2, V7.D2]
+ VLD1.P (R1), [V4.D2, V5.D2, V6.D2, V7.D2]
VCMEQ V0.D2, V4.D2, V8.D2
VCMEQ V1.D2, V5.D2, V9.D2
VCMEQ V2.D2, V6.D2, V10.D2
@@ -71,66 +83,72 @@ chunk64_loop:
CBZ R4, not_equal
CBZ R5, not_equal
BNE chunk64_loop
- AND $0x3f, R1, R1
- CBZ R1, equal
+ AND $0x3f, R2, R2
+ CBZ R2, equal
chunk16:
// work with 16-byte chunks
- BIC $0xf, R1, R3
+ BIC $0xf, R2, R3
CBZ R3, tail
ADD R3, R0, R6 // end of chunks
chunk16_loop:
LDP.P 16(R0), (R4, R5)
- LDP.P 16(R2), (R7, R9)
+ LDP.P 16(R1), (R7, R9)
EOR R4, R7
CBNZ R7, not_equal
EOR R5, R9
CBNZ R9, not_equal
CMP R0, R6
BNE chunk16_loop
- AND $0xf, R1, R1
- CBZ R1, equal
+ AND $0xf, R2, R2
+ CBZ R2, equal
tail:
// special compare of tail with length < 16
- TBZ $3, R1, lt_8
+ TBZ $3, R2, lt_8
MOVD (R0), R4
- MOVD (R2), R5
+ MOVD (R1), R5
EOR R4, R5
CBNZ R5, not_equal
- SUB $8, R1, R6 // offset of the last 8 bytes
+ SUB $8, R2, R6 // offset of the last 8 bytes
MOVD (R0)(R6), R4
- MOVD (R2)(R6), R5
+ MOVD (R1)(R6), R5
EOR R4, R5
CBNZ R5, not_equal
B equal
lt_8:
- TBZ $2, R1, lt_4
+ TBZ $2, R2, lt_4
MOVWU (R0), R4
- MOVWU (R2), R5
+ MOVWU (R1), R5
EOR R4, R5
CBNZ R5, not_equal
- SUB $4, R1, R6 // offset of the last 4 bytes
+ SUB $4, R2, R6 // offset of the last 4 bytes
MOVWU (R0)(R6), R4
- MOVWU (R2)(R6), R5
+ MOVWU (R1)(R6), R5
EOR R4, R5
CBNZ R5, not_equal
B equal
lt_4:
- TBZ $1, R1, lt_2
+ TBZ $1, R2, lt_2
MOVHU.P 2(R0), R4
- MOVHU.P 2(R2), R5
+ MOVHU.P 2(R1), R5
CMP R4, R5
BNE not_equal
lt_2:
- TBZ $0, R1, equal
+ TBZ $0, R2, equal
one:
MOVBU (R0), R4
- MOVBU (R2), R5
+ MOVBU (R1), R5
CMP R4, R5
BNE not_equal
equal:
MOVD $1, R0
+#ifndef GOEXPERIMENT_regabiargs
MOVB R0, (R8)
+#endif
RET
not_equal:
+#ifdef GOEXPERIMENT_regabiargs
+ MOVB ZR, R0
+#else
MOVB ZR, (R8)
+#endif
RET
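
Aside: at the Go level, memeqbody backs plain byte-wise equality (string == and bytes.Equal are lowered to runtime·memequal); the zero-length fast path above corresponds to the last line here.

package main

import (
	"bytes"
	"fmt"
)

func main() {
	fmt.Println(bytes.Equal([]byte("go"), []byte("go"))) // true
	fmt.Println(bytes.Equal([]byte("go"), []byte("Go"))) // false
	fmt.Println(bytes.Equal(nil, []byte{}))              // true: both lengths are 0
}
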
diff --git a/src/internal/goexperiment/exp_regabi_off.go b/src/internal/goexperiment/exp_regabi_off.go
deleted file mode 100644
index 5d8823843d..0000000000
--- a/src/internal/goexperiment/exp_regabi_off.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build !goexperiment.regabi
-// +build !goexperiment.regabi
-
-package goexperiment
-
-const Regabi = false
-const RegabiInt = 0
diff --git a/src/internal/goexperiment/exp_regabi_on.go b/src/internal/goexperiment/exp_regabi_on.go
deleted file mode 100644
index c08d58e9b2..0000000000
--- a/src/internal/goexperiment/exp_regabi_on.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build goexperiment.regabi
-// +build goexperiment.regabi
-
-package goexperiment
-
-const Regabi = true
-const RegabiInt = 1
diff --git a/src/internal/goexperiment/exp_regabidefer_off.go b/src/internal/goexperiment/exp_regabidefer_off.go
deleted file mode 100644
index b47c0c2cf5..0000000000
--- a/src/internal/goexperiment/exp_regabidefer_off.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build !goexperiment.regabidefer
-// +build !goexperiment.regabidefer
-
-package goexperiment
-
-const RegabiDefer = false
-const RegabiDeferInt = 0
diff --git a/src/internal/goexperiment/exp_regabidefer_on.go b/src/internal/goexperiment/exp_regabidefer_on.go
deleted file mode 100644
index bbf2f6c69b..0000000000
--- a/src/internal/goexperiment/exp_regabidefer_on.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build goexperiment.regabidefer
-// +build goexperiment.regabidefer
-
-package goexperiment
-
-const RegabiDefer = true
-const RegabiDeferInt = 1
diff --git a/src/internal/goexperiment/exp_regabig_off.go b/src/internal/goexperiment/exp_regabig_off.go
deleted file mode 100644
index 1b37d45186..0000000000
--- a/src/internal/goexperiment/exp_regabig_off.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build !goexperiment.regabig
-// +build !goexperiment.regabig
-
-package goexperiment
-
-const RegabiG = false
-const RegabiGInt = 0
diff --git a/src/internal/goexperiment/exp_regabig_on.go b/src/internal/goexperiment/exp_regabig_on.go
deleted file mode 100644
index 7e5b162e0b..0000000000
--- a/src/internal/goexperiment/exp_regabig_on.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build goexperiment.regabig
-// +build goexperiment.regabig
-
-package goexperiment
-
-const RegabiG = true
-const RegabiGInt = 1
diff --git a/src/internal/goexperiment/flags.go b/src/internal/goexperiment/flags.go
index cd4c178818..71e38cd047 100644
--- a/src/internal/goexperiment/flags.go
+++ b/src/internal/goexperiment/flags.go
@@ -68,19 +68,11 @@ type Flags struct {
// ABI0 and ABIInternal functions. Without this, the ABIs are
// assumed to be identical so cross-ABI calls are direct.
RegabiWrappers bool
- // RegabiG enables dedicated G and zero registers in
- // ABIInternal.
- //
- // Requires wrappers because it makes the ABIs incompatible.
- RegabiG bool
// RegabiReflect enables the register-passing paths in
// reflection calls. This is also gated by intArgRegs in
// reflect and runtime (which are disabled by default) so it
// can be used in targeted tests.
RegabiReflect bool
- // RegabiDefer enables desugaring defer and go calls
- // into argument-less closures.
- RegabiDefer bool
// RegabiArgs enables register arguments/results in all
// compiled Go functions.
//
diff --git a/src/internal/goversion/goversion.go b/src/internal/goversion/goversion.go
index 4cc15688c0..8fcea100dc 100644
--- a/src/internal/goversion/goversion.go
+++ b/src/internal/goversion/goversion.go
@@ -9,4 +9,4 @@ package goversion
//
// It should be updated at the start of each development cycle to be
// the version of the next Go 1.x release. See golang.org/issue/40705.
-const Version = 17
+const Version = 18
diff --git a/src/reflect/abi_test.go b/src/reflect/abi_test.go
index 5a0130f7b4..2b247d1d79 100644
--- a/src/reflect/abi_test.go
+++ b/src/reflect/abi_test.go
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build goexperiment.regabireflect
-// +build goexperiment.regabireflect
+//go:build goexperiment.regabireflect && goexperiment.regabiargs
+// +build goexperiment.regabireflect,goexperiment.regabiargs
package reflect_test
diff --git a/src/reflect/asm_amd64.s b/src/reflect/asm_amd64.s
index facf07516d..d21d498063 100644
--- a/src/reflect/asm_amd64.s
+++ b/src/reflect/asm_amd64.s
@@ -24,15 +24,13 @@
// See the comment on the declaration of makeFuncStub in makefunc.go
// for more details.
// No arg size here; runtime pulls arg map out of the func value.
-// makeFuncStub must be ABIInternal because it is placed directly
-// in function values.
// This frame contains two locals. See the comment above LOCAL_RETVALID.
-TEXT ·makeFuncStub<ABIInternal>(SB),(NOSPLIT|WRAPPER),$312
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$312
NO_LOCAL_POINTERS
// NO_LOCAL_POINTERS is a lie. The stack map for the two locals in this
// frame is specially handled in the runtime. See the comment above LOCAL_RETVALID.
LEAQ LOCAL_REGARGS(SP), R12
- CALL runtime·spillArgs<ABIInternal>(SB)
+ CALL runtime·spillArgs(SB)
MOVQ DX, 24(SP) // outside of moveMakeFuncArgPtrs's arg area
MOVQ DX, 0(SP)
MOVQ R12, 8(SP)
@@ -48,22 +46,20 @@ TEXT ·makeFuncStub<ABIInternal>(SB),(NOSPLIT|WRAPPER),$312
MOVQ AX, 24(SP)
CALL ·callReflect(SB)
LEAQ LOCAL_REGARGS(SP), R12
- CALL runtime·unspillArgs<ABIInternal>(SB)
+ CALL runtime·unspillArgs(SB)
RET
// methodValueCall is the code half of the function returned by makeMethodValue.
// See the comment on the declaration of methodValueCall in makefunc.go
// for more details.
// No arg size here; runtime pulls arg map out of the func value.
-// methodValueCall must be ABIInternal because it is placed directly
-// in function values.
// This frame contains two locals. See the comment above LOCAL_RETVALID.
-TEXT ·methodValueCall<ABIInternal>(SB),(NOSPLIT|WRAPPER),$312
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$312
NO_LOCAL_POINTERS
// NO_LOCAL_POINTERS is a lie. The stack map for the two locals in this
// frame is specially handled in the runtime. See the comment above LOCAL_RETVALID.
LEAQ LOCAL_REGARGS(SP), R12
- CALL runtime·spillArgs<ABIInternal>(SB)
+ CALL runtime·spillArgs(SB)
MOVQ DX, 24(SP) // outside of moveMakeFuncArgPtrs's arg area
MOVQ DX, 0(SP)
MOVQ R12, 8(SP)
@@ -79,5 +75,5 @@ TEXT ·methodValueCall<ABIInternal>(SB),(NOSPLIT|WRAPPER),$312
MOVQ AX, 24(SP)
CALL ·callMethod(SB)
LEAQ LOCAL_REGARGS(SP), R12
- CALL runtime·unspillArgs<ABIInternal>(SB)
+ CALL runtime·unspillArgs(SB)
RET
diff --git a/src/reflect/asm_arm64.s b/src/reflect/asm_arm64.s
index 5fe88e27e4..5b9b3573fa 100644
--- a/src/reflect/asm_arm64.s
+++ b/src/reflect/asm_arm64.s
@@ -5,34 +5,75 @@
#include "textflag.h"
#include "funcdata.h"
+// The frames of each of the two functions below contain two locals, at offsets
+// that are known to the runtime.
+//
+// The first local is a bool called retValid with a whole pointer-word reserved
+// for it on the stack. The purpose of this word is so that the runtime knows
+// whether the stack-allocated return space contains valid values for stack
+// scanning.
+//
+// The second local is an abi.RegArgs value whose offset is also known to the
+// runtime, so that a stack map for it can be constructed, since it contains
+// pointers visible to the GC.
+#define LOCAL_RETVALID 40
+#define LOCAL_REGARGS 48
+
+// The frame size of the functions below is
+// 32 (args of callReflect) + 8 (bool + padding) + 392 (abi.RegArgs) = 432.
+
// makeFuncStub is the code half of the function returned by MakeFunc.
// See the comment on the declaration of makeFuncStub in makefunc.go
// for more details.
// No arg size here, runtime pulls arg map out of the func value.
-TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$40
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$432
NO_LOCAL_POINTERS
+ // NO_LOCAL_POINTERS is a lie. The stack map for the two locals in this
+ // frame is specially handled in the runtime. See the comment above LOCAL_RETVALID.
+ ADD $LOCAL_REGARGS, RSP, R20
+ CALL runtime·spillArgs(SB)
+ MOVD R26, 32(RSP) // outside of moveMakeFuncArgPtrs's arg area
+ MOVD R26, 8(RSP)
+ MOVD R20, 16(RSP)
+ CALL ·moveMakeFuncArgPtrs(SB)
+ MOVD 32(RSP), R26
MOVD R26, 8(RSP)
MOVD $argframe+0(FP), R3
MOVD R3, 16(RSP)
- MOVB $0, 40(RSP)
- ADD $40, RSP, R3
+ MOVB $0, LOCAL_RETVALID(RSP)
+ ADD $LOCAL_RETVALID, RSP, R3
MOVD R3, 24(RSP)
- MOVD $0, 32(RSP)
- BL ·callReflect(SB)
+ ADD $LOCAL_REGARGS, RSP, R3
+ MOVD R3, 32(RSP)
+ CALL ·callReflect(SB)
+ ADD $LOCAL_REGARGS, RSP, R20
+ CALL runtime·unspillArgs(SB)
RET
// methodValueCall is the code half of the function returned by makeMethodValue.
// See the comment on the declaration of methodValueCall in makefunc.go
// for more details.
// No arg size here; runtime pulls arg map out of the func value.
-TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$40
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$432
NO_LOCAL_POINTERS
+ // NO_LOCAL_POINTERS is a lie. The stack map for the two locals in this
+ // frame is specially handled in the runtime. See the comment above LOCAL_RETVALID.
+ ADD $LOCAL_REGARGS, RSP, R20
+ CALL runtime·spillArgs(SB)
+ MOVD R26, 32(RSP) // outside of moveMakeFuncArgPtrs's arg area
+ MOVD R26, 8(RSP)
+ MOVD R20, 16(RSP)
+ CALL ·moveMakeFuncArgPtrs(SB)
+ MOVD 32(RSP), R26
MOVD R26, 8(RSP)
MOVD $argframe+0(FP), R3
MOVD R3, 16(RSP)
- MOVB $0, 40(RSP)
- ADD $40, RSP, R3
+ MOVB $0, LOCAL_RETVALID(RSP)
+ ADD $LOCAL_RETVALID, RSP, R3
MOVD R3, 24(RSP)
- MOVD $0, 32(RSP)
- BL ·callMethod(SB)
+ ADD $LOCAL_REGARGS, RSP, R3
+ MOVD R3, 32(RSP)
+ CALL ·callMethod(SB)
+ ADD $LOCAL_REGARGS, RSP, R20
+ CALL runtime·unspillArgs(SB)
RET
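
Aside: a one-line check of the frame-size arithmetic in the comment above; the 392-byte abi.RegArgs size is taken from that comment rather than recomputed here.

package main

import "fmt"

func main() {
	const callReflectArgs = 32 // argument area of callReflect / callMethod
	const retValidWord = 8     // bool retValid plus padding
	const regArgsSize = 392    // abi.RegArgs on arm64, per the comment above
	fmt.Println(callReflectArgs + retValidWord + regArgsSize) // 432
}
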
diff --git a/src/reflect/makefunc.go b/src/reflect/makefunc.go
index d53e68a359..588be8bcc1 100644
--- a/src/reflect/makefunc.go
+++ b/src/reflect/makefunc.go
@@ -52,11 +52,7 @@ func MakeFunc(typ Type, fn func(args []Value) (results []Value)) Value {
t := typ.common()
ftyp := (*funcType)(unsafe.Pointer(t))
- // Indirect Go func value (dummy) to obtain
- // actual code address. (A Go func value is a pointer
- // to a C function pointer. https://golang.org/s/go11func.)
- dummy := makeFuncStub
- code := **(**uintptr)(unsafe.Pointer(&dummy))
+ code := abi.FuncPCABI0(makeFuncStub)
// makeFuncImpl contains a stack map for use by the runtime
_, _, abi := funcLayout(ftyp, nil)
@@ -111,11 +107,7 @@ func makeMethodValue(op string, v Value) Value {
// v.Type returns the actual type of the method value.
ftyp := (*funcType)(unsafe.Pointer(v.Type().(*rtype)))
- // Indirect Go func value (dummy) to obtain
- // actual code address. (A Go func value is a pointer
- // to a C function pointer. https://golang.org/s/go11func.)
- dummy := methodValueCall
- code := **(**uintptr)(unsafe.Pointer(&dummy))
+ code := abi.FuncPCABI0(methodValueCall)
// methodValue contains a stack map for use by the runtime
_, _, abi := funcLayout(ftyp, nil)
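
Aside: an implementation-detail illustration (not the reflect source) of the "dummy" indirection the two hunks above delete. A Go func value points at a funcval whose first word is the entry PC, so double indirection recovers the code address; abi.FuncPCABI0 now asks the compiler for that address directly and stays correct across ABIs.

package main

import (
	"fmt"
	"unsafe"
)

func target() {}

func main() {
	// The deleted trick: treat the func value as a pointer to a code pointer.
	// This relies on the current func-value layout (golang.org/s/go11func).
	dummy := target
	code := **(**uintptr)(unsafe.Pointer(&dummy))
	fmt.Printf("entry PC of target: %#x\n", code)
}
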
diff --git a/src/runtime/asm.s b/src/runtime/asm.s
index 72c744925d..0e14fcd3e6 100644
--- a/src/runtime/asm.s
+++ b/src/runtime/asm.s
@@ -13,6 +13,6 @@ DATA runtime·no_pointers_stackmap+0x04(SB)/4, $0
GLOBL runtime·no_pointers_stackmap(SB),RODATA, $8
#ifndef GOARCH_amd64
-TEXT ·sigpanic0<ABIInternal>(SB),NOSPLIT,$0-0
+TEXT ·sigpanic0(SB),NOSPLIT,$0-0
JMP ·sigpanic<ABIInternal>(SB)
#endif
diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s
index ec5ea58028..dd2ea458cc 100644
--- a/src/runtime/asm_386.s
+++ b/src/runtime/asm_386.s
@@ -244,10 +244,8 @@ ok:
// create a new goroutine to start program
PUSHL $runtime·mainPC(SB) // entry
- PUSHL $0 // arg size
CALL runtime·newproc(SB)
POPL AX
- POPL AX
// start this M
CALL runtime·mstart(SB)
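
Aside: the dropped "arg size" pushes here and in the other bootstrap files track runtime.newproc losing its size argument now that go and defer calls are compiled as argument-less closures (the RegabiDefer flag removed above is folded into the baseline). A rough sketch of the assumed new shape, for orientation only:

package runtimesketch

// funcval approximates the runtime's func-value header (assumption, for illustration).
type funcval struct {
	fn uintptr
	// any closed-over variables follow fn
}

// newproc sketches the post-change signature: callers pass only the entry
// function value, so rt0_go pushes a single word before the call.
func newproc(fn *funcval) {
	_ = fn // the real runtime queues a new goroutine running fn here
}
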
diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s
index 14f29e1964..50ffa30ac5 100644
--- a/src/runtime/asm_amd64.s
+++ b/src/runtime/asm_amd64.s
@@ -214,10 +214,8 @@ ok:
// create a new goroutine to start program
MOVQ $runtime·mainPC(SB), AX // entry
PUSHQ AX
- PUSHQ $0 // arg size
CALL runtime·newproc(SB)
POPQ AX
- POPQ AX
// start this M
CALL runtime·mstart(SB)
@@ -469,7 +467,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0
#ifdef GOEXPERIMENT_regabireflect
// spillArgs stores return values from registers to a *internal/abi.RegArgs in R12.
-TEXT ·spillArgs<ABIInternal>(SB),NOSPLIT,$0-0
+TEXT ·spillArgs(SB),NOSPLIT,$0-0
MOVQ AX, 0(R12)
MOVQ BX, 8(R12)
MOVQ CX, 16(R12)
@@ -497,7 +495,7 @@ TEXT ·spillArgs<ABIInternal>(SB),NOSPLIT,$0-0
RET
// unspillArgs loads args into registers from a *internal/abi.RegArgs in R12.
-TEXT ·unspillArgs<ABIInternal>(SB),NOSPLIT,$0-0
+TEXT ·unspillArgs(SB),NOSPLIT,$0-0
MOVQ 0(R12), AX
MOVQ 8(R12), BX
MOVQ 16(R12), CX
@@ -525,11 +523,11 @@ TEXT ·unspillArgs<ABIInternal>(SB),NOSPLIT,$0-0
RET
#else
// spillArgs stores return values from registers to a pointer in R12.
-TEXT ·spillArgs<ABIInternal>(SB),NOSPLIT,$0-0
+TEXT ·spillArgs(SB),NOSPLIT,$0-0
RET
// unspillArgs loads args into registers from a pointer in R12.
-TEXT ·unspillArgs<ABIInternal>(SB),NOSPLIT,$0-0
+TEXT ·unspillArgs(SB),NOSPLIT,$0-0
RET
#endif
@@ -588,7 +586,7 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
REP;MOVSB; \
/* set up argument registers */ \
MOVQ regArgs+40(FP), R12; \
- CALL ·unspillArgs<ABIInternal>(SB); \
+ CALL ·unspillArgs(SB); \
/* call function */ \
MOVQ f+8(FP), DX; \
PCDATA $PCDATA_StackMapIndex, $0; \
@@ -596,7 +594,7 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
CALL R12; \
/* copy register return values back */ \
MOVQ regArgs+40(FP), R12; \
- CALL ·spillArgs<ABIInternal>(SB); \
+ CALL ·spillArgs(SB); \
MOVLQZX stackArgsSize+24(FP), CX; \
MOVLQZX stackRetOffset+28(FP), BX; \
MOVQ stackArgs+16(FP), DI; \
@@ -685,10 +683,6 @@ TEXT runtime·jmpdefer(SB), NOSPLIT, $0-16
// or else unwinding from systemstack_switch is incorrect.
// Smashes R9.
TEXT gosave_systemstack_switch<>(SB),NOSPLIT,$0
-#ifndef GOEXPERIMENT_regabig
- get_tls(R14)
- MOVQ g(R14), R14
-#endif
MOVQ $runtime·systemstack_switch(SB), R9
MOVQ R9, (g_sched+gobuf_pc)(R14)
LEAQ 8(SP), R9
@@ -1286,10 +1280,8 @@ aes65to128:
PXOR X10, X8
PXOR X11, X9
PXOR X9, X8
-#ifdef GOEXPERIMENT_regabig
// X15 must be zero on return
PXOR X15, X15
-#endif
#ifdef GOEXPERIMENT_regabiargs
MOVQ X8, AX // return X8
#else
@@ -1410,10 +1402,8 @@ aesloop:
PXOR X10, X8
PXOR X11, X9
PXOR X9, X8
-#ifdef GOEXPERIMENT_regabig
// X15 must be zero on return
PXOR X15, X15
-#endif
#ifdef GOEXPERIMENT_regabiargs
MOVQ X8, AX // return X8
#else
@@ -1596,10 +1586,10 @@ TEXT runtime·addmoduledata(SB),NOSPLIT,$0-0
// This function is injected from the signal handler for panicking
// signals. It is quite painful to set X15 in the signal context,
// so we do it here.
-TEXT ·sigpanic0<ABIInternal>(SB),NOSPLIT,$0-0
-#ifdef GOEXPERIMENT_regabig
+TEXT ·sigpanic0(SB),NOSPLIT,$0-0
get_tls(R14)
MOVQ g(R14), R14
+#ifndef GOOS_plan9
XORPS X15, X15
#endif
JMP ·sigpanic<ABIInternal>(SB)
@@ -1619,13 +1609,7 @@ TEXT runtime·gcWriteBarrier<ABIInternal>(SB),NOSPLIT,$112
MOVQ R13, 104(SP)
// TODO: Consider passing g.m.p in as an argument so they can be shared
// across a sequence of write barriers.
-#ifdef GOEXPERIMENT_regabig
MOVQ g_m(R14), R13
-#else
- get_tls(R13)
- MOVQ g(R13), R13
- MOVQ g_m(R13), R13
-#endif
MOVQ m_p(R13), R13
MOVQ (p_wbBuf+wbBuf_next)(R13), R12
// Increment wbBuf.next position.
diff --git a/src/runtime/asm_arm.s b/src/runtime/asm_arm.s
index 6d3573d68f..5c2bc00fe8 100644
--- a/src/runtime/asm_arm.s
+++ b/src/runtime/asm_arm.s
@@ -168,14 +168,13 @@ TEXT runtime·rt0_go(SB),NOSPLIT|NOFRAME|TOPFRAME,$0
BL runtime·schedinit(SB)
// create a new goroutine to start program
+ SUB $8, R13
MOVW $runtime·mainPC(SB), R0
- MOVW.W R0, -4(R13)
- MOVW $8, R0
- MOVW.W R0, -4(R13)
+ MOVW R0, 4(R13) // arg 1: fn
MOVW $0, R0
- MOVW.W R0, -4(R13) // push $0 as guard
+ MOVW R0, 0(R13) // dummy LR
BL runtime·newproc(SB)
- MOVW $12(R13), R13 // pop args and LR
+ ADD $8, R13 // pop args and LR
// start this M
BL runtime·mstart(SB)
diff --git a/src/runtime/asm_arm64.s b/src/runtime/asm_arm64.s
index 2d495397a8..e7c5fa3225 100644
--- a/src/runtime/asm_arm64.s
+++ b/src/runtime/asm_arm64.s
@@ -87,14 +87,11 @@ nocgo:
// create a new goroutine to start program
MOVD $runtime·mainPC(SB), R0 // entry
- MOVD RSP, R7
- MOVD.W $0, -8(R7)
- MOVD.W R0, -8(R7)
- MOVD.W $0, -8(R7)
- MOVD.W $0, -8(R7)
- MOVD R7, RSP
+ SUB $16, RSP
+ MOVD R0, 8(RSP) // arg
+ MOVD $0, 0(RSP) // dummy LR
BL runtime·newproc(SB)
- ADD $32, RSP
+ ADD $16, RSP
// start this M
BL runtime·mstart(SB)
@@ -103,7 +100,7 @@ nocgo:
MOVD R0, (R0) // boom
UNDEF
-DATA runtime·mainPC+0(SB)/8,$runtime·main(SB)
+DATA runtime·mainPC+0(SB)/8,$runtime·main<ABIInternal>(SB)
GLOBL runtime·mainPC(SB),RODATA,$8
TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0
@@ -152,7 +149,13 @@ TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0
// Switch to m->g0's stack, call fn(g).
// Fn must never return. It should gogo(&g->sched)
// to keep running g.
-TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8
+TEXT runtime·mcall<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-8
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R0, R26 // context
+#else
+ MOVD fn+0(FP), R26 // context
+#endif
+
// Save caller state in g->sched
MOVD RSP, R0
MOVD R0, (g_sched+gobuf_sp)(g)
@@ -168,14 +171,18 @@ TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8
CMP g, R3
BNE 2(PC)
B runtime·badmcall(SB)
- MOVD fn+0(FP), R26 // context
- MOVD 0(R26), R4 // code pointer
+
MOVD (g_sched+gobuf_sp)(g), R0
MOVD R0, RSP // sp = m->g0->sched.sp
MOVD (g_sched+gobuf_bp)(g), R29
- MOVD R3, -8(RSP)
- MOVD $0, -16(RSP)
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R3, R0 // arg = g
+#else
+ MOVD R3, -8(RSP) // arg = g
+#endif
+ MOVD $0, -16(RSP) // dummy LR
SUB $16, RSP
+ MOVD 0(R26), R4 // code pointer
BL (R4)
B runtime·badmcall2(SB)
@@ -310,6 +317,86 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
MOVW $0, R26
B runtime·morestack(SB)
+#ifdef GOEXPERIMENT_regabireflect
+// spillArgs stores return values from registers to a *internal/abi.RegArgs in R20.
+TEXT ·spillArgs(SB),NOSPLIT,$0-0
+ MOVD R0, (0*8)(R20)
+ MOVD R1, (1*8)(R20)
+ MOVD R2, (2*8)(R20)
+ MOVD R3, (3*8)(R20)
+ MOVD R4, (4*8)(R20)
+ MOVD R5, (5*8)(R20)
+ MOVD R6, (6*8)(R20)
+ MOVD R7, (7*8)(R20)
+ MOVD R8, (8*8)(R20)
+ MOVD R9, (9*8)(R20)
+ MOVD R10, (10*8)(R20)
+ MOVD R11, (11*8)(R20)
+ MOVD R12, (12*8)(R20)
+ MOVD R13, (13*8)(R20)
+ MOVD R14, (14*8)(R20)
+ MOVD R15, (15*8)(R20)
+ FMOVD F0, (16*8)(R20)
+ FMOVD F1, (17*8)(R20)
+ FMOVD F2, (18*8)(R20)
+ FMOVD F3, (19*8)(R20)
+ FMOVD F4, (20*8)(R20)
+ FMOVD F5, (21*8)(R20)
+ FMOVD F6, (22*8)(R20)
+ FMOVD F7, (23*8)(R20)
+ FMOVD F8, (24*8)(R20)
+ FMOVD F9, (25*8)(R20)
+ FMOVD F10, (26*8)(R20)
+ FMOVD F11, (27*8)(R20)
+ FMOVD F12, (28*8)(R20)
+ FMOVD F13, (29*8)(R20)
+ FMOVD F14, (30*8)(R20)
+ FMOVD F15, (31*8)(R20)
+ RET
+
+// unspillArgs loads args into registers from a *internal/abi.RegArgs in R20.
+TEXT ·unspillArgs(SB),NOSPLIT,$0-0
+ MOVD (0*8)(R20), R0
+ MOVD (1*8)(R20), R1
+ MOVD (2*8)(R20), R2
+ MOVD (3*8)(R20), R3
+ MOVD (4*8)(R20), R4
+ MOVD (5*8)(R20), R5
+ MOVD (6*8)(R20), R6
+ MOVD (7*8)(R20), R7
+ MOVD (8*8)(R20), R8
+ MOVD (9*8)(R20), R9
+ MOVD (10*8)(R20), R10
+ MOVD (11*8)(R20), R11
+ MOVD (12*8)(R20), R12
+ MOVD (13*8)(R20), R13
+ MOVD (14*8)(R20), R14
+ MOVD (15*8)(R20), R15
+ FMOVD (16*8)(R20), F0
+ FMOVD (17*8)(R20), F1
+ FMOVD (18*8)(R20), F2
+ FMOVD (19*8)(R20), F3
+ FMOVD (20*8)(R20), F4
+ FMOVD (21*8)(R20), F5
+ FMOVD (22*8)(R20), F6
+ FMOVD (23*8)(R20), F7
+ FMOVD (24*8)(R20), F8
+ FMOVD (25*8)(R20), F9
+ FMOVD (26*8)(R20), F10
+ FMOVD (27*8)(R20), F11
+ FMOVD (28*8)(R20), F12
+ FMOVD (29*8)(R20), F13
+ FMOVD (30*8)(R20), F14
+ FMOVD (31*8)(R20), F15
+ RET
+#else
+TEXT ·spillArgs(SB),NOSPLIT,$0-0
+ RET
+
+TEXT ·unspillArgs(SB),NOSPLIT,$0-0
+ RET
+#endif
+
// reflectcall: call a function with the given argument list
// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
// we don't have variable-sized frames, so we use a small number
@@ -381,12 +468,17 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
MOVBU.P R7, 1(R5); \
CMP R5, R6; \
BNE -3(PC); \
+ /* set up argument registers */ \
+ MOVD regArgs+40(FP), R20; \
+ CALL ·unspillArgs(SB); \
/* call function */ \
MOVD f+8(FP), R26; \
- MOVD (R26), R0; \
- PCDATA $PCDATA_StackMapIndex, $0; \
- BL (R0); \
+ MOVD (R26), R20; \
+ PCDATA $PCDATA_StackMapIndex, $0; \
+ BL (R20); \
/* copy return values back */ \
+ MOVD regArgs+40(FP), R20; \
+ CALL ·spillArgs(SB); \
MOVD stackArgsType+0(FP), R7; \
MOVD stackArgs+16(FP), R3; \
MOVWU stackArgsSize+24(FP), R4; \
@@ -403,11 +495,12 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
// to reflectcallmove. It does not follow the Go ABI; it expects its
// arguments in registers.
TEXT callRet<>(SB), NOSPLIT, $48-0
+ NO_LOCAL_POINTERS
MOVD R7, 8(RSP)
MOVD R3, 16(RSP)
MOVD R5, 24(RSP)
MOVD R4, 32(RSP)
- MOVD $0, 40(RSP)
+ MOVD R20, 40(RSP)
BL runtime·reflectcallmove(SB)
RET
@@ -440,12 +533,14 @@ CALLFN(·call536870912, 536870912)
CALLFN(·call1073741824, 1073741824)
// func memhash32(p unsafe.Pointer, h uintptr) uintptr
-TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24
- MOVB runtime·useAeshash(SB), R0
- CBZ R0, noaes
+TEXT runtime·memhash32<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
+ MOVB runtime·useAeshash(SB), R10
+ CBZ R10, noaes
+#ifndef GOEXPERIMENT_regabiargs
MOVD p+0(FP), R0
MOVD h+8(FP), R1
MOVD $ret+16(FP), R2
+#endif
MOVD $runtime·aeskeysched+0(SB), R3
VEOR V0.B16, V0.B16, V0.B16
@@ -459,18 +554,24 @@ TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24
AESMC V0.B16, V0.B16
AESE V2.B16, V0.B16
+#ifdef GOEXPERIMENT_regabiargs
+ VMOV V0.D[0], R0
+#else
VST1 [V0.D1], (R2)
+#endif
RET
noaes:
- B runtime·memhash32Fallback(SB)
+ B runtime·memhash32Fallback<ABIInternal>(SB)
// func memhash64(p unsafe.Pointer, h uintptr) uintptr
-TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24
- MOVB runtime·useAeshash(SB), R0
- CBZ R0, noaes
+TEXT runtime·memhash64<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
+ MOVB runtime·useAeshash(SB), R10
+ CBZ R10, noaes
+#ifndef GOEXPERIMENT_regabiargs
MOVD p+0(FP), R0
MOVD h+8(FP), R1
MOVD $ret+16(FP), R2
+#endif
MOVD $runtime·aeskeysched+0(SB), R3
VEOR V0.B16, V0.B16, V0.B16
@@ -484,75 +585,89 @@ TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24
AESMC V0.B16, V0.B16
AESE V2.B16, V0.B16
+#ifdef GOEXPERIMENT_regabiargs
+ VMOV V0.D[0], R0
+#else
VST1 [V0.D1], (R2)
+#endif
RET
noaes:
- B runtime·memhash64Fallback(SB)
+ B runtime·memhash64Fallback<ABIInternal>(SB)
// func memhash(p unsafe.Pointer, h, size uintptr) uintptr
-TEXT runtime·memhash(SB),NOSPLIT|NOFRAME,$0-32
- MOVB runtime·useAeshash(SB), R0
- CBZ R0, noaes
+TEXT runtime·memhash<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-32
+ MOVB runtime·useAeshash(SB), R10
+ CBZ R10, noaes
+#ifndef GOEXPERIMENT_regabiargs
MOVD p+0(FP), R0
- MOVD s+16(FP), R1
- MOVD h+8(FP), R3
- MOVD $ret+24(FP), R2
+ MOVD h+8(FP), R1
+ MOVD s+16(FP), R2
+ MOVD $ret+24(FP), R8
+#endif
B aeshashbody<>(SB)
noaes:
- B runtime·memhashFallback(SB)
+ B runtime·memhashFallback<ABIInternal>(SB)
// func strhash(p unsafe.Pointer, h uintptr) uintptr
-TEXT runtime·strhash(SB),NOSPLIT|NOFRAME,$0-24
- MOVB runtime·useAeshash(SB), R0
- CBZ R0, noaes
- MOVD p+0(FP), R10 // string pointer
- LDP (R10), (R0, R1) //string data/ length
- MOVD h+8(FP), R3
- MOVD $ret+16(FP), R2 // return adddress
+TEXT runtime·strhash<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
+ MOVB runtime·useAeshash(SB), R10
+ CBZ R10, noaes
+#ifdef GOEXPERIMENT_regabiargs
+ LDP (R0), (R0, R2) // string data / length
+#else
+ MOVD p+0(FP), R10 // string pointer
+ LDP (R10), (R0, R2) // string data / length
+ MOVD h+8(FP), R1
+	MOVD	$ret+16(FP), R8 // return address
+#endif
B aeshashbody<>(SB)
noaes:
- B runtime·strhashFallback(SB)
+ B runtime·strhashFallback<ABIInternal>(SB)
// R0: data
-// R1: length
-// R2: address to put return value
-// R3: seed data
+// R1: seed data
+// R2: length
+#ifdef GOEXPERIMENT_regabiargs
+// At return, R0 = return value
+#else
+// R8: address to put return value
+#endif
TEXT aeshashbody<>(SB),NOSPLIT|NOFRAME,$0
VEOR V30.B16, V30.B16, V30.B16
- VMOV R3, V30.D[0]
- VMOV R1, V30.D[1] // load length into seed
+ VMOV R1, V30.D[0]
+ VMOV R2, V30.D[1] // load length into seed
MOVD $runtime·aeskeysched+0(SB), R4
VLD1.P 16(R4), [V0.B16]
AESE V30.B16, V0.B16
AESMC V0.B16, V0.B16
- CMP $16, R1
+ CMP $16, R2
BLO aes0to15
BEQ aes16
- CMP $32, R1
+ CMP $32, R2
BLS aes17to32
- CMP $64, R1
+ CMP $64, R2
BLS aes33to64
- CMP $128, R1
+ CMP $128, R2
BLS aes65to128
B aes129plus
aes0to15:
- CBZ R1, aes0
+ CBZ R2, aes0
VEOR V2.B16, V2.B16, V2.B16
- TBZ $3, R1, less_than_8
+ TBZ $3, R2, less_than_8
VLD1.P 8(R0), V2.D[0]
less_than_8:
- TBZ $2, R1, less_than_4
+ TBZ $2, R2, less_than_4
VLD1.P 4(R0), V2.S[2]
less_than_4:
- TBZ $1, R1, less_than_2
+ TBZ $1, R2, less_than_2
VLD1.P 2(R0), V2.H[6]
less_than_2:
- TBZ $0, R1, done
+ TBZ $0, R2, done
VLD1 (R0), V2.B[14]
done:
AESE V0.B16, V2.B16
@@ -561,11 +676,21 @@ done:
AESMC V2.B16, V2.B16
AESE V0.B16, V2.B16
- VST1 [V2.D1], (R2)
+#ifdef GOEXPERIMENT_regabiargs
+ VMOV V2.D[0], R0
+#else
+ VST1 [V2.D1], (R8)
+#endif
RET
+
aes0:
- VST1 [V0.D1], (R2)
+#ifdef GOEXPERIMENT_regabiargs
+ VMOV V0.D[0], R0
+#else
+ VST1 [V0.D1], (R8)
+#endif
RET
+
aes16:
VLD1 (R0), [V2.B16]
B done
@@ -575,7 +700,7 @@ aes17to32:
VLD1 (R4), [V1.B16]
AESE V30.B16, V1.B16
AESMC V1.B16, V1.B16
- SUB $16, R1, R10
+ SUB $16, R2, R10
VLD1.P (R0)(R10), [V2.B16]
VLD1 (R0), [V3.B16]
@@ -593,7 +718,11 @@ aes17to32:
AESE V1.B16, V3.B16
VEOR V3.B16, V2.B16, V2.B16
- VST1 [V2.D1], (R2)
+#ifdef GOEXPERIMENT_regabiargs
+ VMOV V2.D[0], R0
+#else
+ VST1 [V2.D1], (R8)
+#endif
RET
aes33to64:
@@ -604,7 +733,7 @@ aes33to64:
AESMC V2.B16, V2.B16
AESE V30.B16, V3.B16
AESMC V3.B16, V3.B16
- SUB $32, R1, R10
+ SUB $32, R2, R10
VLD1.P (R0)(R10), [V4.B16, V5.B16]
VLD1 (R0), [V6.B16, V7.B16]
@@ -636,7 +765,11 @@ aes33to64:
VEOR V7.B16, V5.B16, V5.B16
VEOR V5.B16, V4.B16, V4.B16
- VST1 [V4.D1], (R2)
+#ifdef GOEXPERIMENT_regabiargs
+ VMOV V4.D[0], R0
+#else
+ VST1 [V4.D1], (R8)
+#endif
RET
aes65to128:
@@ -657,7 +790,7 @@ aes65to128:
AESE V30.B16, V7.B16
AESMC V7.B16, V7.B16
- SUB $64, R1, R10
+ SUB $64, R2, R10
VLD1.P (R0)(R10), [V8.B16, V9.B16, V10.B16, V11.B16]
VLD1 (R0), [V12.B16, V13.B16, V14.B16, V15.B16]
AESE V0.B16, V8.B16
@@ -711,7 +844,11 @@ aes65to128:
VEOR V11.B16, V9.B16, V9.B16
VEOR V9.B16, V8.B16, V8.B16
- VST1 [V8.D1], (R2)
+#ifdef GOEXPERIMENT_regabiargs
+ VMOV V8.D[0], R0
+#else
+ VST1 [V8.D1], (R8)
+#endif
RET
aes129plus:
@@ -732,12 +869,12 @@ aes129plus:
AESMC V6.B16, V6.B16
AESE V30.B16, V7.B16
AESMC V7.B16, V7.B16
- ADD R0, R1, R10
+ ADD R0, R2, R10
SUB $128, R10, R10
VLD1.P 64(R10), [V8.B16, V9.B16, V10.B16, V11.B16]
VLD1 (R10), [V12.B16, V13.B16, V14.B16, V15.B16]
- SUB $1, R1, R1
- LSR $7, R1, R1
+ SUB $1, R2, R2
+ LSR $7, R2, R2
aesloop:
AESE V8.B16, V0.B16
@@ -776,8 +913,8 @@ aesloop:
AESMC V6.B16, V6.B16
AESE V15.B16, V7.B16
AESMC V7.B16, V7.B16
- SUB $1, R1, R1
- CBNZ R1, aesloop
+ SUB $1, R2, R2
+ CBNZ R2, aesloop
AESE V8.B16, V0.B16
AESMC V0.B16, V0.B16
@@ -830,7 +967,11 @@ aesloop:
VEOR V4.B16, V6.B16, V4.B16
VEOR V4.B16, V0.B16, V0.B16
- VST1 [V0.D1], (R2)
+#ifdef GOEXPERIMENT_regabiargs
+ VMOV V0.D[0], R0
+#else
+ VST1 [V0.D1], (R8)
+#endif
RET
TEXT runtime·procyield(SB),NOSPLIT,$0-0
@@ -1052,7 +1193,8 @@ havem:
MOVD R1, 8(RSP)
MOVD R2, 16(RSP)
MOVD R3, 24(RSP)
- BL runtime·cgocallbackg(SB)
+ MOVD $runtime·cgocallbackg(SB), R0
+ CALL (R0) // indirect call to bypass nosplit check. We're on a different stack now.
// Restore g->sched (== m->curg->sched) from saved values.
MOVD 0(RSP), R5
@@ -1158,7 +1300,10 @@ TEXT ·checkASM(SB),NOSPLIT,$0-1
// It does not clobber any general-purpose registers,
// but may clobber others (e.g., floating point registers)
// The act of CALLing gcWriteBarrier will clobber R30 (LR).
-TEXT runtime·gcWriteBarrier(SB),NOSPLIT,$200
+//
+// Defined as ABIInternal since the compiler generates ABIInternal
+// calls to it directly and it does not use the stack-based Go ABI.
+TEXT runtime·gcWriteBarrier<ABIInternal>(SB),NOSPLIT,$200
// Save the registers clobbered by the fast path.
MOVD R0, 184(RSP)
MOVD R1, 192(RSP)
@@ -1250,71 +1395,129 @@ flush:
// in the caller's stack frame. These stubs write the args into that stack space and
// then tail call to the corresponding runtime handler.
// The tail call makes these stubs disappear in backtraces.
-TEXT runtime·panicIndex(SB),NOSPLIT,$0-16
+//
+// Defined as ABIInternal since the compiler generates ABIInternal
+// calls to it directly and it does not use the stack-based Go ABI.
+TEXT runtime·panicIndex<ABIInternal>(SB),NOSPLIT,$0-16
MOVD R0, x+0(FP)
MOVD R1, y+8(FP)
- JMP runtime·goPanicIndex(SB)
-TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16
+ JMP runtime·goPanicIndex<ABIInternal>(SB)
+TEXT runtime·panicIndexU<ABIInternal>(SB),NOSPLIT,$0-16
MOVD R0, x+0(FP)
MOVD R1, y+8(FP)
- JMP runtime·goPanicIndexU(SB)
-TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16
+ JMP runtime·goPanicIndexU<ABIInternal>(SB)
+TEXT runtime·panicSliceAlen<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R1, R0
+ MOVD R2, R1
+#else
MOVD R1, x+0(FP)
MOVD R2, y+8(FP)
- JMP runtime·goPanicSliceAlen(SB)
-TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSliceAlen<ABIInternal>(SB)
+TEXT runtime·panicSliceAlenU<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R1, R0
+ MOVD R2, R1
+#else
MOVD R1, x+0(FP)
MOVD R2, y+8(FP)
- JMP runtime·goPanicSliceAlenU(SB)
-TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSliceAlenU<ABIInternal>(SB)
+TEXT runtime·panicSliceAcap<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R1, R0
+ MOVD R2, R1
+#else
MOVD R1, x+0(FP)
MOVD R2, y+8(FP)
- JMP runtime·goPanicSliceAcap(SB)
-TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSliceAcap<ABIInternal>(SB)
+TEXT runtime·panicSliceAcapU<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R1, R0
+ MOVD R2, R1
+#else
MOVD R1, x+0(FP)
MOVD R2, y+8(FP)
- JMP runtime·goPanicSliceAcapU(SB)
-TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSliceAcapU<ABIInternal>(SB)
+TEXT runtime·panicSliceB<ABIInternal>(SB),NOSPLIT,$0-16
MOVD R0, x+0(FP)
MOVD R1, y+8(FP)
- JMP runtime·goPanicSliceB(SB)
-TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16
+ JMP runtime·goPanicSliceB<ABIInternal>(SB)
+TEXT runtime·panicSliceBU<ABIInternal>(SB),NOSPLIT,$0-16
MOVD R0, x+0(FP)
MOVD R1, y+8(FP)
- JMP runtime·goPanicSliceBU(SB)
-TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16
+ JMP runtime·goPanicSliceBU<ABIInternal>(SB)
+TEXT runtime·panicSlice3Alen<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R2, R0
+ MOVD R3, R1
+#else
MOVD R2, x+0(FP)
MOVD R3, y+8(FP)
- JMP runtime·goPanicSlice3Alen(SB)
-TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSlice3Alen<ABIInternal>(SB)
+TEXT runtime·panicSlice3AlenU<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R2, R0
+ MOVD R3, R1
+#else
MOVD R2, x+0(FP)
MOVD R3, y+8(FP)
- JMP runtime·goPanicSlice3AlenU(SB)
-TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSlice3AlenU<ABIInternal>(SB)
+TEXT runtime·panicSlice3Acap<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R2, R0
+ MOVD R3, R1
+#else
MOVD R2, x+0(FP)
MOVD R3, y+8(FP)
- JMP runtime·goPanicSlice3Acap(SB)
-TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSlice3Acap<ABIInternal>(SB)
+TEXT runtime·panicSlice3AcapU<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R2, R0
+ MOVD R3, R1
+#else
MOVD R2, x+0(FP)
MOVD R3, y+8(FP)
- JMP runtime·goPanicSlice3AcapU(SB)
-TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSlice3AcapU<ABIInternal>(SB)
+TEXT runtime·panicSlice3B<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R1, R0
+ MOVD R2, R1
+#else
MOVD R1, x+0(FP)
MOVD R2, y+8(FP)
- JMP runtime·goPanicSlice3B(SB)
-TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSlice3B<ABIInternal>(SB)
+TEXT runtime·panicSlice3BU<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R1, R0
+ MOVD R2, R1
+#else
MOVD R1, x+0(FP)
MOVD R2, y+8(FP)
- JMP runtime·goPanicSlice3BU(SB)
-TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSlice3BU<ABIInternal>(SB)
+TEXT runtime·panicSlice3C<ABIInternal>(SB),NOSPLIT,$0-16
MOVD R0, x+0(FP)
MOVD R1, y+8(FP)
- JMP runtime·goPanicSlice3C(SB)
-TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16
+ JMP runtime·goPanicSlice3C<ABIInternal>(SB)
+TEXT runtime·panicSlice3CU<ABIInternal>(SB),NOSPLIT,$0-16
MOVD R0, x+0(FP)
MOVD R1, y+8(FP)
- JMP runtime·goPanicSlice3CU(SB)
-TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-16
+ JMP runtime·goPanicSlice3CU<ABIInternal>(SB)
+TEXT runtime·panicSliceConvert<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R2, R0
+ MOVD R3, R1
+#else
MOVD R2, x+0(FP)
MOVD R3, y+8(FP)
- JMP runtime·goPanicSliceConvert(SB)
+#endif
+ JMP runtime·goPanicSliceConvert<ABIInternal>(SB)
diff --git a/src/runtime/asm_mips64x.s b/src/runtime/asm_mips64x.s
index d4d2280105..f3ac453d99 100644
--- a/src/runtime/asm_mips64x.s
+++ b/src/runtime/asm_mips64x.s
@@ -63,12 +63,11 @@ nocgo:
// create a new goroutine to start program
MOVV $runtime·mainPC(SB), R1 // entry
- ADDV $-24, R29
- MOVV R1, 16(R29)
- MOVV R0, 8(R29)
+ ADDV $-16, R29
+ MOVV R1, 8(R29)
MOVV R0, 0(R29)
JAL runtime·newproc(SB)
- ADDV $24, R29
+ ADDV $16, R29
// start this M
JAL runtime·mstart(SB)
diff --git a/src/runtime/asm_mipsx.s b/src/runtime/asm_mipsx.s
index ea7edf20cf..4dc165849e 100644
--- a/src/runtime/asm_mipsx.s
+++ b/src/runtime/asm_mipsx.s
@@ -64,12 +64,11 @@ nocgo:
// create a new goroutine to start program
MOVW $runtime·mainPC(SB), R1 // entry
- ADDU $-12, R29
- MOVW R1, 8(R29)
- MOVW R0, 4(R29)
+ ADDU $-8, R29
+ MOVW R1, 4(R29)
MOVW R0, 0(R29)
JAL runtime·newproc(SB)
- ADDU $12, R29
+ ADDU $8, R29
// start this M
JAL runtime·mstart(SB)
diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s
index 942cc14f17..a789d041e4 100644
--- a/src/runtime/asm_ppc64x.s
+++ b/src/runtime/asm_ppc64x.s
@@ -94,9 +94,8 @@ nocgo:
MOVDU R0, -8(R1)
MOVDU R0, -8(R1)
MOVDU R0, -8(R1)
- MOVDU R0, -8(R1)
BL runtime·newproc(SB)
- ADD $(16+FIXED_FRAME), R1
+ ADD $(8+FIXED_FRAME), R1
// start this M
BL runtime·mstart(SB)
diff --git a/src/runtime/asm_riscv64.s b/src/runtime/asm_riscv64.s
index ef7af4e10d..69ab88f1d2 100644
--- a/src/runtime/asm_riscv64.s
+++ b/src/runtime/asm_riscv64.s
@@ -57,12 +57,11 @@ nocgo:
// create a new goroutine to start program
MOV $runtime·mainPC(SB), T0 // entry
- ADD $-24, X2
- MOV T0, 16(X2)
- MOV ZERO, 8(X2)
+ ADD $-16, X2
+ MOV T0, 8(X2)
MOV ZERO, 0(X2)
CALL runtime·newproc(SB)
- ADD $24, X2
+ ADD $16, X2
// start this M
CALL runtime·mstart(SB)
diff --git a/src/runtime/asm_s390x.s b/src/runtime/asm_s390x.s
index fb38271630..534cb6112c 100644
--- a/src/runtime/asm_s390x.s
+++ b/src/runtime/asm_s390x.s
@@ -147,12 +147,11 @@ nocgo:
// create a new goroutine to start program
MOVD $runtime·mainPC(SB), R2 // entry
- SUB $24, R15
- MOVD R2, 16(R15)
- MOVD $0, 8(R15)
+ SUB $16, R15
+ MOVD R2, 8(R15)
MOVD $0, 0(R15)
BL runtime·newproc(SB)
- ADD $24, R15
+ ADD $16, R15
// start this M
BL runtime·mstart(SB)
diff --git a/src/runtime/asm_wasm.s b/src/runtime/asm_wasm.s
index 33c335ba5a..53c271aa70 100644
--- a/src/runtime/asm_wasm.s
+++ b/src/runtime/asm_wasm.s
@@ -18,8 +18,7 @@ TEXT runtime·rt0_go(SB), NOSPLIT|NOFRAME|TOPFRAME, $0
CALLNORESUME runtime·args(SB)
CALLNORESUME runtime·osinit(SB)
CALLNORESUME runtime·schedinit(SB)
- MOVD $0, 0(SP)
- MOVD $runtime·mainPC(SB), 8(SP)
+ MOVD $runtime·mainPC(SB), 0(SP)
CALLNORESUME runtime·newproc(SB)
CALL runtime·mstart(SB) // WebAssembly stack will unwind when switching to another goroutine
UNDEF
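Across all of the rt0_go changes above, the extra word pushed before calling runtime·newproc is gone: only the entry function pointer is passed, matching the Go-side call later in this patch where newproc1 is invoked without a size or argument pointer. Assumed shape of the signature change, inferred from these call sites rather than quoted from the patch:

	// Sketch (assumed signatures, inferred from the call sites above):
	//
	//	old: func newproc(siz int32, fn *funcval)
	//	new: func newproc(fn *funcval)
	//
	// rt0_go therefore only pushes fn (plus a dummy LR where the arch needs one).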
diff --git a/src/runtime/chan.go b/src/runtime/chan.go
index f2a75b30f4..3cdb5dce11 100644
--- a/src/runtime/chan.go
+++ b/src/runtime/chan.go
@@ -18,6 +18,7 @@ package runtime
// c.qcount < c.dataqsiz implies that c.sendq is empty.
import (
+ "internal/abi"
"runtime/internal/atomic"
"runtime/internal/math"
"unsafe"
@@ -169,7 +170,7 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
}
if raceenabled {
- racereadpc(c.raceaddr(), callerpc, funcPC(chansend))
+ racereadpc(c.raceaddr(), callerpc, abi.FuncPCABIInternal(chansend))
}
// Fast path: check for failed non-blocking operation without acquiring the lock.
@@ -365,7 +366,7 @@ func closechan(c *hchan) {
if raceenabled {
callerpc := getcallerpc()
- racewritepc(c.raceaddr(), callerpc, funcPC(closechan))
+ racewritepc(c.raceaddr(), callerpc, abi.FuncPCABIInternal(closechan))
racerelease(c.raceaddr())
}
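The funcPC helper is being replaced throughout the runtime: abi.FuncPCABIInternal(f) gives the PC to attribute for a Go (ABIInternal) function, while abi.FuncPCABI0 (used elsewhere in this patch) is for assembly entry points defined under ABI0. The call pattern, as it appears in these hunks:

	callerpc := getcallerpc()
	racereadpc(c.raceaddr(), callerpc, abi.FuncPCABIInternal(chansend))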
diff --git a/src/runtime/cpuprof.go b/src/runtime/cpuprof.go
index e5d0193b9c..c81ab710c2 100644
--- a/src/runtime/cpuprof.go
+++ b/src/runtime/cpuprof.go
@@ -13,6 +13,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -166,8 +167,8 @@ func (p *cpuProfile) addExtra() {
if p.lostExtra > 0 {
hdr := [1]uint64{p.lostExtra}
lostStk := [2]uintptr{
- funcPC(_LostExternalCode) + sys.PCQuantum,
- funcPC(_ExternalCode) + sys.PCQuantum,
+ abi.FuncPCABIInternal(_LostExternalCode) + sys.PCQuantum,
+ abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum,
}
p.log.write(nil, 0, hdr[:], lostStk[:])
p.lostExtra = 0
@@ -176,8 +177,8 @@ func (p *cpuProfile) addExtra() {
if p.lostAtomic > 0 {
hdr := [1]uint64{p.lostAtomic}
lostStk := [2]uintptr{
- funcPC(_LostSIGPROFDuringAtomic64) + sys.PCQuantum,
- funcPC(_System) + sys.PCQuantum,
+ abi.FuncPCABIInternal(_LostSIGPROFDuringAtomic64) + sys.PCQuantum,
+ abi.FuncPCABIInternal(_System) + sys.PCQuantum,
}
p.log.write(nil, 0, hdr[:], lostStk[:])
p.lostAtomic = 0
diff --git a/src/runtime/debugcall.go b/src/runtime/debugcall.go
index faddf59eed..ad66a18c26 100644
--- a/src/runtime/debugcall.go
+++ b/src/runtime/debugcall.go
@@ -112,7 +112,7 @@ func debugCallWrap(dispatch uintptr) {
// closure and start the goroutine with that closure, but the compiler disallows
// implicit closure allocation in the runtime.
fn := debugCallWrap1
- newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), nil, 0, gp, callerpc)
+ newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), gp, callerpc)
args := &debugCallWrapArgs{
dispatch: dispatch,
callingG: gp,
diff --git a/src/runtime/defs_plan9_386.go b/src/runtime/defs_plan9_386.go
index 49129b3c3f..428044df68 100644
--- a/src/runtime/defs_plan9_386.go
+++ b/src/runtime/defs_plan9_386.go
@@ -61,4 +61,4 @@ func dumpregs(u *ureg) {
print("gs ", hex(u.gs), "\n")
}
-func sigpanictramp() {}
+func sigpanictramp()
diff --git a/src/runtime/defs_plan9_amd64.go b/src/runtime/defs_plan9_amd64.go
index 0099563034..15a27fc7db 100644
--- a/src/runtime/defs_plan9_amd64.go
+++ b/src/runtime/defs_plan9_amd64.go
@@ -78,4 +78,4 @@ func dumpregs(u *ureg) {
print("gs ", hex(u.gs), "\n")
}
-func sigpanictramp() {}
+func sigpanictramp()
diff --git a/src/runtime/duff_arm64.s b/src/runtime/duff_arm64.s
index 128b076af9..33c4905078 100644
--- a/src/runtime/duff_arm64.s
+++ b/src/runtime/duff_arm64.s
@@ -4,7 +4,7 @@
#include "textflag.h"
-TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0
+TEXT runtime·duffzero<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-0
STP.P (ZR, ZR), 16(R20)
STP.P (ZR, ZR), 16(R20)
STP.P (ZR, ZR), 16(R20)
@@ -71,7 +71,7 @@ TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0
STP (ZR, ZR), (R20)
RET
-TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0
+TEXT runtime·duffcopy<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-0
LDP.P 16(R20), (R26, R27)
STP.P (R26, R27), 16(R21)
diff --git a/src/runtime/export_debug_test.go b/src/runtime/export_debug_test.go
index fe4c9045c1..9808fd5299 100644
--- a/src/runtime/export_debug_test.go
+++ b/src/runtime/export_debug_test.go
@@ -125,7 +125,7 @@ func (h *debugCallHandler) inject(info *siginfo, ctxt *sigctxt, gp2 *g) bool {
h.savedFP = *h.savedRegs.fpstate
h.savedRegs.fpstate = nil
// Set PC to debugCallV2.
- ctxt.set_rip(uint64(funcPC(debugCallV2)))
+ ctxt.set_rip(uint64(abi.FuncPCABIInternal(debugCallV2)))
// Call injected. Switch to the debugCall protocol.
testSigtrap = h.handleF
case _Grunnable:
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index c8d01fbb15..60c06c3f10 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -27,8 +27,6 @@ var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr
-var FuncPC = funcPC
-
var Fastlog2 = fastlog2
var Atoi = atoi
@@ -147,40 +145,28 @@ func RunSchedLocalQueueStealTest() {
}
}
-// Temporary to enable register ABI bringup.
-// TODO(register args): convert back to local variables in RunSchedLocalQueueEmptyTest that
-// get passed to the "go" stmts there.
-var RunSchedLocalQueueEmptyState struct {
- done chan bool
- ready *uint32
- p *p
-}
-
func RunSchedLocalQueueEmptyTest(iters int) {
// Test that runq is not spuriously reported as empty.
// Runq emptiness affects scheduling decisions and spurious emptiness
// can lead to underutilization (both runnable Gs and idle Ps coexist
// for arbitrary long time).
done := make(chan bool, 1)
- RunSchedLocalQueueEmptyState.done = done
p := new(p)
- RunSchedLocalQueueEmptyState.p = p
gs := make([]g, 2)
ready := new(uint32)
- RunSchedLocalQueueEmptyState.ready = ready
for i := 0; i < iters; i++ {
*ready = 0
next0 := (i & 1) == 0
next1 := (i & 2) == 0
runqput(p, &gs[0], next0)
go func() {
- for atomic.Xadd(RunSchedLocalQueueEmptyState.ready, 1); atomic.Load(RunSchedLocalQueueEmptyState.ready) != 2; {
+ for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
}
- if runqempty(RunSchedLocalQueueEmptyState.p) {
- //println("next:", next0, next1)
+ if runqempty(p) {
+ println("next:", next0, next1)
throw("queue is empty")
}
- RunSchedLocalQueueEmptyState.done <- true
+ done <- true
}()
for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
}
diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go
index 934e55f495..47e4b6b0d1 100644
--- a/src/runtime/heapdump.go
+++ b/src/runtime/heapdump.go
@@ -381,12 +381,13 @@ func dumpgoroutine(gp *g) {
dumpint(uint64(uintptr(unsafe.Pointer(gp))))
dumpint(uint64(d.sp))
dumpint(uint64(d.pc))
- dumpint(uint64(uintptr(unsafe.Pointer(d.fn))))
- if d.fn == nil {
+ fn := *(**funcval)(unsafe.Pointer(&d.fn))
+ dumpint(uint64(uintptr(unsafe.Pointer(fn))))
+ if fn == nil {
// d.fn can be nil for open-coded defers
dumpint(uint64(0))
} else {
- dumpint(uint64(uintptr(unsafe.Pointer(d.fn.fn))))
+ dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
}
dumpint(uint64(uintptr(unsafe.Pointer(d.link))))
}
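d.fn is now reinterpreted through *funcval before dumping, suggesting the defer record stores an ordinary func value rather than a *funcval. The layout assumed here is the runtime's closure representation (funcval as declared elsewhere in the runtime): the word a func value points at is the entry PC. A minimal sketch of that assumption:

	type funcval struct {
		fn uintptr
		// variable-size, fn-specific data follows
	}

	fn := *(**funcval)(unsafe.Pointer(&d.fn)) // view the func() field as a *funcval
	if fn != nil {
		_ = fn.fn // entry PC of the deferred function
	}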
diff --git a/src/runtime/iface.go b/src/runtime/iface.go
index cd5fead999..b397d1ff8d 100644
--- a/src/runtime/iface.go
+++ b/src/runtime/iface.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -317,7 +318,7 @@ var (
func convT2E(t *_type, elem unsafe.Pointer) (e eface) {
if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2E))
+ raceReadObjectPC(t, elem, getcallerpc(), abi.FuncPCABIInternal(convT2E))
}
if msanenabled {
msanread(elem, t.size)
@@ -390,7 +391,7 @@ func convTslice(val []byte) (x unsafe.Pointer) {
func convT2Enoptr(t *_type, elem unsafe.Pointer) (e eface) {
if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2Enoptr))
+ raceReadObjectPC(t, elem, getcallerpc(), abi.FuncPCABIInternal(convT2Enoptr))
}
if msanenabled {
msanread(elem, t.size)
@@ -405,7 +406,7 @@ func convT2Enoptr(t *_type, elem unsafe.Pointer) (e eface) {
func convT2I(tab *itab, elem unsafe.Pointer) (i iface) {
t := tab._type
if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2I))
+ raceReadObjectPC(t, elem, getcallerpc(), abi.FuncPCABIInternal(convT2I))
}
if msanenabled {
msanread(elem, t.size)
@@ -420,7 +421,7 @@ func convT2I(tab *itab, elem unsafe.Pointer) (i iface) {
func convT2Inoptr(tab *itab, elem unsafe.Pointer) (i iface) {
t := tab._type
if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2Inoptr))
+ raceReadObjectPC(t, elem, getcallerpc(), abi.FuncPCABIInternal(convT2Inoptr))
}
if msanenabled {
msanread(elem, t.size)
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index 2759bbdaf9..c5f62483ff 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -420,8 +420,6 @@ func mallocinit() {
throw("bad TinySizeClass")
}
- testdefersizes()
-
if heapArenaBitmapBytes&(heapArenaBitmapBytes-1) != 0 {
// heapBits expects modular arithmetic on bitmap
// addresses to work.
@@ -1088,15 +1086,6 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
var scanSize uintptr
if !noscan {
- // If allocating a defer+arg block, now that we've picked a malloc size
- // large enough to hold everything, cut the "asked for" size down to
- // just the defer header, so that the GC bitmap will record the arg block
- // as containing nothing at all (as if it were unused space at the end of
- // a malloc block caused by size rounding).
- // The defer arg areas are scanned as part of scanstack.
- if typ == deferType {
- dataSize = unsafe.Sizeof(_defer{})
- }
heapBitsSetType(uintptr(x), size, dataSize, typ)
if dataSize > typ.size {
// Array allocation. If there are any
diff --git a/src/runtime/map.go b/src/runtime/map.go
index 111db56b01..5575040f2a 100644
--- a/src/runtime/map.go
+++ b/src/runtime/map.go
@@ -54,6 +54,7 @@ package runtime
// before the table grows. Typical tables will be somewhat less loaded.
import (
+ "internal/abi"
"runtime/internal/atomic"
"runtime/internal/math"
"runtime/internal/sys"
@@ -394,7 +395,7 @@ func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets un
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := getcallerpc()
- pc := funcPC(mapaccess1)
+ pc := abi.FuncPCABIInternal(mapaccess1)
racereadpc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
}
@@ -452,7 +453,7 @@ bucketloop:
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- pc := funcPC(mapaccess2)
+ pc := abi.FuncPCABIInternal(mapaccess2)
racereadpc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
}
@@ -574,7 +575,7 @@ func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
}
if raceenabled {
callerpc := getcallerpc()
- pc := funcPC(mapassign)
+ pc := abi.FuncPCABIInternal(mapassign)
racewritepc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
}
@@ -685,7 +686,7 @@ done:
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- pc := funcPC(mapdelete)
+ pc := abi.FuncPCABIInternal(mapdelete)
racewritepc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
}
@@ -802,7 +803,7 @@ search:
func mapiterinit(t *maptype, h *hmap, it *hiter) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
}
if h == nil || h.count == 0 {
@@ -852,7 +853,7 @@ func mapiternext(it *hiter) {
h := it.h
if raceenabled {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
}
if h.flags&hashWriting != 0 {
throw("concurrent map iteration and map write")
@@ -978,7 +979,7 @@ next:
func mapclear(t *maptype, h *hmap) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- pc := funcPC(mapclear)
+ pc := abi.FuncPCABIInternal(mapclear)
racewritepc(unsafe.Pointer(h), callerpc, pc)
}
@@ -1363,7 +1364,7 @@ func reflect_maplen(h *hmap) int {
}
if raceenabled {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
}
return h.count
}
@@ -1375,7 +1376,7 @@ func reflectlite_maplen(h *hmap) int {
}
if raceenabled {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
}
return h.count
}
diff --git a/src/runtime/map_fast32.go b/src/runtime/map_fast32.go
index 8d52dad217..420a01daec 100644
--- a/src/runtime/map_fast32.go
+++ b/src/runtime/map_fast32.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
@@ -12,7 +13,7 @@ import (
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast32))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0])
@@ -52,7 +53,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast32))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0]), false
@@ -95,7 +96,7 @@ func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
}
if raceenabled {
callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
@@ -185,7 +186,7 @@ func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
}
if raceenabled {
callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
@@ -272,7 +273,7 @@ done:
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast32))
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast32))
}
if h == nil || h.count == 0 {
return
diff --git a/src/runtime/map_fast64.go b/src/runtime/map_fast64.go
index f1368dc774..cb202113ac 100644
--- a/src/runtime/map_fast64.go
+++ b/src/runtime/map_fast64.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
@@ -12,7 +13,7 @@ import (
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast64))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0])
@@ -52,7 +53,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast64))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0]), false
@@ -95,7 +96,7 @@ func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
}
if raceenabled {
callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
@@ -185,7 +186,7 @@ func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
}
if raceenabled {
callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
@@ -272,7 +273,7 @@ done:
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast64))
}
if h == nil || h.count == 0 {
return
diff --git a/src/runtime/map_faststr.go b/src/runtime/map_faststr.go
index 0673dd39c8..ed7e46b5f6 100644
--- a/src/runtime/map_faststr.go
+++ b/src/runtime/map_faststr.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
@@ -12,7 +13,7 @@ import (
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_faststr))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0])
@@ -107,7 +108,7 @@ dohash:
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_faststr))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0]), false
@@ -205,7 +206,7 @@ func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
}
if raceenabled {
callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_faststr))
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_faststr))
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
@@ -300,7 +301,7 @@ done:
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_faststr))
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_faststr))
}
if h == nil || h.count == 0 {
return
diff --git a/src/runtime/mbarrier.go b/src/runtime/mbarrier.go
index 4994347bde..b06ee725dd 100644
--- a/src/runtime/mbarrier.go
+++ b/src/runtime/mbarrier.go
@@ -177,8 +177,8 @@ func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
//go:linkname reflect_typedmemmove reflect.typedmemmove
func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
if raceenabled {
- raceWriteObjectPC(typ, dst, getcallerpc(), funcPC(reflect_typedmemmove))
- raceReadObjectPC(typ, src, getcallerpc(), funcPC(reflect_typedmemmove))
+ raceWriteObjectPC(typ, dst, getcallerpc(), abi.FuncPCABIInternal(reflect_typedmemmove))
+ raceReadObjectPC(typ, src, getcallerpc(), abi.FuncPCABIInternal(reflect_typedmemmove))
}
if msanenabled {
msanwrite(dst, typ.size)
@@ -254,7 +254,7 @@ func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe
// code and needs its own instrumentation.
if raceenabled {
callerpc := getcallerpc()
- pc := funcPC(slicecopy)
+ pc := abi.FuncPCABIInternal(slicecopy)
racewriterangepc(dstPtr, uintptr(n)*typ.size, callerpc, pc)
racereadrangepc(srcPtr, uintptr(n)*typ.size, callerpc, pc)
}
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index 32b8db7a50..819acf40bd 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -386,10 +386,10 @@ func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex ui
// If s is nil, the virtual address has never been part of the heap.
// This pointer may be to some mmap'd region, so we allow it.
if s == nil {
- if GOARCH == "amd64" && p == clobberdeadPtr && debug.invalidptr != 0 {
- // Crash if clobberdeadPtr is seen. Only on AMD64 for now, as
- // it is the only platform where compiler's clobberdead mode is
- // implemented. On AMD64 clobberdeadPtr cannot be a valid address.
+ if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
+ // Crash if clobberdeadPtr is seen. Only on AMD64 and ARM64 for now,
+		// as they are the only platforms where the compiler's clobberdead mode is
+ // implemented. On these platforms clobberdeadPtr cannot be a valid address.
badPointer(s, p, refBase, refOff)
}
return
diff --git a/src/runtime/memclr_amd64.s b/src/runtime/memclr_amd64.s
index a10f57bd8c..6c78869f4c 100644
--- a/src/runtime/memclr_amd64.s
+++ b/src/runtime/memclr_amd64.s
@@ -37,9 +37,6 @@ tail:
JE _8
CMPQ BX, $16
JBE _9through16
-#ifndef GOEXPERIMENT_regabig
- PXOR X15, X15
-#endif
CMPQ BX, $32
JBE _17through32
CMPQ BX, $64
diff --git a/src/runtime/memclr_arm64.s b/src/runtime/memclr_arm64.s
index c1a0dcef58..b80cca6a1c 100644
--- a/src/runtime/memclr_arm64.s
+++ b/src/runtime/memclr_arm64.s
@@ -8,9 +8,11 @@
// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
// Also called from assembly in sys_windows_arm64.s without g (but using Go stack convention).
-TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-16
+TEXT runtime·memclrNoHeapPointers<ABIInternal>(SB),NOSPLIT,$0-16
+#ifndef GOEXPERIMENT_regabiargs
MOVD ptr+0(FP), R0
MOVD n+8(FP), R1
+#endif
CMP $16, R1
// If n is equal to 16 bytes, use zero_exact_16 to zero
diff --git a/src/runtime/memmove_amd64.s b/src/runtime/memmove_amd64.s
index 24c6529f58..af538d4bce 100644
--- a/src/runtime/memmove_amd64.s
+++ b/src/runtime/memmove_amd64.s
@@ -254,10 +254,8 @@ move_129through256:
MOVOU X13, -48(DI)(BX*1)
MOVOU X14, -32(DI)(BX*1)
MOVOU X15, -16(DI)(BX*1)
-#ifdef GOEXPERIMENT_regabig
// X15 must be zero on return
PXOR X15, X15
-#endif
RET
move_256through2048:
SUBQ $256, BX
@@ -297,10 +295,8 @@ move_256through2048:
LEAQ 256(SI), SI
LEAQ 256(DI), DI
JGE move_256through2048
-#ifdef GOEXPERIMENT_regabig
// X15 must be zero on return
PXOR X15, X15
-#endif
JMP tail
avxUnaligned:
diff --git a/src/runtime/memmove_arm64.s b/src/runtime/memmove_arm64.s
index 43d27629e5..bee3b00c47 100644
--- a/src/runtime/memmove_arm64.s
+++ b/src/runtime/memmove_arm64.s
@@ -26,10 +26,12 @@
// The loop tail is handled by always copying 64 bytes from the end.
// func memmove(to, from unsafe.Pointer, n uintptr)
-TEXT runtime·memmove(SB), NOSPLIT|NOFRAME, $0-24
+TEXT runtime·memmove<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-24
+#ifndef GOEXPERIMENT_regabiargs
MOVD to+0(FP), R0
MOVD from+8(FP), R1
MOVD n+16(FP), R2
+#endif
CBZ R2, copy0
// Small copies: 1..16 bytes
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index 4585663535..34b5b482a3 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -167,22 +167,17 @@ func gcinit() {
lockInit(&work.wbufSpans.lock, lockRankWbufSpans)
}
-// Temporary in order to enable register ABI work.
-// TODO(register args): convert back to local chan in gcenabled, passed to "go" stmts.
-var gcenable_setup chan int
-
// gcenable is called after the bulk of the runtime initialization,
// just before we're about to start letting user code run.
// It kicks off the background sweeper goroutine, the background
// scavenger goroutine, and enables GC.
func gcenable() {
// Kick off sweeping and scavenging.
- gcenable_setup = make(chan int, 2)
- go bgsweep()
- go bgscavenge()
- <-gcenable_setup
- <-gcenable_setup
- gcenable_setup = nil
+ c := make(chan int, 2)
+ go bgsweep(c)
+ go bgscavenge(c)
+ <-c
+ <-c
memstats.enablegc = true // now that runtime is initialized, GC is okay
}
@@ -1563,19 +1558,17 @@ func clearpools() {
sched.sudogcache = nil
unlock(&sched.sudoglock)
- // Clear central defer pools.
+ // Clear central defer pool.
// Leave per-P pools alone, they have strictly bounded size.
lock(&sched.deferlock)
- for i := range sched.deferpool {
- // disconnect cached list before dropping it on the floor,
- // so that a dangling ref to one entry does not pin all of them.
- var d, dlink *_defer
- for d = sched.deferpool[i]; d != nil; d = dlink {
- dlink = d.link
- d.link = nil
- }
- sched.deferpool[i] = nil
+ // disconnect cached list before dropping it on the floor,
+ // so that a dangling ref to one entry does not pin all of them.
+ var d, dlink *_defer
+ for d = sched.deferpool; d != nil; d = dlink {
+ dlink = d.link
+ d.link = nil
}
+ sched.deferpool = nil
unlock(&sched.deferlock)
}
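The removed TODO is resolved: with goroutine arguments working under the register ABI again, the setup channel becomes a local passed to the two go statements instead of a package-level variable. The resulting handshake, restated from the hunk above:

	c := make(chan int, 2)
	go bgsweep(c)    // sends 1 on c once it has parked
	go bgscavenge(c) // sends 1 on c once it has parked
	<-c
	<-c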
diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index 1fd0732d62..eb70ae9f49 100644
--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go
@@ -750,14 +750,11 @@ func scanstack(gp *g, gcw *gcWork) {
// Find additional pointers that point into the stack from the heap.
// Currently this includes defers and panics. See also function copystack.
- // Find and trace all defer arguments.
- tracebackdefers(gp, scanframe, nil)
-
// Find and trace other pointers in defer records.
for d := gp._defer; d != nil; d = d.link {
if d.fn != nil {
- // tracebackdefers above does not scan the func value, which could
- // be a stack allocated closure. See issue 30453.
+ // Scan the func value, which could be a stack allocated closure.
+ // See issue 30453.
scanblock(uintptr(unsafe.Pointer(&d.fn)), sys.PtrSize, &oneptrmask[0], gcw, &state)
}
if d.link != nil {
diff --git a/src/runtime/mgcscavenge.go b/src/runtime/mgcscavenge.go
index 7578129f9d..9cb61ed0a6 100644
--- a/src/runtime/mgcscavenge.go
+++ b/src/runtime/mgcscavenge.go
@@ -249,7 +249,7 @@ func scavengeSleep(ns int64) int64 {
// The background scavenger maintains the RSS of the application below
// the line described by the proportional scavenging statistics in
// the mheap struct.
-func bgscavenge() {
+func bgscavenge(c chan int) {
scavenge.g = getg()
lockInit(&scavenge.lock, lockRankScavenge)
@@ -261,7 +261,7 @@ func bgscavenge() {
wakeScavenger()
}
- gcenable_setup <- 1
+ c <- 1
goparkunlock(&scavenge.lock, waitReasonGCScavengeWait, traceEvGoBlock, 1)
// Exponentially-weighted moving average of the fraction of time this
diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go
index 8fe3a65340..1812644623 100644
--- a/src/runtime/mgcsweep.go
+++ b/src/runtime/mgcsweep.go
@@ -153,13 +153,13 @@ func finishsweep_m() {
nextMarkBitArenaEpoch()
}
-func bgsweep() {
+func bgsweep(c chan int) {
sweep.g = getg()
lockInit(&sweep.lock, lockRankSweep)
lock(&sweep.lock)
sweep.parked = true
- gcenable_setup <- 1
+ c <- 1
goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
for {
diff --git a/src/runtime/mkduff.go b/src/runtime/mkduff.go
index da191cc594..f036745092 100644
--- a/src/runtime/mkduff.go
+++ b/src/runtime/mkduff.go
@@ -154,7 +154,7 @@ func zeroARM64(w io.Writer) {
// ZR: always zero
// R20: ptr to memory to be zeroed
// On return, R20 points to the last zeroed dword.
- fmt.Fprintln(w, "TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0")
+ fmt.Fprintln(w, "TEXT runtime·duffzero<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-0")
for i := 0; i < 63; i++ {
fmt.Fprintln(w, "\tSTP.P\t(ZR, ZR), 16(R20)")
}
@@ -167,7 +167,7 @@ func copyARM64(w io.Writer) {
// R21: ptr to destination memory
// R26, R27 (aka REGTMP): scratch space
// R20 and R21 are updated as a side effect
- fmt.Fprintln(w, "TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0")
+ fmt.Fprintln(w, "TEXT runtime·duffcopy<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-0")
for i := 0; i < 64; i++ {
fmt.Fprintln(w, "\tLDP.P\t16(R20), (R26, R27)")
diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go
index 6c980540f5..f2b90307ca 100644
--- a/src/runtime/mkpreempt.go
+++ b/src/runtime/mkpreempt.go
@@ -128,8 +128,7 @@ func header(arch string) {
}
fmt.Fprintf(out, "#include \"go_asm.h\"\n")
fmt.Fprintf(out, "#include \"textflag.h\"\n\n")
- fmt.Fprintf(out, "// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.\n")
- fmt.Fprintf(out, "TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0\n")
+ fmt.Fprintf(out, "TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0\n")
}
func p(f string, args ...interface{}) {
diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go
index 5235b898e4..0ba415ba5a 100644
--- a/src/runtime/mprof.go
+++ b/src/runtime/mprof.go
@@ -8,6 +8,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/atomic"
"unsafe"
)
@@ -621,7 +622,7 @@ func record(r *MemProfileRecord, b *bucket) {
r.AllocObjects = int64(mp.active.allocs)
r.FreeObjects = int64(mp.active.frees)
if raceenabled {
- racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), funcPC(MemProfile))
+ racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(MemProfile))
}
if msanenabled {
msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
@@ -674,7 +675,7 @@ func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
}
r.Cycles = bp.cycles
if raceenabled {
- racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), funcPC(BlockProfile))
+ racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(BlockProfile))
}
if msanenabled {
msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
diff --git a/src/runtime/norace_linux_test.go b/src/runtime/norace_linux_test.go
index 94b7c7a467..b199aa633c 100644
--- a/src/runtime/norace_linux_test.go
+++ b/src/runtime/norace_linux_test.go
@@ -9,6 +9,7 @@
package runtime_test
import (
+ "internal/abi"
"runtime"
"testing"
"time"
@@ -25,7 +26,7 @@ func newOSProcCreated() {
// Can't be run with -race because it inserts calls into newOSProcCreated()
// that require a valid G/M.
func TestNewOSProc0(t *testing.T) {
- runtime.NewOSProc0(0x800000, unsafe.Pointer(runtime.FuncPC(newOSProcCreated)))
+ runtime.NewOSProc0(0x800000, unsafe.Pointer(abi.FuncPCABIInternal(newOSProcCreated)))
check := time.NewTicker(100 * time.Millisecond)
defer check.Stop()
end := time.After(5 * time.Second)
diff --git a/src/runtime/os3_plan9.go b/src/runtime/os3_plan9.go
index c5dc23de8b..ce8bc7f103 100644
--- a/src/runtime/os3_plan9.go
+++ b/src/runtime/os3_plan9.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
@@ -98,9 +99,9 @@ func sighandler(_ureg *ureg, note *byte, gp *g) int {
}
}
if usesLR {
- c.setpc(funcPC(sigpanictramp))
+ c.setpc(abi.FuncPCABI0(sigpanictramp))
} else {
- c.setpc(funcPC(sigpanic0))
+ c.setpc(abi.FuncPCABI0(sigpanic0))
}
return _NCONT
}
diff --git a/src/runtime/os3_solaris.go b/src/runtime/os3_solaris.go
index 39ef831acf..bfd7c7eb64 100644
--- a/src/runtime/os3_solaris.go
+++ b/src/runtime/os3_solaris.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
@@ -172,7 +173,7 @@ func newosproc(mp *m) {
// Disable signals during create, so that the new thread starts
// with signals disabled. It will enable them in minit.
sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
- ret = pthread_create(&tid, &attr, funcPC(tstart_sysvicall), unsafe.Pointer(mp))
+ ret = pthread_create(&tid, &attr, abi.FuncPCABI0(tstart_sysvicall), unsafe.Pointer(mp))
sigprocmask(_SIG_SETMASK, &oset, nil)
if ret != 0 {
print("runtime: failed to create new OS thread (have ", mcount(), " already; errno=", ret, ")\n")
@@ -215,7 +216,7 @@ func miniterrno()
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
- asmcgocall(unsafe.Pointer(funcPC(miniterrno)), unsafe.Pointer(&libc____errno))
+ asmcgocall(unsafe.Pointer(abi.FuncPCABI0(miniterrno)), unsafe.Pointer(&libc____errno))
minitSignals()
@@ -241,8 +242,8 @@ func setsig(i uint32, fn uintptr) {
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = sigset_all
- if fn == funcPC(sighandler) {
- fn = funcPC(sigtramp)
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
+ fn = abi.FuncPCABI0(sigtramp)
}
*((*uintptr)(unsafe.Pointer(&sa._funcptr))) = fn
sigaction(i, &sa, nil)
@@ -390,6 +391,7 @@ func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (un
}
//go:nosplit
+//go:cgo_unsafe_args
func doMmap(addr, n, prot, flags, fd, off uintptr) (uintptr, uintptr) {
var libcall libcall
libcall.fn = uintptr(unsafe.Pointer(&libc_mmap))
diff --git a/src/runtime/os_aix.go b/src/runtime/os_aix.go
index 4fb1c8e845..478dde2fc3 100644
--- a/src/runtime/os_aix.go
+++ b/src/runtime/os_aix.go
@@ -8,6 +8,7 @@
package runtime
import (
+ "internal/abi"
"unsafe"
)
@@ -267,7 +268,7 @@ func setsig(i uint32, fn uintptr) {
var sa sigactiont
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = sigset_all
- if fn == funcPC(sighandler) {
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
fn = uintptr(unsafe.Pointer(&sigtramp))
}
sa.sa_handler = fn
diff --git a/src/runtime/os_darwin.go b/src/runtime/os_darwin.go
index 079be107d7..ca61f20e8a 100644
--- a/src/runtime/os_darwin.go
+++ b/src/runtime/os_darwin.go
@@ -369,7 +369,7 @@ func setsig(i uint32, fn uintptr) {
var sa usigactiont
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = ^uint32(0)
- if fn == funcPC(sighandler) { // funcPC(sighandler) matches the callers in signal_unix.go
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
if iscgo {
fn = abi.FuncPCABI0(cgoSigtramp)
} else {
diff --git a/src/runtime/os_dragonfly.go b/src/runtime/os_dragonfly.go
index 5c688a3109..0c81ed4d7c 100644
--- a/src/runtime/os_dragonfly.go
+++ b/src/runtime/os_dragonfly.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
@@ -147,14 +148,14 @@ func lwp_start(uintptr)
func newosproc(mp *m) {
stk := unsafe.Pointer(mp.g0.stack.hi)
if false {
- print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " lwp_start=", funcPC(lwp_start), " id=", mp.id, " ostk=", &mp, "\n")
+ print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " lwp_start=", abi.FuncPCABI0(lwp_start), " id=", mp.id, " ostk=", &mp, "\n")
}
var oset sigset
sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
params := lwpparams{
- start_func: funcPC(lwp_start),
+ start_func: abi.FuncPCABI0(lwp_start),
arg: unsafe.Pointer(mp),
stack: uintptr(stk),
tid1: nil, // minit will record tid
@@ -226,8 +227,8 @@ func setsig(i uint32, fn uintptr) {
var sa sigactiont
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = sigset_all
- if fn == funcPC(sighandler) {
- fn = funcPC(sigtramp)
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
+ fn = abi.FuncPCABI0(sigtramp)
}
sa.sa_sigaction = fn
sigaction(i, &sa, nil)
diff --git a/src/runtime/os_freebsd.go b/src/runtime/os_freebsd.go
index 09dd50ce59..151a5fd91a 100644
--- a/src/runtime/os_freebsd.go
+++ b/src/runtime/os_freebsd.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
@@ -197,11 +198,11 @@ func thr_start()
func newosproc(mp *m) {
stk := unsafe.Pointer(mp.g0.stack.hi)
if false {
- print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " thr_start=", funcPC(thr_start), " id=", mp.id, " ostk=", &mp, "\n")
+ print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " thr_start=", abi.FuncPCABI0(thr_start), " id=", mp.id, " ostk=", &mp, "\n")
}
param := thrparam{
- start_func: funcPC(thr_start),
+ start_func: abi.FuncPCABI0(thr_start),
arg: unsafe.Pointer(mp),
stack_base: mp.g0.stack.lo,
stack_size: uintptr(stk) - mp.g0.stack.lo,
@@ -236,7 +237,7 @@ func newosproc0(stacksize uintptr, fn unsafe.Pointer) {
// However, newosproc0 is currently unreachable because builds
// utilizing c-shared/c-archive force external linking.
param := thrparam{
- start_func: funcPC(fn),
+ start_func: uintptr(fn),
arg: nil,
stack_base: uintptr(stack), //+stacksize?
stack_size: stacksize,
diff --git a/src/runtime/os_freebsd2.go b/src/runtime/os_freebsd2.go
index fde6fbf1b1..7e266dc27e 100644
--- a/src/runtime/os_freebsd2.go
+++ b/src/runtime/os_freebsd2.go
@@ -7,14 +7,16 @@
package runtime
+import "internal/abi"
+
//go:nosplit
//go:nowritebarrierrec
func setsig(i uint32, fn uintptr) {
var sa sigactiont
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = sigset_all
- if fn == funcPC(sighandler) {
- fn = funcPC(sigtramp)
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
+ fn = abi.FuncPCABI0(sigtramp)
}
sa.sa_handler = fn
sigaction(i, &sa, nil)
diff --git a/src/runtime/os_freebsd_amd64.go b/src/runtime/os_freebsd_amd64.go
index dc0bb9ff96..b179383eac 100644
--- a/src/runtime/os_freebsd_amd64.go
+++ b/src/runtime/os_freebsd_amd64.go
@@ -4,6 +4,8 @@
package runtime
+import "internal/abi"
+
func cgoSigtramp()
//go:nosplit
@@ -12,11 +14,11 @@ func setsig(i uint32, fn uintptr) {
var sa sigactiont
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = sigset_all
- if fn == funcPC(sighandler) {
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
if iscgo {
- fn = funcPC(cgoSigtramp)
+ fn = abi.FuncPCABI0(cgoSigtramp)
} else {
- fn = funcPC(sigtramp)
+ fn = abi.FuncPCABI0(sigtramp)
}
}
sa.sa_handler = fn
diff --git a/src/runtime/os_linux.go b/src/runtime/os_linux.go
index c8b29e396c..1984bf6844 100644
--- a/src/runtime/os_linux.go
+++ b/src/runtime/os_linux.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
@@ -142,14 +143,14 @@ func newosproc(mp *m) {
* note: strace gets confused if we use CLONE_PTRACE here.
*/
if false {
- print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " clone=", funcPC(clone), " id=", mp.id, " ostk=", &mp, "\n")
+ print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " clone=", abi.FuncPCABI0(clone), " id=", mp.id, " ostk=", &mp, "\n")
}
// Disable signals during clone, so that the new thread starts
// with signals disabled. It will enable them in minit.
var oset sigset
sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
- ret := clone(cloneFlags, stk, unsafe.Pointer(mp), unsafe.Pointer(mp.g0), unsafe.Pointer(funcPC(mstart)))
+ ret := clone(cloneFlags, stk, unsafe.Pointer(mp), unsafe.Pointer(mp.g0), unsafe.Pointer(abi.FuncPCABI0(mstart)))
sigprocmask(_SIG_SETMASK, &oset, nil)
if ret < 0 {
@@ -429,13 +430,13 @@ func setsig(i uint32, fn uintptr) {
// should not be used". x86_64 kernel requires it. Only use it on
// x86.
if GOARCH == "386" || GOARCH == "amd64" {
- sa.sa_restorer = funcPC(sigreturn)
+ sa.sa_restorer = abi.FuncPCABI0(sigreturn)
}
- if fn == funcPC(sighandler) {
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
if iscgo {
- fn = funcPC(cgoSigtramp)
+ fn = abi.FuncPCABI0(cgoSigtramp)
} else {
- fn = funcPC(sigtramp)
+ fn = abi.FuncPCABI0(sigtramp)
}
}
sa.sa_handler = fn
diff --git a/src/runtime/os_netbsd.go b/src/runtime/os_netbsd.go
index 6fbb3aa694..151cd17bbe 100644
--- a/src/runtime/os_netbsd.go
+++ b/src/runtime/os_netbsd.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -215,7 +216,7 @@ func newosproc(mp *m) {
var oset sigset
sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
- lwp_mcontext_init(&uc.uc_mcontext, stk, mp, mp.g0, funcPC(netbsdMstart))
+ lwp_mcontext_init(&uc.uc_mcontext, stk, mp, mp.g0, abi.FuncPCABI0(netbsdMstart))
ret := lwp_create(unsafe.Pointer(&uc), _LWP_DETACHED, unsafe.Pointer(&mp.procid))
sigprocmask(_SIG_SETMASK, &oset, nil)
@@ -318,8 +319,8 @@ func setsig(i uint32, fn uintptr) {
var sa sigactiont
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = sigset_all
- if fn == funcPC(sighandler) {
- fn = funcPC(sigtramp)
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
+ fn = abi.FuncPCABI0(sigtramp)
}
sa.sa_sigaction = fn
sigaction(i, &sa, nil)
diff --git a/src/runtime/os_netbsd_386.go b/src/runtime/os_netbsd_386.go
index 037f7e36dc..ac89b9852c 100644
--- a/src/runtime/os_netbsd_386.go
+++ b/src/runtime/os_netbsd_386.go
@@ -4,11 +4,14 @@
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr) {
// Machine dependent mcontext initialisation for LWP.
- mc.__gregs[_REG_EIP] = uint32(funcPC(lwp_tramp))
+ mc.__gregs[_REG_EIP] = uint32(abi.FuncPCABI0(lwp_tramp))
mc.__gregs[_REG_UESP] = uint32(uintptr(stk))
mc.__gregs[_REG_EBX] = uint32(uintptr(unsafe.Pointer(mp)))
mc.__gregs[_REG_EDX] = uint32(uintptr(unsafe.Pointer(gp)))
diff --git a/src/runtime/os_netbsd_amd64.go b/src/runtime/os_netbsd_amd64.go
index 5118b0c4ff..74eea0ceab 100644
--- a/src/runtime/os_netbsd_amd64.go
+++ b/src/runtime/os_netbsd_amd64.go
@@ -4,11 +4,14 @@
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr) {
// Machine dependent mcontext initialisation for LWP.
- mc.__gregs[_REG_RIP] = uint64(funcPC(lwp_tramp))
+ mc.__gregs[_REG_RIP] = uint64(abi.FuncPCABI0(lwp_tramp))
mc.__gregs[_REG_RSP] = uint64(uintptr(stk))
mc.__gregs[_REG_R8] = uint64(uintptr(unsafe.Pointer(mp)))
mc.__gregs[_REG_R9] = uint64(uintptr(unsafe.Pointer(gp)))
diff --git a/src/runtime/os_netbsd_arm.go b/src/runtime/os_netbsd_arm.go
index b5ec23e45b..5fb4e08d66 100644
--- a/src/runtime/os_netbsd_arm.go
+++ b/src/runtime/os_netbsd_arm.go
@@ -4,11 +4,14 @@
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr) {
// Machine dependent mcontext initialisation for LWP.
- mc.__gregs[_REG_R15] = uint32(funcPC(lwp_tramp))
+ mc.__gregs[_REG_R15] = uint32(abi.FuncPCABI0(lwp_tramp))
mc.__gregs[_REG_R13] = uint32(uintptr(stk))
mc.__gregs[_REG_R0] = uint32(uintptr(unsafe.Pointer(mp)))
mc.__gregs[_REG_R1] = uint32(uintptr(unsafe.Pointer(gp)))
diff --git a/src/runtime/os_netbsd_arm64.go b/src/runtime/os_netbsd_arm64.go
index 8d21b0a430..2dda9c9274 100644
--- a/src/runtime/os_netbsd_arm64.go
+++ b/src/runtime/os_netbsd_arm64.go
@@ -4,11 +4,14 @@
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr) {
// Machine dependent mcontext initialisation for LWP.
- mc.__gregs[_REG_ELR] = uint64(funcPC(lwp_tramp))
+ mc.__gregs[_REG_ELR] = uint64(abi.FuncPCABI0(lwp_tramp))
mc.__gregs[_REG_X31] = uint64(uintptr(stk))
mc.__gregs[_REG_X0] = uint64(uintptr(unsafe.Pointer(mp)))
mc.__gregs[_REG_X1] = uint64(uintptr(unsafe.Pointer(mp.g0)))
diff --git a/src/runtime/os_openbsd.go b/src/runtime/os_openbsd.go
index 6259b96c22..54f36c6ebf 100644
--- a/src/runtime/os_openbsd.go
+++ b/src/runtime/os_openbsd.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/atomic"
"unsafe"
)
@@ -191,8 +192,8 @@ func setsig(i uint32, fn uintptr) {
var sa sigactiont
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = uint32(sigset_all)
- if fn == funcPC(sighandler) {
- fn = funcPC(sigtramp)
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
+ fn = abi.FuncPCABI0(sigtramp)
}
sa.sa_sigaction = fn
sigaction(i, &sa, nil)
diff --git a/src/runtime/os_openbsd_libc.go b/src/runtime/os_openbsd_libc.go
index 0a342e5533..981e49827f 100644
--- a/src/runtime/os_openbsd_libc.go
+++ b/src/runtime/os_openbsd_libc.go
@@ -8,6 +8,7 @@
package runtime
import (
+ "internal/abi"
"unsafe"
)
@@ -48,7 +49,7 @@ func newosproc(mp *m) {
// setup and then calls mstart.
var oset sigset
sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
- err := pthread_create(&attr, funcPC(mstart_stub), unsafe.Pointer(mp))
+ err := pthread_create(&attr, abi.FuncPCABI0(mstart_stub), unsafe.Pointer(mp))
sigprocmask(_SIG_SETMASK, &oset, nil)
if err != 0 {
write(2, unsafe.Pointer(&failThreadCreate[0]), int32(len(failThreadCreate)))
diff --git a/src/runtime/os_openbsd_syscall.go b/src/runtime/os_openbsd_syscall.go
index 3cdcb6c707..a04eb4fc4d 100644
--- a/src/runtime/os_openbsd_syscall.go
+++ b/src/runtime/os_openbsd_syscall.go
@@ -8,6 +8,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
@@ -33,7 +34,7 @@ func newosproc(mp *m) {
var oset sigset
sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
- ret := tfork(&param, unsafe.Sizeof(param), mp, mp.g0, funcPC(mstart))
+ ret := tfork(&param, unsafe.Sizeof(param), mp, mp.g0, abi.FuncPCABI0(mstart))
sigprocmask(_SIG_SETMASK, &oset, nil)
if ret < 0 {
diff --git a/src/runtime/os_plan9.go b/src/runtime/os_plan9.go
index 4d428346f0..975d460a7d 100644
--- a/src/runtime/os_plan9.go
+++ b/src/runtime/os_plan9.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/atomic"
"unsafe"
)
@@ -346,7 +347,7 @@ func getRandomData(r []byte) {
func initsig(preinit bool) {
if !preinit {
- notify(unsafe.Pointer(funcPC(sigtramp)))
+ notify(unsafe.Pointer(abi.FuncPCABI0(sigtramp)))
}
}
diff --git a/src/runtime/os_solaris.go b/src/runtime/os_solaris.go
index 89129e5f1a..8ac1b08f69 100644
--- a/src/runtime/os_solaris.go
+++ b/src/runtime/os_solaris.go
@@ -179,6 +179,7 @@ func sysvicall3Err(fn *libcFunc, a1, a2, a3 uintptr) (r1, err uintptr) {
}
//go:nosplit
+//go:cgo_unsafe_args
func sysvicall4(fn *libcFunc, a1, a2, a3, a4 uintptr) uintptr {
// Leave caller's PC/SP around for traceback.
gp := getg()
@@ -208,6 +209,7 @@ func sysvicall4(fn *libcFunc, a1, a2, a3, a4 uintptr) uintptr {
}
//go:nosplit
+//go:cgo_unsafe_args
func sysvicall5(fn *libcFunc, a1, a2, a3, a4, a5 uintptr) uintptr {
// Leave caller's PC/SP around for traceback.
gp := getg()
@@ -237,6 +239,7 @@ func sysvicall5(fn *libcFunc, a1, a2, a3, a4, a5 uintptr) uintptr {
}
//go:nosplit
+//go:cgo_unsafe_args
func sysvicall6(fn *libcFunc, a1, a2, a3, a4, a5, a6 uintptr) uintptr {
// Leave caller's PC/SP around for traceback.
gp := getg()
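The //go:cgo_unsafe_args additions here matter because each sysvicall wrapper publishes the address of its first parameter and expects the remaining parameters to sit contiguously after it in memory; under a register-based calling convention that layout only exists if the compiler is told to keep the whole argument block addressable. A rough sketch of the pattern, with simplified stand-ins for the runtime's libcall plumbing (names are illustrative, not verified against the real helpers):

    //go:nosplit
    //go:cgo_unsafe_args
    func sysvicall3sketch(fn *libcFunc, a1, a2, a3 uintptr) uintptr {
        var call libcall
        call.fn = uintptr(unsafe.Pointer(fn))
        call.n = 3
        // &a1 is handed to assembly that reads a1, a2, a3 as one memory
        // block; //go:cgo_unsafe_args forces the arguments to be spilled
        // to the stack in order so that block actually exists.
        call.args = uintptr(unsafe.Pointer(&a1))
        asmcgocall(unsafe.Pointer(&asmsysvicall6x), unsafe.Pointer(&call))
        return call.r1
    }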
diff --git a/src/runtime/os_windows.go b/src/runtime/os_windows.go
index f0935264ac..d82173e738 100644
--- a/src/runtime/os_windows.go
+++ b/src/runtime/os_windows.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -543,7 +544,7 @@ func initLongPathSupport() {
}
func osinit() {
- asmstdcallAddr = unsafe.Pointer(funcPC(asmstdcall))
+ asmstdcallAddr = unsafe.Pointer(abi.FuncPCABI0(asmstdcall))
setBadSignalMsg()
@@ -906,7 +907,7 @@ func semacreate(mp *m) {
func newosproc(mp *m) {
// We pass 0 for the stack size to use the default for this binary.
thandle := stdcall6(_CreateThread, 0, 0,
- funcPC(tstart_stdcall), uintptr(unsafe.Pointer(mp)),
+ abi.FuncPCABI0(tstart_stdcall), uintptr(unsafe.Pointer(mp)),
0, 0)
if thandle == 0 {
@@ -1385,7 +1386,7 @@ func preemptM(mp *m) {
if gp != nil && wantAsyncPreempt(gp) {
if ok, newpc := isAsyncSafePoint(gp, c.ip(), c.sp(), c.lr()); ok {
// Inject call to asyncPreempt
- targetPC := funcPC(asyncPreempt)
+ targetPC := abi.FuncPCABI0(asyncPreempt)
switch GOARCH {
default:
throw("unsupported architecture")
diff --git a/src/runtime/panic.go b/src/runtime/panic.go
index f6c38aafcc..86d41c4e1c 100644
--- a/src/runtime/panic.go
+++ b/src/runtime/panic.go
@@ -5,8 +5,6 @@
package runtime
import (
- "internal/abi"
- "internal/goexperiment"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -226,31 +224,19 @@ func panicmemAddr(addr uintptr) {
panic(errorAddressString{msg: "invalid memory address or nil pointer dereference", addr: addr})
}
-// Create a new deferred function fn with siz bytes of arguments.
+// Create a new deferred function fn, which has no arguments and results.
// The compiler turns a defer statement into a call to this.
-//go:nosplit
-func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
+func deferproc(fn func()) {
gp := getg()
if gp.m.curg != gp {
// go code on the system stack can't defer
throw("defer on system stack")
}
- if goexperiment.RegabiDefer && siz != 0 {
- // TODO: Make deferproc just take a func().
- throw("defer with non-empty frame")
- }
-
- // the arguments of fn are in a perilous state. The stack map
- // for deferproc does not describe them. So we can't let garbage
- // collection or stack copying trigger until we've copied them out
- // to somewhere safe. The memmove below does that.
- // Until the copy completes, we can only call nosplit routines.
sp := getcallersp()
- argp := uintptr(unsafe.Pointer(&fn)) + unsafe.Sizeof(fn)
callerpc := getcallerpc()
- d := newdefer(siz)
+ d := newdefer()
if d._panic != nil {
throw("deferproc: d.panic != nil after newdefer")
}
@@ -259,14 +245,6 @@ func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
d.fn = fn
d.pc = callerpc
d.sp = sp
- switch siz {
- case 0:
- // Do nothing.
- case sys.PtrSize:
- *(*uintptr)(deferArgs(d)) = *(*uintptr)(unsafe.Pointer(argp))
- default:
- memmove(deferArgs(d), unsafe.Pointer(argp), uintptr(siz))
- }
// deferproc returns 0 normally.
// a deferred func that stops a panic
@@ -280,7 +258,7 @@ func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
}
// deferprocStack queues a new deferred function with a defer record on the stack.
-// The defer record must have its siz and fn fields initialized.
+// The defer record must have its fn field initialized.
// All other fields can contain junk.
// The defer record must be immediately followed in memory by
// the arguments of the defer.
@@ -293,10 +271,7 @@ func deferprocStack(d *_defer) {
// go code on the system stack can't defer
throw("defer on system stack")
}
- if goexperiment.RegabiDefer && d.siz != 0 {
- throw("defer with non-empty frame")
- }
- // siz and fn are already set.
+ // fn is already set.
// The other fields are junk on entry to deferprocStack and
// are initialized here.
d.started = false
@@ -327,132 +302,38 @@ func deferprocStack(d *_defer) {
// been set and must not be clobbered.
}
-// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
-// Each P holds a pool for defers with small arg sizes.
-// Assign defer allocations to pools by rounding to 16, to match malloc size classes.
-
-const (
- deferHeaderSize = unsafe.Sizeof(_defer{})
- minDeferAlloc = (deferHeaderSize + 15) &^ 15
- minDeferArgs = minDeferAlloc - deferHeaderSize
-)
-
-// defer size class for arg size sz
-//go:nosplit
-func deferclass(siz uintptr) uintptr {
- if siz <= minDeferArgs {
- return 0
- }
- return (siz - minDeferArgs + 15) / 16
-}
-
-// total size of memory block for defer with arg size sz
-func totaldefersize(siz uintptr) uintptr {
- if siz <= minDeferArgs {
- return minDeferAlloc
- }
- return deferHeaderSize + siz
-}
-
-// Ensure that defer arg sizes that map to the same defer size class
-// also map to the same malloc size class.
-func testdefersizes() {
- var m [len(p{}.deferpool)]int32
-
- for i := range m {
- m[i] = -1
- }
- for i := uintptr(0); ; i++ {
- defersc := deferclass(i)
- if defersc >= uintptr(len(m)) {
- break
- }
- siz := roundupsize(totaldefersize(i))
- if m[defersc] < 0 {
- m[defersc] = int32(siz)
- continue
- }
- if m[defersc] != int32(siz) {
- print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
- throw("bad defer size class")
- }
- }
-}
-
-// The arguments associated with a deferred call are stored
-// immediately after the _defer header in memory.
-//go:nosplit
-func deferArgs(d *_defer) unsafe.Pointer {
- if d.siz == 0 {
- // Avoid pointer past the defer allocation.
- return nil
- }
- return add(unsafe.Pointer(d), unsafe.Sizeof(*d))
-}
-
-// deferFunc returns d's deferred function. This is temporary while we
-// support both modes of GOEXPERIMENT=regabidefer. Once we commit to
-// that experiment, we should change the type of d.fn.
-//go:nosplit
-func deferFunc(d *_defer) func() {
- if !goexperiment.RegabiDefer {
- throw("requires GOEXPERIMENT=regabidefer")
- }
- var fn func()
- *(**funcval)(unsafe.Pointer(&fn)) = d.fn
- return fn
-}
-
-var deferType *_type // type of _defer struct
-
-func init() {
- var x interface{}
- x = (*_defer)(nil)
- deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
-}
+// Each P holds a pool for defers.
// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer. The defer is not
// added to any defer chain yet.
-//
-// This must not grow the stack because there may be a frame without
-// stack map information when this is called.
-//
-//go:nosplit
-func newdefer(siz int32) *_defer {
+func newdefer() *_defer {
var d *_defer
- sc := deferclass(uintptr(siz))
gp := getg()
- if sc < uintptr(len(p{}.deferpool)) {
- pp := gp.m.p.ptr()
- if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
- // Take the slow path on the system stack so
- // we don't grow newdefer's stack.
- systemstack(func() {
- lock(&sched.deferlock)
- for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
- d := sched.deferpool[sc]
- sched.deferpool[sc] = d.link
- d.link = nil
- pp.deferpool[sc] = append(pp.deferpool[sc], d)
- }
- unlock(&sched.deferlock)
- })
- }
- if n := len(pp.deferpool[sc]); n > 0 {
- d = pp.deferpool[sc][n-1]
- pp.deferpool[sc][n-1] = nil
- pp.deferpool[sc] = pp.deferpool[sc][:n-1]
- }
- }
- if d == nil {
- // Allocate new defer+args.
+ pp := gp.m.p.ptr()
+ if len(pp.deferpool) == 0 && sched.deferpool != nil {
+ // Take the slow path on the system stack so
+ // we don't grow newdefer's stack.
systemstack(func() {
- total := roundupsize(totaldefersize(uintptr(siz)))
- d = (*_defer)(mallocgc(total, deferType, true))
+ lock(&sched.deferlock)
+ for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
+ d := sched.deferpool
+ sched.deferpool = d.link
+ d.link = nil
+ pp.deferpool = append(pp.deferpool, d)
+ }
+ unlock(&sched.deferlock)
})
}
- d.siz = siz
+ if n := len(pp.deferpool); n > 0 {
+ d = pp.deferpool[n-1]
+ pp.deferpool[n-1] = nil
+ pp.deferpool = pp.deferpool[:n-1]
+ }
+ if d == nil {
+ // Allocate new defer.
+ d = new(_defer)
+ }
d.heap = true
return d
}
@@ -474,23 +355,19 @@ func freedefer(d *_defer) {
if !d.heap {
return
}
- sc := deferclass(uintptr(d.siz))
- if sc >= uintptr(len(p{}.deferpool)) {
- return
- }
pp := getg().m.p.ptr()
- if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
+ if len(pp.deferpool) == cap(pp.deferpool) {
// Transfer half of local cache to the central cache.
//
// Take this slow path on the system stack so
// we don't grow freedefer's stack.
systemstack(func() {
var first, last *_defer
- for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
- n := len(pp.deferpool[sc])
- d := pp.deferpool[sc][n-1]
- pp.deferpool[sc][n-1] = nil
- pp.deferpool[sc] = pp.deferpool[sc][:n-1]
+ for len(pp.deferpool) > cap(pp.deferpool)/2 {
+ n := len(pp.deferpool)
+ d := pp.deferpool[n-1]
+ pp.deferpool[n-1] = nil
+ pp.deferpool = pp.deferpool[:n-1]
if first == nil {
first = d
} else {
@@ -499,15 +376,14 @@ func freedefer(d *_defer) {
last = d
}
lock(&sched.deferlock)
- last.link = sched.deferpool[sc]
- sched.deferpool[sc] = first
+ last.link = sched.deferpool
+ sched.deferpool = first
unlock(&sched.deferlock)
})
}
// These lines used to be simply `*d = _defer{}` but that
// started causing a nosplit stack overflow via typedmemmove.
- d.siz = 0
d.started = false
d.openDefer = false
d.sp = 0
@@ -520,7 +396,7 @@ func freedefer(d *_defer) {
// both of which throw.
d.link = nil
- pp.deferpool[sc] = append(pp.deferpool[sc], d)
+ pp.deferpool = append(pp.deferpool, d)
}
// Separate function so that it can split stack.
@@ -576,14 +452,6 @@ func deferreturn() {
// of the arguments until the jmpdefer can flip the PC over to
// fn.
argp := getcallersp() + sys.MinFrameSize
- switch d.siz {
- case 0:
- // Do nothing.
- case sys.PtrSize:
- *(*uintptr)(unsafe.Pointer(argp)) = *(*uintptr)(deferArgs(d))
- default:
- memmove(unsafe.Pointer(argp), deferArgs(d), uintptr(d.siz))
- }
fn := d.fn
d.fn = nil
gp._defer = d.link
@@ -593,7 +461,9 @@ func deferreturn() {
// called with a callback on an LR architecture and jmpdefer is on the
// stack, because the stack trace can be incorrect in that case - see
// issue #8153).
- _ = fn.fn
+ if fn == nil {
+ fn()
+ }
jmpdefer(fn, argp)
}
@@ -655,15 +525,9 @@ func Goexit() {
addOneOpenDeferFrame(gp, 0, nil)
}
} else {
- if goexperiment.RegabiDefer {
- // Save the pc/sp in deferCallSave(), so we can "recover" back to this
- // loop if necessary.
- deferCallSave(&p, deferFunc(d))
- } else {
- // Save the pc/sp in reflectcallSave(), so we can "recover" back to this
- // loop if necessary.
- reflectcallSave(&p, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz))
- }
+ // Save the pc/sp in deferCallSave(), so we can "recover" back to this
+ // loop if necessary.
+ deferCallSave(&p, d.fn)
}
if p.aborted {
// We had a recursive panic in the defer d we started, and
@@ -783,8 +647,7 @@ func addOneOpenDeferFrame(gp *g, pc uintptr, sp unsafe.Pointer) {
throw("missing deferreturn")
}
- maxargsize, _ := readvarintUnsafe(fd)
- d1 := newdefer(int32(maxargsize))
+ d1 := newdefer()
d1.openDefer = true
d1._panic = nil
// These are the pc/sp to set after we've
@@ -845,57 +708,27 @@ func runOpenDeferFrame(gp *g, d *_defer) bool {
done := true
fd := d.fd
- // Skip the maxargsize
- _, fd = readvarintUnsafe(fd)
deferBitsOffset, fd := readvarintUnsafe(fd)
nDefers, fd := readvarintUnsafe(fd)
deferBits := *(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset)))
for i := int(nDefers) - 1; i >= 0; i-- {
// read the funcdata info for this defer
- var argWidth, closureOffset, nArgs uint32
- argWidth, fd = readvarintUnsafe(fd)
+ var closureOffset uint32
closureOffset, fd = readvarintUnsafe(fd)
- nArgs, fd = readvarintUnsafe(fd)
- if goexperiment.RegabiDefer && argWidth != 0 {
- throw("defer with non-empty frame")
- }
if deferBits&(1<<i) == 0 {
- for j := uint32(0); j < nArgs; j++ {
- _, fd = readvarintUnsafe(fd)
- _, fd = readvarintUnsafe(fd)
- _, fd = readvarintUnsafe(fd)
- }
continue
}
- closure := *(**funcval)(unsafe.Pointer(d.varp - uintptr(closureOffset)))
+ closure := *(*func())(unsafe.Pointer(d.varp - uintptr(closureOffset)))
d.fn = closure
- deferArgs := deferArgs(d)
- // If there is an interface receiver or method receiver, it is
- // described/included as the first arg.
- for j := uint32(0); j < nArgs; j++ {
- var argOffset, argLen, argCallOffset uint32
- argOffset, fd = readvarintUnsafe(fd)
- argLen, fd = readvarintUnsafe(fd)
- argCallOffset, fd = readvarintUnsafe(fd)
- memmove(unsafe.Pointer(uintptr(deferArgs)+uintptr(argCallOffset)),
- unsafe.Pointer(d.varp-uintptr(argOffset)),
- uintptr(argLen))
- }
deferBits = deferBits &^ (1 << i)
*(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset))) = deferBits
p := d._panic
- if goexperiment.RegabiDefer {
- deferCallSave(p, deferFunc(d))
- } else {
- reflectcallSave(p, unsafe.Pointer(closure), deferArgs, argWidth)
- }
+ deferCallSave(p, d.fn)
if p != nil && p.aborted {
break
}
d.fn = nil
- // These args are just a copy, so can be cleared immediately
- memclrNoHeapPointers(deferArgs, uintptr(argWidth))
if d._panic != nil && d._panic.recovered {
done = deferBits == 0
break
@@ -905,32 +738,6 @@ func runOpenDeferFrame(gp *g, d *_defer) bool {
return done
}
-// reflectcallSave calls reflectcall after saving the caller's pc and sp in the
-// panic record. This allows the runtime to return to the Goexit defer processing
-// loop, in the unusual case where the Goexit may be bypassed by a successful
-// recover.
-//
-// This is marked as a wrapper by the compiler so it doesn't appear in
-// tracebacks.
-func reflectcallSave(p *_panic, fn, arg unsafe.Pointer, argsize uint32) {
- if goexperiment.RegabiDefer {
- throw("not allowed with GOEXPERIMENT=regabidefer")
- }
- if p != nil {
- p.argp = unsafe.Pointer(getargp())
- p.pc = getcallerpc()
- p.sp = unsafe.Pointer(getcallersp())
- }
- // Pass a dummy RegArgs since we'll only take this path if
- // we're not using the register ABI.
- var regs abi.RegArgs
- reflectcall(nil, fn, arg, argsize, argsize, argsize, &regs)
- if p != nil {
- p.pc = 0
- p.sp = unsafe.Pointer(nil)
- }
-}
-
// deferCallSave calls fn() after saving the caller's pc and sp in the
// panic record. This allows the runtime to return to the Goexit defer
// processing loop, in the unusual case where the Goexit may be
@@ -939,9 +746,6 @@ func reflectcallSave(p *_panic, fn, arg unsafe.Pointer, argsize uint32) {
// This is marked as a wrapper by the compiler so it doesn't appear in
// tracebacks.
func deferCallSave(p *_panic, fn func()) {
- if !goexperiment.RegabiDefer {
- throw("only allowed with GOEXPERIMENT=regabidefer")
- }
if p != nil {
p.argp = unsafe.Pointer(getargp())
p.pc = getcallerpc()
@@ -1041,16 +845,7 @@ func gopanic(e interface{}) {
}
} else {
p.argp = unsafe.Pointer(getargp())
-
- if goexperiment.RegabiDefer {
- fn := deferFunc(d)
- fn()
- } else {
- // Pass a dummy RegArgs since we'll only take this path if
- // we're not using the register ABI.
- var regs abi.RegArgs
- reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz), uint32(d.siz), &regs)
- }
+ d.fn()
}
p.argp = nil
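A consequence of deferproc taking a bare func() with no arguments or results is that the compiler now packages any deferred call that has arguments into a closure at the defer statement; the runtime only ever stores and invokes zero-argument functions. A hand-written sketch of that lowering (illustrative only, not the compiler's literal output):

    package main

    import "fmt"

    func logResult(op string, n int) { fmt.Println(op, "=", n) }

    func compute() int {
        n := 1
        // defer logResult("n", n) is handled roughly like this: the
        // arguments are evaluated and captured now, and the runtime is
        // given a plain func() to run when compute returns.
        capturedOp, capturedN := "n", n
        defer func() { logResult(capturedOp, capturedN) }()
        n = 2
        return n
    }

    func main() { fmt.Println("returned", compute()) }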
diff --git a/src/runtime/pprof/pprof.go b/src/runtime/pprof/pprof.go
index 99eda10f1c..000abf935c 100644
--- a/src/runtime/pprof/pprof.go
+++ b/src/runtime/pprof/pprof.go
@@ -76,6 +76,7 @@ import (
"bufio"
"bytes"
"fmt"
+ "internal/abi"
"io"
"runtime"
"sort"
@@ -289,7 +290,7 @@ func (p *Profile) Add(value interface{}, skip int) {
stk = stk[:n]
if len(stk) == 0 {
// The value for skip is too large, and there's no stack trace to record.
- stk = []uintptr{funcPC(lostProfileEvent)}
+ stk = []uintptr{abi.FuncPCABIInternal(lostProfileEvent)}
}
p.mu.Lock()
diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go
index 7cbb4fc7ae..cfcf379d1f 100644
--- a/src/runtime/pprof/pprof_test.go
+++ b/src/runtime/pprof/pprof_test.go
@@ -11,6 +11,7 @@ import (
"bytes"
"context"
"fmt"
+ "internal/abi"
"internal/profile"
"internal/testenv"
"io"
@@ -116,7 +117,7 @@ func containsInlinedCall(f interface{}, maxBytes int) bool {
// findInlinedCall returns the PC of an inlined function call within
// the function body for the function f if any.
func findInlinedCall(f interface{}, maxBytes int) (pc uint64, found bool) {
- fFunc := runtime.FuncForPC(uintptr(funcPC(f)))
+ fFunc := runtime.FuncForPC(uintptr(abi.FuncPCABIInternal(f)))
if fFunc == nil || fFunc.Entry() == 0 {
panic("failed to locate function entry")
}
diff --git a/src/runtime/pprof/proto.go b/src/runtime/pprof/proto.go
index bdb4454b6e..6862513956 100644
--- a/src/runtime/pprof/proto.go
+++ b/src/runtime/pprof/proto.go
@@ -8,6 +8,7 @@ import (
"bytes"
"compress/gzip"
"fmt"
+ "internal/abi"
"io"
"os"
"runtime"
@@ -21,11 +22,6 @@ import (
// (The name shows up in the pprof graphs.)
func lostProfileEvent() { lostProfileEvent() }
-// funcPC returns the PC for the func value f.
-func funcPC(f interface{}) uintptr {
- return *(*[2]*uintptr)(unsafe.Pointer(&f))[1]
-}
-
// A profileBuilder writes a profile incrementally from a
// stream of profile samples delivered by the runtime.
type profileBuilder struct {
@@ -325,7 +321,7 @@ func (b *profileBuilder) addCPUData(data []uint64, tags []unsafe.Pointer) error
// gentraceback guarantees that PCs in the
// stack can be unconditionally decremented and
// still be valid, so we must do the same.
- uint64(funcPC(lostProfileEvent) + 1),
+ uint64(abi.FuncPCABIInternal(lostProfileEvent) + 1),
}
}
b.m.lookup(stk, tag).count += int64(count)
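For reference, the helper deleted above relied on the layout of an interface holding a func value: the interface's data word points at the funcval, whose first word is the entry PC. A standalone reconstruction of that old behaviour (a sketch only; new code should prefer the typed intrinsics, since reading the func value directly can yield an ABI wrapper's PC under the register ABI):

    package main

    import (
        "fmt"
        "unsafe"
    )

    func lostProfileEventSketch() {}

    // funcPC mirrors the removed helper: read the entry PC out of a func
    // value boxed in an interface.
    func funcPC(f interface{}) uintptr {
        return *(*[2]*uintptr)(unsafe.Pointer(&f))[1]
    }

    func main() {
        fmt.Printf("entry PC: %#x\n", funcPC(lostProfileEventSketch))
    }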
diff --git a/src/runtime/pprof/proto_test.go b/src/runtime/pprof/proto_test.go
index 5eb1aab140..d052b9fa42 100644
--- a/src/runtime/pprof/proto_test.go
+++ b/src/runtime/pprof/proto_test.go
@@ -8,6 +8,7 @@ import (
"bytes"
"encoding/json"
"fmt"
+ "internal/abi"
"internal/profile"
"internal/testenv"
"os"
@@ -97,11 +98,11 @@ func testPCs(t *testing.T) (addr1, addr2 uint64, map1, map2 *profile.Mapping) {
map2 = mprof.Mapping[1]
map2.BuildID, _ = elfBuildID(map2.File)
case "js":
- addr1 = uint64(funcPC(f1))
- addr2 = uint64(funcPC(f2))
+ addr1 = uint64(abi.FuncPCABIInternal(f1))
+ addr2 = uint64(abi.FuncPCABIInternal(f2))
default:
- addr1 = uint64(funcPC(f1))
- addr2 = uint64(funcPC(f2))
+ addr1 = uint64(abi.FuncPCABIInternal(f1))
+ addr2 = uint64(abi.FuncPCABIInternal(f2))
// Fake mapping - HasFunctions will be true because two PCs from Go
// will be fully symbolized.
fake := &profile.Mapping{ID: 1, HasFunctions: true}
diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go
index 1d5aae1363..d6cdf1b8f8 100644
--- a/src/runtime/preempt.go
+++ b/src/runtime/preempt.go
@@ -53,6 +53,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -315,9 +316,9 @@ func asyncPreempt2() {
var asyncPreemptStack = ^uintptr(0)
func init() {
- f := findfunc(funcPC(asyncPreempt))
+ f := findfunc(abi.FuncPCABI0(asyncPreempt))
total := funcMaxSPDelta(f)
- f = findfunc(funcPC(asyncPreempt2))
+ f = findfunc(abi.FuncPCABIInternal(asyncPreempt2))
total += funcMaxSPDelta(f)
// Add some overhead for return PCs, etc.
asyncPreemptStack = uintptr(total) + 8*sys.PtrSize
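The two lookups in this init deliberately use different helpers: asyncPreempt is the assembly entry point injected by the signal handler (ABI0), while asyncPreempt2 is the Go function it calls (ABIInternal). That is also why the asm files below can drop the <ABIInternal> workaround. A compressed sketch of the calculation, assuming the same runtime-internal helpers as the hunk above:

    // Worst-case stack needed to inject an async preemption: the assembly
    // frame plus the Go frame plus slack for return PCs.
    asmFrame := funcMaxSPDelta(findfunc(abi.FuncPCABI0(asyncPreempt)))        // asm, ABI0
    goFrame := funcMaxSPDelta(findfunc(abi.FuncPCABIInternal(asyncPreempt2))) // Go, ABIInternal
    asyncPreemptStack = uintptr(asmFrame+goFrame) + 8*sys.PtrSize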
diff --git a/src/runtime/preempt_386.s b/src/runtime/preempt_386.s
index a803b24dc6..c3a5fa1f36 100644
--- a/src/runtime/preempt_386.s
+++ b/src/runtime/preempt_386.s
@@ -3,8 +3,7 @@
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
PUSHFL
ADJSP $156
NOP SP
diff --git a/src/runtime/preempt_amd64.s b/src/runtime/preempt_amd64.s
index dc7af806d3..31f7c8b66f 100644
--- a/src/runtime/preempt_amd64.s
+++ b/src/runtime/preempt_amd64.s
@@ -3,8 +3,7 @@
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
PUSHQ BP
MOVQ SP, BP
// Save flags before clobbering them
diff --git a/src/runtime/preempt_arm.s b/src/runtime/preempt_arm.s
index bbc9fbb1ea..8f243c0dcd 100644
--- a/src/runtime/preempt_arm.s
+++ b/src/runtime/preempt_arm.s
@@ -3,8 +3,7 @@
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
MOVW.W R14, -188(R13)
MOVW R0, 4(R13)
MOVW R1, 8(R13)
diff --git a/src/runtime/preempt_arm64.s b/src/runtime/preempt_arm64.s
index 2b70a28479..36ee13282c 100644
--- a/src/runtime/preempt_arm64.s
+++ b/src/runtime/preempt_arm64.s
@@ -3,8 +3,7 @@
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
MOVD R30, -496(RSP)
SUB $496, RSP
#ifdef GOOS_linux
diff --git a/src/runtime/preempt_mips64x.s b/src/runtime/preempt_mips64x.s
index b755425bc5..c1249e382e 100644
--- a/src/runtime/preempt_mips64x.s
+++ b/src/runtime/preempt_mips64x.s
@@ -6,8 +6,7 @@
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
MOVV R31, -488(R29)
SUBV $488, R29
MOVV R1, 8(R29)
diff --git a/src/runtime/preempt_mipsx.s b/src/runtime/preempt_mipsx.s
index c1bff60859..70b79e05b9 100644
--- a/src/runtime/preempt_mipsx.s
+++ b/src/runtime/preempt_mipsx.s
@@ -6,8 +6,7 @@
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
MOVW R31, -244(R29)
SUB $244, R29
MOVW R1, 4(R29)
diff --git a/src/runtime/preempt_ppc64x.s b/src/runtime/preempt_ppc64x.s
index 70bd91982b..7ed4021dde 100644
--- a/src/runtime/preempt_ppc64x.s
+++ b/src/runtime/preempt_ppc64x.s
@@ -6,8 +6,7 @@
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
MOVD R31, -488(R1)
MOVD LR, R31
MOVDU R31, -520(R1)
diff --git a/src/runtime/preempt_riscv64.s b/src/runtime/preempt_riscv64.s
index d4f9cc277f..eb68dcba2b 100644
--- a/src/runtime/preempt_riscv64.s
+++ b/src/runtime/preempt_riscv64.s
@@ -3,8 +3,7 @@
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
MOV X1, -472(X2)
ADD $-472, X2
MOV X3, 8(X2)
diff --git a/src/runtime/preempt_s390x.s b/src/runtime/preempt_s390x.s
index c6f11571df..ca9e47cde1 100644
--- a/src/runtime/preempt_s390x.s
+++ b/src/runtime/preempt_s390x.s
@@ -3,8 +3,7 @@
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
IPM R10
MOVD R14, -248(R15)
ADD $-248, R15
diff --git a/src/runtime/preempt_wasm.s b/src/runtime/preempt_wasm.s
index da90e8aa6d..0cf57d3d22 100644
--- a/src/runtime/preempt_wasm.s
+++ b/src/runtime/preempt_wasm.s
@@ -3,7 +3,6 @@
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
// No async preemption on wasm
UNDEF
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 59160c6525..4a116130a5 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -7,7 +7,6 @@ package runtime
import (
"internal/abi"
"internal/cpu"
- "internal/goexperiment"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -466,18 +465,6 @@ func releaseSudog(s *sudog) {
releasem(mp)
}
-// funcPC returns the entry PC of the function f.
-// It assumes that f is a func value. Otherwise the behavior is undefined.
-// CAREFUL: In programs with plugins, funcPC can return different values
-// for the same function (because there are actually multiple copies of
-// the same function in the address space). To be safe, don't use the
-// results of this function in any == expression. It is only safe to
-// use the result as an address at which to start executing code.
-//go:nosplit
-func funcPC(f interface{}) uintptr {
- return *(*uintptr)(efaceOf(&f).data)
-}
-
// called from assembly
func badmcall(fn func(*g)) {
throw("runtime: mcall called on m->g0 stack")
@@ -2043,7 +2030,7 @@ func oneNewExtraM() {
gp.lockedm.set(mp)
gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
if raceenabled {
- gp.racectx = racegostart(funcPC(newextram) + sys.PCQuantum)
+ gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
}
// put on allg for garbage collector
allgadd(gp)
@@ -2236,7 +2223,7 @@ func newm1(mp *m) {
}
ts.g.set(mp.g0)
ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
- ts.fn = unsafe.Pointer(funcPC(mstart))
+ ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
if msanenabled {
msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
}
@@ -4230,27 +4217,14 @@ func malg(stacksize int32) *g {
return newg
}
-// Create a new g running fn with siz bytes of arguments.
+// Create a new g running fn.
// Put it on the queue of g's waiting to run.
// The compiler turns a go statement into a call to this.
-//
-// The stack layout of this call is unusual: it assumes that the
-// arguments to pass to fn are on the stack sequentially immediately
-// after &fn. Hence, they are logically part of newproc's argument
-// frame, even though they don't appear in its signature (and can't
-// because their types differ between call sites).
-//
-// This must be nosplit because this stack layout means there are
-// untyped arguments in newproc's argument frame. Stack copies won't
-// be able to adjust them and stack splits won't be able to copy them.
-//
-//go:nosplit
-func newproc(siz int32, fn *funcval) {
- argp := add(unsafe.Pointer(&fn), sys.PtrSize)
+func newproc(fn *funcval) {
gp := getg()
pc := getcallerpc()
systemstack(func() {
- newg := newproc1(fn, argp, siz, gp, pc)
+ newg := newproc1(fn, gp, pc)
_p_ := getg().m.p.ptr()
runqput(_p_, newg, true)
@@ -4261,24 +4235,10 @@ func newproc(siz int32, fn *funcval) {
})
}
-// Create a new g in state _Grunnable, starting at fn, with narg bytes
-// of arguments starting at argp. callerpc is the address of the go
-// statement that created this. The caller is responsible for adding
-// the new g to the scheduler.
-//
-// This must run on the system stack because it's the continuation of
-// newproc, which cannot split the stack.
-//
-//go:systemstack
-func newproc1(fn *funcval, argp unsafe.Pointer, narg int32, callergp *g, callerpc uintptr) *g {
- if goexperiment.RegabiDefer && narg != 0 {
- // TODO: When we commit to GOEXPERIMENT=regabidefer,
- // rewrite the comments for newproc and newproc1.
- // newproc will no longer have a funny stack layout or
- // need to be nosplit.
- throw("go with non-empty frame")
- }
-
+// Create a new g in state _Grunnable, starting at fn. callerpc is the
+// address of the go statement that created this. The caller is responsible
+// for adding the new g to the scheduler.
+func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
_g_ := getg()
if fn == nil {
@@ -4286,16 +4246,6 @@ func newproc1(fn *funcval, argp unsafe.Pointer, narg int32, callergp *g, callerp
throw("go of nil func value")
}
acquirem() // disable preemption because it can be holding p in a local var
- siz := narg
- siz = (siz + 7) &^ 7
-
- // We could allocate a larger initial stack if necessary.
- // Not worth it: this is almost always an error.
- // 4*PtrSize: extra space added below
- // PtrSize: caller's LR (arm) or return address (x86, in gostartcall).
- if siz >= _StackMin-4*sys.PtrSize-sys.PtrSize {
- throw("newproc: function arguments too large for new goroutine")
- }
_p_ := _g_.m.p.ptr()
newg := gfget(_p_)
@@ -4312,8 +4262,8 @@ func newproc1(fn *funcval, argp unsafe.Pointer, narg int32, callergp *g, callerp
throw("newproc1: new g is not Gdead")
}
- totalSize := 4*sys.PtrSize + uintptr(siz) + sys.MinFrameSize // extra space in case of reads slightly beyond frame
- totalSize += -totalSize & (sys.StackAlign - 1) // align to StackAlign
+ totalSize := uintptr(4*sys.PtrSize + sys.MinFrameSize) // extra space in case of reads slightly beyond frame
+ totalSize = alignUp(totalSize, sys.StackAlign)
sp := newg.stack.hi - totalSize
spArg := sp
if usesLR {
@@ -4322,24 +4272,6 @@ func newproc1(fn *funcval, argp unsafe.Pointer, narg int32, callergp *g, callerp
prepGoExitFrame(sp)
spArg += sys.MinFrameSize
}
- if narg > 0 {
- memmove(unsafe.Pointer(spArg), argp, uintptr(narg))
- // This is a stack-to-stack copy. If write barriers
- // are enabled and the source stack is grey (the
- // destination is always black), then perform a
- // barrier copy. We do this *after* the memmove
- // because the destination stack may have garbage on
- // it.
- if writeBarrier.needed && !_g_.m.curg.gcscandone {
- f := findfunc(fn.fn)
- stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
- if stkmap.nbit > 0 {
- // We're in the prologue, so it's always stack map index 0.
- bv := stackmapdata(stkmap, 0)
- bulkBarrierBitmap(spArg, spArg, uintptr(bv.n)*sys.PtrSize, 0, bv.bytedata)
- }
- }
- }
memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
newg.sched.sp = sp
@@ -4749,16 +4681,16 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
// If all of the above has failed, account it against abstract "System" or "GC".
n = 2
if inVDSOPage(pc) {
- pc = funcPC(_VDSO) + sys.PCQuantum
+ pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
} else if pc > firstmoduledata.etext {
// "ExternalCode" is better than "etext".
- pc = funcPC(_ExternalCode) + sys.PCQuantum
+ pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
}
stk[0] = pc
if mp.preemptoff != "" {
- stk[1] = funcPC(_GC) + sys.PCQuantum
+ stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
} else {
- stk[1] = funcPC(_System) + sys.PCQuantum
+ stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
}
}
}
@@ -4802,7 +4734,7 @@ func sigprofNonGoPC(pc uintptr) {
if prof.hz != 0 {
stk := []uintptr{
pc,
- funcPC(_ExternalCode) + sys.PCQuantum,
+ abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum,
}
cpuprof.addNonGo(stk)
}
@@ -4852,9 +4784,7 @@ func (pp *p) init(id int32) {
pp.id = id
pp.status = _Pgcstop
pp.sudogcache = pp.sudogbuf[:0]
- for i := range pp.deferpool {
- pp.deferpool[i] = pp.deferpoolbuf[i][:0]
- }
+ pp.deferpool = pp.deferpoolbuf[:0]
pp.wbBuf.reset()
if pp.mcache == nil {
if id == 0 {
@@ -4932,12 +4862,10 @@ func (pp *p) destroy() {
pp.sudogbuf[i] = nil
}
pp.sudogcache = pp.sudogbuf[:0]
- for i := range pp.deferpool {
- for j := range pp.deferpoolbuf[i] {
- pp.deferpoolbuf[i][j] = nil
- }
- pp.deferpool[i] = pp.deferpoolbuf[i][:0]
+ for j := range pp.deferpoolbuf {
+ pp.deferpoolbuf[j] = nil
}
+ pp.deferpool = pp.deferpoolbuf[:0]
systemstack(func() {
for i := 0; i < pp.mspancache.len; i++ {
// Safe to call since the world is stopped.
@@ -6495,7 +6423,8 @@ func doInit(t *initTask) {
// Load stats non-atomically since tracinit is updated only by this init goroutine.
after := inittrace
- pkg := funcpkgpath(findfunc(funcPC(firstFunc)))
+ f := *(*func())(unsafe.Pointer(&firstFunc))
+ pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
var sbuf [24]byte
print("init ", pkg, " @")
diff --git a/src/runtime/race.go b/src/runtime/race.go
index cc8c5db1bd..f1c3c3098d 100644
--- a/src/runtime/race.go
+++ b/src/runtime/race.go
@@ -8,6 +8,7 @@
package runtime
import (
+ "internal/abi"
"unsafe"
)
@@ -361,7 +362,7 @@ func raceinit() (gctx, pctx uintptr) {
throw("raceinit: race build must use cgo")
}
- racecall(&__tsan_init, uintptr(unsafe.Pointer(&gctx)), uintptr(unsafe.Pointer(&pctx)), funcPC(racecallbackthunk), 0)
+ racecall(&__tsan_init, uintptr(unsafe.Pointer(&gctx)), uintptr(unsafe.Pointer(&pctx)), abi.FuncPCABI0(racecallbackthunk), 0)
// Round data segment to page boundaries, because it's used in mmap().
start := ^uintptr(0)
diff --git a/src/runtime/race/output_test.go b/src/runtime/race/output_test.go
index 99052071d0..63fcd847dc 100644
--- a/src/runtime/race/output_test.go
+++ b/src/runtime/race/output_test.go
@@ -148,7 +148,7 @@ exit status 66
package main
func main() {
done := make(chan bool)
- x := 0
+ x := 0; _ = x
go func() {
x = 42
done <- true
@@ -162,7 +162,7 @@ func main() {
package main
func main() {
done := make(chan bool)
- x := 0
+ x := 0; _ = x
go func() {
x = 42
done <- true
@@ -178,7 +178,7 @@ func main() {
package main
func main() {
done := make(chan bool)
- x := 0
+ x := 0; _ = x
go func() {
x = 42
done <- true
diff --git a/src/runtime/race_amd64.s b/src/runtime/race_amd64.s
index 8d4813eadd..8a17113232 100644
--- a/src/runtime/race_amd64.s
+++ b/src/runtime/race_amd64.s
@@ -161,10 +161,6 @@ TEXT runtime·racewriterangepc1(SB), NOSPLIT, $0-24
// If addr (RARG1) is out of range, do nothing.
// Otherwise, setup goroutine context and invoke racecall. Other arguments already set.
TEXT racecalladdr<>(SB), NOSPLIT, $0-0
-#ifndef GOEXPERIMENT_regabig
- get_tls(R12)
- MOVQ g(R12), R14
-#endif
MOVQ g_racectx(R14), RARG0 // goroutine context
// Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
CMPQ RARG1, runtime·racearenastart(SB)
@@ -192,10 +188,6 @@ TEXT runtime·racefuncenter(SB), NOSPLIT, $0-8
// R11 = caller's return address
TEXT racefuncenter<>(SB), NOSPLIT, $0-0
MOVQ DX, BX // save function entry context (for closures)
-#ifndef GOEXPERIMENT_regabig
- get_tls(R12)
- MOVQ g(R12), R14
-#endif
MOVQ g_racectx(R14), RARG0 // goroutine context
MOVQ R11, RARG1
// void __tsan_func_enter(ThreadState *thr, void *pc);
@@ -208,10 +200,6 @@ TEXT racefuncenter<>(SB), NOSPLIT, $0-0
// func runtime·racefuncexit()
// Called from instrumented code.
TEXT runtime·racefuncexit(SB), NOSPLIT, $0-0
-#ifndef GOEXPERIMENT_regabig
- get_tls(R12)
- MOVQ g(R12), R14
-#endif
MOVQ g_racectx(R14), RARG0 // goroutine context
// void __tsan_func_exit(ThreadState *thr);
MOVQ $__tsan_func_exit(SB), AX
@@ -370,10 +358,6 @@ racecallatomic_data:
JAE racecallatomic_ignore
racecallatomic_ok:
// Addr is within the good range, call the atomic function.
-#ifndef GOEXPERIMENT_regabig
- get_tls(R12)
- MOVQ g(R12), R14
-#endif
MOVQ g_racectx(R14), RARG0 // goroutine context
MOVQ 8(SP), RARG1 // caller pc
MOVQ (SP), RARG2 // pc
@@ -385,10 +369,6 @@ racecallatomic_ignore:
// An attempt to synchronize on the address would cause crash.
MOVQ AX, BX // remember the original function
MOVQ $__tsan_go_ignore_sync_begin(SB), AX
-#ifndef GOEXPERIMENT_regabig
- get_tls(R12)
- MOVQ g(R12), R14
-#endif
MOVQ g_racectx(R14), RARG0 // goroutine context
CALL racecall<>(SB)
MOVQ BX, AX // restore the original function
@@ -416,10 +396,6 @@ TEXT runtime·racecall(SB), NOSPLIT, $0-0
// Switches SP to g0 stack and calls (AX). Arguments already set.
TEXT racecall<>(SB), NOSPLIT, $0-0
-#ifndef GOEXPERIMENT_regabig
- get_tls(R12)
- MOVQ g(R12), R14
-#endif
MOVQ g_m(R14), R13
// Switch to g0 stack.
MOVQ SP, R12 // callee-saved, preserved across the CALL
@@ -441,9 +417,7 @@ call:
// The overall effect of Go->C->Go call chain is similar to that of mcall.
// RARG0 contains command code. RARG1 contains command-specific context.
// See racecallback for command codes.
-// Defined as ABIInternal so as to avoid introducing a wrapper,
-// because its address is passed to C via funcPC.
-TEXT runtime·racecallbackthunk<ABIInternal>(SB), NOSPLIT, $0-0
+TEXT runtime·racecallbackthunk(SB), NOSPLIT, $0-0
// Handle command raceGetProcCmd (0) here.
// First, code below assumes that we are on curg, while raceGetProcCmd
// can be executed on g0. Second, it is called frequently, so will
diff --git a/src/runtime/race_arm64.s b/src/runtime/race_arm64.s
index c6d5b91edc..2b2413b6b7 100644
--- a/src/runtime/race_arm64.s
+++ b/src/runtime/race_arm64.s
@@ -43,8 +43,14 @@
// func runtime·raceread(addr uintptr)
// Called from instrumented code.
-TEXT runtime·raceread(SB), NOSPLIT, $0-8
+// Defined as ABIInternal so as to avoid introducing a wrapper,
+// which would make caller's PC ineffective.
+TEXT runtime·raceread<ABIInternal>(SB), NOSPLIT, $0-8
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R0, R1 // addr
+#else
MOVD addr+0(FP), R1
+#endif
MOVD LR, R2
// void __tsan_read(ThreadState *thr, void *addr, void *pc);
MOVD $__tsan_read(SB), R9
@@ -66,8 +72,14 @@ TEXT runtime·racereadpc(SB), NOSPLIT, $0-24
// func runtime·racewrite(addr uintptr)
// Called from instrumented code.
-TEXT runtime·racewrite(SB), NOSPLIT, $0-8
+// Defined as ABIInternal so as to avoid introducing a wrapper,
+// which would make caller's PC ineffective.
+TEXT runtime·racewrite<ABIInternal>(SB), NOSPLIT, $0-8
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R0, R1 // addr
+#else
MOVD addr+0(FP), R1
+#endif
MOVD LR, R2
// void __tsan_write(ThreadState *thr, void *addr, void *pc);
MOVD $__tsan_write(SB), R9
@@ -89,9 +101,16 @@ TEXT runtime·racewritepc(SB), NOSPLIT, $0-24
// func runtime·racereadrange(addr, size uintptr)
// Called from instrumented code.
-TEXT runtime·racereadrange(SB), NOSPLIT, $0-16
+// Defined as ABIInternal so as to avoid introducing a wrapper,
+// which would make caller's PC ineffective.
+TEXT runtime·racereadrange<ABIInternal>(SB), NOSPLIT, $0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R1, R2 // size
+ MOVD R0, R1 // addr
+#else
MOVD addr+0(FP), R1
MOVD size+8(FP), R2
+#endif
MOVD LR, R3
// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
MOVD $__tsan_read_range(SB), R9
@@ -114,9 +133,16 @@ TEXT runtime·racereadrangepc1(SB), NOSPLIT, $0-24
// func runtime·racewriterange(addr, size uintptr)
// Called from instrumented code.
-TEXT runtime·racewriterange(SB), NOSPLIT, $0-16
+// Defined as ABIInternal so as to avoid introducing a wrapper,
+// which would make caller's PC ineffective.
+TEXT runtime·racewriterange<ABIInternal>(SB), NOSPLIT, $0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R1, R2 // size
+ MOVD R0, R1 // addr
+#else
MOVD addr+0(FP), R1
MOVD size+8(FP), R2
+#endif
MOVD LR, R3
// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
MOVD $__tsan_write_range(SB), R9
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index 0e0eb0b728..75c4818599 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -613,8 +613,8 @@ type p struct {
pcache pageCache
raceprocctx uintptr
- deferpool [5][]*_defer // pool of available defer structs of different sizes (see panic.go)
- deferpoolbuf [5][32]*_defer
+ deferpool []*_defer // pool of available defer structs (see panic.go)
+ deferpoolbuf [32]*_defer
// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
goidcache uint64
@@ -801,9 +801,9 @@ type schedt struct {
sudoglock mutex
sudogcache *sudog
- // Central pool of available defer structs of different sizes.
+ // Central pool of available defer structs.
deferlock mutex
- deferpool [5]*_defer
+ deferpool *_defer
// freem is the list of m's waiting to be freed when their
// m.exited is set. Linked through m.freelink.
@@ -940,24 +940,23 @@ func extendRandom(r []byte, n int) {
// A _defer holds an entry on the list of deferred calls.
// If you add a field here, add code to clear it in freedefer and deferProcStack
-// This struct must match the code in cmd/compile/internal/gc/reflect.go:deferstruct
-// and cmd/compile/internal/gc/ssa.go:(*state).call.
+// This struct must match the code in cmd/compile/internal/ssagen/ssa.go:deferstruct
+// and cmd/compile/internal/ssagen/ssa.go:(*state).call.
// Some defers will be allocated on the stack and some on the heap.
// All defers are logically part of the stack, so write barriers to
// initialize them are not required. All defers must be manually scanned,
// and for heap defers, marked.
type _defer struct {
- siz int32 // includes both arguments and results
started bool
heap bool
// openDefer indicates that this _defer is for a frame with open-coded
// defers. We have only one defer record for the entire frame (which may
// currently have 0, 1, or more defers active).
openDefer bool
- sp uintptr // sp at time of defer
- pc uintptr // pc at time of defer
- fn *funcval // can be nil for open-coded defers
- _panic *_panic // panic that is running defer
+ sp uintptr // sp at time of defer
+ pc uintptr // pc at time of defer
+ fn func() // can be nil for open-coded defers
+ _panic *_panic // panic that is running defer
link *_defer
// If openDefer is true, the fields below record values about the stack
diff --git a/src/runtime/select.go b/src/runtime/select.go
index e72761bfa9..74f0c29194 100644
--- a/src/runtime/select.go
+++ b/src/runtime/select.go
@@ -7,6 +7,7 @@ package runtime
// This file contains the implementation of Go select statements.
import (
+ "internal/abi"
"runtime/internal/atomic"
"unsafe"
)
@@ -22,8 +23,8 @@ type scase struct {
}
var (
- chansendpc = funcPC(chansend)
- chanrecvpc = funcPC(chanrecv)
+ chansendpc = abi.FuncPCABIInternal(chansend)
+ chanrecvpc = abi.FuncPCABIInternal(chanrecv)
)
func selectsetpc(pc *uintptr) {
diff --git a/src/runtime/signal_386.go b/src/runtime/signal_386.go
index 5824eaddb5..c77a9cc522 100644
--- a/src/runtime/signal_386.go
+++ b/src/runtime/signal_386.go
@@ -8,6 +8,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
@@ -42,10 +43,10 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
sp := uintptr(c.esp())
if shouldPushSigpanic(gp, pc, *(*uintptr)(unsafe.Pointer(sp))) {
- c.pushCall(funcPC(sigpanic), pc)
+ c.pushCall(abi.FuncPCABIInternal(sigpanic), pc)
} else {
// Not safe to push the call. Just clobber the frame.
- c.set_eip(uint32(funcPC(sigpanic)))
+ c.set_eip(uint32(abi.FuncPCABIInternal(sigpanic)))
}
}
diff --git a/src/runtime/signal_amd64.go b/src/runtime/signal_amd64.go
index e45fbb4a87..afcf4404fb 100644
--- a/src/runtime/signal_amd64.go
+++ b/src/runtime/signal_amd64.go
@@ -9,6 +9,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
@@ -70,10 +71,10 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
// Go special registers. We inject sigpanic0 (instead of sigpanic),
// which takes care of that.
if shouldPushSigpanic(gp, pc, *(*uintptr)(unsafe.Pointer(sp))) {
- c.pushCall(funcPC(sigpanic0), pc)
+ c.pushCall(abi.FuncPCABI0(sigpanic0), pc)
} else {
// Not safe to push the call. Just clobber the frame.
- c.set_rip(uint64(funcPC(sigpanic0)))
+ c.set_rip(uint64(abi.FuncPCABI0(sigpanic0)))
}
}
diff --git a/src/runtime/signal_arm.go b/src/runtime/signal_arm.go
index 4d9c6224a2..a0780788f8 100644
--- a/src/runtime/signal_arm.go
+++ b/src/runtime/signal_arm.go
@@ -7,7 +7,10 @@
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
func dumpregs(c *sigctxt) {
print("trap ", hex(c.trap()), "\n")
@@ -61,7 +64,7 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
// In case we are panicking from external C code
c.set_r10(uint32(uintptr(unsafe.Pointer(gp))))
- c.set_pc(uint32(funcPC(sigpanic)))
+ c.set_pc(uint32(abi.FuncPCABIInternal(sigpanic)))
}
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
diff --git a/src/runtime/signal_arm64.go b/src/runtime/signal_arm64.go
index f04750084f..9d4a8b8a99 100644
--- a/src/runtime/signal_arm64.go
+++ b/src/runtime/signal_arm64.go
@@ -8,6 +8,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
@@ -77,7 +78,7 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
// In case we are panicking from external C code
c.set_r28(uint64(uintptr(unsafe.Pointer(gp))))
- c.set_pc(uint64(funcPC(sigpanic)))
+ c.set_pc(uint64(abi.FuncPCABIInternal(sigpanic)))
}
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
diff --git a/src/runtime/signal_linux_s390x.go b/src/runtime/signal_linux_s390x.go
index 12d5c31593..03c58cbbb6 100644
--- a/src/runtime/signal_linux_s390x.go
+++ b/src/runtime/signal_linux_s390x.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
@@ -107,7 +108,7 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
// In case we are panicking from external C code
c.set_r0(0)
c.set_r13(uint64(uintptr(unsafe.Pointer(gp))))
- c.set_pc(uint64(funcPC(sigpanic)))
+ c.set_pc(uint64(abi.FuncPCABIInternal(sigpanic)))
}
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
diff --git a/src/runtime/signal_mips64x.go b/src/runtime/signal_mips64x.go
index 1616b57027..eebcc74886 100644
--- a/src/runtime/signal_mips64x.go
+++ b/src/runtime/signal_mips64x.go
@@ -9,6 +9,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
@@ -80,7 +81,7 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
}
// In case we are panicking from external C code
- sigpanicPC := uint64(funcPC(sigpanic))
+ sigpanicPC := uint64(abi.FuncPCABIInternal(sigpanic))
c.set_r28(sigpanicPC >> 32 << 32) // RSB register
c.set_r30(uint64(uintptr(unsafe.Pointer(gp))))
c.set_pc(sigpanicPC)
diff --git a/src/runtime/signal_mipsx.go b/src/runtime/signal_mipsx.go
index dcc7f1e9dd..5067799bd6 100644
--- a/src/runtime/signal_mipsx.go
+++ b/src/runtime/signal_mipsx.go
@@ -9,6 +9,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
@@ -78,7 +79,7 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
// In case we are panicking from external C code
c.set_r30(uint32(uintptr(unsafe.Pointer(gp))))
- c.set_pc(uint32(funcPC(sigpanic)))
+ c.set_pc(uint32(abi.FuncPCABIInternal(sigpanic)))
}
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
diff --git a/src/runtime/signal_ppc64x.go b/src/runtime/signal_ppc64x.go
index f2225da9a1..8a39d59957 100644
--- a/src/runtime/signal_ppc64x.go
+++ b/src/runtime/signal_ppc64x.go
@@ -9,6 +9,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
@@ -83,8 +84,8 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
// In case we are panicking from external C code
c.set_r0(0)
c.set_r30(uint64(uintptr(unsafe.Pointer(gp))))
- c.set_r12(uint64(funcPC(sigpanic)))
- c.set_pc(uint64(funcPC(sigpanic)))
+ c.set_r12(uint64(abi.FuncPCABIInternal(sigpanic)))
+ c.set_pc(uint64(abi.FuncPCABIInternal(sigpanic)))
}
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
diff --git a/src/runtime/signal_riscv64.go b/src/runtime/signal_riscv64.go
index e6b1b14130..aaaa217051 100644
--- a/src/runtime/signal_riscv64.go
+++ b/src/runtime/signal_riscv64.go
@@ -8,6 +8,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
@@ -76,7 +77,7 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
// In case we are panicking from external C code
c.set_gp(uint64(uintptr(unsafe.Pointer(gp))))
- c.set_pc(uint64(funcPC(sigpanic)))
+ c.set_pc(uint64(abi.FuncPCABIInternal(sigpanic)))
}
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go
index f2e526973d..0b3414d457 100644
--- a/src/runtime/signal_unix.go
+++ b/src/runtime/signal_unix.go
@@ -8,6 +8,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/atomic"
"unsafe"
)
@@ -143,7 +144,7 @@ func initsig(preinit bool) {
}
handlingSig[i] = 1
- setsig(i, funcPC(sighandler))
+ setsig(i, abi.FuncPCABIInternal(sighandler))
}
}
@@ -194,7 +195,7 @@ func sigenable(sig uint32) {
<-maskUpdatedChan
if atomic.Cas(&handlingSig[sig], 0, 1) {
atomic.Storeuintptr(&fwdSig[sig], getsig(sig))
- setsig(sig, funcPC(sighandler))
+ setsig(sig, abi.FuncPCABIInternal(sighandler))
}
}
}
@@ -271,7 +272,7 @@ func setProcessCPUProfiler(hz int32) {
// Enable the Go signal handler if not enabled.
if atomic.Cas(&handlingSig[_SIGPROF], 0, 1) {
atomic.Storeuintptr(&fwdSig[_SIGPROF], getsig(_SIGPROF))
- setsig(_SIGPROF, funcPC(sighandler))
+ setsig(_SIGPROF, abi.FuncPCABIInternal(sighandler))
}
var it itimerval
@@ -329,7 +330,7 @@ func doSigPreempt(gp *g, ctxt *sigctxt) {
if wantAsyncPreempt(gp) {
if ok, newpc := isAsyncSafePoint(gp, ctxt.sigpc(), ctxt.sigsp(), ctxt.siglr()); ok {
// Adjust the PC and inject a call to asyncPreempt.
- ctxt.pushCall(funcPC(asyncPreempt), newpc)
+ ctxt.pushCall(abi.FuncPCABI0(asyncPreempt), newpc)
}
}
@@ -843,7 +844,7 @@ func raisebadsignal(sig uint32, c *sigctxt) {
// We may receive another instance of the signal before we
// restore the Go handler, but that is not so bad: we know
// that the Go program has been ignoring the signal.
- setsig(sig, funcPC(sighandler))
+ setsig(sig, abi.FuncPCABIInternal(sighandler))
}
//go:nosplit
diff --git a/src/runtime/signal_windows.go b/src/runtime/signal_windows.go
index f2ce24d735..af15709a4a 100644
--- a/src/runtime/signal_windows.go
+++ b/src/runtime/signal_windows.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
@@ -27,15 +28,15 @@ func firstcontinuetramp()
func lastcontinuetramp()
func initExceptionHandler() {
- stdcall2(_AddVectoredExceptionHandler, 1, funcPC(exceptiontramp))
+ stdcall2(_AddVectoredExceptionHandler, 1, abi.FuncPCABI0(exceptiontramp))
if _AddVectoredContinueHandler == nil || GOARCH == "386" {
// use SetUnhandledExceptionFilter for windows-386 or
// if VectoredContinueHandler is unavailable.
// note: SetUnhandledExceptionFilter handler won't be called, if debugging.
- stdcall1(_SetUnhandledExceptionFilter, funcPC(lastcontinuetramp))
+ stdcall1(_SetUnhandledExceptionFilter, abi.FuncPCABI0(lastcontinuetramp))
} else {
- stdcall2(_AddVectoredContinueHandler, 1, funcPC(firstcontinuetramp))
- stdcall2(_AddVectoredContinueHandler, 0, funcPC(lastcontinuetramp))
+ stdcall2(_AddVectoredContinueHandler, 1, abi.FuncPCABI0(firstcontinuetramp))
+ stdcall2(_AddVectoredContinueHandler, 0, abi.FuncPCABI0(lastcontinuetramp))
}
}
@@ -133,7 +134,7 @@ func exceptionhandler(info *exceptionrecord, r *context, gp *g) int32 {
// The exception is not from asyncPreempt, so not to push a
// sigpanic call to make it look like that. Instead, just
// overwrite the PC. (See issue #35773)
- if r.ip() != 0 && r.ip() != funcPC(asyncPreempt) {
+ if r.ip() != 0 && r.ip() != abi.FuncPCABI0(asyncPreempt) {
sp := unsafe.Pointer(r.sp())
delta := uintptr(sys.StackAlign)
sp = add(sp, -delta)
@@ -145,7 +146,7 @@ func exceptionhandler(info *exceptionrecord, r *context, gp *g) int32 {
*((*uintptr)(sp)) = r.ip()
}
}
- r.set_ip(funcPC(sigpanic0))
+ r.set_ip(abi.FuncPCABI0(sigpanic0))
return _EXCEPTION_CONTINUE_EXECUTION
}
diff --git a/src/runtime/slice.go b/src/runtime/slice.go
index f9d4154acf..7a470f09b6 100644
--- a/src/runtime/slice.go
+++ b/src/runtime/slice.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/math"
"runtime/internal/sys"
"unsafe"
@@ -68,7 +69,7 @@ func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsaf
if raceenabled {
callerpc := getcallerpc()
- pc := funcPC(makeslicecopy)
+ pc := abi.FuncPCABIInternal(makeslicecopy)
racereadrangepc(from, copymem, callerpc, pc)
}
if msanenabled {
@@ -144,7 +145,7 @@ func panicunsafeslicelen() {
func growslice(et *_type, old slice, cap int) slice {
if raceenabled {
callerpc := getcallerpc()
- racereadrangepc(old.array, uintptr(old.len*int(et.size)), callerpc, funcPC(growslice))
+ racereadrangepc(old.array, uintptr(old.len*int(et.size)), callerpc, abi.FuncPCABIInternal(growslice))
}
if msanenabled {
msanread(old.array, uintptr(old.len*int(et.size)))
@@ -280,7 +281,7 @@ func slicecopy(toPtr unsafe.Pointer, toLen int, fromPtr unsafe.Pointer, fromLen
size := uintptr(n) * width
if raceenabled {
callerpc := getcallerpc()
- pc := funcPC(slicecopy)
+ pc := abi.FuncPCABIInternal(slicecopy)
racereadrangepc(fromPtr, size, callerpc, pc)
racewriterangepc(toPtr, size, callerpc, pc)
}
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index b21c9c9518..b5545ac796 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -753,11 +753,6 @@ func adjustdefers(gp *g, adjinfo *adjustinfo) {
adjustpointer(adjinfo, unsafe.Pointer(&d.varp))
adjustpointer(adjinfo, unsafe.Pointer(&d.fd))
}
-
- // Adjust defer argument blocks the same way we adjust active stack frames.
- // Note: this code is after the loop above, so that if a defer record is
- // stack allocated, we work on the copy in the new stack.
- tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))
}
func adjustpanics(gp *g, adjinfo *adjustinfo) {
@@ -1112,7 +1107,7 @@ func gostartcallfn(gobuf *gobuf, fv *funcval) {
if fv != nil {
fn = unsafe.Pointer(fv.fn)
} else {
- fn = unsafe.Pointer(funcPC(nilfunc))
+ fn = unsafe.Pointer(abi.FuncPCABIInternal(nilfunc))
}
gostartcall(gobuf, fn, unsafe.Pointer(fv))
}
@@ -1318,11 +1313,11 @@ func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args
}
// stack objects.
- if GOARCH == "amd64" && unsafe.Sizeof(abi.RegArgs{}) > 0 && frame.argmap != nil {
+ if (GOARCH == "amd64" || GOARCH == "arm64") && unsafe.Sizeof(abi.RegArgs{}) > 0 && frame.argmap != nil {
// argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
// We don't actually use argmap in this case, but we need to fake the stack object
- // record for these frames which contain an internal/abi.RegArgs at a hard-coded offset
- // on amd64.
+ // record for these frames which contain an internal/abi.RegArgs at a hard-coded offset.
+ // This offset matches the assembly code on amd64 and arm64.
objs = methodValueCallFrameObjs
} else {
p := funcdata(f, _FUNCDATA_StackObjects)
diff --git a/src/runtime/string.go b/src/runtime/string.go
index d6030a1dca..3c215d3754 100644
--- a/src/runtime/string.go
+++ b/src/runtime/string.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/abi"
"internal/bytealg"
"runtime/internal/sys"
"unsafe"
@@ -88,7 +89,7 @@ func slicebytetostring(buf *tmpBuf, ptr *byte, n int) (str string) {
racereadrangepc(unsafe.Pointer(ptr),
uintptr(n),
getcallerpc(),
- funcPC(slicebytetostring))
+ abi.FuncPCABIInternal(slicebytetostring))
}
if msanenabled {
msanread(unsafe.Pointer(ptr), uintptr(n))
@@ -152,7 +153,7 @@ func slicebytetostringtmp(ptr *byte, n int) (str string) {
racereadrangepc(unsafe.Pointer(ptr),
uintptr(n),
getcallerpc(),
- funcPC(slicebytetostringtmp))
+ abi.FuncPCABIInternal(slicebytetostringtmp))
}
if msanenabled && n > 0 {
msanread(unsafe.Pointer(ptr), uintptr(n))
@@ -203,7 +204,7 @@ func slicerunetostring(buf *tmpBuf, a []rune) string {
racereadrangepc(unsafe.Pointer(&a[0]),
uintptr(len(a))*unsafe.Sizeof(a[0]),
getcallerpc(),
- funcPC(slicerunetostring))
+ abi.FuncPCABIInternal(slicerunetostring))
}
if msanenabled && len(a) > 0 {
msanread(unsafe.Pointer(&a[0]), uintptr(len(a))*unsafe.Sizeof(a[0]))
diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go
index 16d7583202..b94acdea1f 100644
--- a/src/runtime/stubs.go
+++ b/src/runtime/stubs.go
@@ -177,7 +177,7 @@ func cgocallback(fn, frame, ctxt uintptr)
func gogo(buf *gobuf)
//go:noescape
-func jmpdefer(fv *funcval, argp uintptr)
+func jmpdefer(fv func(), argp uintptr)
func asminit()
func setg(gg *g)
func breakpoint()
diff --git a/src/runtime/stubs_arm64.go b/src/runtime/stubs_arm64.go
index f5e3bb4854..bd0533d158 100644
--- a/src/runtime/stubs_arm64.go
+++ b/src/runtime/stubs_arm64.go
@@ -14,3 +14,10 @@ func save_g()
func asmcgocall_no_g(fn, arg unsafe.Pointer)
func emptyfunc()
+
+// Used by reflectcall and the reflect package.
+//
+// Spills/loads arguments in registers to/from an internal/abi.RegArgs
+// respectively. Does not follow the Go ABI.
+func spillArgs()
+func unspillArgs()
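spillArgs and unspillArgs are new arm64 assembly stubs mirroring the amd64 ones: they copy the argument registers into and out of an internal/abi.RegArgs block so reflectcall can marshal register-passed arguments. The real stubs cannot be written in Go; the sketch below is only a hypothetical, portable analogue of the data movement, with assumed register counts:

package main

import "fmt"

// regArgs is a hypothetical stand-in for internal/abi.RegArgs; the register
// counts are assumptions for illustration. The real spillArgs/unspillArgs
// are assembly, do not follow the Go ABI, and move actual machine registers.
type regArgs struct {
	Ints   [16]uintptr
	Floats [16]uint64
}

// spillArgsLike: "registers" -> memory, so reflect code can read and rewrite
// the arguments of a register-ABI call.
func spillArgsLike(ints []uintptr, floats []uint64, ra *regArgs) {
	copy(ra.Ints[:], ints)
	copy(ra.Floats[:], floats)
}

// unspillArgsLike: memory -> "registers", before resuming the real call.
func unspillArgsLike(ra *regArgs, ints []uintptr, floats []uint64) {
	copy(ints, ra.Ints[:])
	copy(floats, ra.Floats[:])
}

func main() {
	var ra regArgs
	spillArgsLike([]uintptr{1, 2, 3}, []uint64{7}, &ra)
	out := make([]uintptr, 3)
	unspillArgsLike(&ra, out, make([]uint64, 1))
	fmt.Println(out) // [1 2 3]
}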
diff --git a/src/runtime/sys_darwin_arm64.go b/src/runtime/sys_darwin_arm64.go
index 9c14f33a1c..7dabaca08d 100644
--- a/src/runtime/sys_darwin_arm64.go
+++ b/src/runtime/sys_darwin_arm64.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
@@ -14,14 +15,14 @@ import (
//go:nosplit
//go:cgo_unsafe_args
func g0_pthread_key_create(k *pthreadkey, destructor uintptr) int32 {
- return asmcgocall(unsafe.Pointer(funcPC(pthread_key_create_trampoline)), unsafe.Pointer(&k))
+ return asmcgocall(unsafe.Pointer(abi.FuncPCABI0(pthread_key_create_trampoline)), unsafe.Pointer(&k))
}
func pthread_key_create_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func g0_pthread_setspecific(k pthreadkey, value uintptr) int32 {
- return asmcgocall(unsafe.Pointer(funcPC(pthread_setspecific_trampoline)), unsafe.Pointer(&k))
+ return asmcgocall(unsafe.Pointer(abi.FuncPCABI0(pthread_setspecific_trampoline)), unsafe.Pointer(&k))
}
func pthread_setspecific_trampoline()
diff --git a/src/runtime/sys_linux_amd64.s b/src/runtime/sys_linux_amd64.s
index 33cc670b64..64ddc2354e 100644
--- a/src/runtime/sys_linux_amd64.s
+++ b/src/runtime/sys_linux_amd64.s
@@ -215,13 +215,7 @@ TEXT runtime·nanotime1(SB),NOSPLIT,$16-8
MOVQ SP, R12 // Save old SP; R12 unchanged by C code.
-#ifdef GOEXPERIMENT_regabig
MOVQ g_m(R14), BX // BX unchanged by C code.
-#else
- get_tls(CX)
- MOVQ g(CX), AX
- MOVQ g_m(AX), BX // BX unchanged by C code.
-#endif
// Set vdsoPC and vdsoSP for SIGPROF traceback.
// Save the old values on stack and restore them on exit,
@@ -236,11 +230,7 @@ TEXT runtime·nanotime1(SB),NOSPLIT,$16-8
MOVQ CX, m_vdsoPC(BX)
MOVQ DX, m_vdsoSP(BX)
-#ifdef GOEXPERIMENT_regabig
CMPQ R14, m_curg(BX) // Only switch if on curg.
-#else
- CMPQ AX, m_curg(BX) // Only switch if on curg.
-#endif
JNE noswitch
MOVQ m_g0(BX), DX
@@ -328,9 +318,8 @@ TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
POPQ BP
RET
-// Defined as ABIInternal since it does not use the stack-based Go ABI.
// Called using C ABI.
-TEXT runtime·sigtramp<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·sigtramp(SB),NOSPLIT,$0
// Transition from C ABI to Go ABI.
PUSH_REGS_HOST_TO_ABI0()
@@ -348,8 +337,7 @@ TEXT runtime·sigtramp<ABIInternal>(SB),NOSPLIT,$0
// Used instead of sigtramp in programs that use cgo.
// Arguments from kernel are in DI, SI, DX.
-// Defined as ABIInternal since it does not use the stack-based Go ABI.
-TEXT runtime·cgoSigtramp<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·cgoSigtramp(SB),NOSPLIT,$0
// If no traceback function, do usual sigtramp.
MOVQ runtime·cgoTraceback(SB), AX
TESTQ AX, AX
@@ -392,12 +380,12 @@ TEXT runtime·cgoSigtramp<ABIInternal>(SB),NOSPLIT,$0
// The first three arguments, and the fifth, are already in registers.
// Set the two remaining arguments now.
MOVQ runtime·cgoTraceback(SB), CX
- MOVQ $runtime·sigtramp<ABIInternal>(SB), R9
+ MOVQ $runtime·sigtramp(SB), R9
MOVQ _cgo_callers(SB), AX
JMP AX
sigtramp:
- JMP runtime·sigtramp<ABIInternal>(SB)
+ JMP runtime·sigtramp(SB)
sigtrampnog:
// Signal arrived on a non-Go thread. If this is SIGPROF, get a
@@ -428,8 +416,7 @@ sigtrampnog:
// https://sourceware.org/git/?p=glibc.git;a=blob;f=sysdeps/unix/sysv/linux/x86_64/sigaction.c
// The code that cares about the precise instructions used is:
// https://gcc.gnu.org/viewcvs/gcc/trunk/libgcc/config/i386/linux-unwind.h?revision=219188&view=markup
-// Defined as ABIInternal since it does not use the stack-based Go ABI.
-TEXT runtime·sigreturn<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·sigreturn(SB),NOSPLIT,$0
MOVQ $SYS_rt_sigreturn, AX
SYSCALL
INT $3 // not reached
diff --git a/src/runtime/sys_openbsd.go b/src/runtime/sys_openbsd.go
index ab3149558b..15888619b1 100644
--- a/src/runtime/sys_openbsd.go
+++ b/src/runtime/sys_openbsd.go
@@ -7,7 +7,10 @@
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
// The *_trampoline functions convert from the Go calling convention to the C calling convention
// and then call the underlying libc function. These are defined in sys_openbsd_$ARCH.s.
@@ -15,35 +18,35 @@ import "unsafe"
//go:nosplit
//go:cgo_unsafe_args
func pthread_attr_init(attr *pthreadattr) int32 {
- return libcCall(unsafe.Pointer(funcPC(pthread_attr_init_trampoline)), unsafe.Pointer(&attr))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_attr_init_trampoline)), unsafe.Pointer(&attr))
}
func pthread_attr_init_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func pthread_attr_destroy(attr *pthreadattr) int32 {
- return libcCall(unsafe.Pointer(funcPC(pthread_attr_destroy_trampoline)), unsafe.Pointer(&attr))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_attr_destroy_trampoline)), unsafe.Pointer(&attr))
}
func pthread_attr_destroy_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func pthread_attr_getstacksize(attr *pthreadattr, size *uintptr) int32 {
- return libcCall(unsafe.Pointer(funcPC(pthread_attr_getstacksize_trampoline)), unsafe.Pointer(&attr))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_attr_getstacksize_trampoline)), unsafe.Pointer(&attr))
}
func pthread_attr_getstacksize_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func pthread_attr_setdetachstate(attr *pthreadattr, state int) int32 {
- return libcCall(unsafe.Pointer(funcPC(pthread_attr_setdetachstate_trampoline)), unsafe.Pointer(&attr))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_attr_setdetachstate_trampoline)), unsafe.Pointer(&attr))
}
func pthread_attr_setdetachstate_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func pthread_create(attr *pthreadattr, start uintptr, arg unsafe.Pointer) int32 {
- return libcCall(unsafe.Pointer(funcPC(pthread_create_trampoline)), unsafe.Pointer(&attr))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_create_trampoline)), unsafe.Pointer(&attr))
}
func pthread_create_trampoline()
diff --git a/src/runtime/sys_openbsd1.go b/src/runtime/sys_openbsd1.go
index cb5d35879c..b4e9f54538 100644
--- a/src/runtime/sys_openbsd1.go
+++ b/src/runtime/sys_openbsd1.go
@@ -7,31 +7,34 @@
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
//go:nosplit
//go:cgo_unsafe_args
func thrsleep(ident uintptr, clock_id int32, tsp *timespec, lock uintptr, abort *uint32) int32 {
- return libcCall(unsafe.Pointer(funcPC(thrsleep_trampoline)), unsafe.Pointer(&ident))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(thrsleep_trampoline)), unsafe.Pointer(&ident))
}
func thrsleep_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func thrwakeup(ident uintptr, n int32) int32 {
- return libcCall(unsafe.Pointer(funcPC(thrwakeup_trampoline)), unsafe.Pointer(&ident))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(thrwakeup_trampoline)), unsafe.Pointer(&ident))
}
func thrwakeup_trampoline()
//go:nosplit
func osyield() {
- libcCall(unsafe.Pointer(funcPC(sched_yield_trampoline)), unsafe.Pointer(nil))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(sched_yield_trampoline)), unsafe.Pointer(nil))
}
func sched_yield_trampoline()
//go:nosplit
func osyield_no_g() {
- asmcgocall_no_g(unsafe.Pointer(funcPC(sched_yield_trampoline)), unsafe.Pointer(nil))
+ asmcgocall_no_g(unsafe.Pointer(abi.FuncPCABI0(sched_yield_trampoline)), unsafe.Pointer(nil))
}
//go:cgo_import_dynamic libc_thrsleep __thrsleep "libc.so"
diff --git a/src/runtime/sys_openbsd2.go b/src/runtime/sys_openbsd2.go
index cd1a4e879f..190ee4716a 100644
--- a/src/runtime/sys_openbsd2.go
+++ b/src/runtime/sys_openbsd2.go
@@ -7,21 +7,24 @@
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
// This is exported via linkname to assembly in runtime/cgo.
//go:linkname exit
//go:nosplit
//go:cgo_unsafe_args
func exit(code int32) {
- libcCall(unsafe.Pointer(funcPC(exit_trampoline)), unsafe.Pointer(&code))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(exit_trampoline)), unsafe.Pointer(&code))
}
func exit_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func getthrid() (tid int32) {
- libcCall(unsafe.Pointer(funcPC(getthrid_trampoline)), unsafe.Pointer(&tid))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(getthrid_trampoline)), unsafe.Pointer(&tid))
return
}
func getthrid_trampoline()
@@ -29,14 +32,14 @@ func getthrid_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func raiseproc(sig uint32) {
- libcCall(unsafe.Pointer(funcPC(raiseproc_trampoline)), unsafe.Pointer(&sig))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(raiseproc_trampoline)), unsafe.Pointer(&sig))
}
func raiseproc_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func thrkill(tid int32, sig int) {
- libcCall(unsafe.Pointer(funcPC(thrkill_trampoline)), unsafe.Pointer(&tid))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(thrkill_trampoline)), unsafe.Pointer(&tid))
}
func thrkill_trampoline()
@@ -53,7 +56,7 @@ func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (un
ret1 unsafe.Pointer
ret2 int
}{addr, n, prot, flags, fd, off, nil, 0}
- libcCall(unsafe.Pointer(funcPC(mmap_trampoline)), unsafe.Pointer(&args))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(mmap_trampoline)), unsafe.Pointer(&args))
return args.ret1, args.ret2
}
func mmap_trampoline()
@@ -61,42 +64,42 @@ func mmap_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func munmap(addr unsafe.Pointer, n uintptr) {
- libcCall(unsafe.Pointer(funcPC(munmap_trampoline)), unsafe.Pointer(&addr))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(munmap_trampoline)), unsafe.Pointer(&addr))
}
func munmap_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func madvise(addr unsafe.Pointer, n uintptr, flags int32) {
- libcCall(unsafe.Pointer(funcPC(madvise_trampoline)), unsafe.Pointer(&addr))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(madvise_trampoline)), unsafe.Pointer(&addr))
}
func madvise_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func open(name *byte, mode, perm int32) (ret int32) {
- return libcCall(unsafe.Pointer(funcPC(open_trampoline)), unsafe.Pointer(&name))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(open_trampoline)), unsafe.Pointer(&name))
}
func open_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func closefd(fd int32) int32 {
- return libcCall(unsafe.Pointer(funcPC(close_trampoline)), unsafe.Pointer(&fd))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(close_trampoline)), unsafe.Pointer(&fd))
}
func close_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func read(fd int32, p unsafe.Pointer, n int32) int32 {
- return libcCall(unsafe.Pointer(funcPC(read_trampoline)), unsafe.Pointer(&fd))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(read_trampoline)), unsafe.Pointer(&fd))
}
func read_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func write1(fd uintptr, p unsafe.Pointer, n int32) int32 {
- return libcCall(unsafe.Pointer(funcPC(write_trampoline)), unsafe.Pointer(&fd))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(write_trampoline)), unsafe.Pointer(&fd))
}
func write_trampoline()
@@ -110,7 +113,7 @@ func pipe2(flags int32) (r, w int32, errno int32) {
p unsafe.Pointer
flags int32
}{noescape(unsafe.Pointer(&p)), flags}
- errno = libcCall(unsafe.Pointer(funcPC(pipe2_trampoline)), unsafe.Pointer(&args))
+ errno = libcCall(unsafe.Pointer(abi.FuncPCABI0(pipe2_trampoline)), unsafe.Pointer(&args))
return p[0], p[1], errno
}
func pipe2_trampoline()
@@ -118,34 +121,34 @@ func pipe2_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func setitimer(mode int32, new, old *itimerval) {
- libcCall(unsafe.Pointer(funcPC(setitimer_trampoline)), unsafe.Pointer(&mode))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(setitimer_trampoline)), unsafe.Pointer(&mode))
}
func setitimer_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func usleep(usec uint32) {
- libcCall(unsafe.Pointer(funcPC(usleep_trampoline)), unsafe.Pointer(&usec))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(usleep_trampoline)), unsafe.Pointer(&usec))
}
func usleep_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func usleep_no_g(usec uint32) {
- asmcgocall_no_g(unsafe.Pointer(funcPC(usleep_trampoline)), unsafe.Pointer(&usec))
+ asmcgocall_no_g(unsafe.Pointer(abi.FuncPCABI0(usleep_trampoline)), unsafe.Pointer(&usec))
}
//go:nosplit
//go:cgo_unsafe_args
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32 {
- return libcCall(unsafe.Pointer(funcPC(sysctl_trampoline)), unsafe.Pointer(&mib))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(sysctl_trampoline)), unsafe.Pointer(&mib))
}
func sysctl_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func fcntl(fd, cmd, arg int32) int32 {
- return libcCall(unsafe.Pointer(funcPC(fcntl_trampoline)), unsafe.Pointer(&fd))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(fcntl_trampoline)), unsafe.Pointer(&fd))
}
func fcntl_trampoline()
@@ -156,7 +159,7 @@ func nanotime1() int64 {
clock_id int32
tp unsafe.Pointer
}{_CLOCK_MONOTONIC, unsafe.Pointer(&ts)}
- libcCall(unsafe.Pointer(funcPC(clock_gettime_trampoline)), unsafe.Pointer(&args))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(clock_gettime_trampoline)), unsafe.Pointer(&args))
return ts.tv_sec*1e9 + int64(ts.tv_nsec)
}
func clock_gettime_trampoline()
@@ -168,42 +171,42 @@ func walltime() (int64, int32) {
clock_id int32
tp unsafe.Pointer
}{_CLOCK_REALTIME, unsafe.Pointer(&ts)}
- libcCall(unsafe.Pointer(funcPC(clock_gettime_trampoline)), unsafe.Pointer(&args))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(clock_gettime_trampoline)), unsafe.Pointer(&args))
return ts.tv_sec, int32(ts.tv_nsec)
}
//go:nosplit
//go:cgo_unsafe_args
func kqueue() int32 {
- return libcCall(unsafe.Pointer(funcPC(kqueue_trampoline)), nil)
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(kqueue_trampoline)), nil)
}
func kqueue_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32 {
- return libcCall(unsafe.Pointer(funcPC(kevent_trampoline)), unsafe.Pointer(&kq))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(kevent_trampoline)), unsafe.Pointer(&kq))
}
func kevent_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func sigaction(sig uint32, new *sigactiont, old *sigactiont) {
- libcCall(unsafe.Pointer(funcPC(sigaction_trampoline)), unsafe.Pointer(&sig))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(sigaction_trampoline)), unsafe.Pointer(&sig))
}
func sigaction_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func sigprocmask(how uint32, new *sigset, old *sigset) {
- libcCall(unsafe.Pointer(funcPC(sigprocmask_trampoline)), unsafe.Pointer(&how))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(sigprocmask_trampoline)), unsafe.Pointer(&how))
}
func sigprocmask_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func sigaltstack(new *stackt, old *stackt) {
- libcCall(unsafe.Pointer(funcPC(sigaltstack_trampoline)), unsafe.Pointer(&new))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(sigaltstack_trampoline)), unsafe.Pointer(&new))
}
func sigaltstack_trampoline()
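The OpenBSD wrappers in this file all share one shape: a //go:nosplit, //go:cgo_unsafe_args Go function either reuses its contiguous argument area (passing unsafe.Pointer of the first argument) or packs arguments into a local struct, then hands libcCall the ABI0 PC of an assembly trampoline plus that pointer. A hedged sketch of the struct-packing variant, with stand-in names (libcCallStub, setitimerLike) and a placeholder PC instead of a real abi.FuncPCABI0(..._trampoline) value:

package main

import (
	"fmt"
	"unsafe"
)

// setitimerArgs mirrors the argument-packing style of wrappers like mmap and
// pipe2; all names here are hypothetical.
type setitimerArgs struct {
	mode     int32
	new, old unsafe.Pointer
}

// libcCallStub stands in for the runtime's libcCall: it would jump to the
// trampoline at fnPC, which unpacks the arguments per the C convention.
func libcCallStub(fnPC uintptr, argp unsafe.Pointer) int32 {
	a := (*setitimerArgs)(argp)
	fmt.Printf("would jump to trampoline at %#x with mode=%d\n", fnPC, a.mode)
	return 0
}

func setitimerLike(mode int32, new, old unsafe.Pointer) {
	args := setitimerArgs{mode: mode, new: new, old: old}
	libcCallStub(0x401000, unsafe.Pointer(&args)) // PC value is a placeholder
}

func main() { setitimerLike(1, nil, nil) }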
diff --git a/src/runtime/sys_openbsd3.go b/src/runtime/sys_openbsd3.go
index 8d77a4b216..a917ebde61 100644
--- a/src/runtime/sys_openbsd3.go
+++ b/src/runtime/sys_openbsd3.go
@@ -7,7 +7,10 @@
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
// The X versions of syscall expect the libc call to return a 64-bit result.
// Otherwise (the non-X version) expects a 32-bit result.
@@ -20,7 +23,7 @@ import "unsafe"
//go:cgo_unsafe_args
func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
entersyscall()
- libcCall(unsafe.Pointer(funcPC(syscall)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&fn))
exitsyscall()
return
}
@@ -31,7 +34,7 @@ func syscall()
//go:cgo_unsafe_args
func syscall_syscallX(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
entersyscall()
- libcCall(unsafe.Pointer(funcPC(syscallX)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallX)), unsafe.Pointer(&fn))
exitsyscall()
return
}
@@ -42,7 +45,7 @@ func syscallX()
//go:cgo_unsafe_args
func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
entersyscall()
- libcCall(unsafe.Pointer(funcPC(syscall6)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&fn))
exitsyscall()
return
}
@@ -53,7 +56,7 @@ func syscall6()
//go:cgo_unsafe_args
func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
entersyscall()
- libcCall(unsafe.Pointer(funcPC(syscall6X)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&fn))
exitsyscall()
return
}
@@ -64,7 +67,7 @@ func syscall6X()
//go:cgo_unsafe_args
func syscall_syscall10(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) {
entersyscall()
- libcCall(unsafe.Pointer(funcPC(syscall10)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10)), unsafe.Pointer(&fn))
exitsyscall()
return
}
@@ -75,7 +78,7 @@ func syscall10()
//go:cgo_unsafe_args
func syscall_syscall10X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) {
entersyscall()
- libcCall(unsafe.Pointer(funcPC(syscall10X)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10X)), unsafe.Pointer(&fn))
exitsyscall()
return
}
@@ -85,7 +88,7 @@ func syscall10X()
//go:nosplit
//go:cgo_unsafe_args
func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
- libcCall(unsafe.Pointer(funcPC(syscall)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&fn))
return
}
@@ -93,7 +96,7 @@ func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
//go:nosplit
//go:cgo_unsafe_args
func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
- libcCall(unsafe.Pointer(funcPC(syscall6)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&fn))
return
}
@@ -101,7 +104,7 @@ func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintpt
//go:nosplit
//go:cgo_unsafe_args
func syscall_rawSyscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
- libcCall(unsafe.Pointer(funcPC(syscall6X)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&fn))
return
}
@@ -109,6 +112,6 @@ func syscall_rawSyscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintp
//go:nosplit
//go:cgo_unsafe_args
func syscall_rawSyscall10X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) {
- libcCall(unsafe.Pointer(funcPC(syscall10X)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10X)), unsafe.Pointer(&fn))
return
}
diff --git a/src/runtime/sys_openbsd_amd64.s b/src/runtime/sys_openbsd_amd64.s
index 522e98cf4f..fc89ee6cbb 100644
--- a/src/runtime/sys_openbsd_amd64.s
+++ b/src/runtime/sys_openbsd_amd64.s
@@ -58,7 +58,7 @@ TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
RET
// Called using C ABI.
-TEXT runtime·sigtramp<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·sigtramp(SB),NOSPLIT,$0
// Transition from C ABI to Go ABI.
PUSH_REGS_HOST_TO_ABI0()
diff --git a/src/runtime/sys_plan9_386.s b/src/runtime/sys_plan9_386.s
index b3d2f1376d..bdcb98e19e 100644
--- a/src/runtime/sys_plan9_386.s
+++ b/src/runtime/sys_plan9_386.s
@@ -250,3 +250,7 @@ TEXT runtime·errstr(SB),NOSPLIT,$8-8
MOVL 0(SP), AX
MOVL AX, ret_base+0(FP)
RET
+
+// never called on this platform
+TEXT ·sigpanictramp(SB),NOSPLIT,$0-0
+ UNDEF
diff --git a/src/runtime/sys_plan9_amd64.s b/src/runtime/sys_plan9_amd64.s
index 731306ab44..39fc4c68e4 100644
--- a/src/runtime/sys_plan9_amd64.s
+++ b/src/runtime/sys_plan9_amd64.s
@@ -251,3 +251,7 @@ TEXT runtime·errstr(SB),NOSPLIT,$16-16
MOVQ 0(SP), AX
MOVQ AX, ret_base+0(FP)
RET
+
+// never called on this platform
+TEXT ·sigpanictramp(SB),NOSPLIT,$0-0
+ UNDEF
diff --git a/src/runtime/sys_windows_386.s b/src/runtime/sys_windows_386.s
index 0b3933502a..cf3a439523 100644
--- a/src/runtime/sys_windows_386.s
+++ b/src/runtime/sys_windows_386.s
@@ -8,7 +8,7 @@
#include "time_windows.h"
// void runtime·asmstdcall(void *c);
-TEXT runtime·asmstdcall<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·asmstdcall(SB),NOSPLIT,$0
MOVL fn+0(FP), BX
// SetLastError(0).
@@ -147,21 +147,21 @@ done:
BYTE $0xC2; WORD $4
RET // unreached; make assembler happy
-TEXT runtime·exceptiontramp<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·exceptiontramp(SB),NOSPLIT,$0
MOVL $runtime·exceptionhandler(SB), AX
JMP sigtramp<>(SB)
-TEXT runtime·firstcontinuetramp<ABIInternal>(SB),NOSPLIT,$0-0
+TEXT runtime·firstcontinuetramp(SB),NOSPLIT,$0-0
// is never called
INT $3
-TEXT runtime·lastcontinuetramp<ABIInternal>(SB),NOSPLIT,$0-0
+TEXT runtime·lastcontinuetramp(SB),NOSPLIT,$0-0
MOVL $runtime·lastcontinuehandler(SB), AX
JMP sigtramp<>(SB)
GLOBL runtime·cbctxts(SB), NOPTR, $4
-TEXT runtime·callbackasm1<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·callbackasm1(SB),NOSPLIT,$0
MOVL 0(SP), AX // will use to find our callback context
// remove return address from stack, we are not returning to callbackasm, but to its caller.
@@ -180,7 +180,7 @@ TEXT runtime·callbackasm1<ABIInternal>(SB),NOSPLIT,$0
CLD
// determine index into runtime·cbs table
- SUBL $runtime·callbackasm<ABIInternal>(SB), AX
+ SUBL $runtime·callbackasm(SB), AX
MOVL $0, DX
MOVL $5, BX // divide by 5 because each call instruction in runtime·callbacks is 5 bytes long
DIVL BX
@@ -250,7 +250,7 @@ TEXT tstart<>(SB),NOSPLIT,$0
RET
// uint32 tstart_stdcall(M *newm);
-TEXT runtime·tstart_stdcall<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·tstart_stdcall(SB),NOSPLIT,$0
MOVL newm+0(FP), BX
PUSHL BX
diff --git a/src/runtime/sys_windows_amd64.s b/src/runtime/sys_windows_amd64.s
index e7782846b2..6cc5bba2b7 100644
--- a/src/runtime/sys_windows_amd64.s
+++ b/src/runtime/sys_windows_amd64.s
@@ -13,7 +13,7 @@
#define maxargs 18
// void runtime·asmstdcall(void *c);
-TEXT runtime·asmstdcall<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·asmstdcall(SB),NOSPLIT|NOFRAME,$0
// asmcgocall will put first argument into CX.
PUSHQ CX // save for later
MOVQ libcall_fn(CX), AX
@@ -179,15 +179,15 @@ done:
RET
-TEXT runtime·exceptiontramp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·exceptiontramp(SB),NOSPLIT|NOFRAME,$0
MOVQ $runtime·exceptionhandler(SB), AX
JMP sigtramp<>(SB)
-TEXT runtime·firstcontinuetramp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT runtime·firstcontinuetramp(SB),NOSPLIT|NOFRAME,$0-0
MOVQ $runtime·firstcontinuehandler(SB), AX
JMP sigtramp<>(SB)
-TEXT runtime·lastcontinuetramp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT runtime·lastcontinuetramp(SB),NOSPLIT|NOFRAME,$0-0
MOVQ $runtime·lastcontinuehandler(SB), AX
JMP sigtramp<>(SB)
@@ -212,7 +212,7 @@ TEXT runtime·callbackasm1(SB),NOSPLIT,$0
ADDQ $8, SP
// determine index into runtime·cbs table
- MOVQ $runtime·callbackasm<ABIInternal>(SB), DX
+ MOVQ $runtime·callbackasm(SB), DX
SUBQ DX, AX
MOVQ $0, DX
MOVQ $5, CX // divide by 5 because each call instruction in runtime·callbacks is 5 bytes long
@@ -245,7 +245,7 @@ TEXT runtime·callbackasm1(SB),NOSPLIT,$0
RET
// uint32 tstart_stdcall(M *newm);
-TEXT runtime·tstart_stdcall<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·tstart_stdcall(SB),NOSPLIT,$0
// Switch from the host ABI to the Go ABI.
PUSH_REGS_HOST_TO_ABI0()
diff --git a/src/runtime/sys_windows_arm.s b/src/runtime/sys_windows_arm.s
index 48f8c7dedf..c9e96cb652 100644
--- a/src/runtime/sys_windows_arm.s
+++ b/src/runtime/sys_windows_arm.s
@@ -10,7 +10,7 @@
// Note: For system ABI, R0-R3 are args, R4-R11 are callee-save.
// void runtime·asmstdcall(void *c);
-TEXT runtime·asmstdcall<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·asmstdcall(SB),NOSPLIT|NOFRAME,$0
MOVM.DB.W [R4, R5, R14], (R13) // push {r4, r5, lr}
MOVW R0, R4 // put libcall * in r4
MOVW R13, R5 // save stack pointer in r5
@@ -222,21 +222,21 @@ TEXT sigresume<>(SB),NOSPLIT|NOFRAME,$0
MOVW R0, R13
B (R1)
-TEXT runtime·exceptiontramp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·exceptiontramp(SB),NOSPLIT|NOFRAME,$0
MOVW $runtime·exceptionhandler(SB), R1
B sigtramp<>(SB)
-TEXT runtime·firstcontinuetramp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·firstcontinuetramp(SB),NOSPLIT|NOFRAME,$0
MOVW $runtime·firstcontinuehandler(SB), R1
B sigtramp<>(SB)
-TEXT runtime·lastcontinuetramp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·lastcontinuetramp(SB),NOSPLIT|NOFRAME,$0
MOVW $runtime·lastcontinuehandler(SB), R1
B sigtramp<>(SB)
GLOBL runtime·cbctxts(SB), NOPTR, $4
-TEXT runtime·callbackasm1<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·callbackasm1(SB),NOSPLIT|NOFRAME,$0
// On entry, the trampoline in zcallback_windows_arm.s left
// the callback index in R12 (which is volatile in the C ABI).
@@ -275,7 +275,7 @@ TEXT runtime·callbackasm1<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
B (R12) // return
// uint32 tstart_stdcall(M *newm);
-TEXT runtime·tstart_stdcall<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·tstart_stdcall(SB),NOSPLIT|NOFRAME,$0
MOVM.DB.W [R4-R11, R14], (R13) // push {r4-r11, lr}
MOVW m_g0(R0), g
diff --git a/src/runtime/sys_windows_arm64.s b/src/runtime/sys_windows_arm64.s
index 7a2e11f5ae..44145c53fb 100644
--- a/src/runtime/sys_windows_arm64.s
+++ b/src/runtime/sys_windows_arm64.s
@@ -18,7 +18,7 @@
// load_g and save_g (in tls_arm64.s) clobber R27 (REGTMP) and R0.
// void runtime·asmstdcall(void *c);
-TEXT runtime·asmstdcall<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·asmstdcall(SB),NOSPLIT|NOFRAME,$0
STP.W (R29, R30), -32(RSP) // allocate C ABI stack frame
STP (R19, R20), 16(RSP) // save old R19, R20
MOVD R0, R19 // save libcall pointer
@@ -290,21 +290,21 @@ TEXT sigresume<>(SB),NOSPLIT|NOFRAME,$0
MOVD R0, RSP
B (R1)
-TEXT runtime·exceptiontramp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
- MOVD $runtime·exceptionhandler<ABIInternal>(SB), R1
+TEXT runtime·exceptiontramp(SB),NOSPLIT|NOFRAME,$0
+ MOVD $runtime·exceptionhandler(SB), R1
B sigtramp<>(SB)
-TEXT runtime·firstcontinuetramp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
- MOVD $runtime·firstcontinuehandler<ABIInternal>(SB), R1
+TEXT runtime·firstcontinuetramp(SB),NOSPLIT|NOFRAME,$0
+ MOVD $runtime·firstcontinuehandler(SB), R1
B sigtramp<>(SB)
TEXT runtime·lastcontinuetramp(SB),NOSPLIT|NOFRAME,$0
- MOVD $runtime·lastcontinuehandler<ABIInternal>(SB), R1
+ MOVD $runtime·lastcontinuehandler(SB), R1
B sigtramp<>(SB)
GLOBL runtime·cbctxts(SB), NOPTR, $4
-TEXT runtime·callbackasm1<ABIInternal>(SB),NOSPLIT,$208-0
+TEXT runtime·callbackasm1(SB),NOSPLIT,$208-0
NO_LOCAL_POINTERS
// On entry, the trampoline in zcallback_windows_arm64.s left
@@ -339,7 +339,7 @@ TEXT runtime·callbackasm1<ABIInternal>(SB),NOSPLIT,$208-0
MOVD R0, callbackArgs_result(R13) // result
// Call cgocallback, which will call callbackWrap(frame).
- MOVD $·callbackWrap(SB), R0 // PC of function to call
+ MOVD $·callbackWrap<ABIInternal>(SB), R0 // PC of function to call, cgocallback takes an ABIInternal entry-point
MOVD R13, R1 // frame (&callbackArgs{...})
MOVD $0, R2 // context
MOVD R0, (1*8)(RSP)
@@ -356,7 +356,7 @@ TEXT runtime·callbackasm1<ABIInternal>(SB),NOSPLIT,$208-0
RET
// uint32 tstart_stdcall(M *newm);
-TEXT runtime·tstart_stdcall<ABIInternal>(SB),NOSPLIT,$96-0
+TEXT runtime·tstart_stdcall(SB),NOSPLIT,$96-0
SAVE_R19_TO_R28(-10*8)
MOVD m_g0(R0), g
diff --git a/src/runtime/syscall_solaris.go b/src/runtime/syscall_solaris.go
index 094516927f..15be8e1c61 100644
--- a/src/runtime/syscall_solaris.go
+++ b/src/runtime/syscall_solaris.go
@@ -35,6 +35,7 @@ func pipe1() // declared for vet; do NOT call
//go:nosplit
//go:linkname syscall_sysvicall6
+//go:cgo_unsafe_args
func syscall_sysvicall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
call := libcall{
fn: fn,
@@ -49,6 +50,7 @@ func syscall_sysvicall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err
//go:nosplit
//go:linkname syscall_rawsysvicall6
+//go:cgo_unsafe_args
func syscall_rawsysvicall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
call := libcall{
fn: fn,
@@ -104,6 +106,7 @@ func syscall_dup2(oldfd, newfd uintptr) (val, err uintptr) {
//go:nosplit
//go:linkname syscall_execve
+//go:cgo_unsafe_args
func syscall_execve(path, argv, envp uintptr) (err uintptr) {
call := libcall{
fn: uintptr(unsafe.Pointer(&libc_execve)),
@@ -123,6 +126,7 @@ func syscall_exit(code uintptr) {
//go:nosplit
//go:linkname syscall_fcntl
+//go:cgo_unsafe_args
func syscall_fcntl(fd, cmd, arg uintptr) (val, err uintptr) {
call := libcall{
fn: uintptr(unsafe.Pointer(&libc_fcntl)),
@@ -181,6 +185,7 @@ func syscall_getpid() (pid, err uintptr) {
//go:nosplit
//go:linkname syscall_ioctl
+//go:cgo_unsafe_args
func syscall_ioctl(fd, req, arg uintptr) (err uintptr) {
call := libcall{
fn: uintptr(unsafe.Pointer(&libc_ioctl)),
@@ -234,6 +239,7 @@ func syscall_setgid(gid uintptr) (err uintptr) {
//go:nosplit
//go:linkname syscall_setgroups
+//go:cgo_unsafe_args
func syscall_setgroups(ngid, gid uintptr) (err uintptr) {
call := libcall{
fn: uintptr(unsafe.Pointer(&libc_setgroups)),
@@ -270,6 +276,7 @@ func syscall_setuid(uid uintptr) (err uintptr) {
//go:nosplit
//go:linkname syscall_setpgid
+//go:cgo_unsafe_args
func syscall_setpgid(pid, pgid uintptr) (err uintptr) {
call := libcall{
fn: uintptr(unsafe.Pointer(&libc_setpgid)),
@@ -281,6 +288,7 @@ func syscall_setpgid(pid, pgid uintptr) (err uintptr) {
}
//go:linkname syscall_syscall
+//go:cgo_unsafe_args
func syscall_syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
call := libcall{
fn: uintptr(unsafe.Pointer(&libc_syscall)),
@@ -294,6 +302,7 @@ func syscall_syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
}
//go:linkname syscall_wait4
+//go:cgo_unsafe_args
func syscall_wait4(pid uintptr, wstatus *uint32, options uintptr, rusage unsafe.Pointer) (wpid int, err uintptr) {
call := libcall{
fn: uintptr(unsafe.Pointer(&libc_wait4)),
@@ -308,6 +317,7 @@ func syscall_wait4(pid uintptr, wstatus *uint32, options uintptr, rusage unsafe.
//go:nosplit
//go:linkname syscall_write
+//go:cgo_unsafe_args
func syscall_write(fd, buf, nbyte uintptr) (n, err uintptr) {
call := libcall{
fn: uintptr(unsafe.Pointer(&libc_write)),
diff --git a/src/runtime/syscall_windows.go b/src/runtime/syscall_windows.go
index 4763a440e7..4215d62cc1 100644
--- a/src/runtime/syscall_windows.go
+++ b/src/runtime/syscall_windows.go
@@ -232,7 +232,7 @@ func callbackasmAddr(i int) uintptr {
// followed by a branch instruction
entrySize = 8
}
- return funcPC(callbackasm) + uintptr(i*entrySize)
+ return abi.FuncPCABI0(callbackasm) + uintptr(i*entrySize)
}
const callbackMaxFrame = 64 * sys.PtrSize
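callbackasmAddr computes the entry point of the i-th stub inside the runtime·callbackasm table, which is why the base PC is now taken with abi.FuncPCABI0: the table is plain ABI0 assembly, and each stub occupies a fixed entrySize bytes (8 on arm64, where a stub is a MOVD of the callback index followed by a branch, per the comment above). A toy version of the arithmetic with made-up numbers:

package main

import "fmt"

// callbackasmAddrLike reproduces only the indexing; base stands in for
// abi.FuncPCABI0(callbackasm) and the constants below are illustrative.
func callbackasmAddrLike(base uintptr, entrySize, i int) uintptr {
	return base + uintptr(i*entrySize)
}

func main() {
	const base uintptr = 0x401000
	const entrySize = 8
	for i := 0; i < 3; i++ {
		fmt.Printf("callback %d -> entry %#x\n", i, callbackasmAddrLike(base, entrySize, i))
	}
}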
diff --git a/src/runtime/time.go b/src/runtime/time.go
index dee6a674e4..90e9b1139f 100644
--- a/src/runtime/time.go
+++ b/src/runtime/time.go
@@ -7,6 +7,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -856,7 +857,7 @@ func runOneTimer(pp *p, t *timer, now int64) {
if raceenabled {
ppcur := getg().m.p.ptr()
if ppcur.timerRaceCtx == 0 {
- ppcur.timerRaceCtx = racegostart(funcPC(runtimer) + sys.PCQuantum)
+ ppcur.timerRaceCtx = racegostart(abi.FuncPCABIInternal(runtimer) + sys.PCQuantum)
}
raceacquirectx(ppcur.timerRaceCtx, unsafe.Pointer(t))
}
diff --git a/src/runtime/time_linux_amd64.s b/src/runtime/time_linux_amd64.s
index 0dd7919896..c88e92bd0c 100644
--- a/src/runtime/time_linux_amd64.s
+++ b/src/runtime/time_linux_amd64.s
@@ -15,13 +15,7 @@
TEXT time·now(SB),NOSPLIT,$16-24
MOVQ SP, R12 // Save old SP; R12 unchanged by C code.
-#ifdef GOEXPERIMENT_regabig
MOVQ g_m(R14), BX // BX unchanged by C code.
-#else
- get_tls(CX)
- MOVQ g(CX), AX
- MOVQ g_m(AX), BX // BX unchanged by C code.
-#endif
// Store CLOCK_REALTIME results directly to return space.
LEAQ sec+0(FP), SI
@@ -38,11 +32,7 @@ TEXT time·now(SB),NOSPLIT,$16-24
MOVQ CX, m_vdsoPC(BX)
MOVQ SI, m_vdsoSP(BX)
-#ifdef GOEXPERIMENT_regabig
CMPQ R14, m_curg(BX) // Only switch if on curg.
-#else
- CMPQ AX, m_curg(BX) // Only switch if on curg.
-#endif
JNE noswitch
MOVQ m_g0(BX), DX
diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go
index 814c323634..fa41fdfe2d 100644
--- a/src/runtime/traceback.go
+++ b/src/runtime/traceback.go
@@ -21,41 +21,6 @@ import (
const usesLR = sys.MinFrameSize > 0
-// Traceback over the deferred function calls.
-// Report them like calls that have been invoked but not started executing yet.
-func tracebackdefers(gp *g, callback func(*stkframe, unsafe.Pointer) bool, v unsafe.Pointer) {
- var frame stkframe
- for d := gp._defer; d != nil; d = d.link {
- fn := d.fn
- if fn == nil {
- // Defer of nil function. Args don't matter.
- frame.pc = 0
- frame.fn = funcInfo{}
- frame.argp = 0
- frame.arglen = 0
- frame.argmap = nil
- } else {
- frame.pc = fn.fn
- f := findfunc(frame.pc)
- if !f.valid() {
- print("runtime: unknown pc in defer ", hex(frame.pc), "\n")
- throw("unknown pc")
- }
- frame.fn = f
- frame.argp = uintptr(deferArgs(d))
- var ok bool
- frame.arglen, frame.argmap, ok = getArgInfoFast(f, true)
- if !ok {
- frame.arglen, frame.argmap = getArgInfo(&frame, f, true, fn)
- }
- }
- frame.continpc = frame.pc
- if !callback((*stkframe)(noescape(unsafe.Pointer(&frame))), v) {
- return
- }
- }
-}
-
// Generic traceback. Handles runtime stack prints (pcbuf == nil),
// the runtime.Callers function (pcbuf != nil), as well as the garbage
// collector (callback != nil). A little clunky to merge these, but avoids
diff --git a/src/runtime/type.go b/src/runtime/type.go
index 335fc57f4b..52e65a3bd2 100644
--- a/src/runtime/type.go
+++ b/src/runtime/type.go
@@ -6,7 +6,10 @@
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
// tflag is documented in reflect/type.go.
//
@@ -262,7 +265,7 @@ func (t *_type) textOff(off textOff) unsafe.Pointer {
if off == -1 {
// -1 is the sentinel value for unreachable code.
// See cmd/link/internal/ld/data.go:relocsym.
- return unsafe.Pointer(funcPC(unreachableMethod))
+ return unsafe.Pointer(abi.FuncPCABIInternal(unreachableMethod))
}
base := uintptr(unsafe.Pointer(t))
var md *moduledata
diff --git a/src/runtime/wincallback.go b/src/runtime/wincallback.go
index a7a787d8f6..73f1e567ce 100644
--- a/src/runtime/wincallback.go
+++ b/src/runtime/wincallback.go
@@ -33,7 +33,7 @@ func genasm386Amd64() {
// CALL instruction in runtime·callbackasm. This determines
// which Go callback function is executed later on.
-TEXT runtime·callbackasm<ABIInternal>(SB),7,$0
+TEXT runtime·callbackasm(SB),7,$0
`)
for i := 0; i < maxCallback; i++ {
buf.WriteString("\tCALL\truntime·callbackasm1(SB)\n")
@@ -61,7 +61,7 @@ func genasmArm() {
// It then calls the Go implementation for that callback.
#include "textflag.h"
-TEXT runtime·callbackasm<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·callbackasm(SB),NOSPLIT|NOFRAME,$0
`)
for i := 0; i < maxCallback; i++ {
buf.WriteString(fmt.Sprintf("\tMOVW\t$%d, R12\n", i))
@@ -89,7 +89,7 @@ func genasmArm64() {
// It then calls the Go implementation for that callback.
#include "textflag.h"
-TEXT runtime·callbackasm<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·callbackasm(SB),NOSPLIT|NOFRAME,$0
`)
for i := 0; i < maxCallback; i++ {
buf.WriteString(fmt.Sprintf("\tMOVD\t$%d, R12\n", i))
diff --git a/src/runtime/zcallback_windows.s b/src/runtime/zcallback_windows.s
index e451c2b9d0..561527c90d 100644
--- a/src/runtime/zcallback_windows.s
+++ b/src/runtime/zcallback_windows.s
@@ -11,7 +11,7 @@
// CALL instruction in runtime·callbackasm. This determines
// which Go callback function is executed later on.
-TEXT runtime·callbackasm<ABIInternal>(SB),7,$0
+TEXT runtime·callbackasm(SB),7,$0
CALL runtime·callbackasm1(SB)
CALL runtime·callbackasm1(SB)
CALL runtime·callbackasm1(SB)
diff --git a/src/runtime/zcallback_windows_arm.s b/src/runtime/zcallback_windows_arm.s
index a73a813acb..f943d84cbf 100644
--- a/src/runtime/zcallback_windows_arm.s
+++ b/src/runtime/zcallback_windows_arm.s
@@ -9,7 +9,7 @@
// It then calls the Go implementation for that callback.
#include "textflag.h"
-TEXT runtime·callbackasm<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·callbackasm(SB),NOSPLIT|NOFRAME,$0
MOVW $0, R12
B runtime·callbackasm1(SB)
MOVW $1, R12
diff --git a/src/runtime/zcallback_windows_arm64.s b/src/runtime/zcallback_windows_arm64.s
index 2a6bda0990..69fb05788c 100644
--- a/src/runtime/zcallback_windows_arm64.s
+++ b/src/runtime/zcallback_windows_arm64.s
@@ -9,7 +9,7 @@
// It then calls the Go implementation for that callback.
#include "textflag.h"
-TEXT runtime·callbackasm<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·callbackasm(SB),NOSPLIT|NOFRAME,$0
MOVD $0, R12
B runtime·callbackasm1(SB)
MOVD $1, R12
diff --git a/test/codegen/arithmetic.go b/test/codegen/arithmetic.go
index a27a17f6e1..eb0f338036 100644
--- a/test/codegen/arithmetic.go
+++ b/test/codegen/arithmetic.go
@@ -202,7 +202,7 @@ func ConstDivs(n1 uint, n2 int) (uint, int) {
// amd64:"MOVQ\t[$]-1085102592571150095","IMULQ",-"IDIVQ"
// 386:"MOVL\t[$]-252645135","IMULL",-"IDIVL"
- // arm64:`MOVD`,`SMULH`,-`DIV`
+ // arm64:`SMULH`,-`DIV`
// arm:`MOVW`,`MUL`,-`.*udiv`
b := n2 / 17 // signed
@@ -266,7 +266,7 @@ func ConstMods(n1 uint, n2 int) (uint, int) {
// amd64:"MOVQ\t[$]-1085102592571150095","IMULQ",-"IDIVQ"
// 386:"MOVL\t[$]-252645135","IMULL",-"IDIVL"
- // arm64:`MOVD`,`SMULH`,-`DIV`
+ // arm64:`SMULH`,-`DIV`
// arm:`MOVW`,`MUL`,-`.*udiv`
b := n2 % 17 // signed
diff --git a/test/codegen/clobberdead.go b/test/codegen/clobberdead.go
index f8d964cba6..c490790bb6 100644
--- a/test/codegen/clobberdead.go
+++ b/test/codegen/clobberdead.go
@@ -1,6 +1,6 @@
// asmcheck -gcflags=-clobberdead
-// +build amd64
+// +build amd64 arm64
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
@@ -13,15 +13,18 @@ type T [2]*int // contain pointer, not SSA-able (so locals are not registerized)
var p1, p2, p3 T
func F() {
- // 3735936685 is 0xdeaddead
+ // 3735936685 is 0xdeaddead. On ARM64 R27 is REGTMP.
// clobber x, y at entry. not clobber z (stack object).
// amd64:`MOVL\t\$3735936685, ""\.x`, `MOVL\t\$3735936685, ""\.y`, -`MOVL\t\$3735936685, ""\.z`
+ // arm64:`MOVW\tR27, ""\.x`, `MOVW\tR27, ""\.y`, -`MOVW\tR27, ""\.z`
x, y, z := p1, p2, p3
addrTaken(&z)
// x is dead at the call (the value of x is loaded before the CALL), y is not
// amd64:`MOVL\t\$3735936685, ""\.x`, -`MOVL\t\$3735936685, ""\.y`
+ // arm64:`MOVW\tR27, ""\.x`, -`MOVW\tR27, ""\.y`
use(x)
// amd64:`MOVL\t\$3735936685, ""\.x`, `MOVL\t\$3735936685, ""\.y`
+ // arm64:`MOVW\tR27, ""\.x`, `MOVW\tR27, ""\.y`
use(y)
}
diff --git a/test/complit1.go b/test/complit1.go
index 7c2a4e2996..8cbcd63ee0 100644
--- a/test/complit1.go
+++ b/test/complit1.go
@@ -46,20 +46,20 @@ var (
_ = &T{0, 0, "", nil} // ok
_ = &T{i: 0, f: 0, s: "", next: {}} // ERROR "missing type in composite literal|omit types within composite literal"
_ = &T{0, 0, "", {}} // ERROR "missing type in composite literal|omit types within composite literal"
- _ = TP{i: 0, f: 0, s: "", next: {}} // ERROR "invalid composite literal type TP|omit types within composite literal"
+ _ = TP{i: 0, f: 0, s: ""} // ERROR "invalid composite literal type TP"
_ = &Ti{} // ERROR "invalid composite literal type Ti|expected.*type for composite literal"
)
type M map[T]T
var (
- _ = M{{i:1}: {i:2}}
- _ = M{T{i:1}: {i:2}}
- _ = M{{i:1}: T{i:2}}
- _ = M{T{i:1}: T{i:2}}
+ _ = M{{i: 1}: {i: 2}}
+ _ = M{T{i: 1}: {i: 2}}
+ _ = M{{i: 1}: T{i: 2}}
+ _ = M{T{i: 1}: T{i: 2}}
)
-type S struct { s [1]*M1 }
+type S struct{ s [1]*M1 }
type M1 map[S]int
-var _ = M1{{s:[1]*M1{&M1{{}:1}}}:2}
+var _ = M1{{s: [1]*M1{&M1{{}: 1}}}: 2}
diff --git a/test/ddd1.go b/test/ddd1.go
index ad49b347f4..f7381b7c94 100644
--- a/test/ddd1.go
+++ b/test/ddd1.go
@@ -17,8 +17,8 @@ var (
_ = sum(1, 2, 3)
_ = sum()
_ = sum(1.0, 2.0)
- _ = sum(1.5) // ERROR "integer"
- _ = sum("hello") // ERROR ".hello. .type untyped string. as type int|incompatible"
+ _ = sum(1.5) // ERROR "1\.5 .untyped float constant. as int|integer"
+ _ = sum("hello") // ERROR ".hello. (.untyped string constant. as int|.type untyped string. as type int)|incompatible"
_ = sum([]int{1}) // ERROR "\[\]int{...}.*as type int|incompatible"
)
@@ -27,9 +27,9 @@ func tuple() (int, int, int) { return 1, 2, 3 }
var (
_ = sum(tuple())
- _ = sum(tuple()...) // ERROR "multiple-value"
+ _ = sum(tuple()...) // ERROR "\.{3} with 3-valued|multiple-value"
_ = sum3(tuple())
- _ = sum3(tuple()...) // ERROR "multiple-value" ERROR "invalid use of .*[.][.][.]"
+ _ = sum3(tuple()...) // ERROR "\.{3} in call to non-variadic|multiple-value|invalid use of .*[.][.][.]"
)
type T []T
@@ -60,5 +60,5 @@ func bad(args ...int) {
_ = [...]byte("foo") // ERROR "[.][.][.]"
_ = [...][...]int{{1,2,3},{4,5,6}} // ERROR "[.][.][.]"
- Foo(x...) // ERROR "invalid use of .*[.][.][.]"
+ Foo(x...) // ERROR "\.{3} in call to non-variadic|invalid use of .*[.][.][.]"
}
diff --git a/test/escape2.go b/test/escape2.go
index b9b723d866..04ab635aa5 100644
--- a/test/escape2.go
+++ b/test/escape2.go
@@ -59,7 +59,7 @@ func foo8(xx, yy *int) int { // ERROR "xx does not escape$" "yy does not escape$
return *xx
}
-func foo9(xx, yy *int) *int { // ERROR "leaking param: xx to result ~r2 level=0$" "leaking param: yy to result ~r2 level=0$"
+func foo9(xx, yy *int) *int { // ERROR "leaking param: xx to result ~r0 level=0$" "leaking param: yy to result ~r0 level=0$"
xx = yy
return xx
}
@@ -343,11 +343,11 @@ func indaddr1(x int) *int { // ERROR "moved to heap: x$"
return &x
}
-func indaddr2(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func indaddr2(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
return *&x
}
-func indaddr3(x *int32) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func indaddr3(x *int32) *int { // ERROR "leaking param: x to result ~r0 level=0$"
return *(**int)(unsafe.Pointer(&x))
}
@@ -374,11 +374,11 @@ func float64bitsptr(f float64) *uint64 { // ERROR "moved to heap: f$"
return (*uint64)(unsafe.Pointer(&f))
}
-func float64ptrbitsptr(f *float64) *uint64 { // ERROR "leaking param: f to result ~r1 level=0$"
+func float64ptrbitsptr(f *float64) *uint64 { // ERROR "leaking param: f to result ~r0 level=0$"
return (*uint64)(unsafe.Pointer(f))
}
-func typesw(i interface{}) *int { // ERROR "leaking param: i to result ~r1 level=0$"
+func typesw(i interface{}) *int { // ERROR "leaking param: i to result ~r0 level=0$"
switch val := i.(type) {
case *int:
return val
@@ -389,7 +389,7 @@ func typesw(i interface{}) *int { // ERROR "leaking param: i to result ~r1 level
return nil
}
-func exprsw(i *int) *int { // ERROR "leaking param: i to result ~r1 level=0$"
+func exprsw(i *int) *int { // ERROR "leaking param: i to result ~r0 level=0$"
switch j := i; *j + 110 {
case 12:
return j
@@ -401,7 +401,7 @@ func exprsw(i *int) *int { // ERROR "leaking param: i to result ~r1 level=0$"
}
// assigning to an array element is like assigning to the array
-func foo60(i *int) *int { // ERROR "leaking param: i to result ~r1 level=0$"
+func foo60(i *int) *int { // ERROR "leaking param: i to result ~r0 level=0$"
var a [12]*int
a[0] = i
return a[1]
@@ -414,7 +414,7 @@ func foo60a(i *int) *int { // ERROR "i does not escape$"
}
// assigning to a struct field is like assigning to the struct
-func foo61(i *int) *int { // ERROR "leaking param: i to result ~r1 level=0$"
+func foo61(i *int) *int { // ERROR "leaking param: i to result ~r0 level=0$"
type S struct {
a, b *int
}
@@ -611,11 +611,11 @@ func foo74c() {
}
}
-func myprint(y *int, x ...interface{}) *int { // ERROR "leaking param: y to result ~r2 level=0$" "x does not escape$"
+func myprint(y *int, x ...interface{}) *int { // ERROR "leaking param: y to result ~r0 level=0$" "x does not escape$"
return y
}
-func myprint1(y *int, x ...interface{}) *interface{} { // ERROR "leaking param: x to result ~r2 level=0$" "y does not escape$"
+func myprint1(y *int, x ...interface{}) *interface{} { // ERROR "leaking param: x to result ~r0 level=0$" "y does not escape$"
return &x[0]
}
@@ -770,7 +770,7 @@ func foo91(x *int) map[*int]*int { // ERROR "leaking param: x$"
return map[*int]*int{x: nil} // ERROR "map\[\*int\]\*int{...} escapes to heap$"
}
-func foo92(x *int) [2]*int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo92(x *int) [2]*int { // ERROR "leaking param: x to result ~r0 level=0$"
return [2]*int{x, nil}
}
@@ -783,7 +783,7 @@ func foo93(c chan *int) *int { // ERROR "c does not escape$"
}
// does not leak m
-func foo94(m map[*int]*int, b bool) *int { // ERROR "leaking param: m to result ~r2 level=1"
+func foo94(m map[*int]*int, b bool) *int { // ERROR "leaking param: m to result ~r0 level=1"
for k, v := range m {
if b {
return k
@@ -799,12 +799,12 @@ func foo95(m map[*int]*int, x *int) { // ERROR "m does not escape$" "leaking par
}
// does not leak m but does leak content
-func foo96(m []*int) *int { // ERROR "leaking param: m to result ~r1 level=1"
+func foo96(m []*int) *int { // ERROR "leaking param: m to result ~r0 level=1"
return m[0]
}
// does leak m
-func foo97(m [1]*int) *int { // ERROR "leaking param: m to result ~r1 level=0$"
+func foo97(m [1]*int) *int { // ERROR "leaking param: m to result ~r0 level=0$"
return m[0]
}
@@ -814,12 +814,12 @@ func foo98(m map[int]*int) *int { // ERROR "m does not escape$"
}
// does leak m
-func foo99(m *[1]*int) []*int { // ERROR "leaking param: m to result ~r1 level=0$"
+func foo99(m *[1]*int) []*int { // ERROR "leaking param: m to result ~r0 level=0$"
return m[:]
}
// does not leak m
-func foo100(m []*int) *int { // ERROR "leaking param: m to result ~r1 level=1"
+func foo100(m []*int) *int { // ERROR "leaking param: m to result ~r0 level=1"
for _, v := range m {
return v
}
@@ -827,7 +827,7 @@ func foo100(m []*int) *int { // ERROR "leaking param: m to result ~r1 level=1"
}
// does leak m
-func foo101(m [1]*int) *int { // ERROR "leaking param: m to result ~r1 level=0$"
+func foo101(m [1]*int) *int { // ERROR "leaking param: m to result ~r0 level=0$"
for _, v := range m {
return v
}
@@ -890,27 +890,27 @@ func foo110(x *int) *int { // ERROR "leaking param: x$"
return m[nil]
}
-func foo111(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0"
+func foo111(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0"
m := []*int{x} // ERROR "\[\]\*int{...} does not escape$"
return m[0]
}
-func foo112(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo112(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
m := [1]*int{x}
return m[0]
}
-func foo113(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo113(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
m := Bar{ii: x}
return m.ii
}
-func foo114(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo114(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
m := &Bar{ii: x} // ERROR "&Bar{...} does not escape$"
return m.ii
}
-func foo115(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo115(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
return (*int)(unsafe.Pointer(uintptr(unsafe.Pointer(x)) + 1))
}
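
Note (illustrative, not part of this CL): the ~rN renumbering that runs through these escape-analysis tests is consistent with anonymous result parameters now being counted among the results only, starting at ~r0, instead of continuing the input-parameter numbering. A minimal sketch, assuming the usual -m escape diagnostics; function names here are made up for illustration:

// escape_sketch.go - illustrative only. Build with: go build -gcflags=-m escape_sketch.go
package p

// One pointer input, one anonymous pointer result.
// Old-style diagnostic: "leaking param: p to result ~r1 level=0"
//   (results continued the input-parameter count, so the sole result was ~r1).
// New-style diagnostic: "leaking param: p to result ~r0 level=0"
//   (results are numbered on their own, starting at ~r0).
func leak(p *int) *int {
	return p
}

// Two inputs, two anonymous results: the results are reported as ~r0 and ~r1
// regardless of how many inputs precede them (previously ~r2 and ~r3).
func leak2(p, q *int) (*int, *int) {
	return p, q
}

This matches the leaktoret22 updates in escape5.go below, where ~r2/~r3 become ~r0/~r1.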
diff --git a/test/escape2n.go b/test/escape2n.go
index 7c8208aa73..01a25795f4 100644
--- a/test/escape2n.go
+++ b/test/escape2n.go
@@ -59,7 +59,7 @@ func foo8(xx, yy *int) int { // ERROR "xx does not escape$" "yy does not escape$
return *xx
}
-func foo9(xx, yy *int) *int { // ERROR "leaking param: xx to result ~r2 level=0$" "leaking param: yy to result ~r2 level=0$"
+func foo9(xx, yy *int) *int { // ERROR "leaking param: xx to result ~r0 level=0$" "leaking param: yy to result ~r0 level=0$"
xx = yy
return xx
}
@@ -343,11 +343,11 @@ func indaddr1(x int) *int { // ERROR "moved to heap: x$"
return &x
}
-func indaddr2(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func indaddr2(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
return *&x
}
-func indaddr3(x *int32) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func indaddr3(x *int32) *int { // ERROR "leaking param: x to result ~r0 level=0$"
return *(**int)(unsafe.Pointer(&x))
}
@@ -374,11 +374,11 @@ func float64bitsptr(f float64) *uint64 { // ERROR "moved to heap: f$"
return (*uint64)(unsafe.Pointer(&f))
}
-func float64ptrbitsptr(f *float64) *uint64 { // ERROR "leaking param: f to result ~r1 level=0$"
+func float64ptrbitsptr(f *float64) *uint64 { // ERROR "leaking param: f to result ~r0 level=0$"
return (*uint64)(unsafe.Pointer(f))
}
-func typesw(i interface{}) *int { // ERROR "leaking param: i to result ~r1 level=0$"
+func typesw(i interface{}) *int { // ERROR "leaking param: i to result ~r0 level=0$"
switch val := i.(type) {
case *int:
return val
@@ -389,7 +389,7 @@ func typesw(i interface{}) *int { // ERROR "leaking param: i to result ~r1 level
return nil
}
-func exprsw(i *int) *int { // ERROR "leaking param: i to result ~r1 level=0$"
+func exprsw(i *int) *int { // ERROR "leaking param: i to result ~r0 level=0$"
switch j := i; *j + 110 {
case 12:
return j
@@ -401,7 +401,7 @@ func exprsw(i *int) *int { // ERROR "leaking param: i to result ~r1 level=0$"
}
// assigning to an array element is like assigning to the array
-func foo60(i *int) *int { // ERROR "leaking param: i to result ~r1 level=0$"
+func foo60(i *int) *int { // ERROR "leaking param: i to result ~r0 level=0$"
var a [12]*int
a[0] = i
return a[1]
@@ -414,7 +414,7 @@ func foo60a(i *int) *int { // ERROR "i does not escape$"
}
// assigning to a struct field is like assigning to the struct
-func foo61(i *int) *int { // ERROR "leaking param: i to result ~r1 level=0$"
+func foo61(i *int) *int { // ERROR "leaking param: i to result ~r0 level=0$"
type S struct {
a, b *int
}
@@ -611,11 +611,11 @@ func foo74c() {
}
}
-func myprint(y *int, x ...interface{}) *int { // ERROR "leaking param: y to result ~r2 level=0$" "x does not escape$"
+func myprint(y *int, x ...interface{}) *int { // ERROR "leaking param: y to result ~r0 level=0$" "x does not escape$"
return y
}
-func myprint1(y *int, x ...interface{}) *interface{} { // ERROR "leaking param: x to result ~r2 level=0$" "y does not escape$"
+func myprint1(y *int, x ...interface{}) *interface{} { // ERROR "leaking param: x to result ~r0 level=0$" "y does not escape$"
return &x[0]
}
@@ -770,7 +770,7 @@ func foo91(x *int) map[*int]*int { // ERROR "leaking param: x$"
return map[*int]*int{x: nil} // ERROR "map\[\*int\]\*int{...} escapes to heap$"
}
-func foo92(x *int) [2]*int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo92(x *int) [2]*int { // ERROR "leaking param: x to result ~r0 level=0$"
return [2]*int{x, nil}
}
@@ -783,7 +783,7 @@ func foo93(c chan *int) *int { // ERROR "c does not escape$"
}
// does not leak m
-func foo94(m map[*int]*int, b bool) *int { // ERROR "leaking param: m to result ~r2 level=1"
+func foo94(m map[*int]*int, b bool) *int { // ERROR "leaking param: m to result ~r0 level=1"
for k, v := range m {
if b {
return k
@@ -799,12 +799,12 @@ func foo95(m map[*int]*int, x *int) { // ERROR "m does not escape$" "leaking par
}
// does not leak m but does leak content
-func foo96(m []*int) *int { // ERROR "leaking param: m to result ~r1 level=1"
+func foo96(m []*int) *int { // ERROR "leaking param: m to result ~r0 level=1"
return m[0]
}
// does leak m
-func foo97(m [1]*int) *int { // ERROR "leaking param: m to result ~r1 level=0$"
+func foo97(m [1]*int) *int { // ERROR "leaking param: m to result ~r0 level=0$"
return m[0]
}
@@ -814,12 +814,12 @@ func foo98(m map[int]*int) *int { // ERROR "m does not escape$"
}
// does leak m
-func foo99(m *[1]*int) []*int { // ERROR "leaking param: m to result ~r1 level=0$"
+func foo99(m *[1]*int) []*int { // ERROR "leaking param: m to result ~r0 level=0$"
return m[:]
}
// does not leak m
-func foo100(m []*int) *int { // ERROR "leaking param: m to result ~r1 level=1"
+func foo100(m []*int) *int { // ERROR "leaking param: m to result ~r0 level=1"
for _, v := range m {
return v
}
@@ -827,7 +827,7 @@ func foo100(m []*int) *int { // ERROR "leaking param: m to result ~r1 level=1"
}
// does leak m
-func foo101(m [1]*int) *int { // ERROR "leaking param: m to result ~r1 level=0$"
+func foo101(m [1]*int) *int { // ERROR "leaking param: m to result ~r0 level=0$"
for _, v := range m {
return v
}
@@ -890,27 +890,27 @@ func foo110(x *int) *int { // ERROR "leaking param: x$"
return m[nil]
}
-func foo111(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0"
+func foo111(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0"
m := []*int{x} // ERROR "\[\]\*int{...} does not escape$"
return m[0]
}
-func foo112(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo112(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
m := [1]*int{x}
return m[0]
}
-func foo113(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo113(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
m := Bar{ii: x}
return m.ii
}
-func foo114(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo114(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
m := &Bar{ii: x} // ERROR "&Bar{...} does not escape$"
return m.ii
}
-func foo115(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo115(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
return (*int)(unsafe.Pointer(uintptr(unsafe.Pointer(x)) + 1))
}
diff --git a/test/escape5.go b/test/escape5.go
index 82be2c38e7..73acfb46a9 100644
--- a/test/escape5.go
+++ b/test/escape5.go
@@ -22,19 +22,19 @@ func leaktoret(p *int) *int { // ERROR "leaking param: p to result"
return p
}
-func leaktoret2(p *int) (*int, *int) { // ERROR "leaking param: p to result ~r1" "leaking param: p to result ~r2"
+func leaktoret2(p *int) (*int, *int) { // ERROR "leaking param: p to result ~r0" "leaking param: p to result ~r1"
return p, p
}
-func leaktoret22(p, q *int) (*int, *int) { // ERROR "leaking param: p to result ~r2" "leaking param: q to result ~r3"
+func leaktoret22(p, q *int) (*int, *int) { // ERROR "leaking param: p to result ~r0" "leaking param: q to result ~r1"
return p, q
}
-func leaktoret22b(p, q *int) (*int, *int) { // ERROR "leaking param: p to result ~r3" "leaking param: q to result ~r2"
+func leaktoret22b(p, q *int) (*int, *int) { // ERROR "leaking param: p to result ~r1" "leaking param: q to result ~r0"
return leaktoret22(q, p)
}
-func leaktoret22c(p, q *int) (*int, *int) { // ERROR "leaking param: p to result ~r3" "leaking param: q to result ~r2"
+func leaktoret22c(p, q *int) (*int, *int) { // ERROR "leaking param: p to result ~r1" "leaking param: q to result ~r0"
r, s := leaktoret22(q, p)
return r, s
}
diff --git a/test/escape_array.go b/test/escape_array.go
index 0d07fd861f..83062c9436 100644
--- a/test/escape_array.go
+++ b/test/escape_array.go
@@ -12,15 +12,15 @@ var Ssink *string
type U [2]*string
-func bar(a, b *string) U { // ERROR "leaking param: a to result ~r2 level=0$" "leaking param: b to result ~r2 level=0$"
+func bar(a, b *string) U { // ERROR "leaking param: a to result ~r0 level=0$" "leaking param: b to result ~r0 level=0$"
return U{a, b}
}
-func foo(x U) U { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo(x U) U { // ERROR "leaking param: x to result ~r0 level=0$"
return U{x[1], x[0]}
}
-func bff(a, b *string) U { // ERROR "leaking param: a to result ~r2 level=0$" "leaking param: b to result ~r2 level=0$"
+func bff(a, b *string) U { // ERROR "leaking param: a to result ~r0 level=0$" "leaking param: b to result ~r0 level=0$"
return foo(foo(bar(a, b)))
}
@@ -41,27 +41,27 @@ func tbff2() *string {
return u[1]
}
-func car(x U) *string { // ERROR "leaking param: x to result ~r1 level=0$"
+func car(x U) *string { // ERROR "leaking param: x to result ~r0 level=0$"
return x[0]
}
// BAD: need fine-grained analysis to track x[0] and x[1] differently.
-func fun(x U, y *string) *string { // ERROR "leaking param: x to result ~r2 level=0$" "leaking param: y to result ~r2 level=0$"
+func fun(x U, y *string) *string { // ERROR "leaking param: x to result ~r0 level=0$" "leaking param: y to result ~r0 level=0$"
x[0] = y
return x[1]
}
-func fup(x *U, y *string) *string { // ERROR "leaking param: x to result ~r2 level=1$" "leaking param: y$"
+func fup(x *U, y *string) *string { // ERROR "leaking param: x to result ~r0 level=1$" "leaking param: y$"
x[0] = y // leaking y to heap is intended
return x[1]
}
-func fum(x *U, y **string) *string { // ERROR "leaking param: x to result ~r2 level=1$" "leaking param content: y$"
+func fum(x *U, y **string) *string { // ERROR "leaking param: x to result ~r0 level=1$" "leaking param content: y$"
x[0] = *y
return x[1]
}
-func fuo(x *U, y *U) *string { // ERROR "leaking param: x to result ~r2 level=1$" "leaking param content: y$"
+func fuo(x *U, y *U) *string { // ERROR "leaking param: x to result ~r0 level=1$" "leaking param content: y$"
x[0] = y[0]
return x[1]
}
diff --git a/test/escape_calls.go b/test/escape_calls.go
index 9e1db5426e..aa7c7f516c 100644
--- a/test/escape_calls.go
+++ b/test/escape_calls.go
@@ -11,7 +11,7 @@
package foo
-func f(buf []byte) []byte { // ERROR "leaking param: buf to result ~r1 level=0$"
+func f(buf []byte) []byte { // ERROR "leaking param: buf to result ~r0 level=0$"
return buf
}
diff --git a/test/escape_closure.go b/test/escape_closure.go
index 9152319fe0..bd6c025476 100644
--- a/test/escape_closure.go
+++ b/test/escape_closure.go
@@ -44,7 +44,7 @@ func ClosureCallArgs3() {
func ClosureCallArgs4() {
x := 0
- _ = func(p *int) *int { // ERROR "leaking param: p to result ~r1" "func literal does not escape"
+ _ = func(p *int) *int { // ERROR "leaking param: p to result ~r0" "func literal does not escape"
return p
}(&x)
}
@@ -111,7 +111,7 @@ func ClosureCallArgs11() {
func ClosureCallArgs12() {
x := 0
- defer func(p *int) *int { // ERROR "leaking param: p to result ~r1" "func literal does not escape"
+ defer func(p *int) *int { // ERROR "leaking param: p to result ~r0" "func literal does not escape"
return p
}(&x)
}
@@ -126,7 +126,7 @@ func ClosureCallArgs13() {
func ClosureCallArgs14() {
x := 0
p := &x
- _ = func(p **int) *int { // ERROR "leaking param: p to result ~r1 level=1" "func literal does not escape"
+ _ = func(p **int) *int { // ERROR "leaking param: p to result ~r0 level=1" "func literal does not escape"
return *p
}(&p)
}
@@ -145,7 +145,7 @@ func ClosureLeak1(s string) string { // ERROR "s does not escape"
}
// See #14409 -- returning part of captured var leaks it.
-func ClosureLeak1a(a ...string) string { // ERROR "leaking param: a to result ~r1 level=1$"
+func ClosureLeak1a(a ...string) string { // ERROR "leaking param: a to result ~r0 level=1$"
return func() string { // ERROR "func literal does not escape"
return a[0]
}()
diff --git a/test/escape_param.go b/test/escape_param.go
index dc93f689cf..b630bae88f 100644
--- a/test/escape_param.go
+++ b/test/escape_param.go
@@ -16,7 +16,7 @@ func zero() int { return 0 }
var sink interface{}
// in -> out
-func param0(p *int) *int { // ERROR "leaking param: p to result ~r1"
+func param0(p *int) *int { // ERROR "leaking param: p to result ~r0"
return p
}
@@ -31,7 +31,7 @@ func caller0b() {
}
// in, in -> out, out
-func param1(p1, p2 *int) (*int, *int) { // ERROR "leaking param: p1 to result ~r2" "leaking param: p2 to result ~r3"
+func param1(p1, p2 *int) (*int, *int) { // ERROR "leaking param: p1 to result ~r0" "leaking param: p2 to result ~r1"
return p1, p2
}
@@ -222,7 +222,7 @@ func caller8() {
}
// *in -> out
-func param9(p ***int) **int { // ERROR "leaking param: p to result ~r1 level=1"
+func param9(p ***int) **int { // ERROR "leaking param: p to result ~r0 level=1"
return *p
}
@@ -241,7 +241,7 @@ func caller9b() {
}
// **in -> out
-func param10(p ***int) *int { // ERROR "leaking param: p to result ~r1 level=2"
+func param10(p ***int) *int { // ERROR "leaking param: p to result ~r0 level=2"
return **p
}
@@ -436,6 +436,6 @@ func param14a(x [4]*int) interface{} { // ERROR "leaking param: x$"
// Convert to a direct interface, does not need an allocation.
// So x only leaks to result.
-func param14b(x *int) interface{} { // ERROR "leaking param: x to result ~r1 level=0"
+func param14b(x *int) interface{} { // ERROR "leaking param: x to result ~r0 level=0"
return x
}
diff --git a/test/escape_runtime_atomic.go b/test/escape_runtime_atomic.go
index 62e8fede27..30d1d0c0c1 100644
--- a/test/escape_runtime_atomic.go
+++ b/test/escape_runtime_atomic.go
@@ -13,8 +13,8 @@ import (
"unsafe"
)
-// BAD: should always be "leaking param: addr to result ~r1 level=1$".
-func Loadp(addr unsafe.Pointer) unsafe.Pointer { // ERROR "leaking param: addr( to result ~r1 level=1)?$"
+// BAD: should always be "leaking param: addr to result ~r0 level=1$".
+func Loadp(addr unsafe.Pointer) unsafe.Pointer { // ERROR "leaking param: addr( to result ~r0 level=1)?$"
return atomic.Loadp(addr)
}
diff --git a/test/escape_slice.go b/test/escape_slice.go
index d60414736c..055b60be41 100644
--- a/test/escape_slice.go
+++ b/test/escape_slice.go
@@ -101,7 +101,7 @@ func slice11() {
_ = s
}
-func slice12(x []int) *[1]int { // ERROR "leaking param: x to result ~r1 level=0$"
+func slice12(x []int) *[1]int { // ERROR "leaking param: x to result ~r0 level=0$"
return (*[1]int)(x)
}
@@ -110,7 +110,7 @@ func envForDir(dir string) []string { // ERROR "dir does not escape"
return mergeEnvLists([]string{"PWD=" + dir}, env) // ERROR ".PWD=. \+ dir escapes to heap" "\[\]string{...} does not escape"
}
-func mergeEnvLists(in, out []string) []string { // ERROR "leaking param content: in" "leaking param content: out" "leaking param: out to result ~r2 level=0"
+func mergeEnvLists(in, out []string) []string { // ERROR "leaking param content: in" "leaking param content: out" "leaking param: out to result ~r0 level=0"
NextVar:
for _, inkv := range in {
k := strings.SplitAfterN(inkv, "=", 2)[0]
diff --git a/test/escape_struct_return.go b/test/escape_struct_return.go
index 222ef8bc22..a42ae1e8c9 100644
--- a/test/escape_struct_return.go
+++ b/test/escape_struct_return.go
@@ -15,11 +15,11 @@ type U struct {
_spp **string
}
-func A(sp *string, spp **string) U { // ERROR "leaking param: sp to result ~r2 level=0$" "leaking param: spp to result ~r2 level=0$"
+func A(sp *string, spp **string) U { // ERROR "leaking param: sp to result ~r0 level=0$" "leaking param: spp to result ~r0 level=0$"
return U{sp, spp}
}
-func B(spp **string) U { // ERROR "leaking param: spp to result ~r1 level=0$"
+func B(spp **string) U { // ERROR "leaking param: spp to result ~r0 level=0$"
return U{*spp, spp}
}
diff --git a/test/escape_unsafe.go b/test/escape_unsafe.go
index b34beacccb..cec6674a14 100644
--- a/test/escape_unsafe.go
+++ b/test/escape_unsafe.go
@@ -15,7 +15,7 @@ import (
// (1) Conversion of a *T1 to Pointer to *T2.
-func convert(p *float64) *uint64 { // ERROR "leaking param: p to result ~r1 level=0$"
+func convert(p *float64) *uint64 { // ERROR "leaking param: p to result ~r0 level=0$"
return (*uint64)(unsafe.Pointer(p))
}
@@ -39,12 +39,12 @@ func arithMask() unsafe.Pointer {
// (5) Conversion of the result of reflect.Value.Pointer or
// reflect.Value.UnsafeAddr from uintptr to Pointer.
-// BAD: should be "leaking param: p to result ~r1 level=0$"
+// BAD: should be "leaking param: p to result ~r0 level=0$"
func valuePointer(p *int) unsafe.Pointer { // ERROR "leaking param: p$"
return unsafe.Pointer(reflect.ValueOf(p).Pointer())
}
-// BAD: should be "leaking param: p to result ~r1 level=0$"
+// BAD: should be "leaking param: p to result ~r0 level=0$"
func valueUnsafeAddr(p *int) unsafe.Pointer { // ERROR "leaking param: p$"
return unsafe.Pointer(reflect.ValueOf(p).Elem().UnsafeAddr())
}
@@ -52,11 +52,11 @@ func valueUnsafeAddr(p *int) unsafe.Pointer { // ERROR "leaking param: p$"
// (6) Conversion of a reflect.SliceHeader or reflect.StringHeader
// Data field to or from Pointer.
-func fromSliceData(s []int) unsafe.Pointer { // ERROR "leaking param: s to result ~r1 level=0$"
+func fromSliceData(s []int) unsafe.Pointer { // ERROR "leaking param: s to result ~r0 level=0$"
return unsafe.Pointer((*reflect.SliceHeader)(unsafe.Pointer(&s)).Data)
}
-func fromStringData(s string) unsafe.Pointer { // ERROR "leaking param: s to result ~r1 level=0$"
+func fromStringData(s string) unsafe.Pointer { // ERROR "leaking param: s to result ~r0 level=0$"
return unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&s)).Data)
}
diff --git a/test/fixedbugs/bug195.go b/test/fixedbugs/bug195.go
index 94f61fff7f..6d8578d6cb 100644
--- a/test/fixedbugs/bug195.go
+++ b/test/fixedbugs/bug195.go
@@ -1,4 +1,4 @@
-// errorcheck
+// errorcheck -lang=go1.17
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/bug248.dir/bug2.go b/test/fixedbugs/bug248.dir/bug2.go
index c0fdecfdb7..92a7974679 100644
--- a/test/fixedbugs/bug248.dir/bug2.go
+++ b/test/fixedbugs/bug248.dir/bug2.go
@@ -50,8 +50,8 @@ var p0i2 p1.I = t0(0) // ERROR "does not implement|incompatible"
func foobar() {
// check that cannot assign one to the other,
// but can convert.
- v0 = v1 // ERROR "assign"
- v1 = v0 // ERROR "assign"
+ v0 = v1 // ERROR "assign|cannot use"
+ v1 = v0 // ERROR "assign|cannot use"
v0 = p0.T(v1)
v1 = p1.T(v0)
diff --git a/test/fixedbugs/bug345.dir/main.go b/test/fixedbugs/bug345.dir/main.go
index b77a2fad5f..a53d3e8586 100644
--- a/test/fixedbugs/bug345.dir/main.go
+++ b/test/fixedbugs/bug345.dir/main.go
@@ -23,7 +23,7 @@ func main() {
// main.go:27: cannot use &x (type *"io".SectionReader) as type *"/Users/rsc/g/go/test/fixedbugs/bug345.dir/io".SectionReader in function argument
var w io.Writer
- bufio.NewWriter(w) // ERROR "[\w.]+[^.]/io|has incompatible type"
+ bufio.NewWriter(w) // ERROR "[\w.]+[^.]/io|has incompatible type|cannot use"
var x goio.SectionReader
- io.SR(&x) // ERROR "[\w.]+[^.]/io|has incompatible type"
+ io.SR(&x) // ERROR "[\w.]+[^.]/io|has incompatible type|cannot use"
}
diff --git a/test/fixedbugs/bug460.dir/b.go b/test/fixedbugs/bug460.dir/b.go
index ef646946cf..5d388fc413 100644
--- a/test/fixedbugs/bug460.dir/b.go
+++ b/test/fixedbugs/bug460.dir/b.go
@@ -9,9 +9,9 @@ import "./a"
var x a.Foo
func main() {
- x.int = 20 // ERROR "unexported field"
- x.int8 = 20 // ERROR "unexported field"
- x.error = nil // ERROR "unexported field"
- x.rune = 'a' // ERROR "unexported field"
- x.byte = 20 // ERROR "unexported field"
+ x.int = 20 // ERROR "unexported field|undefined"
+ x.int8 = 20 // ERROR "unexported field|undefined"
+ x.error = nil // ERROR "unexported field|undefined"
+ x.rune = 'a' // ERROR "unexported field|undefined"
+ x.byte = 20 // ERROR "unexported field|undefined"
}
diff --git a/test/fixedbugs/issue10975.go b/test/fixedbugs/issue10975.go
index 89ef23c1a8..876ea58ef9 100644
--- a/test/fixedbugs/issue10975.go
+++ b/test/fixedbugs/issue10975.go
@@ -1,4 +1,4 @@
-// errorcheck
+// errorcheck -lang=go1.17
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue11614.go b/test/fixedbugs/issue11614.go
index de15f9827f..6ea463b7fe 100644
--- a/test/fixedbugs/issue11614.go
+++ b/test/fixedbugs/issue11614.go
@@ -1,4 +1,4 @@
-// errorcheck
+// errorcheck -lang=go1.17
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue12006.go b/test/fixedbugs/issue12006.go
index 0a2ef8dad0..e878bc48e2 100644
--- a/test/fixedbugs/issue12006.go
+++ b/test/fixedbugs/issue12006.go
@@ -87,7 +87,7 @@ func TFooI() {
FooI(a, b, c) // ERROR "a escapes to heap" "b escapes to heap" "... argument does not escape"
}
-func FooJ(args ...interface{}) *int32 { // ERROR "leaking param: args to result ~r1 level=1"
+func FooJ(args ...interface{}) *int32 { // ERROR "leaking param: args to result ~r0 level=1"
for i := 0; i < len(args); i++ {
switch x := args[i].(type) {
case nil:
@@ -123,7 +123,7 @@ type fakeSlice struct {
a *[4]interface{}
}
-func FooK(args fakeSlice) *int32 { // ERROR "leaking param: args to result ~r1 level=1"
+func FooK(args fakeSlice) *int32 { // ERROR "leaking param: args to result ~r0 level=1"
for i := 0; i < args.l; i++ {
switch x := (*args.a)[i].(type) {
case nil:
@@ -148,7 +148,7 @@ func TFooK2() {
isink = FooK(fs)
}
-func FooL(args []interface{}) *int32 { // ERROR "leaking param: args to result ~r1 level=1"
+func FooL(args []interface{}) *int32 { // ERROR "leaking param: args to result ~r0 level=1"
for i := 0; i < len(args); i++ {
switch x := args[i].(type) {
case nil:
diff --git a/test/fixedbugs/issue12588.go b/test/fixedbugs/issue12588.go
index 950ef36e20..dc8111198c 100644
--- a/test/fixedbugs/issue12588.go
+++ b/test/fixedbugs/issue12588.go
@@ -35,7 +35,7 @@ func g(a *A) int { // ERROR "a does not escape"
return 0
}
-func h(a *B) *uint64 { // ERROR "leaking param: a to result ~r1 level=1"
+func h(a *B) *uint64 { // ERROR "leaking param: a to result ~r0 level=1"
for i, x := range &a.b {
if i == 0 {
return x
@@ -44,7 +44,7 @@ func h(a *B) *uint64 { // ERROR "leaking param: a to result ~r1 level=1"
return nil
}
-func h2(a *B) *uint64 { // ERROR "leaking param: a to result ~r1 level=1"
+func h2(a *B) *uint64 { // ERROR "leaking param: a to result ~r0 level=1"
p := &a.b
for i, x := range p {
if i == 0 {
@@ -55,7 +55,7 @@ func h2(a *B) *uint64 { // ERROR "leaking param: a to result ~r1 level=1"
}
// Seems like below should be level=1, not 0.
-func k(a B) *uint64 { // ERROR "leaking param: a to result ~r1 level=0"
+func k(a B) *uint64 { // ERROR "leaking param: a to result ~r0 level=0"
for i, x := range &a.b {
if i == 0 {
return x
diff --git a/test/fixedbugs/issue14999.go b/test/fixedbugs/issue14999.go
index b648441fc2..a25a50e519 100644
--- a/test/fixedbugs/issue14999.go
+++ b/test/fixedbugs/issue14999.go
@@ -7,11 +7,11 @@
package p
func f(x int) func(int) int {
- return func(y int) int { return x + y } // ERROR "heap-allocated closure, not allowed in runtime"
+ return func(y int) int { return x + y } // ERROR "heap-allocated closure f\.func1, not allowed in runtime"
}
func g(x int) func(int) int { // ERROR "x escapes to heap, not allowed in runtime"
- return func(y int) int { // ERROR "heap-allocated closure, not allowed in runtime"
+ return func(y int) int { // ERROR "heap-allocated closure g\.func1, not allowed in runtime"
x += y
return x + y
}
diff --git a/test/fixedbugs/issue24651a.go b/test/fixedbugs/issue24651a.go
index 6c7bf30908..1bfe8ac1ce 100644
--- a/test/fixedbugs/issue24651a.go
+++ b/test/fixedbugs/issue24651a.go
@@ -21,5 +21,5 @@ var x = 5
//go:noinline Provide a clean, constant reason for not inlining main
func main() { // ERROR "cannot inline main: marked go:noinline$"
println("Foo(", x, ")=", Foo(x))
- println("Bar(", x, ")=", Bar(x)) // ERROR "inlining call to Bar func\(int\) int { return x \* \(x \+ 1\) \* \(x \+ 2\) }$"
+ println("Bar(", x, ")=", Bar(x)) // ERROR "inlining call to Bar"
}
diff --git a/test/fixedbugs/issue24651b.go b/test/fixedbugs/issue24651b.go
index aa88a6787b..2af54fc4b5 100644
--- a/test/fixedbugs/issue24651b.go
+++ b/test/fixedbugs/issue24651b.go
@@ -19,6 +19,6 @@ var x = 5
//go:noinline Provide a clean, constant reason for not inlining main
func main() { // ERROR "cannot inline main: marked go:noinline$"
- println("Foo(", x, ")=", Foo(x)) // ERROR "inlining call to Foo func\(int\) int { return x \* \(x \+ 1\) \* \(x \+ 2\) }$"
- println("Bar(", x, ")=", Bar(x)) // ERROR "inlining call to Bar func\(int\) int { return x \* \(x \+ 1\) \* \(x \+ 2\) }$"
+ println("Foo(", x, ")=", Foo(x)) // ERROR "inlining call to Foo"
+ println("Bar(", x, ")=", Bar(x)) // ERROR "inlining call to Bar"
}
diff --git a/test/fixedbugs/issue30898.go b/test/fixedbugs/issue30898.go
index b6376d3f9e..c7f6f2d371 100644
--- a/test/fixedbugs/issue30898.go
+++ b/test/fixedbugs/issue30898.go
@@ -15,5 +15,5 @@ func debugf(format string, args ...interface{}) { // ERROR "can inline debugf" "
func bar() { // ERROR "can inline bar"
value := 10
- debugf("value is %d", value) // ERROR "inlining call to debugf" "value does not escape" "\[\]interface {}{...} does not escape"
+ debugf("value is %d", value) // ERROR "inlining call to debugf" "value does not escape" "\.\.\. argument does not escape"
}
diff --git a/test/fixedbugs/issue42284.dir/a.go b/test/fixedbugs/issue42284.dir/a.go
index ffe9310be3..f7fd80bd20 100644
--- a/test/fixedbugs/issue42284.dir/a.go
+++ b/test/fixedbugs/issue42284.dir/a.go
@@ -13,7 +13,7 @@ func E() I { // ERROR "can inline E"
return T(0) // ERROR "T\(0\) escapes to heap"
}
-func F(i I) I { // ERROR "can inline F" "leaking param: i to result ~r1 level=0"
+func F(i I) I { // ERROR "can inline F" "leaking param: i to result ~r0 level=0"
i = nil
return i
}
diff --git a/test/fixedbugs/issue44432.go b/test/fixedbugs/issue44432.go
index c5fb67e0d7..eec53f3000 100644
--- a/test/fixedbugs/issue44432.go
+++ b/test/fixedbugs/issue44432.go
@@ -8,6 +8,6 @@ package p
var m = map[string]int{
"a": 1,
- 1: 1, // ERROR "cannot use 1.*as type string in map key"
- 2: 2, // ERROR "cannot use 2.*as type string in map key"
+ 1: 1, // ERROR "cannot use 1.*as.*string.*in map"
+ 2: 2, // ERROR "cannot use 2.*as.*string.*in map"
}
diff --git a/test/fixedbugs/issue46556.go b/test/fixedbugs/issue46556.go
new file mode 100644
index 0000000000..b159f61b0c
--- /dev/null
+++ b/test/fixedbugs/issue46556.go
@@ -0,0 +1,16 @@
+// compile
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type A = interface{}
+type B interface{}
+
+// Test that embedding both anonymous and defined types is supported.
+type C interface {
+ A
+ B
+}
diff --git a/test/fixedbugs/issue4909b.go b/test/fixedbugs/issue4909b.go
index 0f594e3db6..7d7922701a 100644
--- a/test/fixedbugs/issue4909b.go
+++ b/test/fixedbugs/issue4909b.go
@@ -73,7 +73,7 @@ func writeDot(ns ...int) {
}
fmt.Print(")")
if isIndirect {
- fmt.Print(` // ERROR "indirection"`)
+ fmt.Print(` // ERROR "indirection|embedded via a pointer"`)
}
fmt.Print("\n")
}
diff --git a/test/inline_big.go b/test/inline_big.go
index 68e1101d3b..83672753f7 100644
--- a/test/inline_big.go
+++ b/test/inline_big.go
@@ -1023,7 +1023,7 @@ func f(a []int) int { // ERROR "cannot inline f:.*" "a does not escape"
a[997] = 0
a[998] = 0
a[999] = 0
- x := small(a) // ERROR "inlining call to small .*"
+ x := small(a) // ERROR "inlining call to small"
y := medium(a) // The crux of this test: medium is not inlined.
return x + y
}
diff --git a/test/inline_variadic.go b/test/inline_variadic.go
index 687048a192..49483d77f7 100644
--- a/test/inline_variadic.go
+++ b/test/inline_variadic.go
@@ -14,6 +14,6 @@ func head(xs ...string) string { // ERROR "can inline head" "leaking param: xs t
}
func f() string { // ERROR "can inline f"
- x := head("hello", "world") // ERROR "inlining call to head" "\[\]string{...} does not escape"
+ x := head("hello", "world") // ERROR "inlining call to head" "\.\.\. argument does not escape"
return x
}
diff --git a/test/live.go b/test/live.go
index bc7b3849cf..5b14932cae 100644
--- a/test/live.go
+++ b/test/live.go
@@ -1,5 +1,5 @@
// errorcheckwithauto -0 -l -live -wb=0 -d=ssa/insert_resched_checks/off
-// +build !ppc64,!ppc64le,!goexperiment.regabi,!goexperiment.regabidefer
+// +build !ppc64,!ppc64le,!goexperiment.regabiargs
// ppc64 needs a better tighten pass to make f18 pass
// rescheduling checks need to be turned off because there are some live variables across the inserted check call
@@ -424,7 +424,7 @@ func f27defer(b bool) {
}
defer call27(func() { x++ }) // ERROR "stack object .autotmp_[0-9]+ struct \{"
printnl() // ERROR "live at call to printnl: .autotmp_[0-9]+ .autotmp_[0-9]+"
- return // ERROR "live at call to call27: .autotmp_[0-9]+"
+ return // ERROR "live at indirect call: .autotmp_[0-9]+"
}
// and newproc (go) escapes to the heap
@@ -432,9 +432,9 @@ func f27defer(b bool) {
func f27go(b bool) {
x := 0
if b {
- go call27(func() { x++ }) // ERROR "live at call to newobject: &x$" "live at call to newproc: &x$"
+ go call27(func() { x++ }) // ERROR "live at call to newobject: &x$" "live at call to newobject: &x .autotmp_[0-9]+$" "live at call to newproc: &x$" // allocate two closures, the func literal, and the wrapper for go
}
- go call27(func() { x++ }) // ERROR "live at call to newobject: &x$"
+ go call27(func() { x++ }) // ERROR "live at call to newobject: &x$" "live at call to newobject: .autotmp_[0-9]+$" // allocate two closures, the func literal, and the wrapper for go
printnl()
}
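
Note (illustrative, not part of this CL): the extra live closure reported at these go statements appears to come from the compiler wrapping a go call that takes arguments in a second, generated closure, in addition to the func literal that captures x, which is what the "allocate two closures" comments above describe. A rough sketch of the lowering, under that assumption; this is not actual compiler output:

package main

func call27(f func()) { f() }

func f27go() {
	x := 0
	// Source form in the test:
	//   go call27(func() { x++ })
	// Approximate lowered form:
	fn := func() { x++ }             // closure #1: the func literal, capturing &x
	wrapper := func() { call27(fn) } // closure #2: generated wrapper so go spawns a no-arg func
	go wrapper()
}

func main() { f27go() }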
diff --git a/test/live_regabi.go b/test/live_regabi.go
index 2b0278ecb8..c35a27e4e0 100644
--- a/test/live_regabi.go
+++ b/test/live_regabi.go
@@ -1,5 +1,5 @@
// errorcheckwithauto -0 -l -live -wb=0 -d=ssa/insert_resched_checks/off
-// +build amd64,goexperiment.regabidefer,goexperiment.regabiargs
+// +build amd64,goexperiment.regabiargs
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
diff --git a/test/run.go b/test/run.go
index 5e60de7624..656519e301 100644
--- a/test/run.go
+++ b/test/run.go
@@ -42,6 +42,8 @@ var (
linkshared = flag.Bool("linkshared", false, "")
updateErrors = flag.Bool("update_errors", false, "update error messages in test file based on compiler output")
runoutputLimit = flag.Int("l", defaultRunOutputLimit(), "number of parallel runoutput tests to run")
+ generics = flag.String("G", "0,3", "a comma-separated list of -G compiler flags to test with")
+ force = flag.Bool("f", false, "run expected-failure generics tests rather than skipping them")
shard = flag.Int("shard", 0, "shard index to run. Only applicable if -shards is non-zero.")
shards = flag.Int("shards", 0, "number of shards. If 0, all tests are run. This is used by the continuous build.")
@@ -82,6 +84,15 @@ const maxTests = 5000
func main() {
flag.Parse()
+ var glevels []int
+ for _, s := range strings.Split(*generics, ",") {
+ glevel, err := strconv.Atoi(s)
+ if err != nil {
+ log.Fatalf("invalid -G flag: %v", err)
+ }
+ glevels = append(glevels, glevel)
+ }
+
goos = getenv("GOOS", runtime.GOOS)
goarch = getenv("GOARCH", runtime.GOARCH)
cgoEnv, err := exec.Command(goTool(), "env", "CGO_ENABLED").Output()
@@ -113,11 +124,11 @@ func main() {
}
if fi, err := os.Stat(arg); err == nil && fi.IsDir() {
for _, baseGoFile := range goFiles(arg) {
- tests = append(tests, startTest(arg, baseGoFile))
+ tests = append(tests, startTests(arg, baseGoFile, glevels)...)
}
} else if strings.HasSuffix(arg, ".go") {
dir, file := filepath.Split(arg)
- tests = append(tests, startTest(dir, file))
+ tests = append(tests, startTests(dir, file, glevels)...)
} else {
log.Fatalf("can't yet deal with non-directory and non-go file %q", arg)
}
@@ -125,7 +136,7 @@ func main() {
} else {
for _, dir := range dirs {
for _, baseGoFile := range goFiles(dir) {
- tests = append(tests, startTest(dir, baseGoFile))
+ tests = append(tests, startTests(dir, baseGoFile, glevels)...)
}
}
}
@@ -151,7 +162,8 @@ func main() {
resCount[status]++
dt := fmt.Sprintf("%.3fs", test.dt.Seconds())
if status == "FAIL" {
- fmt.Printf("# go run run.go -- %s\n%s\nFAIL\t%s\t%s\n",
+ fmt.Printf("# go run run.go -G=%v %s\n%s\nFAIL\t%s\t%s\n",
+ test.glevel,
path.Join(test.dir, test.gofile),
errStr, test.goFileName(), dt)
continue
@@ -270,6 +282,7 @@ type test struct {
dir, gofile string
donec chan bool // closed when done
dt time.Duration
+ glevel int // what -G level this test should use
src string
@@ -277,23 +290,27 @@ type test struct {
err error
}
-// startTest
-func startTest(dir, gofile string) *test {
- t := &test{
- dir: dir,
- gofile: gofile,
- donec: make(chan bool, 1),
- }
- if toRun == nil {
- toRun = make(chan *test, maxTests)
- go runTests()
- }
- select {
- case toRun <- t:
- default:
- panic("toRun buffer size (maxTests) is too small")
+func startTests(dir, gofile string, glevels []int) []*test {
+ tests := make([]*test, len(glevels))
+ for i, glevel := range glevels {
+ t := &test{
+ dir: dir,
+ gofile: gofile,
+ glevel: glevel,
+ donec: make(chan bool, 1),
+ }
+ if toRun == nil {
+ toRun = make(chan *test, maxTests)
+ go runTests()
+ }
+ select {
+ case toRun <- t:
+ default:
+ panic("toRun buffer size (maxTests) is too small")
+ }
+ tests[i] = t
}
- return t
+ return tests
}
// runTests runs tests in parallel, but respecting the order they
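
Note (illustrative, not part of this CL): startTests now fans each source file out into one scheduled test per requested -G level, driven by the glevels slice parsed in main. A small sketch that mirrors that parsing, with a hypothetical file name for illustration:

package main

import (
	"fmt"
	"log"
	"strconv"
	"strings"
)

// parseGLevels mirrors the -G flag handling added to main: "0,3" -> []int{0, 3}.
func parseGLevels(s string) []int {
	var glevels []int
	for _, field := range strings.Split(s, ",") {
		glevel, err := strconv.Atoi(field)
		if err != nil {
			log.Fatalf("invalid -G flag: %v", err)
		}
		glevels = append(glevels, glevel)
	}
	return glevels
}

func main() {
	for _, glevel := range parseGLevels("0,3") {
		fmt.Printf("would start escape2.go with glevel=%d\n", glevel)
	}
}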
@@ -480,12 +497,16 @@ func init() { checkShouldTest() }
// This must match the flags used for building the standard library,
// or else the commands will rebuild any needed packages (like runtime)
// over and over.
-func goGcflags() string {
- return "-gcflags=all=" + os.Getenv("GO_GCFLAGS")
+func (t *test) goGcflags() string {
+ flags := os.Getenv("GO_GCFLAGS")
+ if t.glevel != 0 {
+ flags = fmt.Sprintf("%s -G=%v", flags, t.glevel)
+ }
+ return "-gcflags=all=" + flags
}
-func goGcflagsIsEmpty() bool {
- return "" == os.Getenv("GO_GCFLAGS")
+func (t *test) goGcflagsIsEmpty() bool {
+ return "" == os.Getenv("GO_GCFLAGS") && t.glevel == 0
}
var errTimeout = errors.New("command exceeded time limit")
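
Note (illustrative, not part of this CL): turning goGcflags into a method lets each test fold its own -G level into the -gcflags=all=... string. A self-contained sketch of the string it builds, assuming GO_GCFLAGS is unset:

package main

import (
	"fmt"
	"os"
)

// goGcflagsFor reproduces the logic of (*test).goGcflags for a given glevel.
func goGcflagsFor(glevel int) string {
	flags := os.Getenv("GO_GCFLAGS")
	if glevel != 0 {
		flags = fmt.Sprintf("%s -G=%v", flags, glevel)
	}
	return "-gcflags=all=" + flags
}

func main() {
	fmt.Println(goGcflagsFor(0)) // "-gcflags=all="
	fmt.Println(goGcflagsFor(3)) // "-gcflags=all= -G=3"
}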
@@ -498,6 +519,17 @@ func (t *test) run() {
close(t.donec)
}()
+ if t.glevel > 0 && !*force {
+ // Files excluded from generics testing.
+ filename := strings.Replace(t.goFileName(), "\\", "/", -1) // goFileName() uses \ on Windows
+ if excludedFiles[filename] {
+ if *verbose {
+ fmt.Printf("excl\t%s\n", filename)
+ }
+ return
+ }
+ }
+
srcBytes, err := ioutil.ReadFile(t.goFileName())
if err != nil {
t.err = err
@@ -541,7 +573,11 @@ func (t *test) run() {
singlefilepkgs := false
setpkgpaths := false
localImports := true
- f := strings.Fields(action)
+ f, err := splitQuoted(action)
+ if err != nil {
+ t.err = fmt.Errorf("invalid test recipe: %v", err)
+ return
+ }
if len(f) > 0 {
action = f[0]
args = f[1:]
@@ -616,6 +652,53 @@ func (t *test) run() {
}
}
+ type Tool int
+
+ const (
+ _ Tool = iota
+ AsmCheck
+ Build
+ Run
+ Compile
+ )
+
+ // validForGLevel reports whether the current test is valid to run
+ // at the specified -G level. If so, it may update flags as
+ // necessary to test with -G.
+ validForGLevel := func(tool Tool) bool {
+ if t.glevel == 0 {
+ // default -G level; always valid
+ return true
+ }
+
+ for _, flag := range flags {
+ if strings.Contains(flag, "-G") {
+ // test provides explicit -G flag already
+ if *verbose {
+ fmt.Printf("excl\t%s\n", t.goFileName())
+ }
+ return false
+ }
+ }
+
+ switch tool {
+ case Build, Run:
+ // ok; handled in goGcflags
+
+ case Compile:
+ flags = append(flags, fmt.Sprintf("-G=%v", t.glevel))
+
+ default:
+ // we don't know how to add -G for this test yet
+ if *verbose {
+ fmt.Printf("excl\t%s\n", t.goFileName())
+ }
+ return false
+ }
+
+ return true
+ }
+
t.makeTempDir()
if !*keep {
defer os.RemoveAll(t.tempDir)
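
Note (illustrative, not part of this CL): each action case below gates on validForGLevel with a Tool value, so at a non-zero -G level a test either picks up -G through goGcflags (Build, Run), has -G=<level> appended to its compile flags (Compile), or is skipped (anything else, such as AsmCheck, or a test that already passes an explicit -G). A small sketch of that decision, with an illustrative helper name:

package main

import "fmt"

type Tool int

const (
	_ Tool = iota
	AsmCheck
	Build
	Run
	Compile
)

// gate reports whether a non-zero -G test runs for this tool and whether the
// harness must inject -G itself, mirroring the switch in validForGLevel.
func gate(tool Tool) (run, injectG bool) {
	switch tool {
	case Build, Run:
		return true, false // -G is added via goGcflags
	case Compile:
		return true, true // -G=<level> appended to the compile flags
	default:
		return false, false // e.g. asmcheck: skipped at non-default levels
	}
}

func main() {
	for _, tool := range []Tool{AsmCheck, Build, Run, Compile} {
		run, inject := gate(tool)
		fmt.Println(int(tool), run, inject)
	}
}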
@@ -692,6 +775,10 @@ func (t *test) run() {
t.err = fmt.Errorf("unimplemented action %q", action)
case "asmcheck":
+ if !validForGLevel(AsmCheck) {
+ return
+ }
+
// Compile Go file and match the generated assembly
// against a set of regexps in comments.
ops := t.wantedAsmOpcodes(long)
@@ -746,6 +833,10 @@ func (t *test) run() {
return
case "errorcheck":
+ if !validForGLevel(Compile) {
+ return
+ }
+
// Compile Go file.
// Fail if wantError is true and compilation was successful and vice versa.
// Match errors produced by gc against errors in comments.
@@ -774,72 +865,20 @@ func (t *test) run() {
t.updateErrors(string(out), long)
}
t.err = t.errorCheck(string(out), wantAuto, long, t.gofile)
- if t.err != nil {
- return // don't hide error if run below succeeds
- }
-
- // The following is temporary scaffolding to get types2 typechecker
- // up and running against the existing test cases. The explicitly
- // listed files don't pass yet, usually because the error messages
- // are slightly different (this list is not complete). Any errorcheck
- // tests that require output from analysis phases past initial type-
- // checking are also excluded since these phases are not running yet.
- // We can get rid of this code once types2 is fully plugged in.
-
- // For now we're done when we can't handle the file or some of the flags.
- // The first goal is to eliminate the excluded list; the second goal is to
- // eliminate the flag list.
- // Excluded files.
- filename := strings.Replace(t.goFileName(), "\\", "/", -1) // goFileName() uses \ on Windows
- if excluded[filename] {
- if *verbose {
- fmt.Printf("excl\t%s\n", filename)
- }
- return // cannot handle file yet
- }
-
- // Excluded flags.
- for _, flag := range flags {
- for _, pattern := range []string{
- "-m",
- } {
- if strings.Contains(flag, pattern) {
- if *verbose {
- fmt.Printf("excl\t%s\t%s\n", filename, flags)
- }
- return // cannot handle flag
- }
- }
- }
-
- // Run errorcheck again with -G option (new typechecker).
- cmdline = []string{goTool(), "tool", "compile", "-G=3", "-C", "-e", "-o", "a.o"}
- // No need to add -dynlink even if linkshared if we're just checking for errors...
- cmdline = append(cmdline, flags...)
- cmdline = append(cmdline, long)
- out, err = runcmd(cmdline...)
- if wantError {
- if err == nil {
- t.err = fmt.Errorf("compilation succeeded unexpectedly\n%s", out)
- return
- }
- } else {
- if err != nil {
- t.err = err
- return
- }
- }
- if *updateErrors {
- t.updateErrors(string(out), long)
+ case "compile":
+ if !validForGLevel(Compile) {
+ return
}
- t.err = t.errorCheck(string(out), wantAuto, long, t.gofile)
- case "compile":
// Compile Go file.
_, t.err = compileFile(runcmd, long, flags)
case "compiledir":
+ if !validForGLevel(Compile) {
+ return
+ }
+
// Compile all files in the directory as packages in lexicographic order.
longdir := filepath.Join(cwd, t.goDirName())
pkgs, err := goDirPackages(longdir, singlefilepkgs)
@@ -855,6 +894,10 @@ func (t *test) run() {
}
case "errorcheckdir", "errorcheckandrundir":
+ if !validForGLevel(Compile) {
+ return
+ }
+
flags = append(flags, "-d=panic")
// Compile and errorCheck all files in the directory as packages in lexicographic order.
// If errorcheckdir and wantError, compilation of the last package must fail.
@@ -900,6 +943,10 @@ func (t *test) run() {
fallthrough
case "rundir":
+ if !validForGLevel(Run) {
+ return
+ }
+
// Compile all files in the directory as packages in lexicographic order.
// In case of errorcheckandrundir, ignore failed compilation of the package before the last.
// Link as if the last file is the main package, run it.
@@ -958,6 +1005,10 @@ func (t *test) run() {
}
case "runindir":
+ if !validForGLevel(Run) {
+ return
+ }
+
// Make a shallow copy of t.goDirName() in its own module and GOPATH, and
// run "go run ." in it. The module path (and hence import path prefix) of
// the copy is equal to the basename of the source directory.
@@ -983,7 +1034,7 @@ func (t *test) run() {
return
}
- cmd := []string{goTool(), "run", goGcflags()}
+ cmd := []string{goTool(), "run", t.goGcflags()}
if *linkshared {
cmd = append(cmd, "-linkshared")
}
@@ -997,13 +1048,21 @@ func (t *test) run() {
t.checkExpectedOutput(out)
case "build":
+ if !validForGLevel(Build) {
+ return
+ }
+
// Build Go file.
- _, err := runcmd(goTool(), "build", goGcflags(), "-o", "a.exe", long)
+ _, err := runcmd(goTool(), "build", t.goGcflags(), "-o", "a.exe", long)
if err != nil {
t.err = err
}
case "builddir", "buildrundir":
+ if !validForGLevel(Build) {
+ return
+ }
+
// Build an executable from all the .go and .s files in a subdirectory.
// Run it and verify its output in the buildrundir case.
longdir := filepath.Join(cwd, t.goDirName())
@@ -1083,10 +1142,14 @@ func (t *test) run() {
}
case "buildrun":
+ if !validForGLevel(Build) {
+ return
+ }
+
// Build an executable from Go file, then run it, verify its output.
// Useful for timeout tests where failure mode is infinite loop.
// TODO: not supported on NaCl
- cmd := []string{goTool(), "build", goGcflags(), "-o", "a.exe"}
+ cmd := []string{goTool(), "build", t.goGcflags(), "-o", "a.exe"}
if *linkshared {
cmd = append(cmd, "-linkshared")
}
@@ -1108,13 +1171,17 @@ func (t *test) run() {
t.checkExpectedOutput(out)
case "run":
+ if !validForGLevel(Run) {
+ return
+ }
+
// Run Go file if no special go command flags are provided;
// otherwise build an executable and run it.
// Verify the output.
runInDir = ""
var out []byte
var err error
- if len(flags)+len(args) == 0 && goGcflagsIsEmpty() && !*linkshared && goarch == runtime.GOARCH && goos == runtime.GOOS {
+ if len(flags)+len(args) == 0 && t.goGcflagsIsEmpty() && !*linkshared && goarch == runtime.GOARCH && goos == runtime.GOOS {
// If we're not using special go command flags,
// skip all the go command machinery.
// This avoids any time the go command would
@@ -1136,7 +1203,7 @@ func (t *test) run() {
}
out, err = runcmd(append([]string{exe}, args...)...)
} else {
- cmd := []string{goTool(), "run", goGcflags()}
+ cmd := []string{goTool(), "run", t.goGcflags()}
if *linkshared {
cmd = append(cmd, "-linkshared")
}
@@ -1151,6 +1218,10 @@ func (t *test) run() {
t.checkExpectedOutput(out)
case "runoutput":
+ if !validForGLevel(Run) {
+ return
+ }
+
// Run Go file and write its output into temporary Go file.
// Run generated Go file and verify its output.
rungatec <- true
@@ -1158,7 +1229,7 @@ func (t *test) run() {
<-rungatec
}()
runInDir = ""
- cmd := []string{goTool(), "run", goGcflags()}
+ cmd := []string{goTool(), "run", t.goGcflags()}
if *linkshared {
cmd = append(cmd, "-linkshared")
}
@@ -1173,7 +1244,7 @@ func (t *test) run() {
t.err = fmt.Errorf("write tempfile:%s", err)
return
}
- cmd = []string{goTool(), "run", goGcflags()}
+ cmd = []string{goTool(), "run", t.goGcflags()}
if *linkshared {
cmd = append(cmd, "-linkshared")
}
@@ -1186,10 +1257,14 @@ func (t *test) run() {
t.checkExpectedOutput(out)
case "errorcheckoutput":
+ if !validForGLevel(Compile) {
+ return
+ }
+
// Run Go file and write its output into temporary Go file.
// Compile and errorCheck generated Go file.
runInDir = ""
- cmd := []string{goTool(), "run", goGcflags()}
+ cmd := []string{goTool(), "run", t.goGcflags()}
if *linkshared {
cmd = append(cmd, "-linkshared")
}
@@ -1941,65 +2016,170 @@ func overlayDir(dstRoot, srcRoot string) error {
})
}
+// The following is temporary scaffolding to get types2 typechecker
+// up and running against the existing test cases. The explicitly
+// listed files don't pass yet, usually because the error messages
+// are slightly different (this list is not complete). Any errorcheck
+// tests that require output from analysis phases past initial type-
+// checking are also excluded since these phases are not running yet.
+// We can get rid of this code once types2 is fully plugged in.
+
// List of files that the compiler cannot errorcheck with the new typechecker (compiler -G option).
// Temporary scaffolding until we pass all the tests at which point this map can be removed.
-var excluded = map[string]bool{
- "complit1.go": true, // types2 reports extra errors
- "const2.go": true, // types2 not run after syntax errors
- "ddd1.go": true, // issue #42987
+var excludedFiles = map[string]bool{
"directive.go": true, // misplaced compiler directive checks
"float_lit3.go": true, // types2 reports extra errors
"import1.go": true, // types2 reports extra errors
- "import5.go": true, // issue #42988
"import6.go": true, // issue #43109
"initializerr.go": true, // types2 reports extra errors
"linkname2.go": true, // error reported by noder (not running for types2 errorcheck test)
"notinheap.go": true, // types2 doesn't report errors about conversions that are invalid due to //go:notinheap
+ "printbig.go": true, // large untyped int passed to print (32-bit)
"shift1.go": true, // issue #42989
"typecheck.go": true, // invalid function is not causing errors when called
"writebarrier.go": true, // correct diagnostics, but different lines (probably irgen's fault)
+ "interface/private.go": true, // types2 phrases errors differently (doesn't use non-spec "private" term)
+
+ "fixedbugs/bug114.go": true, // large untyped int passed to println (32-bit)
"fixedbugs/bug176.go": true, // types2 reports all errors (pref: types2)
"fixedbugs/bug195.go": true, // types2 reports slightly different (but correct) bugs
- "fixedbugs/bug228.go": true, // types2 not run after syntax errors
+ "fixedbugs/bug228.go": true, // types2 doesn't run when there are syntax errors
"fixedbugs/bug231.go": true, // types2 bug? (same error reported twice)
"fixedbugs/bug255.go": true, // types2 reports extra errors
- "fixedbugs/bug351.go": true, // types2 reports extra errors
"fixedbugs/bug374.go": true, // types2 reports extra errors
"fixedbugs/bug385_32.go": true, // types2 doesn't produce missing error "type .* too large" (32-bit specific)
"fixedbugs/bug388.go": true, // types2 not run due to syntax errors
"fixedbugs/bug412.go": true, // types2 produces a follow-on error
+ "fixedbugs/issue10700.go": true, // types2 reports ok hint, but does not match regexp
"fixedbugs/issue11590.go": true, // types2 doesn't report a follow-on error (pref: types2)
"fixedbugs/issue11610.go": true, // types2 not run after syntax errors
"fixedbugs/issue11614.go": true, // types2 reports an extra error
- "fixedbugs/issue13415.go": true, // declared but not used conflict
"fixedbugs/issue14520.go": true, // missing import path error by types2
+ "fixedbugs/issue16133.go": true, // types2 doesn't use package path for qualified identifiers when package name is ambiguous
"fixedbugs/issue16428.go": true, // types2 reports two instead of one error
"fixedbugs/issue17038.go": true, // types2 doesn't report a follow-on error (pref: types2)
+ "fixedbugs/issue17270.go": true, // ICE in irgen
"fixedbugs/issue17645.go": true, // multiple errors on same line
"fixedbugs/issue18331.go": true, // missing error about misuse of //go:noescape (irgen needs code from noder)
- "fixedbugs/issue18393.go": true, // types2 not run after syntax errors
+ "fixedbugs/issue18419.go": true, // types2 reports
"fixedbugs/issue19012.go": true, // multiple errors on same line
+ "fixedbugs/issue20174.go": true, // ICE due to width not calculated (probably irgen's fault)
"fixedbugs/issue20233.go": true, // types2 reports two instead of one error (pref: compiler)
"fixedbugs/issue20245.go": true, // types2 reports two instead of one error (pref: compiler)
"fixedbugs/issue20250.go": true, // correct diagnostics, but different lines (probably irgen's fault)
"fixedbugs/issue21979.go": true, // types2 doesn't report a follow-on error (pref: types2)
+ "fixedbugs/issue23305.go": true, // large untyped int passed to println (32-bit)
"fixedbugs/issue23732.go": true, // types2 reports different (but ok) line numbers
"fixedbugs/issue25958.go": true, // types2 doesn't report a follow-on error (pref: types2)
"fixedbugs/issue28079b.go": true, // types2 reports follow-on errors
"fixedbugs/issue28268.go": true, // types2 reports follow-on errors
+ "fixedbugs/issue31053.go": true, // types2 reports "unknown field" instead of "cannot refer to unexported field"
"fixedbugs/issue33460.go": true, // types2 reports alternative positions in separate error
- "fixedbugs/issue41575.go": true, // types2 reports alternative positions in separate error
"fixedbugs/issue42058a.go": true, // types2 doesn't report "channel element type too large"
"fixedbugs/issue42058b.go": true, // types2 doesn't report "channel element type too large"
+ "fixedbugs/issue46725.go": true, // fix applied to typecheck needs to be ported to irgen/transform
"fixedbugs/issue4232.go": true, // types2 reports (correct) extra errors
"fixedbugs/issue4452.go": true, // types2 reports (correct) extra errors
+ "fixedbugs/issue4510.go": true, // types2 reports different (but ok) line numbers
"fixedbugs/issue5609.go": true, // types2 needs a better error message
- "fixedbugs/issue6889.go": true, // types2 can handle this without constant overflow
- "fixedbugs/issue7525.go": true, // types2 reports init cycle error on different line - ok otherwise
"fixedbugs/issue7525b.go": true, // types2 reports init cycle error on different line - ok otherwise
"fixedbugs/issue7525c.go": true, // types2 reports init cycle error on different line - ok otherwise
"fixedbugs/issue7525d.go": true, // types2 reports init cycle error on different line - ok otherwise
"fixedbugs/issue7525e.go": true, // types2 reports init cycle error on different line - ok otherwise
+ "fixedbugs/issue7525.go": true, // types2 reports init cycle error on different line - ok otherwise
+ "fixedbugs/issue9691.go": true, // "cannot assign to int(.autotmp_4)" (probably irgen's fault)
+
+ // tests that rely on -m diagnostics, which currently differ with -G=3
+ //
+ // TODO(mdempsky): Triage, though most of the issues seem to fall into:
+ // - Anonymous result parameters given different names (e.g., ~r0 vs ~r1)
+ // - Some escape analysis diagnostics being printed without position information
+ // - Some expressions printed differently (e.g., "int(100)" instead
+ // of "100" or "&composite literal" instead of "&[4]int{...}").
+ "closure3.go": true,
+ "escape2.go": true,
+ "escape2n.go": true,
+ "escape4.go": true,
+ "escape_calls.go": true,
+ "escape_field.go": true,
+ "escape_iface.go": true,
+ "escape_indir.go": true,
+ "escape_level.go": true,
+ "escape_map.go": true,
+ "escape_param.go": true,
+ "escape_slice.go": true,
+ "escape_struct_param1.go": true,
+ "escape_struct_param2.go": true,
+ "fixedbugs/issue12006.go": true,
+ "fixedbugs/issue13799.go": true,
+ "fixedbugs/issue21709.go": true,
+ "fixedbugs/issue31573.go": true,
+ "fixedbugs/issue37837.go": true,
+ "fixedbugs/issue39292.go": true,
+ "fixedbugs/issue7921.go": true,
+ "inline.go": true,
+}
+
+// splitQuoted splits the string s around each instance of one or more consecutive
+// white space characters while taking into account quotes and escaping, and
+// returns an array of substrings of s or an empty list if s contains only white space.
+// Single quotes and double quotes are recognized to prevent splitting within the
+// quoted region, and are removed from the resulting substrings. If a quote in s
+// isn't closed err will be set and r will have the unclosed argument as the
+// last element. The backslash is used for escaping.
+//
+// For example, the following string:
+//
+// a b:"c d" 'e''f' "g\""
+//
+// Would be parsed as:
+//
+// []string{"a", "b:c d", "ef", `g"`}
+//
+// [copied from src/go/build/build.go]
+func splitQuoted(s string) (r []string, err error) {
+ var args []string
+ arg := make([]rune, len(s))
+ escaped := false
+ quoted := false
+ quote := '\x00'
+ i := 0
+ for _, rune := range s {
+ switch {
+ case escaped:
+ escaped = false
+ case rune == '\\':
+ escaped = true
+ continue
+ case quote != '\x00':
+ if rune == quote {
+ quote = '\x00'
+ continue
+ }
+ case rune == '"' || rune == '\'':
+ quoted = true
+ quote = rune
+ continue
+ case unicode.IsSpace(rune):
+ if quoted || i > 0 {
+ quoted = false
+ args = append(args, string(arg[:i]))
+ i = 0
+ }
+ continue
+ }
+ arg[i] = rune
+ i++
+ }
+ if quoted || i > 0 {
+ args = append(args, string(arg[:i]))
+ }
+ if quote != 0 {
+ err = errors.New("unclosed quote")
+ } else if escaped {
+ err = errors.New("unfinished escaping")
+ }
+ return args, err
}
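
Note (illustrative, not part of this CL): replacing strings.Fields with splitQuoted lets a test recipe line carry quoted arguments that contain spaces. A small sketch of the behavior documented in its comment; it restates the doc comment's example rather than calling the unexported helper:

package main

import "fmt"

func main() {
	// Example input from splitQuoted's doc comment:
	input := `a b:"c d" 'e''f' "g\""`
	// Expected fields: quotes are stripped and splitting is suppressed inside them.
	want := []string{"a", "b:c d", "ef", `g"`}
	fmt.Printf("splitQuoted(%q) should yield %q\n", input, want)
}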
diff --git a/test/typeparam/absdiff.go b/test/typeparam/absdiff.go
index 1381d7c92c..e76a998b4d 100644
--- a/test/typeparam/absdiff.go
+++ b/test/typeparam/absdiff.go
@@ -12,10 +12,10 @@ import (
)
type Numeric interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64,
- complex64, complex128
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~complex64 | ~complex128
}
// numericAbs matches numeric types with an Abs method.
@@ -33,14 +33,14 @@ func absDifference[T numericAbs[T]](a, b T) T {
// orderedNumeric matches numeric types that support the < operator.
type orderedNumeric interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64
}
// Complex matches the two complex types, which do not have a < operator.
type Complex interface {
- type complex64, complex128
+ ~complex64 | ~complex128
}
// orderedAbs is a helper type that defines an Abs method for
@@ -48,8 +48,7 @@ type Complex interface {
type orderedAbs[T orderedNumeric] T
func (a orderedAbs[T]) Abs() orderedAbs[T] {
- // TODO(danscales): orderedAbs[T] conversion shouldn't be needed
- if a < orderedAbs[T](0) {
+ if a < 0 {
return -a
}
return a
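
Note (illustrative, not part of this CL): these typeparam tests move from the earlier "type int, int64, ..." constraint lists to union elements written with | and the ~ approximation operator, so a constraint admits any type whose underlying type matches. A minimal sketch of the new form, assuming a toolchain with type parameters enabled (here, -G=3); the names are illustrative:

package sketch

// Integer admits any type whose underlying type is one of the listed integers.
type Integer interface {
	~int | ~int8 | ~int16 | ~int32 | ~int64
}

// Min works for any such type, including defined types like MyInt below.
func Min[T Integer](a, b T) T {
	if a < b {
		return a
	}
	return b
}

type MyInt int // satisfies Integer via the ~int element

var _ = Min(MyInt(1), MyInt(2))

Plain elements without ~ (as in adder.go's "int | int64 | string" below) admit only the listed types themselves, not types defined from them.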
diff --git a/test/typeparam/absdiffimp.dir/a.go b/test/typeparam/absdiffimp.dir/a.go
new file mode 100644
index 0000000000..df81dcf538
--- /dev/null
+++ b/test/typeparam/absdiffimp.dir/a.go
@@ -0,0 +1,75 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+import (
+ "math"
+)
+
+type Numeric interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~complex64 | ~complex128
+}
+
+// numericAbs matches numeric types with an Abs method.
+type numericAbs[T any] interface {
+ Numeric
+ Abs() T
+}
+
+// AbsDifference computes the absolute value of the difference of
+// a and b, where the absolute value is determined by the Abs method.
+func absDifference[T numericAbs[T]](a, b T) T {
+ d := a - b
+ return d.Abs()
+}
+
+// orderedNumeric matches numeric types that support the < operator.
+type orderedNumeric interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64
+}
+
+// Complex matches the two complex types, which do not have a < operator.
+type Complex interface {
+ ~complex64 | ~complex128
+}
+
+// orderedAbs is a helper type that defines an Abs method for
+// ordered numeric types.
+type orderedAbs[T orderedNumeric] T
+
+func (a orderedAbs[T]) Abs() orderedAbs[T] {
+ if a < 0 {
+ return -a
+ }
+ return a
+}
+
+// complexAbs is a helper type that defines an Abs method for
+// complex types.
+type complexAbs[T Complex] T
+
+func (a complexAbs[T]) Abs() complexAbs[T] {
+ r := float64(real(a))
+ i := float64(imag(a))
+ d := math.Sqrt(r*r + i*i)
+ return complexAbs[T](complex(d, 0))
+}
+
+// OrderedAbsDifference returns the absolute value of the difference
+// between a and b, where a and b are of an ordered type.
+func OrderedAbsDifference[T orderedNumeric](a, b T) T {
+ return T(absDifference(orderedAbs[T](a), orderedAbs[T](b)))
+}
+
+// ComplexAbsDifference returns the absolute value of the difference
+// between a and b, where a and b are of a complex type.
+func ComplexAbsDifference[T Complex](a, b T) T {
+ return T(absDifference(complexAbs[T](a), complexAbs[T](b)))
+}
diff --git a/test/typeparam/absdiffimp.dir/main.go b/test/typeparam/absdiffimp.dir/main.go
new file mode 100644
index 0000000000..8eefdbdf38
--- /dev/null
+++ b/test/typeparam/absdiffimp.dir/main.go
@@ -0,0 +1,29 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+)
+
+func main() {
+ if got, want := a.OrderedAbsDifference(1.0, -2.0), 3.0; got != want {
+ panic(fmt.Sprintf("got = %v, want = %v", got, want))
+ }
+ if got, want := a.OrderedAbsDifference(-1.0, 2.0), 3.0; got != want {
+ panic(fmt.Sprintf("got = %v, want = %v", got, want))
+ }
+ if got, want := a.OrderedAbsDifference(-20, 15), 35; got != want {
+ panic(fmt.Sprintf("got = %v, want = %v", got, want))
+ }
+
+ if got, want := a.ComplexAbsDifference(5.0+2.0i, 2.0-2.0i), 5+0i; got != want {
+ panic(fmt.Sprintf("got = %v, want = %v", got, want))
+ }
+ if got, want := a.ComplexAbsDifference(2.0-2.0i, 5.0+2.0i), 5+0i; got != want {
+ panic(fmt.Sprintf("got = %v, want = %v", got, want))
+ }
+}
diff --git a/test/typeparam/absdiffimp.go b/test/typeparam/absdiffimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/absdiffimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/adder.go b/test/typeparam/adder.go
index 0c25ad4ef2..79319bd236 100644
--- a/test/typeparam/adder.go
+++ b/test/typeparam/adder.go
@@ -11,19 +11,19 @@ import (
)
type AddType interface {
- type int, int64, string
+ int | int64 | string
}
-// _Add can add numbers or strings
-func _Add[T AddType](a, b T) T {
+// Add can add numbers or strings
+func Add[T AddType](a, b T) T {
return a + b
}
func main() {
- if got, want := _Add(5, 3), 8; got != want {
+ if got, want := Add(5, 3), 8; got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
- if got, want := _Add("ab", "cd"), "abcd"; got != want {
+ if got, want := Add("ab", "cd"), "abcd"; got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
}
diff --git a/test/typeparam/aliasimp.dir/a.go b/test/typeparam/aliasimp.dir/a.go
new file mode 100644
index 0000000000..3fac4aac98
--- /dev/null
+++ b/test/typeparam/aliasimp.dir/a.go
@@ -0,0 +1,9 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Rimp[T any] struct {
+ F T
+}
diff --git a/test/typeparam/aliasimp.dir/main.go b/test/typeparam/aliasimp.dir/main.go
new file mode 100644
index 0000000000..6638fa9454
--- /dev/null
+++ b/test/typeparam/aliasimp.dir/main.go
@@ -0,0 +1,38 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "a"
+
+type R[T any] struct {
+ F T
+}
+
+type S = R
+
+type Sint = R[int]
+
+type Simp = a.Rimp
+
+type SimpString Simp[string]
+
+func main() {
+ var s S[int]
+ if s.F != 0 {
+ panic(s.F)
+ }
+ var s2 Sint
+ if s2.F != 0 {
+ panic(s2.F)
+ }
+ var s3 Simp[string]
+ if s3.F != "" {
+ panic(s3.F)
+ }
+ var s4 SimpString
+ if s4.F != "" {
+ panic(s4.F)
+ }
+}
diff --git a/test/typeparam/aliasimp.go b/test/typeparam/aliasimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/aliasimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/chansimp.dir/a.go b/test/typeparam/chansimp.dir/a.go
new file mode 100644
index 0000000000..a3f73b2199
--- /dev/null
+++ b/test/typeparam/chansimp.dir/a.go
@@ -0,0 +1,232 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+import (
+ "context"
+ "runtime"
+)
+
+// SliceEqual reports whether two slices are equal: the same length and all
+// elements equal. All floating point NaNs are considered equal.
+func SliceEqual[Elem comparable](s1, s2 []Elem) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if v1 != v2 {
+ isNaN := func(f Elem) bool { return f != f }
+ if !isNaN(v1) || !isNaN(v2) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// ReadAll reads from c until the channel is closed or the context is
+// canceled, returning all the values read.
+func ReadAll[Elem any](ctx context.Context, c <-chan Elem) []Elem {
+ var r []Elem
+ for {
+ select {
+ case <-ctx.Done():
+ return r
+ case v, ok := <-c:
+ if !ok {
+ return r
+ }
+ r = append(r, v)
+ }
+ }
+}
+
+// Merge merges two channels into a single channel.
+// This will leave a goroutine running until either both channels are closed
+// or the context is canceled, at which point the returned channel is closed.
+func Merge[Elem any](ctx context.Context, c1, c2 <-chan Elem) <-chan Elem {
+ r := make(chan Elem)
+ go func(ctx context.Context, c1, c2 <-chan Elem, r chan<- Elem) {
+ defer close(r)
+ for c1 != nil || c2 != nil {
+ select {
+ case <-ctx.Done():
+ return
+ case v1, ok := <-c1:
+ if ok {
+ r <- v1
+ } else {
+ c1 = nil
+ }
+ case v2, ok := <-c2:
+ if ok {
+ r <- v2
+ } else {
+ c2 = nil
+ }
+ }
+ }
+ }(ctx, c1, c2, r)
+ return r
+}
+
+// Filter calls f on each value read from c. If f returns true the value
+// is sent on the returned channel. This will leave a goroutine running
+// until c is closed or the context is canceled, at which point the
+// returned channel is closed.
+func Filter[Elem any](ctx context.Context, c <-chan Elem, f func(Elem) bool) <-chan Elem {
+ r := make(chan Elem)
+ go func(ctx context.Context, c <-chan Elem, f func(Elem) bool, r chan<- Elem) {
+ defer close(r)
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case v, ok := <-c:
+ if !ok {
+ return
+ }
+ if f(v) {
+ r <- v
+ }
+ }
+ }
+ }(ctx, c, f, r)
+ return r
+}
+
+// Sink returns a channel that discards all values sent to it.
+// This will leave a goroutine running until the context is canceled
+// or the returned channel is closed.
+func Sink[Elem any](ctx context.Context) chan<- Elem {
+ r := make(chan Elem)
+ go func(ctx context.Context, r <-chan Elem) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case _, ok := <-r:
+ if !ok {
+ return
+ }
+ }
+ }
+ }(ctx, r)
+ return r
+}
+
+// An Exclusive is a value that may only be used by a single goroutine
+// at a time. This is implemented using channels rather than a mutex.
+type Exclusive[Val any] struct {
+ c chan Val
+}
+
+// MakeExclusive makes an initialized exclusive value.
+func MakeExclusive[Val any](initial Val) *Exclusive[Val] {
+ r := &Exclusive[Val]{
+ c: make(chan Val, 1),
+ }
+ r.c <- initial
+ return r
+}
+
+// Acquire acquires the exclusive value for private use.
+// It must be released using the Release method.
+func (e *Exclusive[Val]) Acquire() Val {
+ return <-e.c
+}
+
+// TryAcquire attempts to acquire the value. The ok result reports whether
+// the value was acquired. If the value is acquired, it must be released
+// using the Release method.
+func (e *Exclusive[Val]) TryAcquire() (v Val, ok bool) {
+ select {
+ case r := <-e.c:
+ return r, true
+ default:
+ return v, false
+ }
+}
+
+// Release updates and releases the value.
+// This method panics if the value has not been acquired.
+func (e *Exclusive[Val]) Release(v Val) {
+ select {
+ case e.c <- v:
+ default:
+ panic("Exclusive Release without Acquire")
+ }
+}
+
+// Ranger returns a Sender and a Receiver. The Receiver provides a
+// Next method to retrieve values. The Sender provides a Send method
+// to send values and a Close method to stop sending values. The Next
+// method indicates when the Sender has been closed, and the Send
+// method indicates when the Receiver has been freed.
+//
+// This is a convenient way to exit a goroutine sending values when
+// the receiver stops reading them.
+func Ranger[Elem any]() (*Sender[Elem], *Receiver[Elem]) {
+ c := make(chan Elem)
+ d := make(chan struct{})
+ s := &Sender[Elem]{
+ values: c,
+ done: d,
+ }
+ r := &Receiver[Elem]{
+ values: c,
+ done: d,
+ }
+ runtime.SetFinalizer(r, (*Receiver[Elem]).finalize)
+ return s, r
+}
+
+// A Sender is used to send values to a Receiver.
+type Sender[Elem any] struct {
+ values chan<- Elem
+ done <-chan struct{}
+}
+
+// Send sends a value to the receiver. It reports whether the value was sent.
+// The value will not be sent if the context is canceled or the receiver
+// is freed.
+func (s *Sender[Elem]) Send(ctx context.Context, v Elem) bool {
+ select {
+ case <-ctx.Done():
+ return false
+ case s.values <- v:
+ return true
+ case <-s.done:
+ return false
+ }
+}
+
+// Close tells the receiver that no more values will arrive.
+// After Close is called, the Sender may no longer be used.
+func (s *Sender[Elem]) Close() {
+ close(s.values)
+}
+
+// A Receiver receives values from a Sender.
+type Receiver[Elem any] struct {
+ values <-chan Elem
+ done chan<- struct{}
+}
+
+// Next returns the next value from the channel. The bool result indicates
+// whether the value is valid.
+func (r *Receiver[Elem]) Next(ctx context.Context) (v Elem, ok bool) {
+ select {
+ case <-ctx.Done():
+ case v, ok = <-r.values:
+ }
+ return v, ok
+}
+
+// finalize is a finalizer for the receiver.
+func (r *Receiver[Elem]) finalize() {
+ close(r.done)
+}
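Exclusive above is deliberately built on a channel with a one-element buffer rather than a mutex; the select-based implementation also gives TryAcquire and Release's misuse check essentially for free. For comparison only (not part of this change, and assuming "sync" is imported), a rough mutex-based counterpart would look like:

	// Hypothetical mutex-based counterpart to Exclusive.
	type MutexExclusive[Val any] struct {
		mu  sync.Mutex
		val Val
	}

	// Acquire locks the value for private use; it must be paired with Release.
	func (e *MutexExclusive[Val]) Acquire() Val {
		e.mu.Lock()
		return e.val
	}

	// Release stores the updated value and unlocks it.
	func (e *MutexExclusive[Val]) Release(v Val) {
		e.val = v
		e.mu.Unlock()
	}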
diff --git a/test/typeparam/chansimp.dir/main.go b/test/typeparam/chansimp.dir/main.go
new file mode 100644
index 0000000000..ca27167598
--- /dev/null
+++ b/test/typeparam/chansimp.dir/main.go
@@ -0,0 +1,189 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "context"
+ "fmt"
+ "runtime"
+ "sort"
+ "sync"
+ "time"
+)
+
+func TestReadAll() {
+ c := make(chan int)
+ go func() {
+ c <- 4
+ c <- 2
+ c <- 5
+ close(c)
+ }()
+ got := a.ReadAll(context.Background(), c)
+ want := []int{4, 2, 5}
+ if !a.SliceEqual(got, want) {
+ panic(fmt.Sprintf("ReadAll returned %v, want %v", got, want))
+ }
+}
+
+func TestMerge() {
+ c1 := make(chan int)
+ c2 := make(chan int)
+ go func() {
+ c1 <- 1
+ c1 <- 3
+ c1 <- 5
+ close(c1)
+ }()
+ go func() {
+ c2 <- 2
+ c2 <- 4
+ c2 <- 6
+ close(c2)
+ }()
+ ctx := context.Background()
+ got := a.ReadAll(ctx, a.Merge(ctx, c1, c2))
+ sort.Ints(got)
+ want := []int{1, 2, 3, 4, 5, 6}
+ if !a.SliceEqual(got, want) {
+ panic(fmt.Sprintf("Merge returned %v, want %v", got, want))
+ }
+}
+
+func TestFilter() {
+ c := make(chan int)
+ go func() {
+ c <- 1
+ c <- 2
+ c <- 3
+ close(c)
+ }()
+ even := func(i int) bool { return i%2 == 0 }
+ ctx := context.Background()
+ got := a.ReadAll(ctx, a.Filter(ctx, c, even))
+ want := []int{2}
+ if !a.SliceEqual(got, want) {
+ panic(fmt.Sprintf("Filter returned %v, want %v", got, want))
+ }
+}
+
+func TestSink() {
+ c := a.Sink[int](context.Background())
+ after := time.NewTimer(time.Minute)
+ defer after.Stop()
+ send := func(v int) {
+ select {
+ case c <- v:
+ case <-after.C:
+ panic("timed out sending to Sink")
+ }
+ }
+ send(1)
+ send(2)
+ send(3)
+ close(c)
+}
+
+func TestExclusive() {
+ val := 0
+ ex := a.MakeExclusive(&val)
+
+ var wg sync.WaitGroup
+ f := func() {
+ defer wg.Done()
+ for i := 0; i < 10; i++ {
+ p := ex.Acquire()
+ (*p)++
+ ex.Release(p)
+ }
+ }
+
+ wg.Add(2)
+ go f()
+ go f()
+
+ wg.Wait()
+ if val != 20 {
+ panic(fmt.Sprintf("after Acquire/Release loop got %d, want 20", val))
+ }
+}
+
+func TestExclusiveTry() {
+ s := ""
+ ex := a.MakeExclusive(&s)
+ p, ok := ex.TryAcquire()
+ if !ok {
+ panic("TryAcquire failed")
+ }
+ *p = "a"
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ _, ok := ex.TryAcquire()
+ if ok {
+ panic(fmt.Sprintf("TryAcquire succeeded unexpectedly"))
+ }
+ }()
+ wg.Wait()
+
+ ex.Release(p)
+
+ p, ok = ex.TryAcquire()
+ if !ok {
+ panic(fmt.Sprintf("TryAcquire failed"))
+ }
+}
+
+func TestRanger() {
+ s, r := a.Ranger[int]()
+
+ ctx := context.Background()
+ go func() {
+ // Receive one value then exit.
+ v, ok := r.Next(ctx)
+ if !ok {
+ panic(fmt.Sprintf("did not receive any values"))
+ } else if v != 1 {
+ panic(fmt.Sprintf("received %d, want 1", v))
+ }
+ }()
+
+ c1 := make(chan bool)
+ c2 := make(chan bool)
+ go func() {
+ defer close(c2)
+ if !s.Send(ctx, 1) {
+ panic(fmt.Sprintf("Send failed unexpectedly"))
+ }
+ close(c1)
+ if s.Send(ctx, 2) {
+ panic(fmt.Sprintf("Send succeeded unexpectedly"))
+ }
+ }()
+
+ <-c1
+
+ // Force a garbage collection to try to get the finalizers to run.
+ runtime.GC()
+
+ select {
+ case <-c2:
+ case <-time.After(time.Minute):
+ panic("Ranger Send should have failed, but timed out")
+ }
+}
+
+func main() {
+ TestReadAll()
+ TestMerge()
+ TestFilter()
+ TestSink()
+ TestExclusive()
+ TestExclusiveTry()
+ TestRanger()
+}
diff --git a/test/typeparam/chansimp.go b/test/typeparam/chansimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/chansimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/combine.go b/test/typeparam/combine.go
index d4a2988a7b..0e120cf242 100644
--- a/test/typeparam/combine.go
+++ b/test/typeparam/combine.go
@@ -10,9 +10,9 @@ import (
"fmt"
)
-type _Gen[A any] func() (A, bool)
+type Gen[A any] func() (A, bool)
-func combine[T1, T2, T any](g1 _Gen[T1], g2 _Gen[T2], join func(T1, T2) T) _Gen[T] {
+func Combine[T1, T2, T any](g1 Gen[T1], g2 Gen[T2], join func(T1, T2) T) Gen[T] {
return func() (T, bool) {
var t T
t1, ok := g1()
@@ -27,38 +27,38 @@ func combine[T1, T2, T any](g1 _Gen[T1], g2 _Gen[T2], join func(T1, T2) T) _Gen[
}
}
-type _Pair[A, B any] struct {
+type Pair[A, B any] struct {
A A
B B
}
-func _NewPair[A, B any](a A, b B) _Pair[A, B] {
- return _Pair[A, B]{a, b}
+func _NewPair[A, B any](a A, b B) Pair[A, B] {
+ return Pair[A, B]{a, b}
}
-func _Combine2[A, B any](ga _Gen[A], gb _Gen[B]) _Gen[_Pair[A, B]] {
- return combine(ga, gb, _NewPair[A, B])
+func Combine2[A, B any](ga Gen[A], gb Gen[B]) Gen[Pair[A, B]] {
+ return Combine(ga, gb, _NewPair[A, B])
}
func main() {
- var g1 _Gen[int] = func() (int, bool) { return 3, true }
- var g2 _Gen[string] = func() (string, bool) { return "x", false }
- var g3 _Gen[string] = func() (string, bool) { return "y", true }
+ var g1 Gen[int] = func() (int, bool) { return 3, true }
+ var g2 Gen[string] = func() (string, bool) { return "x", false }
+ var g3 Gen[string] = func() (string, bool) { return "y", true }
- gc := combine(g1, g2, _NewPair[int, string])
+ gc := Combine(g1, g2, _NewPair[int, string])
if got, ok := gc(); ok {
panic(fmt.Sprintf("got %v, %v, wanted -/false", got, ok))
}
- gc2 := _Combine2(g1, g2)
+ gc2 := Combine2(g1, g2)
if got, ok := gc2(); ok {
panic(fmt.Sprintf("got %v, %v, wanted -/false", got, ok))
}
- gc3 := combine(g1, g3, _NewPair[int, string])
+ gc3 := Combine(g1, g3, _NewPair[int, string])
if got, ok := gc3(); !ok || got.A != 3 || got.B != "y" {
panic(fmt.Sprintf("got %v, %v, wanted {3, y}, true", got, ok))
}
- gc4 := _Combine2(g1, g3)
+ gc4 := Combine2(g1, g3)
if got, ok := gc4(); !ok || got.A != 3 || got.B != "y" {
panic (fmt.Sprintf("got %v, %v, wanted {3, y}, true", got, ok))
}
diff --git a/test/typeparam/cons.go b/test/typeparam/cons.go
index 8d255ebdb8..f20514fb66 100644
--- a/test/typeparam/cons.go
+++ b/test/typeparam/cons.go
@@ -12,7 +12,7 @@ import "fmt"
// argument
type any interface{}
-type _Function[a, b any] interface {
+type Function[a, b any] interface {
Apply(x a) b
}
@@ -29,8 +29,8 @@ func (this pos) Apply(x int) bool {
}
type compose[a, b, c any] struct {
- f _Function[a, b]
- g _Function[b, c]
+ f Function[a, b]
+ g Function[b, c]
}
func (this compose[a, b, c]) Apply(x a) c {
@@ -47,52 +47,52 @@ func (this Int) Equal(that int) bool {
return int(this) == that
}
-type _List[a any] interface {
- Match(casenil _Function[_Nil[a], any], casecons _Function[_Cons[a], any]) any
+type List[a any] interface {
+ Match(casenil Function[Nil[a], any], casecons Function[Cons[a], any]) any
}
-type _Nil[a any] struct{
+type Nil[a any] struct{
}
-func (xs _Nil[a]) Match(casenil _Function[_Nil[a], any], casecons _Function[_Cons[a], any]) any {
+func (xs Nil[a]) Match(casenil Function[Nil[a], any], casecons Function[Cons[a], any]) any {
return casenil.Apply(xs)
}
-type _Cons[a any] struct {
+type Cons[a any] struct {
Head a
- Tail _List[a]
+ Tail List[a]
}
-func (xs _Cons[a]) Match(casenil _Function[_Nil[a], any], casecons _Function[_Cons[a], any]) any {
+func (xs Cons[a]) Match(casenil Function[Nil[a], any], casecons Function[Cons[a], any]) any {
return casecons.Apply(xs)
}
type mapNil[a, b any] struct{
}
-func (m mapNil[a, b]) Apply(_ _Nil[a]) any {
- return _Nil[b]{}
+func (m mapNil[a, b]) Apply(_ Nil[a]) any {
+ return Nil[b]{}
}
type mapCons[a, b any] struct {
- f _Function[a, b]
+ f Function[a, b]
}
-func (m mapCons[a, b]) Apply(xs _Cons[a]) any {
- return _Cons[b]{m.f.Apply(xs.Head), _Map[a, b](m.f, xs.Tail)}
+func (m mapCons[a, b]) Apply(xs Cons[a]) any {
+ return Cons[b]{m.f.Apply(xs.Head), Map[a, b](m.f, xs.Tail)}
}
-func _Map[a, b any](f _Function[a, b], xs _List[a]) _List[b] {
- return xs.Match(mapNil[a, b]{}, mapCons[a, b]{f}).(_List[b])
+func Map[a, b any](f Function[a, b], xs List[a]) List[b] {
+ return xs.Match(mapNil[a, b]{}, mapCons[a, b]{f}).(List[b])
}
func main() {
- var xs _List[int] = _Cons[int]{3, _Cons[int]{6, _Nil[int]{}}}
- var ys _List[int] = _Map[int, int](incr{-5}, xs)
- var xz _List[bool] = _Map[int, bool](pos{}, ys)
- cs1 := xz.(_Cons[bool])
- cs2 := cs1.Tail.(_Cons[bool])
- _, ok := cs2.Tail.(_Nil[bool])
+ var xs List[int] = Cons[int]{3, Cons[int]{6, Nil[int]{}}}
+ var ys List[int] = Map[int, int](incr{-5}, xs)
+ var xz List[bool] = Map[int, bool](pos{}, ys)
+ cs1 := xz.(Cons[bool])
+ cs2 := cs1.Tail.(Cons[bool])
+ _, ok := cs2.Tail.(Nil[bool])
if cs1.Head != false || cs2.Head != true || !ok {
panic(fmt.Sprintf("got %v, %v, %v, expected false, true, true",
cs1.Head, cs2.Head, ok))
diff --git a/test/typeparam/dictionaryCapture-noinline.go b/test/typeparam/dictionaryCapture-noinline.go
new file mode 100644
index 0000000000..4b46d5f57f
--- /dev/null
+++ b/test/typeparam/dictionaryCapture-noinline.go
@@ -0,0 +1,126 @@
+// run -gcflags="-G=3 -l"
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test situations where functions/methods are not
+// immediately called and we need to capture the dictionary
+// required for later invocation.
+
+package main
+
+func main() {
+ functions()
+ methodExpressions()
+ methodValues()
+ interfaceMethods()
+ globals()
+}
+
+func g0[T any](x T) {
+}
+func g1[T any](x T) T {
+ return x
+}
+func g2[T any](x T) (T, T) {
+ return x, x
+}
+
+func functions() {
+ f0 := g0[int]
+ f0(7)
+ f1 := g1[int]
+ is7(f1(7))
+ f2 := g2[int]
+ is77(f2(7))
+}
+
+func is7(x int) {
+ if x != 7 {
+ println(x)
+ panic("assertion failed")
+ }
+}
+func is77(x, y int) {
+ if x != 7 || y != 7 {
+ println(x, y)
+ panic("assertion failed")
+ }
+}
+
+type s[T any] struct {
+ a T
+}
+
+func (x s[T]) g0() {
+}
+func (x s[T]) g1() T {
+ return x.a
+}
+func (x s[T]) g2() (T, T) {
+ return x.a, x.a
+}
+
+func methodExpressions() {
+ x := s[int]{a:7}
+ f0 := s[int].g0
+ f0(x)
+ f1 := s[int].g1
+ is7(f1(x))
+ f2 := s[int].g2
+ is77(f2(x))
+}
+
+func methodValues() {
+ x := s[int]{a:7}
+ f0 := x.g0
+ f0()
+ f1 := x.g1
+ is7(f1())
+ f2 := x.g2
+ is77(f2())
+}
+
+var x interface{
+ g0()
+ g1()int
+ g2()(int,int)
+} = s[int]{a:7}
+var y interface{} = s[int]{a:7}
+
+func interfaceMethods() {
+ x.g0()
+ is7(x.g1())
+ is77(x.g2())
+ y.(interface{g0()}).g0()
+ is7(y.(interface{g1()int}).g1())
+ is77(y.(interface{g2()(int,int)}).g2())
+}
+
+// Also check for instantiations outside functions.
+var gg0 = g0[int]
+var gg1 = g1[int]
+var gg2 = g2[int]
+
+var hh0 = s[int].g0
+var hh1 = s[int].g1
+var hh2 = s[int].g2
+
+var xtop = s[int]{a:7}
+var ii0 = x.g0
+var ii1 = x.g1
+var ii2 = x.g2
+
+func globals() {
+ gg0(7)
+ is7(gg1(7))
+ is77(gg2(7))
+ x := s[int]{a:7}
+ hh0(x)
+ is7(hh1(x))
+ is77(hh2(x))
+ ii0()
+ is7(ii1())
+ is77(ii2())
+}
diff --git a/test/typeparam/dictionaryCapture.go b/test/typeparam/dictionaryCapture.go
new file mode 100644
index 0000000000..1b2ee1de91
--- /dev/null
+++ b/test/typeparam/dictionaryCapture.go
@@ -0,0 +1,126 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test situations where functions/methods are not
+// immediately called and we need to capture the dictionary
+// required for later invocation.
+
+package main
+
+func main() {
+ functions()
+ methodExpressions()
+ methodValues()
+ interfaceMethods()
+ globals()
+}
+
+func g0[T any](x T) {
+}
+func g1[T any](x T) T {
+ return x
+}
+func g2[T any](x T) (T, T) {
+ return x, x
+}
+
+func functions() {
+ f0 := g0[int]
+ f0(7)
+ f1 := g1[int]
+ is7(f1(7))
+ f2 := g2[int]
+ is77(f2(7))
+}
+
+func is7(x int) {
+ if x != 7 {
+ println(x)
+ panic("assertion failed")
+ }
+}
+func is77(x, y int) {
+ if x != 7 || y != 7 {
+ println(x, y)
+ panic("assertion failed")
+ }
+}
+
+type s[T any] struct {
+ a T
+}
+
+func (x s[T]) g0() {
+}
+func (x s[T]) g1() T {
+ return x.a
+}
+func (x s[T]) g2() (T, T) {
+ return x.a, x.a
+}
+
+func methodExpressions() {
+ x := s[int]{a:7}
+ f0 := s[int].g0
+ f0(x)
+ f1 := s[int].g1
+ is7(f1(x))
+ f2 := s[int].g2
+ is77(f2(x))
+}
+
+func methodValues() {
+ x := s[int]{a:7}
+ f0 := x.g0
+ f0()
+ f1 := x.g1
+ is7(f1())
+ f2 := x.g2
+ is77(f2())
+}
+
+var x interface{
+ g0()
+ g1()int
+ g2()(int,int)
+} = s[int]{a:7}
+var y interface{} = s[int]{a:7}
+
+func interfaceMethods() {
+ x.g0()
+ is7(x.g1())
+ is77(x.g2())
+ y.(interface{g0()}).g0()
+ is7(y.(interface{g1()int}).g1())
+ is77(y.(interface{g2()(int,int)}).g2())
+}
+
+// Also check for instantiations outside functions.
+var gg0 = g0[int]
+var gg1 = g1[int]
+var gg2 = g2[int]
+
+var hh0 = s[int].g0
+var hh1 = s[int].g1
+var hh2 = s[int].g2
+
+var xtop = s[int]{a:7}
+var ii0 = x.g0
+var ii1 = x.g1
+var ii2 = x.g2
+
+func globals() {
+ gg0(7)
+ is7(gg1(7))
+ is77(gg2(7))
+ x := s[int]{a:7}
+ hh0(x)
+ is7(hh1(x))
+ is77(hh2(x))
+ ii0()
+ is7(ii1())
+ is77(ii2())
+}
diff --git a/test/typeparam/double.go b/test/typeparam/double.go
index ce78ec9748..6652613814 100644
--- a/test/typeparam/double.go
+++ b/test/typeparam/double.go
@@ -12,7 +12,7 @@ import (
)
type Number interface {
- type int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr, float32, float64
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr | ~float32 | ~float64
}
type MySlice []int
diff --git a/test/typeparam/fact.go b/test/typeparam/fact.go
index 16b2adf6fb..baa7fbc68e 100644
--- a/test/typeparam/fact.go
+++ b/test/typeparam/fact.go
@@ -8,11 +8,11 @@ package main
import "fmt"
-func fact[T interface { type int, int64, float64 }](n T) T {
- if n == T(1) {
- return T(1)
+func fact[T interface { ~int | ~int64 | ~float64 }](n T) T {
+ if n == 1 {
+ return 1
}
- return n * fact(n - T(1))
+ return n * fact(n - 1)
}
func main() {
diff --git a/test/typeparam/factimp.dir/a.go b/test/typeparam/factimp.dir/a.go
new file mode 100644
index 0000000000..cb1ff2615b
--- /dev/null
+++ b/test/typeparam/factimp.dir/a.go
@@ -0,0 +1,12 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+func Fact[T interface { int | int64 | float64 }](n T) T {
+ if n == 1 {
+ return 1
+ }
+ return n * Fact(n - 1)
+}
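Note that Fact's constraint lists bare types with no ~, so only int, int64, and float64 themselves satisfy it; a defined type with one of those as its underlying type does not. Illustrative only (a hypothetical in-package snippet, not part of this change):

	type MyFloat float64

	func demoFact() {
		_ = Fact(5.0) // ok: float64 appears in the constraint
		// _ = Fact(MyFloat(5)) // does not compile: the constraint has no ~float64,
		//                      // so MyFloat does not satisfy it
	}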
diff --git a/test/typeparam/factimp.dir/main.go b/test/typeparam/factimp.dir/main.go
new file mode 100644
index 0000000000..c2238002ae
--- /dev/null
+++ b/test/typeparam/factimp.dir/main.go
@@ -0,0 +1,26 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+)
+
+func main() {
+ const want = 120
+
+ if got := a.Fact(5); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ if got := a.Fact[int64](5); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ if got := a.Fact(5.0); got != want {
+ panic(fmt.Sprintf("got %f, want %f", got, want))
+ }
+}
diff --git a/test/typeparam/factimp.go b/test/typeparam/factimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/factimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/ifaceconv.go b/test/typeparam/ifaceconv.go
new file mode 100644
index 0000000000..0b0776815c
--- /dev/null
+++ b/test/typeparam/ifaceconv.go
@@ -0,0 +1,58 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test that we can convert type parameters to both empty
+// and nonempty interfaces, and named and nonnamed versions
+// thereof.
+
+package main
+
+import "fmt"
+
+type E interface{}
+
+func f[T any](x T) interface{} {
+ var i interface{} = x
+ return i
+}
+func g[T any](x T) E {
+ var i E = x
+ return i
+}
+
+type C interface {
+ foo() int
+}
+
+type myInt int
+
+func (x myInt) foo() int {
+ return int(x+1)
+}
+
+func h[T C](x T) interface{foo() int} {
+ var i interface{foo()int} = x
+ return i
+}
+func i[T C](x T) C {
+ var i C = x
+ return i
+}
+
+func main() {
+ if got, want := f[int](7), 7; got != want {
+ panic(fmt.Sprintf("got %d want %d", got, want))
+ }
+ if got, want := g[int](7), 7; got != want {
+ panic(fmt.Sprintf("got %d want %d", got, want))
+ }
+ if got, want := h[myInt](7).foo(), 8; got != want {
+ panic(fmt.Sprintf("got %d want %d", got, want))
+ }
+ if got, want := i[myInt](7).foo(), 8; got != want {
+ panic(fmt.Sprintf("got %d want %d", got, want))
+ }
+}
diff --git a/test/typeparam/index.go b/test/typeparam/index.go
index 83e65acdd0..cb9b2613c3 100644
--- a/test/typeparam/index.go
+++ b/test/typeparam/index.go
@@ -11,7 +11,7 @@ import (
)
// Index returns the index of x in s, or -1 if not found.
-func index[T comparable](s []T, x T) int {
+func Index[T comparable](s []T, x T) int {
for i, v := range s {
// v and x are type T, which has the comparable
// constraint, so we can use == here.
@@ -30,17 +30,17 @@ func main() {
want := 2
vec1 := []string{"ab", "cd", "ef"}
- if got := index(vec1, "ef"); got != want {
+ if got := Index(vec1, "ef"); got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
vec2 := []byte{'c', '6', '@'}
- if got := index(vec2, '@'); got != want {
+ if got := Index(vec2, '@'); got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
vec3 := []*obj{&obj{2}, &obj{42}, &obj{1}}
- if got := index(vec3, vec3[2]); got != want {
+ if got := Index(vec3, vec3[2]); got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
}
diff --git a/test/typeparam/issue45817.go b/test/typeparam/issue45817.go
new file mode 100644
index 0000000000..744698f40b
--- /dev/null
+++ b/test/typeparam/issue45817.go
@@ -0,0 +1,25 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+)
+
+type s[T any] struct {
+ a T
+}
+func (x s[T]) f() T {
+ return x.a
+}
+func main() {
+ x := s[int]{a:7}
+ f := x.f
+ if got, want := f(), 7; got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+}
diff --git a/test/typeparam/issue46472.go b/test/typeparam/issue46472.go
new file mode 100644
index 0000000000..bab48e7d2f
--- /dev/null
+++ b/test/typeparam/issue46472.go
@@ -0,0 +1,20 @@
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func foo[T any](d T) {
+ switch v := interface{}(d).(type) {
+ case string:
+ if v != "x" {
+ panic("unexpected v: "+v)
+ }
+ }
+
+}
+func main() {
+ foo("x")
+}
diff --git a/test/typeparam/list.go b/test/typeparam/list.go
index 579078f02f..c63c9bff79 100644
--- a/test/typeparam/list.go
+++ b/test/typeparam/list.go
@@ -11,10 +11,10 @@ import (
)
type Ordered interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64,
- string
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~string
}
// _List is a linked list of ordered values of type T.
@@ -34,9 +34,9 @@ func (l *_List[T]) Largest() T {
}
type OrderedNum interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64
}
// _ListNum is a linked _List of ordered numeric values of type T.
diff --git a/test/typeparam/list2.go b/test/typeparam/list2.go
index 385193d876..32023cf319 100644
--- a/test/typeparam/list2.go
+++ b/test/typeparam/list2.go
@@ -597,5 +597,14 @@ func TestTransform() {
func main() {
TestList()
+ TestExtending()
+ TestRemove()
+ TestIssue4103()
+ TestIssue6349()
+ TestMove()
+ TestZeroList()
+ TestInsertBeforeUnknownMark()
+ TestInsertAfterUnknownMark()
+ TestTransform()
}
diff --git a/test/typeparam/listimp.dir/a.go b/test/typeparam/listimp.dir/a.go
new file mode 100644
index 0000000000..2b5b23cde3
--- /dev/null
+++ b/test/typeparam/listimp.dir/a.go
@@ -0,0 +1,53 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Ordered interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~string
+}
+
+// List is a linked list of ordered values of type T.
+type List[T Ordered] struct {
+ Next *List[T]
+ Val T
+}
+
+func (l *List[T]) Largest() T {
+ var max T
+ for p := l; p != nil; p = p.Next {
+ if p.Val > max {
+ max = p.Val
+ }
+ }
+ return max
+}
+
+type OrderedNum interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64
+}
+
+// ListNum is a linked list of ordered numeric values of type T.
+type ListNum[T OrderedNum] struct {
+ Next *ListNum[T]
+ Val T
+}
+
+const Clip = 5
+
+// ClippedLargest returns the largest element in the list that is less than Clip (5).
+func (l *ListNum[T]) ClippedLargest() T {
+ var max T
+ for p := l; p != nil; p = p.Next {
+ if p.Val > max && p.Val < Clip {
+ max = p.Val
+ }
+ }
+ return max
+}
diff --git a/test/typeparam/listimp.dir/main.go b/test/typeparam/listimp.dir/main.go
new file mode 100644
index 0000000000..d43ad508be
--- /dev/null
+++ b/test/typeparam/listimp.dir/main.go
@@ -0,0 +1,52 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+)
+
+func main() {
+ i3 := &a.List[int]{nil, 1}
+ i2 := &a.List[int]{i3, 3}
+ i1 := &a.List[int]{i2, 2}
+ if got, want := i1.Largest(), 3; got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ b3 := &a.List[byte]{nil, byte(1)}
+ b2 := &a.List[byte]{b3, byte(3)}
+ b1 := &a.List[byte]{b2, byte(2)}
+ if got, want := b1.Largest(), byte(3); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ f3 := &a.List[float64]{nil, 13.5}
+ f2 := &a.List[float64]{f3, 1.2}
+ f1 := &a.List[float64]{f2, 4.5}
+ if got, want := f1.Largest(), 13.5; got != want {
+ panic(fmt.Sprintf("got %f, want %f", got, want))
+ }
+
+ s3 := &a.List[string]{nil, "dd"}
+ s2 := &a.List[string]{s3, "aa"}
+ s1 := &a.List[string]{s2, "bb"}
+ if got, want := s1.Largest(), "dd"; got != want {
+ panic(fmt.Sprintf("got %s, want %s", got, want))
+ }
+ j3 := &a.ListNum[int]{nil, 1}
+ j2 := &a.ListNum[int]{j3, 32}
+ j1 := &a.ListNum[int]{j2, 2}
+ if got, want := j1.ClippedLargest(), 2; got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+ g3 := &a.ListNum[float64]{nil, 13.5}
+ g2 := &a.ListNum[float64]{g3, 1.2}
+ g1 := &a.ListNum[float64]{g2, 4.5}
+ if got, want := g1.ClippedLargest(), 4.5; got != want {
+ panic(fmt.Sprintf("got %f, want %f", got, want))
+ }
+}
diff --git a/test/typeparam/listimp.go b/test/typeparam/listimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/listimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/listimp2.dir/a.go b/test/typeparam/listimp2.dir/a.go
new file mode 100644
index 0000000000..76ad669767
--- /dev/null
+++ b/test/typeparam/listimp2.dir/a.go
@@ -0,0 +1,298 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+import (
+ "fmt"
+)
+
+// Element is an element of a linked list.
+type Element[T any] struct {
+ // Next and previous pointers in the doubly-linked list of elements.
+ // To simplify the implementation, internally a list l is implemented
+ // as a ring, such that &l.root is both the next element of the last
+ // list element (l.Back()) and the previous element of the first list
+ // element (l.Front()).
+ next, prev *Element[T]
+
+ // The list to which this element belongs.
+ list *List[T]
+
+ // The value stored with this element.
+ Value T
+}
+
+// Next returns the next list element or nil.
+func (e *Element[T]) Next() *Element[T] {
+ if p := e.next; e.list != nil && p != &e.list.root {
+ return p
+ }
+ return nil
+}
+
+// Prev returns the previous list element or nil.
+func (e *Element[T]) Prev() *Element[T] {
+ if p := e.prev; e.list != nil && p != &e.list.root {
+ return p
+ }
+ return nil
+}
+
+// List represents a doubly linked list.
+// The zero value for List is an empty list ready to use.
+type List[T any] struct {
+ root Element[T] // sentinel list element, only &root, root.prev, and root.next are used
+ len int // current list length excluding (this) sentinel element
+}
+
+// Init initializes or clears list l.
+func (l *List[T]) Init() *List[T] {
+ l.root.next = &l.root
+ l.root.prev = &l.root
+ l.len = 0
+ return l
+}
+
+// New returns an initialized list.
+func New[T any]() *List[T] { return new(List[T]).Init() }
+
+// Len returns the number of elements of list l.
+// The complexity is O(1).
+func (l *List[_]) Len() int { return l.len }
+
+// Front returns the first element of list l or nil if the list is empty.
+func (l *List[T]) Front() *Element[T] {
+ if l.len == 0 {
+ return nil
+ }
+ return l.root.next
+}
+
+// Back returns the last element of list l or nil if the list is empty.
+func (l *List[T]) Back() *Element[T] {
+ if l.len == 0 {
+ return nil
+ }
+ return l.root.prev
+}
+
+// lazyInit lazily initializes a zero List value.
+func (l *List[_]) lazyInit() {
+ if l.root.next == nil {
+ l.Init()
+ }
+}
+
+// insert inserts e after at, increments l.len, and returns e.
+func (l *List[T]) insert(e, at *Element[T]) *Element[T] {
+ e.prev = at
+ e.next = at.next
+ e.prev.next = e
+ e.next.prev = e
+ e.list = l
+ l.len++
+ return e
+}
+
+// insertValue is a convenience wrapper for insert(&Element[T]{Value: v}, at).
+func (l *List[T]) insertValue(v T, at *Element[T]) *Element[T] {
+ return l.insert(&Element[T]{Value: v}, at)
+}
+
+// remove removes e from its list, decrements l.len, and returns e.
+func (l *List[T]) remove(e *Element[T]) *Element[T] {
+ e.prev.next = e.next
+ e.next.prev = e.prev
+ e.next = nil // avoid memory leaks
+ e.prev = nil // avoid memory leaks
+ e.list = nil
+ l.len--
+ return e
+}
+
+// move moves e to next to at and returns e.
+func (l *List[T]) move(e, at *Element[T]) *Element[T] {
+ if e == at {
+ return e
+ }
+ e.prev.next = e.next
+ e.next.prev = e.prev
+
+ e.prev = at
+ e.next = at.next
+ e.prev.next = e
+ e.next.prev = e
+
+ return e
+}
+
+// Remove removes e from l if e is an element of list l.
+// It returns the element value e.Value.
+// The element must not be nil.
+func (l *List[T]) Remove(e *Element[T]) T {
+ if e.list == l {
+ // if e.list == l, l must have been initialized when e was inserted
+ // in l or l == nil (e is a zero Element) and l.remove will crash
+ l.remove(e)
+ }
+ return e.Value
+}
+
+// PushFront inserts a new element e with value v at the front of list l and returns e.
+func (l *List[T]) PushFront(v T) *Element[T] {
+ l.lazyInit()
+ return l.insertValue(v, &l.root)
+}
+
+// PushBack inserts a new element e with value v at the back of list l and returns e.
+func (l *List[T]) PushBack(v T) *Element[T] {
+ l.lazyInit()
+ return l.insertValue(v, l.root.prev)
+}
+
+// InsertBefore inserts a new element e with value v immediately before mark and returns e.
+// If mark is not an element of l, the list is not modified.
+// The mark must not be nil.
+func (l *List[T]) InsertBefore(v T, mark *Element[T]) *Element[T] {
+ if mark.list != l {
+ return nil
+ }
+ // see comment in List.Remove about initialization of l
+ return l.insertValue(v, mark.prev)
+}
+
+// InsertAfter inserts a new element e with value v immediately after mark and returns e.
+// If mark is not an element of l, the list is not modified.
+// The mark must not be nil.
+func (l *List[T]) InsertAfter(v T, mark *Element[T]) *Element[T] {
+ if mark.list != l {
+ return nil
+ }
+ // see comment in List.Remove about initialization of l
+ return l.insertValue(v, mark)
+}
+
+// MoveToFront moves element e to the front of list l.
+// If e is not an element of l, the list is not modified.
+// The element must not be nil.
+func (l *List[T]) MoveToFront(e *Element[T]) {
+ if e.list != l || l.root.next == e {
+ return
+ }
+ // see comment in List.Remove about initialization of l
+ l.move(e, &l.root)
+}
+
+// MoveToBack moves element e to the back of list l.
+// If e is not an element of l, the list is not modified.
+// The element must not be nil.
+func (l *List[T]) MoveToBack(e *Element[T]) {
+ if e.list != l || l.root.prev == e {
+ return
+ }
+ // see comment in List.Remove about initialization of l
+ l.move(e, l.root.prev)
+}
+
+// MoveBefore moves element e to its new position before mark.
+// If e or mark is not an element of l, or e == mark, the list is not modified.
+// The element and mark must not be nil.
+func (l *List[T]) MoveBefore(e, mark *Element[T]) {
+ if e.list != l || e == mark || mark.list != l {
+ return
+ }
+ l.move(e, mark.prev)
+}
+
+// MoveAfter moves element e to its new position after mark.
+// If e or mark is not an element of l, or e == mark, the list is not modified.
+// The element and mark must not be nil.
+func (l *List[T]) MoveAfter(e, mark *Element[T]) {
+ if e.list != l || e == mark || mark.list != l {
+ return
+ }
+ l.move(e, mark)
+}
+
+// PushBackList inserts a copy of another list at the back of list l.
+// The lists l and other may be the same. They must not be nil.
+func (l *List[T]) PushBackList(other *List[T]) {
+ l.lazyInit()
+ for i, e := other.Len(), other.Front(); i > 0; i, e = i-1, e.Next() {
+ l.insertValue(e.Value, l.root.prev)
+ }
+}
+
+// PushFrontList inserts a copy of another list at the front of list l.
+// The lists l and other may be the same. They must not be nil.
+func (l *List[T]) PushFrontList(other *List[T]) {
+ l.lazyInit()
+ for i, e := other.Len(), other.Back(); i > 0; i, e = i-1, e.Prev() {
+ l.insertValue(e.Value, &l.root)
+ }
+}
+
+// Transform runs a transform function on a list returning a new list.
+func Transform[TElem1, TElem2 any](lst *List[TElem1], f func(TElem1) TElem2) *List[TElem2] {
+ ret := New[TElem2]()
+ for p := lst.Front(); p != nil; p = p.Next() {
+ ret.PushBack(f(p.Value))
+ }
+ return ret
+}
+
+func CheckListLen[T any](l *List[T], len int) bool {
+ if n := l.Len(); n != len {
+ panic(fmt.Sprintf("l.Len() = %d, want %d", n, len))
+ return false
+ }
+ return true
+}
+
+func CheckListPointers[T any](l *List[T], es []*Element[T]) {
+ root := &l.root
+
+ if !CheckListLen(l, len(es)) {
+ return
+ }
+
+ // zero length lists must be the zero value or properly initialized (sentinel circle)
+ if len(es) == 0 {
+ if l.root.next != nil && l.root.next != root || l.root.prev != nil && l.root.prev != root {
+ panic(fmt.Sprintf("l.root.next = %p, l.root.prev = %p; both should be nil or %p", l.root.next, l.root.prev, root))
+ }
+ return
+ }
+ // len(es) > 0
+
+ // check internal and external prev/next connections
+ for i, e := range es {
+ prev := root
+ Prev := (*Element[T])(nil)
+ if i > 0 {
+ prev = es[i-1]
+ Prev = prev
+ }
+ if p := e.prev; p != prev {
+ panic(fmt.Sprintf("elt[%d](%p).prev = %p, want %p", i, e, p, prev))
+ }
+ if p := e.Prev(); p != Prev {
+ panic(fmt.Sprintf("elt[%d](%p).Prev() = %p, want %p", i, e, p, Prev))
+ }
+
+ next := root
+ Next := (*Element[T])(nil)
+ if i < len(es)-1 {
+ next = es[i+1]
+ Next = next
+ }
+ if n := e.next; n != next {
+ panic(fmt.Sprintf("elt[%d](%p).next = %p, want %p", i, e, n, next))
+ }
+ if n := e.Next(); n != Next {
+ panic(fmt.Sprintf("elt[%d](%p).Next() = %p, want %p", i, e, n, Next))
+ }
+ }
+}
diff --git a/test/typeparam/listimp2.dir/main.go b/test/typeparam/listimp2.dir/main.go
new file mode 100644
index 0000000000..0c2c38e399
--- /dev/null
+++ b/test/typeparam/listimp2.dir/main.go
@@ -0,0 +1,316 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+ "strconv"
+)
+
+func TestList() {
+ l := a.New[string]()
+ a.CheckListPointers(l, []*(a.Element[string]){})
+
+ // Single element list
+ e := l.PushFront("a")
+ a.CheckListPointers(l, []*(a.Element[string]){e})
+ l.MoveToFront(e)
+ a.CheckListPointers(l, []*(a.Element[string]){e})
+ l.MoveToBack(e)
+ a.CheckListPointers(l, []*(a.Element[string]){e})
+ l.Remove(e)
+ a.CheckListPointers(l, []*(a.Element[string]){})
+
+ // Bigger list
+ l2 := a.New[int]()
+ e2 := l2.PushFront(2)
+ e1 := l2.PushFront(1)
+ e3 := l2.PushBack(3)
+ e4 := l2.PushBack(600)
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e2, e3, e4})
+
+ l2.Remove(e2)
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e3, e4})
+
+ l2.MoveToFront(e3) // move from middle
+ a.CheckListPointers(l2, []*(a.Element[int]){e3, e1, e4})
+
+ l2.MoveToFront(e1)
+ l2.MoveToBack(e3) // move from middle
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e4, e3})
+
+ l2.MoveToFront(e3) // move from back
+ a.CheckListPointers(l2, []*(a.Element[int]){e3, e1, e4})
+ l2.MoveToFront(e3) // should be no-op
+ a.CheckListPointers(l2, []*(a.Element[int]){e3, e1, e4})
+
+ l2.MoveToBack(e3) // move from front
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e4, e3})
+ l2.MoveToBack(e3) // should be no-op
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e4, e3})
+
+ e2 = l2.InsertBefore(2, e1) // insert before front
+ a.CheckListPointers(l2, []*(a.Element[int]){e2, e1, e4, e3})
+ l2.Remove(e2)
+ e2 = l2.InsertBefore(2, e4) // insert before middle
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e2, e4, e3})
+ l2.Remove(e2)
+ e2 = l2.InsertBefore(2, e3) // insert before back
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e4, e2, e3})
+ l2.Remove(e2)
+
+ e2 = l2.InsertAfter(2, e1) // insert after front
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e2, e4, e3})
+ l2.Remove(e2)
+ e2 = l2.InsertAfter(2, e4) // insert after middle
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e4, e2, e3})
+ l2.Remove(e2)
+ e2 = l2.InsertAfter(2, e3) // insert after back
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e4, e3, e2})
+ l2.Remove(e2)
+
+ // Check standard iteration.
+ sum := 0
+ for e := l2.Front(); e != nil; e = e.Next() {
+ sum += e.Value
+ }
+ if sum != 604 {
+ panic(fmt.Sprintf("sum over l = %d, want 604", sum))
+ }
+
+ // Clear all elements by iterating
+ var next *a.Element[int]
+ for e := l2.Front(); e != nil; e = next {
+ next = e.Next()
+ l2.Remove(e)
+ }
+ a.CheckListPointers(l2, []*(a.Element[int]){})
+}
+
+func checkList[T comparable](l *a.List[T], es []interface{}) {
+ if !a.CheckListLen(l, len(es)) {
+ return
+ }
+
+ i := 0
+ for e := l.Front(); e != nil; e = e.Next() {
+ le := e.Value
+ // Comparison between a generically-typed variable le and an interface.
+ if le != es[i] {
+ panic(fmt.Sprintf("elt[%d].Value = %v, want %v", i, le, es[i]))
+ }
+ i++
+ }
+}
+
+func TestExtending() {
+ l1 := a.New[int]()
+ l2 := a.New[int]()
+
+ l1.PushBack(1)
+ l1.PushBack(2)
+ l1.PushBack(3)
+
+ l2.PushBack(4)
+ l2.PushBack(5)
+
+ l3 := a.New[int]()
+ l3.PushBackList(l1)
+ checkList(l3, []interface{}{1, 2, 3})
+ l3.PushBackList(l2)
+ checkList(l3, []interface{}{1, 2, 3, 4, 5})
+
+ l3 = a.New[int]()
+ l3.PushFrontList(l2)
+ checkList(l3, []interface{}{4, 5})
+ l3.PushFrontList(l1)
+ checkList(l3, []interface{}{1, 2, 3, 4, 5})
+
+ checkList(l1, []interface{}{1, 2, 3})
+ checkList(l2, []interface{}{4, 5})
+
+ l3 = a.New[int]()
+ l3.PushBackList(l1)
+ checkList(l3, []interface{}{1, 2, 3})
+ l3.PushBackList(l3)
+ checkList(l3, []interface{}{1, 2, 3, 1, 2, 3})
+
+ l3 = a.New[int]()
+ l3.PushFrontList(l1)
+ checkList(l3, []interface{}{1, 2, 3})
+ l3.PushFrontList(l3)
+ checkList(l3, []interface{}{1, 2, 3, 1, 2, 3})
+
+ l3 = a.New[int]()
+ l1.PushBackList(l3)
+ checkList(l1, []interface{}{1, 2, 3})
+ l1.PushFrontList(l3)
+ checkList(l1, []interface{}{1, 2, 3})
+}
+
+func TestRemove() {
+ l := a.New[int]()
+ e1 := l.PushBack(1)
+ e2 := l.PushBack(2)
+ a.CheckListPointers(l, []*(a.Element[int]){e1, e2})
+ e := l.Front()
+ l.Remove(e)
+ a.CheckListPointers(l, []*(a.Element[int]){e2})
+ l.Remove(e)
+ a.CheckListPointers(l, []*(a.Element[int]){e2})
+}
+
+func TestIssue4103() {
+ l1 := a.New[int]()
+ l1.PushBack(1)
+ l1.PushBack(2)
+
+ l2 := a.New[int]()
+ l2.PushBack(3)
+ l2.PushBack(4)
+
+ e := l1.Front()
+ l2.Remove(e) // l2 should not change because e is not an element of l2
+ if n := l2.Len(); n != 2 {
+ panic(fmt.Sprintf("l2.Len() = %d, want 2", n))
+ }
+
+ l1.InsertBefore(8, e)
+ if n := l1.Len(); n != 3 {
+ panic(fmt.Sprintf("l1.Len() = %d, want 3", n))
+ }
+}
+
+func TestIssue6349() {
+ l := a.New[int]()
+ l.PushBack(1)
+ l.PushBack(2)
+
+ e := l.Front()
+ l.Remove(e)
+ if e.Value != 1 {
+ panic(fmt.Sprintf("e.value = %d, want 1", e.Value))
+ }
+ if e.Next() != nil {
+ panic(fmt.Sprintf("e.Next() != nil"))
+ }
+ if e.Prev() != nil {
+ panic(fmt.Sprintf("e.Prev() != nil"))
+ }
+}
+
+func TestMove() {
+ l := a.New[int]()
+ e1 := l.PushBack(1)
+ e2 := l.PushBack(2)
+ e3 := l.PushBack(3)
+ e4 := l.PushBack(4)
+
+ l.MoveAfter(e3, e3)
+ a.CheckListPointers(l, []*(a.Element[int]){e1, e2, e3, e4})
+ l.MoveBefore(e2, e2)
+ a.CheckListPointers(l, []*(a.Element[int]){e1, e2, e3, e4})
+
+ l.MoveAfter(e3, e2)
+ a.CheckListPointers(l, []*(a.Element[int]){e1, e2, e3, e4})
+ l.MoveBefore(e2, e3)
+ a.CheckListPointers(l, []*(a.Element[int]){e1, e2, e3, e4})
+
+ l.MoveBefore(e2, e4)
+ a.CheckListPointers(l, []*(a.Element[int]){e1, e3, e2, e4})
+ e2, e3 = e3, e2
+
+ l.MoveBefore(e4, e1)
+ a.CheckListPointers(l, []*(a.Element[int]){e4, e1, e2, e3})
+ e1, e2, e3, e4 = e4, e1, e2, e3
+
+ l.MoveAfter(e4, e1)
+ a.CheckListPointers(l, []*(a.Element[int]){e1, e4, e2, e3})
+ e2, e3, e4 = e4, e2, e3
+
+ l.MoveAfter(e2, e3)
+ a.CheckListPointers(l, []*(a.Element[int]){e1, e3, e2, e4})
+ e2, e3 = e3, e2
+}
+
+// Test PushFront, PushBack, PushFrontList, PushBackList with uninitialized a.List
+func TestZeroList() {
+ var l1 = new(a.List[int])
+ l1.PushFront(1)
+ checkList(l1, []interface{}{1})
+
+ var l2 = new(a.List[int])
+ l2.PushBack(1)
+ checkList(l2, []interface{}{1})
+
+ var l3 = new(a.List[int])
+ l3.PushFrontList(l1)
+ checkList(l3, []interface{}{1})
+
+ var l4 = new(a.List[int])
+ l4.PushBackList(l2)
+ checkList(l4, []interface{}{1})
+}
+
+// Test that a list l is not modified when calling InsertBefore with a mark that is not an element of l.
+func TestInsertBeforeUnknownMark() {
+ var l a.List[int]
+ l.PushBack(1)
+ l.PushBack(2)
+ l.PushBack(3)
+ l.InsertBefore(1, new(a.Element[int]))
+ checkList(&l, []interface{}{1, 2, 3})
+}
+
+// Test that a list l is not modified when calling InsertAfter with a mark that is not an element of l.
+func TestInsertAfterUnknownMark() {
+ var l a.List[int]
+ l.PushBack(1)
+ l.PushBack(2)
+ l.PushBack(3)
+ l.InsertAfter(1, new(a.Element[int]))
+ checkList(&l, []interface{}{1, 2, 3})
+}
+
+// Test that a list l is not modified when calling MoveAfter or MoveBefore with a mark that is not an element of l.
+func TestMoveUnknownMark() {
+ var l1 a.List[int]
+ e1 := l1.PushBack(1)
+
+ var l2 a.List[int]
+ e2 := l2.PushBack(2)
+
+ l1.MoveAfter(e1, e2)
+ checkList(&l1, []interface{}{1})
+ checkList(&l2, []interface{}{2})
+
+ l1.MoveBefore(e1, e2)
+ checkList(&l1, []interface{}{1})
+ checkList(&l2, []interface{}{2})
+}
+
+// Test the Transform function.
+func TestTransform() {
+ l1 := a.New[int]()
+ l1.PushBack(1)
+ l1.PushBack(2)
+ l2 := a.Transform(l1, strconv.Itoa)
+ checkList(l2, []interface{}{"1", "2"})
+}
+
+func main() {
+ TestList()
+ TestExtending()
+ TestRemove()
+ TestIssue4103()
+ TestIssue6349()
+ TestMove()
+ TestZeroList()
+ TestInsertBeforeUnknownMark()
+ TestInsertAfterUnknownMark()
+ TestTransform()
+}
diff --git a/test/typeparam/listimp2.go b/test/typeparam/listimp2.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/listimp2.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/lockable.go b/test/typeparam/lockable.go
index d53817521f..3a03652cd8 100644
--- a/test/typeparam/lockable.go
+++ b/test/typeparam/lockable.go
@@ -8,29 +8,29 @@ package main
import "sync"
-// A _Lockable is a value that may be safely simultaneously accessed
+// A Lockable is a value that may be safely simultaneously accessed
// from multiple goroutines via the Get and Set methods.
-type _Lockable[T any] struct {
+type Lockable[T any] struct {
T
mu sync.Mutex
}
-// Get returns the value stored in a _Lockable.
-func (l *_Lockable[T]) get() T {
+// Get returns the value stored in a Lockable.
+func (l *Lockable[T]) get() T {
l.mu.Lock()
defer l.mu.Unlock()
return l.T
}
-// set sets the value in a _Lockable.
-func (l *_Lockable[T]) set(v T) {
+// set sets the value in a Lockable.
+func (l *Lockable[T]) set(v T) {
l.mu.Lock()
defer l.mu.Unlock()
l.T = v
}
func main() {
- sl := _Lockable[string]{T: "a"}
+ sl := Lockable[string]{T: "a"}
if got := sl.get(); got != "a" {
panic(got)
}
@@ -39,7 +39,7 @@ func main() {
panic(got)
}
- il := _Lockable[int]{T: 1}
+ il := Lockable[int]{T: 1}
if got := il.get(); got != 1 {
panic(got)
}
diff --git a/test/typeparam/mapimp.dir/a.go b/test/typeparam/mapimp.dir/a.go
new file mode 100644
index 0000000000..6835e214b8
--- /dev/null
+++ b/test/typeparam/mapimp.dir/a.go
@@ -0,0 +1,15 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+// Mapper calls the function f on every element of the slice s,
+// returning a new slice of the results.
+func Mapper[F, T any](s []F, f func(F) T) []T {
+ r := make([]T, len(s))
+ for i, v := range s {
+ r[i] = f(v)
+ }
+ return r
+}
diff --git a/test/typeparam/mapimp.dir/main.go b/test/typeparam/mapimp.dir/main.go
new file mode 100644
index 0000000000..4d4a4d9eb0
--- /dev/null
+++ b/test/typeparam/mapimp.dir/main.go
@@ -0,0 +1,28 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+ "reflect"
+ "strconv"
+)
+
+func main() {
+ got := a.Mapper([]int{1, 2, 3}, strconv.Itoa)
+ want := []string{"1", "2", "3"}
+ if !reflect.DeepEqual(got, want) {
+ panic(fmt.Sprintf("got %s, want %s", got, want))
+ }
+
+ fgot := a.Mapper([]float64{2.5, 2.3, 3.5}, func(f float64) string {
+ return strconv.FormatFloat(f, 'f', -1, 64)
+ })
+ fwant := []string{"2.5", "2.3", "3.5"}
+ if !reflect.DeepEqual(fgot, fwant) {
+ panic(fmt.Sprintf("got %s, want %s", fgot, fwant))
+ }
+}
diff --git a/test/typeparam/mapimp.go b/test/typeparam/mapimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/mapimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/mapsimp.dir/a.go b/test/typeparam/mapsimp.dir/a.go
new file mode 100644
index 0000000000..696e2a5680
--- /dev/null
+++ b/test/typeparam/mapsimp.dir/a.go
@@ -0,0 +1,108 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+// SliceEqual reports whether two slices are equal: the same length and all
+// elements equal. All floating point NaNs are considered equal.
+func SliceEqual[Elem comparable](s1, s2 []Elem) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if v1 != v2 {
+ isNaN := func(f Elem) bool { return f != f }
+ if !isNaN(v1) || !isNaN(v2) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Keys returns the keys of the map m.
+// The keys will be in an indeterminate order.
+func Keys[K comparable, V any](m map[K]V) []K {
+ r := make([]K, 0, len(m))
+ for k := range m {
+ r = append(r, k)
+ }
+ return r
+}
+
+// Values returns the values of the map m.
+// The values will be in an indeterminate order.
+func Values[K comparable, V any](m map[K]V) []V {
+ r := make([]V, 0, len(m))
+ for _, v := range m {
+ r = append(r, v)
+ }
+ return r
+}
+
+// Equal reports whether two maps contain the same key/value pairs.
+// Values are compared using ==.
+func Equal[K, V comparable](m1, m2 map[K]V) bool {
+ if len(m1) != len(m2) {
+ return false
+ }
+ for k, v1 := range m1 {
+ if v2, ok := m2[k]; !ok || v1 != v2 {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy returns a copy of m.
+func Copy[K comparable, V any](m map[K]V) map[K]V {
+ r := make(map[K]V, len(m))
+ for k, v := range m {
+ r[k] = v
+ }
+ return r
+}
+
+// Add adds all key/value pairs in m2 to m1. Keys in m2 that are already
+// present in m1 will be overwritten with the value in m2.
+func Add[K comparable, V any](m1, m2 map[K]V) {
+ for k, v := range m2 {
+ m1[k] = v
+ }
+}
+
+// Sub removes all keys in m2 from m1. Keys in m2 that are not present
+// in m1 are ignored. The values in m2 are ignored.
+func Sub[K comparable, V any](m1, m2 map[K]V) {
+ for k := range m2 {
+ delete(m1, k)
+ }
+}
+
+// Intersect removes all keys from m1 that are not present in m2.
+// Keys in m2 that are not in m1 are ignored. The values in m2 are ignored.
+func Intersect[K comparable, V any](m1, m2 map[K]V) {
+ for k := range m1 {
+ if _, ok := m2[k]; !ok {
+ delete(m1, k)
+ }
+ }
+}
+
+// Filter deletes any key/value pairs from m for which f returns false.
+func Filter[K comparable, V any](m map[K]V, f func(K, V) bool) {
+ for k, v := range m {
+ if !f(k, v) {
+ delete(m, k)
+ }
+ }
+}
+
+// TransformValues applies f to each value in m. The keys remain unchanged.
+func TransformValues[K comparable, V any](m map[K]V, f func(V) V) {
+ for k, v := range m {
+ m[k] = f(v)
+ }
+}
diff --git a/test/typeparam/mapsimp.dir/main.go b/test/typeparam/mapsimp.dir/main.go
new file mode 100644
index 0000000000..873660e4cd
--- /dev/null
+++ b/test/typeparam/mapsimp.dir/main.go
@@ -0,0 +1,156 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+ "math"
+ "sort"
+)
+
+var m1 = map[int]int{1: 2, 2: 4, 4: 8, 8: 16}
+var m2 = map[int]string{1: "2", 2: "4", 4: "8", 8: "16"}
+
+func TestKeys() {
+ want := []int{1, 2, 4, 8}
+
+ got1 := a.Keys(m1)
+ sort.Ints(got1)
+ if !a.SliceEqual(got1, want) {
+ panic(fmt.Sprintf("a.Keys(%v) = %v, want %v", m1, got1, want))
+ }
+
+ got2 := a.Keys(m2)
+ sort.Ints(got2)
+ if !a.SliceEqual(got2, want) {
+ panic(fmt.Sprintf("a.Keys(%v) = %v, want %v", m2, got2, want))
+ }
+}
+
+func TestValues() {
+ got1 := a.Values(m1)
+ want1 := []int{2, 4, 8, 16}
+ sort.Ints(got1)
+ if !a.SliceEqual(got1, want1) {
+ panic(fmt.Sprintf("a.Values(%v) = %v, want %v", m1, got1, want1))
+ }
+
+ got2 := a.Values(m2)
+ want2 := []string{"16", "2", "4", "8"}
+ sort.Strings(got2)
+ if !a.SliceEqual(got2, want2) {
+ panic(fmt.Sprintf("a.Values(%v) = %v, want %v", m2, got2, want2))
+ }
+}
+
+func TestEqual() {
+ if !a.Equal(m1, m1) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = false, want true", m1, m1))
+ }
+ if a.Equal(m1, nil) {
+ panic(fmt.Sprintf("a.Equal(%v, nil) = true, want false", m1))
+ }
+ if a.Equal(nil, m1) {
+ panic(fmt.Sprintf("a.Equal(nil, %v) = true, want false", m1))
+ }
+ if !a.Equal[int, int](nil, nil) {
+ panic("a.Equal(nil, nil) = false, want true")
+ }
+ if ms := map[int]int{1: 2}; a.Equal(m1, ms) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = true, want false", m1, ms))
+ }
+
+ // Comparing NaN for equality is expected to fail.
+ mf := map[int]float64{1: 0, 2: math.NaN()}
+ if a.Equal(mf, mf) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = true, want false", mf, mf))
+ }
+}
+
+func TestCopy() {
+ m2 := a.Copy(m1)
+ if !a.Equal(m1, m2) {
+ panic(fmt.Sprintf("a.Copy(%v) = %v, want %v", m1, m2, m1))
+ }
+ m2[16] = 32
+ if a.Equal(m1, m2) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = true, want false", m1, m2))
+ }
+}
+
+func TestAdd() {
+ mc := a.Copy(m1)
+ a.Add(mc, mc)
+ if !a.Equal(mc, m1) {
+ panic(fmt.Sprintf("a.Add(%v, %v) = %v, want %v", m1, m1, mc, m1))
+ }
+ a.Add(mc, map[int]int{16: 32})
+ want := map[int]int{1: 2, 2: 4, 4: 8, 8: 16, 16: 32}
+ if !a.Equal(mc, want) {
+ panic(fmt.Sprintf("a.Add result = %v, want %v", mc, want))
+ }
+}
+
+func TestSub() {
+ mc := a.Copy(m1)
+ a.Sub(mc, mc)
+ if len(mc) > 0 {
+ panic(fmt.Sprintf("a.Sub(%v, %v) = %v, want empty map", m1, m1, mc))
+ }
+ mc = a.Copy(m1)
+ a.Sub(mc, map[int]int{1: 0})
+ want := map[int]int{2: 4, 4: 8, 8: 16}
+ if !a.Equal(mc, want) {
+ panic(fmt.Sprintf("a.Sub result = %v, want %v", mc, want))
+ }
+}
+
+func TestIntersect() {
+ mc := a.Copy(m1)
+ a.Intersect(mc, mc)
+ if !a.Equal(mc, m1) {
+ panic(fmt.Sprintf("a.Intersect(%v, %v) = %v, want %v", m1, m1, mc, m1))
+ }
+ a.Intersect(mc, map[int]int{1: 0, 2: 0})
+ want := map[int]int{1: 2, 2: 4}
+ if !a.Equal(mc, want) {
+ panic(fmt.Sprintf("a.Intersect result = %v, want %v", mc, want))
+ }
+}
+
+func TestFilter() {
+ mc := a.Copy(m1)
+ a.Filter(mc, func(int, int) bool { return true })
+ if !a.Equal(mc, m1) {
+ panic(fmt.Sprintf("a.Filter(%v, true) = %v, want %v", m1, mc, m1))
+ }
+ a.Filter(mc, func(k, v int) bool { return k < 3 })
+ want := map[int]int{1: 2, 2: 4}
+ if !a.Equal(mc, want) {
+ panic(fmt.Sprintf("a.Filter result = %v, want %v", mc, want))
+ }
+}
+
+func TestTransformValues() {
+ mc := a.Copy(m1)
+ a.TransformValues(mc, func(i int) int { return i / 2 })
+ want := map[int]int{1: 1, 2: 2, 4: 4, 8: 8}
+ if !a.Equal(mc, want) {
+ panic(fmt.Sprintf("a.TransformValues result = %v, want %v", mc, want))
+ }
+}
+
+func main() {
+ TestKeys()
+ TestValues()
+ TestEqual()
+ TestCopy()
+ TestAdd()
+ TestSub()
+ TestIntersect()
+ TestFilter()
+ TestTransformValues()
+}
diff --git a/test/typeparam/mapsimp.go b/test/typeparam/mapsimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/mapsimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/min.go b/test/typeparam/min.go
index a3e4464a30..d6c65d68b7 100644
--- a/test/typeparam/min.go
+++ b/test/typeparam/min.go
@@ -11,7 +11,7 @@ import (
)
type Ordered interface {
- type int, int64, float64
+ ~int | ~int64 | ~float64 | ~string
}
func min[T Ordered](x, y T) T {
@@ -38,4 +38,13 @@ func main() {
if got := min(3.5, 2.0); got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
+
+ const want2 = "ay"
+ if got := min[string]("bb", "ay"); got != want2 {
+ panic(fmt.Sprintf("got %d, want %d", got, want2))
+ }
+
+ if got := min("bb", "ay"); got != want2 {
+ panic(fmt.Sprintf("got %d, want %d", got, want2))
+ }
}
diff --git a/test/typeparam/mincheck.dir/a.go b/test/typeparam/mincheck.dir/a.go
new file mode 100644
index 0000000000..7d42492b74
--- /dev/null
+++ b/test/typeparam/mincheck.dir/a.go
@@ -0,0 +1,16 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Ordered interface {
+ int | int64 | float64
+}
+
+func Min[T Ordered](x, y T) T {
+ if x < y {
+ return x
+ }
+ return y
+}
diff --git a/test/typeparam/mincheck.dir/main.go b/test/typeparam/mincheck.dir/main.go
new file mode 100644
index 0000000000..72d8effcc5
--- /dev/null
+++ b/test/typeparam/mincheck.dir/main.go
@@ -0,0 +1,38 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+)
+
+func main() {
+ const want = 2
+ if got := a.Min[int](2, 3); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ if got := a.Min(2, 3); got != want {
+ panic(fmt.Sprintf("want %d, got %d", want, got))
+ }
+
+ if got := a.Min[float64](3.5, 2.0); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ if got := a.Min(3.5, 2.0); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ const want2 = "ay"
+ if got := a.Min[string]("bb", "ay"); got != want2 { // ERROR "string does not satisfy interface{int|int64|float64}"
+ panic(fmt.Sprintf("got %d, want %d", got, want2))
+ }
+
+ if got := a.Min("bb", "ay"); got != want2 { // ERROR "string does not satisfy interface{int|int64|float64}"
+ panic(fmt.Sprintf("got %d, want %d", got, want2))
+ }
+}
diff --git a/test/typeparam/mincheck.go b/test/typeparam/mincheck.go
new file mode 100644
index 0000000000..32cf4b830d
--- /dev/null
+++ b/test/typeparam/mincheck.go
@@ -0,0 +1,7 @@
+// errorcheckdir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/minimp.dir/a.go b/test/typeparam/minimp.dir/a.go
new file mode 100644
index 0000000000..6c3e0eba36
--- /dev/null
+++ b/test/typeparam/minimp.dir/a.go
@@ -0,0 +1,16 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Ordered interface {
+ ~int | ~int64 | ~float64 | ~string
+}
+
+func Min[T Ordered](x, y T) T {
+ if x < y {
+ return x
+ }
+ return y
+}
diff --git a/test/typeparam/minimp.dir/main.go b/test/typeparam/minimp.dir/main.go
new file mode 100644
index 0000000000..509f5aaed2
--- /dev/null
+++ b/test/typeparam/minimp.dir/main.go
@@ -0,0 +1,38 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+)
+
+func main() {
+ const want = 2
+ if got := a.Min[int](2, 3); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ if got := a.Min(2, 3); got != want {
+ panic(fmt.Sprintf("want %d, got %d", want, got))
+ }
+
+ if got := a.Min[float64](3.5, 2.0); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ if got := a.Min(3.5, 2.0); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ const want2 = "ay"
+ if got := a.Min[string]("bb", "ay"); got != want2 {
+ panic(fmt.Sprintf("got %d, want %d", got, want2))
+ }
+
+ if got := a.Min("bb", "ay"); got != want2 {
+ panic(fmt.Sprintf("got %d, want %d", got, want2))
+ }
+}
diff --git a/test/typeparam/minimp.go b/test/typeparam/minimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/minimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/mutualimp.dir/a.go b/test/typeparam/mutualimp.dir/a.go
new file mode 100644
index 0000000000..56ca57cea5
--- /dev/null
+++ b/test/typeparam/mutualimp.dir/a.go
@@ -0,0 +1,11 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type X int
+func (x X) M() X { return x }
+
+func F[T interface{ M() U }, U interface{ M() T }]() {}
+func G() { F[X, X]() }
diff --git a/test/typeparam/mutualimp.dir/b.go b/test/typeparam/mutualimp.dir/b.go
new file mode 100644
index 0000000000..83cc3af283
--- /dev/null
+++ b/test/typeparam/mutualimp.dir/b.go
@@ -0,0 +1,12 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package b
+
+import "./a"
+
+func H() {
+ a.F[a.X, a.X]()
+ a.G()
+}
diff --git a/test/typeparam/mutualimp.go b/test/typeparam/mutualimp.go
new file mode 100644
index 0000000000..87b4ff46c1
--- /dev/null
+++ b/test/typeparam/mutualimp.go
@@ -0,0 +1,7 @@
+// compiledir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/ordered.go b/test/typeparam/ordered.go
index 448db68bb5..699505ec75 100644
--- a/test/typeparam/ordered.go
+++ b/test/typeparam/ordered.go
@@ -13,10 +13,10 @@ import (
)
type Ordered interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64,
- string
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~string
}
type orderedSlice[Elem Ordered] []Elem
diff --git a/test/typeparam/orderedmap.go b/test/typeparam/orderedmap.go
index db1b374267..6a895bd396 100644
--- a/test/typeparam/orderedmap.go
+++ b/test/typeparam/orderedmap.go
@@ -15,10 +15,10 @@ import (
)
type Ordered interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64,
- string
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~string
}
// _Map is an ordered map.
diff --git a/test/typeparam/orderedmapsimp.dir/a.go b/test/typeparam/orderedmapsimp.dir/a.go
new file mode 100644
index 0000000000..37fc3e79b9
--- /dev/null
+++ b/test/typeparam/orderedmapsimp.dir/a.go
@@ -0,0 +1,226 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+import (
+ "context"
+ "runtime"
+)
+
+type Ordered interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~string
+}
+
+// Map is an ordered map.
+type Map[K, V any] struct {
+ root *node[K, V]
+ compare func(K, K) int
+}
+
+// node is the type of a node in the binary tree.
+type node[K, V any] struct {
+ key K
+ val V
+ left, right *node[K, V]
+}
+
+// New returns a new map. It takes a comparison function that compares two
+// keys and returns < 0 if the first is less, == 0 if they are equal,
+// > 0 if the first is greater.
+func New[K, V any](compare func(K, K) int) *Map[K, V] {
+ return &Map[K, V]{compare: compare}
+}
+
+// NewOrdered returns a new map whose key is an ordered type.
+// This is like New, but does not require providing a compare function.
+// The map compare function uses the obvious key ordering.
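+//
+// A minimal usage sketch, using only names defined in this file:
+//
+//	m := NewOrdered[string, int]()
+//	m.Insert("b", 2)
+//	v, ok := m.Find("b") // v == 2, ok == true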
+func NewOrdered[K Ordered, V any]() *Map[K, V] {
+ return New[K, V](func(k1, k2 K) int {
+ switch {
+ case k1 < k2:
+ return -1
+ case k1 > k2:
+ return 1
+ default:
+ return 0
+ }
+ })
+}
+
+// find looks up key in the map, returning either a pointer to the slot of the
+// node holding key, or a pointer to the slot where a node would go.
+func (m *Map[K, V]) find(key K) **node[K, V] {
+ pn := &m.root
+ for *pn != nil {
+ switch cmp := m.compare(key, (*pn).key); {
+ case cmp < 0:
+ pn = &(*pn).left
+ case cmp > 0:
+ pn = &(*pn).right
+ default:
+ return pn
+ }
+ }
+ return pn
+}
+
+// Insert inserts a new key/value into the map.
+// If the key is already present, the value is replaced.
+// Reports whether this is a new key.
+func (m *Map[K, V]) Insert(key K, val V) bool {
+ pn := m.find(key)
+ if *pn != nil {
+ (*pn).val = val
+ return false
+ }
+ *pn = &node[K, V]{key: key, val: val}
+ return true
+}
+
+// Find returns the value associated with a key, or the zero value
+// if not present. The second result reports whether the key was found.
+func (m *Map[K, V]) Find(key K) (V, bool) {
+ pn := m.find(key)
+ if *pn == nil {
+ var zero V
+ return zero, false
+ }
+ return (*pn).val, true
+}
+
+// keyValue is a pair of key and value used while iterating.
+type keyValue[K, V any] struct {
+ key K
+ val V
+}
+
+// Iterate returns an iterator that traverses the map.
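+// For example (sketch):
+//
+//	it := m.Iterate()
+//	for k, v, ok := it.Next(); ok; k, v, ok = it.Next() {
+//		// use k and v
+//	}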
+func (m *Map[K, V]) Iterate() *Iterator[K, V] {
+ sender, receiver := Ranger[keyValue[K, V]]()
+ var f func(*node[K, V]) bool
+ f = func(n *node[K, V]) bool {
+ if n == nil {
+ return true
+ }
+ // Stop the traversal if Send fails, which means that
+ // nothing is listening to the receiver.
+ return f(n.left) &&
+ sender.Send(context.Background(), keyValue[K, V]{n.key, n.val}) &&
+ f(n.right)
+ }
+ go func() {
+ f(m.root)
+ sender.Close()
+ }()
+ return &Iterator[K, V]{receiver}
+}
+
+// Iterator is used to iterate over the map.
+type Iterator[K, V any] struct {
+ r *Receiver[keyValue[K, V]]
+}
+
+// Next returns the next key and value pair, and a boolean that reports
+// whether they are valid. If not valid, we have reached the end of the map.
+func (it *Iterator[K, V]) Next() (K, V, bool) {
+ keyval, ok := it.r.Next(context.Background())
+ if !ok {
+ var zerok K
+ var zerov V
+ return zerok, zerov, false
+ }
+ return keyval.key, keyval.val, true
+}
+
+// SliceEqual reports whether two slices are equal: the same length and all
+// elements equal. All floating point NaNs are considered equal.
+func SliceEqual[Elem comparable](s1, s2 []Elem) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if v1 != v2 {
+ isNaN := func(f Elem) bool { return f != f }
+ if !isNaN(v1) || !isNaN(v2) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Ranger returns a Sender and a Receiver. The Receiver provides a
+// Next method to retrieve values. The Sender provides a Send method
+// to send values and a Close method to stop sending values. The Next
+// method indicates when the Sender has been closed, and the Send
+// method indicates when the Receiver has been freed.
+//
+// This is a convenient way to exit a goroutine sending values when
+// the receiver stops reading them.
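+//
+// A minimal usage sketch, using only names defined in this file:
+//
+//	s, r := Ranger[int]()
+//	go func() {
+//		s.Send(context.Background(), 1)
+//		s.Close()
+//	}()
+//	v, ok := r.Next(context.Background()) // v == 1, ok == true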
+func Ranger[Elem any]() (*Sender[Elem], *Receiver[Elem]) {
+ c := make(chan Elem)
+ d := make(chan struct{})
+ s := &Sender[Elem]{
+ values: c,
+ done: d,
+ }
+ r := &Receiver[Elem] {
+ values: c,
+ done: d,
+ }
+ runtime.SetFinalizer(r, (*Receiver[Elem]).finalize)
+ return s, r
+}
+
+// A Sender is used to send values to a Receiver.
+type Sender[Elem any] struct {
+ values chan<- Elem
+ done <-chan struct{}
+}
+
+// Send sends a value to the receiver. It reports whether the value was sent.
+// The value will not be sent if the context is closed or the receiver
+// is freed.
+func (s *Sender[Elem]) Send(ctx context.Context, v Elem) bool {
+ select {
+ case <-ctx.Done():
+ return false
+ case s.values <- v:
+ return true
+ case <-s.done:
+ return false
+ }
+}
+
+// Close tells the receiver that no more values will arrive.
+// After Close is called, the Sender may no longer be used.
+func (s *Sender[Elem]) Close() {
+ close(s.values)
+}
+
+// A Receiver receives values from a Sender.
+type Receiver[Elem any] struct {
+ values <-chan Elem
+ done chan<- struct{}
+}
+
+// Next returns the next value from the channel. The bool result indicates
+// whether the value is valid.
+func (r *Receiver[Elem]) Next(ctx context.Context) (v Elem, ok bool) {
+ select {
+ case <-ctx.Done():
+ case v, ok = <-r.values:
+ }
+ return v, ok
+}
+
+// finalize is a finalizer for the receiver.
+func (r *Receiver[Elem]) finalize() {
+ close(r.done)
+}
diff --git a/test/typeparam/orderedmapsimp.dir/main.go b/test/typeparam/orderedmapsimp.dir/main.go
new file mode 100644
index 0000000000..ac4cee6a78
--- /dev/null
+++ b/test/typeparam/orderedmapsimp.dir/main.go
@@ -0,0 +1,64 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "bytes"
+ "fmt"
+)
+
+func TestMap() {
+ m := a.New[[]byte, int](bytes.Compare)
+
+ if _, found := m.Find([]byte("a")); found {
+ panic(fmt.Sprintf("unexpectedly found %q in empty map", []byte("a")))
+ }
+
+ for _, c := range []int{ 'a', 'c', 'b' } {
+ if !m.Insert([]byte(string(c)), c) {
+ panic(fmt.Sprintf("key %q unexpectedly already present", []byte(string(c))))
+ }
+ }
+ if m.Insert([]byte("c"), 'x') {
+ panic(fmt.Sprintf("key %q unexpectedly not present", []byte("c")))
+ }
+
+ if v, found := m.Find([]byte("a")); !found {
+ panic(fmt.Sprintf("did not find %q", []byte("a")))
+ } else if v != 'a' {
+ panic(fmt.Sprintf("key %q returned wrong value %c, expected %c", []byte("a"), v, 'a'))
+ }
+ if v, found := m.Find([]byte("c")); !found {
+ panic(fmt.Sprintf("did not find %q", []byte("c")))
+ } else if v != 'x' {
+ panic(fmt.Sprintf("key %q returned wrong value %c, expected %c", []byte("c"), v, 'x'))
+ }
+
+ if _, found := m.Find([]byte("d")); found {
+ panic(fmt.Sprintf("unexpectedly found %q", []byte("d")))
+ }
+
+ gather := func(it *a.Iterator[[]byte, int]) []int {
+ var r []int
+ for {
+ _, v, ok := it.Next()
+ if !ok {
+ return r
+ }
+ r = append(r, v)
+ }
+ }
+ got := gather(m.Iterate())
+ want := []int{'a', 'b', 'x'}
+ if !a.SliceEqual(got, want) {
+ panic(fmt.Sprintf("Iterate returned %v, want %v", got, want))
+ }
+
+}
+
+func main() {
+ TestMap()
+}
diff --git a/test/typeparam/orderedmapsimp.go b/test/typeparam/orderedmapsimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/orderedmapsimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/pair.go b/test/typeparam/pair.go
index 7faf083c89..57742022b1 100644
--- a/test/typeparam/pair.go
+++ b/test/typeparam/pair.go
@@ -24,6 +24,7 @@ func main() {
if got, want := unsafe.Sizeof(p.f2), uintptr(8); got != want {
panic(fmt.Sprintf("unexpected f2 size == %d, want %d", got, want))
}
+
type mypair struct { f1 int32; f2 int64 }
mp := mypair(p)
if mp.f1 != 1 || mp.f2 != 2 {
diff --git a/test/typeparam/pairimp.dir/a.go b/test/typeparam/pairimp.dir/a.go
new file mode 100644
index 0000000000..27b2412961
--- /dev/null
+++ b/test/typeparam/pairimp.dir/a.go
@@ -0,0 +1,10 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Pair[F1, F2 any] struct {
+ Field1 F1
+ Field2 F2
+}
diff --git a/test/typeparam/pairimp.dir/main.go b/test/typeparam/pairimp.dir/main.go
new file mode 100644
index 0000000000..fc2face81d
--- /dev/null
+++ b/test/typeparam/pairimp.dir/main.go
@@ -0,0 +1,27 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+ "unsafe"
+)
+
+func main() {
+ p := a.Pair[int32, int64]{1, 2}
+ if got, want := unsafe.Sizeof(p.Field1), uintptr(4); got != want {
+ panic(fmt.Sprintf("unexpected f1 size == %d, want %d", got, want))
+ }
+ if got, want := unsafe.Sizeof(p.Field2), uintptr(8); got != want {
+ panic(fmt.Sprintf("unexpected f2 size == %d, want %d", got, want))
+ }
+
+ type mypair struct { Field1 int32; Field2 int64 }
+ mp := mypair(p)
+ if mp.Field1 != 1 || mp.Field2 != 2 {
+ panic(fmt.Sprintf("mp == %#v, want %#v", mp, mypair{1, 2}))
+ }
+}
diff --git a/test/typeparam/pairimp.go b/test/typeparam/pairimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/pairimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/setsimp.dir/a.go b/test/typeparam/setsimp.dir/a.go
new file mode 100644
index 0000000000..92449ce956
--- /dev/null
+++ b/test/typeparam/setsimp.dir/a.go
@@ -0,0 +1,128 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+// SliceEqual reports whether two slices are equal: the same length and all
+// elements equal. All floating point NaNs are considered equal.
+func SliceEqual[Elem comparable](s1, s2 []Elem) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if v1 != v2 {
+ isNaN := func(f Elem) bool { return f != f }
+ if !isNaN(v1) || !isNaN(v2) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// A Set is a set of elements of some type.
+type Set[Elem comparable] struct {
+ m map[Elem]struct{}
+}
+
+// Make makes a new set.
+func Make[Elem comparable]() Set[Elem] {
+ return Set[Elem]{m: make(map[Elem]struct{})}
+}
+
+// Add adds an element to a set.
+func (s Set[Elem]) Add(v Elem) {
+ s.m[v] = struct{}{}
+}
+
+// Delete removes an element from a set. If the element is not present
+// in the set, this does nothing.
+func (s Set[Elem]) Delete(v Elem) {
+ delete(s.m, v)
+}
+
+// Contains reports whether v is in the set.
+func (s Set[Elem]) Contains(v Elem) bool {
+ _, ok := s.m[v]
+ return ok
+}
+
+// Len returns the number of elements in the set.
+func (s Set[Elem]) Len() int {
+ return len(s.m)
+}
+
+// Values returns the values in the set.
+// The values will be in an indeterminate order.
+func (s Set[Elem]) Values() []Elem {
+ r := make([]Elem, 0, len(s.m))
+ for v := range s.m {
+ r = append(r, v)
+ }
+ return r
+}
+
+// Equal reports whether two sets contain the same elements.
+func Equal[Elem comparable](s1, s2 Set[Elem]) bool {
+ if len(s1.m) != len(s2.m) {
+ return false
+ }
+ for v1 := range s1.m {
+ if !s2.Contains(v1) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy returns a copy of s.
+func (s Set[Elem]) Copy() Set[Elem] {
+ r := Set[Elem]{m: make(map[Elem]struct{}, len(s.m))}
+ for v := range s.m {
+ r.m[v] = struct{}{}
+ }
+ return r
+}
+
+// AddSet adds all the elements of s2 to s.
+func (s Set[Elem]) AddSet(s2 Set[Elem]) {
+ for v := range s2.m {
+ s.m[v] = struct{}{}
+ }
+}
+
+// SubSet removes all elements in s2 from s.
+// Values in s2 that are not in s are ignored.
+func (s Set[Elem]) SubSet(s2 Set[Elem]) {
+ for v := range s2.m {
+ delete(s.m, v)
+ }
+}
+
+// Intersect removes all elements from s that are not present in s2.
+// Values in s2 that are not in s are ignored.
+func (s Set[Elem]) Intersect(s2 Set[Elem]) {
+ for v := range s.m {
+ if !s2.Contains(v) {
+ delete(s.m, v)
+ }
+ }
+}
+
+// Iterate calls f on every element in the set.
+func (s Set[Elem]) Iterate(f func(Elem)) {
+ for v := range s.m {
+ f(v)
+ }
+}
+
+// Filter deletes any elements from s for which f returns false.
+func (s Set[Elem]) Filter(f func(Elem) bool) {
+ for v := range s.m {
+ if !f(v) {
+ delete(s.m, v)
+ }
+ }
+}
diff --git a/test/typeparam/setsimp.dir/main.go b/test/typeparam/setsimp.dir/main.go
new file mode 100644
index 0000000000..8fd1657143
--- /dev/null
+++ b/test/typeparam/setsimp.dir/main.go
@@ -0,0 +1,156 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+ "sort"
+)
+
+func TestSet() {
+ s1 := a.Make[int]()
+ if got := s1.Len(); got != 0 {
+ panic(fmt.Sprintf("Len of empty set = %d, want 0", got))
+ }
+ s1.Add(1)
+ s1.Add(1)
+ s1.Add(1)
+ if got := s1.Len(); got != 1 {
+ panic(fmt.Sprintf("(%v).Len() == %d, want 1", s1, got))
+ }
+ s1.Add(2)
+ s1.Add(3)
+ s1.Add(4)
+ if got := s1.Len(); got != 4 {
+ panic(fmt.Sprintf("(%v).Len() == %d, want 4", s1, got))
+ }
+ if !s1.Contains(1) {
+ panic(fmt.Sprintf("(%v).Contains(1) == false, want true", s1))
+ }
+ if s1.Contains(5) {
+ panic(fmt.Sprintf("(%v).Contains(5) == true, want false", s1))
+ }
+ vals := s1.Values()
+ sort.Ints(vals)
+ w1 := []int{1, 2, 3, 4}
+ if !a.SliceEqual(vals, w1) {
+ panic(fmt.Sprintf("(%v).Values() == %v, want %v", s1, vals, w1))
+ }
+}
+
+func TestEqual() {
+ s1 := a.Make[string]()
+ s2 := a.Make[string]()
+ if !a.Equal(s1, s2) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = false, want true", s1, s2))
+ }
+ s1.Add("hello")
+ s1.Add("world")
+ if got := s1.Len(); got != 2 {
+ panic(fmt.Sprintf("(%v).Len() == %d, want 2", s1, got))
+ }
+ if a.Equal(s1, s2) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = true, want false", s1, s2))
+ }
+}
+
+func TestCopy() {
+ s1 := a.Make[float64]()
+ s1.Add(0)
+ s2 := s1.Copy()
+ if !a.Equal(s1, s2) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = false, want true", s1, s2))
+ }
+ s1.Add(1)
+ if a.Equal(s1, s2) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = true, want false", s1, s2))
+ }
+}
+
+func TestAddSet() {
+ s1 := a.Make[int]()
+ s1.Add(1)
+ s1.Add(2)
+ s2 := a.Make[int]()
+ s2.Add(2)
+ s2.Add(3)
+ s1.AddSet(s2)
+ if got := s1.Len(); got != 3 {
+ panic(fmt.Sprintf("(%v).Len() == %d, want 3", s1, got))
+ }
+ s2.Add(1)
+ if !a.Equal(s1, s2) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = false, want true", s1, s2))
+ }
+}
+
+func TestSubSet() {
+ s1 := a.Make[int]()
+ s1.Add(1)
+ s1.Add(2)
+ s2 := a.Make[int]()
+ s2.Add(2)
+ s2.Add(3)
+ s1.SubSet(s2)
+ if got := s1.Len(); got != 1 {
+ panic(fmt.Sprintf("(%v).Len() == %d, want 1", s1, got))
+ }
+ if vals, want := s1.Values(), []int{1}; !a.SliceEqual(vals, want) {
+ panic(fmt.Sprintf("after SubSet got %v, want %v", vals, want))
+ }
+}
+
+func TestIntersect() {
+ s1 := a.Make[int]()
+ s1.Add(1)
+ s1.Add(2)
+ s2 := a.Make[int]()
+ s2.Add(2)
+ s2.Add(3)
+ s1.Intersect(s2)
+ if got := s1.Len(); got != 1 {
+ panic(fmt.Sprintf("(%v).Len() == %d, want 1", s1, got))
+ }
+ if vals, want := s1.Values(), []int{2}; !a.SliceEqual(vals, want) {
+ panic(fmt.Sprintf("after Intersect got %v, want %v", vals, want))
+ }
+}
+
+func TestIterate() {
+ s1 := a.Make[int]()
+ s1.Add(1)
+ s1.Add(2)
+ s1.Add(3)
+ s1.Add(4)
+ tot := 0
+ s1.Iterate(func(i int) { tot += i })
+ if tot != 10 {
+ panic(fmt.Sprintf("total of %v == %d, want 10", s1, tot))
+ }
+}
+
+func TestFilter() {
+ s1 := a.Make[int]()
+ s1.Add(1)
+ s1.Add(2)
+ s1.Add(3)
+ s1.Filter(func(v int) bool { return v%2 == 0 })
+ if vals, want := s1.Values(), []int{2}; !a.SliceEqual(vals, want) {
+ panic(fmt.Sprintf("after Filter got %v, want %v", vals, want))
+ }
+
+}
+
+func main() {
+ TestSet()
+ TestEqual()
+ TestCopy()
+ TestAddSet()
+ TestSubSet()
+ TestIntersect()
+ TestIterate()
+ TestFilter()
+}
diff --git a/test/typeparam/setsimp.go b/test/typeparam/setsimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/setsimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/settable.go b/test/typeparam/settable.go
index 588166da85..d0b831b533 100644
--- a/test/typeparam/settable.go
+++ b/test/typeparam/settable.go
@@ -13,13 +13,13 @@ import (
// Various implementations of fromStrings().
-type _Setter[B any] interface {
+type Setter[B any] interface {
Set(string)
type *B
}
// Takes two type parameters where PT = *T
-func fromStrings1[T any, PT _Setter[T]](s []string) []T {
+func fromStrings1[T any, PT Setter[T]](s []string) []T {
result := make([]T, len(s))
for i, v := range s {
// The type of &result[i] is *T which is in the type list
@@ -31,7 +31,7 @@ func fromStrings1[T any, PT _Setter[T]](s []string) []T {
return result
}
-func fromStrings1a[T any, PT _Setter[T]](s []string) []PT {
+func fromStrings1a[T any, PT Setter[T]](s []string) []PT {
result := make([]PT, len(s))
for i, v := range s {
// The type new(T) is *T which is in the type list
@@ -54,12 +54,12 @@ func fromStrings2[T any](s []string, set func(*T, string)) []T {
return results
}
-type _Setter2 interface {
+type Setter2 interface {
Set(string)
}
// Takes only one type parameter, but causes a panic (see below)
-func fromStrings3[T _Setter2](s []string) []T {
+func fromStrings3[T Setter2](s []string) []T {
results := make([]T, len(s))
for i, v := range s {
// Panics if T is a pointer type because receiver is T(nil).
diff --git a/test/typeparam/sliceimp.dir/a.go b/test/typeparam/sliceimp.dir/a.go
new file mode 100644
index 0000000000..61b1b17a98
--- /dev/null
+++ b/test/typeparam/sliceimp.dir/a.go
@@ -0,0 +1,141 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Ordered interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~string
+}
+
+// Max returns the maximum of two values of some ordered type.
+func Max[T Ordered](a, b T) T {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+// Min returns the minimum of two values of some ordered type.
+func Min[T Ordered](a, b T) T {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+// Equal reports whether two slices are equal: the same length and all
+// elements equal. All floating point NaNs are considered equal.
+func Equal[Elem comparable](s1, s2 []Elem) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if v1 != v2 {
+ isNaN := func(f Elem) bool { return f != f }
+ if !isNaN(v1) || !isNaN(v2) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// EqualFn reports whether two slices are equal using a comparison
+// function on each element.
+func EqualFn[Elem any](s1, s2 []Elem, eq func(Elem, Elem) bool) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if !eq(v1, v2) {
+ return false
+ }
+ }
+ return true
+}
+
+// Map turns a []Elem1 to a []Elem2 using a mapping function.
+func Map[Elem1, Elem2 any](s []Elem1, f func(Elem1) Elem2) []Elem2 {
+ r := make([]Elem2, len(s))
+ for i, v := range s {
+ r[i] = f(v)
+ }
+ return r
+}
+
+// Reduce reduces a []Elem1 to a single value of type Elem2 using
+// a reduction function.
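+// For example (sketch):
+//
+//	total := Reduce([]int{1, 2, 3}, 0, func(acc, x int) int { return acc + x })
+//	// total == 6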
+func Reduce[Elem1, Elem2 any](s []Elem1, initializer Elem2, f func(Elem2, Elem1) Elem2) Elem2 {
+ r := initializer
+ for _, v := range s {
+ r = f(r, v)
+ }
+ return r
+}
+
+// Filter filters values from a slice using a filter function.
+func Filter[Elem any](s []Elem, f func(Elem) bool) []Elem {
+ var r []Elem
+ for _, v := range s {
+ if f(v) {
+ r = append(r, v)
+ }
+ }
+ return r
+}
+
+// SliceMax returns the maximum element in a slice of some ordered type.
+// If the slice is empty it returns the zero value of the element type.
+func SliceMax[Elem Ordered](s []Elem) Elem {
+ if len(s) == 0 {
+ var zero Elem
+ return zero
+ }
+ return Reduce(s[1:], s[0], Max[Elem])
+}
+
+// SliceMin returns the minimum element in a slice of some ordered type.
+// If the slice is empty it returns the zero value of the element type.
+func SliceMin[Elem Ordered](s []Elem) Elem {
+ if len(s) == 0 {
+ var zero Elem
+ return zero
+ }
+ return Reduce(s[1:], s[0], Min[Elem])
+}
+
+// Append adds values to the end of a slice, returning a new slice.
+// This is like the predeclared append function; it's an example
+// of how to write it using generics. We used to write code like
+// this before append was added to the language, but we had to write
+// a separate copy for each type.
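+//
+// For example (sketch):
+//
+//	s := Append([]int{1, 2}, 3, 4) // s == []int{1, 2, 3, 4}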
+func Append[T any](s []T, t ...T) []T {
+ lens := len(s)
+ tot := lens + len(t)
+ if tot <= cap(s) {
+ s = s[:tot]
+ } else {
+ news := make([]T, tot, tot + tot/2)
+ Copy(news, s)
+ s = news
+ }
+ Copy(s[lens:tot], t)
+ return s
+}
+
+// Copy copies values from t to s, stopping when either slice is full,
+// returning the number of values copied. This is like the predeclared
+// copy function; it's an example of how to write it using generics.
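+//
+// For example (sketch):
+//
+//	n := Copy(make([]int, 2), []int{7, 8, 9}) // n == 2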
+func Copy[T any](s, t []T) int {
+ i := 0
+ for ; i < len(s) && i < len(t); i++ {
+ s[i] = t[i]
+ }
+ return i
+}
diff --git a/test/typeparam/sliceimp.dir/main.go b/test/typeparam/sliceimp.dir/main.go
new file mode 100644
index 0000000000..2d4d3b2831
--- /dev/null
+++ b/test/typeparam/sliceimp.dir/main.go
@@ -0,0 +1,179 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+ "math"
+ "strings"
+)
+
+type Integer interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
+}
+
+func TestEqual() {
+ s1 := []int{1, 2, 3}
+ if !a.Equal(s1, s1) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = false, want true", s1, s1))
+ }
+ s2 := []int{1, 2, 3}
+ if !a.Equal(s1, s2) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = false, want true", s1, s2))
+ }
+ s2 = append(s2, 4)
+ if a.Equal(s1, s2) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = true, want false", s1, s2))
+ }
+
+ s3 := []float64{1, 2, math.NaN()}
+ if !a.Equal(s3, s3) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = false, want true", s3, s3))
+ }
+
+ if a.Equal(s1, nil) {
+ panic(fmt.Sprintf("a.Equal(%v, nil) = true, want false", s1))
+ }
+ if a.Equal(nil, s1) {
+ panic(fmt.Sprintf("a.Equal(nil, %v) = true, want false", s1))
+ }
+ if !a.Equal(s1[:0], nil) {
+ panic(fmt.Sprintf("a.Equal(%v, nil = false, want true", s1[:0]))
+ }
+}
+
+func offByOne[Elem Integer](a, b Elem) bool {
+ return a == b + 1 || a == b - 1
+}
+
+func TestEqualFn() {
+ s1 := []int{1, 2, 3}
+ s2 := []int{2, 3, 4}
+ if a.EqualFn(s1, s1, offByOne[int]) {
+ panic(fmt.Sprintf("a.EqualFn(%v, %v, offByOne) = true, want false", s1, s1))
+ }
+ if !a.EqualFn(s1, s2, offByOne[int]) {
+ panic(fmt.Sprintf("a.EqualFn(%v, %v, offByOne) = false, want true", s1, s2))
+ }
+
+ if !a.EqualFn(s1[:0], nil, offByOne[int]) {
+ panic(fmt.Sprintf("a.EqualFn(%v, nil, offByOne) = false, want true", s1[:0]))
+ }
+
+ s3 := []string{"a", "b", "c"}
+ s4 := []string{"A", "B", "C"}
+ if !a.EqualFn(s3, s4, strings.EqualFold) {
+ panic(fmt.Sprintf("a.EqualFn(%v, %v, strings.EqualFold) = false, want true", s3, s4))
+ }
+}
+
+func TestMap() {
+ s1 := []int{1, 2, 3}
+ s2 := a.Map(s1, func(i int) float64 { return float64(i) * 2.5 })
+ if want := []float64{2.5, 5, 7.5}; !a.Equal(s2, want) {
+ panic(fmt.Sprintf("a.Map(%v, ...) = %v, want %v", s1, s2, want))
+ }
+
+ s3 := []string{"Hello", "World"}
+ s4 := a.Map(s3, strings.ToLower)
+ if want := []string{"hello", "world"}; !a.Equal(s4, want) {
+ panic(fmt.Sprintf("a.Map(%v, strings.ToLower) = %v, want %v", s3, s4, want))
+ }
+
+ s5 := a.Map(nil, func(i int) int { return i })
+ if len(s5) != 0 {
+ panic(fmt.Sprintf("a.Map(nil, identity) = %v, want empty slice", s5))
+ }
+}
+
+func TestReduce() {
+ s1 := []int{1, 2, 3}
+ r := a.Reduce(s1, 0, func(f float64, i int) float64 { return float64(i) * 2.5 + f })
+ if want := 15.0; r != want {
+ panic(fmt.Sprintf("a.Reduce(%v, 0, ...) = %v, want %v", s1, r, want))
+ }
+
+ if got := a.Reduce(nil, 0, func(i, j int) int { return i + j}); got != 0 {
+ panic(fmt.Sprintf("a.Reduce(nil, 0, add) = %v, want 0", got))
+ }
+}
+
+func TestFilter() {
+ s1 := []int{1, 2, 3}
+ s2 := a.Filter(s1, func(i int) bool { return i%2 == 0 })
+ if want := []int{2}; !a.Equal(s2, want) {
+ panic(fmt.Sprintf("a.Filter(%v, even) = %v, want %v", s1, s2, want))
+ }
+
+ if s3 := a.Filter(s1[:0], func(i int) bool { return true }); len(s3) > 0 {
+ panic(fmt.Sprintf("a.Filter(%v, identity) = %v, want empty slice", s1[:0], s3))
+ }
+}
+
+func TestMax() {
+ s1 := []int{1, 2, 3, -5}
+ if got, want := a.SliceMax(s1), 3; got != want {
+ panic(fmt.Sprintf("a.Max(%v) = %d, want %d", s1, got, want))
+ }
+
+ s2 := []string{"aaa", "a", "aa", "aaaa"}
+ if got, want := a.SliceMax(s2), "aaaa"; got != want {
+ panic(fmt.Sprintf("a.Max(%v) = %q, want %q", s2, got, want))
+ }
+
+ if got, want := a.SliceMax(s2[:0]), ""; got != want {
+ panic(fmt.Sprintf("a.Max(%v) = %q, want %q", s2[:0], got, want))
+ }
+}
+
+func TestMin() {
+ s1 := []int{1, 2, 3, -5}
+ if got, want := a.SliceMin(s1), -5; got != want {
+ panic(fmt.Sprintf("a.Min(%v) = %d, want %d", s1, got, want))
+ }
+
+ s2 := []string{"aaa", "a", "aa", "aaaa"}
+ if got, want := a.SliceMin(s2), "a"; got != want {
+ panic(fmt.Sprintf("a.Min(%v) = %q, want %q", s2, got, want))
+ }
+
+ if got, want := a.SliceMin(s2[:0]), ""; got != want {
+ panic(fmt.Sprintf("a.Min(%v) = %q, want %q", s2[:0], got, want))
+ }
+}
+
+func TestAppend() {
+ s := []int{1, 2, 3}
+ s = a.Append(s, 4, 5, 6)
+ want := []int{1, 2, 3, 4, 5, 6}
+ if !a.Equal(s, want) {
+ panic(fmt.Sprintf("after a.Append got %v, want %v", s, want))
+ }
+}
+
+func TestCopy() {
+ s1 := []int{1, 2, 3}
+ s2 := []int{4, 5}
+ if got := a.Copy(s1, s2); got != 2 {
+ panic(fmt.Sprintf("a.Copy returned %d, want 2", got))
+ }
+ want := []int{4, 5, 3}
+ if !a.Equal(s1, want) {
+ panic(fmt.Sprintf("after a.Copy got %v, want %v", s1, want))
+ }
+}
+func main() {
+ TestEqual()
+ TestEqualFn()
+ TestMap()
+ TestReduce()
+ TestFilter()
+ TestMax()
+ TestMin()
+ TestAppend()
+ TestCopy()
+}
diff --git a/test/typeparam/sliceimp.go b/test/typeparam/sliceimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/sliceimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/slices.go b/test/typeparam/slices.go
index 149199eb64..50783a5439 100644
--- a/test/typeparam/slices.go
+++ b/test/typeparam/slices.go
@@ -15,15 +15,15 @@ import (
)
type Ordered interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64,
- string
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~string
}
type Integer interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
}
// Max returns the maximum of two values of some ordered type.
diff --git a/test/typeparam/smallest.go b/test/typeparam/smallest.go
index 63dd9ddb70..3fead6a067 100644
--- a/test/typeparam/smallest.go
+++ b/test/typeparam/smallest.go
@@ -11,13 +11,13 @@ import (
)
type Ordered interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64,
- string
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~string
}
-func smallest[T Ordered](s []T) T {
+func Smallest[T Ordered](s []T) T {
r := s[0] // panics if slice is empty
for _, v := range s[1:] {
if v < r {
@@ -32,11 +32,11 @@ func main() {
vec2 := []string{"abc", "def", "aaa"}
want1 := 1.2
- if got := smallest(vec1); got != want1 {
+ if got := Smallest(vec1); got != want1 {
panic(fmt.Sprintf("got %d, want %d", got, want1))
}
want2 := "aaa"
- if got := smallest(vec2); got != want2 {
+ if got := Smallest(vec2); got != want2 {
panic(fmt.Sprintf("got %d, want %d", got, want2))
}
}
diff --git a/test/typeparam/smoketest.go b/test/typeparam/smoketest.go
index b7d6201b2c..d92e02713d 100644
--- a/test/typeparam/smoketest.go
+++ b/test/typeparam/smoketest.go
@@ -37,7 +37,7 @@ func (x T2[P1, P2, P3]) m() {}
type _ interface {
m1()
m2()
- type int, float32, string
+ int | float32 | string
m3()
}
diff --git a/test/typeparam/stringable.go b/test/typeparam/stringable.go
index 9340a3b10a..20da012cb8 100644
--- a/test/typeparam/stringable.go
+++ b/test/typeparam/stringable.go
@@ -16,11 +16,11 @@ type Stringer interface {
String() string
}
-// stringableList is a slice of some type, where the type
+// StringableList is a slice of some type, where the type
// must have a String method.
-type stringableList[T Stringer] []T
+type StringableList[T Stringer] []T
-func (s stringableList[T]) String() string {
+func (s StringableList[T]) String() string {
var sb strings.Builder
for i, v := range s {
if i > 0 {
@@ -38,7 +38,7 @@ func (a myint) String() string {
}
func main() {
- v := stringableList[myint]{ myint(1), myint(2) }
+ v := StringableList[myint]{ myint(1), myint(2) }
if got, want := v.String(), "1, 2"; got != want {
panic(fmt.Sprintf("got %s, want %s", got, want))
diff --git a/test/typeparam/stringerimp.dir/a.go b/test/typeparam/stringerimp.dir/a.go
new file mode 100644
index 0000000000..3f70937ff5
--- /dev/null
+++ b/test/typeparam/stringerimp.dir/a.go
@@ -0,0 +1,16 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Stringer interface {
+ String() string
+}
+
+func Stringify[T Stringer](s []T) (ret []string) {
+ for _, v := range s {
+ ret = append(ret, v.String())
+ }
+ return ret
+}
diff --git a/test/typeparam/stringerimp.dir/main.go b/test/typeparam/stringerimp.dir/main.go
new file mode 100644
index 0000000000..e30bdf1abe
--- /dev/null
+++ b/test/typeparam/stringerimp.dir/main.go
@@ -0,0 +1,38 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+ "reflect"
+ "strconv"
+)
+
+type myint int
+
+func (i myint) String() string {
+ return strconv.Itoa(int(i))
+}
+
+func main() {
+ x := []myint{myint(1), myint(2), myint(3)}
+
+ got := a.Stringify(x)
+ want := []string{"1", "2", "3"}
+ if !reflect.DeepEqual(got, want) {
+ panic(fmt.Sprintf("got %s, want %s", got, want))
+ }
+
+ m1 := myint(1)
+ m2 := myint(2)
+ m3 := myint(3)
+ y := []*myint{&m1, &m2, &m3}
+ got2 := a.Stringify(y)
+ want2 := []string{"1", "2", "3"}
+ if !reflect.DeepEqual(got2, want2) {
+ panic(fmt.Sprintf("got %s, want %s", got2, want2))
+ }
+}
diff --git a/test/typeparam/stringerimp.go b/test/typeparam/stringerimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/stringerimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/struct.go b/test/typeparam/struct.go
index 98f0fcd888..093f6935e6 100644
--- a/test/typeparam/struct.go
+++ b/test/typeparam/struct.go
@@ -10,40 +10,40 @@ import (
"fmt"
)
-type _E[T any] struct {
+type E[T any] struct {
v T
}
-type _S1 struct {
- _E[int]
+type S1 struct {
+ E[int]
v string
}
-type _Eint = _E[int]
-type _Ebool = _E[bool]
+type Eint = E[int]
+type Ebool = E[bool]
-type _S2 struct {
- _Eint
- _Ebool
+type S2 struct {
+ Eint
+ Ebool
v string
}
-type _S3 struct {
- *_E[int]
+type S3 struct {
+ *E[int]
}
func main() {
- s1 := _S1{_Eint{2}, "foo"}
- if got, want := s1._E.v, 2; got != want {
+ s1 := S1{Eint{2}, "foo"}
+ if got, want := s1.E.v, 2; got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
- s2 := _S2{_Eint{3}, _Ebool{true}, "foo"}
- if got, want := s2._Eint.v, 3; got != want {
+ s2 := S2{Eint{3}, Ebool{true}, "foo"}
+ if got, want := s2.Eint.v, 3; got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
- var s3 _S3
- s3._E = &_Eint{4}
- if got, want := s3._E.v, 4; got != want {
+ var s3 S3
+ s3.E = &Eint{4}
+ if got, want := s3.E.v, 4; got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
}
diff --git a/test/typeparam/sum.go b/test/typeparam/sum.go
index f0f5e6aa07..53e6face11 100644
--- a/test/typeparam/sum.go
+++ b/test/typeparam/sum.go
@@ -10,7 +10,7 @@ import (
"fmt"
)
-func sum[T interface{ type int, float64 }](vec []T) T {
+func Sum[T interface{ int | float64 }](vec []T) T {
var sum T
for _, elt := range vec {
sum = sum + elt
@@ -18,7 +18,7 @@ func sum[T interface{ type int, float64 }](vec []T) T {
return sum
}
-func abs(f float64) float64 {
+func Abs(f float64) float64 {
if f < 0.0 {
return -f
}
@@ -28,23 +28,23 @@ func abs(f float64) float64 {
func main() {
vec1 := []int{3, 4}
vec2 := []float64{5.8, 9.6}
- got := sum[int](vec1)
+ got := Sum[int](vec1)
want := vec1[0] + vec1[1]
if got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
- got = sum(vec1)
+ got = Sum(vec1)
if want != got {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
fwant := vec2[0] + vec2[1]
- fgot := sum[float64](vec2)
- if abs(fgot - fwant) > 1e-10 {
+ fgot := Sum[float64](vec2)
+ if Abs(fgot - fwant) > 1e-10 {
panic(fmt.Sprintf("got %f, want %f", fgot, fwant))
}
- fgot = sum(vec2)
- if abs(fgot - fwant) > 1e-10 {
+ fgot = Sum(vec2)
+ if Abs(fgot - fwant) > 1e-10 {
panic(fmt.Sprintf("got %f, want %f", fgot, fwant))
}
}
diff --git a/test/typeparam/valimp.dir/a.go b/test/typeparam/valimp.dir/a.go
new file mode 100644
index 0000000000..5aa5ebfa97
--- /dev/null
+++ b/test/typeparam/valimp.dir/a.go
@@ -0,0 +1,32 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Value[T any] struct {
+ val T
+}
+
+// The noinline directive should survive across import, and prevent instantiations
+// of these functions from being inlined.
+
+//go:noinline
+func Get[T any](v *Value[T]) T {
+ return v.val
+}
+
+//go:noinline
+func Set[T any](v *Value[T], val T) {
+ v.val = val
+}
+
+//go:noinline
+func (v *Value[T]) Set(val T) {
+ v.val = val
+}
+
+//go:noinline
+func (v *Value[T]) Get() T {
+ return v.val
+}
diff --git a/test/typeparam/valimp.dir/main.go b/test/typeparam/valimp.dir/main.go
new file mode 100644
index 0000000000..925fb1e699
--- /dev/null
+++ b/test/typeparam/valimp.dir/main.go
@@ -0,0 +1,56 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+)
+
+func main() {
+ var v1 a.Value[int]
+
+ a.Set(&v1, 1)
+ if got, want := a.Get(&v1), 1; got != want {
+ panic(fmt.Sprintf("Get() == %d, want %d", got, want))
+ }
+ v1.Set(2)
+ if got, want := v1.Get(), 2; got != want {
+ panic(fmt.Sprintf("Get() == %d, want %d", got, want))
+ }
+ v1p := new(a.Value[int])
+ a.Set(v1p, 3)
+ if got, want := a.Get(v1p), 3; got != want {
+ panic(fmt.Sprintf("Get() == %d, want %d", got, want))
+ }
+
+ v1p.Set(4)
+ if got, want := v1p.Get(), 4; got != want {
+ panic(fmt.Sprintf("Get() == %d, want %d", got, want))
+ }
+
+ var v2 a.Value[string]
+ a.Set(&v2, "a")
+ if got, want := a.Get(&v2), "a"; got != want {
+ panic(fmt.Sprintf("Get() == %q, want %q", got, want))
+ }
+
+ v2.Set("b")
+ if got, want := a.Get(&v2), "b"; got != want {
+ panic(fmt.Sprintf("Get() == %q, want %q", got, want))
+ }
+
+ v2p := new(a.Value[string])
+ a.Set(v2p, "c")
+ if got, want := a.Get(v2p), "c"; got != want {
+ panic(fmt.Sprintf("Get() == %d, want %d", got, want))
+ }
+
+ v2p.Set("d")
+ if got, want := v2p.Get(), "d"; got != want {
+ panic(fmt.Sprintf("Get() == %d, want %d", got, want))
+ }
+}
+
diff --git a/test/typeparam/valimp.go b/test/typeparam/valimp.go
new file mode 100644
index 0000000000..76930e5e4f
--- /dev/null
+++ b/test/typeparam/valimp.go
@@ -0,0 +1,7 @@
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
diff --git a/test/typeparam/value.go b/test/typeparam/value.go
index 5dd7449d9c..6c6dabcf7c 100644
--- a/test/typeparam/value.go
+++ b/test/typeparam/value.go
@@ -12,7 +12,7 @@ type value[T any] struct {
val T
}
-func get[T2 any](v *value[T2]) T2 {
+func get[T any](v *value[T]) T {
return v.val
}
@@ -20,11 +20,11 @@ func set[T any](v *value[T], val T) {
v.val = val
}
-func (v *value[T2]) set(val T2) {
+func (v *value[T]) set(val T) {
v.val = val
}
-func (v *value[T2]) get() T2 {
+func (v *value[T]) get() T {
return v.val
}